2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
25 * The simplest mpeg encoder (well, it was the simplest!).
29 #include <math.h> //for PI
32 #include "mpegvideo.h"
36 #include "fastmemcpy.h"
42 #ifdef CONFIG_ENCODERS
43 static void encode_picture(MpegEncContext
*s
, int picture_number
);
44 #endif //CONFIG_ENCODERS
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext
*s
,
46 DCTELEM
*block
, int n
, int qscale
);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext
*s
,
48 DCTELEM
*block
, int n
, int qscale
);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext
*s
,
50 DCTELEM
*block
, int n
, int qscale
);
51 static void dct_unquantize_mpeg2_inter_c(MpegEncContext
*s
,
52 DCTELEM
*block
, int n
, int qscale
);
53 static void dct_unquantize_h263_intra_c(MpegEncContext
*s
,
54 DCTELEM
*block
, int n
, int qscale
);
55 static void dct_unquantize_h263_inter_c(MpegEncContext
*s
,
56 DCTELEM
*block
, int n
, int qscale
);
57 static void draw_edges_c(uint8_t *buf
, int wrap
, int width
, int height
, int w
);
58 #ifdef CONFIG_ENCODERS
59 static int dct_quantize_c(MpegEncContext
*s
, DCTELEM
*block
, int n
, int qscale
, int *overflow
);
60 static int dct_quantize_trellis_c(MpegEncContext
*s
, DCTELEM
*block
, int n
, int qscale
, int *overflow
);
61 static int dct_quantize_refine(MpegEncContext
*s
, DCTELEM
*block
, int16_t *weight
, DCTELEM
*orig
, int n
, int qscale
);
62 static int sse_mb(MpegEncContext
*s
);
63 static void denoise_dct_c(MpegEncContext
*s
, DCTELEM
*block
);
64 #endif //CONFIG_ENCODERS
67 extern int XVMC_field_start(MpegEncContext
*s
, AVCodecContext
*avctx
);
68 extern void XVMC_field_end(MpegEncContext
*s
);
69 extern void XVMC_decode_mb(MpegEncContext
*s
);
72 void (*draw_edges
)(uint8_t *buf
, int wrap
, int width
, int height
, int w
)= draw_edges_c
;
75 /* enable all paranoid tests for rounding, overflows, etc... */
81 /* for jpeg fast DCT */
/* Post-scaling factors for the AAN fast DCT, precomputed and scaled up by
   14 bits (Q14 fixed point).  Indexed in row-major raster order. */
static const uint16_t aanscales[64] = {
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
    21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
    19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
     8867, 12299, 11585, 10426,  8867,  6967,  4799,  2446,
     4520,  6270,  5906,  5315,  4520,  3552,  2446,  1247
};
/* Rounding table used when halving luma motion vectors to chroma
   resolution in H.263: maps (mvx & 15, mvy & 15) remainders to 0/1/2. */
static const uint8_t h263_chroma_roundtab[16] = {
    /* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
       0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
};
/* Default luma-to-chroma qscale mapping: the identity (chroma uses the
   same quantizer as luma).  Codecs such as H.263 with modified quant
   install their own non-linear table instead. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
    /* 0 ... 31: identity mapping */
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
};
106 #ifdef CONFIG_ENCODERS
107 static uint8_t (*default_mv_penalty
)[MAX_MV
*2+1]=NULL
;
108 static uint8_t default_fcode_tab
[MAX_MV
*2+1];
110 enum PixelFormat ff_yuv420p_list
[2]= {PIX_FMT_YUV420P
, -1};
112 static void convert_matrix(DSPContext
*dsp
, int (*qmat
)[64], uint16_t (*qmat16
)[2][64],
113 const uint16_t *quant_matrix
, int bias
, int qmin
, int qmax
)
117 for(qscale
=qmin
; qscale
<=qmax
; qscale
++){
119 if (dsp
->fdct
== ff_jpeg_fdct_islow
120 #ifdef FAAN_POSTSCALE
121 || dsp
->fdct
== ff_faandct
125 const int j
= dsp
->idct_permutation
[i
];
126 /* 16 <= qscale * quant_matrix[i] <= 7905 */
127 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
128 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
129 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
131 qmat
[qscale
][i
] = (int)((uint64_t_C(1) << QMAT_SHIFT
) /
132 (qscale
* quant_matrix
[j
]));
134 } else if (dsp
->fdct
== fdct_ifast
135 #ifndef FAAN_POSTSCALE
136 || dsp
->fdct
== ff_faandct
140 const int j
= dsp
->idct_permutation
[i
];
141 /* 16 <= qscale * quant_matrix[i] <= 7905 */
142 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
143 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
144 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
146 qmat
[qscale
][i
] = (int)((uint64_t_C(1) << (QMAT_SHIFT
+ 14)) /
147 (aanscales
[i
] * qscale
* quant_matrix
[j
]));
151 const int j
= dsp
->idct_permutation
[i
];
152 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
153 So 16 <= qscale * quant_matrix[i] <= 7905
154 so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
155 so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
157 qmat
[qscale
][i
] = (int)((uint64_t_C(1) << QMAT_SHIFT
) / (qscale
* quant_matrix
[j
]));
158 // qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
159 qmat16
[qscale
][0][i
] = (1 << QMAT_SHIFT_MMX
) / (qscale
* quant_matrix
[j
]);
161 if(qmat16
[qscale
][0][i
]==0 || qmat16
[qscale
][0][i
]==128*256) qmat16
[qscale
][0][i
]=128*256-1;
162 qmat16
[qscale
][1][i
]= ROUNDED_DIV(bias
<<(16-QUANT_BIAS_SHIFT
), qmat16
[qscale
][0][i
]);
168 static inline void update_qscale(MpegEncContext
*s
){
169 s
->qscale
= (s
->lambda
*139 + FF_LAMBDA_SCALE
*64) >> (FF_LAMBDA_SHIFT
+ 7);
170 s
->qscale
= clip(s
->qscale
, s
->avctx
->qmin
, s
->avctx
->qmax
);
172 s
->lambda2
= (s
->lambda
*s
->lambda
+ FF_LAMBDA_SCALE
/2) >> FF_LAMBDA_SHIFT
;
174 #endif //CONFIG_ENCODERS
176 void ff_init_scantable(uint8_t *permutation
, ScanTable
*st
, const uint8_t *src_scantable
){
180 st
->scantable
= src_scantable
;
184 j
= src_scantable
[i
];
185 st
->permutated
[i
] = permutation
[j
];
194 j
= st
->permutated
[i
];
196 st
->raster_end
[i
]= end
;
200 #ifdef CONFIG_ENCODERS
201 void ff_write_quant_matrix(PutBitContext
*pb
, int16_t *matrix
){
207 put_bits(pb
, 8, matrix
[ ff_zigzag_direct
[i
] ]);
212 #endif //CONFIG_ENCODERS
214 /* init common dct for both encoder and decoder */
215 int DCT_common_init(MpegEncContext
*s
)
217 s
->dct_unquantize_h263_intra
= dct_unquantize_h263_intra_c
;
218 s
->dct_unquantize_h263_inter
= dct_unquantize_h263_inter_c
;
219 s
->dct_unquantize_mpeg1_intra
= dct_unquantize_mpeg1_intra_c
;
220 s
->dct_unquantize_mpeg1_inter
= dct_unquantize_mpeg1_inter_c
;
221 s
->dct_unquantize_mpeg2_intra
= dct_unquantize_mpeg2_intra_c
;
222 s
->dct_unquantize_mpeg2_inter
= dct_unquantize_mpeg2_inter_c
;
224 #ifdef CONFIG_ENCODERS
225 s
->dct_quantize
= dct_quantize_c
;
226 s
->denoise_dct
= denoise_dct_c
;
230 MPV_common_init_mmx(s
);
233 MPV_common_init_axp(s
);
236 MPV_common_init_mlib(s
);
239 MPV_common_init_mmi(s
);
242 MPV_common_init_armv4l(s
);
245 MPV_common_init_ppc(s
);
248 #ifdef CONFIG_ENCODERS
249 s
->fast_dct_quantize
= s
->dct_quantize
;
251 if(s
->flags
&CODEC_FLAG_TRELLIS_QUANT
){
252 s
->dct_quantize
= dct_quantize_trellis_c
; //move before MPV_common_init_*
255 #endif //CONFIG_ENCODERS
257 /* load & permutate scantables
258 note: only wmv uses different ones
260 if(s
->alternate_scan
){
261 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->inter_scantable
, ff_alternate_vertical_scan
);
262 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_scantable
, ff_alternate_vertical_scan
);
264 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->inter_scantable
, ff_zigzag_direct
);
265 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_scantable
, ff_zigzag_direct
);
267 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_h_scantable
, ff_alternate_horizontal_scan
);
268 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_v_scantable
, ff_alternate_vertical_scan
);
270 s
->picture_structure
= PICT_FRAME
;
275 static void copy_picture(Picture
*dst
, Picture
*src
){
277 dst
->type
= FF_BUFFER_TYPE_COPY
;
280 static void copy_picture_attributes(AVFrame
*dst
, AVFrame
*src
){
281 dst
->pict_type
= src
->pict_type
;
282 dst
->quality
= src
->quality
;
283 dst
->coded_picture_number
= src
->coded_picture_number
;
284 dst
->display_picture_number
= src
->display_picture_number
;
285 // dst->reference = src->reference;
287 dst
->interlaced_frame
= src
->interlaced_frame
;
288 dst
->top_field_first
= src
->top_field_first
;
292 * allocates a Picture
293 * The pixels are allocated/set by calling get_buffer() if shared=0
295 static int alloc_picture(MpegEncContext
*s
, Picture
*pic
, int shared
){
296 const int big_mb_num
= s
->mb_stride
*(s
->mb_height
+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11
297 const int mb_array_size
= s
->mb_stride
*s
->mb_height
;
298 const int b8_array_size
= s
->b8_stride
*s
->mb_height
*2;
299 const int b4_array_size
= s
->b4_stride
*s
->mb_height
*4;
303 assert(pic
->data
[0]);
304 assert(pic
->type
== 0 || pic
->type
== FF_BUFFER_TYPE_SHARED
);
305 pic
->type
= FF_BUFFER_TYPE_SHARED
;
309 assert(!pic
->data
[0]);
311 r
= s
->avctx
->get_buffer(s
->avctx
, (AVFrame
*)pic
);
313 if(r
<0 || !pic
->age
|| !pic
->type
|| !pic
->data
[0]){
314 av_log(s
->avctx
, AV_LOG_ERROR
, "get_buffer() failed (%d %d %d %p)\n", r
, pic
->age
, pic
->type
, pic
->data
[0]);
318 if(s
->linesize
&& (s
->linesize
!= pic
->linesize
[0] || s
->uvlinesize
!= pic
->linesize
[1])){
319 av_log(s
->avctx
, AV_LOG_ERROR
, "get_buffer() failed (stride changed)\n");
323 if(pic
->linesize
[1] != pic
->linesize
[2]){
324 av_log(s
->avctx
, AV_LOG_ERROR
, "get_buffer() failed (uv stride missmatch)\n");
328 s
->linesize
= pic
->linesize
[0];
329 s
->uvlinesize
= pic
->linesize
[1];
332 if(pic
->qscale_table
==NULL
){
334 CHECKED_ALLOCZ(pic
->mb_var
, mb_array_size
* sizeof(int16_t))
335 CHECKED_ALLOCZ(pic
->mc_mb_var
, mb_array_size
* sizeof(int16_t))
336 CHECKED_ALLOCZ(pic
->mb_mean
, mb_array_size
* sizeof(int8_t))
339 CHECKED_ALLOCZ(pic
->mbskip_table
, mb_array_size
* sizeof(uint8_t)+2) //the +2 is for the slice end check
340 CHECKED_ALLOCZ(pic
->qscale_table
, mb_array_size
* sizeof(uint8_t))
341 CHECKED_ALLOCZ(pic
->mb_type_base
, big_mb_num
* sizeof(uint32_t))
342 pic
->mb_type
= pic
->mb_type_base
+ s
->mb_stride
+1;
343 if(s
->out_format
== FMT_H264
){
345 CHECKED_ALLOCZ(pic
->motion_val_base
[i
], 2 * (b4_array_size
+1) * sizeof(int16_t))
346 pic
->motion_val
[i
]= pic
->motion_val_base
[i
]+1;
347 CHECKED_ALLOCZ(pic
->ref_index
[i
] , b8_array_size
* sizeof(uint8_t))
349 pic
->motion_subsample_log2
= 2;
350 }else if(s
->out_format
== FMT_H263
|| s
->encoding
|| (s
->avctx
->debug
&FF_DEBUG_MV
) || (s
->avctx
->debug_mv
)){
352 CHECKED_ALLOCZ(pic
->motion_val_base
[i
], 2 * (b8_array_size
+1) * sizeof(int16_t)*2) //FIXME
353 pic
->motion_val
[i
]= pic
->motion_val_base
[i
]+1;
355 pic
->motion_subsample_log2
= 3;
357 pic
->qstride
= s
->mb_stride
;
358 CHECKED_ALLOCZ(pic
->pan_scan
, 1 * sizeof(AVPanScan
))
361 //it might be nicer if the application would keep track of these but it would require a API change
362 memmove(s
->prev_pict_types
+1, s
->prev_pict_types
, PREV_PICT_TYPES_BUFFER_SIZE
-1);
363 s
->prev_pict_types
[0]= s
->pict_type
;
364 if(pic
->age
< PREV_PICT_TYPES_BUFFER_SIZE
&& s
->prev_pict_types
[pic
->age
] == B_TYPE
)
365 pic
->age
= INT_MAX
; // skipped MBs in B-frames are quite rare in MPEG-1/2 and it's a bit tricky to skip them anyway
368 fail
: //for the CHECKED_ALLOCZ macro
373 * deallocates a picture
375 static void free_picture(MpegEncContext
*s
, Picture
*pic
){
378 if(pic
->data
[0] && pic
->type
!=FF_BUFFER_TYPE_SHARED
){
379 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)pic
);
382 av_freep(&pic
->mb_var
);
383 av_freep(&pic
->mc_mb_var
);
384 av_freep(&pic
->mb_mean
);
385 av_freep(&pic
->mbskip_table
);
386 av_freep(&pic
->qscale_table
);
387 av_freep(&pic
->mb_type_base
);
388 av_freep(&pic
->pan_scan
);
391 av_freep(&pic
->motion_val_base
[i
]);
392 av_freep(&pic
->ref_index
[i
]);
395 if(pic
->type
== FF_BUFFER_TYPE_SHARED
){
404 /* init common structure for both encoder and decoder */
405 int MPV_common_init(MpegEncContext
*s
)
407 int y_size
, c_size
, yc_size
, i
, mb_array_size
, mv_table_size
, x
, y
;
409 dsputil_init(&s
->dsp
, s
->avctx
);
412 s
->flags
= s
->avctx
->flags
;
413 s
->flags2
= s
->avctx
->flags2
;
415 s
->mb_width
= (s
->width
+ 15) / 16;
416 s
->mb_height
= (s
->height
+ 15) / 16;
417 s
->mb_stride
= s
->mb_width
+ 1;
418 s
->b8_stride
= s
->mb_width
*2 + 1;
419 s
->b4_stride
= s
->mb_width
*4 + 1;
420 mb_array_size
= s
->mb_height
* s
->mb_stride
;
421 mv_table_size
= (s
->mb_height
+2) * s
->mb_stride
+ 1;
423 /* set default edge pos, will be overriden in decode_header if needed */
424 s
->h_edge_pos
= s
->mb_width
*16;
425 s
->v_edge_pos
= s
->mb_height
*16;
427 s
->mb_num
= s
->mb_width
* s
->mb_height
;
432 s
->block_wrap
[3]= s
->mb_width
*2 + 2;
434 s
->block_wrap
[5]= s
->mb_width
+ 2;
437 s
->c_dc_scale_table
= ff_mpeg1_dc_scale_table
;
438 s
->chroma_qscale_table
= ff_default_chroma_qscale_table
;
440 s
->progressive_sequence
= 1;
441 s
->progressive_frame
= 1;
442 s
->coded_picture_number
= 0;
444 y_size
= (2 * s
->mb_width
+ 2) * (2 * s
->mb_height
+ 2);
445 c_size
= (s
->mb_width
+ 2) * (s
->mb_height
+ 2);
446 yc_size
= y_size
+ 2 * c_size
;
448 /* convert fourcc to upper case */
449 s
->avctx
->codec_tag
= toupper( s
->avctx
->codec_tag
&0xFF)
450 + (toupper((s
->avctx
->codec_tag
>>8 )&0xFF)<<8 )
451 + (toupper((s
->avctx
->codec_tag
>>16)&0xFF)<<16)
452 + (toupper((s
->avctx
->codec_tag
>>24)&0xFF)<<24);
454 s
->avctx
->stream_codec_tag
= toupper( s
->avctx
->stream_codec_tag
&0xFF)
455 + (toupper((s
->avctx
->stream_codec_tag
>>8 )&0xFF)<<8 )
456 + (toupper((s
->avctx
->stream_codec_tag
>>16)&0xFF)<<16)
457 + (toupper((s
->avctx
->stream_codec_tag
>>24)&0xFF)<<24);
459 CHECKED_ALLOCZ(s
->allocated_edge_emu_buffer
, (s
->width
+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
460 s
->edge_emu_buffer
= s
->allocated_edge_emu_buffer
+ (s
->width
+64)*2*17;
462 s
->avctx
->coded_frame
= (AVFrame
*)&s
->current_picture
;
464 CHECKED_ALLOCZ(s
->mb_index2xy
, (s
->mb_num
+1)*sizeof(int)) //error ressilience code looks cleaner with this
465 for(y
=0; y
<s
->mb_height
; y
++){
466 for(x
=0; x
<s
->mb_width
; x
++){
467 s
->mb_index2xy
[ x
+ y
*s
->mb_width
] = x
+ y
*s
->mb_stride
;
470 s
->mb_index2xy
[ s
->mb_height
*s
->mb_width
] = (s
->mb_height
-1)*s
->mb_stride
+ s
->mb_width
; //FIXME really needed?
473 /* Allocate MV tables */
474 CHECKED_ALLOCZ(s
->p_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
475 CHECKED_ALLOCZ(s
->b_forw_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
476 CHECKED_ALLOCZ(s
->b_back_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
477 CHECKED_ALLOCZ(s
->b_bidir_forw_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
478 CHECKED_ALLOCZ(s
->b_bidir_back_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
479 CHECKED_ALLOCZ(s
->b_direct_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
480 s
->p_mv_table
= s
->p_mv_table_base
+ s
->mb_stride
+ 1;
481 s
->b_forw_mv_table
= s
->b_forw_mv_table_base
+ s
->mb_stride
+ 1;
482 s
->b_back_mv_table
= s
->b_back_mv_table_base
+ s
->mb_stride
+ 1;
483 s
->b_bidir_forw_mv_table
= s
->b_bidir_forw_mv_table_base
+ s
->mb_stride
+ 1;
484 s
->b_bidir_back_mv_table
= s
->b_bidir_back_mv_table_base
+ s
->mb_stride
+ 1;
485 s
->b_direct_mv_table
= s
->b_direct_mv_table_base
+ s
->mb_stride
+ 1;
487 //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
488 CHECKED_ALLOCZ(s
->me
.scratchpad
, s
->width
*2*16*3*sizeof(uint8_t))
490 CHECKED_ALLOCZ(s
->me
.map
, ME_MAP_SIZE
*sizeof(uint32_t))
491 CHECKED_ALLOCZ(s
->me
.score_map
, ME_MAP_SIZE
*sizeof(uint32_t))
493 if(s
->codec_id
==CODEC_ID_MPEG4
){
494 CHECKED_ALLOCZ(s
->tex_pb_buffer
, PB_BUFFER_SIZE
);
495 CHECKED_ALLOCZ( s
->pb2_buffer
, PB_BUFFER_SIZE
);
498 if(s
->msmpeg4_version
){
499 CHECKED_ALLOCZ(s
->ac_stats
, 2*2*(MAX_LEVEL
+1)*(MAX_RUN
+1)*2*sizeof(int));
501 CHECKED_ALLOCZ(s
->avctx
->stats_out
, 256);
503 /* Allocate MB type table */
504 CHECKED_ALLOCZ(s
->mb_type
, mb_array_size
* sizeof(uint16_t)) //needed for encoding
506 CHECKED_ALLOCZ(s
->lambda_table
, mb_array_size
* sizeof(int))
508 CHECKED_ALLOCZ(s
->q_intra_matrix
, 64*32 * sizeof(int))
509 CHECKED_ALLOCZ(s
->q_inter_matrix
, 64*32 * sizeof(int))
510 CHECKED_ALLOCZ(s
->q_intra_matrix16
, 64*32*2 * sizeof(uint16_t))
511 CHECKED_ALLOCZ(s
->q_inter_matrix16
, 64*32*2 * sizeof(uint16_t))
512 CHECKED_ALLOCZ(s
->input_picture
, MAX_PICTURE_COUNT
* sizeof(Picture
*))
513 CHECKED_ALLOCZ(s
->reordered_input_picture
, MAX_PICTURE_COUNT
* sizeof(Picture
*))
515 if(s
->avctx
->noise_reduction
){
516 CHECKED_ALLOCZ(s
->dct_error_sum
, 2 * 64 * sizeof(int))
517 CHECKED_ALLOCZ(s
->dct_offset
, 2 * 64 * sizeof(uint16_t))
520 CHECKED_ALLOCZ(s
->blocks
, 64*6*2 * sizeof(DCTELEM
))
522 CHECKED_ALLOCZ(s
->picture
, MAX_PICTURE_COUNT
* sizeof(Picture
))
524 CHECKED_ALLOCZ(s
->error_status_table
, mb_array_size
*sizeof(uint8_t))
526 if(s
->codec_id
==CODEC_ID_MPEG4
|| (s
->flags
& CODEC_FLAG_INTERLACED_ME
)){
527 /* interlaced direct mode decoding tables */
532 CHECKED_ALLOCZ(s
->b_field_mv_table_base
[i
][j
][k
] , mv_table_size
* 2 * sizeof(int16_t))
533 s
->b_field_mv_table
[i
][j
][k
] = s
->b_field_mv_table_base
[i
][j
][k
] + s
->mb_stride
+ 1;
535 CHECKED_ALLOCZ(s
->b_field_select_table
[i
][j
] , mb_array_size
* 2 * sizeof(uint8_t))
536 CHECKED_ALLOCZ(s
->p_field_mv_table_base
[i
][j
] , mv_table_size
* 2 * sizeof(int16_t))
537 s
->p_field_mv_table
[i
][j
] = s
->p_field_mv_table_base
[i
][j
] + s
->mb_stride
+ 1;
539 CHECKED_ALLOCZ(s
->p_field_select_table
[i
] , mb_array_size
* 2 * sizeof(uint8_t))
542 if (s
->out_format
== FMT_H263
) {
544 CHECKED_ALLOCZ(s
->ac_val
[0], yc_size
* sizeof(int16_t) * 16);
545 s
->ac_val
[1] = s
->ac_val
[0] + y_size
;
546 s
->ac_val
[2] = s
->ac_val
[1] + c_size
;
549 CHECKED_ALLOCZ(s
->coded_block
, y_size
);
551 /* divx501 bitstream reorder buffer */
552 CHECKED_ALLOCZ(s
->bitstream_buffer
, BITSTREAM_BUFFER_SIZE
);
554 /* cbp, ac_pred, pred_dir */
555 CHECKED_ALLOCZ(s
->cbp_table
, mb_array_size
* sizeof(uint8_t))
556 CHECKED_ALLOCZ(s
->pred_dir_table
, mb_array_size
* sizeof(uint8_t))
559 if (s
->h263_pred
|| s
->h263_plus
|| !s
->encoding
) {
561 //MN: we need these for error resilience of intra-frames
562 CHECKED_ALLOCZ(s
->dc_val
[0], yc_size
* sizeof(int16_t));
563 s
->dc_val
[1] = s
->dc_val
[0] + y_size
;
564 s
->dc_val
[2] = s
->dc_val
[1] + c_size
;
565 for(i
=0;i
<yc_size
;i
++)
566 s
->dc_val
[0][i
] = 1024;
569 /* which mb is a intra block */
570 CHECKED_ALLOCZ(s
->mbintra_table
, mb_array_size
);
571 memset(s
->mbintra_table
, 1, mb_array_size
);
573 /* default structure is frame */
574 s
->picture_structure
= PICT_FRAME
;
576 /* init macroblock skip table */
577 CHECKED_ALLOCZ(s
->mbskip_table
, mb_array_size
+2);
578 //Note the +1 is for a quicker mpeg4 slice_end detection
579 CHECKED_ALLOCZ(s
->prev_pict_types
, PREV_PICT_TYPES_BUFFER_SIZE
);
581 s
->block
= s
->blocks
[0];
584 s
->pblocks
[i
] = (short *)(&s
->block
[i
]);
587 s
->parse_context
.state
= -1;
588 if((s
->avctx
->debug
&(FF_DEBUG_VIS_QP
|FF_DEBUG_VIS_MB_TYPE
)) || (s
->avctx
->debug_mv
)){
589 s
->visualization_buffer
[0] = av_malloc((s
->mb_width
*16 + 2*EDGE_WIDTH
) * s
->mb_height
*16 + 2*EDGE_WIDTH
);
590 s
->visualization_buffer
[1] = av_malloc((s
->mb_width
*8 + EDGE_WIDTH
) * s
->mb_height
*8 + EDGE_WIDTH
);
591 s
->visualization_buffer
[2] = av_malloc((s
->mb_width
*8 + EDGE_WIDTH
) * s
->mb_height
*8 + EDGE_WIDTH
);
594 s
->context_initialized
= 1;
604 /* free the common structure shared by encoder and decoder (counterpart of MPV_common_init) */
605 void MPV_common_end(MpegEncContext
*s
)
609 av_freep(&s
->parse_context
.buffer
);
610 s
->parse_context
.buffer_size
=0;
612 av_freep(&s
->mb_type
);
613 av_freep(&s
->p_mv_table_base
);
614 av_freep(&s
->b_forw_mv_table_base
);
615 av_freep(&s
->b_back_mv_table_base
);
616 av_freep(&s
->b_bidir_forw_mv_table_base
);
617 av_freep(&s
->b_bidir_back_mv_table_base
);
618 av_freep(&s
->b_direct_mv_table_base
);
620 s
->b_forw_mv_table
= NULL
;
621 s
->b_back_mv_table
= NULL
;
622 s
->b_bidir_forw_mv_table
= NULL
;
623 s
->b_bidir_back_mv_table
= NULL
;
624 s
->b_direct_mv_table
= NULL
;
628 av_freep(&s
->b_field_mv_table_base
[i
][j
][k
]);
629 s
->b_field_mv_table
[i
][j
][k
]=NULL
;
631 av_freep(&s
->b_field_select_table
[i
][j
]);
632 av_freep(&s
->p_field_mv_table_base
[i
][j
]);
633 s
->p_field_mv_table
[i
][j
]=NULL
;
635 av_freep(&s
->p_field_select_table
[i
]);
638 av_freep(&s
->dc_val
[0]);
639 av_freep(&s
->ac_val
[0]);
640 av_freep(&s
->coded_block
);
641 av_freep(&s
->mbintra_table
);
642 av_freep(&s
->cbp_table
);
643 av_freep(&s
->pred_dir_table
);
644 av_freep(&s
->me
.scratchpad
);
645 av_freep(&s
->me
.map
);
646 av_freep(&s
->me
.score_map
);
648 av_freep(&s
->mbskip_table
);
649 av_freep(&s
->prev_pict_types
);
650 av_freep(&s
->bitstream_buffer
);
651 av_freep(&s
->tex_pb_buffer
);
652 av_freep(&s
->pb2_buffer
);
653 av_freep(&s
->allocated_edge_emu_buffer
); s
->edge_emu_buffer
= NULL
;
654 av_freep(&s
->avctx
->stats_out
);
655 av_freep(&s
->ac_stats
);
656 av_freep(&s
->error_status_table
);
657 av_freep(&s
->mb_index2xy
);
658 av_freep(&s
->lambda_table
);
659 av_freep(&s
->q_intra_matrix
);
660 av_freep(&s
->q_inter_matrix
);
661 av_freep(&s
->q_intra_matrix16
);
662 av_freep(&s
->q_inter_matrix16
);
663 av_freep(&s
->blocks
);
664 av_freep(&s
->input_picture
);
665 av_freep(&s
->reordered_input_picture
);
666 av_freep(&s
->dct_error_sum
);
667 av_freep(&s
->dct_offset
);
670 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
671 free_picture(s
, &s
->picture
[i
]);
674 av_freep(&s
->picture
);
675 avcodec_default_free_buffers(s
->avctx
);
676 s
->context_initialized
= 0;
679 s
->current_picture_ptr
= NULL
;
681 if (s
->visualization_buffer
[i
])
682 av_free(s
->visualization_buffer
[i
]);
685 #ifdef CONFIG_ENCODERS
687 /* init video encoder */
688 int MPV_encode_init(AVCodecContext
*avctx
)
690 MpegEncContext
*s
= avctx
->priv_data
;
692 int chroma_h_shift
, chroma_v_shift
;
694 avctx
->pix_fmt
= PIX_FMT_YUV420P
; // FIXME
696 s
->bit_rate
= avctx
->bit_rate
;
697 s
->width
= avctx
->width
;
698 s
->height
= avctx
->height
;
699 if(avctx
->gop_size
> 600){
700 av_log(avctx
, AV_LOG_ERROR
, "Warning keyframe interval too large! reducing it ...\n");
703 s
->gop_size
= avctx
->gop_size
;
705 s
->flags
= avctx
->flags
;
706 s
->flags2
= avctx
->flags2
;
707 s
->max_b_frames
= avctx
->max_b_frames
;
708 s
->codec_id
= avctx
->codec
->id
;
709 s
->luma_elim_threshold
= avctx
->luma_elim_threshold
;
710 s
->chroma_elim_threshold
= avctx
->chroma_elim_threshold
;
711 s
->strict_std_compliance
= avctx
->strict_std_compliance
;
712 s
->data_partitioning
= avctx
->flags
& CODEC_FLAG_PART
;
713 s
->quarter_sample
= (avctx
->flags
& CODEC_FLAG_QPEL
)!=0;
714 s
->mpeg_quant
= avctx
->mpeg_quant
;
715 s
->rtp_mode
= !!avctx
->rtp_payload_size
;
717 if (s
->gop_size
<= 1) {
724 s
->me_method
= avctx
->me_method
;
727 s
->fixed_qscale
= !!(avctx
->flags
& CODEC_FLAG_QSCALE
);
729 s
->adaptive_quant
= ( s
->avctx
->lumi_masking
730 || s
->avctx
->dark_masking
731 || s
->avctx
->temporal_cplx_masking
732 || s
->avctx
->spatial_cplx_masking
733 || s
->avctx
->p_masking
734 || (s
->flags
&CODEC_FLAG_QP_RD
))
737 s
->obmc
= !!(s
->flags
& CODEC_FLAG_OBMC
);
738 s
->loop_filter
= !!(s
->flags
& CODEC_FLAG_LOOP_FILTER
);
739 s
->alternate_scan
= !!(s
->flags
& CODEC_FLAG_ALT_SCAN
);
741 if(avctx
->rc_max_rate
&& !avctx
->rc_buffer_size
){
742 av_log(avctx
, AV_LOG_ERROR
, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
746 if(avctx
->rc_min_rate
&& avctx
->rc_max_rate
!= avctx
->rc_min_rate
){
747 av_log(avctx
, AV_LOG_INFO
, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n");
750 if((s
->flags
& CODEC_FLAG_4MV
) && s
->codec_id
!= CODEC_ID_MPEG4
751 && s
->codec_id
!= CODEC_ID_H263
&& s
->codec_id
!= CODEC_ID_H263P
){
752 av_log(avctx
, AV_LOG_ERROR
, "4MV not supported by codec\n");
756 if(s
->obmc
&& s
->avctx
->mb_decision
!= FF_MB_DECISION_SIMPLE
){
757 av_log(avctx
, AV_LOG_ERROR
, "OBMC is only supported with simple mb decission\n");
761 if(s
->obmc
&& s
->codec_id
!= CODEC_ID_H263
&& s
->codec_id
!= CODEC_ID_H263P
){
762 av_log(avctx
, AV_LOG_ERROR
, "OBMC is only supported with H263(+)\n");
766 if(s
->quarter_sample
&& s
->codec_id
!= CODEC_ID_MPEG4
){
767 av_log(avctx
, AV_LOG_ERROR
, "qpel not supported by codec\n");
771 if(s
->data_partitioning
&& s
->codec_id
!= CODEC_ID_MPEG4
){
772 av_log(avctx
, AV_LOG_ERROR
, "data partitioning not supported by codec\n");
776 if(s
->max_b_frames
&& s
->codec_id
!= CODEC_ID_MPEG4
&& s
->codec_id
!= CODEC_ID_MPEG1VIDEO
&& s
->codec_id
!= CODEC_ID_MPEG2VIDEO
){
777 av_log(avctx
, AV_LOG_ERROR
, "b frames not supported by codec\n");
781 if(s
->mpeg_quant
&& s
->codec_id
!= CODEC_ID_MPEG4
){ //FIXME mpeg2 uses that too
782 av_log(avctx
, AV_LOG_ERROR
, "mpeg2 style quantization not supporetd by codec\n");
786 if((s
->flags
& CODEC_FLAG_CBP_RD
) && !(s
->flags
& CODEC_FLAG_TRELLIS_QUANT
)){
787 av_log(avctx
, AV_LOG_ERROR
, "CBP RD needs trellis quant\n");
791 if((s
->flags
& CODEC_FLAG_QP_RD
) && s
->avctx
->mb_decision
!= FF_MB_DECISION_RD
){
792 av_log(avctx
, AV_LOG_ERROR
, "QP RD needs mbd=2\n");
796 if(s
->avctx
->scenechange_threshold
< 1000000000 && (s
->flags
& CODEC_FLAG_CLOSED_GOP
)){
797 av_log(avctx
, AV_LOG_ERROR
, "closed gop with scene change detection arent supported yet\n");
801 i
= ff_gcd(avctx
->frame_rate
, avctx
->frame_rate_base
);
803 av_log(avctx
, AV_LOG_INFO
, "removing common factors from framerate\n");
804 avctx
->frame_rate
/= i
;
805 avctx
->frame_rate_base
/= i
;
809 if(s
->codec_id
==CODEC_ID_MJPEG
){
810 s
->intra_quant_bias
= 1<<(QUANT_BIAS_SHIFT
-1); //(a + x/2)/x
811 s
->inter_quant_bias
= 0;
812 }else if(s
->mpeg_quant
|| s
->codec_id
==CODEC_ID_MPEG1VIDEO
|| s
->codec_id
==CODEC_ID_MPEG2VIDEO
){
813 s
->intra_quant_bias
= 3<<(QUANT_BIAS_SHIFT
-3); //(a + x*3/8)/x
814 s
->inter_quant_bias
= 0;
816 s
->intra_quant_bias
=0;
817 s
->inter_quant_bias
=-(1<<(QUANT_BIAS_SHIFT
-2)); //(a - x/4)/x
820 if(avctx
->intra_quant_bias
!= FF_DEFAULT_QUANT_BIAS
)
821 s
->intra_quant_bias
= avctx
->intra_quant_bias
;
822 if(avctx
->inter_quant_bias
!= FF_DEFAULT_QUANT_BIAS
)
823 s
->inter_quant_bias
= avctx
->inter_quant_bias
;
825 avcodec_get_chroma_sub_sample(avctx
->pix_fmt
, &chroma_h_shift
, &chroma_v_shift
);
827 av_reduce(&s
->time_increment_resolution
, &dummy
, s
->avctx
->frame_rate
, s
->avctx
->frame_rate_base
, (1<<16)-1);
828 s
->time_increment_bits
= av_log2(s
->time_increment_resolution
- 1) + 1;
830 switch(avctx
->codec
->id
) {
831 case CODEC_ID_MPEG1VIDEO
:
832 s
->out_format
= FMT_MPEG1
;
833 s
->low_delay
= 0; //s->max_b_frames ? 0 : 1;
834 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
836 case CODEC_ID_MPEG2VIDEO
:
837 s
->out_format
= FMT_MPEG1
;
838 s
->low_delay
= 0; //s->max_b_frames ? 0 : 1;
839 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
844 s
->out_format
= FMT_MJPEG
;
845 s
->intra_only
= 1; /* force intra only for jpeg */
846 s
->mjpeg_write_tables
= 1; /* write all tables */
847 s
->mjpeg_data_only_frames
= 0; /* write all the needed headers */
848 s
->mjpeg_vsample
[0] = 1<<chroma_v_shift
;
849 s
->mjpeg_vsample
[1] = 1;
850 s
->mjpeg_vsample
[2] = 1;
851 s
->mjpeg_hsample
[0] = 1<<chroma_h_shift
;
852 s
->mjpeg_hsample
[1] = 1;
853 s
->mjpeg_hsample
[2] = 1;
854 if (mjpeg_init(s
) < 0)
861 if (h263_get_picture_format(s
->width
, s
->height
) == 7) {
862 av_log(avctx
, AV_LOG_INFO
, "Input picture size isn't suitable for h263 codec! try h263+\n");
865 s
->out_format
= FMT_H263
;
866 s
->obmc
= (avctx
->flags
& CODEC_FLAG_OBMC
) ?
1:0;
871 s
->out_format
= FMT_H263
;
874 s
->umvplus
= (avctx
->flags
& CODEC_FLAG_H263P_UMV
) ?
1:0;
875 s
->h263_aic
= (avctx
->flags
& CODEC_FLAG_H263P_AIC
) ?
1:0;
876 s
->modified_quant
= s
->h263_aic
;
877 s
->alt_inter_vlc
= (avctx
->flags
& CODEC_FLAG_H263P_AIV
) ?
1:0;
878 s
->obmc
= (avctx
->flags
& CODEC_FLAG_OBMC
) ?
1:0;
879 s
->loop_filter
= (avctx
->flags
& CODEC_FLAG_LOOP_FILTER
) ?
1:0;
880 s
->unrestricted_mv
= s
->obmc
|| s
->loop_filter
|| s
->umvplus
;
881 s
->h263_slice_structured
= (s
->flags
& CODEC_FLAG_H263P_SLICE_STRUCT
) ?
1:0;
884 /* These are just to be sure */
889 s
->out_format
= FMT_H263
;
890 s
->h263_flv
= 2; /* format = 1; 11-bit codes */
891 s
->unrestricted_mv
= 1;
892 s
->rtp_mode
=0; /* don't allow GOB */
897 s
->out_format
= FMT_H263
;
902 s
->out_format
= FMT_H263
;
904 s
->unrestricted_mv
= 1;
905 s
->low_delay
= s
->max_b_frames ?
0 : 1;
906 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
908 case CODEC_ID_MSMPEG4V1
:
909 s
->out_format
= FMT_H263
;
912 s
->unrestricted_mv
= 1;
913 s
->msmpeg4_version
= 1;
917 case CODEC_ID_MSMPEG4V2
:
918 s
->out_format
= FMT_H263
;
921 s
->unrestricted_mv
= 1;
922 s
->msmpeg4_version
= 2;
926 case CODEC_ID_MSMPEG4V3
:
927 s
->out_format
= FMT_H263
;
930 s
->unrestricted_mv
= 1;
931 s
->msmpeg4_version
= 3;
932 s
->flipflop_rounding
=1;
937 s
->out_format
= FMT_H263
;
940 s
->unrestricted_mv
= 1;
941 s
->msmpeg4_version
= 4;
942 s
->flipflop_rounding
=1;
947 s
->out_format
= FMT_H263
;
950 s
->unrestricted_mv
= 1;
951 s
->msmpeg4_version
= 5;
952 s
->flipflop_rounding
=1;
961 { /* set up some save defaults, some codecs might override them later */
967 default_mv_penalty
= av_mallocz( sizeof(uint8_t)*(MAX_FCODE
+1)*(2*MAX_MV
+1) );
968 memset(default_mv_penalty
, 0, sizeof(uint8_t)*(MAX_FCODE
+1)*(2*MAX_MV
+1));
969 memset(default_fcode_tab
, 0, sizeof(uint8_t)*(2*MAX_MV
+1));
971 for(i
=-16; i
<16; i
++){
972 default_fcode_tab
[i
+ MAX_MV
]= 1;
976 s
->me
.mv_penalty
= default_mv_penalty
;
977 s
->fcode_tab
= default_fcode_tab
;
979 /* dont use mv_penalty table for crap MV as it would be confused */
980 //FIXME remove after fixing / removing old ME
981 if (s
->me_method
< ME_EPZS
) s
->me
.mv_penalty
= default_mv_penalty
;
986 if (MPV_common_init(s
) < 0)
989 if(s
->modified_quant
)
990 s
->chroma_qscale_table
= ff_h263_chroma_qscale_table
;
991 s
->progressive_frame
=
992 s
->progressive_sequence
= !(avctx
->flags
& (CODEC_FLAG_INTERLACED_DCT
|CODEC_FLAG_INTERLACED_ME
));
994 ff_set_cmp(&s
->dsp
, s
->dsp
.ildct_cmp
, s
->avctx
->ildct_cmp
);
998 #ifdef CONFIG_ENCODERS
1000 if (s
->out_format
== FMT_H263
)
1001 h263_encode_init(s
);
1002 if(s
->msmpeg4_version
)
1003 ff_msmpeg4_encode_init(s
);
1005 if (s
->out_format
== FMT_MPEG1
)
1006 ff_mpeg1_encode_init(s
);
1009 /* init default q matrix */
1011 int j
= s
->dsp
.idct_permutation
[i
];
1013 if(s
->codec_id
==CODEC_ID_MPEG4
&& s
->mpeg_quant
){
1014 s
->intra_matrix
[j
] = ff_mpeg4_default_intra_matrix
[i
];
1015 s
->inter_matrix
[j
] = ff_mpeg4_default_non_intra_matrix
[i
];
1016 }else if(s
->out_format
== FMT_H263
){
1017 s
->intra_matrix
[j
] =
1018 s
->inter_matrix
[j
] = ff_mpeg1_default_non_intra_matrix
[i
];
1022 s
->intra_matrix
[j
] = ff_mpeg1_default_intra_matrix
[i
];
1023 s
->inter_matrix
[j
] = ff_mpeg1_default_non_intra_matrix
[i
];
1025 if(s
->avctx
->intra_matrix
)
1026 s
->intra_matrix
[j
] = s
->avctx
->intra_matrix
[i
];
1027 if(s
->avctx
->inter_matrix
)
1028 s
->inter_matrix
[j
] = s
->avctx
->inter_matrix
[i
];
1031 /* precompute matrix */
1032 /* for mjpeg, we do include qscale in the matrix */
1033 if (s
->out_format
!= FMT_MJPEG
) {
1034 convert_matrix(&s
->dsp
, s
->q_intra_matrix
, s
->q_intra_matrix16
,
1035 s
->intra_matrix
, s
->intra_quant_bias
, 1, 31);
1036 convert_matrix(&s
->dsp
, s
->q_inter_matrix
, s
->q_inter_matrix16
,
1037 s
->inter_matrix
, s
->inter_quant_bias
, 1, 31);
1040 if(ff_rate_control_init(s
) < 0)
1043 s
->picture_number
= 0;
1044 s
->input_picture_number
= 0;
1045 s
->picture_in_gop_number
= 0;
1046 /* motion detector init */
1053 int MPV_encode_end(AVCodecContext
*avctx
)
1055 MpegEncContext
*s
= avctx
->priv_data
;
1061 ff_rate_control_uninit(s
);
1064 if (s
->out_format
== FMT_MJPEG
)
1067 av_freep(&avctx
->extradata
);
1072 #endif //CONFIG_ENCODERS
1074 void init_rl(RLTable
*rl
)
1076 int8_t max_level
[MAX_RUN
+1], max_run
[MAX_LEVEL
+1];
1077 uint8_t index_run
[MAX_RUN
+1];
1078 int last
, run
, level
, start
, end
, i
;
1080 /* compute max_level[], max_run[] and index_run[] */
1081 for(last
=0;last
<2;last
++) {
1090 memset(max_level
, 0, MAX_RUN
+ 1);
1091 memset(max_run
, 0, MAX_LEVEL
+ 1);
1092 memset(index_run
, rl
->n
, MAX_RUN
+ 1);
1093 for(i
=start
;i
<end
;i
++) {
1094 run
= rl
->table_run
[i
];
1095 level
= rl
->table_level
[i
];
1096 if (index_run
[run
] == rl
->n
)
1098 if (level
> max_level
[run
])
1099 max_level
[run
] = level
;
1100 if (run
> max_run
[level
])
1101 max_run
[level
] = run
;
1103 rl
->max_level
[last
] = av_malloc(MAX_RUN
+ 1);
1104 memcpy(rl
->max_level
[last
], max_level
, MAX_RUN
+ 1);
1105 rl
->max_run
[last
] = av_malloc(MAX_LEVEL
+ 1);
1106 memcpy(rl
->max_run
[last
], max_run
, MAX_LEVEL
+ 1);
1107 rl
->index_run
[last
] = av_malloc(MAX_RUN
+ 1);
1108 memcpy(rl
->index_run
[last
], index_run
, MAX_RUN
+ 1);
1112 /* draw the edges of width 'w' of an image of size width, height */
1113 //FIXME check that this is ok for mpeg4 interlaced
1114 static void draw_edges_c(uint8_t *buf
, int wrap
, int width
, int height
, int w
)
1116 uint8_t *ptr
, *last_line
;
1119 last_line
= buf
+ (height
- 1) * wrap
;
1121 /* top and bottom */
1122 memcpy(buf
- (i
+ 1) * wrap
, buf
, width
);
1123 memcpy(last_line
+ (i
+ 1) * wrap
, last_line
, width
);
1125 /* left and right */
1127 for(i
=0;i
<height
;i
++) {
1128 memset(ptr
- w
, ptr
[0], w
);
1129 memset(ptr
+ width
, ptr
[width
-1], w
);
1134 memset(buf
- (i
+ 1) * wrap
- w
, buf
[0], w
); /* top left */
1135 memset(buf
- (i
+ 1) * wrap
+ width
, buf
[width
-1], w
); /* top right */
1136 memset(last_line
+ (i
+ 1) * wrap
- w
, last_line
[0], w
); /* top left */
1137 memset(last_line
+ (i
+ 1) * wrap
+ width
, last_line
[width
-1], w
); /* top right */
1141 int ff_find_unused_picture(MpegEncContext
*s
, int shared
){
1145 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1146 if(s
->picture
[i
].data
[0]==NULL
&& s
->picture
[i
].type
==0) return i
;
1149 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1150 if(s
->picture
[i
].data
[0]==NULL
&& s
->picture
[i
].type
!=0) return i
; //FIXME
1152 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1153 if(s
->picture
[i
].data
[0]==NULL
) return i
;
1161 static void update_noise_reduction(MpegEncContext
*s
){
1164 for(intra
=0; intra
<2; intra
++){
1165 if(s
->dct_count
[intra
] > (1<<16)){
1166 for(i
=0; i
<64; i
++){
1167 s
->dct_error_sum
[intra
][i
] >>=1;
1169 s
->dct_count
[intra
] >>= 1;
1172 for(i
=0; i
<64; i
++){
1173 s
->dct_offset
[intra
][i
]= (s
->avctx
->noise_reduction
* s
->dct_count
[intra
] + s
->dct_error_sum
[intra
][i
]/2) / (s
->dct_error_sum
[intra
][i
]+1);
1179 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1181 int MPV_frame_start(MpegEncContext
*s
, AVCodecContext
*avctx
)
1187 assert(s
->last_picture_ptr
==NULL
|| s
->out_format
!= FMT_H264
|| s
->codec_id
== CODEC_ID_SVQ3
);
1189 /* mark&release old frames */
1190 if (s
->pict_type
!= B_TYPE
&& s
->last_picture_ptr
&& s
->last_picture_ptr
->data
[0]) {
1191 avctx
->release_buffer(avctx
, (AVFrame
*)s
->last_picture_ptr
);
1193 /* release forgotten pictures */
1194 /* if(mpeg124/h263) */
1196 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1197 if(s
->picture
[i
].data
[0] && &s
->picture
[i
] != s
->next_picture_ptr
&& s
->picture
[i
].reference
){
1198 av_log(avctx
, AV_LOG_ERROR
, "releasing zombie picture\n");
1199 avctx
->release_buffer(avctx
, (AVFrame
*)&s
->picture
[i
]);
1206 /* release non refernce frames */
1207 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1208 if(s
->picture
[i
].data
[0] && !s
->picture
[i
].reference
/*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1209 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)&s
->picture
[i
]);
1213 if(s
->current_picture_ptr
&& s
->current_picture_ptr
->data
[0]==NULL
)
1214 pic
= (AVFrame
*)s
->current_picture_ptr
; //we allready have a unused image (maybe it was set before reading the header)
1216 i
= ff_find_unused_picture(s
, 0);
1217 pic
= (AVFrame
*)&s
->picture
[i
];
1220 pic
->reference
= s
->pict_type
!= B_TYPE ?
3 : 0;
1222 pic
->coded_picture_number
= s
->coded_picture_number
++;
1224 if( alloc_picture(s
, (Picture
*)pic
, 0) < 0)
1227 s
->current_picture_ptr
= (Picture
*)pic
;
1228 s
->current_picture_ptr
->top_field_first
= s
->top_field_first
; //FIXME use only the vars from current_pic
1229 s
->current_picture_ptr
->interlaced_frame
= !s
->progressive_frame
&& !s
->progressive_sequence
;
1232 s
->current_picture_ptr
->pict_type
= s
->pict_type
;
1233 // if(s->flags && CODEC_FLAG_QSCALE)
1234 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1235 s
->current_picture_ptr
->key_frame
= s
->pict_type
== I_TYPE
;
1237 copy_picture(&s
->current_picture
, s
->current_picture_ptr
);
1239 if(s
->out_format
!= FMT_H264
|| s
->codec_id
== CODEC_ID_SVQ3
){
1240 if (s
->pict_type
!= B_TYPE
) {
1241 s
->last_picture_ptr
= s
->next_picture_ptr
;
1242 s
->next_picture_ptr
= s
->current_picture_ptr
;
1245 if(s
->last_picture_ptr
) copy_picture(&s
->last_picture
, s
->last_picture_ptr
);
1246 if(s
->next_picture_ptr
) copy_picture(&s
->next_picture
, s
->next_picture_ptr
);
1248 if(s
->pict_type
!= I_TYPE
&& (s
->last_picture_ptr
==NULL
|| s
->last_picture_ptr
->data
[0]==NULL
)){
1249 av_log(avctx
, AV_LOG_ERROR
, "warning: first frame is no keyframe\n");
1250 assert(s
->pict_type
!= B_TYPE
); //these should have been dropped if we dont have a reference
1254 assert(s
->pict_type
== I_TYPE
|| (s
->last_picture_ptr
&& s
->last_picture_ptr
->data
[0]));
1256 if(s
->picture_structure
!=PICT_FRAME
){
1259 if(s
->picture_structure
== PICT_BOTTOM_FIELD
){
1260 s
->current_picture
.data
[i
] += s
->current_picture
.linesize
[i
];
1262 s
->current_picture
.linesize
[i
] *= 2;
1263 s
->last_picture
.linesize
[i
] *=2;
1264 s
->next_picture
.linesize
[i
] *=2;
1269 s
->hurry_up
= s
->avctx
->hurry_up
;
1270 s
->error_resilience
= avctx
->error_resilience
;
1272 /* set dequantizer, we cant do it during init as it might change for mpeg4
1273 and we cant do it in the header decode as init isnt called for mpeg4 there yet */
1274 if(s
->mpeg_quant
|| s
->codec_id
== CODEC_ID_MPEG2VIDEO
){
1275 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg2_intra
;
1276 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg2_inter
;
1277 }else if(s
->out_format
== FMT_H263
){
1278 s
->dct_unquantize_intra
= s
->dct_unquantize_h263_intra
;
1279 s
->dct_unquantize_inter
= s
->dct_unquantize_h263_inter
;
1281 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg1_intra
;
1282 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg1_inter
;
1285 if(s
->dct_error_sum
){
1286 assert(s
->avctx
->noise_reduction
&& s
->encoding
);
1288 update_noise_reduction(s
);
1292 if(s
->avctx
->xvmc_acceleration
)
1293 return XVMC_field_start(s
, avctx
);
1298 /* generic function for encode/decode called after a frame has been coded/decoded */
1299 void MPV_frame_end(MpegEncContext
*s
)
1302 /* draw edge for correct motion prediction if outside */
1304 //just to make sure that all data is rendered.
1305 if(s
->avctx
->xvmc_acceleration
){
1309 if(s
->unrestricted_mv
&& s
->pict_type
!= B_TYPE
&& !s
->intra_only
&& !(s
->flags
&CODEC_FLAG_EMU_EDGE
)) {
1310 draw_edges(s
->current_picture
.data
[0], s
->linesize
, s
->h_edge_pos
, s
->v_edge_pos
, EDGE_WIDTH
);
1311 draw_edges(s
->current_picture
.data
[1], s
->uvlinesize
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1, EDGE_WIDTH
/2);
1312 draw_edges(s
->current_picture
.data
[2], s
->uvlinesize
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1, EDGE_WIDTH
/2);
1316 s
->last_pict_type
= s
->pict_type
;
1317 if(s
->pict_type
!=B_TYPE
){
1318 s
->last_non_b_pict_type
= s
->pict_type
;
1321 /* copy back current_picture variables */
1322 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1323 if(s
->picture
[i
].data
[0] == s
->current_picture
.data
[0]){
1324 s
->picture
[i
]= s
->current_picture
;
1328 assert(i
<MAX_PICTURE_COUNT
);
1332 /* release non refernce frames */
1333 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1334 if(s
->picture
[i
].data
[0] && !s
->picture
[i
].reference
/*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1335 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)&s
->picture
[i
]);
1339 // clear copies, to avoid confusion
1341 memset(&s
->last_picture
, 0, sizeof(Picture
));
1342 memset(&s
->next_picture
, 0, sizeof(Picture
));
1343 memset(&s
->current_picture
, 0, sizeof(Picture
));
1348 * draws an line from (ex, ey) -> (sx, sy).
1349 * @param w width of the image
1350 * @param h height of the image
1351 * @param stride stride/linesize of the image
1352 * @param color color of the arrow
1354 static void draw_line(uint8_t *buf
, int sx
, int sy
, int ex
, int ey
, int w
, int h
, int stride
, int color
){
1357 sx
= clip(sx
, 0, w
-1);
1358 sy
= clip(sy
, 0, h
-1);
1359 ex
= clip(ex
, 0, w
-1);
1360 ey
= clip(ey
, 0, h
-1);
1362 buf
[sy
*stride
+ sx
]+= color
;
1364 if(ABS(ex
- sx
) > ABS(ey
- sy
)){
1369 buf
+= sx
+ sy
*stride
;
1371 f
= ((ey
-sy
)<<16)/ex
;
1372 for(x
= 0; x
<= ex
; x
++){
1373 y
= ((x
*f
) + (1<<15))>>16;
1374 buf
[y
*stride
+ x
]+= color
;
1381 buf
+= sx
+ sy
*stride
;
1383 if(ey
) f
= ((ex
-sx
)<<16)/ey
;
1385 for(y
= 0; y
<= ey
; y
++){
1386 x
= ((y
*f
) + (1<<15))>>16;
1387 buf
[y
*stride
+ x
]+= color
;
1393 * draws an arrow from (ex, ey) -> (sx, sy).
1394 * @param w width of the image
1395 * @param h height of the image
1396 * @param stride stride/linesize of the image
1397 * @param color color of the arrow
1399 static void draw_arrow(uint8_t *buf
, int sx
, int sy
, int ex
, int ey
, int w
, int h
, int stride
, int color
){
1402 sx
= clip(sx
, -100, w
+100);
1403 sy
= clip(sy
, -100, h
+100);
1404 ex
= clip(ex
, -100, w
+100);
1405 ey
= clip(ey
, -100, h
+100);
1410 if(dx
*dx
+ dy
*dy
> 3*3){
1413 int length
= ff_sqrt((rx
*rx
+ ry
*ry
)<<8);
1415 //FIXME subpixel accuracy
1416 rx
= ROUNDED_DIV(rx
*3<<4, length
);
1417 ry
= ROUNDED_DIV(ry
*3<<4, length
);
1419 draw_line(buf
, sx
, sy
, sx
+ rx
, sy
+ ry
, w
, h
, stride
, color
);
1420 draw_line(buf
, sx
, sy
, sx
- ry
, sy
+ rx
, w
, h
, stride
, color
);
1422 draw_line(buf
, sx
, sy
, ex
, ey
, w
, h
, stride
, color
);
1426 * prints debuging info for the given picture.
1428 void ff_print_debug_info(MpegEncContext
*s
, AVFrame
*pict
){
1430 if(!pict
|| !pict
->mb_type
) return;
1432 if(s
->avctx
->debug
&(FF_DEBUG_SKIP
| FF_DEBUG_QP
| FF_DEBUG_MB_TYPE
)){
1435 av_log(s
->avctx
,AV_LOG_DEBUG
,"New frame, type: ");
1436 switch (pict
->pict_type
) {
1437 case FF_I_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"I\n"); break;
1438 case FF_P_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"P\n"); break;
1439 case FF_B_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"B\n"); break;
1440 case FF_S_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"S\n"); break;
1441 case FF_SI_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"SI\n"); break;
1442 case FF_SP_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"SP\n"); break;
1444 for(y
=0; y
<s
->mb_height
; y
++){
1445 for(x
=0; x
<s
->mb_width
; x
++){
1446 if(s
->avctx
->debug
&FF_DEBUG_SKIP
){
1447 int count
= s
->mbskip_table
[x
+ y
*s
->mb_stride
];
1448 if(count
>9) count
=9;
1449 av_log(s
->avctx
, AV_LOG_DEBUG
, "%1d", count
);
1451 if(s
->avctx
->debug
&FF_DEBUG_QP
){
1452 av_log(s
->avctx
, AV_LOG_DEBUG
, "%2d", pict
->qscale_table
[x
+ y
*s
->mb_stride
]);
1454 if(s
->avctx
->debug
&FF_DEBUG_MB_TYPE
){
1455 int mb_type
= pict
->mb_type
[x
+ y
*s
->mb_stride
];
1456 //Type & MV direction
1458 av_log(s
->avctx
, AV_LOG_DEBUG
, "P");
1459 else if(IS_INTRA(mb_type
) && IS_ACPRED(mb_type
))
1460 av_log(s
->avctx
, AV_LOG_DEBUG
, "A");
1461 else if(IS_INTRA4x4(mb_type
))
1462 av_log(s
->avctx
, AV_LOG_DEBUG
, "i");
1463 else if(IS_INTRA16x16(mb_type
))
1464 av_log(s
->avctx
, AV_LOG_DEBUG
, "I");
1465 else if(IS_DIRECT(mb_type
) && IS_SKIP(mb_type
))
1466 av_log(s
->avctx
, AV_LOG_DEBUG
, "d");
1467 else if(IS_DIRECT(mb_type
))
1468 av_log(s
->avctx
, AV_LOG_DEBUG
, "D");
1469 else if(IS_GMC(mb_type
) && IS_SKIP(mb_type
))
1470 av_log(s
->avctx
, AV_LOG_DEBUG
, "g");
1471 else if(IS_GMC(mb_type
))
1472 av_log(s
->avctx
, AV_LOG_DEBUG
, "G");
1473 else if(IS_SKIP(mb_type
))
1474 av_log(s
->avctx
, AV_LOG_DEBUG
, "S");
1475 else if(!USES_LIST(mb_type
, 1))
1476 av_log(s
->avctx
, AV_LOG_DEBUG
, ">");
1477 else if(!USES_LIST(mb_type
, 0))
1478 av_log(s
->avctx
, AV_LOG_DEBUG
, "<");
1480 assert(USES_LIST(mb_type
, 0) && USES_LIST(mb_type
, 1));
1481 av_log(s
->avctx
, AV_LOG_DEBUG
, "X");
1486 av_log(s
->avctx
, AV_LOG_DEBUG
, "+");
1487 else if(IS_16X8(mb_type
))
1488 av_log(s
->avctx
, AV_LOG_DEBUG
, "-");
1489 else if(IS_8X16(mb_type
))
1490 av_log(s
->avctx
, AV_LOG_DEBUG
, "¦");
1491 else if(IS_INTRA(mb_type
) || IS_16X16(mb_type
))
1492 av_log(s
->avctx
, AV_LOG_DEBUG
, " ");
1494 av_log(s
->avctx
, AV_LOG_DEBUG
, "?");
1497 if(IS_INTERLACED(mb_type
) && s
->codec_id
== CODEC_ID_H264
)
1498 av_log(s
->avctx
, AV_LOG_DEBUG
, "=");
1500 av_log(s
->avctx
, AV_LOG_DEBUG
, " ");
1502 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1504 av_log(s
->avctx
, AV_LOG_DEBUG
, "\n");
1508 if((s
->avctx
->debug
&(FF_DEBUG_VIS_QP
|FF_DEBUG_VIS_MB_TYPE
)) || (s
->avctx
->debug_mv
)){
1509 const int shift
= 1 + s
->quarter_sample
;
1513 int h_chroma_shift
, v_chroma_shift
;
1514 s
->low_delay
=0; //needed to see the vectors without trashing the buffers
1516 avcodec_get_chroma_sub_sample(s
->avctx
->pix_fmt
, &h_chroma_shift
, &v_chroma_shift
);
1518 memcpy(s
->visualization_buffer
[i
], pict
->data
[i
], (i
==0) ? pict
->linesize
[i
]*s
->height
:pict
->linesize
[i
]*s
->height
>> v_chroma_shift
);
1519 pict
->data
[i
]= s
->visualization_buffer
[i
];
1521 pict
->type
= FF_BUFFER_TYPE_COPY
;
1524 for(mb_y
=0; mb_y
<s
->mb_height
; mb_y
++){
1526 for(mb_x
=0; mb_x
<s
->mb_width
; mb_x
++){
1527 const int mb_index
= mb_x
+ mb_y
*s
->mb_stride
;
1528 if((s
->avctx
->debug_mv
) && pict
->motion_val
){
1530 for(type
=0; type
<3; type
++){
1533 case 0: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_P_FOR
)) || (pict
->pict_type
!=FF_P_TYPE
))
1537 case 1: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_B_FOR
)) || (pict
->pict_type
!=FF_B_TYPE
))
1541 case 2: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_B_BACK
)) || (pict
->pict_type
!=FF_B_TYPE
))
1546 if(!USES_LIST(pict
->mb_type
[mb_index
], direction
))
1549 if(IS_8X8(pict
->mb_type
[mb_index
])){
1552 int sx
= mb_x
*16 + 4 + 8*(i
&1);
1553 int sy
= mb_y
*16 + 4 + 8*(i
>>1);
1554 int xy
= 1 + mb_x
*2 + (i
&1) + (mb_y
*2 + 1 + (i
>>1))*(s
->mb_width
*2 + 2);
1555 int mx
= (pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1556 int my
= (pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1557 draw_arrow(ptr
, sx
, sy
, mx
, my
, s
->width
, s
->height
, s
->linesize
, 100);
1559 }else if(IS_16X8(pict
->mb_type
[mb_index
])){
1563 int sy
=mb_y
*16 + 4 + 8*i
;
1564 int xy
=1 + mb_x
*2 + (mb_y
*2 + 1 + i
)*(s
->mb_width
*2 + 2);
1565 int mx
=(pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1566 int my
=(pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1567 draw_arrow(ptr
, sx
, sy
, mx
, my
, s
->width
, s
->height
, s
->linesize
, 100);
1570 int sx
= mb_x
*16 + 8;
1571 int sy
= mb_y
*16 + 8;
1572 int xy
= 1 + mb_x
*2 + (mb_y
*2 + 1)*(s
->mb_width
*2 + 2);
1573 int mx
= (pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1574 int my
= (pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1575 draw_arrow(ptr
, sx
, sy
, mx
, my
, s
->width
, s
->height
, s
->linesize
, 100);
1579 if((s
->avctx
->debug
&FF_DEBUG_VIS_QP
) && pict
->motion_val
){
1580 uint64_t c
= (pict
->qscale_table
[mb_index
]*128/31) * 0x0101010101010101ULL
;
1583 *(uint64_t*)(pict
->data
[1] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[1])= c
;
1584 *(uint64_t*)(pict
->data
[2] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[2])= c
;
1587 if((s
->avctx
->debug
&FF_DEBUG_VIS_MB_TYPE
) && pict
->motion_val
){
1588 int mb_type
= pict
->mb_type
[mb_index
];
1591 #define COLOR(theta, r)\
1592 u= (int)(128 + r*cos(theta*3.141592/180));\
1593 v= (int)(128 + r*sin(theta*3.141592/180));
1597 if(IS_PCM(mb_type
)){
1599 }else if((IS_INTRA(mb_type
) && IS_ACPRED(mb_type
)) || IS_INTRA16x16(mb_type
)){
1601 }else if(IS_INTRA4x4(mb_type
)){
1603 }else if(IS_DIRECT(mb_type
) && IS_SKIP(mb_type
)){
1605 }else if(IS_DIRECT(mb_type
)){
1607 }else if(IS_GMC(mb_type
) && IS_SKIP(mb_type
)){
1609 }else if(IS_GMC(mb_type
)){
1611 }else if(IS_SKIP(mb_type
)){
1613 }else if(!USES_LIST(mb_type
, 1)){
1615 }else if(!USES_LIST(mb_type
, 0)){
1618 assert(USES_LIST(mb_type
, 0) && USES_LIST(mb_type
, 1));
1622 u
*= 0x0101010101010101ULL
;
1623 v
*= 0x0101010101010101ULL
;
1625 *(uint64_t*)(pict
->data
[1] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[1])= u
;
1626 *(uint64_t*)(pict
->data
[2] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[2])= v
;
1630 if(IS_8X8(mb_type
) || IS_16X8(mb_type
)){
1631 *(uint64_t*)(pict
->data
[0] + 16*mb_x
+ 0 + (16*mb_y
+ 8)*pict
->linesize
[0])^= 0x8080808080808080ULL
;
1632 *(uint64_t*)(pict
->data
[0] + 16*mb_x
+ 8 + (16*mb_y
+ 8)*pict
->linesize
[0])^= 0x8080808080808080ULL
;
1634 if(IS_8X8(mb_type
) || IS_8X16(mb_type
)){
1636 pict
->data
[0][16*mb_x
+ 8 + (16*mb_y
+ y
)*pict
->linesize
[0]]^= 0x80;
1639 if(IS_INTERLACED(mb_type
) && s
->codec_id
== CODEC_ID_H264
){
1643 s
->mbskip_table
[mb_index
]=0;
1649 #ifdef CONFIG_ENCODERS
1651 static int get_sae(uint8_t *src
, int ref
, int stride
){
1655 for(y
=0; y
<16; y
++){
1656 for(x
=0; x
<16; x
++){
1657 acc
+= ABS(src
[x
+y
*stride
] - ref
);
1664 static int get_intra_count(MpegEncContext
*s
, uint8_t *src
, uint8_t *ref
, int stride
){
1671 for(y
=0; y
<h
; y
+=16){
1672 for(x
=0; x
<w
; x
+=16){
1673 int offset
= x
+ y
*stride
;
1674 int sad
= s
->dsp
.sad
[0](NULL
, src
+ offset
, ref
+ offset
, stride
, 16);
1675 int mean
= (s
->dsp
.pix_sum(src
+ offset
, stride
) + 128)>>8;
1676 int sae
= get_sae(src
+ offset
, mean
, stride
);
1678 acc
+= sae
+ 500 < sad
;
1685 static int load_input_picture(MpegEncContext
*s
, AVFrame
*pic_arg
){
1688 const int encoding_delay
= s
->max_b_frames
;
1692 if(encoding_delay
&& !(s
->flags
&CODEC_FLAG_INPUT_PRESERVED
)) direct
=0;
1693 if(pic_arg
->linesize
[0] != s
->linesize
) direct
=0;
1694 if(pic_arg
->linesize
[1] != s
->uvlinesize
) direct
=0;
1695 if(pic_arg
->linesize
[2] != s
->uvlinesize
) direct
=0;
1697 // av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1700 i
= ff_find_unused_picture(s
, 1);
1702 pic
= (AVFrame
*)&s
->picture
[i
];
1706 pic
->data
[i
]= pic_arg
->data
[i
];
1707 pic
->linesize
[i
]= pic_arg
->linesize
[i
];
1709 alloc_picture(s
, (Picture
*)pic
, 1);
1712 i
= ff_find_unused_picture(s
, 0);
1714 pic
= (AVFrame
*)&s
->picture
[i
];
1717 alloc_picture(s
, (Picture
*)pic
, 0);
1719 if( pic
->data
[0] + offset
== pic_arg
->data
[0]
1720 && pic
->data
[1] + offset
== pic_arg
->data
[1]
1721 && pic
->data
[2] + offset
== pic_arg
->data
[2]){
1724 int h_chroma_shift
, v_chroma_shift
;
1725 avcodec_get_chroma_sub_sample(s
->avctx
->pix_fmt
, &h_chroma_shift
, &v_chroma_shift
);
1728 int src_stride
= pic_arg
->linesize
[i
];
1729 int dst_stride
= i ? s
->uvlinesize
: s
->linesize
;
1730 int h_shift
= i ? h_chroma_shift
: 0;
1731 int v_shift
= i ? v_chroma_shift
: 0;
1732 int w
= s
->width
>>h_shift
;
1733 int h
= s
->height
>>v_shift
;
1734 uint8_t *src
= pic_arg
->data
[i
];
1735 uint8_t *dst
= pic
->data
[i
] + offset
;
1737 if(src_stride
==dst_stride
)
1738 memcpy(dst
, src
, src_stride
*h
);
1741 memcpy(dst
, src
, w
);
1749 copy_picture_attributes(pic
, pic_arg
);
1751 pic
->display_picture_number
= s
->input_picture_number
++;
1754 /* shift buffer entries */
1755 for(i
=1; i
<MAX_PICTURE_COUNT
/*s->encoding_delay+1*/; i
++)
1756 s
->input_picture
[i
-1]= s
->input_picture
[i
];
1758 s
->input_picture
[encoding_delay
]= (Picture
*)pic
;
1763 static void select_input_picture(MpegEncContext
*s
){
1766 for(i
=1; i
<MAX_PICTURE_COUNT
; i
++)
1767 s
->reordered_input_picture
[i
-1]= s
->reordered_input_picture
[i
];
1768 s
->reordered_input_picture
[MAX_PICTURE_COUNT
-1]= NULL
;
1770 /* set next picture types & ordering */
1771 if(s
->reordered_input_picture
[0]==NULL
&& s
->input_picture
[0]){
1772 if(/*s->picture_in_gop_number >= s->gop_size ||*/ s
->next_picture_ptr
==NULL
|| s
->intra_only
){
1773 s
->reordered_input_picture
[0]= s
->input_picture
[0];
1774 s
->reordered_input_picture
[0]->pict_type
= I_TYPE
;
1775 s
->reordered_input_picture
[0]->coded_picture_number
= s
->coded_picture_number
++;
1779 if(s
->flags
&CODEC_FLAG_PASS2
){
1780 for(i
=0; i
<s
->max_b_frames
+1; i
++){
1781 int pict_num
= s
->input_picture
[0]->display_picture_number
+ i
;
1782 int pict_type
= s
->rc_context
.entry
[pict_num
].new_pict_type
;
1783 s
->input_picture
[i
]->pict_type
= pict_type
;
1785 if(i
+ 1 >= s
->rc_context
.num_entries
) break;
1789 if(s
->input_picture
[0]->pict_type
){
1790 /* user selected pict_type */
1791 for(b_frames
=0; b_frames
<s
->max_b_frames
+1; b_frames
++){
1792 if(s
->input_picture
[b_frames
]->pict_type
!=B_TYPE
) break;
1795 if(b_frames
> s
->max_b_frames
){
1796 av_log(s
->avctx
, AV_LOG_ERROR
, "warning, too many bframes in a row\n");
1797 b_frames
= s
->max_b_frames
;
1799 }else if(s
->avctx
->b_frame_strategy
==0){
1800 b_frames
= s
->max_b_frames
;
1801 while(b_frames
&& !s
->input_picture
[b_frames
]) b_frames
--;
1802 }else if(s
->avctx
->b_frame_strategy
==1){
1803 for(i
=1; i
<s
->max_b_frames
+1; i
++){
1804 if(s
->input_picture
[i
] && s
->input_picture
[i
]->b_frame_score
==0){
1805 s
->input_picture
[i
]->b_frame_score
=
1806 get_intra_count(s
, s
->input_picture
[i
]->data
[0],
1807 s
->input_picture
[i
-1]->data
[0], s
->linesize
) + 1;
1810 for(i
=0; i
<s
->max_b_frames
; i
++){
1811 if(s
->input_picture
[i
]==NULL
|| s
->input_picture
[i
]->b_frame_score
- 1 > s
->mb_num
/40) break;
1814 b_frames
= FFMAX(0, i
-1);
1817 for(i
=0; i
<b_frames
+1; i
++){
1818 s
->input_picture
[i
]->b_frame_score
=0;
1821 av_log(s
->avctx
, AV_LOG_ERROR
, "illegal b frame strategy\n");
1826 //static int b_count=0;
1827 //b_count+= b_frames;
1828 //av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
1829 if(s
->picture_in_gop_number
+ b_frames
>= s
->gop_size
){
1830 if(s
->flags
& CODEC_FLAG_CLOSED_GOP
)
1832 s
->input_picture
[b_frames
]->pict_type
= I_TYPE
;
1835 if( (s
->flags
& CODEC_FLAG_CLOSED_GOP
)
1837 && s
->input_picture
[b_frames
]->pict_type
== I_TYPE
)
1840 s
->reordered_input_picture
[0]= s
->input_picture
[b_frames
];
1841 if(s
->reordered_input_picture
[0]->pict_type
!= I_TYPE
)
1842 s
->reordered_input_picture
[0]->pict_type
= P_TYPE
;
1843 s
->reordered_input_picture
[0]->coded_picture_number
= s
->coded_picture_number
++;
1844 for(i
=0; i
<b_frames
; i
++){
1845 s
->reordered_input_picture
[i
+1]= s
->input_picture
[i
];
1846 s
->reordered_input_picture
[i
+1]->pict_type
= B_TYPE
;
1847 s
->reordered_input_picture
[i
+1]->coded_picture_number
= s
->coded_picture_number
++;
1852 if(s
->reordered_input_picture
[0]){
1853 s
->reordered_input_picture
[0]->reference
= s
->reordered_input_picture
[0]->pict_type
!=B_TYPE ?
3 : 0;
1855 copy_picture(&s
->new_picture
, s
->reordered_input_picture
[0]);
1857 if(s
->reordered_input_picture
[0]->type
== FF_BUFFER_TYPE_SHARED
){
1858 // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable
1860 int i
= ff_find_unused_picture(s
, 0);
1861 Picture
*pic
= &s
->picture
[i
];
1863 /* mark us unused / free shared pic */
1865 s
->reordered_input_picture
[0]->data
[i
]= NULL
;
1866 s
->reordered_input_picture
[0]->type
= 0;
1868 copy_picture_attributes((AVFrame
*)pic
, (AVFrame
*)s
->reordered_input_picture
[0]);
1869 pic
->reference
= s
->reordered_input_picture
[0]->reference
;
1871 alloc_picture(s
, pic
, 0);
1873 s
->current_picture_ptr
= pic
;
1875 // input is not a shared pix -> reuse buffer for current_pix
1877 assert( s
->reordered_input_picture
[0]->type
==FF_BUFFER_TYPE_USER
1878 || s
->reordered_input_picture
[0]->type
==FF_BUFFER_TYPE_INTERNAL
);
1880 s
->current_picture_ptr
= s
->reordered_input_picture
[0];
1882 s
->new_picture
.data
[i
]+=16;
1885 copy_picture(&s
->current_picture
, s
->current_picture_ptr
);
1887 s
->picture_number
= s
->new_picture
.display_picture_number
;
1888 //printf("dpn:%d\n", s->picture_number);
1890 memset(&s
->new_picture
, 0, sizeof(Picture
));
1894 int MPV_encode_picture(AVCodecContext
*avctx
,
1895 unsigned char *buf
, int buf_size
, void *data
)
1897 MpegEncContext
*s
= avctx
->priv_data
;
1898 AVFrame
*pic_arg
= data
;
1899 int i
, stuffing_count
;
1901 if(avctx
->pix_fmt
!= PIX_FMT_YUV420P
){
1902 av_log(avctx
, AV_LOG_ERROR
, "this codec supports only YUV420P\n");
1906 init_put_bits(&s
->pb
, buf
, buf_size
);
1908 s
->picture_in_gop_number
++;
1910 load_input_picture(s
, pic_arg
);
1912 select_input_picture(s
);
1915 if(s
->new_picture
.data
[0]){
1916 s
->pict_type
= s
->new_picture
.pict_type
;
1918 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1919 MPV_frame_start(s
, avctx
);
1921 encode_picture(s
, s
->picture_number
);
1923 avctx
->real_pict_num
= s
->picture_number
;
1924 avctx
->header_bits
= s
->header_bits
;
1925 avctx
->mv_bits
= s
->mv_bits
;
1926 avctx
->misc_bits
= s
->misc_bits
;
1927 avctx
->i_tex_bits
= s
->i_tex_bits
;
1928 avctx
->p_tex_bits
= s
->p_tex_bits
;
1929 avctx
->i_count
= s
->i_count
;
1930 avctx
->p_count
= s
->mb_num
- s
->i_count
- s
->skip_count
; //FIXME f/b_count in avctx
1931 avctx
->skip_count
= s
->skip_count
;
1935 if (s
->out_format
== FMT_MJPEG
)
1936 mjpeg_picture_trailer(s
);
1938 if(s
->flags
&CODEC_FLAG_PASS1
)
1939 ff_write_pass1_stats(s
);
1942 avctx
->error
[i
] += s
->current_picture_ptr
->error
[i
];
1945 flush_put_bits(&s
->pb
);
1946 s
->frame_bits
= (pbBufPtr(&s
->pb
) - s
->pb
.buf
) * 8;
1948 stuffing_count
= ff_vbv_update(s
, s
->frame_bits
);
1950 switch(s
->codec_id
){
1951 case CODEC_ID_MPEG1VIDEO
:
1952 case CODEC_ID_MPEG2VIDEO
:
1953 while(stuffing_count
--){
1954 put_bits(&s
->pb
, 8, 0);
1957 case CODEC_ID_MPEG4
:
1958 put_bits(&s
->pb
, 16, 0);
1959 put_bits(&s
->pb
, 16, 0x1C3);
1960 stuffing_count
-= 4;
1961 while(stuffing_count
--){
1962 put_bits(&s
->pb
, 8, 0xFF);
1966 av_log(s
->avctx
, AV_LOG_ERROR
, "vbv buffer overflow\n");
1968 flush_put_bits(&s
->pb
);
1969 s
->frame_bits
= (pbBufPtr(&s
->pb
) - s
->pb
.buf
) * 8;
1972 /* update mpeg1/2 vbv_delay for CBR */
1973 if(s
->avctx
->rc_max_rate
&& s
->avctx
->rc_min_rate
== s
->avctx
->rc_max_rate
){
1976 assert(s
->repeat_first_field
==0);
1978 vbv_delay
= lrintf(90000 * s
->rc_context
.buffer_index
/ s
->avctx
->rc_max_rate
);
1979 assert(vbv_delay
< 0xFFFF);
1981 s
->vbv_delay_ptr
[0] &= 0xF8;
1982 s
->vbv_delay_ptr
[0] |= vbv_delay
>>13;
1983 s
->vbv_delay_ptr
[1] = vbv_delay
>>5;
1984 s
->vbv_delay_ptr
[2] &= 0x07;
1985 s
->vbv_delay_ptr
[2] |= vbv_delay
<<3;
1987 s
->total_bits
+= s
->frame_bits
;
1988 avctx
->frame_bits
= s
->frame_bits
;
1990 assert((pbBufPtr(&s
->pb
) == s
->pb
.buf
));
1993 assert((s
->frame_bits
&7)==0);
1995 return s
->frame_bits
/8;
1998 #endif //CONFIG_ENCODERS
2000 static inline void gmc1_motion(MpegEncContext
*s
,
2001 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2003 uint8_t **ref_picture
, int src_offset
)
2006 int offset
, src_x
, src_y
, linesize
, uvlinesize
;
2007 int motion_x
, motion_y
;
2010 motion_x
= s
->sprite_offset
[0][0];
2011 motion_y
= s
->sprite_offset
[0][1];
2012 src_x
= s
->mb_x
* 16 + (motion_x
>> (s
->sprite_warping_accuracy
+1));
2013 src_y
= s
->mb_y
* 16 + (motion_y
>> (s
->sprite_warping_accuracy
+1));
2014 motion_x
<<=(3-s
->sprite_warping_accuracy
);
2015 motion_y
<<=(3-s
->sprite_warping_accuracy
);
2016 src_x
= clip(src_x
, -16, s
->width
);
2017 if (src_x
== s
->width
)
2019 src_y
= clip(src_y
, -16, s
->height
);
2020 if (src_y
== s
->height
)
2023 linesize
= s
->linesize
;
2024 uvlinesize
= s
->uvlinesize
;
2026 ptr
= ref_picture
[0] + (src_y
* linesize
) + src_x
+ src_offset
;
2028 dest_y
+=dest_offset
;
2029 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2030 if( (unsigned)src_x
>= s
->h_edge_pos
- 17
2031 || (unsigned)src_y
>= s
->v_edge_pos
- 17){
2032 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, linesize
, 17, 17, src_x
, src_y
, s
->h_edge_pos
, s
->v_edge_pos
);
2033 ptr
= s
->edge_emu_buffer
;
2037 if((motion_x
|motion_y
)&7){
2038 s
->dsp
.gmc1(dest_y
, ptr
, linesize
, 16, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2039 s
->dsp
.gmc1(dest_y
+8, ptr
+8, linesize
, 16, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2043 dxy
= ((motion_x
>>3)&1) | ((motion_y
>>2)&2);
2044 if (s
->no_rounding
){
2045 s
->dsp
.put_no_rnd_pixels_tab
[0][dxy
](dest_y
, ptr
, linesize
, 16);
2047 s
->dsp
.put_pixels_tab
[0][dxy
](dest_y
, ptr
, linesize
, 16);
2051 if(s
->flags
&CODEC_FLAG_GRAY
) return;
2053 motion_x
= s
->sprite_offset
[1][0];
2054 motion_y
= s
->sprite_offset
[1][1];
2055 src_x
= s
->mb_x
* 8 + (motion_x
>> (s
->sprite_warping_accuracy
+1));
2056 src_y
= s
->mb_y
* 8 + (motion_y
>> (s
->sprite_warping_accuracy
+1));
2057 motion_x
<<=(3-s
->sprite_warping_accuracy
);
2058 motion_y
<<=(3-s
->sprite_warping_accuracy
);
2059 src_x
= clip(src_x
, -8, s
->width
>>1);
2060 if (src_x
== s
->width
>>1)
2062 src_y
= clip(src_y
, -8, s
->height
>>1);
2063 if (src_y
== s
->height
>>1)
2066 offset
= (src_y
* uvlinesize
) + src_x
+ (src_offset
>>1);
2067 ptr
= ref_picture
[1] + offset
;
2068 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2069 if( (unsigned)src_x
>= (s
->h_edge_pos
>>1) - 9
2070 || (unsigned)src_y
>= (s
->v_edge_pos
>>1) - 9){
2071 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2072 ptr
= s
->edge_emu_buffer
;
2076 s
->dsp
.gmc1(dest_cb
+ (dest_offset
>>1), ptr
, uvlinesize
, 8, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2078 ptr
= ref_picture
[2] + offset
;
2080 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2081 ptr
= s
->edge_emu_buffer
;
2083 s
->dsp
.gmc1(dest_cr
+ (dest_offset
>>1), ptr
, uvlinesize
, 8, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2088 static inline void gmc_motion(MpegEncContext
*s
,
2089 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2091 uint8_t **ref_picture
, int src_offset
)
2094 int linesize
, uvlinesize
;
2095 const int a
= s
->sprite_warping_accuracy
;
2098 linesize
= s
->linesize
;
2099 uvlinesize
= s
->uvlinesize
;
2101 ptr
= ref_picture
[0] + src_offset
;
2103 dest_y
+=dest_offset
;
2105 ox
= s
->sprite_offset
[0][0] + s
->sprite_delta
[0][0]*s
->mb_x
*16 + s
->sprite_delta
[0][1]*s
->mb_y
*16;
2106 oy
= s
->sprite_offset
[0][1] + s
->sprite_delta
[1][0]*s
->mb_x
*16 + s
->sprite_delta
[1][1]*s
->mb_y
*16;
2108 s
->dsp
.gmc(dest_y
, ptr
, linesize
, 16,
2111 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2112 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2113 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2114 s
->h_edge_pos
, s
->v_edge_pos
);
2115 s
->dsp
.gmc(dest_y
+8, ptr
, linesize
, 16,
2116 ox
+ s
->sprite_delta
[0][0]*8,
2117 oy
+ s
->sprite_delta
[1][0]*8,
2118 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2119 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2120 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2121 s
->h_edge_pos
, s
->v_edge_pos
);
2123 if(s
->flags
&CODEC_FLAG_GRAY
) return;
2126 dest_cb
+=dest_offset
>>1;
2127 dest_cr
+=dest_offset
>>1;
2129 ox
= s
->sprite_offset
[1][0] + s
->sprite_delta
[0][0]*s
->mb_x
*8 + s
->sprite_delta
[0][1]*s
->mb_y
*8;
2130 oy
= s
->sprite_offset
[1][1] + s
->sprite_delta
[1][0]*s
->mb_x
*8 + s
->sprite_delta
[1][1]*s
->mb_y
*8;
2132 ptr
= ref_picture
[1] + (src_offset
>>1);
2133 s
->dsp
.gmc(dest_cb
, ptr
, uvlinesize
, 8,
2136 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2137 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2138 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2139 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2141 ptr
= ref_picture
[2] + (src_offset
>>1);
2142 s
->dsp
.gmc(dest_cr
, ptr
, uvlinesize
, 8,
2145 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2146 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2147 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2148 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
/**
 * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
 * @param buf destination buffer
 * @param src source buffer, pointing at the (possibly out-of-picture) top-left sample of the block
 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
 * @param block_w width of block
 * @param block_h height of block
 * @param src_x x coordinate of the top left sample of the block in the source buffer
 * @param src_y y coordinate of the top left sample of the block in the source buffer
 * @param w width of the source buffer
 * @param h height of the source buffer
 */
void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
                         int src_x, int src_y, int w, int h){
    int x, y;
    int start_y, start_x, end_y, end_x;

    /* if the block lies completely outside the picture, move it onto the
       nearest edge row/column so at least one sample row/column overlaps */
    if(src_y >= h){
        src   += (h-1-src_y)*linesize;
        src_y  = h-1;
    }else if(src_y <= -block_h){
        src   += (1-block_h-src_y)*linesize;
        src_y  = 1-block_h;
    }
    if(src_x >= w){
        src   += (w-1-src_x);
        src_x  = w-1;
    }else if(src_x <= -block_w){
        src   += (1-block_w-src_x);
        src_x  = 1-block_w;
    }

    /* intersection of the block with the valid picture area, in block coords */
    start_y = (-src_y > 0) ? -src_y : 0;                   /* max(0, -src_y) */
    start_x = (-src_x > 0) ? -src_x : 0;                   /* max(0, -src_x) */
    end_y   = (block_h < h - src_y) ? block_h : h - src_y; /* min(block_h, h-src_y) */
    end_x   = (block_w < w - src_x) ? block_w : w - src_x; /* min(block_w, w-src_x) */

    // copy existing part
    for(y=start_y; y<end_y; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize] = src[x + y*linesize];
        }
    }

    // top: replicate the first valid row upwards
    for(y=0; y<start_y; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize] = buf[x + start_y*linesize];
        }
    }

    // bottom: replicate the last valid row downwards
    for(y=end_y; y<block_h; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize] = buf[x + (end_y-1)*linesize];
        }
    }

    for(y=0; y<block_h; y++){
        // left: replicate the first valid column (corners get the row fill done above)
        for(x=0; x<start_x; x++){
            buf[x + y*linesize] = buf[start_x + y*linesize];
        }

        // right: replicate the last valid column
        for(x=end_x; x<block_w; x++){
            buf[x + y*linesize] = buf[end_x - 1 + y*linesize];
        }
    }
}
2222 static inline int hpel_motion(MpegEncContext
*s
,
2223 uint8_t *dest
, uint8_t *src
,
2224 int src_x
, int src_y
,
2225 int width
, int height
, int stride
,
2226 int h_edge_pos
, int v_edge_pos
,
2227 int w
, int h
, op_pixels_func
*pix_op
,
2228 int motion_x
, int motion_y
)
2233 dxy
= ((motion_y
& 1) << 1) | (motion_x
& 1);
2234 src_x
+= motion_x
>> 1;
2235 src_y
+= motion_y
>> 1;
2237 /* WARNING: do no forget half pels */
2238 src_x
= clip(src_x
, -16, width
); //FIXME unneeded for emu?
2241 src_y
= clip(src_y
, -16, height
);
2242 if (src_y
== height
)
2244 src
+= src_y
* stride
+ src_x
;
2246 if(s
->unrestricted_mv
&& (s
->flags
&CODEC_FLAG_EMU_EDGE
)){
2247 if( (unsigned)src_x
> h_edge_pos
- (motion_x
&1) - w
2248 || (unsigned)src_y
> v_edge_pos
- (motion_y
&1) - h
){
2249 ff_emulated_edge_mc(s
->edge_emu_buffer
, src
, stride
, w
+1, h
+1,
2250 src_x
, src_y
, h_edge_pos
, v_edge_pos
);
2251 src
= s
->edge_emu_buffer
;
2255 pix_op
[dxy
](dest
, src
, stride
, h
);
2259 /* apply one mpeg motion vector to the three components */
2260 static inline void mpeg_motion(MpegEncContext
*s
,
2261 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2263 uint8_t **ref_picture
, int src_offset
,
2264 int field_based
, op_pixels_func (*pix_op
)[4],
2265 int motion_x
, int motion_y
, int h
)
2268 int dxy
, offset
, mx
, my
, src_x
, src_y
, height
, v_edge_pos
, uvlinesize
;
2271 if(s
->quarter_sample
)
2278 height
= s
->height
>> field_based
;
2279 v_edge_pos
= s
->v_edge_pos
>> field_based
;
2280 uvlinesize
= s
->current_picture
.linesize
[1] << field_based
;
2283 dest_y
+ dest_offset
, ref_picture
[0] + src_offset
,
2284 s
->mb_x
* 16, s
->mb_y
* (16 >> field_based
),
2285 s
->width
, height
, s
->current_picture
.linesize
[0] << field_based
,
2286 s
->h_edge_pos
, v_edge_pos
,
2288 motion_x
, motion_y
);
2291 if(s
->flags
&CODEC_FLAG_GRAY
) return;
2293 if (s
->out_format
== FMT_H263
) {
2295 if ((motion_x
& 3) != 0)
2297 if ((motion_y
& 3) != 0)
2304 dxy
= ((my
& 1) << 1) | (mx
& 1);
2309 src_x
= s
->mb_x
* 8 + mx
;
2310 src_y
= s
->mb_y
* (8 >> field_based
) + my
;
2311 src_x
= clip(src_x
, -8, s
->width
>> 1);
2312 if (src_x
== (s
->width
>> 1))
2314 src_y
= clip(src_y
, -8, height
>> 1);
2315 if (src_y
== (height
>> 1))
2317 offset
= (src_y
* uvlinesize
) + src_x
+ (src_offset
>> 1);
2318 ptr
= ref_picture
[1] + offset
;
2320 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
- (src_offset
>> 1), s
->uvlinesize
, 9, 9+field_based
,
2321 src_x
, src_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2322 ptr
= s
->edge_emu_buffer
+ (src_offset
>> 1);
2324 pix_op
[1][dxy
](dest_cb
+ (dest_offset
>> 1), ptr
, uvlinesize
, h
>> 1);
2326 ptr
= ref_picture
[2] + offset
;
2328 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
- (src_offset
>> 1), s
->uvlinesize
, 9, 9+field_based
,
2329 src_x
, src_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2330 ptr
= s
->edge_emu_buffer
+ (src_offset
>> 1);
2332 pix_op
[1][dxy
](dest_cr
+ (dest_offset
>> 1), ptr
, uvlinesize
, h
>> 1);
//FIXME move to dsputil, avg variant, 16x16 version
/**
 * Overlapped block motion compensation for one 8x8 block:
 * blends the mid prediction with the top/left/right/bottom neighbour
 * predictions using fixed per-position weights that sum to 8
 * (result = (sum of weighted samples + 4) >> 3).
 * @param dst destination, 8x8 samples with the given stride
 * @param src the 5 predictions: [0]=mid [1]=top [2]=left [3]=right [4]=bottom
 */
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
    int x;
    uint8_t * const top   = src[1];
    uint8_t * const left  = src[2];
    uint8_t * const mid   = src[0];
    uint8_t * const right = src[3];
    uint8_t * const bottom= src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    x=0;
    /* row 0 (the FILTER4 calls also cover the middle of row 1) */
    OBMC_FILTER (x  , 2, 2, 4, 0, 0);
    OBMC_FILTER (x+1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+7, 2, 0, 4, 2, 0);
    x+= stride;
    /* row 1, outer columns only */
    OBMC_FILTER (x  , 1, 2, 5, 0, 0);
    OBMC_FILTER (x+1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x+6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x+7, 1, 0, 5, 2, 0);
    x+= stride;
    /* rows 2-3 */
    OBMC_FILTER4(x  , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x+2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x+4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x+6, 1, 0, 5, 2, 0);
    x+= 2*stride;
    /* rows 4-5 */
    OBMC_FILTER4(x  , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x+4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x+6, 0, 0, 5, 2, 1);
    x+= 2*stride;
    /* row 6 (the FILTER4 calls also cover the middle of row 7) */
    OBMC_FILTER (x  , 0, 2, 5, 0, 1);
    OBMC_FILTER (x+1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x+4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x+7, 0, 0, 5, 2, 1);
    x+= stride;
    /* row 7, outer columns only */
    OBMC_FILTER (x  , 0, 2, 4, 0, 2);
    OBMC_FILTER (x+1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+7, 0, 0, 4, 2, 2);
}
2386 /* obmc for 1 8x8 luma block */
2387 static inline void obmc_motion(MpegEncContext
*s
,
2388 uint8_t *dest
, uint8_t *src
,
2389 int src_x
, int src_y
,
2390 op_pixels_func
*pix_op
,
2391 int16_t mv
[5][2]/* mid top left right bottom*/)
2397 assert(s
->quarter_sample
==0);
2400 if(i
&& mv
[i
][0]==mv
[MID
][0] && mv
[i
][1]==mv
[MID
][1]){
2403 ptr
[i
]= s
->edge_emu_buffer
+ 16 + 8*(i
&1) + s
->linesize
*8*(i
>>1);
2404 hpel_motion(s
, ptr
[i
], src
,
2406 s
->width
, s
->height
, s
->linesize
,
2407 s
->h_edge_pos
, s
->v_edge_pos
,
2409 mv
[i
][0], mv
[i
][1]);
2413 put_obmc(dest
, ptr
, s
->linesize
);
2416 static inline void qpel_motion(MpegEncContext
*s
,
2417 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2419 uint8_t **ref_picture
, int src_offset
,
2420 int field_based
, op_pixels_func (*pix_op
)[4],
2421 qpel_mc_func (*qpix_op
)[16],
2422 int motion_x
, int motion_y
, int h
)
2425 int dxy
, offset
, mx
, my
, src_x
, src_y
, height
, v_edge_pos
, linesize
, uvlinesize
;
2428 dxy
= ((motion_y
& 3) << 2) | (motion_x
& 3);
2429 src_x
= s
->mb_x
* 16 + (motion_x
>> 2);
2430 src_y
= s
->mb_y
* (16 >> field_based
) + (motion_y
>> 2);
2432 height
= s
->height
>> field_based
;
2433 v_edge_pos
= s
->v_edge_pos
>> field_based
;
2434 src_x
= clip(src_x
, -16, s
->width
);
2435 if (src_x
== s
->width
)
2437 src_y
= clip(src_y
, -16, height
);
2438 if (src_y
== height
)
2440 linesize
= s
->linesize
<< field_based
;
2441 uvlinesize
= s
->uvlinesize
<< field_based
;
2442 ptr
= ref_picture
[0] + (src_y
* linesize
) + src_x
+ src_offset
;
2443 dest_y
+= dest_offset
;
2444 //printf("%d %d %d\n", src_x, src_y, dxy);
2446 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2447 if( (unsigned)src_x
> s
->h_edge_pos
- (motion_x
&3) - 16
2448 || (unsigned)src_y
> v_edge_pos
- (motion_y
&3) - h
){
2449 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
- src_offset
, s
->linesize
, 17, 17+field_based
,
2450 src_x
, src_y
<<field_based
, s
->h_edge_pos
, s
->v_edge_pos
);
2451 ptr
= s
->edge_emu_buffer
+ src_offset
;
2456 qpix_op
[0][dxy
](dest_y
, ptr
, linesize
);
2458 //damn interlaced mode
2459 //FIXME boundary mirroring is not exactly correct here
2460 qpix_op
[1][dxy
](dest_y
, ptr
, linesize
);
2461 qpix_op
[1][dxy
](dest_y
+8, ptr
+8, linesize
);
2464 if(s
->flags
&CODEC_FLAG_GRAY
) return;
2469 }else if(s
->workaround_bugs
&FF_BUG_QPEL_CHROMA2
){
2470 static const int rtab
[8]= {0,0,1,1,0,0,0,1};
2471 mx
= (motion_x
>>1) + rtab
[motion_x
&7];
2472 my
= (motion_y
>>1) + rtab
[motion_y
&7];
2473 }else if(s
->workaround_bugs
&FF_BUG_QPEL_CHROMA
){
2474 mx
= (motion_x
>>1)|(motion_x
&1);
2475 my
= (motion_y
>>1)|(motion_y
&1);
2483 dxy
= (mx
&1) | ((my
&1)<<1);
2487 src_x
= s
->mb_x
* 8 + mx
;
2488 src_y
= s
->mb_y
* (8 >> field_based
) + my
;
2489 src_x
= clip(src_x
, -8, s
->width
>> 1);
2490 if (src_x
== (s
->width
>> 1))
2492 src_y
= clip(src_y
, -8, height
>> 1);
2493 if (src_y
== (height
>> 1))
2496 offset
= (src_y
* uvlinesize
) + src_x
+ (src_offset
>> 1);
2497 ptr
= ref_picture
[1] + offset
;
2499 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
- (src_offset
>> 1), s
->uvlinesize
, 9, 9 + field_based
,
2500 src_x
, src_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2501 ptr
= s
->edge_emu_buffer
+ (src_offset
>> 1);
2503 pix_op
[1][dxy
](dest_cb
+ (dest_offset
>> 1), ptr
, uvlinesize
, h
>> 1);
2505 ptr
= ref_picture
[2] + offset
;
2507 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
- (src_offset
>> 1), s
->uvlinesize
, 9, 9 + field_based
,
2508 src_x
, src_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2509 ptr
= s
->edge_emu_buffer
+ (src_offset
>> 1);
2511 pix_op
[1][dxy
](dest_cr
+ (dest_offset
>> 1), ptr
, uvlinesize
, h
>> 1);
2514 inline int ff_h263_round_chroma(int x
){
2516 return (h263_chroma_roundtab
[x
& 0xf] + ((x
>> 3) & ~1));
2519 return -(h263_chroma_roundtab
[x
& 0xf] + ((x
>> 3) & ~1));
2524 * h263 chorma 4mv motion compensation.
2526 static inline void chroma_4mv_motion(MpegEncContext
*s
,
2527 uint8_t *dest_cb
, uint8_t *dest_cr
,
2528 uint8_t **ref_picture
,
2529 op_pixels_func
*pix_op
,
2531 int dxy
, emu
=0, src_x
, src_y
, offset
;
2534 /* In case of 8X8, we construct a single chroma motion vector
2535 with a special rounding */
2536 mx
= ff_h263_round_chroma(mx
);
2537 my
= ff_h263_round_chroma(my
);
2539 dxy
= ((my
& 1) << 1) | (mx
& 1);
2543 src_x
= s
->mb_x
* 8 + mx
;
2544 src_y
= s
->mb_y
* 8 + my
;
2545 src_x
= clip(src_x
, -8, s
->width
/2);
2546 if (src_x
== s
->width
/2)
2548 src_y
= clip(src_y
, -8, s
->height
/2);
2549 if (src_y
== s
->height
/2)
2552 offset
= (src_y
* (s
->uvlinesize
)) + src_x
;
2553 ptr
= ref_picture
[1] + offset
;
2554 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2555 if( (unsigned)src_x
> (s
->h_edge_pos
>>1) - (dxy
&1) - 8
2556 || (unsigned)src_y
> (s
->v_edge_pos
>>1) - (dxy
>>1) - 8){
2557 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, s
->uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2558 ptr
= s
->edge_emu_buffer
;
2562 pix_op
[dxy
](dest_cb
, ptr
, s
->uvlinesize
, 8);
2564 ptr
= ref_picture
[2] + offset
;
2566 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, s
->uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2567 ptr
= s
->edge_emu_buffer
;
2569 pix_op
[dxy
](dest_cr
, ptr
, s
->uvlinesize
, 8);
 * motion compensation of a single macroblock
2575 * @param dest_y luma destination pointer
2576 * @param dest_cb chroma cb/u destination pointer
2577 * @param dest_cr chroma cr/v destination pointer
2578 * @param dir direction (0->forward, 1->backward)
2579 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
2582 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2584 static inline void MPV_motion(MpegEncContext
*s
,
2585 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2586 int dir
, uint8_t **ref_picture
,
2587 op_pixels_func (*pix_op
)[4], qpel_mc_func (*qpix_op
)[16])
2589 int dxy
, mx
, my
, src_x
, src_y
, motion_x
, motion_y
;
2591 uint8_t *ptr
, *dest
;
2596 if(s
->obmc
&& s
->pict_type
!= B_TYPE
){
2597 int16_t mv_cache
[4][4][2];
2598 const int xy
= s
->mb_x
+ s
->mb_y
*s
->mb_stride
;
2599 const int mot_stride
= s
->mb_width
*2 + 2;
2600 const int mot_xy
= 1 + mb_x
*2 + (mb_y
*2 + 1)*mot_stride
;
2602 assert(!s
->mb_skiped
);
2604 memcpy(mv_cache
[1][1], s
->current_picture
.motion_val
[0][mot_xy
], sizeof(int16_t)*4);
2605 memcpy(mv_cache
[2][1], s
->current_picture
.motion_val
[0][mot_xy
+mot_stride
], sizeof(int16_t)*4);
2606 memcpy(mv_cache
[3][1], s
->current_picture
.motion_val
[0][mot_xy
+mot_stride
], sizeof(int16_t)*4);
2608 if(mb_y
==0 || IS_INTRA(s
->current_picture
.mb_type
[xy
-s
->mb_stride
])){
2609 memcpy(mv_cache
[0][1], mv_cache
[1][1], sizeof(int16_t)*4);
2611 memcpy(mv_cache
[0][1], s
->current_picture
.motion_val
[0][mot_xy
-mot_stride
], sizeof(int16_t)*4);
2614 if(mb_x
==0 || IS_INTRA(s
->current_picture
.mb_type
[xy
-1])){
2615 *(int32_t*)mv_cache
[1][0]= *(int32_t*)mv_cache
[1][1];
2616 *(int32_t*)mv_cache
[2][0]= *(int32_t*)mv_cache
[2][1];
2618 *(int32_t*)mv_cache
[1][0]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
-1];
2619 *(int32_t*)mv_cache
[2][0]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
-1+mot_stride
];
2622 if(mb_x
+1>=s
->mb_width
|| IS_INTRA(s
->current_picture
.mb_type
[xy
+1])){
2623 *(int32_t*)mv_cache
[1][3]= *(int32_t*)mv_cache
[1][2];
2624 *(int32_t*)mv_cache
[2][3]= *(int32_t*)mv_cache
[2][2];
2626 *(int32_t*)mv_cache
[1][3]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
+2];
2627 *(int32_t*)mv_cache
[2][3]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
+2+mot_stride
];
2633 const int x
= (i
&1)+1;
2634 const int y
= (i
>>1)+1;
2636 {mv_cache
[y
][x
][0], mv_cache
[y
][x
][1]},
2637 {mv_cache
[y
-1][x
][0], mv_cache
[y
-1][x
][1]},
2638 {mv_cache
[y
][x
-1][0], mv_cache
[y
][x
-1][1]},
2639 {mv_cache
[y
][x
+1][0], mv_cache
[y
][x
+1][1]},
2640 {mv_cache
[y
+1][x
][0], mv_cache
[y
+1][x
][1]}};
2642 obmc_motion(s
, dest_y
+ ((i
& 1) * 8) + (i
>> 1) * 8 * s
->linesize
,
2644 mb_x
* 16 + (i
& 1) * 8, mb_y
* 16 + (i
>>1) * 8,
2651 if(!(s
->flags
&CODEC_FLAG_GRAY
))
2652 chroma_4mv_motion(s
, dest_cb
, dest_cr
, ref_picture
, pix_op
[1], mx
, my
);
2657 switch(s
->mv_type
) {
2661 if(s
->real_sprite_warping_points
==1){
2662 gmc1_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2665 gmc_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2668 }else if(s
->quarter_sample
){
2669 qpel_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2672 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
2674 ff_mspel_motion(s
, dest_y
, dest_cb
, dest_cr
,
2675 ref_picture
, pix_op
,
2676 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
2680 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2683 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
2689 if(s
->quarter_sample
){
2691 motion_x
= s
->mv
[dir
][i
][0];
2692 motion_y
= s
->mv
[dir
][i
][1];
2694 dxy
= ((motion_y
& 3) << 2) | (motion_x
& 3);
2695 src_x
= mb_x
* 16 + (motion_x
>> 2) + (i
& 1) * 8;
2696 src_y
= mb_y
* 16 + (motion_y
>> 2) + (i
>>1) * 8;
2698 /* WARNING: do no forget half pels */
2699 src_x
= clip(src_x
, -16, s
->width
);
2700 if (src_x
== s
->width
)
2702 src_y
= clip(src_y
, -16, s
->height
);
2703 if (src_y
== s
->height
)
2706 ptr
= ref_picture
[0] + (src_y
* s
->linesize
) + (src_x
);
2707 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2708 if( (unsigned)src_x
> s
->h_edge_pos
- (motion_x
&3) - 8
2709 || (unsigned)src_y
> s
->v_edge_pos
- (motion_y
&3) - 8 ){
2710 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, s
->linesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
, s
->v_edge_pos
);
2711 ptr
= s
->edge_emu_buffer
;
2714 dest
= dest_y
+ ((i
& 1) * 8) + (i
>> 1) * 8 * s
->linesize
;
2715 qpix_op
[1][dxy
](dest
, ptr
, s
->linesize
);
2717 mx
+= s
->mv
[dir
][i
][0]/2;
2718 my
+= s
->mv
[dir
][i
][1]/2;
2722 hpel_motion(s
, dest_y
+ ((i
& 1) * 8) + (i
>> 1) * 8 * s
->linesize
,
2724 mb_x
* 16 + (i
& 1) * 8, mb_y
* 16 + (i
>>1) * 8,
2725 s
->width
, s
->height
, s
->linesize
,
2726 s
->h_edge_pos
, s
->v_edge_pos
,
2728 s
->mv
[dir
][i
][0], s
->mv
[dir
][i
][1]);
2730 mx
+= s
->mv
[dir
][i
][0];
2731 my
+= s
->mv
[dir
][i
][1];
2735 if(!(s
->flags
&CODEC_FLAG_GRAY
))
2736 chroma_4mv_motion(s
, dest_cb
, dest_cr
, ref_picture
, pix_op
[1], mx
, my
);
2739 if (s
->picture_structure
== PICT_FRAME
) {
2740 if(s
->quarter_sample
){
2742 qpel_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2743 ref_picture
, s
->field_select
[dir
][0] ? s
->linesize
: 0,
2745 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 8);
2747 qpel_motion(s
, dest_y
, dest_cb
, dest_cr
, s
->linesize
,
2748 ref_picture
, s
->field_select
[dir
][1] ? s
->linesize
: 0,
2750 s
->mv
[dir
][1][0], s
->mv
[dir
][1][1], 8);
2753 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2754 ref_picture
, s
->field_select
[dir
][0] ? s
->linesize
: 0,