/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 */

/**
 * The simplest mpeg encoder (well, it was the simplest!).
 */
#include <math.h> //for PI
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"

#ifdef USE_FASTMEMCPY
#include "fastmemcpy.h"
#endif
#ifdef CONFIG_ENCODERS
static void encode_picture(MpegEncContext *s, int picture_number);
#endif //CONFIG_ENCODERS
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
#ifdef CONFIG_ENCODERS
static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
static int sse_mb(MpegEncContext *s);
static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
#endif //CONFIG_ENCODERS

extern int  XVMC_field_start(MpegEncContext *s, AVCodecContext *avctx);
extern void XVMC_field_end(MpegEncContext *s);
extern void XVMC_decode_mb(MpegEncContext *s);

void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
/* enable all paranoid tests for rounding, overflows, etc... */

/* for jpeg fast DCT */
static const uint16_t aanscales[64] = {
    /* precomputed values scaled up by 14 bits */
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
    21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
    19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
     8867, 12299, 11585, 10426,  8867,  6967,  4799,  2446,
     4520,  6270,  5906,  5315,  4520,  3552,  2446,  1247
};

static const uint8_t h263_chroma_roundtab[16] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
};

static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
#ifdef CONFIG_ENCODERS
static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
static uint8_t default_fcode_tab[MAX_MV*2+1];

enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
                           const uint16_t *quant_matrix, int bias, int qmin, int qmax)
{
    int qscale;

    for(qscale=qmin; qscale<=qmax; qscale++){
        int i;
        if (dsp->fdct == ff_jpeg_fdct_islow
#ifdef FAAN_POSTSCALE
            || dsp->fdct == ff_faandct
#endif
            ) {
            for(i=0;i<64;i++) {
                const int j= dsp->idct_permutation[i];
                /* 16 <= qscale * quant_matrix[i] <= 7905 */
                /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
                /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
                /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */

                qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) /
                                        (qscale * quant_matrix[j]));
            }
        } else if (dsp->fdct == fdct_ifast
#ifndef FAAN_POSTSCALE
                   || dsp->fdct == ff_faandct
#endif
                   ) {
            for(i=0;i<64;i++) {
                const int j= dsp->idct_permutation[i];
                /* 16 <= qscale * quant_matrix[i] <= 7905 */
                /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
                /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
                /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */

                qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) /
                                        (aanscales[i] * qscale * quant_matrix[j]));
            }
        } else {
            for(i=0;i<64;i++) {
                const int j= dsp->idct_permutation[i];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                   So 16           <= qscale * quant_matrix[i]             <= 7905
                   so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
                   so 32768        >= (1<<19) / (qscale * quant_matrix[i]) >= 67
                */
                qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
//                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);

                if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;
                qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);
            }
        }
    }
}

static inline void update_qscale(MpegEncContext *s){
    s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
    s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax);

    s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
}
#endif //CONFIG_ENCODERS
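/* Usage sketch (approximate; based on how dct_quantize_c() later in this file
 * consumes these tables): for a coefficient 'level' at position i, the
 * quantizer computes roughly
 *     level = (bias + level * qmat[qscale][i]) >> QMAT_SHIFT;
 * so qmat[] folds 1/(qscale*quant_matrix[]) -- and, for the AAN/ifast DCT, the
 * aanscales[] postscale -- into one fixed-point multiply. update_qscale()
 * maps the rate-distortion lambda back to a legal qscale; assuming the usual
 * FF_LAMBDA_SHIFT==7 and FF_LAMBDA_SCALE==(1<<7), lambda==FF_LAMBDA_SCALE
 * yields qscale==1.
 */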
void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
    int i;
    int end;

    st->scantable= src_scantable;

    for(i=0; i<64; i++){
        int j;
        j = src_scantable[i];
        st->permutated[i] = permutation[j];
    }

    end=-1;
    for(i=0; i<64; i++){
        int j;
        j = st->permutated[i];
        if(j>end) end=j;
        st->raster_end[i]= end;
    }
}
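/* Example: with the identity permutation used by the plain C IDCT,
 * st->permutated[] ends up equal to src_scantable[], and st->raster_end[i] is
 * the largest raster index touched by the first i+1 scan positions -- for
 * ff_zigzag_direct (0, 1, 8, 16, ...) that gives raster_end[0]=0,
 * raster_end[1]=1, raster_end[2]=8.
 */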
#ifdef CONFIG_ENCODERS
void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){
    int i;

    if(matrix){
        put_bits(pb, 1, 1);
        for(i=0;i<64;i++) {
            put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
        }
    }else
        put_bits(pb, 1, 0);
}
#endif //CONFIG_ENCODERS
/* init common dct for both encoder and decoder */
int DCT_common_init(MpegEncContext *s)
{
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#ifdef CONFIG_ENCODERS
    s->dct_quantize= dct_quantize_c;
    s->denoise_dct= denoise_dct_c;
#endif

#ifdef HAVE_MMX
    MPV_common_init_mmx(s);
#endif
#ifdef ARCH_ALPHA
    MPV_common_init_axp(s);
#endif
#ifdef HAVE_MLIB
    MPV_common_init_mlib(s);
#endif
#ifdef HAVE_MMI
    MPV_common_init_mmi(s);
#endif
#ifdef ARCH_ARMV4L
    MPV_common_init_armv4l(s);
#endif
#ifdef ARCH_POWERPC
    MPV_common_init_ppc(s);
#endif

#ifdef CONFIG_ENCODERS
    s->fast_dct_quantize= s->dct_quantize;

    if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
        s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
    }
#endif //CONFIG_ENCODERS

    /* load & permutate scantables
       note: only wmv uses different ones
    */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    s->picture_structure= PICT_FRAME;

    return 0;
}
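/* Note on the ordering above: the portable C routines are installed first and
 * the per-architecture MPV_common_init_*() calls may then overwrite individual
 * function pointers with optimized versions; the trellis quantizer is plugged
 * in only afterwards, which is what the "move before MPV_common_init_*"
 * comment refers to.
 */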
static void copy_picture(Picture *dst, Picture *src){
    *dst = *src;
    dst->type= FF_BUFFER_TYPE_COPY;
}

static void copy_picture_attributes(AVFrame *dst, AVFrame *src){
    dst->pict_type= src->pict_type;
    dst->quality= src->quality;
    dst->coded_picture_number= src->coded_picture_number;
    dst->display_picture_number= src->display_picture_number;
//    dst->reference= src->reference;
    dst->pts = src->pts;
    dst->interlaced_frame = src->interlaced_frame;
    dst->top_field_first = src->top_field_first;
}
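/* copy_picture() is a shallow struct copy: dst shares the data[] planes of src
 * and is tagged FF_BUFFER_TYPE_COPY so later code can tell it does not own
 * them. copy_picture_attributes() instead carries over only the metadata that
 * has to survive frame reordering (pts, pict_type, quality, picture numbers,
 * interlacing flags).
 */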
/**
 * allocates a Picture
 * The pixels are allocated/set by calling get_buffer() if shared=0
 */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesn't sig11
    const int mb_array_size= s->mb_stride*s->mb_height;
    const int b8_array_size= s->b8_stride*s->mb_height*2;
    const int b4_array_size= s->b4_stride*s->mb_height*4;
    int i;

    if(shared){
        assert(pic->data[0]);
        assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
        pic->type= FF_BUFFER_TYPE_SHARED;
    }else{
        int r;

        assert(!pic->data[0]);

        r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);

        if(r<0 || !pic->age || !pic->type || !pic->data[0]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
            return -1;
        }

        if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
            return -1;
        }

        if(pic->linesize[1] != pic->linesize[2]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
            return -1;
        }

        s->linesize  = pic->linesize[0];
        s->uvlinesize= pic->linesize[1];
    }

    if(pic->qscale_table==NULL){
        if (s->encoding) {
            CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))
        }

        CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
        CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num    * sizeof(uint32_t))
        pic->mb_type= pic->mb_type_base + s->mb_stride+1;
        if(s->out_format == FMT_H264){
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+1) * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+1;
                CHECKED_ALLOCZ(pic->ref_index[i] , b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 2;
        }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+1) * sizeof(int16_t)*2) //FIXME
                pic->motion_val[i]= pic->motion_val_base[i]+1;
            }
            pic->motion_subsample_log2= 3;
        }
        if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
            CHECKED_ALLOCZ(pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6)
        }
        pic->qstride= s->mb_stride;
        CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
    }

    //it might be nicer if the application would keep track of these but it would require an API change
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
    s->prev_pict_types[0]= s->pict_type;
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
        pic->age= INT_MAX; // skipped MBs in B-frames are quite rare in mpeg1/2 and it's a bit tricky to skip them anyway

    return 0;
fail: //for the CHECKED_ALLOCZ macro
    return -1;
}
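/* CHECKED_ALLOCZ (defined in mpegvideo.h) is assumed here to expand to roughly
 *     p = av_mallocz(size); if(!p) goto fail;
 * which is why alloc_picture() and the other users end in a 'fail:' label that
 * simply reports failure with -1 and leaves the actual cleanup to
 * free_picture() / MPV_common_end().
 */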
/**
 * deallocates a picture
 */
static void free_picture(MpegEncContext *s, Picture *pic){
    int i;

    if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
        s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
    }

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->mbskip_table);
    av_freep(&pic->qscale_table);
    av_freep(&pic->mb_type_base);
    av_freep(&pic->dct_coeff);
    av_freep(&pic->pan_scan);
    pic->mb_type= NULL;
    for(i=0; i<2; i++){
        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->ref_index[i]);
    }

    if(pic->type == FF_BUFFER_TYPE_SHARED){
        for(i=0; i<4; i++){
            pic->base[i]=
            pic->data[i]= NULL;
        }
        pic->type= 0;
    }
}
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int i;

    CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;

    //FIXME should be linesize instead of s->width*2 but that isn't known before get_buffer()
    CHECKED_ALLOCZ(s->me.scratchpad,  s->width*2*16*2*sizeof(uint8_t))
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;

    CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
    CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
    if(s->avctx->noise_reduction){
        CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))
    }

    CHECKED_ALLOCZ(s->blocks, 64*6*2 * sizeof(DCTELEM))
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = (short *)(&s->block[i]);
    }
    return 0;
fail:
    return -1; //free() through MPV_common_end()
}
static void free_duplicate_context(MpegEncContext *s){
    if(s==NULL) return;

    av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
    av_freep(&s->me.scratchpad);
    s->rd_scratchpad=
    s->b_scratchpad=
    s->obmc_scratchpad= NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    s->block= NULL;
}
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#define COPY(a) bak->a= src->a
    COPY(allocated_edge_emu_buffer);
    COPY(edge_emu_buffer);
    COPY(obmc_scratchpad);
    COPY(me.map_generation);
#undef COPY
}

void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
    MpegEncContext bak;
    int i;
    //FIXME copy only needed parts
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for(i=0;i<12;i++){
        dst->pblocks[i] = (short *)(&dst->block[i]);
    }
//STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
}

static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src){
#define COPY(a) dst->a= src->a
    COPY(current_picture);
    COPY(picture_in_gop_number);
    COPY(gop_picture_number);
    COPY(frame_pred_frame_dct); //FIXME don't set in encode_header
    COPY(progressive_frame);    //FIXME don't set in encode_header
    COPY(partitioned_frame);    //FIXME don't set in encode_header
#undef COPY
}
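/* ff_update_duplicate_context() uses a save/overwrite/restore trick: the
 * per-thread pointers saved by backup_duplicate_context() (scratchpads, ME
 * maps, block arrays) survive the full memcpy() of the context, so every slice
 * thread shares the encoder state while keeping its own working buffers.
 */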
506 /* init common structure for both encoder and decoder */
507 int MPV_common_init(MpegEncContext
*s
)
509 int y_size
, c_size
, yc_size
, i
, mb_array_size
, mv_table_size
, x
, y
;
511 dsputil_init(&s
->dsp
, s
->avctx
);
514 s
->flags
= s
->avctx
->flags
;
515 s
->flags2
= s
->avctx
->flags2
;
517 s
->mb_width
= (s
->width
+ 15) / 16;
518 s
->mb_height
= (s
->height
+ 15) / 16;
519 s
->mb_stride
= s
->mb_width
+ 1;
520 s
->b8_stride
= s
->mb_width
*2 + 1;
521 s
->b4_stride
= s
->mb_width
*4 + 1;
522 mb_array_size
= s
->mb_height
* s
->mb_stride
;
523 mv_table_size
= (s
->mb_height
+2) * s
->mb_stride
+ 1;
525 /* set default edge pos, will be overriden in decode_header if needed */
526 s
->h_edge_pos
= s
->mb_width
*16;
527 s
->v_edge_pos
= s
->mb_height
*16;
529 s
->mb_num
= s
->mb_width
* s
->mb_height
;
534 s
->block_wrap
[3]= s
->mb_width
*2 + 2;
536 s
->block_wrap
[5]= s
->mb_width
+ 2;
539 s
->c_dc_scale_table
= ff_mpeg1_dc_scale_table
;
540 s
->chroma_qscale_table
= ff_default_chroma_qscale_table
;
542 s
->progressive_sequence
= 1;
543 s
->progressive_frame
= 1;
544 s
->coded_picture_number
= 0;
546 y_size
= (2 * s
->mb_width
+ 2) * (2 * s
->mb_height
+ 2);
547 c_size
= (s
->mb_width
+ 2) * (s
->mb_height
+ 2);
548 yc_size
= y_size
+ 2 * c_size
;
550 /* convert fourcc to upper case */
551 s
->avctx
->codec_tag
= toupper( s
->avctx
->codec_tag
&0xFF)
552 + (toupper((s
->avctx
->codec_tag
>>8 )&0xFF)<<8 )
553 + (toupper((s
->avctx
->codec_tag
>>16)&0xFF)<<16)
554 + (toupper((s
->avctx
->codec_tag
>>24)&0xFF)<<24);
556 s
->avctx
->stream_codec_tag
= toupper( s
->avctx
->stream_codec_tag
&0xFF)
557 + (toupper((s
->avctx
->stream_codec_tag
>>8 )&0xFF)<<8 )
558 + (toupper((s
->avctx
->stream_codec_tag
>>16)&0xFF)<<16)
559 + (toupper((s
->avctx
->stream_codec_tag
>>24)&0xFF)<<24);
561 s
->avctx
->coded_frame
= (AVFrame
*)&s
->current_picture
;
    CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error resilience code looks cleaner with this
564 for(y
=0; y
<s
->mb_height
; y
++){
565 for(x
=0; x
<s
->mb_width
; x
++){
566 s
->mb_index2xy
[ x
+ y
*s
->mb_width
] = x
+ y
*s
->mb_stride
;
569 s
->mb_index2xy
[ s
->mb_height
*s
->mb_width
] = (s
->mb_height
-1)*s
->mb_stride
+ s
->mb_width
; //FIXME really needed?
572 /* Allocate MV tables */
573 CHECKED_ALLOCZ(s
->p_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
574 CHECKED_ALLOCZ(s
->b_forw_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
575 CHECKED_ALLOCZ(s
->b_back_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
576 CHECKED_ALLOCZ(s
->b_bidir_forw_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
577 CHECKED_ALLOCZ(s
->b_bidir_back_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
578 CHECKED_ALLOCZ(s
->b_direct_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
579 s
->p_mv_table
= s
->p_mv_table_base
+ s
->mb_stride
+ 1;
580 s
->b_forw_mv_table
= s
->b_forw_mv_table_base
+ s
->mb_stride
+ 1;
581 s
->b_back_mv_table
= s
->b_back_mv_table_base
+ s
->mb_stride
+ 1;
582 s
->b_bidir_forw_mv_table
= s
->b_bidir_forw_mv_table_base
+ s
->mb_stride
+ 1;
583 s
->b_bidir_back_mv_table
= s
->b_bidir_back_mv_table_base
+ s
->mb_stride
+ 1;
584 s
->b_direct_mv_table
= s
->b_direct_mv_table_base
+ s
->mb_stride
+ 1;
586 if(s
->msmpeg4_version
){
587 CHECKED_ALLOCZ(s
->ac_stats
, 2*2*(MAX_LEVEL
+1)*(MAX_RUN
+1)*2*sizeof(int));
589 CHECKED_ALLOCZ(s
->avctx
->stats_out
, 256);
591 /* Allocate MB type table */
592 CHECKED_ALLOCZ(s
->mb_type
, mb_array_size
* sizeof(uint16_t)) //needed for encoding
594 CHECKED_ALLOCZ(s
->lambda_table
, mb_array_size
* sizeof(int))
596 CHECKED_ALLOCZ(s
->q_intra_matrix
, 64*32 * sizeof(int))
597 CHECKED_ALLOCZ(s
->q_inter_matrix
, 64*32 * sizeof(int))
598 CHECKED_ALLOCZ(s
->q_intra_matrix16
, 64*32*2 * sizeof(uint16_t))
599 CHECKED_ALLOCZ(s
->q_inter_matrix16
, 64*32*2 * sizeof(uint16_t))
600 CHECKED_ALLOCZ(s
->input_picture
, MAX_PICTURE_COUNT
* sizeof(Picture
*))
601 CHECKED_ALLOCZ(s
->reordered_input_picture
, MAX_PICTURE_COUNT
* sizeof(Picture
*))
603 if(s
->avctx
->noise_reduction
){
604 CHECKED_ALLOCZ(s
->dct_offset
, 2 * 64 * sizeof(uint16_t))
607 CHECKED_ALLOCZ(s
->picture
, MAX_PICTURE_COUNT
* sizeof(Picture
))
609 CHECKED_ALLOCZ(s
->error_status_table
, mb_array_size
*sizeof(uint8_t))
611 if(s
->codec_id
==CODEC_ID_MPEG4
|| (s
->flags
& CODEC_FLAG_INTERLACED_ME
)){
612 /* interlaced direct mode decoding tables */
617 CHECKED_ALLOCZ(s
->b_field_mv_table_base
[i
][j
][k
] , mv_table_size
* 2 * sizeof(int16_t))
618 s
->b_field_mv_table
[i
][j
][k
] = s
->b_field_mv_table_base
[i
][j
][k
] + s
->mb_stride
+ 1;
620 CHECKED_ALLOCZ(s
->b_field_select_table
[i
][j
] , mb_array_size
* 2 * sizeof(uint8_t))
621 CHECKED_ALLOCZ(s
->p_field_mv_table_base
[i
][j
] , mv_table_size
* 2 * sizeof(int16_t))
622 s
->p_field_mv_table
[i
][j
] = s
->p_field_mv_table_base
[i
][j
] + s
->mb_stride
+ 1;
624 CHECKED_ALLOCZ(s
->p_field_select_table
[i
] , mb_array_size
* 2 * sizeof(uint8_t))
627 if (s
->out_format
== FMT_H263
) {
629 CHECKED_ALLOCZ(s
->ac_val
[0], yc_size
* sizeof(int16_t) * 16);
630 s
->ac_val
[1] = s
->ac_val
[0] + y_size
;
631 s
->ac_val
[2] = s
->ac_val
[1] + c_size
;
634 CHECKED_ALLOCZ(s
->coded_block
, y_size
);
636 /* divx501 bitstream reorder buffer */
637 CHECKED_ALLOCZ(s
->bitstream_buffer
, BITSTREAM_BUFFER_SIZE
);
639 /* cbp, ac_pred, pred_dir */
640 CHECKED_ALLOCZ(s
->cbp_table
, mb_array_size
* sizeof(uint8_t))
641 CHECKED_ALLOCZ(s
->pred_dir_table
, mb_array_size
* sizeof(uint8_t))
644 if (s
->h263_pred
|| s
->h263_plus
|| !s
->encoding
) {
646 //MN: we need these for error resilience of intra-frames
647 CHECKED_ALLOCZ(s
->dc_val
[0], yc_size
* sizeof(int16_t));
648 s
->dc_val
[1] = s
->dc_val
[0] + y_size
;
649 s
->dc_val
[2] = s
->dc_val
[1] + c_size
;
650 for(i
=0;i
<yc_size
;i
++)
651 s
->dc_val
[0][i
] = 1024;
654 /* which mb is a intra block */
655 CHECKED_ALLOCZ(s
->mbintra_table
, mb_array_size
);
656 memset(s
->mbintra_table
, 1, mb_array_size
);
658 /* default structure is frame */
659 s
->picture_structure
= PICT_FRAME
;
661 /* init macroblock skip table */
662 CHECKED_ALLOCZ(s
->mbskip_table
, mb_array_size
+2);
663 //Note the +1 is for a quicker mpeg4 slice_end detection
664 CHECKED_ALLOCZ(s
->prev_pict_types
, PREV_PICT_TYPES_BUFFER_SIZE
);
666 s
->parse_context
.state
= -1;
667 if((s
->avctx
->debug
&(FF_DEBUG_VIS_QP
|FF_DEBUG_VIS_MB_TYPE
)) || (s
->avctx
->debug_mv
)){
668 s
->visualization_buffer
[0] = av_malloc((s
->mb_width
*16 + 2*EDGE_WIDTH
) * s
->mb_height
*16 + 2*EDGE_WIDTH
);
669 s
->visualization_buffer
[1] = av_malloc((s
->mb_width
*8 + EDGE_WIDTH
) * s
->mb_height
*8 + EDGE_WIDTH
);
670 s
->visualization_buffer
[2] = av_malloc((s
->mb_width
*8 + EDGE_WIDTH
) * s
->mb_height
*8 + EDGE_WIDTH
);
673 s
->context_initialized
= 1;
675 s
->thread_context
[0]= s
;
676 for(i
=1; i
<s
->avctx
->thread_count
; i
++){
677 s
->thread_context
[i
]= av_malloc(sizeof(MpegEncContext
));
678 memcpy(s
->thread_context
[i
], s
, sizeof(MpegEncContext
));
681 for(i
=0; i
<s
->avctx
->thread_count
; i
++){
682 if(init_duplicate_context(s
->thread_context
[i
], s
) < 0)
684 s
->thread_context
[i
]->start_mb_y
= (s
->mb_height
*(i
) + s
->avctx
->thread_count
/2) / s
->avctx
->thread_count
;
685 s
->thread_context
[i
]->end_mb_y
= (s
->mb_height
*(i
+1) + s
->avctx
->thread_count
/2) / s
->avctx
->thread_count
;
/* free everything allocated by MPV_common_init(), for both encoder and decoder */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    for(i=0; i<s->avctx->thread_count; i++){
        free_duplicate_context(s->thread_context[i]);
    }
    for(i=1; i<s->avctx->thread_count; i++){
        av_freep(&s->thread_context[i]);
    }

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val[0]);
    av_freep(&s->ac_val[0]);
    av_freep(&s->coded_block);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    if(s->picture){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    avcodec_default_free_buffers(s->avctx);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;

    for(i=0; i<3; i++)
        if (s->visualization_buffer[i])
            av_free(s->visualization_buffer[i]);
}
774 #ifdef CONFIG_ENCODERS
776 /* init video encoder */
777 int MPV_encode_init(AVCodecContext
*avctx
)
779 MpegEncContext
*s
= avctx
->priv_data
;
781 int chroma_h_shift
, chroma_v_shift
;
783 avctx
->pix_fmt
= PIX_FMT_YUV420P
; // FIXME
785 s
->bit_rate
= avctx
->bit_rate
;
786 s
->width
= avctx
->width
;
787 s
->height
= avctx
->height
;
788 if(avctx
->gop_size
> 600){
789 av_log(avctx
, AV_LOG_ERROR
, "Warning keyframe interval too large! reducing it ...\n");
792 s
->gop_size
= avctx
->gop_size
;
794 s
->flags
= avctx
->flags
;
795 s
->flags2
= avctx
->flags2
;
796 s
->max_b_frames
= avctx
->max_b_frames
;
797 s
->codec_id
= avctx
->codec
->id
;
798 s
->luma_elim_threshold
= avctx
->luma_elim_threshold
;
799 s
->chroma_elim_threshold
= avctx
->chroma_elim_threshold
;
800 s
->strict_std_compliance
= avctx
->strict_std_compliance
;
801 s
->data_partitioning
= avctx
->flags
& CODEC_FLAG_PART
;
802 s
->quarter_sample
= (avctx
->flags
& CODEC_FLAG_QPEL
)!=0;
803 s
->mpeg_quant
= avctx
->mpeg_quant
;
804 s
->rtp_mode
= !!avctx
->rtp_payload_size
;
806 if (s
->gop_size
<= 1) {
813 s
->me_method
= avctx
->me_method
;
816 s
->fixed_qscale
= !!(avctx
->flags
& CODEC_FLAG_QSCALE
);
818 s
->adaptive_quant
= ( s
->avctx
->lumi_masking
819 || s
->avctx
->dark_masking
820 || s
->avctx
->temporal_cplx_masking
821 || s
->avctx
->spatial_cplx_masking
822 || s
->avctx
->p_masking
823 || (s
->flags
&CODEC_FLAG_QP_RD
))
826 s
->obmc
= !!(s
->flags
& CODEC_FLAG_OBMC
);
827 s
->loop_filter
= !!(s
->flags
& CODEC_FLAG_LOOP_FILTER
);
828 s
->alternate_scan
= !!(s
->flags
& CODEC_FLAG_ALT_SCAN
);
830 if(avctx
->rc_max_rate
&& !avctx
->rc_buffer_size
){
831 av_log(avctx
, AV_LOG_ERROR
, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
835 if(avctx
->rc_min_rate
&& avctx
->rc_max_rate
!= avctx
->rc_min_rate
){
        av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
839 if((s
->flags
& CODEC_FLAG_4MV
) && s
->codec_id
!= CODEC_ID_MPEG4
840 && s
->codec_id
!= CODEC_ID_H263
&& s
->codec_id
!= CODEC_ID_H263P
&& s
->codec_id
!= CODEC_ID_FLV1
){
841 av_log(avctx
, AV_LOG_ERROR
, "4MV not supported by codec\n");
845 if(s
->obmc
&& s
->avctx
->mb_decision
!= FF_MB_DECISION_SIMPLE
){
        av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decision\n");
850 if(s
->obmc
&& s
->codec_id
!= CODEC_ID_H263
&& s
->codec_id
!= CODEC_ID_H263P
){
851 av_log(avctx
, AV_LOG_ERROR
, "OBMC is only supported with H263(+)\n");
855 if(s
->quarter_sample
&& s
->codec_id
!= CODEC_ID_MPEG4
){
856 av_log(avctx
, AV_LOG_ERROR
, "qpel not supported by codec\n");
860 if(s
->data_partitioning
&& s
->codec_id
!= CODEC_ID_MPEG4
){
861 av_log(avctx
, AV_LOG_ERROR
, "data partitioning not supported by codec\n");
865 if(s
->max_b_frames
&& s
->codec_id
!= CODEC_ID_MPEG4
&& s
->codec_id
!= CODEC_ID_MPEG1VIDEO
&& s
->codec_id
!= CODEC_ID_MPEG2VIDEO
){
866 av_log(avctx
, AV_LOG_ERROR
, "b frames not supported by codec\n");
870 if(s
->mpeg_quant
&& s
->codec_id
!= CODEC_ID_MPEG4
){ //FIXME mpeg2 uses that too
        av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supported by codec\n");
875 if((s
->flags
& CODEC_FLAG_CBP_RD
) && !(s
->flags
& CODEC_FLAG_TRELLIS_QUANT
)){
876 av_log(avctx
, AV_LOG_ERROR
, "CBP RD needs trellis quant\n");
880 if((s
->flags
& CODEC_FLAG_QP_RD
) && s
->avctx
->mb_decision
!= FF_MB_DECISION_RD
){
881 av_log(avctx
, AV_LOG_ERROR
, "QP RD needs mbd=2\n");
885 if(s
->avctx
->scenechange_threshold
< 1000000000 && (s
->flags
& CODEC_FLAG_CLOSED_GOP
)){
        av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection aren't supported yet\n");
890 if(s
->avctx
->thread_count
> 1 && s
->codec_id
!= CODEC_ID_MPEG4
891 && s
->codec_id
!= CODEC_ID_MPEG1VIDEO
&& s
->codec_id
!= CODEC_ID_MPEG2VIDEO
892 && (s
->codec_id
!= CODEC_ID_H263P
|| !(s
->flags
& CODEC_FLAG_H263P_SLICE_STRUCT
))){
893 av_log(avctx
, AV_LOG_ERROR
, "multi threaded encoding not supported by codec\n");
897 if(s
->avctx
->thread_count
> MAX_THREADS
|| 16*s
->avctx
->thread_count
> s
->height
){
898 av_log(avctx
, AV_LOG_ERROR
, "too many threads\n");
902 if(s
->avctx
->thread_count
> 1)
905 i
= ff_gcd(avctx
->frame_rate
, avctx
->frame_rate_base
);
907 av_log(avctx
, AV_LOG_INFO
, "removing common factors from framerate\n");
908 avctx
->frame_rate
/= i
;
909 avctx
->frame_rate_base
/= i
;
913 if(s
->codec_id
==CODEC_ID_MJPEG
){
914 s
->intra_quant_bias
= 1<<(QUANT_BIAS_SHIFT
-1); //(a + x/2)/x
915 s
->inter_quant_bias
= 0;
916 }else if(s
->mpeg_quant
|| s
->codec_id
==CODEC_ID_MPEG1VIDEO
|| s
->codec_id
==CODEC_ID_MPEG2VIDEO
){
917 s
->intra_quant_bias
= 3<<(QUANT_BIAS_SHIFT
-3); //(a + x*3/8)/x
918 s
->inter_quant_bias
= 0;
920 s
->intra_quant_bias
=0;
921 s
->inter_quant_bias
=-(1<<(QUANT_BIAS_SHIFT
-2)); //(a - x/4)/x
924 if(avctx
->intra_quant_bias
!= FF_DEFAULT_QUANT_BIAS
)
925 s
->intra_quant_bias
= avctx
->intra_quant_bias
;
926 if(avctx
->inter_quant_bias
!= FF_DEFAULT_QUANT_BIAS
)
927 s
->inter_quant_bias
= avctx
->inter_quant_bias
;
929 avcodec_get_chroma_sub_sample(avctx
->pix_fmt
, &chroma_h_shift
, &chroma_v_shift
);
931 av_reduce(&s
->time_increment_resolution
, &dummy
, s
->avctx
->frame_rate
, s
->avctx
->frame_rate_base
, (1<<16)-1);
932 s
->time_increment_bits
= av_log2(s
->time_increment_resolution
- 1) + 1;
934 switch(avctx
->codec
->id
) {
935 case CODEC_ID_MPEG1VIDEO
:
936 s
->out_format
= FMT_MPEG1
;
937 s
->low_delay
= 0; //s->max_b_frames ? 0 : 1;
938 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
940 case CODEC_ID_MPEG2VIDEO
:
941 s
->out_format
= FMT_MPEG1
;
942 s
->low_delay
= 0; //s->max_b_frames ? 0 : 1;
943 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
948 s
->out_format
= FMT_MJPEG
;
949 s
->intra_only
= 1; /* force intra only for jpeg */
950 s
->mjpeg_write_tables
= 1; /* write all tables */
951 s
->mjpeg_data_only_frames
= 0; /* write all the needed headers */
952 s
->mjpeg_vsample
[0] = 1<<chroma_v_shift
;
953 s
->mjpeg_vsample
[1] = 1;
954 s
->mjpeg_vsample
[2] = 1;
955 s
->mjpeg_hsample
[0] = 1<<chroma_h_shift
;
956 s
->mjpeg_hsample
[1] = 1;
957 s
->mjpeg_hsample
[2] = 1;
958 if (mjpeg_init(s
) < 0)
965 if (h263_get_picture_format(s
->width
, s
->height
) == 7) {
966 av_log(avctx
, AV_LOG_INFO
, "Input picture size isn't suitable for h263 codec! try h263+\n");
969 s
->out_format
= FMT_H263
;
970 s
->obmc
= (avctx
->flags
& CODEC_FLAG_OBMC
) ?
1:0;
975 s
->out_format
= FMT_H263
;
978 s
->umvplus
= (avctx
->flags
& CODEC_FLAG_H263P_UMV
) ?
1:0;
979 s
->h263_aic
= (avctx
->flags
& CODEC_FLAG_H263P_AIC
) ?
1:0;
980 s
->modified_quant
= s
->h263_aic
;
981 s
->alt_inter_vlc
= (avctx
->flags
& CODEC_FLAG_H263P_AIV
) ?
1:0;
982 s
->obmc
= (avctx
->flags
& CODEC_FLAG_OBMC
) ?
1:0;
983 s
->loop_filter
= (avctx
->flags
& CODEC_FLAG_LOOP_FILTER
) ?
1:0;
984 s
->unrestricted_mv
= s
->obmc
|| s
->loop_filter
|| s
->umvplus
;
985 s
->h263_slice_structured
= (s
->flags
& CODEC_FLAG_H263P_SLICE_STRUCT
) ?
1:0;
988 /* These are just to be sure */
993 s
->out_format
= FMT_H263
;
994 s
->h263_flv
= 2; /* format = 1; 11-bit codes */
995 s
->unrestricted_mv
= 1;
996 s
->rtp_mode
=0; /* don't allow GOB */
1001 s
->out_format
= FMT_H263
;
1005 case CODEC_ID_MPEG4
:
1006 s
->out_format
= FMT_H263
;
1008 s
->unrestricted_mv
= 1;
1009 s
->low_delay
= s
->max_b_frames ?
0 : 1;
1010 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
1012 case CODEC_ID_MSMPEG4V1
:
1013 s
->out_format
= FMT_H263
;
1014 s
->h263_msmpeg4
= 1;
1016 s
->unrestricted_mv
= 1;
1017 s
->msmpeg4_version
= 1;
1021 case CODEC_ID_MSMPEG4V2
:
1022 s
->out_format
= FMT_H263
;
1023 s
->h263_msmpeg4
= 1;
1025 s
->unrestricted_mv
= 1;
1026 s
->msmpeg4_version
= 2;
1030 case CODEC_ID_MSMPEG4V3
:
1031 s
->out_format
= FMT_H263
;
1032 s
->h263_msmpeg4
= 1;
1034 s
->unrestricted_mv
= 1;
1035 s
->msmpeg4_version
= 3;
1036 s
->flipflop_rounding
=1;
1041 s
->out_format
= FMT_H263
;
1042 s
->h263_msmpeg4
= 1;
1044 s
->unrestricted_mv
= 1;
1045 s
->msmpeg4_version
= 4;
1046 s
->flipflop_rounding
=1;
1051 s
->out_format
= FMT_H263
;
1052 s
->h263_msmpeg4
= 1;
1054 s
->unrestricted_mv
= 1;
1055 s
->msmpeg4_version
= 5;
1056 s
->flipflop_rounding
=1;
1065 { /* set up some save defaults, some codecs might override them later */
1071 default_mv_penalty
= av_mallocz( sizeof(uint8_t)*(MAX_FCODE
+1)*(2*MAX_MV
+1) );
1072 memset(default_mv_penalty
, 0, sizeof(uint8_t)*(MAX_FCODE
+1)*(2*MAX_MV
+1));
1073 memset(default_fcode_tab
, 0, sizeof(uint8_t)*(2*MAX_MV
+1));
1075 for(i
=-16; i
<16; i
++){
1076 default_fcode_tab
[i
+ MAX_MV
]= 1;
1080 s
->me
.mv_penalty
= default_mv_penalty
;
1081 s
->fcode_tab
= default_fcode_tab
;
1083 /* dont use mv_penalty table for crap MV as it would be confused */
1084 //FIXME remove after fixing / removing old ME
1085 if (s
->me_method
< ME_EPZS
) s
->me
.mv_penalty
= default_mv_penalty
;
1090 if (MPV_common_init(s
) < 0)
1093 if(s
->modified_quant
)
1094 s
->chroma_qscale_table
= ff_h263_chroma_qscale_table
;
1095 s
->progressive_frame
=
1096 s
->progressive_sequence
= !(avctx
->flags
& (CODEC_FLAG_INTERLACED_DCT
|CODEC_FLAG_INTERLACED_ME
));
1097 s
->quant_precision
=5;
1099 ff_set_cmp(&s
->dsp
, s
->dsp
.ildct_cmp
, s
->avctx
->ildct_cmp
);
1103 #ifdef CONFIG_ENCODERS
1105 if (s
->out_format
== FMT_H263
)
1106 h263_encode_init(s
);
1107 if(s
->msmpeg4_version
)
1108 ff_msmpeg4_encode_init(s
);
1110 if (s
->out_format
== FMT_MPEG1
)
1111 ff_mpeg1_encode_init(s
);
1114 /* init default q matrix */
1116 int j
= s
->dsp
.idct_permutation
[i
];
1118 if(s
->codec_id
==CODEC_ID_MPEG4
&& s
->mpeg_quant
){
1119 s
->intra_matrix
[j
] = ff_mpeg4_default_intra_matrix
[i
];
1120 s
->inter_matrix
[j
] = ff_mpeg4_default_non_intra_matrix
[i
];
1121 }else if(s
->out_format
== FMT_H263
){
1122 s
->intra_matrix
[j
] =
1123 s
->inter_matrix
[j
] = ff_mpeg1_default_non_intra_matrix
[i
];
1127 s
->intra_matrix
[j
] = ff_mpeg1_default_intra_matrix
[i
];
1128 s
->inter_matrix
[j
] = ff_mpeg1_default_non_intra_matrix
[i
];
1130 if(s
->avctx
->intra_matrix
)
1131 s
->intra_matrix
[j
] = s
->avctx
->intra_matrix
[i
];
1132 if(s
->avctx
->inter_matrix
)
1133 s
->inter_matrix
[j
] = s
->avctx
->inter_matrix
[i
];
1136 /* precompute matrix */
1137 /* for mjpeg, we do include qscale in the matrix */
1138 if (s
->out_format
!= FMT_MJPEG
) {
1139 convert_matrix(&s
->dsp
, s
->q_intra_matrix
, s
->q_intra_matrix16
,
1140 s
->intra_matrix
, s
->intra_quant_bias
, 1, 31);
1141 convert_matrix(&s
->dsp
, s
->q_inter_matrix
, s
->q_inter_matrix16
,
1142 s
->inter_matrix
, s
->inter_quant_bias
, 1, 31);
1145 if(ff_rate_control_init(s
) < 0)
1148 s
->picture_number
= 0;
1149 s
->input_picture_number
= 0;
1150 s
->picture_in_gop_number
= 0;
1151 /* motion detector init */
int MPV_encode_end(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    ff_rate_control_uninit(s);

    MPV_common_end(s);
    if (s->out_format == FMT_MJPEG)
        mjpeg_close(s);

    av_freep(&avctx->extradata);

    return 0;
}

#endif //CONFIG_ENCODERS
void init_rl(RLTable *rl)
{
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
    uint8_t index_run[MAX_RUN+1];
    int last, run, level, start, end, i;

    /* compute max_level[], max_run[] and index_run[] */
    for(last=0;last<2;last++) {
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for(i=start;i<end;i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
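/* After init_rl(), for each value of the 'last' flag: index_run[run] is the
 * first table index with that run (or rl->n if the run/level pair has no VLC
 * code), max_level[run] is the largest level coded for that run and
 * max_run[level] the largest run for that level -- the bounds that the
 * run/level escape decisions elsewhere rely on.
 */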
/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    for(i=0;i<w;i++) {
        /* top and bottom */
        memcpy(buf - (i + 1) * wrap, buf, width);
        memcpy(last_line + (i + 1) * wrap, last_line, width);
    }
    /* left and right */
    ptr = buf;
    for(i=0;i<height;i++) {
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
        ptr += wrap;
    }
    /* corners */
    for(i=0;i<w;i++) {
        memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
        memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
        memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
        memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
    }
}
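/* The replicated border produced above is what makes unrestricted motion
 * vectors cheap: every edge pixel is extended w pixels outwards, so a
 * motion-compensated block that reaches outside the picture still reads valid
 * samples instead of needing a per-block clipping path.
 */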
int ff_find_unused_picture(MpegEncContext *s, int shared){
    int i;

    if(shared){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
        }
    }else{
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
        }
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL) return i;
        }
    }

    assert(0);
    return -1;
}
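/* Selection order above: the non-shared path prefers slots whose type is
 * already set (entries that went through get_buffer() before) and only then
 * falls back to completely untyped free slots.
 */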
static void update_noise_reduction(MpegEncContext *s){
    int intra, i;

    for(intra=0; intra<2; intra++){
        if(s->dct_count[intra] > (1<<16)){
            for(i=0; i<64; i++){
                s->dct_error_sum[intra][i] >>=1;
            }
            s->dct_count[intra] >>= 1;
        }

        for(i=0; i<64; i++){
            s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
        }
    }
}
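/* Rough reading of the formula above: dct_offset[intra][i] approximates
 * noise_reduction * dct_count / dct_error_sum[i], rounded to nearest, and the
 * >>1 halving of both count and error sum keeps the running statistics bounded
 * while acting as an exponential decay of old data.
 */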
1284 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1286 int MPV_frame_start(MpegEncContext
*s
, AVCodecContext
*avctx
)
1292 assert(s
->last_picture_ptr
==NULL
|| s
->out_format
!= FMT_H264
|| s
->codec_id
== CODEC_ID_SVQ3
);
1294 /* mark&release old frames */
1295 if (s
->pict_type
!= B_TYPE
&& s
->last_picture_ptr
&& s
->last_picture_ptr
->data
[0]) {
1296 avctx
->release_buffer(avctx
, (AVFrame
*)s
->last_picture_ptr
);
1298 /* release forgotten pictures */
1299 /* if(mpeg124/h263) */
1301 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1302 if(s
->picture
[i
].data
[0] && &s
->picture
[i
] != s
->next_picture_ptr
&& s
->picture
[i
].reference
){
1303 av_log(avctx
, AV_LOG_ERROR
, "releasing zombie picture\n");
1304 avctx
->release_buffer(avctx
, (AVFrame
*)&s
->picture
[i
]);
    /* release non reference frames */
1312 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1313 if(s
->picture
[i
].data
[0] && !s
->picture
[i
].reference
/*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1314 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)&s
->picture
[i
]);
1318 if(s
->current_picture_ptr
&& s
->current_picture_ptr
->data
[0]==NULL
)
        pic= (AVFrame*)s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
1321 i
= ff_find_unused_picture(s
, 0);
1322 pic
= (AVFrame
*)&s
->picture
[i
];
1325 pic
->reference
= s
->pict_type
!= B_TYPE ?
3 : 0;
1327 pic
->coded_picture_number
= s
->coded_picture_number
++;
1329 if( alloc_picture(s
, (Picture
*)pic
, 0) < 0)
1332 s
->current_picture_ptr
= (Picture
*)pic
;
1333 s
->current_picture_ptr
->top_field_first
= s
->top_field_first
; //FIXME use only the vars from current_pic
1334 s
->current_picture_ptr
->interlaced_frame
= !s
->progressive_frame
&& !s
->progressive_sequence
;
1337 s
->current_picture_ptr
->pict_type
= s
->pict_type
;
1338 // if(s->flags && CODEC_FLAG_QSCALE)
1339 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1340 s
->current_picture_ptr
->key_frame
= s
->pict_type
== I_TYPE
;
1342 copy_picture(&s
->current_picture
, s
->current_picture_ptr
);
1344 if(s
->out_format
!= FMT_H264
|| s
->codec_id
== CODEC_ID_SVQ3
){
1345 if (s
->pict_type
!= B_TYPE
) {
1346 s
->last_picture_ptr
= s
->next_picture_ptr
;
1347 s
->next_picture_ptr
= s
->current_picture_ptr
;
1350 if(s
->last_picture_ptr
) copy_picture(&s
->last_picture
, s
->last_picture_ptr
);
1351 if(s
->next_picture_ptr
) copy_picture(&s
->next_picture
, s
->next_picture_ptr
);
1353 if(s
->pict_type
!= I_TYPE
&& (s
->last_picture_ptr
==NULL
|| s
->last_picture_ptr
->data
[0]==NULL
)){
1354 av_log(avctx
, AV_LOG_ERROR
, "warning: first frame is no keyframe\n");
        assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
1359 assert(s
->pict_type
== I_TYPE
|| (s
->last_picture_ptr
&& s
->last_picture_ptr
->data
[0]));
1361 if(s
->picture_structure
!=PICT_FRAME
){
1364 if(s
->picture_structure
== PICT_BOTTOM_FIELD
){
1365 s
->current_picture
.data
[i
] += s
->current_picture
.linesize
[i
];
1367 s
->current_picture
.linesize
[i
] *= 2;
1368 s
->last_picture
.linesize
[i
] *=2;
1369 s
->next_picture
.linesize
[i
] *=2;
1374 s
->hurry_up
= s
->avctx
->hurry_up
;
1375 s
->error_resilience
= avctx
->error_resilience
;
1377 /* set dequantizer, we cant do it during init as it might change for mpeg4
1378 and we cant do it in the header decode as init isnt called for mpeg4 there yet */
1379 if(s
->mpeg_quant
|| s
->codec_id
== CODEC_ID_MPEG2VIDEO
){
1380 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg2_intra
;
1381 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg2_inter
;
1382 }else if(s
->out_format
== FMT_H263
){
1383 s
->dct_unquantize_intra
= s
->dct_unquantize_h263_intra
;
1384 s
->dct_unquantize_inter
= s
->dct_unquantize_h263_inter
;
1386 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg1_intra
;
1387 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg1_inter
;
1390 if(s
->dct_error_sum
){
1391 assert(s
->avctx
->noise_reduction
&& s
->encoding
);
1393 update_noise_reduction(s
);
1397 if(s
->avctx
->xvmc_acceleration
)
1398 return XVMC_field_start(s
, avctx
);
1403 /* generic function for encode/decode called after a frame has been coded/decoded */
1404 void MPV_frame_end(MpegEncContext
*s
)
1407 /* draw edge for correct motion prediction if outside */
1409 //just to make sure that all data is rendered.
1410 if(s
->avctx
->xvmc_acceleration
){
1414 if(s
->unrestricted_mv
&& s
->pict_type
!= B_TYPE
&& !s
->intra_only
&& !(s
->flags
&CODEC_FLAG_EMU_EDGE
)) {
1415 draw_edges(s
->current_picture
.data
[0], s
->linesize
, s
->h_edge_pos
, s
->v_edge_pos
, EDGE_WIDTH
);
1416 draw_edges(s
->current_picture
.data
[1], s
->uvlinesize
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1, EDGE_WIDTH
/2);
1417 draw_edges(s
->current_picture
.data
[2], s
->uvlinesize
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1, EDGE_WIDTH
/2);
1421 s
->last_pict_type
= s
->pict_type
;
1422 if(s
->pict_type
!=B_TYPE
){
1423 s
->last_non_b_pict_type
= s
->pict_type
;
1426 /* copy back current_picture variables */
1427 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1428 if(s
->picture
[i
].data
[0] == s
->current_picture
.data
[0]){
1429 s
->picture
[i
]= s
->current_picture
;
1433 assert(i
<MAX_PICTURE_COUNT
);
    /* release non reference frames */
1438 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1439 if(s
->picture
[i
].data
[0] && !s
->picture
[i
].reference
/*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1440 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)&s
->picture
[i
]);
1444 // clear copies, to avoid confusion
1446 memset(&s
->last_picture
, 0, sizeof(Picture
));
1447 memset(&s
->next_picture
, 0, sizeof(Picture
));
1448 memset(&s
->current_picture
, 0, sizeof(Picture
));
/**
 * draws a line from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the line
 */
1459 static void draw_line(uint8_t *buf
, int sx
, int sy
, int ex
, int ey
, int w
, int h
, int stride
, int color
){
1462 sx
= clip(sx
, 0, w
-1);
1463 sy
= clip(sy
, 0, h
-1);
1464 ex
= clip(ex
, 0, w
-1);
1465 ey
= clip(ey
, 0, h
-1);
1467 buf
[sy
*stride
+ sx
]+= color
;
1469 if(ABS(ex
- sx
) > ABS(ey
- sy
)){
1474 buf
+= sx
+ sy
*stride
;
1476 f
= ((ey
-sy
)<<16)/ex
;
1477 for(x
= 0; x
<= ex
; x
++){
1478 y
= ((x
*f
) + (1<<15))>>16;
1479 buf
[y
*stride
+ x
]+= color
;
1486 buf
+= sx
+ sy
*stride
;
1488 if(ey
) f
= ((ex
-sx
)<<16)/ey
;
1490 for(y
= 0; y
<= ey
; y
++){
1491 x
= ((y
*f
) + (1<<15))>>16;
1492 buf
[y
*stride
+ x
]+= color
;
1498 * draws an arrow from (ex, ey) -> (sx, sy).
1499 * @param w width of the image
1500 * @param h height of the image
1501 * @param stride stride/linesize of the image
1502 * @param color color of the arrow
1504 static void draw_arrow(uint8_t *buf
, int sx
, int sy
, int ex
, int ey
, int w
, int h
, int stride
, int color
){
1507 sx
= clip(sx
, -100, w
+100);
1508 sy
= clip(sy
, -100, h
+100);
1509 ex
= clip(ex
, -100, w
+100);
1510 ey
= clip(ey
, -100, h
+100);
1515 if(dx
*dx
+ dy
*dy
> 3*3){
1518 int length
= ff_sqrt((rx
*rx
+ ry
*ry
)<<8);
1520 //FIXME subpixel accuracy
1521 rx
= ROUNDED_DIV(rx
*3<<4, length
);
1522 ry
= ROUNDED_DIV(ry
*3<<4, length
);
1524 draw_line(buf
, sx
, sy
, sx
+ rx
, sy
+ ry
, w
, h
, stride
, color
);
1525 draw_line(buf
, sx
, sy
, sx
- ry
, sy
+ rx
, w
, h
, stride
, color
);
1527 draw_line(buf
, sx
, sy
, ex
, ey
, w
, h
, stride
, color
);
 * prints debugging info for the given picture.
1533 void ff_print_debug_info(MpegEncContext
*s
, AVFrame
*pict
){
1535 if(!pict
|| !pict
->mb_type
) return;
1537 if(s
->avctx
->debug
&(FF_DEBUG_SKIP
| FF_DEBUG_QP
| FF_DEBUG_MB_TYPE
)){
1540 av_log(s
->avctx
,AV_LOG_DEBUG
,"New frame, type: ");
1541 switch (pict
->pict_type
) {
1542 case FF_I_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"I\n"); break;
1543 case FF_P_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"P\n"); break;
1544 case FF_B_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"B\n"); break;
1545 case FF_S_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"S\n"); break;
1546 case FF_SI_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"SI\n"); break;
1547 case FF_SP_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"SP\n"); break;
1549 for(y
=0; y
<s
->mb_height
; y
++){
1550 for(x
=0; x
<s
->mb_width
; x
++){
1551 if(s
->avctx
->debug
&FF_DEBUG_SKIP
){
1552 int count
= s
->mbskip_table
[x
+ y
*s
->mb_stride
];
1553 if(count
>9) count
=9;
1554 av_log(s
->avctx
, AV_LOG_DEBUG
, "%1d", count
);
1556 if(s
->avctx
->debug
&FF_DEBUG_QP
){
1557 av_log(s
->avctx
, AV_LOG_DEBUG
, "%2d", pict
->qscale_table
[x
+ y
*s
->mb_stride
]);
1559 if(s
->avctx
->debug
&FF_DEBUG_MB_TYPE
){
1560 int mb_type
= pict
->mb_type
[x
+ y
*s
->mb_stride
];
1561 //Type & MV direction
1563 av_log(s
->avctx
, AV_LOG_DEBUG
, "P");
1564 else if(IS_INTRA(mb_type
) && IS_ACPRED(mb_type
))
1565 av_log(s
->avctx
, AV_LOG_DEBUG
, "A");
1566 else if(IS_INTRA4x4(mb_type
))
1567 av_log(s
->avctx
, AV_LOG_DEBUG
, "i");
1568 else if(IS_INTRA16x16(mb_type
))
1569 av_log(s
->avctx
, AV_LOG_DEBUG
, "I");
1570 else if(IS_DIRECT(mb_type
) && IS_SKIP(mb_type
))
1571 av_log(s
->avctx
, AV_LOG_DEBUG
, "d");
1572 else if(IS_DIRECT(mb_type
))
1573 av_log(s
->avctx
, AV_LOG_DEBUG
, "D");
1574 else if(IS_GMC(mb_type
) && IS_SKIP(mb_type
))
1575 av_log(s
->avctx
, AV_LOG_DEBUG
, "g");
1576 else if(IS_GMC(mb_type
))
1577 av_log(s
->avctx
, AV_LOG_DEBUG
, "G");
1578 else if(IS_SKIP(mb_type
))
1579 av_log(s
->avctx
, AV_LOG_DEBUG
, "S");
1580 else if(!USES_LIST(mb_type
, 1))
1581 av_log(s
->avctx
, AV_LOG_DEBUG
, ">");
1582 else if(!USES_LIST(mb_type
, 0))
1583 av_log(s
->avctx
, AV_LOG_DEBUG
, "<");
1585 assert(USES_LIST(mb_type
, 0) && USES_LIST(mb_type
, 1));
1586 av_log(s
->avctx
, AV_LOG_DEBUG
, "X");
1591 av_log(s
->avctx
, AV_LOG_DEBUG
, "+");
1592 else if(IS_16X8(mb_type
))
1593 av_log(s
->avctx
, AV_LOG_DEBUG
, "-");
1594 else if(IS_8X16(mb_type
))
1595 av_log(s
->avctx
, AV_LOG_DEBUG
, "¦");
1596 else if(IS_INTRA(mb_type
) || IS_16X16(mb_type
))
1597 av_log(s
->avctx
, AV_LOG_DEBUG
, " ");
1599 av_log(s
->avctx
, AV_LOG_DEBUG
, "?");
1602 if(IS_INTERLACED(mb_type
) && s
->codec_id
== CODEC_ID_H264
)
1603 av_log(s
->avctx
, AV_LOG_DEBUG
, "=");
1605 av_log(s
->avctx
, AV_LOG_DEBUG
, " ");
1607 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1609 av_log(s
->avctx
, AV_LOG_DEBUG
, "\n");
1613 if((s
->avctx
->debug
&(FF_DEBUG_VIS_QP
|FF_DEBUG_VIS_MB_TYPE
)) || (s
->avctx
->debug_mv
)){
1614 const int shift
= 1 + s
->quarter_sample
;
1618 int h_chroma_shift
, v_chroma_shift
;
1619 s
->low_delay
=0; //needed to see the vectors without trashing the buffers
1621 avcodec_get_chroma_sub_sample(s
->avctx
->pix_fmt
, &h_chroma_shift
, &v_chroma_shift
);
1623 memcpy(s
->visualization_buffer
[i
], pict
->data
[i
], (i
==0) ? pict
->linesize
[i
]*s
->height
:pict
->linesize
[i
]*s
->height
>> v_chroma_shift
);
1624 pict
->data
[i
]= s
->visualization_buffer
[i
];
1626 pict
->type
= FF_BUFFER_TYPE_COPY
;
1629 for(mb_y
=0; mb_y
<s
->mb_height
; mb_y
++){
1631 for(mb_x
=0; mb_x
<s
->mb_width
; mb_x
++){
1632 const int mb_index
= mb_x
+ mb_y
*s
->mb_stride
;
1633 if((s
->avctx
->debug_mv
) && pict
->motion_val
){
1635 for(type
=0; type
<3; type
++){
1638 case 0: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_P_FOR
)) || (pict
->pict_type
!=FF_P_TYPE
))
1642 case 1: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_B_FOR
)) || (pict
->pict_type
!=FF_B_TYPE
))
1646 case 2: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_B_BACK
)) || (pict
->pict_type
!=FF_B_TYPE
))
1651 if(!USES_LIST(pict
->mb_type
[mb_index
], direction
))
1654 if(IS_8X8(pict
->mb_type
[mb_index
])){
1657 int sx
= mb_x
*16 + 4 + 8*(i
&1);
1658 int sy
= mb_y
*16 + 4 + 8*(i
>>1);
1659 int xy
= 1 + mb_x
*2 + (i
&1) + (mb_y
*2 + 1 + (i
>>1))*(s
->mb_width
*2 + 2);
1660 int mx
= (pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1661 int my
= (pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1662 draw_arrow(ptr
, sx
, sy
, mx
, my
, s
->width
, s
->height
, s
->linesize
, 100);
1664 }else if(IS_16X8(pict
->mb_type
[mb_index
])){
1668 int sy
=mb_y
*16 + 4 + 8*i
;
1669 int xy
=1 + mb_x
*2 + (mb_y
*2 + 1 + i
)*(s
->mb_width
*2 + 2);
1670 int mx
=(pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1671 int my
=(pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1672 draw_arrow(ptr
, sx
, sy
, mx
, my
, s
->width
, s
->height
, s
->linesize
, 100);
1675 int sx
= mb_x
*16 + 8;
1676 int sy
= mb_y
*16 + 8;
1677 int xy
= 1 + mb_x
*2 + (mb_y
*2 + 1)*(s
->mb_width
*2 + 2);
1678 int mx
= (pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1679 int my
= (pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1680 draw_arrow(ptr
, sx
, sy
, mx
, my
, s
->width
, s
->height
, s
->linesize
, 100);
1684 if((s
->avctx
->debug
&FF_DEBUG_VIS_QP
) && pict
->motion_val
){
1685 uint64_t c
= (pict
->qscale_table
[mb_index
]*128/31) * 0x0101010101010101ULL
;
1688 *(uint64_t*)(pict
->data
[1] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[1])= c
;
1689 *(uint64_t*)(pict
->data
[2] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[2])= c
;
1692 if((s
->avctx
->debug
&FF_DEBUG_VIS_MB_TYPE
) && pict
->motion_val
){
1693 int mb_type
= pict
->mb_type
[mb_index
];
1696 #define COLOR(theta, r)\
1697 u= (int)(128 + r*cos(theta*3.141592/180));\
1698 v= (int)(128 + r*sin(theta*3.141592/180));
1702 if(IS_PCM(mb_type
)){
1704 }else if((IS_INTRA(mb_type
) && IS_ACPRED(mb_type
)) || IS_INTRA16x16(mb_type
)){
1706 }else if(IS_INTRA4x4(mb_type
)){
1708 }else if(IS_DIRECT(mb_type
) && IS_SKIP(mb_type
)){
1710 }else if(IS_DIRECT(mb_type
)){
1712 }else if(IS_GMC(mb_type
) && IS_SKIP(mb_type
)){
1714 }else if(IS_GMC(mb_type
)){
1716 }else if(IS_SKIP(mb_type
)){
1718 }else if(!USES_LIST(mb_type
, 1)){
1720 }else if(!USES_LIST(mb_type
, 0)){
1723 assert(USES_LIST(mb_type
, 0) && USES_LIST(mb_type
, 1));
1727 u
*= 0x0101010101010101ULL
;
1728 v
*= 0x0101010101010101ULL
;
1730 *(uint64_t*)(pict
->data
[1] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[1])= u
;
1731 *(uint64_t*)(pict
->data
[2] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[2])= v
;
1735 if(IS_8X8(mb_type
) || IS_16X8(mb_type
)){
1736 *(uint64_t*)(pict
->data
[0] + 16*mb_x
+ 0 + (16*mb_y
+ 8)*pict
->linesize
[0])^= 0x8080808080808080ULL
;
1737 *(uint64_t*)(pict
->data
[0] + 16*mb_x
+ 8 + (16*mb_y
+ 8)*pict
->linesize
[0])^= 0x8080808080808080ULL
;
1739 if(IS_8X8(mb_type
) || IS_8X16(mb_type
)){
1741 pict
->data
[0][16*mb_x
+ 8 + (16*mb_y
+ y
)*pict
->linesize
[0]]^= 0x80;
1744 if(IS_INTERLACED(mb_type
) && s
->codec_id
== CODEC_ID_H264
){
1748 s
->mbskip_table
[mb_index
]=0;
#ifdef CONFIG_ENCODERS

static int get_sae(uint8_t *src, int ref, int stride){
    int x,y;
    int acc=0;

    for(y=0; y<16; y++){
        for(x=0; x<16; x++){
            acc+= ABS(src[x+y*stride] - ref);
        }
    }

    return acc;
}
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
    int x, y, w, h;
    int acc=0;

    w= s->width &~15;
    h= s->height&~15;

    for(y=0; y<h; y+=16){
        for(x=0; x<w; x+=16){
            int offset= x + y*stride;
            int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride, 16);
            int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
            int sae = get_sae(src + offset, mean, stride);

            acc+= sae + 500 < sad;
        }
    }
    return acc;
}
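/* get_intra_count() is the heuristic behind b_frame_strategy==1 in
 * select_input_picture() below: a 16x16 block counts as "intra-looking" when
 * coding it flat (SAE against its own mean, plus a 500 bias) would be cheaper
 * than motion compensation from the previous frame (SAD), and frames with many
 * such blocks stop the B-frame chain early.
 */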
1790 static int load_input_picture(MpegEncContext
*s
, AVFrame
*pic_arg
){
1793 const int encoding_delay
= s
->max_b_frames
;
1797 if(encoding_delay
&& !(s
->flags
&CODEC_FLAG_INPUT_PRESERVED
)) direct
=0;
1798 if(pic_arg
->linesize
[0] != s
->linesize
) direct
=0;
1799 if(pic_arg
->linesize
[1] != s
->uvlinesize
) direct
=0;
1800 if(pic_arg
->linesize
[2] != s
->uvlinesize
) direct
=0;
1802 // av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1805 i
= ff_find_unused_picture(s
, 1);
1807 pic
= (AVFrame
*)&s
->picture
[i
];
1811 pic
->data
[i
]= pic_arg
->data
[i
];
1812 pic
->linesize
[i
]= pic_arg
->linesize
[i
];
1814 alloc_picture(s
, (Picture
*)pic
, 1);
1817 i
= ff_find_unused_picture(s
, 0);
1819 pic
= (AVFrame
*)&s
->picture
[i
];
1822 alloc_picture(s
, (Picture
*)pic
, 0);
1824 if( pic
->data
[0] + offset
== pic_arg
->data
[0]
1825 && pic
->data
[1] + offset
== pic_arg
->data
[1]
1826 && pic
->data
[2] + offset
== pic_arg
->data
[2]){
1829 int h_chroma_shift
, v_chroma_shift
;
1830 avcodec_get_chroma_sub_sample(s
->avctx
->pix_fmt
, &h_chroma_shift
, &v_chroma_shift
);
1833 int src_stride
= pic_arg
->linesize
[i
];
1834 int dst_stride
= i ? s
->uvlinesize
: s
->linesize
;
1835 int h_shift
= i ? h_chroma_shift
: 0;
1836 int v_shift
= i ? v_chroma_shift
: 0;
1837 int w
= s
->width
>>h_shift
;
1838 int h
= s
->height
>>v_shift
;
1839 uint8_t *src
= pic_arg
->data
[i
];
1840 uint8_t *dst
= pic
->data
[i
] + offset
;
1842 if(src_stride
==dst_stride
)
1843 memcpy(dst
, src
, src_stride
*h
);
1846 memcpy(dst
, src
, w
);
1854 copy_picture_attributes(pic
, pic_arg
);
1856 pic
->display_picture_number
= s
->input_picture_number
++;
1857 if(pic
->pts
!= AV_NOPTS_VALUE
){
1858 s
->user_specified_pts
= pic
->pts
;
1860 if(s
->user_specified_pts
){
1861 pic
->pts
= s
->user_specified_pts
+ AV_TIME_BASE
*(int64_t)s
->avctx
->frame_rate_base
/ s
->avctx
->frame_rate
;
1862 av_log(s
->avctx
, AV_LOG_INFO
, "Warning: AVFrame.pts=? trying to guess (%Ld)\n", pic
->pts
);
1864 pic
->pts
= av_rescale(pic
->display_picture_number
*(int64_t)s
->avctx
->frame_rate_base
, AV_TIME_BASE
, s
->avctx
->frame_rate
);
1869 /* shift buffer entries */
1870 for(i
=1; i
<MAX_PICTURE_COUNT
/*s->encoding_delay+1*/; i
++)
1871 s
->input_picture
[i
-1]= s
->input_picture
[i
];
1873 s
->input_picture
[encoding_delay
]= (Picture
*)pic
;
static void select_input_picture(MpegEncContext *s){
    int i;

    for(i=1; i<MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;

    /* set next picture types & ordering */
    if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
        if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
            s->reordered_input_picture[0]= s->input_picture[0];
            s->reordered_input_picture[0]->pict_type= I_TYPE;
            s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
        }else{
            int b_frames;

            if(s->flags&CODEC_FLAG_PASS2){
                for(i=0; i<s->max_b_frames+1; i++){
                    int pict_num= s->input_picture[0]->display_picture_number + i;
                    int pict_type= s->rc_context.entry[pict_num].new_pict_type;
                    s->input_picture[i]->pict_type= pict_type;

                    if(i + 1 >= s->rc_context.num_entries) break;
                }
            }

            if(s->input_picture[0]->pict_type){
                /* user selected pict_type */
                for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
                    if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
                }

                if(b_frames > s->max_b_frames){
                    av_log(s->avctx, AV_LOG_ERROR, "warning, too many bframes in a row\n");
                    b_frames = s->max_b_frames;
                }
            }else if(s->avctx->b_frame_strategy==0){
                b_frames= s->max_b_frames;
                while(b_frames && !s->input_picture[b_frames]) b_frames--;
            }else if(s->avctx->b_frame_strategy==1){
                for(i=1; i<s->max_b_frames+1; i++){
                    if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
                        s->input_picture[i]->b_frame_score=
                            get_intra_count(s, s->input_picture[i  ]->data[0],
                                               s->input_picture[i-1]->data[0], s->linesize) + 1;
                    }
                }
                for(i=0; i<s->max_b_frames; i++){
                    if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
                }

                b_frames= FFMAX(0, i-1);

                for(i=0; i<b_frames+1; i++){
                    s->input_picture[i]->b_frame_score=0;
                }
            }else{
                av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
                b_frames=0;
            }

//static int b_count=0;
//b_count+= b_frames;
//av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);

            if(s->picture_in_gop_number + b_frames >= s->gop_size){
                if(s->flags & CODEC_FLAG_CLOSED_GOP)
                    b_frames=0;
                s->input_picture[b_frames]->pict_type= I_TYPE;
            }

            if(   (s->flags & CODEC_FLAG_CLOSED_GOP)
               && b_frames
               && s->input_picture[b_frames]->pict_type== I_TYPE)
                b_frames--;

            s->reordered_input_picture[0]= s->input_picture[b_frames];
            if(s->reordered_input_picture[0]->pict_type != I_TYPE)
                s->reordered_input_picture[0]->pict_type= P_TYPE;
            s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
            for(i=0; i<b_frames; i++){
                s->reordered_input_picture[i+1]= s->input_picture[i];
                s->reordered_input_picture[i+1]->pict_type= B_TYPE;
                s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
            }
        }
    }

    if(s->reordered_input_picture[0]){
        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;

        copy_picture(&s->new_picture, s->reordered_input_picture[0]);

        if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
            // input is a shared pix, so we can't modify it -> alloc a new one & ensure that the shared one is reusable

            int i= ff_find_unused_picture(s, 0);
            Picture *pic= &s->picture[i];

            /* mark us unused / free shared pic */
            for(i=0; i<4; i++)
                s->reordered_input_picture[0]->data[i]= NULL;
            s->reordered_input_picture[0]->type= 0;

            copy_picture_attributes((AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);
            pic->reference = s->reordered_input_picture[0]->reference;

            alloc_picture(s, pic, 0);

            s->current_picture_ptr= pic;
        }else{
            // input is not a shared pix -> reuse buffer for current_pix

            assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
                   || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);

            s->current_picture_ptr= s->reordered_input_picture[0];
            for(i=0; i<4; i++){
                s->new_picture.data[i]+=16;
            }
        }
        copy_picture(&s->current_picture, s->current_picture_ptr);

        s->picture_number= s->new_picture.display_picture_number;
//printf("dpn:%d\n", s->picture_number);
    }else{
        memset(&s->new_picture, 0, sizeof(Picture));
    }
}
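
/* Summary of the reordering done in select_input_picture(): the picture that
 * will actually be coded next (an I- or P-frame) is moved to
 * reordered_input_picture[0] and the b_frames pictures preceding it in
 * display order follow as B-frames, each getting a consecutive
 * coded_picture_number.  b_frame_strategy==0 simply uses max_b_frames, while
 * b_frame_strategy==1 cuts the run at the first frame whose intra-block
 * count (get_intra_count()) exceeds roughly 1/40 of the macroblocks. */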
int MPV_encode_picture(AVCodecContext *avctx,
                       unsigned char *buf, int buf_size, void *data)
{
    MpegEncContext *s = avctx->priv_data;
    AVFrame *pic_arg = data;
    int i, stuffing_count;

    if(avctx->pix_fmt != PIX_FMT_YUV420P){
        av_log(avctx, AV_LOG_ERROR, "this codec supports only YUV420P\n");
        return -1;
    }

    for(i=0; i<avctx->thread_count; i++){
        int y= s->thread_context[i]->start_mb_y;
        int h= s->mb_height;
        uint8_t *start= buf + buf_size*y/h;
        uint8_t *end  = buf + buf_size*(y+1)/h;

        init_put_bits(&s->thread_context[i]->pb, start, end - start);
    }

    s->picture_in_gop_number++;

    load_input_picture(s, pic_arg);

    select_input_picture(s);

    if(s->new_picture.data[0]){
        s->pict_type= s->new_picture.pict_type;
//printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
        MPV_frame_start(s, avctx);

        encode_picture(s, s->picture_number);

        avctx->real_pict_num  = s->picture_number;
        avctx->header_bits = s->header_bits;
        avctx->mv_bits     = s->mv_bits;
        avctx->misc_bits   = s->misc_bits;
        avctx->i_tex_bits  = s->i_tex_bits;
        avctx->p_tex_bits  = s->p_tex_bits;
        avctx->i_count     = s->i_count;
        avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
        avctx->skip_count  = s->skip_count;

        MPV_frame_end(s);

        if (s->out_format == FMT_MJPEG)
            mjpeg_picture_trailer(s);

        if(s->flags&CODEC_FLAG_PASS1)
            ff_write_pass1_stats(s);

        for(i=0; i<4; i++){
            avctx->error[i] += s->current_picture_ptr->error[i];
        }

        flush_put_bits(&s->pb);
        s->frame_bits  = put_bits_count(&s->pb);

        stuffing_count= ff_vbv_update(s, s->frame_bits);
        if(stuffing_count){
            switch(s->codec_id){
            case CODEC_ID_MPEG1VIDEO:
            case CODEC_ID_MPEG2VIDEO:
                while(stuffing_count--){
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case CODEC_ID_MPEG4:
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while(stuffing_count--){
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
            }
            flush_put_bits(&s->pb);
            s->frame_bits  = put_bits_count(&s->pb);
        }

        /* update mpeg1/2 vbv_delay for CBR */
        if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate){
            int vbv_delay;

            assert(s->repeat_first_field==0);

            vbv_delay= lrintf(90000 * s->rc_context.buffer_index / s->avctx->rc_max_rate);
            assert(vbv_delay < 0xFFFF);

            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay>>13;
            s->vbv_delay_ptr[1]  = vbv_delay>>5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay<<3;
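            /* vbv_delay is a 16 bit field that is packed into the low 3 bits
               of vbv_delay_ptr[0], all 8 bits of vbv_delay_ptr[1] and the
               high 5 bits of vbv_delay_ptr[2], hence the >>13, >>5 and <<3
               splits above. */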
        }

        s->total_bits += s->frame_bits;
        avctx->frame_bits  = s->frame_bits;
    }else{
        assert((pbBufPtr(&s->pb) == s->pb.buf));
        s->frame_bits=0;
    }
    assert((s->frame_bits&7)==0);

    return s->frame_bits/8;
}

#endif //CONFIG_ENCODERS
static inline void gmc1_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int dest_offset,
                               uint8_t **ref_picture, int src_offset)
{
    uint8_t *ptr;
    int offset, src_x, src_y, linesize, uvlinesize;
    int motion_x, motion_y;
    int emu=0;

    motion_x= s->sprite_offset[0][0];
    motion_y= s->sprite_offset[0][1];
    src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x =0;
    src_y = clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y =0;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;

    dest_y+=dest_offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x >= s->h_edge_pos - 17
           || (unsigned)src_y >= s->v_edge_pos - 17){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
            ptr= s->edge_emu_buffer;
        }
    }

    if((motion_x|motion_y)&7){
        s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
        s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
    }else{
        int dxy;

        dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
        if (s->no_rounding){
            s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }else{
            s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if(s->flags&CODEC_FLAG_GRAY) return;

    motion_x= s->sprite_offset[1][0];
    motion_y= s->sprite_offset[1][1];
    src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = clip(src_x, -8, s->width>>1);
    if (src_x == s->width>>1)
        motion_x =0;
    src_y = clip(src_y, -8, s->height>>1);
    if (src_y == s->height>>1)
        motion_y =0;

    offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x >= (s->h_edge_pos>>1) - 9
           || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
}
static inline void gmc_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int dest_offset,
                               uint8_t **ref_picture, int src_offset)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a= s->sprite_warping_accuracy;
    int ox, oy;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + src_offset;

    dest_y+=dest_offset;

    ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
    oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;

    s->dsp.gmc(dest_y, ptr, linesize, 16,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos, s->v_edge_pos);
    s->dsp.gmc(dest_y+8, ptr, linesize, 16,
           ox + s->sprite_delta[0][0]*8,
           oy + s->sprite_delta[1][0]*8,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos, s->v_edge_pos);

    if(s->flags&CODEC_FLAG_GRAY) return;

    dest_cb+=dest_offset>>1;
    dest_cr+=dest_offset>>1;

    ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
    oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;

    ptr = ref_picture[1] + (src_offset>>1);
    s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos>>1, s->v_edge_pos>>1);

    ptr = ref_picture[2] + (src_offset>>1);
    s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos>>1, s->v_edge_pos>>1);
}
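
/* gmc_motion() handles the general (non purely translational) warp: ox/oy
 * are the affine source coordinates of the block's top-left corner,
 * sprite_delta[][] holds the per-pixel increments, and dsp.gmc() resamples
 * with a+1 fractional bits, using (1<<(2*a+1)) - no_rounding as the rounding
 * bias and h_edge_pos/v_edge_pos for clipping. */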
/**
 * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
 * @param buf destination buffer
 * @param src source buffer
 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
 * @param block_w width of block
 * @param block_h height of block
 * @param src_x x coordinate of the top left sample of the block in the source buffer
 * @param src_y y coordinate of the top left sample of the block in the source buffer
 * @param w width of the source buffer
 * @param h height of the source buffer
 */
void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
                                    int src_x, int src_y, int w, int h){
    int x, y;
    int start_y, start_x, end_y, end_x;

    if(src_y>= h){
        src+= (h-1-src_y)*linesize;
        src_y=h-1;
    }else if(src_y<=-block_h){
        src+= (1-block_h-src_y)*linesize;
        src_y=1-block_h;
    }
    if(src_x>= w){
        src+= (w-1-src_x);
        src_x=w-1;
    }else if(src_x<=-block_w){
        src+= (1-block_w-src_x);
        src_x=1-block_w;
    }

    start_y= FFMAX(0, -src_y);
    start_x= FFMAX(0, -src_x);
    end_y= FFMIN(block_h, h-src_y);
    end_x= FFMIN(block_w, w-src_x);

    // copy existing part
    for(y=start_y; y<end_y; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= src[x + y*linesize];
        }
    }

    //top
    for(y=0; y<start_y; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= buf[x + start_y*linesize];
        }
    }

    //bottom
    for(y=end_y; y<block_h; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
        }
    }

    for(y=0; y<block_h; y++){
        //left
        for(x=0; x<start_x; x++){
            buf[x + y*linesize]= buf[start_x + y*linesize];
        }

        //right
        for(x=end_x; x<block_w; x++){
            buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
        }
    }
}
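
/* Typical use of ff_emulated_edge_mc(): when a motion vector reaches past
 * the picture edges, the caller first copies the block into edge_emu_buffer
 * (which replicates the border samples) and then interpolates from that
 * buffer instead.  A minimal sketch following the pattern used in this file
 * (17x17 source for a 16x16 half-pel block; variable names illustrative):
 *
 *     if(   (unsigned)src_x >= s->h_edge_pos - 17
 *        || (unsigned)src_y >= s->v_edge_pos - 17){
 *         ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17,
 *                             src_x, src_y, s->h_edge_pos, s->v_edge_pos);
 *         ptr= s->edge_emu_buffer;
 *     }
 */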
static inline int hpel_motion(MpegEncContext *s,
                                  uint8_t *dest, uint8_t *src,
                                  int src_x, int src_y,
                                  int width, int height, int stride,
                                  int h_edge_pos, int v_edge_pos,
                                  int w, int h, op_pixels_func *pix_op,
                                  int motion_x, int motion_y)
{
    int dxy;
    int emu=0;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do not forget half pels */
    src_x = clip(src_x, -16, width); //FIXME unneeded for emu?
    if (src_x == width)
        dxy &= ~1;
    src_y = clip(src_y, -16, height);
    if (src_y == height)
        dxy &= ~2;
    src += src_y * stride + src_x;

    if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
        if(   (unsigned)src_x > h_edge_pos - (motion_x&1) - w
           || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
            ff_emulated_edge_mc(s->edge_emu_buffer, src, stride, w+1, h+1,
                                src_x, src_y, h_edge_pos, v_edge_pos);
            src= s->edge_emu_buffer;
            emu=1;
        }
    }
    pix_op[dxy](dest, src, stride, h);
    return emu;
}
/* apply one mpeg motion vector to the three components */
static inline void mpeg_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int dest_offset,
                               uint8_t **ref_picture, int src_offset,
                               int field_based, op_pixels_func (*pix_op)[4],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr;
    int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, uvlinesize;
    int emu=0;
#if 0
    if(s->quarter_sample)
    {
        motion_x>>=1;
        motion_y>>=1;
    }
#endif

    height = s->height >> field_based;
    v_edge_pos = s->v_edge_pos >> field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    emu= hpel_motion(s,
                dest_y + dest_offset, ref_picture[0] + src_offset,
                s->mb_x * 16, s->mb_y * (16 >> field_based),
                s->width, height, s->current_picture.linesize[0] << field_based,
                s->h_edge_pos, v_edge_pos,
                16, h, pix_op[0],
                motion_x, motion_y);

    if(s->flags&CODEC_FLAG_GRAY) return;

    if (s->out_format == FMT_H263) {
        dxy = 0;
        if ((motion_x & 3) != 0)
            dxy |= 1;
        if ((motion_y & 3) != 0)
            dxy |= 2;
        mx = motion_x >> 2;
        my = motion_y >> 2;
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        dxy = ((my & 1) << 1) | (mx & 1);
        mx >>= 1;
        my >>= 1;
    }

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * (8 >> field_based) + my;
    src_x = clip(src_x, -8, s->width >> 1);
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = clip(src_y, -8, height >> 1);
    if (src_y == (height >> 1))
        dxy &= ~2;
    offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
    ptr = ref_picture[1] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
                            src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer + (src_offset >> 1);
    }
    pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
                            src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer + (src_offset >> 1);
    }
    pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
}
//FIXME move to dsputil, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
    int x;
    uint8_t * const top   = src[1];
    uint8_t * const left  = src[2];
    uint8_t * const mid   = src[0];
    uint8_t * const right = src[3];
    uint8_t * const bottom= src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    x=0;
    OBMC_FILTER (x  , 2, 2, 4, 0, 0);
    OBMC_FILTER (x+1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+7, 2, 0, 4, 2, 0);
    x+= stride;
    OBMC_FILTER (x  , 1, 2, 5, 0, 0);
    OBMC_FILTER (x+1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x+6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x+7, 1, 0, 5, 2, 0);
    x+= stride;
    OBMC_FILTER4(x  , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x+2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x+4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x+6, 1, 0, 5, 2, 0);
    x+= 2*stride;
    OBMC_FILTER4(x  , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x+4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x+6, 0, 0, 5, 2, 1);
    x+= 2*stride;
    OBMC_FILTER (x  , 0, 2, 5, 0, 1);
    OBMC_FILTER (x+1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x+4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x+7, 0, 0, 5, 2, 1);
    x+= stride;
    OBMC_FILTER (x  , 0, 2, 4, 0, 2);
    OBMC_FILTER (x+1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+7, 0, 0, 4, 2, 2);
}
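
/* The OBMC_FILTER weight sets above always sum to 8, so together with the +4
 * bias and the >>3 each output pixel is a correctly rounded weighted average
 * of the five candidate predictions (mid/top/left/right/bottom); the
 * OBMC_FILTER4 variant just applies one weight set to a 2x2 group. */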
/* obmc for 1 8x8 luma block */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
                               int16_t mv[5][2]/* mid top left right bottom*/)
{
    int i;
    uint8_t *ptr[5];

    assert(s->quarter_sample==0);

    for(i=0; i<5; i++){
        if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){
            ptr[i]= ptr[MID];
        }else{
            ptr[i]= s->obmc_scratchpad + 8*(i&1) + s->linesize*8*(i>>1);
            hpel_motion(s, ptr[i], src,
                        src_x, src_y,
                        s->width, s->height, s->linesize,
                        s->h_edge_pos, s->v_edge_pos,
                        8, 8, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int dest_offset,
                               uint8_t **ref_picture, int src_offset,
                               int field_based, op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr;
    int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
    int emu=0;

    dxy = ((motion_y & 3) << 2) | (motion_x & 3);
    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    height = s->height >> field_based;
    v_edge_pos = s->v_edge_pos >> field_based;
    src_x = clip(src_x, -16, s->width);
    if (src_x == s->width)
        dxy &= ~3;
    src_y = clip(src_y, -16, height);
    if (src_y == height)
        dxy &= ~12;
    linesize = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;
    ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
    dest_y += dest_offset;
//printf("%d %d %d\n", src_x, src_y, dxy);

    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16
           || (unsigned)src_y > v_edge_pos - (motion_y&3) - h){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based,
                                src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
            ptr= s->edge_emu_buffer + src_offset;
            emu=1;
        }
    }
    if(!field_based)
        qpix_op[0][dxy](dest_y, ptr, linesize);
    else{
        //damn interlaced mode
        //FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y  , ptr  , linesize);
        qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
    }

    if(s->flags&CODEC_FLAG_GRAY) return;

    if(field_based){
        mx= motion_x/2;
        my= motion_y>>1;
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
        static const int rtab[8]= {0,0,1,1,0,0,0,1};
        mx= (motion_x>>1) + rtab[motion_x&7];
        my= (motion_y>>1) + rtab[motion_y&7];
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
        mx= (motion_x>>1)|(motion_x&1);
        my= (motion_y>>1)|(motion_y&1);
    }else{
        mx= motion_x/2;
        my= motion_y/2;
    }
    mx= (mx>>1)|(mx&1);
    my= (my>>1)|(my&1);

    dxy= (mx&1) | ((my&1)<<1);
    mx>>=1;
    my>>=1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * (8 >> field_based) + my;
    src_x = clip(src_x, -8, s->width >> 1);
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = clip(src_y, -8, height >> 1);
    if (src_y == (height >> 1))
        dxy &= ~2;

    offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
    ptr = ref_picture[1] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
                            src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer + (src_offset >> 1);
    }
    pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
                            src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer + (src_offset >> 1);
    }
    pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
}
inline int ff_h263_round_chroma(int x){
    if (x >= 0)
        return  (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
    else {
        x = -x;
        return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
    }
}
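
/* ff_h263_round_chroma() implements the "special rounding" mentioned in
 * chroma_4mv_motion() below: (x>>3)&~1 is roughly x/8 rounded down to an
 * even value and the table indexed by the low four bits supplies the
 * rounding correction; negative inputs are mirrored so the result is
 * symmetric around zero.  Illustrative use (names are placeholders only):
 *
 *     mx = ff_h263_round_chroma(sum_of_luma_mv_x);
 *     my = ff_h263_round_chroma(sum_of_luma_mv_y);
 */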
/**
 * h263 chroma 4mv motion compensation.
 */
static inline void chroma_4mv_motion(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     op_pixels_func *pix_op,
                                     int mx, int my){
    int dxy, emu=0, src_x, src_y, offset;
    uint8_t *ptr;

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx= ff_h263_round_chroma(mx);
    my= ff_h263_round_chroma(my);

    dxy = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = clip(src_x, -8, s->width/2);
    if (src_x == s->width/2)
        dxy &= ~1;
    src_y = clip(src_y, -8, s->height/2);
    if (src_y == s->height/2)
        dxy &= ~2;

    offset = (src_y * (s->uvlinesize)) + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8
           || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}
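
/* chroma_4mv_motion() evaluates the edge-emulation condition only once, for
 * the Cb plane, and reuses the emu flag for Cr, since both chroma planes
 * share the same geometry and the same clipped source position. */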
/**
 * motion compensation of a single macroblock
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
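/* Additional note on the OBMC path at the start of MPV_motion() below:
 * mv_cache[][] holds the macroblock's own 8x8 vectors in the inner rows and
 * columns with a one-entry border of neighbouring vectors around them;
 * border entries are duplicated from the inside whenever the neighbour is
 * missing or intra coded, so obmc_motion() always gets a complete
 * mid/top/left/right/bottom set. */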
static inline void MPV_motion(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
{
    int dxy, mx, my, src_x, src_y, motion_x, motion_y;
    int mb_x, mb_y, i;
    uint8_t *ptr, *dest;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    if(s->obmc && s->pict_type != B_TYPE){
        int16_t mv_cache[4][4][2];
        const int xy= s->mb_x + s->mb_y*s->mb_stride;
        const int mot_stride= s->mb_width*2 + 2;
        const int mot_xy= 1 + mb_x*2 + (mb_y*2 + 1)*mot_stride;

        assert(!s->mb_skiped);

        memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy           ], sizeof(int16_t)*4);
        memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
        memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);

        if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
            memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
        }else{
            memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
        }

        if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
            *(int32_t*)mv_cache[1][0]= *(int32_t*)mv_cache[1][1];
            *(int32_t*)mv_cache[2][0]= *(int32_t*)mv_cache[2][1];
        }else{
            *(int32_t*)mv_cache[1][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1];
            *(int32_t*)mv_cache[2][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1+mot_stride];
        }

        if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
            *(int32_t*)mv_cache[1][3]= *(int32_t*)mv_cache[1][2];
            *(int32_t*)mv_cache[2][3]= *(int32_t*)mv_cache[2][2];
        }else{
            *(int32_t*)mv_cache[1][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2];
            *(int32_t*)mv_cache[2][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2+mot_stride];
        }

        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            const int x= (i&1)+1;
            const int y= (i>>1)+1;
            int16_t mv[5][2]= {
                {mv_cache[y][x  ][0], mv_cache[y][x  ][1]},
                {mv_cache[y-1][x][0], mv_cache[y-1][x][1]},
                {mv_cache[y][x-1][0], mv_cache[y][x-1][1]},
                {mv_cache[y][x+1][0], mv_cache[y][x+1][1]},
                {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}};

            obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,