2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
25 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "mpegvideo.h"
35 #include "fastmemcpy.h"
#ifdef CONFIG_ENCODERS
/* encoder main loop for one picture; defined later in this file */
static void encode_picture(MpegEncContext *s, int picture_number);
#endif //CONFIG_ENCODERS
/* C reference dequantizers, one intra/inter pair per coding standard.
   n selects the block within the macroblock, qscale the quantizer.
   The MpegEncContext function pointers default to these and may be
   replaced by arch-specific versions. */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
/* C reference implementation for replicating picture edges */
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
#ifdef CONFIG_ENCODERS
/* encoder-only quantizer helpers (definitions below) */
static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
static int sse_mb(MpegEncContext *s);
static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
#endif //CONFIG_ENCODERS

/* XvMC acceleration entry points, implemented outside this file */
extern int XVMC_field_start(MpegEncContext *s, AVCodecContext *avctx);
extern void XVMC_field_end(MpegEncContext *s);
extern void XVMC_decode_mb(MpegEncContext *s);

/* function pointer so arch-specific init code can override the C version */
void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
74 /* enable all paranoid tests for rounding, overflows, etc... */
80 /* for jpeg fast DCT */
/* postscale factors of the AAN fast DCT; convert_matrix() folds these
   into the quantizer tables when fdct_ifast / ff_faandct is selected */
static const uint16_t aanscales[64] = {
    /* precomputed values scaled up by 14 bits */
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
    21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
    19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
    8867 , 12299, 11585, 10426,  8867,  6967,  4799,  2446,
    4520 ,  6270,  5906,  5315,  4520,  3552,  2446,  1247
};
/* rounding table used when halving/averaging motion vectors for chroma
   in H.263-style codecs; indexed by a value in 0..15 */
static const uint8_t h263_chroma_roundtab[16] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
};
/* identity mapping: by default the chroma quantizer equals the luma
   quantizer (codecs with a nonlinear chroma table install their own) */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
#ifdef CONFIG_ENCODERS
/* shared motion-vector penalty table, allocated lazily in
   MPV_encode_defaults() and used when no custom table is installed */
static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
/* default fcode table, filled in MPV_encode_defaults() */
static uint8_t default_fcode_tab[MAX_MV*2+1];
/* NOTE(review): the matching #endif //CONFIG_ENCODERS for this #ifdef is
   not visible here — presumably lost in extraction; confirm upstream. */
/* -1-terminated pixel format list: only YUV420P */
enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
/**
 * Builds the integer quantizer tables qmat (32-bit) and qmat16 (16-bit,
 * with a per-entry bias) for every qscale in [qmin, qmax], folding the
 * scale factors of the selected fdct implementation into the tables.
 * intra selects whether entry 0 (the DC coefficient) is skipped.
 * NOTE(review): several original lines (loop headers over i, braces, the
 * shift/overflow bookkeeping) are elided in this extraction; the visible
 * fragments are kept as found.
 */
static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
                           const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
    for(qscale=qmin; qscale<=qmax; qscale++){
        /* unscaled (slow/accurate) DCT: no postscale to fold in */
        if (dsp->fdct == ff_jpeg_fdct_islow
#ifdef FAAN_POSTSCALE
            || dsp->fdct == ff_faandct
                const int j= dsp->idct_permutation[i];
                /* 16 <= qscale * quant_matrix[i] <= 7905 */
                /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
                /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
                /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
                qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) /
                                        (qscale * quant_matrix[j]));
        /* AAN-style fast DCT: fold the 14-bit aanscales postscale in */
        } else if (dsp->fdct == fdct_ifast
#ifndef FAAN_POSTSCALE
                   || dsp->fdct == ff_faandct
                const int j= dsp->idct_permutation[i];
                /* same bounds as above, with the extra 14-bit AAN scale */
                qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) /
                                        (aanscales[i] * qscale * quant_matrix[j]));
                const int j= dsp->idct_permutation[i];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                   So 16 <= qscale * quant_matrix[i] <= 7905
                   so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
                   so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67 */
                qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
//                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
                /* keep the 16-bit entries out of the degenerate values 0 and 128*256 */
                if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;
                qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);

        /* overflow check: shrink the shift until max*qmat fits in an int */
        for(i=intra; i<64; i++){
            if (dsp->fdct == fdct_ifast
#ifndef FAAN_POSTSCALE
                || dsp->fdct == ff_faandct
                max= (8191LL*aanscales[i]) >> 14;
            while(((max * qmat[qscale][i]) >> shift) > INT_MAX){
        av_log(NULL, AV_LOG_INFO, "Warning, QMAT_SHIFT is larger then %d, overflows possible\n", QMAT_SHIFT - shift);
/* derives the quantizer from the current lagrange multiplier (lambda),
   clips it to the user-supplied [qmin, qmax] range, and keeps lambda2
   in sync (lambda squared, rounded, in FF_LAMBDA units) */
static inline void update_qscale(MpegEncContext *s){
    /* the FF_LAMBDA_SCALE*64 term rounds; the +7 in the shift matches the *139 scaling */
    s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
    s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax);

    s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
}
#endif //CONFIG_ENCODERS
/* Initializes a ScanTable: stores the raw scan order and fills
   st->permutated with the scan run through the IDCT coefficient
   permutation; st->raster_end caches, per position, the end of the
   scan in raster order.
   NOTE(review): the loop headers and the end-tracking logic are elided
   in this extraction; fragments kept as found. */
void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
    st->scantable= src_scantable;

        j = src_scantable[i];
        st->permutated[i] = permutation[j];

        j = st->permutated[i];
        st->raster_end[i]= end;
#ifdef CONFIG_ENCODERS
/* Writes a 64-entry quantization matrix to the bitstream in zigzag
   order, 8 bits per coefficient.
   NOTE(review): the surrounding control flow (loop over the 64 entries
   and the matrix==NULL case) is elided in this extraction. */
void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){
        put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
#endif //CONFIG_ENCODERS
/* init common dct for both encoder and decoder: installs the C
   reference (de)quantizers, lets arch-specific inits override them,
   then builds the scan tables.
   NOTE(review): the per-arch #ifdef guards around the MPV_common_init_*
   calls and several braces/returns are elided in this extraction. */
int DCT_common_init(MpegEncContext *s)
    /* default to the portable C dequantizers */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#ifdef CONFIG_ENCODERS
    s->dct_quantize= dct_quantize_c;
    s->denoise_dct= denoise_dct_c;

    /* arch-specific overrides (each call was guarded by its own #ifdef) */
    MPV_common_init_mmx(s);
    MPV_common_init_axp(s);
    MPV_common_init_mlib(s);
    MPV_common_init_mmi(s);
    MPV_common_init_armv4l(s);
    MPV_common_init_ppc(s);

#ifdef CONFIG_ENCODERS
    s->fast_dct_quantize= s->dct_quantize;

    if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
        s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
#endif //CONFIG_ENCODERS

    /* load & permutate scantables
       note: only wmv uses different ones */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* makes dst a copy of src and marks it as a copy, so the underlying
   buffer is not released through the copy.
   NOTE(review): the struct assignment expected before the type override
   (*dst = *src) is not visible in this extraction — confirm upstream. */
static void copy_picture(Picture *dst, Picture *src){
    dst->type= FF_BUFFER_TYPE_COPY;
/* Copies the per-frame metadata (and, when motion estimation over user
   frames is enabled, the motion information) from src to dst without
   touching the pixel data.
   NOTE(review): some guards and the loop over the two motion_val /
   ref_index planes are elided in this extraction. */
static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src){
    dst->pict_type              = src->pict_type;
    dst->quality                = src->quality;
    dst->coded_picture_number   = src->coded_picture_number;
    dst->display_picture_number = src->display_picture_number;
//    dst->reference              = src->reference;
    dst->interlaced_frame       = src->interlaced_frame;
    dst->top_field_first        = src->top_field_first;

    if(s->avctx->me_threshold){
        /* validate the caller-supplied motion information */
        if(!src->motion_val[0])
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
        if(!src->ref_index[0])
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
        if(src->motion_subsample_log2 != dst->motion_subsample_log2)
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesnt match! (%d!=%d)\n",
                   src->motion_subsample_log2, dst->motion_subsample_log2);

        memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));

        /* stride/height of the motion vector planes at this subsampling */
        int stride= ((16*s->mb_width )>>src->motion_subsample_log2) + 1;
        int height= ((16*s->mb_height)>>src->motion_subsample_log2);

            /* only copy when the planes exist and are not already shared */
            if(src->motion_val[i] && src->motion_val[i] != dst->motion_val[i]){
                memcpy(dst->motion_val[i], src->motion_val[i], 2*stride*height*sizeof(int16_t));
            if(src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]){
                memcpy(dst->ref_index[i], src->ref_index[i], s->b8_stride*2*s->mb_height*sizeof(int8_t));
/**
 * allocates a Picture
 * The pixels are allocated/set by calling get_buffer() if shared=0
 * NOTE(review): the if/else around the shared path, the loops over the
 * two motion planes and the return statements are elided in this
 * extraction; fragments kept as found.
 */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11
    const int mb_array_size= s->mb_stride*s->mb_height;
    const int b8_array_size= s->b8_stride*s->mb_height*2;
    const int b4_array_size= s->b4_stride*s->mb_height*4;

        /* shared path: the caller owns the pixel buffers */
        assert(pic->data[0]);
        assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
        pic->type= FF_BUFFER_TYPE_SHARED;

        /* non-shared: obtain pixels via the (overridable) get_buffer() */
        assert(!pic->data[0]);

        r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);

        if(r<0 || !pic->age || !pic->type || !pic->data[0]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);

        /* the stride must stay constant for the lifetime of the context */
        if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");

        if(pic->linesize[1] != pic->linesize[2]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride missmatch)\n");

        s->linesize  = pic->linesize[0];
        s->uvlinesize= pic->linesize[1];

    if(pic->qscale_table==NULL){
        /* first use of this Picture: allocate the per-MB side tables */
            CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))

        CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
        CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num    * sizeof(uint32_t))
        pic->mb_type= pic->mb_type_base + s->mb_stride+1;
        if(s->out_format == FMT_H264){
                /* 4x4 motion granularity */
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+2) * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+2;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            pic->motion_subsample_log2= 2;
        }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
                /* 8x8 motion granularity */
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+2) * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+2;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            pic->motion_subsample_log2= 3;
        if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
            CHECKED_ALLOCZ(pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6)
        pic->qstride= s->mb_stride;
        CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))

    //it might be nicer if the application would keep track of these but it would require a API change
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
    s->prev_pict_types[0]= s->pict_type;
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
        pic->age= INT_MAX; // skiped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway

fail: //for the CHECKED_ALLOCZ macro
/**
 * deallocates a picture
 * NOTE(review): the loop over the motion planes and the body of the
 * shared-buffer branch at the end are elided in this extraction.
 */
static void free_picture(MpegEncContext *s, Picture *pic){
    /* release codec-allocated pixel buffers; shared buffers belong to the caller */
    if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
        s->avctx->release_buffer(s->avctx, (AVFrame*)pic);

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->mbskip_table);
    av_freep(&pic->qscale_table);
    av_freep(&pic->mb_type_base);
    av_freep(&pic->dct_coeff);
    av_freep(&pic->pan_scan);

        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->ref_index[i]);

    if(pic->type == FF_BUFFER_TYPE_SHARED){
/* Allocates the per-thread scratch buffers of a (possibly duplicated)
   context: edge emulation buffer, ME scratchpads, ME maps, optional
   noise-reduction accumulator and the DCT block arrays.
   Returns -1 via the fail: label of CHECKED_ALLOCZ on OOM.
   NOTE(review): the success return, the pblocks loop header and the
   fail: label are elided in this extraction. */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
    CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;

    //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
    CHECKED_ALLOCZ(s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t))
    /* the three scratchpads alias one allocation */
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;

    CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
    CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
    if(s->avctx->noise_reduction){
        CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))

    CHECKED_ALLOCZ(s->blocks, 64*12*2 * sizeof(DCTELEM))
    s->block= s->blocks[0];

        s->pblocks[i] = (short *)(&s->block[i]);

    return -1; //free() through MPV_common_end()
/* frees everything init_duplicate_context() allocated; safe because
   av_freep() NULLs the pointers it frees.
   NOTE(review): the NULL-context guard and the rd_/b_scratchpad resets
   appear elided in this extraction. */
static void free_duplicate_context(MpegEncContext *s){
    av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
    av_freep(&s->me.scratchpad);
    s->obmc_scratchpad= NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
/* saves the per-thread fields of src into bak so a wholesale memcpy of
   the context can be undone for those fields (see
   ff_update_duplicate_context()).
   NOTE(review): several COPY() lines and the #undef appear elided. */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#define COPY(a) bak->a= src->a
    COPY(allocated_edge_emu_buffer);
    COPY(edge_emu_buffer);
    COPY(obmc_scratchpad);
    COPY(me.map_generation);
/* Synchronizes a per-thread context with the master: copies the whole
   struct, then restores dst's own per-thread buffers/state via
   backup_duplicate_context() and rebases the pblocks pointers.
   NOTE(review): the local 'bak' declaration and the pblocks loop header
   are elided in this extraction. */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
    //FIXME copy only needed parts
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
        dst->pblocks[i] = (short *)(&dst->block[i]);
//STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* copies back, after motion estimation, the fields a worker thread may
   have changed and that the master context needs.
   NOTE(review): several COPY() lines and the #undef appear elided. */
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src){
#define COPY(a) dst->a= src->a
    COPY(current_picture);
    COPY(picture_in_gop_number);
    COPY(gop_picture_number);
    COPY(frame_pred_frame_dct); //FIXME dont set in encode_header
    COPY(progressive_frame);    //FIXME dont set in encode_header
    COPY(partitioned_frame);    //FIXME dont set in encode_header
/**
 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 * NOTE(review): adjacent assignments (e.g. the luma dc scale table) appear
 * elided in this extraction.
 */
static void MPV_common_defaults(MpegEncContext *s){
    s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table   = ff_default_chroma_qscale_table;
    s->progressive_frame     = 1;
    s->progressive_sequence  = 1;
    s->picture_structure     = PICT_FRAME;

    s->coded_picture_number  = 0;
    s->picture_number        = 0;
    s->input_picture_number  = 0;

    s->picture_in_gop_number = 0;
/**
 * sets the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}
/**
 * sets the given MpegEncContext to defaults for encoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 * NOTE(review): the once-only guard around the static table setup and the
 * loop braces appear elided in this extraction. Also, the memset of
 * default_mv_penalty looks redundant — av_mallocz presumably already
 * zeroes the allocation; confirm upstream.
 */
#ifdef CONFIG_ENCODERS
static void MPV_encode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);

        default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
        memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
        memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));

        /* mark the small-MV range as fcode 1 */
        for(i=-16; i<16; i++){
            default_fcode_tab[i + MAX_MV]= 1;

    s->me.mv_penalty= default_mv_penalty;
    s->fcode_tab= default_fcode_tab;
#endif //CONFIG_ENCODERS
/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 * NOTE(review): several guards (encoding-only sections, the out_format
 * checks around the MV-table allocation, returns, loop braces) are
 * elided in this extraction; fragments kept as found.
 */
int MPV_common_init(MpegEncContext *s)
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    /* threads are split by MB rows; each needs at least 16 luma rows */
    if(s->avctx->thread_count > MAX_THREADS || (16*s->avctx->thread_count > s->height && s->height)){
        av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");

    dsputil_init(&s->dsp, s->avctx);

    s->flags= s->avctx->flags;
    s->flags2= s->avctx->flags2;

    /* macroblock geometry; strides have +1 columns of padding */
    s->mb_width  = (s->width  + 15) / 16;
    s->mb_height = (s->height + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width*2 + 1;
    s->b4_stride = s->mb_width*4 + 1;
    mb_array_size= s->mb_height * s->mb_stride;
    mv_table_size= (s->mb_height+2) * s->mb_stride + 1;

    /* set chroma shifts */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                                    &(s->chroma_y_shift) );

    /* set default edge pos, will be overriden in decode_header if needed */
    s->h_edge_pos= s->mb_width*16;
    s->v_edge_pos= s->mb_height*16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[5]= s->mb_stride;

    /* sizes (in 8x8 block units) of the dc/ac prediction planes */
    y_size = s->b8_stride * (2 * s->mb_height + 1);
    c_size = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    /* convert fourcc to upper case */
    s->avctx->codec_tag=   toupper( s->avctx->codec_tag     &0xFF)
                        + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
                        + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
                        + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);

    s->avctx->stream_codec_tag=   toupper( s->avctx->stream_codec_tag     &0xFF)
                               + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
                               + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
                               + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);

    s->avctx->coded_frame= (AVFrame*)&s->current_picture;

    /* mb index -> mb position in the padded (strided) layout */
    CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error ressilience code looks cleaner with this
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?

        /* Allocate MV tables */
        CHECKED_ALLOCZ(s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t))
        /* usable pointers skip the one-row/one-column padding */
        s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

        if(s->msmpeg4_version){
            CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
        CHECKED_ALLOCZ(s->avctx->stats_out, 256);

        /* Allocate MB type table */
        CHECKED_ALLOCZ(s->mb_type  , mb_array_size * sizeof(uint16_t)) //needed for encoding

        CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))

        /* quantizer matrices for the 32 qscales (int and 16-bit variants) */
        CHECKED_ALLOCZ(s->q_intra_matrix  , 64*32   * sizeof(int))
        CHECKED_ALLOCZ(s->q_inter_matrix  , 64*32   * sizeof(int))
        CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->input_picture          , MAX_PICTURE_COUNT * sizeof(Picture*))
        CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))

        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t))

    CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))

    CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))

    if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
        /* interlaced direct mode decoding tables */
                CHECKED_ALLOCZ(s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t))
                s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
            CHECKED_ALLOCZ(s->b_field_select_table[i][j], mb_array_size * 2 * sizeof(uint8_t))
            CHECKED_ALLOCZ(s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t))
            s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
        CHECKED_ALLOCZ(s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t))

    if (s->out_format == FMT_H263) {
        /* ac prediction values; pointers skip the padding */
        CHECKED_ALLOCZ(s->ac_val_base, yc_size * sizeof(int16_t) * 16);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;

        /* coded-block pattern state */
        CHECKED_ALLOCZ(s->coded_block_base, y_size);
        s->coded_block= s->coded_block_base + s->b8_stride + 1;

        /* divx501 bitstream reorder buffer */
        CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);

        /* cbp, ac_pred, pred_dir */
        CHECKED_ALLOCZ(s->cbp_table     , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        //MN: we need these for error resilience of intra-frames
        CHECKED_ALLOCZ(s->dc_val_base, yc_size * sizeof(int16_t));
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 is the reset value of the dc predictors */
        for(i=0;i<yc_size;i++)
            s->dc_val_base[i] = 1024;

    /* which mb is a intra block */
    CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
    //Note the +1 is for a quicker mpeg4 slice_end detection
    CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);

    s->parse_context.state= -1;
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
       s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[1] = av_malloc((s->mb_width*8  + EDGE_WIDTH)   * s->mb_height*8  + EDGE_WIDTH);
       s->visualization_buffer[2] = av_malloc((s->mb_width*8  + EDGE_WIDTH)   * s->mb_height*8  + EDGE_WIDTH);

    s->context_initialized = 1;

    /* slot 0 is the master context itself; further threads get copies */
    s->thread_context[0]= s;
    for(i=1; i<s->avctx->thread_count; i++){
        s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
        memcpy(s->thread_context[i], s, sizeof(MpegEncContext));

    for(i=0; i<s->avctx->thread_count; i++){
        if(init_duplicate_context(s->thread_context[i], s) < 0)
        /* split the MB rows evenly (rounded) across the threads */
        s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
        s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
/* frees everything MPV_common_init() allocated, for both encoder and
   decoder (the original comment said "init" — it frees).
   NOTE(review): the i/j/k loop headers around the field MV/select table
   frees and around visualization_buffer are elided in this extraction. */
void MPV_common_end(MpegEncContext *s)
    /* per-thread scratch buffers; slot 0 is the context itself, so only
       slots >= 1 are heap-freed */
    for(i=0; i<s->avctx->thread_count; i++){
        free_duplicate_context(s->thread_context[i]);
    for(i=1; i<s->avctx->thread_count; i++){
        av_freep(&s->thread_context[i]);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    /* the derived (offset) pointers must not be freed, only cleared */
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        av_freep(&s->p_field_select_table[i]);

    av_freep(&s->dc_val_base);
    av_freep(&s->ac_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    for(i=0; i<MAX_PICTURE_COUNT; i++){
        free_picture(s, &s->picture[i]);
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

        av_freep(&s->visualization_buffer[i]);

    avcodec_default_free_buffers(s->avctx);
883 #ifdef CONFIG_ENCODERS
885 /* init video encoder */
886 int MPV_encode_init(AVCodecContext
*avctx
)
888 MpegEncContext
*s
= avctx
->priv_data
;
890 int chroma_h_shift
, chroma_v_shift
;
892 MPV_encode_defaults(s
);
894 avctx
->pix_fmt
= PIX_FMT_YUV420P
; // FIXME
896 s
->bit_rate
= avctx
->bit_rate
;
897 s
->width
= avctx
->width
;
898 s
->height
= avctx
->height
;
899 if(avctx
->gop_size
> 600){
900 av_log(avctx
, AV_LOG_ERROR
, "Warning keyframe interval too large! reducing it ...\n");
903 s
->gop_size
= avctx
->gop_size
;
905 s
->flags
= avctx
->flags
;
906 s
->flags2
= avctx
->flags2
;
907 s
->max_b_frames
= avctx
->max_b_frames
;
908 s
->codec_id
= avctx
->codec
->id
;
909 s
->luma_elim_threshold
= avctx
->luma_elim_threshold
;
910 s
->chroma_elim_threshold
= avctx
->chroma_elim_threshold
;
911 s
->strict_std_compliance
= avctx
->strict_std_compliance
;
912 s
->data_partitioning
= avctx
->flags
& CODEC_FLAG_PART
;
913 s
->quarter_sample
= (avctx
->flags
& CODEC_FLAG_QPEL
)!=0;
914 s
->mpeg_quant
= avctx
->mpeg_quant
;
915 s
->rtp_mode
= !!avctx
->rtp_payload_size
;
916 s
->intra_dc_precision
= avctx
->intra_dc_precision
;
917 s
->user_specified_pts
= AV_NOPTS_VALUE
;
919 if (s
->gop_size
<= 1) {
926 s
->me_method
= avctx
->me_method
;
929 s
->fixed_qscale
= !!(avctx
->flags
& CODEC_FLAG_QSCALE
);
931 s
->adaptive_quant
= ( s
->avctx
->lumi_masking
932 || s
->avctx
->dark_masking
933 || s
->avctx
->temporal_cplx_masking
934 || s
->avctx
->spatial_cplx_masking
935 || s
->avctx
->p_masking
936 || (s
->flags
&CODEC_FLAG_QP_RD
))
939 s
->obmc
= !!(s
->flags
& CODEC_FLAG_OBMC
);
940 s
->loop_filter
= !!(s
->flags
& CODEC_FLAG_LOOP_FILTER
);
941 s
->alternate_scan
= !!(s
->flags
& CODEC_FLAG_ALT_SCAN
);
943 if(avctx
->rc_max_rate
&& !avctx
->rc_buffer_size
){
944 av_log(avctx
, AV_LOG_ERROR
, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
948 if(avctx
->rc_min_rate
&& avctx
->rc_max_rate
!= avctx
->rc_min_rate
){
949 av_log(avctx
, AV_LOG_INFO
, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n");
952 if(avctx
->rc_min_rate
&& avctx
->rc_min_rate
> avctx
->bit_rate
){
953 av_log(avctx
, AV_LOG_INFO
, "bitrate below min bitrate\n");
957 if(avctx
->rc_max_rate
&& avctx
->rc_max_rate
< avctx
->bit_rate
){
958 av_log(avctx
, AV_LOG_INFO
, "bitrate above max bitrate\n");
962 if( s
->avctx
->rc_max_rate
&& s
->avctx
->rc_min_rate
== s
->avctx
->rc_max_rate
963 && (s
->codec_id
== CODEC_ID_MPEG1VIDEO
|| s
->codec_id
== CODEC_ID_MPEG2VIDEO
)
964 && 90000LL * (avctx
->rc_buffer_size
-1) > s
->avctx
->rc_max_rate
*0xFFFFLL
){
966 av_log(avctx
, AV_LOG_INFO
, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the specified vbv buffer is too large for the given bitrate!\n");
969 if((s
->flags
& CODEC_FLAG_4MV
) && s
->codec_id
!= CODEC_ID_MPEG4
970 && s
->codec_id
!= CODEC_ID_H263
&& s
->codec_id
!= CODEC_ID_H263P
&& s
->codec_id
!= CODEC_ID_FLV1
){
971 av_log(avctx
, AV_LOG_ERROR
, "4MV not supported by codec\n");
975 if(s
->obmc
&& s
->avctx
->mb_decision
!= FF_MB_DECISION_SIMPLE
){
976 av_log(avctx
, AV_LOG_ERROR
, "OBMC is only supported with simple mb decission\n");
980 if(s
->obmc
&& s
->codec_id
!= CODEC_ID_H263
&& s
->codec_id
!= CODEC_ID_H263P
){
981 av_log(avctx
, AV_LOG_ERROR
, "OBMC is only supported with H263(+)\n");
985 if(s
->quarter_sample
&& s
->codec_id
!= CODEC_ID_MPEG4
){
986 av_log(avctx
, AV_LOG_ERROR
, "qpel not supported by codec\n");
990 if(s
->data_partitioning
&& s
->codec_id
!= CODEC_ID_MPEG4
){
991 av_log(avctx
, AV_LOG_ERROR
, "data partitioning not supported by codec\n");
995 if(s
->max_b_frames
&& s
->codec_id
!= CODEC_ID_MPEG4
&& s
->codec_id
!= CODEC_ID_MPEG1VIDEO
&& s
->codec_id
!= CODEC_ID_MPEG2VIDEO
){
996 av_log(avctx
, AV_LOG_ERROR
, "b frames not supported by codec\n");
1000 if((s
->flags
& (CODEC_FLAG_INTERLACED_DCT
|CODEC_FLAG_INTERLACED_ME
|CODEC_FLAG_ALT_SCAN
))
1001 && s
->codec_id
!= CODEC_ID_MPEG4
&& s
->codec_id
!= CODEC_ID_MPEG2VIDEO
){
1002 av_log(avctx
, AV_LOG_ERROR
, "interlacing not supported by codec\n");
1006 if(s
->mpeg_quant
&& s
->codec_id
!= CODEC_ID_MPEG4
){ //FIXME mpeg2 uses that too
1007 av_log(avctx
, AV_LOG_ERROR
, "mpeg2 style quantization not supporetd by codec\n");
1011 if((s
->flags
& CODEC_FLAG_CBP_RD
) && !(s
->flags
& CODEC_FLAG_TRELLIS_QUANT
)){
1012 av_log(avctx
, AV_LOG_ERROR
, "CBP RD needs trellis quant\n");
1016 if((s
->flags
& CODEC_FLAG_QP_RD
) && s
->avctx
->mb_decision
!= FF_MB_DECISION_RD
){
1017 av_log(avctx
, AV_LOG_ERROR
, "QP RD needs mbd=2\n");
1021 if(s
->avctx
->scenechange_threshold
< 1000000000 && (s
->flags
& CODEC_FLAG_CLOSED_GOP
)){
1022 av_log(avctx
, AV_LOG_ERROR
, "closed gop with scene change detection arent supported yet\n");
1026 if(s
->avctx
->thread_count
> 1 && s
->codec_id
!= CODEC_ID_MPEG4
1027 && s
->codec_id
!= CODEC_ID_MPEG1VIDEO
&& s
->codec_id
!= CODEC_ID_MPEG2VIDEO
1028 && (s
->codec_id
!= CODEC_ID_H263P
|| !(s
->flags
& CODEC_FLAG_H263P_SLICE_STRUCT
))){
1029 av_log(avctx
, AV_LOG_ERROR
, "multi threaded encoding not supported by codec\n");
1033 if(s
->avctx
->thread_count
> 1)
1036 i
= ff_gcd(avctx
->frame_rate
, avctx
->frame_rate_base
);
1038 av_log(avctx
, AV_LOG_INFO
, "removing common factors from framerate\n");
1039 avctx
->frame_rate
/= i
;
1040 avctx
->frame_rate_base
/= i
;
1044 if(s
->codec_id
==CODEC_ID_MJPEG
){
1045 s
->intra_quant_bias
= 1<<(QUANT_BIAS_SHIFT
-1); //(a + x/2)/x
1046 s
->inter_quant_bias
= 0;
1047 }else if(s
->mpeg_quant
|| s
->codec_id
==CODEC_ID_MPEG1VIDEO
|| s
->codec_id
==CODEC_ID_MPEG2VIDEO
){
1048 s
->intra_quant_bias
= 3<<(QUANT_BIAS_SHIFT
-3); //(a + x*3/8)/x
1049 s
->inter_quant_bias
= 0;
1051 s
->intra_quant_bias
=0;
1052 s
->inter_quant_bias
=-(1<<(QUANT_BIAS_SHIFT
-2)); //(a - x/4)/x
1055 if(avctx
->intra_quant_bias
!= FF_DEFAULT_QUANT_BIAS
)
1056 s
->intra_quant_bias
= avctx
->intra_quant_bias
;
1057 if(avctx
->inter_quant_bias
!= FF_DEFAULT_QUANT_BIAS
)
1058 s
->inter_quant_bias
= avctx
->inter_quant_bias
;
1060 avcodec_get_chroma_sub_sample(avctx
->pix_fmt
, &chroma_h_shift
, &chroma_v_shift
);
1062 av_reduce(&s
->time_increment_resolution
, &dummy
, s
->avctx
->frame_rate
, s
->avctx
->frame_rate_base
, (1<<16)-1);
1063 s
->time_increment_bits
= av_log2(s
->time_increment_resolution
- 1) + 1;
1065 switch(avctx
->codec
->id
) {
1066 case CODEC_ID_MPEG1VIDEO
:
1067 s
->out_format
= FMT_MPEG1
;
1068 s
->low_delay
= 0; //s->max_b_frames ? 0 : 1;
1069 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
1071 case CODEC_ID_MPEG2VIDEO
:
1072 s
->out_format
= FMT_MPEG1
;
1073 s
->low_delay
= 0; //s->max_b_frames ? 0 : 1;
1074 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
1077 case CODEC_ID_LJPEG
:
1078 case CODEC_ID_MJPEG
:
1079 s
->out_format
= FMT_MJPEG
;
1080 s
->intra_only
= 1; /* force intra only for jpeg */
1081 s
->mjpeg_write_tables
= 1; /* write all tables */
1082 s
->mjpeg_data_only_frames
= 0; /* write all the needed headers */
1083 s
->mjpeg_vsample
[0] = 1<<chroma_v_shift
;
1084 s
->mjpeg_vsample
[1] = 1;
1085 s
->mjpeg_vsample
[2] = 1;
1086 s
->mjpeg_hsample
[0] = 1<<chroma_h_shift
;
1087 s
->mjpeg_hsample
[1] = 1;
1088 s
->mjpeg_hsample
[2] = 1;
1089 if (mjpeg_init(s
) < 0)
1096 s
->out_format
= FMT_H261
;
1101 if (h263_get_picture_format(s
->width
, s
->height
) == 7) {
1102 av_log(avctx
, AV_LOG_INFO
, "Input picture size isn't suitable for h263 codec! try h263+\n");
1105 s
->out_format
= FMT_H263
;
1106 s
->obmc
= (avctx
->flags
& CODEC_FLAG_OBMC
) ?
1:0;
1110 case CODEC_ID_H263P
:
1111 s
->out_format
= FMT_H263
;
1114 s
->umvplus
= (avctx
->flags
& CODEC_FLAG_H263P_UMV
) ?
1:0;
1115 s
->h263_aic
= (avctx
->flags
& CODEC_FLAG_H263P_AIC
) ?
1:0;
1116 s
->modified_quant
= s
->h263_aic
;
1117 s
->alt_inter_vlc
= (avctx
->flags
& CODEC_FLAG_H263P_AIV
) ?
1:0;
1118 s
->obmc
= (avctx
->flags
& CODEC_FLAG_OBMC
) ?
1:0;
1119 s
->loop_filter
= (avctx
->flags
& CODEC_FLAG_LOOP_FILTER
) ?
1:0;
1120 s
->unrestricted_mv
= s
->obmc
|| s
->loop_filter
|| s
->umvplus
;
1121 s
->h263_slice_structured
= (s
->flags
& CODEC_FLAG_H263P_SLICE_STRUCT
) ?
1:0;
1124 /* These are just to be sure */
1129 s
->out_format
= FMT_H263
;
1130 s
->h263_flv
= 2; /* format = 1; 11-bit codes */
1131 s
->unrestricted_mv
= 1;
1132 s
->rtp_mode
=0; /* don't allow GOB */
1137 s
->out_format
= FMT_H263
;
1142 s
->out_format
= FMT_H263
;
1145 s
->modified_quant
=1;
1149 s
->unrestricted_mv
= s
->obmc
|| s
->loop_filter
|| s
->umvplus
;
1151 case CODEC_ID_MPEG4
:
1152 s
->out_format
= FMT_H263
;
1154 s
->unrestricted_mv
= 1;
1155 s
->low_delay
= s
->max_b_frames ?
0 : 1;
1156 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
1158 case CODEC_ID_MSMPEG4V1
:
1159 s
->out_format
= FMT_H263
;
1160 s
->h263_msmpeg4
= 1;
1162 s
->unrestricted_mv
= 1;
1163 s
->msmpeg4_version
= 1;
1167 case CODEC_ID_MSMPEG4V2
:
1168 s
->out_format
= FMT_H263
;
1169 s
->h263_msmpeg4
= 1;
1171 s
->unrestricted_mv
= 1;
1172 s
->msmpeg4_version
= 2;
1176 case CODEC_ID_MSMPEG4V3
:
1177 s
->out_format
= FMT_H263
;
1178 s
->h263_msmpeg4
= 1;
1180 s
->unrestricted_mv
= 1;
1181 s
->msmpeg4_version
= 3;
1182 s
->flipflop_rounding
=1;
1187 s
->out_format
= FMT_H263
;
1188 s
->h263_msmpeg4
= 1;
1190 s
->unrestricted_mv
= 1;
1191 s
->msmpeg4_version
= 4;
1192 s
->flipflop_rounding
=1;
1197 s
->out_format
= FMT_H263
;
1198 s
->h263_msmpeg4
= 1;
1200 s
->unrestricted_mv
= 1;
1201 s
->msmpeg4_version
= 5;
1202 s
->flipflop_rounding
=1;
1211 avctx
->has_b_frames
= !s
->low_delay
;
1216 if (MPV_common_init(s
) < 0)
1219 if(s
->modified_quant
)
1220 s
->chroma_qscale_table
= ff_h263_chroma_qscale_table
;
1221 s
->progressive_frame
=
1222 s
->progressive_sequence
= !(avctx
->flags
& (CODEC_FLAG_INTERLACED_DCT
|CODEC_FLAG_INTERLACED_ME
));
1223 s
->quant_precision
=5;
1225 ff_set_cmp(&s
->dsp
, s
->dsp
.ildct_cmp
, s
->avctx
->ildct_cmp
);
1226 ff_set_cmp(&s
->dsp
, s
->dsp
.frame_skip_cmp
, s
->avctx
->frame_skip_cmp
);
1228 #ifdef CONFIG_ENCODERS
1230 if (s
->out_format
== FMT_H261
)
1231 ff_h261_encode_init(s
);
1232 if (s
->out_format
== FMT_H263
)
1233 h263_encode_init(s
);
1234 if(s
->msmpeg4_version
)
1235 ff_msmpeg4_encode_init(s
);
1237 if (s
->out_format
== FMT_MPEG1
)
1238 ff_mpeg1_encode_init(s
);
1243 int j
= s
->dsp
.idct_permutation
[i
];
1245 if(s
->codec_id
==CODEC_ID_MPEG4
&& s
->mpeg_quant
){
1246 s
->intra_matrix
[j
] = ff_mpeg4_default_intra_matrix
[i
];
1247 s
->inter_matrix
[j
] = ff_mpeg4_default_non_intra_matrix
[i
];
1248 }else if(s
->out_format
== FMT_H263
|| s
->out_format
== FMT_H261
){
1249 s
->intra_matrix
[j
] =
1250 s
->inter_matrix
[j
] = ff_mpeg1_default_non_intra_matrix
[i
];
1254 s
->intra_matrix
[j
] = ff_mpeg1_default_intra_matrix
[i
];
1255 s
->inter_matrix
[j
] = ff_mpeg1_default_non_intra_matrix
[i
];
1257 if(s
->avctx
->intra_matrix
)
1258 s
->intra_matrix
[j
] = s
->avctx
->intra_matrix
[i
];
1259 if(s
->avctx
->inter_matrix
)
1260 s
->inter_matrix
[j
] = s
->avctx
->inter_matrix
[i
];
1263 /* precompute matrix */
1264 /* for mjpeg, we do include qscale in the matrix */
1265 if (s
->out_format
!= FMT_MJPEG
) {
1266 convert_matrix(&s
->dsp
, s
->q_intra_matrix
, s
->q_intra_matrix16
,
1267 s
->intra_matrix
, s
->intra_quant_bias
, avctx
->qmin
, 31, 1);
1268 convert_matrix(&s
->dsp
, s
->q_inter_matrix
, s
->q_inter_matrix16
,
1269 s
->inter_matrix
, s
->inter_quant_bias
, avctx
->qmin
, 31, 0);
1272 if(ff_rate_control_init(s
) < 0)
1278 int MPV_encode_end(AVCodecContext
*avctx
)
1280 MpegEncContext
*s
= avctx
->priv_data
;
1286 ff_rate_control_uninit(s
);
1289 if (s
->out_format
== FMT_MJPEG
)
1292 av_freep(&avctx
->extradata
);
1297 #endif //CONFIG_ENCODERS
1299 void init_rl(RLTable
*rl
, int use_static
)
1301 int8_t max_level
[MAX_RUN
+1], max_run
[MAX_LEVEL
+1];
1302 uint8_t index_run
[MAX_RUN
+1];
1303 int last
, run
, level
, start
, end
, i
;
1305 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1306 if(use_static
&& rl
->max_level
[0])
1309 /* compute max_level[], max_run[] and index_run[] */
1310 for(last
=0;last
<2;last
++) {
1319 memset(max_level
, 0, MAX_RUN
+ 1);
1320 memset(max_run
, 0, MAX_LEVEL
+ 1);
1321 memset(index_run
, rl
->n
, MAX_RUN
+ 1);
1322 for(i
=start
;i
<end
;i
++) {
1323 run
= rl
->table_run
[i
];
1324 level
= rl
->table_level
[i
];
1325 if (index_run
[run
] == rl
->n
)
1327 if (level
> max_level
[run
])
1328 max_level
[run
] = level
;
1329 if (run
> max_run
[level
])
1330 max_run
[level
] = run
;
1333 rl
->max_level
[last
] = av_mallocz_static(MAX_RUN
+ 1);
1335 rl
->max_level
[last
] = av_malloc(MAX_RUN
+ 1);
1336 memcpy(rl
->max_level
[last
], max_level
, MAX_RUN
+ 1);
1338 rl
->max_run
[last
] = av_mallocz_static(MAX_LEVEL
+ 1);
1340 rl
->max_run
[last
] = av_malloc(MAX_LEVEL
+ 1);
1341 memcpy(rl
->max_run
[last
], max_run
, MAX_LEVEL
+ 1);
1343 rl
->index_run
[last
] = av_mallocz_static(MAX_RUN
+ 1);
1345 rl
->index_run
[last
] = av_malloc(MAX_RUN
+ 1);
1346 memcpy(rl
->index_run
[last
], index_run
, MAX_RUN
+ 1);
/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    for (i = 0; i < w; i++) {
        /* top and bottom */
        memcpy(buf - (i + 1) * wrap, buf, width);
        memcpy(last_line + (i + 1) * wrap, last_line, width);
    }
    /* left and right */
    ptr = buf;
    for (i = 0; i < height; i++) {
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
        ptr += wrap;
    }
    /* corners */
    for (i = 0; i < w; i++) {
        memset(buf - (i + 1) * wrap - w, buf[0], w);                       /* top left */
        memset(buf - (i + 1) * wrap + width, buf[width-1], w);             /* top right */
        memset(last_line + (i + 1) * wrap - w, last_line[0], w);           /* bottom left */
        memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
    }
}
1379 int ff_find_unused_picture(MpegEncContext
*s
, int shared
){
1383 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1384 if(s
->picture
[i
].data
[0]==NULL
&& s
->picture
[i
].type
==0) return i
;
1387 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1388 if(s
->picture
[i
].data
[0]==NULL
&& s
->picture
[i
].type
!=0) return i
; //FIXME
1390 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1391 if(s
->picture
[i
].data
[0]==NULL
) return i
;
1399 static void update_noise_reduction(MpegEncContext
*s
){
1402 for(intra
=0; intra
<2; intra
++){
1403 if(s
->dct_count
[intra
] > (1<<16)){
1404 for(i
=0; i
<64; i
++){
1405 s
->dct_error_sum
[intra
][i
] >>=1;
1407 s
->dct_count
[intra
] >>= 1;
1410 for(i
=0; i
<64; i
++){
1411 s
->dct_offset
[intra
][i
]= (s
->avctx
->noise_reduction
* s
->dct_count
[intra
] + s
->dct_error_sum
[intra
][i
]/2) / (s
->dct_error_sum
[intra
][i
]+1);
1417 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1419 int MPV_frame_start(MpegEncContext
*s
, AVCodecContext
*avctx
)
1425 assert(s
->last_picture_ptr
==NULL
|| s
->out_format
!= FMT_H264
|| s
->codec_id
== CODEC_ID_SVQ3
);
1427 /* mark&release old frames */
1428 if (s
->pict_type
!= B_TYPE
&& s
->last_picture_ptr
&& s
->last_picture_ptr
!= s
->next_picture_ptr
&& s
->last_picture_ptr
->data
[0]) {
1429 avctx
->release_buffer(avctx
, (AVFrame
*)s
->last_picture_ptr
);
1431 /* release forgotten pictures */
1432 /* if(mpeg124/h263) */
1434 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1435 if(s
->picture
[i
].data
[0] && &s
->picture
[i
] != s
->next_picture_ptr
&& s
->picture
[i
].reference
){
1436 av_log(avctx
, AV_LOG_ERROR
, "releasing zombie picture\n");
1437 avctx
->release_buffer(avctx
, (AVFrame
*)&s
->picture
[i
]);
1444 /* release non refernce frames */
1445 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1446 if(s
->picture
[i
].data
[0] && !s
->picture
[i
].reference
/*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1447 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)&s
->picture
[i
]);
1451 if(s
->current_picture_ptr
&& s
->current_picture_ptr
->data
[0]==NULL
)
1452 pic
= (AVFrame
*)s
->current_picture_ptr
; //we allready have a unused image (maybe it was set before reading the header)
1454 i
= ff_find_unused_picture(s
, 0);
1455 pic
= (AVFrame
*)&s
->picture
[i
];
1458 pic
->reference
= s
->pict_type
!= B_TYPE
&& !s
->dropable ?
3 : 0;
1460 pic
->coded_picture_number
= s
->coded_picture_number
++;
1462 if( alloc_picture(s
, (Picture
*)pic
, 0) < 0)
1465 s
->current_picture_ptr
= (Picture
*)pic
;
1466 s
->current_picture_ptr
->top_field_first
= s
->top_field_first
; //FIXME use only the vars from current_pic
1467 s
->current_picture_ptr
->interlaced_frame
= !s
->progressive_frame
&& !s
->progressive_sequence
;
1470 s
->current_picture_ptr
->pict_type
= s
->pict_type
;
1471 // if(s->flags && CODEC_FLAG_QSCALE)
1472 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1473 s
->current_picture_ptr
->key_frame
= s
->pict_type
== I_TYPE
;
1475 copy_picture(&s
->current_picture
, s
->current_picture_ptr
);
1477 if(s
->out_format
!= FMT_H264
|| s
->codec_id
== CODEC_ID_SVQ3
){
1478 if (s
->pict_type
!= B_TYPE
) {
1479 s
->last_picture_ptr
= s
->next_picture_ptr
;
1481 s
->next_picture_ptr
= s
->current_picture_ptr
;
1483 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1484 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
1485 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
1486 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
1487 s->pict_type, s->dropable);*/
1489 if(s
->last_picture_ptr
) copy_picture(&s
->last_picture
, s
->last_picture_ptr
);
1490 if(s
->next_picture_ptr
) copy_picture(&s
->next_picture
, s
->next_picture_ptr
);
1492 if(s
->pict_type
!= I_TYPE
&& (s
->last_picture_ptr
==NULL
|| s
->last_picture_ptr
->data
[0]==NULL
)){
1493 av_log(avctx
, AV_LOG_ERROR
, "warning: first frame is no keyframe\n");
1494 assert(s
->pict_type
!= B_TYPE
); //these should have been dropped if we dont have a reference
1498 assert(s
->pict_type
== I_TYPE
|| (s
->last_picture_ptr
&& s
->last_picture_ptr
->data
[0]));
1500 if(s
->picture_structure
!=PICT_FRAME
){
1503 if(s
->picture_structure
== PICT_BOTTOM_FIELD
){
1504 s
->current_picture
.data
[i
] += s
->current_picture
.linesize
[i
];
1506 s
->current_picture
.linesize
[i
] *= 2;
1507 s
->last_picture
.linesize
[i
] *=2;
1508 s
->next_picture
.linesize
[i
] *=2;
1513 s
->hurry_up
= s
->avctx
->hurry_up
;
1514 s
->error_resilience
= avctx
->error_resilience
;
1516 /* set dequantizer, we cant do it during init as it might change for mpeg4
1517 and we cant do it in the header decode as init isnt called for mpeg4 there yet */
1518 if(s
->mpeg_quant
|| s
->codec_id
== CODEC_ID_MPEG2VIDEO
){
1519 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg2_intra
;
1520 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg2_inter
;
1521 }else if(s
->out_format
== FMT_H263
|| s
->out_format
== FMT_H261
){
1522 s
->dct_unquantize_intra
= s
->dct_unquantize_h263_intra
;
1523 s
->dct_unquantize_inter
= s
->dct_unquantize_h263_inter
;
1525 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg1_intra
;
1526 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg1_inter
;
1529 if(s
->dct_error_sum
){
1530 assert(s
->avctx
->noise_reduction
&& s
->encoding
);
1532 update_noise_reduction(s
);
1536 if(s
->avctx
->xvmc_acceleration
)
1537 return XVMC_field_start(s
, avctx
);
1542 /* generic function for encode/decode called after a frame has been coded/decoded */
1543 void MPV_frame_end(MpegEncContext
*s
)
1546 /* draw edge for correct motion prediction if outside */
1548 //just to make sure that all data is rendered.
1549 if(s
->avctx
->xvmc_acceleration
){
1553 if(s
->unrestricted_mv
&& s
->pict_type
!= B_TYPE
&& !s
->intra_only
&& !(s
->flags
&CODEC_FLAG_EMU_EDGE
)) {
1554 draw_edges(s
->current_picture
.data
[0], s
->linesize
, s
->h_edge_pos
, s
->v_edge_pos
, EDGE_WIDTH
);
1555 draw_edges(s
->current_picture
.data
[1], s
->uvlinesize
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1, EDGE_WIDTH
/2);
1556 draw_edges(s
->current_picture
.data
[2], s
->uvlinesize
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1, EDGE_WIDTH
/2);
1560 s
->last_pict_type
= s
->pict_type
;
1561 if(s
->pict_type
!=B_TYPE
){
1562 s
->last_non_b_pict_type
= s
->pict_type
;
1565 /* copy back current_picture variables */
1566 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1567 if(s
->picture
[i
].data
[0] == s
->current_picture
.data
[0]){
1568 s
->picture
[i
]= s
->current_picture
;
1572 assert(i
<MAX_PICTURE_COUNT
);
1576 /* release non refernce frames */
1577 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1578 if(s
->picture
[i
].data
[0] && !s
->picture
[i
].reference
/*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1579 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)&s
->picture
[i
]);
1583 // clear copies, to avoid confusion
1585 memset(&s
->last_picture
, 0, sizeof(Picture
));
1586 memset(&s
->next_picture
, 0, sizeof(Picture
));
1587 memset(&s
->current_picture
, 0, sizeof(Picture
));
1589 s
->avctx
->coded_frame
= (AVFrame
*)s
->current_picture_ptr
;
/**
 * Draws a line from (sx, sy) -> (ex, ey) with 16.16 fixed-point
 * anti-aliasing along the minor axis.
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the line
 */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int x, y, fr, f;

    sx= clip(sx, 0, w-1);
    sy= clip(sy, 0, h-1);
    ex= clip(ex, 0, w-1);
    ey= clip(ey, 0, h-1);

    buf[sy*stride + sx]+= color;

    if(ABS(ex - sx) > ABS(ey - sy)){
        /* mostly horizontal: step along x, blend between two rows */
        if(sx > ex){
            int t;
            t=sx; sx=ex; ex=t;
            t=sy; sy=ey; ey=t;
        }
        buf+= sx + sy*stride;
        ex-= sx;
        f= ((ey-sy)<<16)/ex;
        for(x= 0; x <= ex; x++){
            y = (x*f)>>16;
            fr= (x*f)&0xFFFF;
            buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
            buf[(y+1)*stride + x]+= (color*         fr )>>16;
        }
    }else{
        /* mostly vertical: step along y, blend between two columns */
        if(sy > ey){
            int t;
            t=sx; sx=ex; ex=t;
            t=sy; sy=ey; ey=t;
        }
        buf+= sx + sy*stride;
        ey-= sy;
        if(ey) f= ((ex-sx)<<16)/ey;
        else   f= 0;
        for(y= 0; y <= ey; y++){
            x = (y*f)>>16;
            fr= (y*f)&0xFFFF;
            buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
            buf[y*stride + x+1]+= (color*         fr )>>16;
        }
    }
}
/**
 * Draws an arrow from (sx, sy) -> (ex, ey), with a two-stroke head at the
 * start point when the arrow is long enough to show one.
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int dx, dy;

    sx= clip(sx, -100, w+100);
    sy= clip(sy, -100, h+100);
    ex= clip(ex, -100, w+100);
    ey= clip(ey, -100, h+100);

    dx= ex - sx;
    dy= ey - sy;

    if(dx*dx + dy*dy > 3*3){
        /* head strokes: +-45 degree rotations of the direction vector */
        int rx=  dx + dy;
        int ry= -dx + dy;
        int length= ff_sqrt((rx*rx + ry*ry)<<8);

        //FIXME subpixel accuracy
        rx= ROUNDED_DIV(rx*3<<4, length);
        ry= ROUNDED_DIV(ry*3<<4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1675 * prints debuging info for the given picture.
1677 void ff_print_debug_info(MpegEncContext
*s
, AVFrame
*pict
){
1679 if(!pict
|| !pict
->mb_type
) return;
1681 if(s
->avctx
->debug
&(FF_DEBUG_SKIP
| FF_DEBUG_QP
| FF_DEBUG_MB_TYPE
)){
1684 av_log(s
->avctx
,AV_LOG_DEBUG
,"New frame, type: ");
1685 switch (pict
->pict_type
) {
1686 case FF_I_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"I\n"); break;
1687 case FF_P_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"P\n"); break;
1688 case FF_B_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"B\n"); break;
1689 case FF_S_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"S\n"); break;
1690 case FF_SI_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"SI\n"); break;
1691 case FF_SP_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"SP\n"); break;
1693 for(y
=0; y
<s
->mb_height
; y
++){
1694 for(x
=0; x
<s
->mb_width
; x
++){
1695 if(s
->avctx
->debug
&FF_DEBUG_SKIP
){
1696 int count
= s
->mbskip_table
[x
+ y
*s
->mb_stride
];
1697 if(count
>9) count
=9;
1698 av_log(s
->avctx
, AV_LOG_DEBUG
, "%1d", count
);
1700 if(s
->avctx
->debug
&FF_DEBUG_QP
){
1701 av_log(s
->avctx
, AV_LOG_DEBUG
, "%2d", pict
->qscale_table
[x
+ y
*s
->mb_stride
]);
1703 if(s
->avctx
->debug
&FF_DEBUG_MB_TYPE
){
1704 int mb_type
= pict
->mb_type
[x
+ y
*s
->mb_stride
];
1705 //Type & MV direction
1707 av_log(s
->avctx
, AV_LOG_DEBUG
, "P");
1708 else if(IS_INTRA(mb_type
) && IS_ACPRED(mb_type
))
1709 av_log(s
->avctx
, AV_LOG_DEBUG
, "A");
1710 else if(IS_INTRA4x4(mb_type
))
1711 av_log(s
->avctx
, AV_LOG_DEBUG
, "i");
1712 else if(IS_INTRA16x16(mb_type
))
1713 av_log(s
->avctx
, AV_LOG_DEBUG
, "I");
1714 else if(IS_DIRECT(mb_type
) && IS_SKIP(mb_type
))
1715 av_log(s
->avctx
, AV_LOG_DEBUG
, "d");
1716 else if(IS_DIRECT(mb_type
))
1717 av_log(s
->avctx
, AV_LOG_DEBUG
, "D");
1718 else if(IS_GMC(mb_type
) && IS_SKIP(mb_type
))
1719 av_log(s
->avctx
, AV_LOG_DEBUG
, "g");
1720 else if(IS_GMC(mb_type
))
1721 av_log(s
->avctx
, AV_LOG_DEBUG
, "G");
1722 else if(IS_SKIP(mb_type
))
1723 av_log(s
->avctx
, AV_LOG_DEBUG
, "S");
1724 else if(!USES_LIST(mb_type
, 1))
1725 av_log(s
->avctx
, AV_LOG_DEBUG
, ">");
1726 else if(!USES_LIST(mb_type
, 0))
1727 av_log(s
->avctx
, AV_LOG_DEBUG
, "<");
1729 assert(USES_LIST(mb_type
, 0) && USES_LIST(mb_type
, 1));
1730 av_log(s
->avctx
, AV_LOG_DEBUG
, "X");
1735 av_log(s
->avctx
, AV_LOG_DEBUG
, "+");
1736 else if(IS_16X8(mb_type
))
1737 av_log(s
->avctx
, AV_LOG_DEBUG
, "-");
1738 else if(IS_8X16(mb_type
))
1739 av_log(s
->avctx
, AV_LOG_DEBUG
, "¦");
1740 else if(IS_INTRA(mb_type
) || IS_16X16(mb_type
))
1741 av_log(s
->avctx
, AV_LOG_DEBUG
, " ");
1743 av_log(s
->avctx
, AV_LOG_DEBUG
, "?");
1746 if(IS_INTERLACED(mb_type
) && s
->codec_id
== CODEC_ID_H264
)
1747 av_log(s
->avctx
, AV_LOG_DEBUG
, "=");
1749 av_log(s
->avctx
, AV_LOG_DEBUG
, " ");
1751 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1753 av_log(s
->avctx
, AV_LOG_DEBUG
, "\n");
1757 if((s
->avctx
->debug
&(FF_DEBUG_VIS_QP
|FF_DEBUG_VIS_MB_TYPE
)) || (s
->avctx
->debug_mv
)){
1758 const int shift
= 1 + s
->quarter_sample
;
1762 int h_chroma_shift
, v_chroma_shift
;
1763 const int width
= s
->avctx
->width
;
1764 const int height
= s
->avctx
->height
;
1765 const int mv_sample_log2
= 4 - pict
->motion_subsample_log2
;
1766 const int mv_stride
= (s
->mb_width
<< mv_sample_log2
) + 1;
1767 s
->low_delay
=0; //needed to see the vectors without trashing the buffers
1769 avcodec_get_chroma_sub_sample(s
->avctx
->pix_fmt
, &h_chroma_shift
, &v_chroma_shift
);
1771 memcpy(s
->visualization_buffer
[i
], pict
->data
[i
], (i
==0) ? pict
->linesize
[i
]*height
:pict
->linesize
[i
]*height
>> v_chroma_shift
);
1772 pict
->data
[i
]= s
->visualization_buffer
[i
];
1774 pict
->type
= FF_BUFFER_TYPE_COPY
;
1777 for(mb_y
=0; mb_y
<s
->mb_height
; mb_y
++){
1779 for(mb_x
=0; mb_x
<s
->mb_width
; mb_x
++){
1780 const int mb_index
= mb_x
+ mb_y
*s
->mb_stride
;
1781 if((s
->avctx
->debug_mv
) && pict
->motion_val
){
1783 for(type
=0; type
<3; type
++){
1786 case 0: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_P_FOR
)) || (pict
->pict_type
!=FF_P_TYPE
))
1790 case 1: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_B_FOR
)) || (pict
->pict_type
!=FF_B_TYPE
))
1794 case 2: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_B_BACK
)) || (pict
->pict_type
!=FF_B_TYPE
))
1799 if(!USES_LIST(pict
->mb_type
[mb_index
], direction
))
1802 if(IS_8X8(pict
->mb_type
[mb_index
])){
1805 int sx
= mb_x
*16 + 4 + 8*(i
&1);
1806 int sy
= mb_y
*16 + 4 + 8*(i
>>1);
1807 int xy
= (mb_x
*2 + (i
&1) + (mb_y
*2 + (i
>>1))*mv_stride
) << mv_sample_log2
-1;
1808 int mx
= (pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1809 int my
= (pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1810 draw_arrow(ptr
, sx
, sy
, mx
, my
, width
, height
, s
->linesize
, 100);
1812 }else if(IS_16X8(pict
->mb_type
[mb_index
])){
1816 int sy
=mb_y
*16 + 4 + 8*i
;
1817 int xy
= (mb_x
*2 + (mb_y
*2 + i
)*mv_stride
) << mv_sample_log2
-1;
1818 int mx
=(pict
->motion_val
[direction
][xy
][0]>>shift
);
1819 int my
=(pict
->motion_val
[direction
][xy
][1]>>shift
);
1821 if(IS_INTERLACED(pict
->mb_type
[mb_index
]))
1824 draw_arrow(ptr
, sx
, sy
, mx
+sx
, my
+sy
, width
, height
, s
->linesize
, 100);
1826 }else if(IS_8X16(pict
->mb_type
[mb_index
])){
1829 int sx
=mb_x
*16 + 4 + 8*i
;
1831 int xy
= (mb_x
*2 + i
+ mb_y
*2*mv_stride
) << mv_sample_log2
-1;
1832 int mx
=(pict
->motion_val
[direction
][xy
][0]>>shift
);
1833 int my
=(pict
->motion_val
[direction
][xy
][1]>>shift
);
1835 if(IS_INTERLACED(pict
->mb_type
[mb_index
]))
1838 draw_arrow(ptr
, sx
, sy
, mx
+sx
, my
+sy
, width
, height
, s
->linesize
, 100);
1841 int sx
= mb_x
*16 + 8;
1842 int sy
= mb_y
*16 + 8;
1843 int xy
= (mb_x
+ mb_y
*mv_stride
) << mv_sample_log2
;
1844 int mx
= (pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1845 int my
= (pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1846 draw_arrow(ptr
, sx
, sy
, mx
, my
, width
, height
, s
->linesize
, 100);
1850 if((s
->avctx
->debug
&FF_DEBUG_VIS_QP
) && pict
->motion_val
){
1851 uint64_t c
= (pict
->qscale_table
[mb_index
]*128/31) * 0x0101010101010101ULL
;
1854 *(uint64_t*)(pict
->data
[1] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[1])= c
;
1855 *(uint64_t*)(pict
->data
[2] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[2])= c
;
1858 if((s
->avctx
->debug
&FF_DEBUG_VIS_MB_TYPE
) && pict
->motion_val
){
1859 int mb_type
= pict
->mb_type
[mb_index
];
1862 #define COLOR(theta, r)\
1863 u= (int)(128 + r*cos(theta*3.141592/180));\
1864 v= (int)(128 + r*sin(theta*3.141592/180));
1868 if(IS_PCM(mb_type
)){
1870 }else if((IS_INTRA(mb_type
) && IS_ACPRED(mb_type
)) || IS_INTRA16x16(mb_type
)){
1872 }else if(IS_INTRA4x4(mb_type
)){
1874 }else if(IS_DIRECT(mb_type
) && IS_SKIP(mb_type
)){
1876 }else if(IS_DIRECT(mb_type
)){
1878 }else if(IS_GMC(mb_type
) && IS_SKIP(mb_type
)){
1880 }else if(IS_GMC(mb_type
)){
1882 }else if(IS_SKIP(mb_type
)){
1884 }else if(!USES_LIST(mb_type
, 1)){
1886 }else if(!USES_LIST(mb_type
, 0)){
1889 assert(USES_LIST(mb_type
, 0) && USES_LIST(mb_type
, 1));
1893 u
*= 0x0101010101010101ULL
;
1894 v
*= 0x0101010101010101ULL
;
1896 *(uint64_t*)(pict
->data
[1] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[1])= u
;
1897 *(uint64_t*)(pict
->data
[2] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[2])= v
;
1901 if(IS_8X8(mb_type
) || IS_16X8(mb_type
)){
1902 *(uint64_t*)(pict
->data
[0] + 16*mb_x
+ 0 + (16*mb_y
+ 8)*pict
->linesize
[0])^= 0x8080808080808080ULL
;
1903 *(uint64_t*)(pict
->data
[0] + 16*mb_x
+ 8 + (16*mb_y
+ 8)*pict
->linesize
[0])^= 0x8080808080808080ULL
;
1905 if(IS_8X8(mb_type
) || IS_8X16(mb_type
)){
1907 pict
->data
[0][16*mb_x
+ 8 + (16*mb_y
+ y
)*pict
->linesize
[0]]^= 0x80;
1910 if(IS_INTERLACED(mb_type
) && s
->codec_id
== CODEC_ID_H264
){
1914 s
->mbskip_table
[mb_index
]=0;
1920 #ifdef CONFIG_ENCODERS
/**
 * Sum of absolute errors of a 16x16 block against a constant reference value.
 * @param src top-left of the 16x16 block
 * @param ref value every pixel is compared against
 * @param stride line size of src
 */
static int get_sae(uint8_t *src, int ref, int stride){
    int x, y;
    int acc = 0;

    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++) {
            int d = src[x + y*stride] - ref;
            acc += d >= 0 ? d : -d;
        }
    }
    return acc;
}
1935 static int get_intra_count(MpegEncContext
*s
, uint8_t *src
, uint8_t *ref
, int stride
){
1942 for(y
=0; y
<h
; y
+=16){
1943 for(x
=0; x
<w
; x
+=16){
1944 int offset
= x
+ y
*stride
;
1945 int sad
= s
->dsp
.sad
[0](NULL
, src
+ offset
, ref
+ offset
, stride
, 16);
1946 int mean
= (s
->dsp
.pix_sum(src
+ offset
, stride
) + 128)>>8;
1947 int sae
= get_sae(src
+ offset
, mean
, stride
);
1949 acc
+= sae
+ 500 < sad
;
1956 static int load_input_picture(MpegEncContext
*s
, AVFrame
*pic_arg
){
1959 const int encoding_delay
= s
->max_b_frames
;
1963 if(encoding_delay
&& !(s
->flags
&CODEC_FLAG_INPUT_PRESERVED
)) direct
=0;
1964 if(pic_arg
->linesize
[0] != s
->linesize
) direct
=0;
1965 if(pic_arg
->linesize
[1] != s
->uvlinesize
) direct
=0;
1966 if(pic_arg
->linesize
[2] != s
->uvlinesize
) direct
=0;
1968 // av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1971 i
= ff_find_unused_picture(s
, 1);
1973 pic
= (AVFrame
*)&s
->picture
[i
];
1977 pic
->data
[i
]= pic_arg
->data
[i
];
1978 pic
->linesize
[i
]= pic_arg
->linesize
[i
];
1980 alloc_picture(s
, (Picture
*)pic
, 1);
1983 i
= ff_find_unused_picture(s
, 0);
1985 pic
= (AVFrame
*)&s
->picture
[i
];
1988 alloc_picture(s
, (Picture
*)pic
, 0);
1990 if( pic
->data
[0] + offset
== pic_arg
->data
[0]
1991 && pic
->data
[1] + offset
== pic_arg
->data
[1]
1992 && pic
->data
[2] + offset
== pic_arg
->data
[2]){
1995 int h_chroma_shift
, v_chroma_shift
;
1996 avcodec_get_chroma_sub_sample(s
->avctx
->pix_fmt
, &h_chroma_shift
, &v_chroma_shift
);
1999 int src_stride
= pic_arg
->linesize
[i
];
2000 int dst_stride
= i ? s
->uvlinesize
: s
->linesize
;
2001 int h_shift
= i ? h_chroma_shift
: 0;
2002 int v_shift
= i ? v_chroma_shift
: 0;
2003 int w
= s
->width
>>h_shift
;
2004 int h
= s
->height
>>v_shift
;
2005 uint8_t *src
= pic_arg
->data
[i
];
2006 uint8_t *dst
= pic
->data
[i
] + offset
;
2008 if(src_stride
==dst_stride
)
2009 memcpy(dst
, src
, src_stride
*h
);
2012 memcpy(dst
, src
, w
);
2020 copy_picture_attributes(s
, pic
, pic_arg
);
2022 pic
->display_picture_number
= s
->input_picture_number
++;
2024 if(pic
->pts
!= AV_NOPTS_VALUE
){
2025 if(s
->user_specified_pts
!= AV_NOPTS_VALUE
){
2026 int64_t time
= av_rescale(pic
->pts
, s
->avctx
->frame_rate
, s
->avctx
->frame_rate_base
*(int64_t)AV_TIME_BASE
);
2027 int64_t last
= av_rescale(s
->user_specified_pts
, s
->avctx
->frame_rate
, s
->avctx
->frame_rate_base
*(int64_t)AV_TIME_BASE
);
2030 av_log(s
->avctx
, AV_LOG_ERROR
, "Error, Invalid timestamp=%Ld, last=%Ld\n", pic
->pts
, s
->user_specified_pts
);
2034 s
->user_specified_pts
= pic
->pts
;
2036 if(s
->user_specified_pts
!= AV_NOPTS_VALUE
){
2037 s
->user_specified_pts
=
2038 pic
->pts
= s
->user_specified_pts
+ AV_TIME_BASE
*(int64_t)s
->avctx
->frame_rate_base
/ s
->avctx
->frame_rate
;
2039 av_log(s
->avctx
, AV_LOG_INFO
, "Warning: AVFrame.pts=? trying to guess (%Ld)\n", pic
->pts
);
2041 pic
->pts
= av_rescale(pic
->display_picture_number
*(int64_t)s
->avctx
->frame_rate_base
, AV_TIME_BASE
, s
->avctx
->frame_rate
);
2046 /* shift buffer entries */
2047 for(i
=1; i
<MAX_PICTURE_COUNT
/*s->encoding_delay+1*/; i
++)
2048 s
->input_picture
[i
-1]= s
->input_picture
[i
];
2050 s
->input_picture
[encoding_delay
]= (Picture
*)pic
;
2055 static int skip_check(MpegEncContext
*s
, Picture
*p
, Picture
*ref
){
2061 for(plane
=0; plane
<3; plane
++){
2062 const int stride
= p
->linesize
[plane
];
2063 const int bw
= plane ?
1 : 2;
2064 for(y
=0; y
<s
->mb_height
*bw
; y
++){
2065 for(x
=0; x
<s
->mb_width
*bw
; x
++){
2066 int v
= s
->dsp
.frame_skip_cmp
[1](s
, p
->data
[plane
] + 8*(x
+ y
*stride
), ref
->data
[plane
] + 8*(x
+ y
*stride
), stride
, 8);
2068 switch(s
->avctx
->frame_skip_exp
){
2069 case 0: score
= FFMAX(score
, v
); break;
2070 case 1: score
+= ABS(v
);break;
2071 case 2: score
+= v
*v
;break;
2072 case 3: score64
+= ABS(v
*v
*(int64_t)v
);break;
2073 case 4: score64
+= v
*v
*(int64_t)(v
*v
);break;
2079 if(score
) score64
= score
;
2081 if(score64
< s
->avctx
->frame_skip_threshold
)
2083 if(score64
< ((s
->avctx
->frame_skip_factor
* (int64_t)s
->lambda
)>>8))
2088 static void select_input_picture(MpegEncContext
*s
){
2091 for(i
=1; i
<MAX_PICTURE_COUNT
; i
++)
2092 s
->reordered_input_picture
[i
-1]= s
->reordered_input_picture
[i
];
2093 s
->reordered_input_picture
[MAX_PICTURE_COUNT
-1]= NULL
;
2095 /* set next picture types & ordering */
2096 if(s
->reordered_input_picture
[0]==NULL
&& s
->input_picture
[0]){
2097 if(/*s->picture_in_gop_number >= s->gop_size ||*/ s
->next_picture_ptr
==NULL
|| s
->intra_only
){
2098 s
->reordered_input_picture
[0]= s
->input_picture
[0];
2099 s
->reordered_input_picture
[0]->pict_type
= I_TYPE
;
2100 s
->reordered_input_picture
[0]->coded_picture_number
= s
->coded_picture_number
++;
2104 if(s
->avctx
->frame_skip_threshold
|| s
->avctx
->frame_skip_factor
){
2105 if(skip_check(s
, s
->input_picture
[0], s
->next_picture_ptr
)){
2106 //av_log(NULL, AV_LOG_DEBUG, "skip %p %Ld\n", s->input_picture[0]->data[0], s->input_picture[0]->pts);
2108 if(s
->input_picture
[0]->type
== FF_BUFFER_TYPE_SHARED
){
2110 s
->input_picture
[0]->data
[i
]= NULL
;
2111 s
->input_picture
[0]->type
= 0;
2113 assert( s
->input_picture
[0]->type
==FF_BUFFER_TYPE_USER
2114 || s
->input_picture
[0]->type
==FF_BUFFER_TYPE_INTERNAL
);
2116 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)s
->input_picture
[0]);
2123 if(s
->flags
&CODEC_FLAG_PASS2
){
2124 for(i
=0; i
<s
->max_b_frames
+1; i
++){
2125 int pict_num
= s
->input_picture
[0]->display_picture_number
+ i
;
2127 if(pict_num
>= s
->rc_context
.num_entries
)
2129 if(!s
->input_picture
[i
]){
2130 s
->rc_context
.entry
[pict_num
-1].new_pict_type
= P_TYPE
;
2134 s
->input_picture
[i
]->pict_type
=
2135 s
->rc_context
.entry
[pict_num
].new_pict_type
;
2139 if(s
->avctx
->b_frame_strategy
==0){
2140 b_frames
= s
->max_b_frames
;
2141 while(b_frames
&& !s
->input_picture
[b_frames
]) b_frames
--;
2142 }else if(s
->avctx
->b_frame_strategy
==1){
2143 for(i
=1; i
<s
->max_b_frames
+1; i
++){
2144 if(s
->input_picture
[i
] && s
->input_picture
[i
]->b_frame_score
==0){
2145 s
->input_picture
[i
]->b_frame_score
=
2146 get_intra_count(s
, s
->input_picture
[i
]->data
[0],
2147 s
->input_picture
[i
-1]->data
[0], s
->linesize
) + 1;
2150 for(i
=0; i
<s
->max_b_frames
; i
++){
2151 if(s
->input_picture
[i
]==NULL
|| s
->input_picture
[i
]->b_frame_score
- 1 > s
->mb_num
/40) break;
2154 b_frames
= FFMAX(0, i
-1);
2157 for(i
=0; i
<b_frames
+1; i
++){
2158 s
->input_picture
[i
]->b_frame_score
=0;
2161 av_log(s
->avctx
, AV_LOG_ERROR
, "illegal b frame strategy\n");
2166 //static int b_count=0;
2167 //b_count+= b_frames;
2168 //av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
2170 for(i
= b_frames
- 1; i
>=0; i
--){
2171 int type
= s
->input_picture
[i
]->pict_type
;
2172 if(type
&& type
!= B_TYPE
)
2175 if(s
->input_picture
[b_frames
]->pict_type
== B_TYPE
&& b_frames
== s
->max_b_frames
){
2176 av_log(s
->avctx
, AV_LOG_ERROR
, "warning, too many bframes in a row\n");
2179 if(s
->picture_in_gop_number
+ b_frames
>= s
->gop_size
){
2180 if((s
->flags2
& CODEC_FLAG2_STRICT_GOP
) && s
->gop_size
> s
->picture_in_gop_number
){
2181 b_frames
= s
->gop_size
- s
->picture_in_gop_number
- 1;
2183 if(s
->flags
& CODEC_FLAG_CLOSED_GOP
)
2185 s
->input_picture
[b_frames
]->pict_type
= I_TYPE
;
2189 if( (s
->flags
& CODEC_FLAG_CLOSED_GOP
)
2191 && s
->input_picture
[b_frames
]->pict_type
== I_TYPE
)
2194 s
->reordered_input_picture
[0]= s
->input_picture
[b_frames
];
2195 if(s
->reordered_input_picture
[0]->pict_type
!= I_TYPE
)
2196 s
->reordered_input_picture
[0]->pict_type
= P_TYPE
;
2197 s
->reordered_input_picture
[0]->coded_picture_number
= s
->coded_picture_number
++;
2198 for(i
=0; i
<b_frames
; i
++){
2199 s
->reordered_input_picture
[i
+1]= s
->input_picture
[i
];
2200 s
->reordered_input_picture
[i
+1]->pict_type
= B_TYPE
;
2201 s
->reordered_input_picture
[i
+1]->coded_picture_number
= s
->coded_picture_number
++;
2206 if(s
->reordered_input_picture
[0]){
2207 s
->reordered_input_picture
[0]->reference
= s
->reordered_input_picture
[0]->pict_type
!=B_TYPE ?
3 : 0;
2209 copy_picture(&s
->new_picture
, s
->reordered_input_picture
[0]);
2211 if(s
->reordered_input_picture
[0]->type
== FF_BUFFER_TYPE_SHARED
){
2212 // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable
2214 int i
= ff_find_unused_picture(s
, 0);
2215 Picture
*pic
= &s
->picture
[i
];
2217 /* mark us unused / free shared pic */
2219 s
->reordered_input_picture
[0]->data
[i
]= NULL
;
2220 s
->reordered_input_picture
[0]->type
= 0;
2222 pic
->reference
= s
->reordered_input_picture
[0]->reference
;
2224 alloc_picture(s
, pic
, 0);
2226 copy_picture_attributes(s
, (AVFrame
*)pic
, (AVFrame
*)s
->reordered_input_picture
[0]);
2228 s
->current_picture_ptr
= pic
;
2230 // input is not a shared pix -> reuse buffer for current_pix
2232 assert( s
->reordered_input_picture
[0]->type
==FF_BUFFER_TYPE_USER
2233 || s
->reordered_input_picture
[0]->type
==FF_BUFFER_TYPE_INTERNAL
);
2235 s
->current_picture_ptr
= s
->reordered_input_picture
[0];
2237 s
->new_picture
.data
[i
]+=16;
2240 copy_picture(&s
->current_picture
, s
->current_picture_ptr
);
2242 s
->picture_number
= s
->new_picture
.display_picture_number
;
2243 //printf("dpn:%d\n", s->picture_number);
2245 memset(&s
->new_picture
, 0, sizeof(Picture
));
2249 int MPV_encode_picture(AVCodecContext
*avctx
,
2250 unsigned char *buf
, int buf_size
, void *data
)
2252 MpegEncContext
*s
= avctx
->priv_data
;
2253 AVFrame
*pic_arg
= data
;
2254 int i
, stuffing_count
;
2256 if(avctx
->pix_fmt
!= PIX_FMT_YUV420P
){
2257 av_log(avctx
, AV_LOG_ERROR
, "this codec supports only YUV420P\n");
2261 for(i
=0; i
<avctx
->thread_count
; i
++){
2262 int start_y
= s
->thread_context
[i
]->start_mb_y
;
2263 int end_y
= s
->thread_context
[i
]-> end_mb_y
;
2264 int h
= s
->mb_height
;
2265 uint8_t *start
= buf
+ buf_size
*start_y
/h
;
2266 uint8_t *end
= buf
+ buf_size
* end_y
/h
;
2268 init_put_bits(&s
->thread_context
[i
]->pb
, start
, end
- start
);
2271 s
->picture_in_gop_number
++;
2273 if(load_input_picture(s
, pic_arg
) < 0)
2276 select_input_picture(s
);
2279 if(s
->new_picture
.data
[0]){
2280 s
->pict_type
= s
->new_picture
.pict_type
;
2282 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
2283 MPV_frame_start(s
, avctx
);
2285 encode_picture(s
, s
->picture_number
);
2287 avctx
->real_pict_num
= s
->picture_number
;
2288 avctx
->header_bits
= s
->header_bits
;
2289 avctx
->mv_bits
= s
->mv_bits
;
2290 avctx
->misc_bits
= s
->misc_bits
;
2291 avctx
->i_tex_bits
= s
->i_tex_bits
;
2292 avctx
->p_tex_bits
= s
->p_tex_bits
;
2293 avctx
->i_count
= s
->i_count
;
2294 avctx
->p_count
= s
->mb_num
- s
->i_count
- s
->skip_count
; //FIXME f/b_count in avctx
2295 avctx
->skip_count
= s
->skip_count
;
2299 if (s
->out_format
== FMT_MJPEG
)
2300 mjpeg_picture_trailer(s
);
2302 if(s
->flags
&CODEC_FLAG_PASS1
)
2303 ff_write_pass1_stats(s
);
2306 avctx
->error
[i
] += s
->current_picture_ptr
->error
[i
];
2309 flush_put_bits(&s
->pb
);
2310 s
->frame_bits
= put_bits_count(&s
->pb
);
2312 stuffing_count
= ff_vbv_update(s
, s
->frame_bits
);
2314 switch(s
->codec_id
){
2315 case CODEC_ID_MPEG1VIDEO
:
2316 case CODEC_ID_MPEG2VIDEO
:
2317 while(stuffing_count
--){
2318 put_bits(&s
->pb
, 8, 0);
2321 case CODEC_ID_MPEG4
:
2322 put_bits(&s
->pb
, 16, 0);
2323 put_bits(&s
->pb
, 16, 0x1C3);
2324 stuffing_count
-= 4;
2325 while(stuffing_count
--){
2326 put_bits(&s
->pb
, 8, 0xFF);
2330 av_log(s
->avctx
, AV_LOG_ERROR
, "vbv buffer overflow\n");
2332 flush_put_bits(&s
->pb
);
2333 s
->frame_bits
= put_bits_count(&s
->pb
);
2336 /* update mpeg1/2 vbv_delay for CBR */
2337 if(s
->avctx
->rc_max_rate
&& s
->avctx
->rc_min_rate
== s
->avctx
->rc_max_rate
&& s
->out_format
== FMT_MPEG1
2338 && 90000LL * (avctx
->rc_buffer_size
-1) <= s
->avctx
->rc_max_rate
*0xFFFFLL
){
2341 assert(s
->repeat_first_field
==0);
2343 vbv_delay
= lrintf(90000 * s
->rc_context
.buffer_index
/ s
->avctx
->rc_max_rate
);
2344 assert(vbv_delay
< 0xFFFF);
2346 s
->vbv_delay_ptr
[0] &= 0xF8;
2347 s
->vbv_delay_ptr
[0] |= vbv_delay
>>13;
2348 s
->vbv_delay_ptr
[1] = vbv_delay
>>5;
2349 s
->vbv_delay_ptr
[2] &= 0x07;
2350 s
->vbv_delay_ptr
[2] |= vbv_delay
<<3;
2352 s
->total_bits
+= s
->frame_bits
;
2353 avctx
->frame_bits
= s
->frame_bits
;
2355 assert((pbBufPtr(&s
->pb
) == s
->pb
.buf
));
2358 assert((s
->frame_bits
&7)==0);
2360 return s
->frame_bits
/8;
2363 #endif //CONFIG_ENCODERS
2365 static inline void gmc1_motion(MpegEncContext
*s
,
2366 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2367 uint8_t **ref_picture
)
2370 int offset
, src_x
, src_y
, linesize
, uvlinesize
;
2371 int motion_x
, motion_y
;
2374 motion_x
= s
->sprite_offset
[0][0];
2375 motion_y
= s
->sprite_offset
[0][1];
2376 src_x
= s
->mb_x
* 16 + (motion_x
>> (s
->sprite_warping_accuracy
+1));
2377 src_y
= s
->mb_y
* 16 + (motion_y
>> (s
->sprite_warping_accuracy
+1));
2378 motion_x
<<=(3-s
->sprite_warping_accuracy
);
2379 motion_y
<<=(3-s
->sprite_warping_accuracy
);
2380 src_x
= clip(src_x
, -16, s
->width
);
2381 if (src_x
== s
->width
)
2383 src_y
= clip(src_y
, -16, s
->height
);
2384 if (src_y
== s
->height
)
2387 linesize
= s
->linesize
;
2388 uvlinesize
= s
->uvlinesize
;
2390 ptr
= ref_picture
[0] + (src_y
* linesize
) + src_x
;
2392 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2393 if( (unsigned)src_x
>= s
->h_edge_pos
- 17
2394 || (unsigned)src_y
>= s
->v_edge_pos
- 17){
2395 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, linesize
, 17, 17, src_x
, src_y
, s
->h_edge_pos
, s
->v_edge_pos
);
2396 ptr
= s
->edge_emu_buffer
;
2400 if((motion_x
|motion_y
)&7){
2401 s
->dsp
.gmc1(dest_y
, ptr
, linesize
, 16, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2402 s
->dsp
.gmc1(dest_y
+8, ptr
+8, linesize
, 16, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2406 dxy
= ((motion_x
>>3)&1) | ((motion_y
>>2)&2);
2407 if (s
->no_rounding
){
2408 s
->dsp
.put_no_rnd_pixels_tab
[0][dxy
](dest_y
, ptr
, linesize
, 16);
2410 s
->dsp
.put_pixels_tab
[0][dxy
](dest_y
, ptr
, linesize
, 16);
2414 if(s
->flags
&CODEC_FLAG_GRAY
) return;
2416 motion_x
= s
->sprite_offset
[1][0];
2417 motion_y
= s
->sprite_offset
[1][1];
2418 src_x
= s
->mb_x
* 8 + (motion_x
>> (s
->sprite_warping_accuracy
+1));
2419 src_y
= s
->mb_y
* 8 + (motion_y
>> (s
->sprite_warping_accuracy
+1));
2420 motion_x
<<=(3-s
->sprite_warping_accuracy
);
2421 motion_y
<<=(3-s
->sprite_warping_accuracy
);
2422 src_x
= clip(src_x
, -8, s
->width
>>1);
2423 if (src_x
== s
->width
>>1)
2425 src_y
= clip(src_y
, -8, s
->height
>>1);
2426 if (src_y
== s
->height
>>1)
2429 offset
= (src_y
* uvlinesize
) + src_x
;
2430 ptr
= ref_picture
[1] + offset
;
2431 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2432 if( (unsigned)src_x
>= (s
->h_edge_pos
>>1) - 9
2433 || (unsigned)src_y
>= (s
->v_edge_pos
>>1) - 9){
2434 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2435 ptr
= s
->edge_emu_buffer
;
2439 s
->dsp
.gmc1(dest_cb
, ptr
, uvlinesize
, 8, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2441 ptr
= ref_picture
[2] + offset
;
2443 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2444 ptr
= s
->edge_emu_buffer
;
2446 s
->dsp
.gmc1(dest_cr
, ptr
, uvlinesize
, 8, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2451 static inline void gmc_motion(MpegEncContext
*s
,
2452 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2453 uint8_t **ref_picture
)
2456 int linesize
, uvlinesize
;
2457 const int a
= s
->sprite_warping_accuracy
;
2460 linesize
= s
->linesize
;
2461 uvlinesize
= s
->uvlinesize
;
2463 ptr
= ref_picture
[0];
2465 ox
= s
->sprite_offset
[0][0] + s
->sprite_delta
[0][0]*s
->mb_x
*16 + s
->sprite_delta
[0][1]*s
->mb_y
*16;
2466 oy
= s
->sprite_offset
[0][1] + s
->sprite_delta
[1][0]*s
->mb_x
*16 + s
->sprite_delta
[1][1]*s
->mb_y
*16;
2468 s
->dsp
.gmc(dest_y
, ptr
, linesize
, 16,
2471 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2472 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2473 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2474 s
->h_edge_pos
, s
->v_edge_pos
);
2475 s
->dsp
.gmc(dest_y
+8, ptr
, linesize
, 16,
2476 ox
+ s
->sprite_delta
[0][0]*8,
2477 oy
+ s
->sprite_delta
[1][0]*8,
2478 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2479 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2480 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2481 s
->h_edge_pos
, s
->v_edge_pos
);
2483 if(s
->flags
&CODEC_FLAG_GRAY
) return;
2485 ox
= s
->sprite_offset
[1][0] + s
->sprite_delta
[0][0]*s
->mb_x
*8 + s
->sprite_delta
[0][1]*s
->mb_y
*8;
2486 oy
= s
->sprite_offset
[1][1] + s
->sprite_delta
[1][0]*s
->mb_x
*8 + s
->sprite_delta
[1][1]*s
->mb_y
*8;
2488 ptr
= ref_picture
[1];
2489 s
->dsp
.gmc(dest_cb
, ptr
, uvlinesize
, 8,
2492 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2493 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2494 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2495 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2497 ptr
= ref_picture
[2];
2498 s
->dsp
.gmc(dest_cr
, ptr
, uvlinesize
, 8,
2501 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2502 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2503 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2504 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
/**
 * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
 * @param buf destination buffer
 * @param src source buffer, already offset to the (possibly out-of-picture) top left sample of the block
 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
 * @param block_w width of block
 * @param block_h height of block
 * @param src_x x coordinate of the top left sample of the block in the source buffer
 * @param src_y y coordinate of the top left sample of the block in the source buffer
 * @param w width of the source buffer
 * @param h height of the source buffer
 */
void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
                         int src_x, int src_y, int w, int h){
    int x, y;
    int start_y, start_x, end_y, end_x;

    /* pull a block lying completely outside the picture onto the nearest
       row/column so the replication below always has source data */
    if(src_y>= h){
        src+= (h-1-src_y)*linesize;
        src_y=h-1;
    }else if(src_y<=-block_h){
        src+= (1-block_h-src_y)*linesize;
        src_y=1-block_h;
    }
    if(src_x>= w){
        src+= (w-1-src_x);
        src_x=w-1;
    }else if(src_x<=-block_w){
        src+= (1-block_w-src_x);
        src_x=1-block_w;
    }

    /* intersection of the block with the picture */
    start_y= -src_y > 0 ? -src_y : 0;
    start_x= -src_x > 0 ? -src_x : 0;
    end_y= h-src_y < block_h ? h-src_y : block_h;
    end_x= w-src_x < block_w ? w-src_x : block_w;

    /* copy the part that exists inside the picture */
    for(y=start_y; y<end_y; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= src[x + y*linesize];
        }
    }

    /* replicate the top row upwards */
    for(y=0; y<start_y; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= buf[x + start_y*linesize];
        }
    }

    /* replicate the bottom row downwards */
    for(y=end_y; y<block_h; y++){
        for(x=start_x; x<end_x; x++){
            buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
        }
    }

    /* replicate the left and right columns (after the rows, so corners are filled) */
    for(y=0; y<block_h; y++){
        for(x=0; x<start_x; x++){
            buf[x + y*linesize]= buf[start_x + y*linesize];
        }
        for(x=end_x; x<block_w; x++){
            buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
        }
    }
}
2578 static inline int hpel_motion(MpegEncContext
*s
,
2579 uint8_t *dest
, uint8_t *src
,
2580 int field_based
, int field_select
,
2581 int src_x
, int src_y
,
2582 int width
, int height
, int stride
,
2583 int h_edge_pos
, int v_edge_pos
,
2584 int w
, int h
, op_pixels_func
*pix_op
,
2585 int motion_x
, int motion_y
)
2590 dxy
= ((motion_y
& 1) << 1) | (motion_x
& 1);
2591 src_x
+= motion_x
>> 1;
2592 src_y
+= motion_y
>> 1;
2594 /* WARNING: do no forget half pels */
2595 src_x
= clip(src_x
, -16, width
); //FIXME unneeded for emu?
2598 src_y
= clip(src_y
, -16, height
);
2599 if (src_y
== height
)
2601 src
+= src_y
* stride
+ src_x
;
2603 if(s
->unrestricted_mv
&& (s
->flags
&CODEC_FLAG_EMU_EDGE
)){
2604 if( (unsigned)src_x
> h_edge_pos
- (motion_x
&1) - w
2605 || (unsigned)src_y
> v_edge_pos
- (motion_y
&1) - h
){
2606 ff_emulated_edge_mc(s
->edge_emu_buffer
, src
, s
->linesize
, w
+1, (h
+1)<<field_based
,
2607 src_x
, src_y
<<field_based
, h_edge_pos
, s
->v_edge_pos
);
2608 src
= s
->edge_emu_buffer
;
2614 pix_op
[dxy
](dest
, src
, stride
, h
);
2618 static inline int hpel_motion_lowres(MpegEncContext
*s
,
2619 uint8_t *dest
, uint8_t *src
,
2620 int field_based
, int field_select
,
2621 int src_x
, int src_y
,
2622 int width
, int height
, int stride
,
2623 int h_edge_pos
, int v_edge_pos
,
2624 int w
, int h
, h264_chroma_mc_func
*pix_op
,
2625 int motion_x
, int motion_y
)
2627 const int lowres
= s
->avctx
->lowres
;
2628 const int s_mask
= (2<<lowres
)-1;
2632 if(s
->quarter_sample
){
2637 sx
= motion_x
& s_mask
;
2638 sy
= motion_y
& s_mask
;
2639 src_x
+= motion_x
>> (lowres
+1);
2640 src_y
+= motion_y
>> (lowres
+1);
2642 src
+= src_y
* stride
+ src_x
;
2644 if( (unsigned)src_x
> h_edge_pos
- (!!sx
) - w
2645 || (unsigned)src_y
>(v_edge_pos
>> field_based
) - (!!sy
) - h
){
2646 ff_emulated_edge_mc(s
->edge_emu_buffer
, src
, s
->linesize
, w
+1, (h
+1)<<field_based
,
2647 src_x
, src_y
<<field_based
, h_edge_pos
, v_edge_pos
);
2648 src
= s
->edge_emu_buffer
;
2656 pix_op
[lowres
](dest
, src
, stride
, h
, sx
, sy
);
2660 /* apply one mpeg motion vector to the three components */
2661 static always_inline
void mpeg_motion(MpegEncContext
*s
,
2662 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2663 int field_based
, int bottom_field
, int field_select
,
2664 uint8_t **ref_picture
, op_pixels_func (*pix_op
)[4],
2665 int motion_x
, int motion_y
, int h
)
2667 uint8_t *ptr_y
, *ptr_cb
, *ptr_cr
;
2668 int dxy
, uvdxy
, mx
, my
, src_x
, src_y
, uvsrc_x
, uvsrc_y
, v_edge_pos
, uvlinesize
, linesize
;
2671 if(s
->quarter_sample
)
2678 v_edge_pos
= s
->v_edge_pos
>> field_based
;
2679 linesize
= s
->current_picture
.linesize
[0] << field_based
;
2680 uvlinesize
= s
->current_picture
.linesize
[1] << field_based
;
2682 dxy
= ((motion_y
& 1) << 1) | (motion_x
& 1);
2683 src_x
= s
->mb_x
* 16 + (motion_x
>> 1);
2684 src_y
=(s
->mb_y
<<(4-field_based
)) + (motion_y
>> 1);
2686 if (s
->out_format
== FMT_H263
) {
2687 if((s
->workaround_bugs
& FF_BUG_HPEL_CHROMA
) && field_based
){
2688 mx
= (motion_x
>>1)|(motion_x
&1);
2690 uvdxy
= ((my
& 1) << 1) | (mx
& 1);
2691 uvsrc_x
= s
->mb_x
* 8 + (mx
>> 1);
2692 uvsrc_y
= (s
->mb_y
<<(3-field_based
)) + (my
>> 1);
2694 uvdxy
= dxy
| (motion_y
& 2) | ((motion_x
& 2) >> 1);
2698 }else if(s
->out_format
== FMT_H261
){//even chroma mv's are full pel in H261
2702 uvsrc_x
= s
->mb_x
*8 + mx
;
2703 uvsrc_y
= s
->mb_y
*8 + my
;
2705 if(s
->chroma_y_shift
){
2708 uvdxy
= ((my
& 1) << 1) | (mx
& 1);
2709 uvsrc_x
= s
->mb_x
* 8 + (mx
>> 1);
2710 uvsrc_y
= (s
->mb_y
<<(3-field_based
)) + (my
>> 1);
2712 if(s
->chroma_x_shift
){
2715 uvdxy
= ((motion_y
& 1) << 1) | (mx
& 1);
2716 uvsrc_x
= s
->mb_x
* 8 + (mx
>> 1);
2727 ptr_y
= ref_picture
[0] + src_y
* linesize
+ src_x
;
2728 ptr_cb
= ref_picture
[1] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
2729 ptr_cr
= ref_picture
[2] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
2731 if( (unsigned)src_x
> s
->h_edge_pos
- (motion_x
&1) - 16
2732 || (unsigned)src_y
> v_edge_pos
- (motion_y
&1) - h
){
2733 if(s
->codec_id
== CODEC_ID_MPEG2VIDEO
||
2734 s
->codec_id
== CODEC_ID_MPEG1VIDEO
){
2735 av_log(s
->avctx
,AV_LOG_DEBUG
,"MPEG motion vector out of boundary\n");
2738 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr_y
, s
->linesize
, 17, 17+field_based
,
2739 src_x
, src_y
<<field_based
, s
->h_edge_pos
, s
->v_edge_pos
);
2740 ptr_y
= s
->edge_emu_buffer
;
2741 if(!(s
->flags
&CODEC_FLAG_GRAY
)){
2742 uint8_t *uvbuf
= s
->edge_emu_buffer
+18*s
->linesize
;
2743 ff_emulated_edge_mc(uvbuf
, ptr_cb
, s
->uvlinesize
, 9, 9+field_based
,
2744 uvsrc_x
, uvsrc_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2745 ff_emulated_edge_mc(uvbuf
+16, ptr_cr
, s
->uvlinesize
, 9, 9+field_based
,
2746 uvsrc_x
, uvsrc_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2752 if(bottom_field
){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
2753 dest_y
+= s
->linesize
;
2754 dest_cb
+= s
->uvlinesize
;
2755 dest_cr
+= s
->uvlinesize
;
2759 ptr_y
+= s
->linesize
;
2760 ptr_cb
+= s
->uvlinesize
;
2761 ptr_cr
+= s
->uvlinesize
;
2764 pix_op
[0][dxy
](dest_y
, ptr_y
, linesize
, h
);
2766 if(!(s
->flags
&CODEC_FLAG_GRAY
)){
2767 pix_op
[s
->chroma_x_shift
][uvdxy
](dest_cb
, ptr_cb
, uvlinesize
, h
>> s
->chroma_y_shift
);
2768 pix_op
[s
->chroma_x_shift
][uvdxy
](dest_cr
, ptr_cr
, uvlinesize
, h
>> s
->chroma_y_shift
);
2770 if(s
->out_format
== FMT_H261
){
2771 ff_h261_loop_filter(s
);
2775 /* apply one mpeg motion vector to the three components */
2776 static always_inline
void mpeg_motion_lowres(MpegEncContext
*s
,
2777 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2778 int field_based
, int bottom_field
, int field_select
,
2779 uint8_t **ref_picture
, h264_chroma_mc_func
*pix_op
,
2780 int motion_x
, int motion_y
, int h
)
2782 uint8_t *ptr_y
, *ptr_cb
, *ptr_cr
;
2783 int mx
, my
, src_x
, src_y
, uvsrc_x
, uvsrc_y
, uvlinesize
, linesize
, sx
, sy
, uvsx
, uvsy
;
2784 const int lowres
= s
->avctx
->lowres
;
2785 const int block_s
= 8>>lowres
;
2786 const int s_mask
= (2<<lowres
)-1;
2787 const int h_edge_pos
= s
->h_edge_pos
>> lowres
;
2788 const int v_edge_pos
= s
->v_edge_pos
>> lowres
;
2789 linesize
= s
->current_picture
.linesize
[0] << field_based
;
2790 uvlinesize
= s
->current_picture
.linesize
[1] << field_based
;
2792 if(s
->quarter_sample
){ //FIXME obviously not perfect but qpel wont work in lowres anyway
2798 motion_y
+= (bottom_field
- field_select
)*((1<<lowres
)-1);
2801 sx
= motion_x
& s_mask
;
2802 sy
= motion_y
& s_mask
;
2803 src_x
= s
->mb_x
*2*block_s
+ (motion_x
>> (lowres
+1));
2804 src_y
=(s
->mb_y
*2*block_s
>>field_based
) + (motion_y
>> (lowres
+1));
2806 if (s
->out_format
== FMT_H263
) {
2807 uvsx
= ((motion_x
>>1) & s_mask
) | (sx
&1);
2808 uvsy
= ((motion_y
>>1) & s_mask
) | (sy
&1);