2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
25 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "mpegvideo.h"
35 #include "fastmemcpy.h"
41 #ifdef CONFIG_ENCODERS
42 static void encode_picture(MpegEncContext
*s
, int picture_number
);
43 #endif //CONFIG_ENCODERS
44 static void dct_unquantize_mpeg1_intra_c(MpegEncContext
*s
,
45 DCTELEM
*block
, int n
, int qscale
);
46 static void dct_unquantize_mpeg1_inter_c(MpegEncContext
*s
,
47 DCTELEM
*block
, int n
, int qscale
);
48 static void dct_unquantize_mpeg2_intra_c(MpegEncContext
*s
,
49 DCTELEM
*block
, int n
, int qscale
);
50 static void dct_unquantize_mpeg2_inter_c(MpegEncContext
*s
,
51 DCTELEM
*block
, int n
, int qscale
);
52 static void dct_unquantize_h263_intra_c(MpegEncContext
*s
,
53 DCTELEM
*block
, int n
, int qscale
);
54 static void dct_unquantize_h263_inter_c(MpegEncContext
*s
,
55 DCTELEM
*block
, int n
, int qscale
);
56 static void dct_unquantize_h261_intra_c(MpegEncContext
*s
,
57 DCTELEM
*block
, int n
, int qscale
);
58 static void dct_unquantize_h261_inter_c(MpegEncContext
*s
,
59 DCTELEM
*block
, int n
, int qscale
);
60 static void draw_edges_c(uint8_t *buf
, int wrap
, int width
, int height
, int w
);
61 #ifdef CONFIG_ENCODERS
62 static int dct_quantize_c(MpegEncContext
*s
, DCTELEM
*block
, int n
, int qscale
, int *overflow
);
63 static int dct_quantize_trellis_c(MpegEncContext
*s
, DCTELEM
*block
, int n
, int qscale
, int *overflow
);
64 static int dct_quantize_refine(MpegEncContext
*s
, DCTELEM
*block
, int16_t *weight
, DCTELEM
*orig
, int n
, int qscale
);
65 static int sse_mb(MpegEncContext
*s
);
66 static void denoise_dct_c(MpegEncContext
*s
, DCTELEM
*block
);
67 #endif //CONFIG_ENCODERS
70 extern int XVMC_field_start(MpegEncContext
*s
, AVCodecContext
*avctx
);
71 extern void XVMC_field_end(MpegEncContext
*s
);
72 extern void XVMC_decode_mb(MpegEncContext
*s
);
75 void (*draw_edges
)(uint8_t *buf
, int wrap
, int width
, int height
, int w
)= draw_edges_c
;
78 /* enable all paranoid tests for rounding, overflows, etc... */
84 /* for jpeg fast DCT */
87 static const uint16_t aanscales
[64] = {
88 /* precomputed values scaled up by 14 bits */
89 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
90 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270,
91 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906,
92 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315,
93 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
94 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552,
95 8867 , 12299, 11585, 10426, 8867, 6967, 4799, 2446,
96 4520 , 6270, 5906, 5315, 4520, 3552, 2446, 1247
99 static const uint8_t h263_chroma_roundtab
[16] = {
100 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
101 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
104 static const uint8_t ff_default_chroma_qscale_table
[32]={
105 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
106 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
109 #ifdef CONFIG_ENCODERS
110 static uint8_t (*default_mv_penalty
)[MAX_MV
*2+1]=NULL
;
111 static uint8_t default_fcode_tab
[MAX_MV
*2+1];
113 enum PixelFormat ff_yuv420p_list
[2]= {PIX_FMT_YUV420P
, -1};
115 static void convert_matrix(DSPContext
*dsp
, int (*qmat
)[64], uint16_t (*qmat16
)[2][64],
116 const uint16_t *quant_matrix
, int bias
, int qmin
, int qmax
)
120 for(qscale
=qmin
; qscale
<=qmax
; qscale
++){
122 if (dsp
->fdct
== ff_jpeg_fdct_islow
123 #ifdef FAAN_POSTSCALE
124 || dsp
->fdct
== ff_faandct
128 const int j
= dsp
->idct_permutation
[i
];
129 /* 16 <= qscale * quant_matrix[i] <= 7905 */
130 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
131 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
132 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
134 qmat
[qscale
][i
] = (int)((uint64_t_C(1) << QMAT_SHIFT
) /
135 (qscale
* quant_matrix
[j
]));
137 } else if (dsp
->fdct
== fdct_ifast
138 #ifndef FAAN_POSTSCALE
139 || dsp
->fdct
== ff_faandct
143 const int j
= dsp
->idct_permutation
[i
];
144 /* 16 <= qscale * quant_matrix[i] <= 7905 */
145 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
146 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
147 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
149 qmat
[qscale
][i
] = (int)((uint64_t_C(1) << (QMAT_SHIFT
+ 14)) /
150 (aanscales
[i
] * qscale
* quant_matrix
[j
]));
154 const int j
= dsp
->idct_permutation
[i
];
155 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
156 So 16 <= qscale * quant_matrix[i] <= 7905
157 so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
158 so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
160 qmat
[qscale
][i
] = (int)((uint64_t_C(1) << QMAT_SHIFT
) / (qscale
* quant_matrix
[j
]));
161 // qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
162 qmat16
[qscale
][0][i
] = (1 << QMAT_SHIFT_MMX
) / (qscale
* quant_matrix
[j
]);
164 if(qmat16
[qscale
][0][i
]==0 || qmat16
[qscale
][0][i
]==128*256) qmat16
[qscale
][0][i
]=128*256-1;
165 qmat16
[qscale
][1][i
]= ROUNDED_DIV(bias
<<(16-QUANT_BIAS_SHIFT
), qmat16
[qscale
][0][i
]);
171 static inline void update_qscale(MpegEncContext
*s
){
172 s
->qscale
= (s
->lambda
*139 + FF_LAMBDA_SCALE
*64) >> (FF_LAMBDA_SHIFT
+ 7);
173 s
->qscale
= clip(s
->qscale
, s
->avctx
->qmin
, s
->avctx
->qmax
);
175 s
->lambda2
= (s
->lambda
*s
->lambda
+ FF_LAMBDA_SCALE
/2) >> FF_LAMBDA_SHIFT
;
177 #endif //CONFIG_ENCODERS
179 void ff_init_scantable(uint8_t *permutation
, ScanTable
*st
, const uint8_t *src_scantable
){
183 st
->scantable
= src_scantable
;
187 j
= src_scantable
[i
];
188 st
->permutated
[i
] = permutation
[j
];
197 j
= st
->permutated
[i
];
199 st
->raster_end
[i
]= end
;
203 #ifdef CONFIG_ENCODERS
204 void ff_write_quant_matrix(PutBitContext
*pb
, int16_t *matrix
){
210 put_bits(pb
, 8, matrix
[ ff_zigzag_direct
[i
] ]);
215 #endif //CONFIG_ENCODERS
217 /* init common dct for both encoder and decoder */
218 int DCT_common_init(MpegEncContext
*s
)
220 s
->dct_unquantize_h263_intra
= dct_unquantize_h263_intra_c
;
221 s
->dct_unquantize_h263_inter
= dct_unquantize_h263_inter_c
;
222 s
->dct_unquantize_h261_intra
= dct_unquantize_h261_intra_c
;
223 s
->dct_unquantize_h261_inter
= dct_unquantize_h261_inter_c
;
224 s
->dct_unquantize_mpeg1_intra
= dct_unquantize_mpeg1_intra_c
;
225 s
->dct_unquantize_mpeg1_inter
= dct_unquantize_mpeg1_inter_c
;
226 s
->dct_unquantize_mpeg2_intra
= dct_unquantize_mpeg2_intra_c
;
227 s
->dct_unquantize_mpeg2_inter
= dct_unquantize_mpeg2_inter_c
;
229 #ifdef CONFIG_ENCODERS
230 s
->dct_quantize
= dct_quantize_c
;
231 s
->denoise_dct
= denoise_dct_c
;
235 MPV_common_init_mmx(s
);
238 MPV_common_init_axp(s
);
241 MPV_common_init_mlib(s
);
244 MPV_common_init_mmi(s
);
247 MPV_common_init_armv4l(s
);
250 MPV_common_init_ppc(s
);
253 #ifdef CONFIG_ENCODERS
254 s
->fast_dct_quantize
= s
->dct_quantize
;
256 if(s
->flags
&CODEC_FLAG_TRELLIS_QUANT
){
257 s
->dct_quantize
= dct_quantize_trellis_c
; //move before MPV_common_init_*
260 #endif //CONFIG_ENCODERS
262 /* load & permutate scantables
263 note: only wmv uses different ones
265 if(s
->alternate_scan
){
266 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->inter_scantable
, ff_alternate_vertical_scan
);
267 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_scantable
, ff_alternate_vertical_scan
);
269 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->inter_scantable
, ff_zigzag_direct
);
270 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_scantable
, ff_zigzag_direct
);
272 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_h_scantable
, ff_alternate_horizontal_scan
);
273 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_v_scantable
, ff_alternate_vertical_scan
);
278 static void copy_picture(Picture
*dst
, Picture
*src
){
280 dst
->type
= FF_BUFFER_TYPE_COPY
;
283 static void copy_picture_attributes(MpegEncContext
*s
, AVFrame
*dst
, AVFrame
*src
){
286 dst
->pict_type
= src
->pict_type
;
287 dst
->quality
= src
->quality
;
288 dst
->coded_picture_number
= src
->coded_picture_number
;
289 dst
->display_picture_number
= src
->display_picture_number
;
290 // dst->reference = src->reference;
292 dst
->interlaced_frame
= src
->interlaced_frame
;
293 dst
->top_field_first
= src
->top_field_first
;
295 if(s
->avctx
->me_threshold
){
296 if(!src
->motion_val
[0])
297 av_log(s
->avctx
, AV_LOG_ERROR
, "AVFrame.motion_val not set!\n");
299 av_log(s
->avctx
, AV_LOG_ERROR
, "AVFrame.mb_type not set!\n");
300 if(!src
->ref_index
[0])
301 av_log(s
->avctx
, AV_LOG_ERROR
, "AVFrame.ref_index not set!\n");
302 if(src
->motion_subsample_log2
!= dst
->motion_subsample_log2
)
303 av_log(s
->avctx
, AV_LOG_ERROR
, "AVFrame.motion_subsample_log2 doesnt match! (%d!=%d)\n",
304 src
->motion_subsample_log2
, dst
->motion_subsample_log2
);
306 memcpy(dst
->mb_type
, src
->mb_type
, s
->mb_stride
* s
->mb_height
* sizeof(dst
->mb_type
[0]));
309 int stride
= ((16*s
->mb_width
)>>src
->motion_subsample_log2
) + 1;
310 int height
= ((16*s
->mb_height
)>>src
->motion_subsample_log2
);
312 if(src
->motion_val
[i
] && src
->motion_val
[i
] != dst
->motion_val
[i
]){
313 memcpy(dst
->motion_val
[i
], src
->motion_val
[i
], 2*stride
*height
*sizeof(int16_t));
315 if(src
->ref_index
[i
] && src
->ref_index
[i
] != dst
->ref_index
[i
]){
316 memcpy(dst
->ref_index
[i
], src
->ref_index
[i
], s
->b8_stride
*2*s
->mb_height
*sizeof(int8_t));
323 * allocates a Picture
324 * The pixels are allocated/set by calling get_buffer() if shared=0
326 static int alloc_picture(MpegEncContext
*s
, Picture
*pic
, int shared
){
327 const int big_mb_num
= s
->mb_stride
*(s
->mb_height
+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11
328 const int mb_array_size
= s
->mb_stride
*s
->mb_height
;
329 const int b8_array_size
= s
->b8_stride
*s
->mb_height
*2;
330 const int b4_array_size
= s
->b4_stride
*s
->mb_height
*4;
334 assert(pic
->data
[0]);
335 assert(pic
->type
== 0 || pic
->type
== FF_BUFFER_TYPE_SHARED
);
336 pic
->type
= FF_BUFFER_TYPE_SHARED
;
340 assert(!pic
->data
[0]);
342 r
= s
->avctx
->get_buffer(s
->avctx
, (AVFrame
*)pic
);
344 if(r
<0 || !pic
->age
|| !pic
->type
|| !pic
->data
[0]){
345 av_log(s
->avctx
, AV_LOG_ERROR
, "get_buffer() failed (%d %d %d %p)\n", r
, pic
->age
, pic
->type
, pic
->data
[0]);
349 if(s
->linesize
&& (s
->linesize
!= pic
->linesize
[0] || s
->uvlinesize
!= pic
->linesize
[1])){
350 av_log(s
->avctx
, AV_LOG_ERROR
, "get_buffer() failed (stride changed)\n");
354 if(pic
->linesize
[1] != pic
->linesize
[2]){
355 av_log(s
->avctx
, AV_LOG_ERROR
, "get_buffer() failed (uv stride missmatch)\n");
359 s
->linesize
= pic
->linesize
[0];
360 s
->uvlinesize
= pic
->linesize
[1];
363 if(pic
->qscale_table
==NULL
){
365 CHECKED_ALLOCZ(pic
->mb_var
, mb_array_size
* sizeof(int16_t))
366 CHECKED_ALLOCZ(pic
->mc_mb_var
, mb_array_size
* sizeof(int16_t))
367 CHECKED_ALLOCZ(pic
->mb_mean
, mb_array_size
* sizeof(int8_t))
370 CHECKED_ALLOCZ(pic
->mbskip_table
, mb_array_size
* sizeof(uint8_t)+2) //the +2 is for the slice end check
371 CHECKED_ALLOCZ(pic
->qscale_table
, mb_array_size
* sizeof(uint8_t))
372 CHECKED_ALLOCZ(pic
->mb_type_base
, big_mb_num
* sizeof(uint32_t))
373 pic
->mb_type
= pic
->mb_type_base
+ s
->mb_stride
+1;
374 if(s
->out_format
== FMT_H264
){
376 CHECKED_ALLOCZ(pic
->motion_val_base
[i
], 2 * (b4_array_size
+2) * sizeof(int16_t))
377 pic
->motion_val
[i
]= pic
->motion_val_base
[i
]+2;
378 CHECKED_ALLOCZ(pic
->ref_index
[i
], b8_array_size
* sizeof(uint8_t))
380 pic
->motion_subsample_log2
= 2;
381 }else if(s
->out_format
== FMT_H263
|| s
->encoding
|| (s
->avctx
->debug
&FF_DEBUG_MV
) || (s
->avctx
->debug_mv
)){
383 CHECKED_ALLOCZ(pic
->motion_val_base
[i
], 2 * (b8_array_size
+2) * sizeof(int16_t))
384 pic
->motion_val
[i
]= pic
->motion_val_base
[i
]+2;
385 CHECKED_ALLOCZ(pic
->ref_index
[i
], b8_array_size
* sizeof(uint8_t))
387 pic
->motion_subsample_log2
= 3;
389 if(s
->avctx
->debug
&FF_DEBUG_DCT_COEFF
) {
390 CHECKED_ALLOCZ(pic
->dct_coeff
, 64 * mb_array_size
* sizeof(DCTELEM
)*6)
392 pic
->qstride
= s
->mb_stride
;
393 CHECKED_ALLOCZ(pic
->pan_scan
, 1 * sizeof(AVPanScan
))
396 //it might be nicer if the application would keep track of these but it would require a API change
397 memmove(s
->prev_pict_types
+1, s
->prev_pict_types
, PREV_PICT_TYPES_BUFFER_SIZE
-1);
398 s
->prev_pict_types
[0]= s
->pict_type
;
399 if(pic
->age
< PREV_PICT_TYPES_BUFFER_SIZE
&& s
->prev_pict_types
[pic
->age
] == B_TYPE
)
400 pic
->age
= INT_MAX
; // skiped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway
403 fail
: //for the CHECKED_ALLOCZ macro
408 * deallocates a picture
410 static void free_picture(MpegEncContext
*s
, Picture
*pic
){
413 if(pic
->data
[0] && pic
->type
!=FF_BUFFER_TYPE_SHARED
){
414 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)pic
);
417 av_freep(&pic
->mb_var
);
418 av_freep(&pic
->mc_mb_var
);
419 av_freep(&pic
->mb_mean
);
420 av_freep(&pic
->mbskip_table
);
421 av_freep(&pic
->qscale_table
);
422 av_freep(&pic
->mb_type_base
);
423 av_freep(&pic
->dct_coeff
);
424 av_freep(&pic
->pan_scan
);
427 av_freep(&pic
->motion_val_base
[i
]);
428 av_freep(&pic
->ref_index
[i
]);
431 if(pic
->type
== FF_BUFFER_TYPE_SHARED
){
440 static int init_duplicate_context(MpegEncContext
*s
, MpegEncContext
*base
){
443 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
444 CHECKED_ALLOCZ(s
->allocated_edge_emu_buffer
, (s
->width
+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
445 s
->edge_emu_buffer
= s
->allocated_edge_emu_buffer
+ (s
->width
+64)*2*17;
447 //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
448 CHECKED_ALLOCZ(s
->me
.scratchpad
, (s
->width
+64)*4*16*2*sizeof(uint8_t))
449 s
->rd_scratchpad
= s
->me
.scratchpad
;
450 s
->b_scratchpad
= s
->me
.scratchpad
;
451 s
->obmc_scratchpad
= s
->me
.scratchpad
+ 16;
453 CHECKED_ALLOCZ(s
->me
.map
, ME_MAP_SIZE
*sizeof(uint32_t))
454 CHECKED_ALLOCZ(s
->me
.score_map
, ME_MAP_SIZE
*sizeof(uint32_t))
455 if(s
->avctx
->noise_reduction
){
456 CHECKED_ALLOCZ(s
->dct_error_sum
, 2 * 64 * sizeof(int))
459 CHECKED_ALLOCZ(s
->blocks
, 64*12*2 * sizeof(DCTELEM
))
460 s
->block
= s
->blocks
[0];
463 s
->pblocks
[i
] = (short *)(&s
->block
[i
]);
467 return -1; //free() through MPV_common_end()
470 static void free_duplicate_context(MpegEncContext
*s
){
473 av_freep(&s
->allocated_edge_emu_buffer
); s
->edge_emu_buffer
= NULL
;
474 av_freep(&s
->me
.scratchpad
);
477 s
->obmc_scratchpad
= NULL
;
479 av_freep(&s
->dct_error_sum
);
480 av_freep(&s
->me
.map
);
481 av_freep(&s
->me
.score_map
);
482 av_freep(&s
->blocks
);
486 static void backup_duplicate_context(MpegEncContext
*bak
, MpegEncContext
*src
){
487 #define COPY(a) bak->a= src->a
488 COPY(allocated_edge_emu_buffer
);
489 COPY(edge_emu_buffer
);
493 COPY(obmc_scratchpad
);
500 COPY(me
.map_generation
);
508 void ff_update_duplicate_context(MpegEncContext
*dst
, MpegEncContext
*src
){
511 //FIXME copy only needed parts
513 backup_duplicate_context(&bak
, dst
);
514 memcpy(dst
, src
, sizeof(MpegEncContext
));
515 backup_duplicate_context(dst
, &bak
);
517 dst
->pblocks
[i
] = (short *)(&dst
->block
[i
]);
519 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
522 static void update_duplicate_context_after_me(MpegEncContext
*dst
, MpegEncContext
*src
){
523 #define COPY(a) dst->a= src->a
525 COPY(current_picture
);
531 COPY(picture_in_gop_number
);
532 COPY(gop_picture_number
);
533 COPY(frame_pred_frame_dct
); //FIXME dont set in encode_header
534 COPY(progressive_frame
); //FIXME dont set in encode_header
535 COPY(partitioned_frame
); //FIXME dont set in encode_header
540 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
541 * the changed fields will not depend upon the prior state of the MpegEncContext.
543 static void MPV_common_defaults(MpegEncContext
*s
){
545 s
->c_dc_scale_table
= ff_mpeg1_dc_scale_table
;
546 s
->chroma_qscale_table
= ff_default_chroma_qscale_table
;
547 s
->progressive_frame
= 1;
548 s
->progressive_sequence
= 1;
549 s
->picture_structure
= PICT_FRAME
;
551 s
->coded_picture_number
= 0;
552 s
->picture_number
= 0;
553 s
->input_picture_number
= 0;
555 s
->picture_in_gop_number
= 0;
562 * sets the given MpegEncContext to defaults for decoding.
563 * the changed fields will not depend upon the prior state of the MpegEncContext.
565 void MPV_decode_defaults(MpegEncContext
*s
){
566 MPV_common_defaults(s
);
570 * sets the given MpegEncContext to defaults for encoding.
571 * the changed fields will not depend upon the prior state of the MpegEncContext.
574 #ifdef CONFIG_ENCODERS
575 static void MPV_encode_defaults(MpegEncContext
*s
){
578 MPV_common_defaults(s
);
584 default_mv_penalty
= av_mallocz( sizeof(uint8_t)*(MAX_FCODE
+1)*(2*MAX_MV
+1) );
585 memset(default_mv_penalty
, 0, sizeof(uint8_t)*(MAX_FCODE
+1)*(2*MAX_MV
+1));
586 memset(default_fcode_tab
, 0, sizeof(uint8_t)*(2*MAX_MV
+1));
588 for(i
=-16; i
<16; i
++){
589 default_fcode_tab
[i
+ MAX_MV
]= 1;
592 s
->me
.mv_penalty
= default_mv_penalty
;
593 s
->fcode_tab
= default_fcode_tab
;
595 #endif //CONFIG_ENCODERS
598 * init common structure for both encoder and decoder.
599 * this assumes that some variables like width/height are already set
601 int MPV_common_init(MpegEncContext
*s
)
603 int y_size
, c_size
, yc_size
, i
, mb_array_size
, mv_table_size
, x
, y
;
605 if(s
->avctx
->thread_count
> MAX_THREADS
|| (16*s
->avctx
->thread_count
> s
->height
&& s
->height
)){
606 av_log(s
->avctx
, AV_LOG_ERROR
, "too many threads\n");
610 dsputil_init(&s
->dsp
, s
->avctx
);
613 s
->flags
= s
->avctx
->flags
;
614 s
->flags2
= s
->avctx
->flags2
;
616 s
->mb_width
= (s
->width
+ 15) / 16;
617 s
->mb_height
= (s
->height
+ 15) / 16;
618 s
->mb_stride
= s
->mb_width
+ 1;
619 s
->b8_stride
= s
->mb_width
*2 + 1;
620 s
->b4_stride
= s
->mb_width
*4 + 1;
621 mb_array_size
= s
->mb_height
* s
->mb_stride
;
622 mv_table_size
= (s
->mb_height
+2) * s
->mb_stride
+ 1;
624 /* set chroma shifts */
625 avcodec_get_chroma_sub_sample(s
->avctx
->pix_fmt
,&(s
->chroma_x_shift
),
626 &(s
->chroma_y_shift
) );
628 /* set default edge pos, will be overriden in decode_header if needed */
629 s
->h_edge_pos
= s
->mb_width
*16;
630 s
->v_edge_pos
= s
->mb_height
*16;
632 s
->mb_num
= s
->mb_width
* s
->mb_height
;
637 s
->block_wrap
[3]= s
->b8_stride
;
639 s
->block_wrap
[5]= s
->mb_stride
;
641 y_size
= s
->b8_stride
* (2 * s
->mb_height
+ 1);
642 c_size
= s
->mb_stride
* (s
->mb_height
+ 1);
643 yc_size
= y_size
+ 2 * c_size
;
645 /* convert fourcc to upper case */
646 s
->avctx
->codec_tag
= toupper( s
->avctx
->codec_tag
&0xFF)
647 + (toupper((s
->avctx
->codec_tag
>>8 )&0xFF)<<8 )
648 + (toupper((s
->avctx
->codec_tag
>>16)&0xFF)<<16)
649 + (toupper((s
->avctx
->codec_tag
>>24)&0xFF)<<24);
651 s
->avctx
->stream_codec_tag
= toupper( s
->avctx
->stream_codec_tag
&0xFF)
652 + (toupper((s
->avctx
->stream_codec_tag
>>8 )&0xFF)<<8 )
653 + (toupper((s
->avctx
->stream_codec_tag
>>16)&0xFF)<<16)
654 + (toupper((s
->avctx
->stream_codec_tag
>>24)&0xFF)<<24);
656 s
->avctx
->coded_frame
= (AVFrame
*)&s
->current_picture
;
658 CHECKED_ALLOCZ(s
->mb_index2xy
, (s
->mb_num
+1)*sizeof(int)) //error ressilience code looks cleaner with this
659 for(y
=0; y
<s
->mb_height
; y
++){
660 for(x
=0; x
<s
->mb_width
; x
++){
661 s
->mb_index2xy
[ x
+ y
*s
->mb_width
] = x
+ y
*s
->mb_stride
;
664 s
->mb_index2xy
[ s
->mb_height
*s
->mb_width
] = (s
->mb_height
-1)*s
->mb_stride
+ s
->mb_width
; //FIXME really needed?
667 /* Allocate MV tables */
668 CHECKED_ALLOCZ(s
->p_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
669 CHECKED_ALLOCZ(s
->b_forw_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
670 CHECKED_ALLOCZ(s
->b_back_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
671 CHECKED_ALLOCZ(s
->b_bidir_forw_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
672 CHECKED_ALLOCZ(s
->b_bidir_back_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
673 CHECKED_ALLOCZ(s
->b_direct_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
674 s
->p_mv_table
= s
->p_mv_table_base
+ s
->mb_stride
+ 1;
675 s
->b_forw_mv_table
= s
->b_forw_mv_table_base
+ s
->mb_stride
+ 1;
676 s
->b_back_mv_table
= s
->b_back_mv_table_base
+ s
->mb_stride
+ 1;
677 s
->b_bidir_forw_mv_table
= s
->b_bidir_forw_mv_table_base
+ s
->mb_stride
+ 1;
678 s
->b_bidir_back_mv_table
= s
->b_bidir_back_mv_table_base
+ s
->mb_stride
+ 1;
679 s
->b_direct_mv_table
= s
->b_direct_mv_table_base
+ s
->mb_stride
+ 1;
681 if(s
->msmpeg4_version
){
682 CHECKED_ALLOCZ(s
->ac_stats
, 2*2*(MAX_LEVEL
+1)*(MAX_RUN
+1)*2*sizeof(int));
684 CHECKED_ALLOCZ(s
->avctx
->stats_out
, 256);
686 /* Allocate MB type table */
687 CHECKED_ALLOCZ(s
->mb_type
, mb_array_size
* sizeof(uint16_t)) //needed for encoding
689 CHECKED_ALLOCZ(s
->lambda_table
, mb_array_size
* sizeof(int))
691 CHECKED_ALLOCZ(s
->q_intra_matrix
, 64*32 * sizeof(int))
692 CHECKED_ALLOCZ(s
->q_inter_matrix
, 64*32 * sizeof(int))
693 CHECKED_ALLOCZ(s
->q_intra_matrix16
, 64*32*2 * sizeof(uint16_t))
694 CHECKED_ALLOCZ(s
->q_inter_matrix16
, 64*32*2 * sizeof(uint16_t))
695 CHECKED_ALLOCZ(s
->input_picture
, MAX_PICTURE_COUNT
* sizeof(Picture
*))
696 CHECKED_ALLOCZ(s
->reordered_input_picture
, MAX_PICTURE_COUNT
* sizeof(Picture
*))
698 if(s
->avctx
->noise_reduction
){
699 CHECKED_ALLOCZ(s
->dct_offset
, 2 * 64 * sizeof(uint16_t))
702 CHECKED_ALLOCZ(s
->picture
, MAX_PICTURE_COUNT
* sizeof(Picture
))
704 CHECKED_ALLOCZ(s
->error_status_table
, mb_array_size
*sizeof(uint8_t))
706 if(s
->codec_id
==CODEC_ID_MPEG4
|| (s
->flags
& CODEC_FLAG_INTERLACED_ME
)){
707 /* interlaced direct mode decoding tables */
712 CHECKED_ALLOCZ(s
->b_field_mv_table_base
[i
][j
][k
] , mv_table_size
* 2 * sizeof(int16_t))
713 s
->b_field_mv_table
[i
][j
][k
] = s
->b_field_mv_table_base
[i
][j
][k
] + s
->mb_stride
+ 1;
715 CHECKED_ALLOCZ(s
->b_field_select_table
[i
][j
] , mb_array_size
* 2 * sizeof(uint8_t))
716 CHECKED_ALLOCZ(s
->p_field_mv_table_base
[i
][j
] , mv_table_size
* 2 * sizeof(int16_t))
717 s
->p_field_mv_table
[i
][j
] = s
->p_field_mv_table_base
[i
][j
] + s
->mb_stride
+ 1;
719 CHECKED_ALLOCZ(s
->p_field_select_table
[i
] , mb_array_size
* 2 * sizeof(uint8_t))
722 if (s
->out_format
== FMT_H263
) {
724 CHECKED_ALLOCZ(s
->ac_val_base
, yc_size
* sizeof(int16_t) * 16);
725 s
->ac_val
[0] = s
->ac_val_base
+ s
->b8_stride
+ 1;
726 s
->ac_val
[1] = s
->ac_val_base
+ y_size
+ s
->mb_stride
+ 1;
727 s
->ac_val
[2] = s
->ac_val
[1] + c_size
;
730 CHECKED_ALLOCZ(s
->coded_block_base
, y_size
);
731 s
->coded_block
= s
->coded_block_base
+ s
->b8_stride
+ 1;
733 /* divx501 bitstream reorder buffer */
734 CHECKED_ALLOCZ(s
->bitstream_buffer
, BITSTREAM_BUFFER_SIZE
);
736 /* cbp, ac_pred, pred_dir */
737 CHECKED_ALLOCZ(s
->cbp_table
, mb_array_size
* sizeof(uint8_t))
738 CHECKED_ALLOCZ(s
->pred_dir_table
, mb_array_size
* sizeof(uint8_t))
741 if (s
->h263_pred
|| s
->h263_plus
|| !s
->encoding
) {
743 //MN: we need these for error resilience of intra-frames
744 CHECKED_ALLOCZ(s
->dc_val_base
, yc_size
* sizeof(int16_t));
745 s
->dc_val
[0] = s
->dc_val_base
+ s
->b8_stride
+ 1;
746 s
->dc_val
[1] = s
->dc_val_base
+ y_size
+ s
->mb_stride
+ 1;
747 s
->dc_val
[2] = s
->dc_val
[1] + c_size
;
748 for(i
=0;i
<yc_size
;i
++)
749 s
->dc_val_base
[i
] = 1024;
752 /* which mb is a intra block */
753 CHECKED_ALLOCZ(s
->mbintra_table
, mb_array_size
);
754 memset(s
->mbintra_table
, 1, mb_array_size
);
756 /* init macroblock skip table */
757 CHECKED_ALLOCZ(s
->mbskip_table
, mb_array_size
+2);
758 //Note the +1 is for a quicker mpeg4 slice_end detection
759 CHECKED_ALLOCZ(s
->prev_pict_types
, PREV_PICT_TYPES_BUFFER_SIZE
);
761 s
->parse_context
.state
= -1;
762 if((s
->avctx
->debug
&(FF_DEBUG_VIS_QP
|FF_DEBUG_VIS_MB_TYPE
)) || (s
->avctx
->debug_mv
)){
763 s
->visualization_buffer
[0] = av_malloc((s
->mb_width
*16 + 2*EDGE_WIDTH
) * s
->mb_height
*16 + 2*EDGE_WIDTH
);
764 s
->visualization_buffer
[1] = av_malloc((s
->mb_width
*8 + EDGE_WIDTH
) * s
->mb_height
*8 + EDGE_WIDTH
);
765 s
->visualization_buffer
[2] = av_malloc((s
->mb_width
*8 + EDGE_WIDTH
) * s
->mb_height
*8 + EDGE_WIDTH
);
768 s
->context_initialized
= 1;
770 s
->thread_context
[0]= s
;
771 for(i
=1; i
<s
->avctx
->thread_count
; i
++){
772 s
->thread_context
[i
]= av_malloc(sizeof(MpegEncContext
));
773 memcpy(s
->thread_context
[i
], s
, sizeof(MpegEncContext
));
776 for(i
=0; i
<s
->avctx
->thread_count
; i
++){
777 if(init_duplicate_context(s
->thread_context
[i
], s
) < 0)
779 s
->thread_context
[i
]->start_mb_y
= (s
->mb_height
*(i
) + s
->avctx
->thread_count
/2) / s
->avctx
->thread_count
;
780 s
->thread_context
[i
]->end_mb_y
= (s
->mb_height
*(i
+1) + s
->avctx
->thread_count
/2) / s
->avctx
->thread_count
;
789 /* init common structure for both encoder and decoder */
790 void MPV_common_end(MpegEncContext
*s
)
794 for(i
=0; i
<s
->avctx
->thread_count
; i
++){
795 free_duplicate_context(s
->thread_context
[i
]);
797 for(i
=1; i
<s
->avctx
->thread_count
; i
++){
798 av_freep(&s
->thread_context
[i
]);
801 av_freep(&s
->parse_context
.buffer
);
802 s
->parse_context
.buffer_size
=0;
804 av_freep(&s
->mb_type
);
805 av_freep(&s
->p_mv_table_base
);
806 av_freep(&s
->b_forw_mv_table_base
);
807 av_freep(&s
->b_back_mv_table_base
);
808 av_freep(&s
->b_bidir_forw_mv_table_base
);
809 av_freep(&s
->b_bidir_back_mv_table_base
);
810 av_freep(&s
->b_direct_mv_table_base
);
812 s
->b_forw_mv_table
= NULL
;
813 s
->b_back_mv_table
= NULL
;
814 s
->b_bidir_forw_mv_table
= NULL
;
815 s
->b_bidir_back_mv_table
= NULL
;
816 s
->b_direct_mv_table
= NULL
;
820 av_freep(&s
->b_field_mv_table_base
[i
][j
][k
]);
821 s
->b_field_mv_table
[i
][j
][k
]=NULL
;
823 av_freep(&s
->b_field_select_table
[i
][j
]);
824 av_freep(&s
->p_field_mv_table_base
[i
][j
]);
825 s
->p_field_mv_table
[i
][j
]=NULL
;
827 av_freep(&s
->p_field_select_table
[i
]);
830 av_freep(&s
->dc_val_base
);
831 av_freep(&s
->ac_val_base
);
832 av_freep(&s
->coded_block_base
);
833 av_freep(&s
->mbintra_table
);
834 av_freep(&s
->cbp_table
);
835 av_freep(&s
->pred_dir_table
);
837 av_freep(&s
->mbskip_table
);
838 av_freep(&s
->prev_pict_types
);
839 av_freep(&s
->bitstream_buffer
);
840 av_freep(&s
->avctx
->stats_out
);
841 av_freep(&s
->ac_stats
);
842 av_freep(&s
->error_status_table
);
843 av_freep(&s
->mb_index2xy
);
844 av_freep(&s
->lambda_table
);
845 av_freep(&s
->q_intra_matrix
);
846 av_freep(&s
->q_inter_matrix
);
847 av_freep(&s
->q_intra_matrix16
);
848 av_freep(&s
->q_inter_matrix16
);
849 av_freep(&s
->input_picture
);
850 av_freep(&s
->reordered_input_picture
);
851 av_freep(&s
->dct_offset
);
854 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
855 free_picture(s
, &s
->picture
[i
]);
858 av_freep(&s
->picture
);
859 s
->context_initialized
= 0;
862 s
->current_picture_ptr
= NULL
;
863 s
->linesize
= s
->uvlinesize
= 0;
866 av_freep(&s
->visualization_buffer
[i
]);
868 avcodec_default_free_buffers(s
->avctx
);
871 #ifdef CONFIG_ENCODERS
873 /* init video encoder */
874 int MPV_encode_init(AVCodecContext
*avctx
)
876 MpegEncContext
*s
= avctx
->priv_data
;
878 int chroma_h_shift
, chroma_v_shift
;
880 MPV_encode_defaults(s
);
882 avctx
->pix_fmt
= PIX_FMT_YUV420P
; // FIXME
884 s
->bit_rate
= avctx
->bit_rate
;
885 s
->width
= avctx
->width
;
886 s
->height
= avctx
->height
;
887 if(avctx
->gop_size
> 600){
888 av_log(avctx
, AV_LOG_ERROR
, "Warning keyframe interval too large! reducing it ...\n");
891 s
->gop_size
= avctx
->gop_size
;
893 s
->flags
= avctx
->flags
;
894 s
->flags2
= avctx
->flags2
;
895 s
->max_b_frames
= avctx
->max_b_frames
;
896 s
->codec_id
= avctx
->codec
->id
;
897 s
->luma_elim_threshold
= avctx
->luma_elim_threshold
;
898 s
->chroma_elim_threshold
= avctx
->chroma_elim_threshold
;
899 s
->strict_std_compliance
= avctx
->strict_std_compliance
;
900 s
->data_partitioning
= avctx
->flags
& CODEC_FLAG_PART
;
901 s
->quarter_sample
= (avctx
->flags
& CODEC_FLAG_QPEL
)!=0;
902 s
->mpeg_quant
= avctx
->mpeg_quant
;
903 s
->rtp_mode
= !!avctx
->rtp_payload_size
;
904 s
->intra_dc_precision
= avctx
->intra_dc_precision
;
905 s
->user_specified_pts
= AV_NOPTS_VALUE
;
907 if (s
->gop_size
<= 1) {
914 s
->me_method
= avctx
->me_method
;
917 s
->fixed_qscale
= !!(avctx
->flags
& CODEC_FLAG_QSCALE
);
919 s
->adaptive_quant
= ( s
->avctx
->lumi_masking
920 || s
->avctx
->dark_masking
921 || s
->avctx
->temporal_cplx_masking
922 || s
->avctx
->spatial_cplx_masking
923 || s
->avctx
->p_masking
924 || (s
->flags
&CODEC_FLAG_QP_RD
))
927 s
->obmc
= !!(s
->flags
& CODEC_FLAG_OBMC
);
928 s
->loop_filter
= !!(s
->flags
& CODEC_FLAG_LOOP_FILTER
);
929 s
->alternate_scan
= !!(s
->flags
& CODEC_FLAG_ALT_SCAN
);
931 if(avctx
->rc_max_rate
&& !avctx
->rc_buffer_size
){
932 av_log(avctx
, AV_LOG_ERROR
, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
936 if(avctx
->rc_min_rate
&& avctx
->rc_max_rate
!= avctx
->rc_min_rate
){
937 av_log(avctx
, AV_LOG_INFO
, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n");
940 if(avctx
->rc_min_rate
&& avctx
->rc_min_rate
> avctx
->bit_rate
){
941 av_log(avctx
, AV_LOG_INFO
, "bitrate below min bitrate\n");
945 if(avctx
->rc_max_rate
&& avctx
->rc_max_rate
< avctx
->bit_rate
){
946 av_log(avctx
, AV_LOG_INFO
, "bitrate above max bitrate\n");
950 if( s
->avctx
->rc_max_rate
&& s
->avctx
->rc_min_rate
== s
->avctx
->rc_max_rate
951 && (s
->codec_id
== CODEC_ID_MPEG1VIDEO
|| s
->codec_id
== CODEC_ID_MPEG2VIDEO
)
952 && 90000LL * (avctx
->rc_buffer_size
-1) > s
->avctx
->rc_max_rate
*0xFFFFLL
){
954 av_log(avctx
, AV_LOG_INFO
, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the specified vbv buffer is too large for the given bitrate!\n");
957 if((s
->flags
& CODEC_FLAG_4MV
) && s
->codec_id
!= CODEC_ID_MPEG4
958 && s
->codec_id
!= CODEC_ID_H263
&& s
->codec_id
!= CODEC_ID_H263P
&& s
->codec_id
!= CODEC_ID_FLV1
){
959 av_log(avctx
, AV_LOG_ERROR
, "4MV not supported by codec\n");
963 if(s
->obmc
&& s
->avctx
->mb_decision
!= FF_MB_DECISION_SIMPLE
){
964 av_log(avctx
, AV_LOG_ERROR
, "OBMC is only supported with simple mb decission\n");
968 if(s
->obmc
&& s
->codec_id
!= CODEC_ID_H263
&& s
->codec_id
!= CODEC_ID_H263P
){
969 av_log(avctx
, AV_LOG_ERROR
, "OBMC is only supported with H263(+)\n");
973 if(s
->quarter_sample
&& s
->codec_id
!= CODEC_ID_MPEG4
){
974 av_log(avctx
, AV_LOG_ERROR
, "qpel not supported by codec\n");
978 if(s
->data_partitioning
&& s
->codec_id
!= CODEC_ID_MPEG4
){
979 av_log(avctx
, AV_LOG_ERROR
, "data partitioning not supported by codec\n");
983 if(s
->max_b_frames
&& s
->codec_id
!= CODEC_ID_MPEG4
&& s
->codec_id
!= CODEC_ID_MPEG1VIDEO
&& s
->codec_id
!= CODEC_ID_MPEG2VIDEO
){
984 av_log(avctx
, AV_LOG_ERROR
, "b frames not supported by codec\n");
988 if((s
->flags
& (CODEC_FLAG_INTERLACED_DCT
|CODEC_FLAG_INTERLACED_ME
|CODEC_FLAG_ALT_SCAN
))
989 && s
->codec_id
!= CODEC_ID_MPEG4
&& s
->codec_id
!= CODEC_ID_MPEG2VIDEO
){
990 av_log(avctx
, AV_LOG_ERROR
, "interlacing not supported by codec\n");
994 if(s
->mpeg_quant
&& s
->codec_id
!= CODEC_ID_MPEG4
){ //FIXME mpeg2 uses that too
995 av_log(avctx
, AV_LOG_ERROR
, "mpeg2 style quantization not supporetd by codec\n");
999 if((s
->flags
& CODEC_FLAG_CBP_RD
) && !(s
->flags
& CODEC_FLAG_TRELLIS_QUANT
)){
1000 av_log(avctx
, AV_LOG_ERROR
, "CBP RD needs trellis quant\n");
1004 if((s
->flags
& CODEC_FLAG_QP_RD
) && s
->avctx
->mb_decision
!= FF_MB_DECISION_RD
){
1005 av_log(avctx
, AV_LOG_ERROR
, "QP RD needs mbd=2\n");
1009 if(s
->avctx
->scenechange_threshold
< 1000000000 && (s
->flags
& CODEC_FLAG_CLOSED_GOP
)){
1010 av_log(avctx
, AV_LOG_ERROR
, "closed gop with scene change detection arent supported yet\n");
1014 if(s
->avctx
->thread_count
> 1 && s
->codec_id
!= CODEC_ID_MPEG4
1015 && s
->codec_id
!= CODEC_ID_MPEG1VIDEO
&& s
->codec_id
!= CODEC_ID_MPEG2VIDEO
1016 && (s
->codec_id
!= CODEC_ID_H263P
|| !(s
->flags
& CODEC_FLAG_H263P_SLICE_STRUCT
))){
1017 av_log(avctx
, AV_LOG_ERROR
, "multi threaded encoding not supported by codec\n");
1021 if(s
->avctx
->thread_count
> 1)
1024 i
= ff_gcd(avctx
->frame_rate
, avctx
->frame_rate_base
);
1026 av_log(avctx
, AV_LOG_INFO
, "removing common factors from framerate\n");
1027 avctx
->frame_rate
/= i
;
1028 avctx
->frame_rate_base
/= i
;
1032 if(s
->codec_id
==CODEC_ID_MJPEG
){
1033 s
->intra_quant_bias
= 1<<(QUANT_BIAS_SHIFT
-1); //(a + x/2)/x
1034 s
->inter_quant_bias
= 0;
1035 }else if(s
->mpeg_quant
|| s
->codec_id
==CODEC_ID_MPEG1VIDEO
|| s
->codec_id
==CODEC_ID_MPEG2VIDEO
){
1036 s
->intra_quant_bias
= 3<<(QUANT_BIAS_SHIFT
-3); //(a + x*3/8)/x
1037 s
->inter_quant_bias
= 0;
1039 s
->intra_quant_bias
=0;
1040 s
->inter_quant_bias
=-(1<<(QUANT_BIAS_SHIFT
-2)); //(a - x/4)/x
1043 if(avctx
->intra_quant_bias
!= FF_DEFAULT_QUANT_BIAS
)
1044 s
->intra_quant_bias
= avctx
->intra_quant_bias
;
1045 if(avctx
->inter_quant_bias
!= FF_DEFAULT_QUANT_BIAS
)
1046 s
->inter_quant_bias
= avctx
->inter_quant_bias
;
1048 avcodec_get_chroma_sub_sample(avctx
->pix_fmt
, &chroma_h_shift
, &chroma_v_shift
);
1050 av_reduce(&s
->time_increment_resolution
, &dummy
, s
->avctx
->frame_rate
, s
->avctx
->frame_rate_base
, (1<<16)-1);
1051 s
->time_increment_bits
= av_log2(s
->time_increment_resolution
- 1) + 1;
1053 switch(avctx
->codec
->id
) {
1054 case CODEC_ID_MPEG1VIDEO
:
1055 s
->out_format
= FMT_MPEG1
;
1056 s
->low_delay
= 0; //s->max_b_frames ? 0 : 1;
1057 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
1059 case CODEC_ID_MPEG2VIDEO
:
1060 s
->out_format
= FMT_MPEG1
;
1061 s
->low_delay
= 0; //s->max_b_frames ? 0 : 1;
1062 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
1065 case CODEC_ID_LJPEG
:
1066 case CODEC_ID_MJPEG
:
1067 s
->out_format
= FMT_MJPEG
;
1068 s
->intra_only
= 1; /* force intra only for jpeg */
1069 s
->mjpeg_write_tables
= 1; /* write all tables */
1070 s
->mjpeg_data_only_frames
= 0; /* write all the needed headers */
1071 s
->mjpeg_vsample
[0] = 1<<chroma_v_shift
;
1072 s
->mjpeg_vsample
[1] = 1;
1073 s
->mjpeg_vsample
[2] = 1;
1074 s
->mjpeg_hsample
[0] = 1<<chroma_h_shift
;
1075 s
->mjpeg_hsample
[1] = 1;
1076 s
->mjpeg_hsample
[2] = 1;
1077 if (mjpeg_init(s
) < 0)
1084 if (h263_get_picture_format(s
->width
, s
->height
) == 7) {
1085 av_log(avctx
, AV_LOG_INFO
, "Input picture size isn't suitable for h263 codec! try h263+\n");
1088 s
->out_format
= FMT_H263
;
1089 s
->obmc
= (avctx
->flags
& CODEC_FLAG_OBMC
) ?
1:0;
1093 case CODEC_ID_H263P
:
1094 s
->out_format
= FMT_H263
;
1097 s
->umvplus
= (avctx
->flags
& CODEC_FLAG_H263P_UMV
) ?
1:0;
1098 s
->h263_aic
= (avctx
->flags
& CODEC_FLAG_H263P_AIC
) ?
1:0;
1099 s
->modified_quant
= s
->h263_aic
;
1100 s
->alt_inter_vlc
= (avctx
->flags
& CODEC_FLAG_H263P_AIV
) ?
1:0;
1101 s
->obmc
= (avctx
->flags
& CODEC_FLAG_OBMC
) ?
1:0;
1102 s
->loop_filter
= (avctx
->flags
& CODEC_FLAG_LOOP_FILTER
) ?
1:0;
1103 s
->unrestricted_mv
= s
->obmc
|| s
->loop_filter
|| s
->umvplus
;
1104 s
->h263_slice_structured
= (s
->flags
& CODEC_FLAG_H263P_SLICE_STRUCT
) ?
1:0;
1107 /* These are just to be sure */
1112 s
->out_format
= FMT_H263
;
1113 s
->h263_flv
= 2; /* format = 1; 11-bit codes */
1114 s
->unrestricted_mv
= 1;
1115 s
->rtp_mode
=0; /* don't allow GOB */
1120 s
->out_format
= FMT_H263
;
1124 case CODEC_ID_MPEG4
:
1125 s
->out_format
= FMT_H263
;
1127 s
->unrestricted_mv
= 1;
1128 s
->low_delay
= s
->max_b_frames ?
0 : 1;
1129 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
1131 case CODEC_ID_MSMPEG4V1
:
1132 s
->out_format
= FMT_H263
;
1133 s
->h263_msmpeg4
= 1;
1135 s
->unrestricted_mv
= 1;
1136 s
->msmpeg4_version
= 1;
1140 case CODEC_ID_MSMPEG4V2
:
1141 s
->out_format
= FMT_H263
;
1142 s
->h263_msmpeg4
= 1;
1144 s
->unrestricted_mv
= 1;
1145 s
->msmpeg4_version
= 2;
1149 case CODEC_ID_MSMPEG4V3
:
1150 s
->out_format
= FMT_H263
;
1151 s
->h263_msmpeg4
= 1;
1153 s
->unrestricted_mv
= 1;
1154 s
->msmpeg4_version
= 3;
1155 s
->flipflop_rounding
=1;
1160 s
->out_format
= FMT_H263
;
1161 s
->h263_msmpeg4
= 1;
1163 s
->unrestricted_mv
= 1;
1164 s
->msmpeg4_version
= 4;
1165 s
->flipflop_rounding
=1;
1170 s
->out_format
= FMT_H263
;
1171 s
->h263_msmpeg4
= 1;
1173 s
->unrestricted_mv
= 1;
1174 s
->msmpeg4_version
= 5;
1175 s
->flipflop_rounding
=1;
1184 avctx
->has_b_frames
= !s
->low_delay
;
1189 if (MPV_common_init(s
) < 0)
1192 if(s
->modified_quant
)
1193 s
->chroma_qscale_table
= ff_h263_chroma_qscale_table
;
1194 s
->progressive_frame
=
1195 s
->progressive_sequence
= !(avctx
->flags
& (CODEC_FLAG_INTERLACED_DCT
|CODEC_FLAG_INTERLACED_ME
));
1196 s
->quant_precision
=5;
1198 ff_set_cmp(&s
->dsp
, s
->dsp
.ildct_cmp
, s
->avctx
->ildct_cmp
);
1200 #ifdef CONFIG_ENCODERS
1202 if (s
->out_format
== FMT_H263
)
1203 h263_encode_init(s
);
1204 if(s
->msmpeg4_version
)
1205 ff_msmpeg4_encode_init(s
);
1207 if (s
->out_format
== FMT_MPEG1
)
1208 ff_mpeg1_encode_init(s
);
1213 int j
= s
->dsp
.idct_permutation
[i
];
1215 if(s
->codec_id
==CODEC_ID_MPEG4
&& s
->mpeg_quant
){
1216 s
->intra_matrix
[j
] = ff_mpeg4_default_intra_matrix
[i
];
1217 s
->inter_matrix
[j
] = ff_mpeg4_default_non_intra_matrix
[i
];
1218 }else if(s
->out_format
== FMT_H263
){
1219 s
->intra_matrix
[j
] =
1220 s
->inter_matrix
[j
] = ff_mpeg1_default_non_intra_matrix
[i
];
1224 s
->intra_matrix
[j
] = ff_mpeg1_default_intra_matrix
[i
];
1225 s
->inter_matrix
[j
] = ff_mpeg1_default_non_intra_matrix
[i
];
1227 if(s
->avctx
->intra_matrix
)
1228 s
->intra_matrix
[j
] = s
->avctx
->intra_matrix
[i
];
1229 if(s
->avctx
->inter_matrix
)
1230 s
->inter_matrix
[j
] = s
->avctx
->inter_matrix
[i
];
1233 /* precompute matrix */
1234 /* for mjpeg, we do include qscale in the matrix */
1235 if (s
->out_format
!= FMT_MJPEG
) {
1236 convert_matrix(&s
->dsp
, s
->q_intra_matrix
, s
->q_intra_matrix16
,
1237 s
->intra_matrix
, s
->intra_quant_bias
, 1, 31);
1238 convert_matrix(&s
->dsp
, s
->q_inter_matrix
, s
->q_inter_matrix16
,
1239 s
->inter_matrix
, s
->inter_quant_bias
, 1, 31);
1242 if(ff_rate_control_init(s
) < 0)
1248 int MPV_encode_end(AVCodecContext
*avctx
)
1250 MpegEncContext
*s
= avctx
->priv_data
;
1256 ff_rate_control_uninit(s
);
1259 if (s
->out_format
== FMT_MJPEG
)
1262 av_freep(&avctx
->extradata
);
1267 #endif //CONFIG_ENCODERS
1269 void init_rl(RLTable
*rl
)
1271 int8_t max_level
[MAX_RUN
+1], max_run
[MAX_LEVEL
+1];
1272 uint8_t index_run
[MAX_RUN
+1];
1273 int last
, run
, level
, start
, end
, i
;
1275 /* compute max_level[], max_run[] and index_run[] */
1276 for(last
=0;last
<2;last
++) {
1285 memset(max_level
, 0, MAX_RUN
+ 1);
1286 memset(max_run
, 0, MAX_LEVEL
+ 1);
1287 memset(index_run
, rl
->n
, MAX_RUN
+ 1);
1288 for(i
=start
;i
<end
;i
++) {
1289 run
= rl
->table_run
[i
];
1290 level
= rl
->table_level
[i
];
1291 if (index_run
[run
] == rl
->n
)
1293 if (level
> max_level
[run
])
1294 max_level
[run
] = level
;
1295 if (run
> max_run
[level
])
1296 max_run
[level
] = run
;
1298 rl
->max_level
[last
] = av_malloc(MAX_RUN
+ 1);
1299 memcpy(rl
->max_level
[last
], max_level
, MAX_RUN
+ 1);
1300 rl
->max_run
[last
] = av_malloc(MAX_LEVEL
+ 1);
1301 memcpy(rl
->max_run
[last
], max_run
, MAX_LEVEL
+ 1);
1302 rl
->index_run
[last
] = av_malloc(MAX_RUN
+ 1);
1303 memcpy(rl
->index_run
[last
], index_run
, MAX_RUN
+ 1);
/* draw the edges of width 'w' of an image of size width, height
   by replicating the border pixels into the surrounding margin
   (needed for unrestricted motion vectors / motion prediction) */
//FIXME check that this is ok for mpeg4 interlaced
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    /* first pixel of the bottom row of the image */
    last_line = buf + (height - 1) * wrap;
    for(i=0;i<w;i++) {
        /* top and bottom: replicate first/last row into the margin rows */
        memcpy(buf - (i + 1) * wrap, buf, width);
        memcpy(last_line + (i + 1) * wrap, last_line, width);
    }
    /* left and right: replicate the first/last pixel of every row */
    ptr = buf;
    for(i=0;i<height;i++) {
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
        ptr += wrap;
    }
    /* corners: fill the four w x w corner areas from the corner pixels */
    for(i=0;i<w;i++) {
        memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
        memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
        memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
        memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
    }
}
1336 int ff_find_unused_picture(MpegEncContext
*s
, int shared
){
1340 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1341 if(s
->picture
[i
].data
[0]==NULL
&& s
->picture
[i
].type
==0) return i
;
1344 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1345 if(s
->picture
[i
].data
[0]==NULL
&& s
->picture
[i
].type
!=0) return i
; //FIXME
1347 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1348 if(s
->picture
[i
].data
[0]==NULL
) return i
;
1356 static void update_noise_reduction(MpegEncContext
*s
){
1359 for(intra
=0; intra
<2; intra
++){
1360 if(s
->dct_count
[intra
] > (1<<16)){
1361 for(i
=0; i
<64; i
++){
1362 s
->dct_error_sum
[intra
][i
] >>=1;
1364 s
->dct_count
[intra
] >>= 1;
1367 for(i
=0; i
<64; i
++){
1368 s
->dct_offset
[intra
][i
]= (s
->avctx
->noise_reduction
* s
->dct_count
[intra
] + s
->dct_error_sum
[intra
][i
]/2) / (s
->dct_error_sum
[intra
][i
]+1);
1374 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1376 int MPV_frame_start(MpegEncContext
*s
, AVCodecContext
*avctx
)
1382 assert(s
->last_picture_ptr
==NULL
|| s
->out_format
!= FMT_H264
|| s
->codec_id
== CODEC_ID_SVQ3
);
1384 /* mark&release old frames */
1385 if (s
->pict_type
!= B_TYPE
&& s
->last_picture_ptr
&& s
->last_picture_ptr
!= s
->next_picture_ptr
&& s
->last_picture_ptr
->data
[0]) {
1386 avctx
->release_buffer(avctx
, (AVFrame
*)s
->last_picture_ptr
);
1388 /* release forgotten pictures */
1389 /* if(mpeg124/h263) */
1391 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1392 if(s
->picture
[i
].data
[0] && &s
->picture
[i
] != s
->next_picture_ptr
&& s
->picture
[i
].reference
){
1393 av_log(avctx
, AV_LOG_ERROR
, "releasing zombie picture\n");
1394 avctx
->release_buffer(avctx
, (AVFrame
*)&s
->picture
[i
]);
1401 /* release non refernce frames */
1402 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1403 if(s
->picture
[i
].data
[0] && !s
->picture
[i
].reference
/*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1404 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)&s
->picture
[i
]);
1408 if(s
->current_picture_ptr
&& s
->current_picture_ptr
->data
[0]==NULL
)
1409 pic
= (AVFrame
*)s
->current_picture_ptr
; //we allready have a unused image (maybe it was set before reading the header)
1411 i
= ff_find_unused_picture(s
, 0);
1412 pic
= (AVFrame
*)&s
->picture
[i
];
1415 pic
->reference
= s
->pict_type
!= B_TYPE
&& !s
->dropable ?
3 : 0;
1417 pic
->coded_picture_number
= s
->coded_picture_number
++;
1419 if( alloc_picture(s
, (Picture
*)pic
, 0) < 0)
1422 s
->current_picture_ptr
= (Picture
*)pic
;
1423 s
->current_picture_ptr
->top_field_first
= s
->top_field_first
; //FIXME use only the vars from current_pic
1424 s
->current_picture_ptr
->interlaced_frame
= !s
->progressive_frame
&& !s
->progressive_sequence
;
1427 s
->current_picture_ptr
->pict_type
= s
->pict_type
;
1428 // if(s->flags && CODEC_FLAG_QSCALE)
1429 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1430 s
->current_picture_ptr
->key_frame
= s
->pict_type
== I_TYPE
;
1432 copy_picture(&s
->current_picture
, s
->current_picture_ptr
);
1434 if(s
->out_format
!= FMT_H264
|| s
->codec_id
== CODEC_ID_SVQ3
){
1435 if (s
->pict_type
!= B_TYPE
) {
1436 s
->last_picture_ptr
= s
->next_picture_ptr
;
1438 s
->next_picture_ptr
= s
->current_picture_ptr
;
1440 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1441 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
1442 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
1443 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
1444 s->pict_type, s->dropable);*/
1446 if(s
->last_picture_ptr
) copy_picture(&s
->last_picture
, s
->last_picture_ptr
);
1447 if(s
->next_picture_ptr
) copy_picture(&s
->next_picture
, s
->next_picture_ptr
);
1449 if(s
->pict_type
!= I_TYPE
&& (s
->last_picture_ptr
==NULL
|| s
->last_picture_ptr
->data
[0]==NULL
)){
1450 av_log(avctx
, AV_LOG_ERROR
, "warning: first frame is no keyframe\n");
1451 assert(s
->pict_type
!= B_TYPE
); //these should have been dropped if we dont have a reference
1455 assert(s
->pict_type
== I_TYPE
|| (s
->last_picture_ptr
&& s
->last_picture_ptr
->data
[0]));
1457 if(s
->picture_structure
!=PICT_FRAME
){
1460 if(s
->picture_structure
== PICT_BOTTOM_FIELD
){
1461 s
->current_picture
.data
[i
] += s
->current_picture
.linesize
[i
];
1463 s
->current_picture
.linesize
[i
] *= 2;
1464 s
->last_picture
.linesize
[i
] *=2;
1465 s
->next_picture
.linesize
[i
] *=2;
1470 s
->hurry_up
= s
->avctx
->hurry_up
;
1471 s
->error_resilience
= avctx
->error_resilience
;
1473 /* set dequantizer, we cant do it during init as it might change for mpeg4
1474 and we cant do it in the header decode as init isnt called for mpeg4 there yet */
1475 if(s
->mpeg_quant
|| s
->codec_id
== CODEC_ID_MPEG2VIDEO
){
1476 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg2_intra
;
1477 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg2_inter
;
1478 }else if(s
->out_format
== FMT_H263
){
1479 s
->dct_unquantize_intra
= s
->dct_unquantize_h263_intra
;
1480 s
->dct_unquantize_inter
= s
->dct_unquantize_h263_inter
;
1481 }else if(s
->out_format
== FMT_H261
){
1482 s
->dct_unquantize_intra
= s
->dct_unquantize_h261_intra
;
1483 s
->dct_unquantize_inter
= s
->dct_unquantize_h261_inter
;
1485 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg1_intra
;
1486 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg1_inter
;
1489 if(s
->dct_error_sum
){
1490 assert(s
->avctx
->noise_reduction
&& s
->encoding
);
1492 update_noise_reduction(s
);
1496 if(s
->avctx
->xvmc_acceleration
)
1497 return XVMC_field_start(s
, avctx
);
1502 /* generic function for encode/decode called after a frame has been coded/decoded */
1503 void MPV_frame_end(MpegEncContext
*s
)
1506 /* draw edge for correct motion prediction if outside */
1508 //just to make sure that all data is rendered.
1509 if(s
->avctx
->xvmc_acceleration
){
1513 if(s
->unrestricted_mv
&& s
->pict_type
!= B_TYPE
&& !s
->intra_only
&& !(s
->flags
&CODEC_FLAG_EMU_EDGE
)) {
1514 draw_edges(s
->current_picture
.data
[0], s
->linesize
, s
->h_edge_pos
, s
->v_edge_pos
, EDGE_WIDTH
);
1515 draw_edges(s
->current_picture
.data
[1], s
->uvlinesize
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1, EDGE_WIDTH
/2);
1516 draw_edges(s
->current_picture
.data
[2], s
->uvlinesize
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1, EDGE_WIDTH
/2);
1520 s
->last_pict_type
= s
->pict_type
;
1521 if(s
->pict_type
!=B_TYPE
){
1522 s
->last_non_b_pict_type
= s
->pict_type
;
1525 /* copy back current_picture variables */
1526 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1527 if(s
->picture
[i
].data
[0] == s
->current_picture
.data
[0]){
1528 s
->picture
[i
]= s
->current_picture
;
1532 assert(i
<MAX_PICTURE_COUNT
);
1536 /* release non refernce frames */
1537 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1538 if(s
->picture
[i
].data
[0] && !s
->picture
[i
].reference
/*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1539 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)&s
->picture
[i
]);
1543 // clear copies, to avoid confusion
1545 memset(&s
->last_picture
, 0, sizeof(Picture
));
1546 memset(&s
->next_picture
, 0, sizeof(Picture
));
1547 memset(&s
->current_picture
, 0, sizeof(Picture
));
1549 s
->avctx
->coded_frame
= (AVFrame
*)s
->current_picture_ptr
;
/**
 * draws a line from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int t, x, y, fr, f;

    /* clamp both endpoints into the image so all writes stay in bounds */
    sx= clip(sx, 0, w-1);
    sy= clip(sy, 0, h-1);
    ex= clip(ex, 0, w-1);
    ey= clip(ey, 0, h-1);

    buf[sy*stride + sx]+= color;

    if(ABS(ex - sx) > ABS(ey - sy)){
        /* mostly horizontal: step along x, anti-alias between two y rows */
        if(sx > ex){
            t=sx; sx=ex; ex=t;
            t=sy; sy=ey; ey=t;
        }
        buf+= sx + sy*stride;
        ex-= sx;
        f= ((ey-sy)<<16)/ex;   /* 16.16 fixed-point slope; ex > 0 here */
        for(x= 0; x <= ex; x++){
            y = (x*f)>>16;
            fr= (x*f)&0xFFFF;
            /* NOTE(review): the (y+1) write may touch one row below the
               clipped endpoint — matches the original behavior */
            buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
            buf[(y+1)*stride + x]+= (color*         fr )>>16;
        }
    }else{
        /* mostly vertical: step along y, anti-alias between two x columns */
        if(sy > ey){
            t=sx; sx=ex; ex=t;
            t=sy; sy=ey; ey=t;
        }
        buf+= sx + sy*stride;
        ey-= sy;
        if(ey) f= ((ex-sx)<<16)/ey;
        else   f= 0;
        for(y= 0; y <= ey; y++){
            x = (y*f)>>16;
            fr= (y*f)&0xFFFF;
            buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
            buf[y*stride + x+1]+= (color*         fr )>>16;
        }
    }
}
/**
 * draws an arrow from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int vx, vy;

    /* loose clamp only; draw_line() does the exact in-image clipping */
    sx= clip(sx, -100, w+100);
    sy= clip(sy, -100, h+100);
    ex= clip(ex, -100, w+100);
    ey= clip(ey, -100, h+100);

    vx= ex - sx;
    vy= ey - sy;

    /* draw the two head strokes only if the shaft is long enough to see them */
    if(vx*vx + vy*vy > 3*3){
        int rx=  vx + vy;               /* shaft direction rotated by +/-45 deg */
        int ry= -vx + vy;
        int length= ff_sqrt((rx*rx + ry*ry)<<8);

        //FIXME subpixel accuracy
        rx= ROUNDED_DIV(rx*3<<4, length);
        ry= ROUNDED_DIV(ry*3<<4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1635 * prints debuging info for the given picture.
1637 void ff_print_debug_info(MpegEncContext
*s
, AVFrame
*pict
){
1639 if(!pict
|| !pict
->mb_type
) return;
1641 if(s
->avctx
->debug
&(FF_DEBUG_SKIP
| FF_DEBUG_QP
| FF_DEBUG_MB_TYPE
)){
1644 av_log(s
->avctx
,AV_LOG_DEBUG
,"New frame, type: ");
1645 switch (pict
->pict_type
) {
1646 case FF_I_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"I\n"); break;
1647 case FF_P_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"P\n"); break;
1648 case FF_B_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"B\n"); break;
1649 case FF_S_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"S\n"); break;
1650 case FF_SI_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"SI\n"); break;
1651 case FF_SP_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"SP\n"); break;
1653 for(y
=0; y
<s
->mb_height
; y
++){
1654 for(x
=0; x
<s
->mb_width
; x
++){
1655 if(s
->avctx
->debug
&FF_DEBUG_SKIP
){
1656 int count
= s
->mbskip_table
[x
+ y
*s
->mb_stride
];
1657 if(count
>9) count
=9;
1658 av_log(s
->avctx
, AV_LOG_DEBUG
, "%1d", count
);
1660 if(s
->avctx
->debug
&FF_DEBUG_QP
){
1661 av_log(s
->avctx
, AV_LOG_DEBUG
, "%2d", pict
->qscale_table
[x
+ y
*s
->mb_stride
]);
1663 if(s
->avctx
->debug
&FF_DEBUG_MB_TYPE
){
1664 int mb_type
= pict
->mb_type
[x
+ y
*s
->mb_stride
];
1665 //Type & MV direction
1667 av_log(s
->avctx
, AV_LOG_DEBUG
, "P");
1668 else if(IS_INTRA(mb_type
) && IS_ACPRED(mb_type
))
1669 av_log(s
->avctx
, AV_LOG_DEBUG
, "A");
1670 else if(IS_INTRA4x4(mb_type
))
1671 av_log(s
->avctx
, AV_LOG_DEBUG
, "i");
1672 else if(IS_INTRA16x16(mb_type
))
1673 av_log(s
->avctx
, AV_LOG_DEBUG
, "I");
1674 else if(IS_DIRECT(mb_type
) && IS_SKIP(mb_type
))
1675 av_log(s
->avctx
, AV_LOG_DEBUG
, "d");
1676 else if(IS_DIRECT(mb_type
))
1677 av_log(s
->avctx
, AV_LOG_DEBUG
, "D");
1678 else if(IS_GMC(mb_type
) && IS_SKIP(mb_type
))
1679 av_log(s
->avctx
, AV_LOG_DEBUG
, "g");
1680 else if(IS_GMC(mb_type
))
1681 av_log(s
->avctx
, AV_LOG_DEBUG
, "G");
1682 else if(IS_SKIP(mb_type
))
1683 av_log(s
->avctx
, AV_LOG_DEBUG
, "S");
1684 else if(!USES_LIST(mb_type
, 1))
1685 av_log(s
->avctx
, AV_LOG_DEBUG
, ">");
1686 else if(!USES_LIST(mb_type
, 0))
1687 av_log(s
->avctx
, AV_LOG_DEBUG
, "<");
1689 assert(USES_LIST(mb_type
, 0) && USES_LIST(mb_type
, 1));
1690 av_log(s
->avctx
, AV_LOG_DEBUG
, "X");
1695 av_log(s
->avctx
, AV_LOG_DEBUG
, "+");
1696 else if(IS_16X8(mb_type
))
1697 av_log(s
->avctx
, AV_LOG_DEBUG
, "-");
1698 else if(IS_8X16(mb_type
))
1699 av_log(s
->avctx
, AV_LOG_DEBUG
, "¦");
1700 else if(IS_INTRA(mb_type
) || IS_16X16(mb_type
))
1701 av_log(s
->avctx
, AV_LOG_DEBUG
, " ");
1703 av_log(s
->avctx
, AV_LOG_DEBUG
, "?");
1706 if(IS_INTERLACED(mb_type
) && s
->codec_id
== CODEC_ID_H264
)
1707 av_log(s
->avctx
, AV_LOG_DEBUG
, "=");
1709 av_log(s
->avctx
, AV_LOG_DEBUG
, " ");
1711 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1713 av_log(s
->avctx
, AV_LOG_DEBUG
, "\n");
1717 if((s
->avctx
->debug
&(FF_DEBUG_VIS_QP
|FF_DEBUG_VIS_MB_TYPE
)) || (s
->avctx
->debug_mv
)){
1718 const int shift
= 1 + s
->quarter_sample
;
1722 int h_chroma_shift
, v_chroma_shift
;
1723 s
->low_delay
=0; //needed to see the vectors without trashing the buffers
1725 avcodec_get_chroma_sub_sample(s
->avctx
->pix_fmt
, &h_chroma_shift
, &v_chroma_shift
);
1727 memcpy(s
->visualization_buffer
[i
], pict
->data
[i
], (i
==0) ? pict
->linesize
[i
]*s
->height
:pict
->linesize
[i
]*s
->height
>> v_chroma_shift
);
1728 pict
->data
[i
]= s
->visualization_buffer
[i
];
1730 pict
->type
= FF_BUFFER_TYPE_COPY
;
1733 for(mb_y
=0; mb_y
<s
->mb_height
; mb_y
++){
1735 for(mb_x
=0; mb_x
<s
->mb_width
; mb_x
++){
1736 const int mb_index
= mb_x
+ mb_y
*s
->mb_stride
;
1737 if((s
->avctx
->debug_mv
) && pict
->motion_val
){
1739 for(type
=0; type
<3; type
++){
1742 case 0: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_P_FOR
)) || (pict
->pict_type
!=FF_P_TYPE
))
1746 case 1: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_B_FOR
)) || (pict
->pict_type
!=FF_B_TYPE
))
1750 case 2: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_B_BACK
)) || (pict
->pict_type
!=FF_B_TYPE
))
1755 if(!USES_LIST(pict
->mb_type
[mb_index
], direction
))
1759 if(IS_8X8(pict
->mb_type
[mb_index
])){
1762 int sx
= mb_x
*16 + 4 + 8*(i
&1);
1763 int sy
= mb_y
*16 + 4 + 8*(i
>>1);
1764 int xy
= mb_x
*2 + (i
&1) + (mb_y
*2 + (i
>>1))*s
->b8_stride
;
1765 int mx
= (pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1766 int my
= (pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1767 draw_arrow(ptr
, sx
, sy
, mx
, my
, s
->width
, s
->height
, s
->linesize
, 100);
1769 }else if(IS_16X8(pict
->mb_type
[mb_index
])){
1773 int sy
=mb_y
*16 + 4 + 8*i
;
1774 int xy
= mb_x
*2 + (mb_y
*2 + i
)*s
->b8_stride
;
1775 int mx
=(pict
->motion_val
[direction
][xy
][0]>>shift
);
1776 int my
=(pict
->motion_val
[direction
][xy
][1]>>shift
);
1778 if(IS_INTERLACED(pict
->mb_type
[mb_index
]))
1781 draw_arrow(ptr
, sx
, sy
, mx
+sx
, my
+sy
, s
->width
, s
->height
, s
->linesize
, 100);
1784 int sx
= mb_x
*16 + 8;
1785 int sy
= mb_y
*16 + 8;
1786 int xy
= mb_x
*2 + mb_y
*2*s
->b8_stride
;
1787 int mx
= (pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1788 int my
= (pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1789 draw_arrow(ptr
, sx
, sy
, mx
, my
, s
->width
, s
->height
, s
->linesize
, 100);
1793 if((s
->avctx
->debug
&FF_DEBUG_VIS_QP
) && pict
->motion_val
){
1794 uint64_t c
= (pict
->qscale_table
[mb_index
]*128/31) * 0x0101010101010101ULL
;
1797 *(uint64_t*)(pict
->data
[1] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[1])= c
;
1798 *(uint64_t*)(pict
->data
[2] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[2])= c
;
1801 if((s
->avctx
->debug
&FF_DEBUG_VIS_MB_TYPE
) && pict
->motion_val
){
1802 int mb_type
= pict
->mb_type
[mb_index
];
1805 #define COLOR(theta, r)\
1806 u= (int)(128 + r*cos(theta*3.141592/180));\
1807 v= (int)(128 + r*sin(theta*3.141592/180));
1811 if(IS_PCM(mb_type
)){
1813 }else if((IS_INTRA(mb_type
) && IS_ACPRED(mb_type
)) || IS_INTRA16x16(mb_type
)){
1815 }else if(IS_INTRA4x4(mb_type
)){
1817 }else if(IS_DIRECT(mb_type
) && IS_SKIP(mb_type
)){
1819 }else if(IS_DIRECT(mb_type
)){
1821 }else if(IS_GMC(mb_type
) && IS_SKIP(mb_type
)){
1823 }else if(IS_GMC(mb_type
)){
1825 }else if(IS_SKIP(mb_type
)){
1827 }else if(!USES_LIST(mb_type
, 1)){
1829 }else if(!USES_LIST(mb_type
, 0)){
1832 assert(USES_LIST(mb_type
, 0) && USES_LIST(mb_type
, 1));
1836 u
*= 0x0101010101010101ULL
;
1837 v
*= 0x0101010101010101ULL
;
1839 *(uint64_t*)(pict
->data
[1] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[1])= u
;
1840 *(uint64_t*)(pict
->data
[2] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[2])= v
;
1844 if(IS_8X8(mb_type
) || IS_16X8(mb_type
)){
1845 *(uint64_t*)(pict
->data
[0] + 16*mb_x
+ 0 + (16*mb_y
+ 8)*pict
->linesize
[0])^= 0x8080808080808080ULL
;
1846 *(uint64_t*)(pict
->data
[0] + 16*mb_x
+ 8 + (16*mb_y
+ 8)*pict
->linesize
[0])^= 0x8080808080808080ULL
;
1848 if(IS_8X8(mb_type
) || IS_8X16(mb_type
)){
1850 pict
->data
[0][16*mb_x
+ 8 + (16*mb_y
+ y
)*pict
->linesize
[0]]^= 0x80;
1853 if(IS_INTERLACED(mb_type
) && s
->codec_id
== CODEC_ID_H264
){
1857 s
->mbskip_table
[mb_index
]=0;
1863 #ifdef CONFIG_ENCODERS
/**
 * Sum of absolute differences of a 16x16 pixel block against a constant
 * reference value (used to estimate how "flat" a block is).
 * @param src top-left pixel of the block
 * @param ref reference value each pixel is compared against
 * @param stride line size of the image
 * @return the sum over all 256 pixels of |src[pixel] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride){
    int x, y;
    int sum= 0;

    for(y=0; y<16; y++){
        for(x=0; x<16; x++){
            const int d= src[x + y*stride] - ref;
            sum += d >= 0 ? d : -d;   /* equivalent to ABS(...) in the original */
        }
    }

    return sum;
}
1878 static int get_intra_count(MpegEncContext
*s
, uint8_t *src
, uint8_t *ref
, int stride
){
1885 for(y
=0; y
<h
; y
+=16){
1886 for(x
=0; x
<w
; x
+=16){
1887 int offset
= x
+ y
*stride
;
1888 int sad
= s
->dsp
.sad
[0](NULL
, src
+ offset
, ref
+ offset
, stride
, 16);
1889 int mean
= (s
->dsp
.pix_sum(src
+ offset
, stride
) + 128)>>8;
1890 int sae
= get_sae(src
+ offset
, mean
, stride
);
1892 acc
+= sae
+ 500 < sad
;
1899 static int load_input_picture(MpegEncContext
*s
, AVFrame
*pic_arg
){
1902 const int encoding_delay
= s
->max_b_frames
;
1906 if(encoding_delay
&& !(s
->flags
&CODEC_FLAG_INPUT_PRESERVED
)) direct
=0;
1907 if(pic_arg
->linesize
[0] != s
->linesize
) direct
=0;
1908 if(pic_arg
->linesize
[1] != s
->uvlinesize
) direct
=0;
1909 if(pic_arg
->linesize
[2] != s
->uvlinesize
) direct
=0;
1911 // av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1914 i
= ff_find_unused_picture(s
, 1);
1916 pic
= (AVFrame
*)&s
->picture
[i
];
1920 pic
->data
[i
]= pic_arg
->data
[i
];
1921 pic
->linesize
[i
]= pic_arg
->linesize
[i
];
1923 alloc_picture(s
, (Picture
*)pic
, 1);
1926 i
= ff_find_unused_picture(s
, 0);
1928 pic
= (AVFrame
*)&s
->picture
[i
];
1931 alloc_picture(s
, (Picture
*)pic
, 0);
1933 if( pic
->data
[0] + offset
== pic_arg
->data
[0]
1934 && pic
->data
[1] + offset
== pic_arg
->data
[1]
1935 && pic
->data
[2] + offset
== pic_arg
->data
[2]){
1938 int h_chroma_shift
, v_chroma_shift
;
1939 avcodec_get_chroma_sub_sample(s
->avctx
->pix_fmt
, &h_chroma_shift
, &v_chroma_shift
);
1942 int src_stride
= pic_arg
->linesize
[i
];
1943 int dst_stride
= i ? s
->uvlinesize
: s
->linesize
;
1944 int h_shift
= i ? h_chroma_shift
: 0;
1945 int v_shift
= i ? v_chroma_shift
: 0;
1946 int w
= s
->width
>>h_shift
;
1947 int h
= s
->height
>>v_shift
;
1948 uint8_t *src
= pic_arg
->data
[i
];
1949 uint8_t *dst
= pic
->data
[i
] + offset
;
1951 if(src_stride
==dst_stride
)
1952 memcpy(dst
, src
, src_stride
*h
);
1955 memcpy(dst
, src
, w
);
1963 copy_picture_attributes(s
, pic
, pic_arg
);
1965 pic
->display_picture_number
= s
->input_picture_number
++;
1967 if(pic
->pts
!= AV_NOPTS_VALUE
){
1968 if(s
->user_specified_pts
!= AV_NOPTS_VALUE
){
1969 int64_t time
= av_rescale(pic
->pts
, s
->avctx
->frame_rate
, s
->avctx
->frame_rate_base
*(int64_t)AV_TIME_BASE
);
1970 int64_t last
= av_rescale(s
->user_specified_pts
, s
->avctx
->frame_rate
, s
->avctx
->frame_rate_base
*(int64_t)AV_TIME_BASE
);
1973 av_log(s
->avctx
, AV_LOG_ERROR
, "Error, Invalid timestamp=%Ld, last=%Ld\n", pic
->pts
, s
->user_specified_pts
);
1977 s
->user_specified_pts
= pic
->pts
;
1979 if(s
->user_specified_pts
!= AV_NOPTS_VALUE
){
1980 s
->user_specified_pts
=
1981 pic
->pts
= s
->user_specified_pts
+ AV_TIME_BASE
*(int64_t)s
->avctx
->frame_rate_base
/ s
->avctx
->frame_rate
;
1982 av_log(s
->avctx
, AV_LOG_INFO
, "Warning: AVFrame.pts=? trying to guess (%Ld)\n", pic
->pts
);
1984 pic
->pts
= av_rescale(pic
->display_picture_number
*(int64_t)s
->avctx
->frame_rate_base
, AV_TIME_BASE
, s
->avctx
->frame_rate
);
1989 /* shift buffer entries */
1990 for(i
=1; i
<MAX_PICTURE_COUNT
/*s->encoding_delay+1*/; i
++)
1991 s
->input_picture
[i
-1]= s
->input_picture
[i
];
1993 s
->input_picture
[encoding_delay
]= (Picture
*)pic
;
1998 static void select_input_picture(MpegEncContext
*s
){
2001 for(i
=1; i
<MAX_PICTURE_COUNT
; i
++)
2002 s
->reordered_input_picture
[i
-1]= s
->reordered_input_picture
[i
];
2003 s
->reordered_input_picture
[MAX_PICTURE_COUNT
-1]= NULL
;
2005 /* set next picture types & ordering */
2006 if(s
->reordered_input_picture
[0]==NULL
&& s
->input_picture
[0]){
2007 if(/*s->picture_in_gop_number >= s->gop_size ||*/ s
->next_picture_ptr
==NULL
|| s
->intra_only
){
2008 s
->reordered_input_picture
[0]= s
->input_picture
[0];
2009 s
->reordered_input_picture
[0]->pict_type
= I_TYPE
;
2010 s
->reordered_input_picture
[0]->coded_picture_number
= s
->coded_picture_number
++;
2014 if(s
->flags
&CODEC_FLAG_PASS2
){
2015 for(i
=0; i
<s
->max_b_frames
+1; i
++){
2016 int pict_num
= s
->input_picture
[0]->display_picture_number
+ i
;
2017 int pict_type
= s
->rc_context
.entry
[pict_num
].new_pict_type
;
2018 s
->input_picture
[i
]->pict_type
= pict_type
;
2020 if(i
+ 1 >= s
->rc_context
.num_entries
) break;
2024 if(s
->input_picture
[0]->pict_type
){
2025 /* user selected pict_type */
2026 for(b_frames
=0; b_frames
<s
->max_b_frames
+1; b_frames
++){
2027 if(s
->input_picture
[b_frames
]->pict_type
!=B_TYPE
) break;
2030 if(b_frames
> s
->max_b_frames
){
2031 av_log(s
->avctx
, AV_LOG_ERROR
, "warning, too many bframes in a row\n");
2032 b_frames
= s
->max_b_frames
;
2034 }else if(s
->avctx
->b_frame_strategy
==0){
2035 b_frames
= s
->max_b_frames
;
2036 while(b_frames
&& !s
->input_picture
[b_frames
]) b_frames
--;
2037 }else if(s
->avctx
->b_frame_strategy
==1){
2038 for(i
=1; i
<s
->max_b_frames
+1; i
++){
2039 if(s
->input_picture
[i
] && s
->input_picture
[i
]->b_frame_score
==0){
2040 s
->input_picture
[i
]->b_frame_score
=
2041 get_intra_count(s
, s
->input_picture
[i
]->data
[0],
2042 s
->input_picture
[i
-1]->data
[0], s
->linesize
) + 1;
2045 for(i
=0; i
<s
->max_b_frames
; i
++){
2046 if(s
->input_picture
[i
]==NULL
|| s
->input_picture
[i
]->b_frame_score
- 1 > s
->mb_num
/40) break;
2049 b_frames
= FFMAX(0, i
-1);
2052 for(i
=0; i
<b_frames
+1; i
++){
2053 s
->input_picture
[i
]->b_frame_score
=0;
2056 av_log(s
->avctx
, AV_LOG_ERROR
, "illegal b frame strategy\n");
2061 //static int b_count=0;
2062 //b_count+= b_frames;
2063 //av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
2064 if(s
->picture_in_gop_number
+ b_frames
>= s
->gop_size
){
2065 if(s
->flags
& CODEC_FLAG_CLOSED_GOP
)
2067 s
->input_picture
[b_frames
]->pict_type
= I_TYPE
;
2070 if( (s
->flags
& CODEC_FLAG_CLOSED_GOP
)
2072 && s
->input_picture
[b_frames
]->pict_type
== I_TYPE
)
2075 s
->reordered_input_picture
[0]= s
->input_picture
[b_frames
];
2076 if(s
->reordered_input_picture
[0]->pict_type
!= I_TYPE
)
2077 s
->reordered_input_picture
[0]->pict_type
= P_TYPE
;
2078 s
->reordered_input_picture
[0]->coded_picture_number
= s
->coded_picture_number
++;
2079 for(i
=0; i
<b_frames
; i
++){
2080 s
->reordered_input_picture
[i
+1]= s
->input_picture
[i
];
2081 s
->reordered_input_picture
[i
+1]->pict_type
= B_TYPE
;
2082 s
->reordered_input_picture
[i
+1]->coded_picture_number
= s
->coded_picture_number
++;
2087 if(s
->reordered_input_picture
[0]){
2088 s
->reordered_input_picture
[0]->reference
= s
->reordered_input_picture
[0]->pict_type
!=B_TYPE ?
3 : 0;
2090 copy_picture(&s
->new_picture
, s
->reordered_input_picture
[0]);
2092 if(s
->reordered_input_picture
[0]->type
== FF_BUFFER_TYPE_SHARED
){
2093 // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable
2095 int i
= ff_find_unused_picture(s
, 0);
2096 Picture
*pic
= &s
->picture
[i
];
2098 /* mark us unused / free shared pic */
2100 s
->reordered_input_picture
[0]->data
[i
]= NULL
;
2101 s
->reordered_input_picture
[0]->type
= 0;
2103 pic
->reference
= s
->reordered_input_picture
[0]->reference
;
2105 alloc_picture(s
, pic
, 0);
2107 copy_picture_attributes(s
, (AVFrame
*)pic
, (AVFrame
*)s
->reordered_input_picture
[0]);
2109 s
->current_picture_ptr
= pic
;
2111 // input is not a shared pix -> reuse buffer for current_pix
2113 assert( s
->reordered_input_picture
[0]->type
==FF_BUFFER_TYPE_USER
2114 || s
->reordered_input_picture
[0]->type
==FF_BUFFER_TYPE_INTERNAL
);
2116 s
->current_picture_ptr
= s
->reordered_input_picture
[0];
2118 s
->new_picture
.data
[i
]+=16;
2121 copy_picture(&s
->current_picture
, s
->current_picture_ptr
);
2123 s
->picture_number
= s
->new_picture
.display_picture_number
;
2124 //printf("dpn:%d\n", s->picture_number);
2126 memset(&s
->new_picture
, 0, sizeof(Picture
));
2130 int MPV_encode_picture(AVCodecContext
*avctx
,
2131 unsigned char *buf
, int buf_size
, void *data
)
2133 MpegEncContext
*s
= avctx
->priv_data
;
2134 AVFrame
*pic_arg
= data
;
2135 int i
, stuffing_count
;
2137 if(avctx
->pix_fmt
!= PIX_FMT_YUV420P
){
2138 av_log(avctx
, AV_LOG_ERROR
, "this codec supports only YUV420P\n");
2142 for(i
=0; i
<avctx
->thread_count
; i
++){
2143 int start_y
= s
->thread_context
[i
]->start_mb_y
;
2144 int end_y
= s
->thread_context
[i
]-> end_mb_y
;
2145 int h
= s
->mb_height
;
2146 uint8_t *start
= buf
+ buf_size
*start_y
/h
;
2147 uint8_t *end
= buf
+ buf_size
* end_y
/h
;
2149 init_put_bits(&s
->thread_context
[i
]->pb
, start
, end
- start
);
2152 s
->picture_in_gop_number
++;
2154 if(load_input_picture(s
, pic_arg
) < 0)
2157 select_input_picture(s
);
2160 if(s
->new_picture
.data
[0]){
2161 s
->pict_type
= s
->new_picture
.pict_type
;
2163 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
2164 MPV_frame_start(s
, avctx
);
2166 encode_picture(s
, s
->picture_number
);
2168 avctx
->real_pict_num
= s
->picture_number
;
2169 avctx
->header_bits
= s
->header_bits
;
2170 avctx
->mv_bits
= s
->mv_bits
;
2171 avctx
->misc_bits
= s
->misc_bits
;
2172 avctx
->i_tex_bits
= s
->i_tex_bits
;
2173 avctx
->p_tex_bits
= s
->p_tex_bits
;
2174 avctx
->i_count
= s
->i_count
;
2175 avctx
->p_count
= s
->mb_num
- s
->i_count
- s
->skip_count
; //FIXME f/b_count in avctx
2176 avctx
->skip_count
= s
->skip_count
;
2180 if (s
->out_format
== FMT_MJPEG
)
2181 mjpeg_picture_trailer(s
);
2183 if(s
->flags
&CODEC_FLAG_PASS1
)
2184 ff_write_pass1_stats(s
);
2187 avctx
->error
[i
] += s
->current_picture_ptr
->error
[i
];
2190 flush_put_bits(&s
->pb
);
2191 s
->frame_bits
= put_bits_count(&s
->pb
);
2193 stuffing_count
= ff_vbv_update(s
, s
->frame_bits
);
2195 switch(s
->codec_id
){
2196 case CODEC_ID_MPEG1VIDEO
:
2197 case CODEC_ID_MPEG2VIDEO
:
2198 while(stuffing_count
--){
2199 put_bits(&s
->pb
, 8, 0);
2202 case CODEC_ID_MPEG4
:
2203 put_bits(&s
->pb
, 16, 0);
2204 put_bits(&s
->pb
, 16, 0x1C3);
2205 stuffing_count
-= 4;
2206 while(stuffing_count
--){
2207 put_bits(&s
->pb
, 8, 0xFF);
2211 av_log(s
->avctx
, AV_LOG_ERROR
, "vbv buffer overflow\n");
2213 flush_put_bits(&s
->pb
);
2214 s
->frame_bits
= put_bits_count(&s
->pb
);
2217 /* update mpeg1/2 vbv_delay for CBR */
2218 if(s
->avctx
->rc_max_rate
&& s
->avctx
->rc_min_rate
== s
->avctx
->rc_max_rate
&& s
->out_format
== FMT_MPEG1
2219 && 90000LL * (avctx
->rc_buffer_size
-1) <= s
->avctx
->rc_max_rate
*0xFFFFLL
){
2222 assert(s
->repeat_first_field
==0);
2224 vbv_delay
= lrintf(90000 * s
->rc_context
.buffer_index
/ s
->avctx
->rc_max_rate
);
2225 assert(vbv_delay
< 0xFFFF);
2227 s
->vbv_delay_ptr
[0] &= 0xF8;
2228 s
->vbv_delay_ptr
[0] |= vbv_delay
>>13;
2229 s
->vbv_delay_ptr
[1] = vbv_delay
>>5;
2230 s
->vbv_delay_ptr
[2] &= 0x07;
2231 s
->vbv_delay_ptr
[2] |= vbv_delay
<<3;
2233 s
->total_bits
+= s
->frame_bits
;
2234 avctx
->frame_bits
= s
->frame_bits
;
2236 assert((pbBufPtr(&s
->pb
) == s
->pb
.buf
));
2239 assert((s
->frame_bits
&7)==0);
2241 return s
->frame_bits
/8;
2244 #endif //CONFIG_ENCODERS
2246 static inline void gmc1_motion(MpegEncContext
*s
,
2247 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2248 uint8_t **ref_picture
)
2251 int offset
, src_x
, src_y
, linesize
, uvlinesize
;
2252 int motion_x
, motion_y
;
2255 motion_x
= s
->sprite_offset
[0][0];
2256 motion_y
= s
->sprite_offset
[0][1];
2257 src_x
= s
->mb_x
* 16 + (motion_x
>> (s
->sprite_warping_accuracy
+1));
2258 src_y
= s
->mb_y
* 16 + (motion_y
>> (s
->sprite_warping_accuracy
+1));
2259 motion_x
<<=(3-s
->sprite_warping_accuracy
);
2260 motion_y
<<=(3-s
->sprite_warping_accuracy
);
2261 src_x
= clip(src_x
, -16, s
->width
);
2262 if (src_x
== s
->width
)
2264 src_y
= clip(src_y
, -16, s
->height
);
2265 if (src_y
== s
->height
)
2268 linesize
= s
->linesize
;
2269 uvlinesize
= s
->uvlinesize
;
2271 ptr
= ref_picture
[0] + (src_y
* linesize
) + src_x
;
2273 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2274 if( (unsigned)src_x
>= s
->h_edge_pos
- 17
2275 || (unsigned)src_y
>= s
->v_edge_pos
- 17){
2276 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, linesize
, 17, 17, src_x
, src_y
, s
->h_edge_pos
, s
->v_edge_pos
);
2277 ptr
= s
->edge_emu_buffer
;
2281 if((motion_x
|motion_y
)&7){
2282 s
->dsp
.gmc1(dest_y
, ptr
, linesize
, 16, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2283 s
->dsp
.gmc1(dest_y
+8, ptr
+8, linesize
, 16, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2287 dxy
= ((motion_x
>>3)&1) | ((motion_y
>>2)&2);
2288 if (s
->no_rounding
){
2289 s
->dsp
.put_no_rnd_pixels_tab
[0][dxy
](dest_y
, ptr
, linesize
, 16);
2291 s
->dsp
.put_pixels_tab
[0][dxy
](dest_y
, ptr
, linesize
, 16);
2295 if(s
->flags
&CODEC_FLAG_GRAY
) return;
2297 motion_x
= s
->sprite_offset
[1][0];
2298 motion_y
= s
->sprite_offset
[1][1];
2299 src_x
= s
->mb_x
* 8 + (motion_x
>> (s
->sprite_warping_accuracy
+1));
2300 src_y
= s
->mb_y
* 8 + (motion_y
>> (s
->sprite_warping_accuracy
+1));
2301 motion_x
<<=(3-s
->sprite_warping_accuracy
);
2302 motion_y
<<=(3-s
->sprite_warping_accuracy
);
2303 src_x
= clip(src_x
, -8, s
->width
>>1);
2304 if (src_x
== s
->width
>>1)
2306 src_y
= clip(src_y
, -8, s
->height
>>1);
2307 if (src_y
== s
->height
>>1)
2310 offset
= (src_y
* uvlinesize
) + src_x
;
2311 ptr
= ref_picture
[1] + offset
;
2312 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2313 if( (unsigned)src_x
>= (s
->h_edge_pos
>>1) - 9
2314 || (unsigned)src_y
>= (s
->v_edge_pos
>>1) - 9){
2315 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2316 ptr
= s
->edge_emu_buffer
;
2320 s
->dsp
.gmc1(dest_cb
, ptr
, uvlinesize
, 8, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2322 ptr
= ref_picture
[2] + offset
;
2324 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2325 ptr
= s
->edge_emu_buffer
;
2327 s
->dsp
.gmc1(dest_cr
, ptr
, uvlinesize
, 8, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2332 static inline void gmc_motion(MpegEncContext
*s
,
2333 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2334 uint8_t **ref_picture
)
2337 int linesize
, uvlinesize
;
2338 const int a
= s
->sprite_warping_accuracy
;
2341 linesize
= s
->linesize
;
2342 uvlinesize
= s
->uvlinesize
;
2344 ptr
= ref_picture
[0];
2346 ox
= s
->sprite_offset
[0][0] + s
->sprite_delta
[0][0]*s
->mb_x
*16 + s
->sprite_delta
[0][1]*s
->mb_y
*16;
2347 oy
= s
->sprite_offset
[0][1] + s
->sprite_delta
[1][0]*s
->mb_x
*16 + s
->sprite_delta
[1][1]*s
->mb_y
*16;
2349 s
->dsp
.gmc(dest_y
, ptr
, linesize
, 16,
2352 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2353 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2354 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2355 s
->h_edge_pos
, s
->v_edge_pos
);
2356 s
->dsp
.gmc(dest_y
+8, ptr
, linesize
, 16,
2357 ox
+ s
->sprite_delta
[0][0]*8,
2358 oy
+ s
->sprite_delta
[1][0]*8,
2359 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2360 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2361 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2362 s
->h_edge_pos
, s
->v_edge_pos
);
2364 if(s
->flags
&CODEC_FLAG_GRAY
) return;
2366 ox
= s
->sprite_offset
[1][0] + s
->sprite_delta
[0][0]*s
->mb_x
*8 + s
->sprite_delta
[0][1]*s
->mb_y
*8;
2367 oy
= s
->sprite_offset
[1][1] + s
->sprite_delta
[1][0]*s
->mb_x
*8 + s
->sprite_delta
[1][1]*s
->mb_y
*8;
2369 ptr
= ref_picture
[1];
2370 s
->dsp
.gmc(dest_cb
, ptr
, uvlinesize
, 8,
2373 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2374 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2375 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2376 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2378 ptr
= ref_picture
[2];
2379 s
->dsp
.gmc(dest_cr
, ptr
, uvlinesize
, 8,
2382 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2383 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2384 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2385 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2389 * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
2390 * @param buf destination buffer
2391 * @param src source buffer
2392 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
2393 * @param block_w width of block
2394 * @param block_h height of block
2395 * @param src_x x coordinate of the top left sample of the block in the source buffer
2396 * @param src_y y coordinate of the top left sample of the block in the source buffer
2397 * @param w width of the source buffer
2398 * @param h height of the source buffer
void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
                         int src_x, int src_y, int w, int h){
    int x, y;
    int copy_x0, copy_y0, copy_x1, copy_y1;

    /* If the requested block lies entirely outside the image, move the
       source pointer so the block touches the image; the replication below
       then fills it completely from the nearest border samples. */
    if (src_y >= h) {
        src   += (h - 1 - src_y) * linesize;
        src_y  = h - 1;
    } else if (src_y <= -block_h) {
        src   += (1 - block_h - src_y) * linesize;
        src_y  = 1 - block_h;
    }
    if (src_x >= w) {
        src   += w - 1 - src_x;
        src_x  = w - 1;
    } else if (src_x <= -block_w) {
        src   += 1 - block_w - src_x;
        src_x  = 1 - block_w;
    }

    /* part of the block that overlaps the image, in block coordinates */
    copy_y0 = -src_y > 0 ? -src_y : 0;
    copy_x0 = -src_x > 0 ? -src_x : 0;
    copy_y1 = h - src_y < block_h ? h - src_y : block_h;
    copy_x1 = w - src_x < block_w ? w - src_x : block_w;

    /* copy the part that exists in the source */
    for (y = copy_y0; y < copy_y1; y++) {
        for (x = copy_x0; x < copy_x1; x++) {
            buf[x + y * linesize] = src[x + y * linesize];
        }
    }

    /* replicate the first copied row upwards */
    for (y = 0; y < copy_y0; y++) {
        for (x = copy_x0; x < copy_x1; x++) {
            buf[x + y * linesize] = buf[x + copy_y0 * linesize];
        }
    }

    /* replicate the last copied row downwards */
    for (y = copy_y1; y < block_h; y++) {
        for (x = copy_x0; x < copy_x1; x++) {
            buf[x + y * linesize] = buf[x + (copy_y1 - 1) * linesize];
        }
    }

    /* replicate leftmost/rightmost columns sideways; this also fills the corners */
    for (y = 0; y < block_h; y++) {
        for (x = 0; x < copy_x0; x++) {
            buf[x + y * linesize] = buf[copy_x0 + y * linesize];
        }
        for (x = copy_x1; x < block_w; x++) {
            buf[x + y * linesize] = buf[copy_x1 - 1 + y * linesize];
        }
    }
}
2459 static inline int hpel_motion(MpegEncContext
*s
,
2460 uint8_t *dest
, uint8_t *src
,
2461 int field_based
, int field_select
,
2462 int src_x
, int src_y
,
2463 int width
, int height
, int stride
,
2464 int h_edge_pos
, int v_edge_pos
,
2465 int w
, int h
, op_pixels_func
*pix_op
,
2466 int motion_x
, int motion_y
)
2471 dxy
= ((motion_y
& 1) << 1) | (motion_x
& 1);
2472 src_x
+= motion_x
>> 1;
2473 src_y
+= motion_y
>> 1;
2475 /* WARNING: do no forget half pels */
2476 src_x
= clip(src_x
, -16, width
); //FIXME unneeded for emu?
2479 src_y
= clip(src_y
, -16, height
);
2480 if (src_y
== height
)
2482 src
+= src_y
* stride
+ src_x
;
2484 if(s
->unrestricted_mv
&& (s
->flags
&CODEC_FLAG_EMU_EDGE
)){
2485 if( (unsigned)src_x
> h_edge_pos
- (motion_x
&1) - w
2486 || (unsigned)src_y
> v_edge_pos
- (motion_y
&1) - h
){
2487 ff_emulated_edge_mc(s
->edge_emu_buffer
, src
, s
->linesize
, w
+1, (h
+1)<<field_based
,
2488 src_x
, src_y
<<field_based
, h_edge_pos
, s
->v_edge_pos
);
2489 src
= s
->edge_emu_buffer
;
2495 pix_op
[dxy
](dest
, src
, stride
, h
);
2499 /* apply one mpeg motion vector to the three components */
2500 static always_inline
void mpeg_motion(MpegEncContext
*s
,
2501 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2502 int field_based
, int bottom_field
, int field_select
,
2503 uint8_t **ref_picture
, op_pixels_func (*pix_op
)[4],
2504 int motion_x
, int motion_y
, int h
)
2506 uint8_t *ptr_y
, *ptr_cb
, *ptr_cr
;
2507 int dxy
, uvdxy
, mx
, my
, src_x
, src_y
, uvsrc_x
, uvsrc_y
, v_edge_pos
, uvlinesize
, linesize
;
2510 if(s
->quarter_sample
)
2517 v_edge_pos
= s
->v_edge_pos
>> field_based
;
2518 linesize
= s
->current_picture
.linesize
[0] << field_based
;
2519 uvlinesize
= s
->current_picture
.linesize
[1] << field_based
;
2521 dxy
= ((motion_y
& 1) << 1) | (motion_x
& 1);
2522 src_x
= s
->mb_x
* 16 + (motion_x
>> 1);
2523 src_y
=(s
->mb_y
<<(4-field_based
)) + (motion_y
>> 1);
2525 if (s
->out_format
== FMT_H263
) {
2526 if((s
->workaround_bugs
& FF_BUG_HPEL_CHROMA
) && field_based
){
2527 mx
= (motion_x
>>1)|(motion_x
&1);
2529 uvdxy
= ((my
& 1) << 1) | (mx
& 1);
2530 uvsrc_x
= s
->mb_x
* 8 + (mx
>> 1);
2531 uvsrc_y
= (s
->mb_y
<<(3-field_based
)) + (my
>> 1);
2533 uvdxy
= dxy
| (motion_y
& 2) | ((motion_x
& 2) >> 1);
2537 }else if(s
->out_format
== FMT_H261
){//even chroma mv's are full pel in H261
2541 uvsrc_x
= s
->mb_x
*8 + mx
;
2542 uvsrc_y
= s
->mb_y
*8 + my
;
2544 if(s
->chroma_y_shift
){
2547 uvdxy
= ((my
& 1) << 1) | (mx
& 1);
2548 uvsrc_x
= s
->mb_x
* 8 + (mx
>> 1);
2549 uvsrc_y
= (s
->mb_y
<<(3-field_based
)) + (my
>> 1);
2551 if(s
->chroma_x_shift
){
2554 uvdxy
= ((motion_y
& 1) << 1) | (mx
& 1);
2555 uvsrc_x
= s
->mb_x
* 8 + (mx
>> 1);
2566 ptr_y
= ref_picture
[0] + src_y
* linesize
+ src_x
;
2567 ptr_cb
= ref_picture
[1] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
2568 ptr_cr
= ref_picture
[2] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
2570 if( (unsigned)src_x
> s
->h_edge_pos
- (motion_x
&1) - 16
2571 || (unsigned)src_y
> v_edge_pos
- (motion_y
&1) - h
){
2572 if(s
->codec_id
== CODEC_ID_MPEG2VIDEO
||
2573 s
->codec_id
== CODEC_ID_MPEG1VIDEO
){
2574 av_log(s
->avctx
,AV_LOG_DEBUG
,"MPEG motion vector out of boundary\n");
2577 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr_y
, s
->linesize
, 17, 17+field_based
,
2578 src_x
, src_y
<<field_based
, s
->h_edge_pos
, s
->v_edge_pos
);
2579 ptr_y
= s
->edge_emu_buffer
;
2580 if(!(s
->flags
&CODEC_FLAG_GRAY
)){
2581 uint8_t *uvbuf
= s
->edge_emu_buffer
+18*s
->linesize
;
2582 ff_emulated_edge_mc(uvbuf
, ptr_cb
, s
->uvlinesize
, 9, 9+field_based
,
2583 uvsrc_x
, uvsrc_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2584 ff_emulated_edge_mc(uvbuf
+16, ptr_cr
, s
->uvlinesize
, 9, 9+field_based
,
2585 uvsrc_x
, uvsrc_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2591 if(bottom_field
){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
2592 dest_y
+= s
->linesize
;
2593 dest_cb
+= s
->uvlinesize
;
2594 dest_cr
+= s
->uvlinesize
;
2598 ptr_y
+= s
->linesize
;
2599 ptr_cb
+= s
->uvlinesize
;
2600 ptr_cr
+= s
->uvlinesize
;
2603 pix_op
[0][dxy
](dest_y
, ptr_y
, linesize
, h
);
2605 if(!(s
->flags
&CODEC_FLAG_GRAY
)){
2606 pix_op
[s
->chroma_x_shift
][uvdxy
](dest_cb
, ptr_cb
, uvlinesize
, h
>> s
->chroma_y_shift
);
2607 pix_op
[s
->chroma_x_shift
][uvdxy
](dest_cr
, ptr_cr
, uvlinesize
, h
>> s
->chroma_y_shift
);
2611 /* apply one mpeg motion vector to the three components */
2612 static always_inline
void mpeg_motion_lowres(MpegEncContext
*s
,
2613 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2614 int field_based
, int bottom_field
, int field_select
,
2615 uint8_t **ref_picture
, h264_chroma_mc_func
*pix_op
,
2616 int motion_x
, int motion_y
, int h
)
2618 uint8_t *ptr_y
, *ptr_cb
, *ptr_cr
;
2619 int mx
, my
, src_x
, src_y
, uvsrc_x
, uvsrc_y
, uvlinesize
, linesize
, sx
, sy
, uvsx
, uvsy
;
2620 const int lowres
= s
->avctx
->lowres
;
2621 const int block_s
= 8>>lowres
;
2622 const int s_mask
= (2<<lowres
)-1;
2623 const int h_edge_pos
= s
->h_edge_pos
>> lowres
;
2624 const int v_edge_pos
= s
->v_edge_pos
>> lowres
;
2625 linesize
= s
->current_picture
.linesize
[0] << field_based
;
2626 uvlinesize
= s
->current_picture
.linesize
[1] << field_based
;
2628 sx
= motion_x
& s_mask
;
2629 sy
= motion_y
& s_mask
;
2630 src_x
= s
->mb_x
*2*block_s
+ (motion_x
>> (lowres
+1));
2631 src_y
= s
->mb_y
*2*block_s
+ (motion_y
>> (lowres
+1));
2633 if (s
->out_format
== FMT_H263
) {
2634 uvsx
= sx
| ((motion_x
& 2)>>1);
2635 uvsy
= sy
| ((motion_y
& 2)>>1);
2638 }else if(s
->out_format
== FMT_H261
){//even chroma mv's are full pel in H261
2641 uvsx
= (2*mx
) & s_mask
;
2642 uvsy
= (2*my
) & s_mask
;
2643 uvsrc_x
= s
->mb_x
*block_s
+ (mx
>> lowres
);
2644 uvsrc_y
= s
->mb_y
*block_s
+ (my
>> lowres
);
2650 uvsrc_x
= s
->mb_x
*block_s
+ (mx
>> (lowres
+1));
2651 uvsrc_y
= s
->mb_y
*block_s
+ (my
>> (lowres
+1));
2654 ptr_y
= ref_picture
[0] + src_y
* linesize
+ src_x
;
2655 ptr_cb
= ref_picture
[1] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
2656 ptr_cr
= ref_picture
[2] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
2658 if( (unsigned)src_x
> h_edge_pos
- (!!sx
) - 2*block_s
2659 || (unsigned)src_y
>(v_edge_pos
>> field_based
) - (!!sy
) - h
){
2660 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr_y
, s
->linesize
, 17, 17+field_based
,
2661 src_x
, src_y
<<field_based
, h_edge_pos
, v_edge_pos
);
2662 ptr_y
= s
->edge_emu_buffer
;
2663 if(!(s
->flags
&CODEC_FLAG_GRAY
)){
2664 uint8_t *uvbuf
= s
->edge_emu_buffer
+18*s
->linesize
;
2665 ff_emulated_edge_mc(uvbuf
, ptr_cb
, s
->uvlinesize
, 9, 9+field_based
,
2666 uvsrc_x
, uvsrc_y
<<field_based
, h_edge_pos
>>1, v_edge_pos
>>1);
2667 ff_emulated_edge_mc(uvbuf
+16, ptr_cr
, s
->uvlinesize
, 9, 9+field_based
,
2668 uvsrc_x
, uvsrc_y
<<field_based
, h_edge_pos
>>1, v_edge_pos
>>1);
2676 pix_op
[lowres
-1](dest_y
, ptr_y
, linesize
, h
, sx
, sy
);
2678 if(!(s
->flags
&CODEC_FLAG_GRAY
)){
2679 uvsx
<<= 2 - lowres
;
2680 uvsy
<<= 2 - lowres
;
2681 pix_op
[lowres
](dest_cb
, ptr_cb
, uvlinesize
, h
>> s
->chroma_y_shift
, uvsx
, uvsy
);
2682 pix_op
[lowres
](dest_cr
, ptr_cr
, uvlinesize
, h
>> s
->chroma_y_shift
, uvsx
, uvsy
);
2686 //FIXME move to dsputil, avg variant, 16x16 version
/**
 * Blends the five 8x8 predictions (mid, top, left, right, bottom) of one
 * luma block with the OBMC weights; every weight row sums to 8, so the
 * result is (sum + 4) >> 3.
 */
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
    int x;
    uint8_t * const top    = src[1];
    uint8_t * const left   = src[2];
    uint8_t * const mid    = src[0];
    uint8_t * const right  = src[3];
    uint8_t * const bottom = src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    x = 0;
    /* row 0; the FILTER4 calls also produce the middle of row 1 */
    OBMC_FILTER (x    , 2, 2, 4, 0, 0);
    OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
    x += stride;
    /* row 1, edges only (middle was written above) */
    OBMC_FILTER (x    , 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
    x += stride;
    /* rows 2-3 */
    OBMC_FILTER4(x    , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
    x += 2 * stride;
    /* rows 4-5 */
    OBMC_FILTER4(x    , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
    x += 2 * stride;
    /* row 6 edges; FILTER4 covers the middle of rows 6-7 */
    OBMC_FILTER (x    , 0, 2, 5, 0, 1);
    OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
    x += stride;
    /* row 7, edges only */
    OBMC_FILTER (x    , 0, 2, 4, 0, 2);
    OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
}
2738 /* obmc for 1 8x8 luma block */
2739 static inline void obmc_motion(MpegEncContext
*s
,
2740 uint8_t *dest
, uint8_t *src
,
2741 int src_x
, int src_y
,
2742 op_pixels_func
*pix_op
,
2743 int16_t mv
[5][2]/* mid top left right bottom*/)
2749 assert(s
->quarter_sample
==0);
2752 if(i
&& mv
[i
][0]==mv
[MID
][0] && mv
[i
][1]==mv
[MID
][1]){
2755 ptr
[i
]= s
->obmc_scratchpad
+ 8*(i
&1) + s
->linesize
*8*(i
>>1);
2756 hpel_motion(s
, ptr
[i
], src
, 0, 0,
2758 s
->width
, s
->height
, s
->linesize
,
2759 s
->h_edge_pos
, s
->v_edge_pos
,
2761 mv
[i
][0], mv
[i
][1]);
2765 put_obmc(dest
, ptr
, s
->linesize
);
2768 static inline void qpel_motion(MpegEncContext
*s
,
2769 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2770 int field_based
, int bottom_field
, int field_select
,
2771 uint8_t **ref_picture
, op_pixels_func (*pix_op
)[4],
2772 qpel_mc_func (*qpix_op
)[16],
2773 int motion_x
, int motion_y
, int h
)
2775 uint8_t *ptr_y
, *ptr_cb
, *ptr_cr
;
2776 int dxy
, uvdxy
, mx
, my
, src_x
, src_y
, uvsrc_x
, uvsrc_y
, v_edge_pos
, linesize
, uvlinesize
;
2778 dxy
= ((motion_y
& 3) << 2) | (motion_x
& 3);
2779 src_x
= s
->mb_x
* 16 + (motion_x
>> 2);
2780 src_y
= s
->mb_y
* (16 >> field_based
) + (motion_y
>> 2);