2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
25 * The simplest mpeg encoder (well, it was the simplest!).
29 #include <math.h> //for PI
32 #include "mpegvideo.h"
36 #include "fastmemcpy.h"
#ifdef CONFIG_ENCODERS
/* Encoder-only entry point: encodes one complete picture (defined later in
 * this file). */
static void encode_picture(MpegEncContext *s, int picture_number);
#endif //CONFIG_ENCODERS

/* C reference implementations of the per-standard inverse quantizers.
 * DCT_common_init() installs these into the MpegEncContext function
 * pointers; arch-specific init code (MMX/AltiVec/...) may override them. */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
/* Draws the picture edge padding (body not visible in this chunk —
 * presumably replicates border pixels over a band of width w; confirm in
 * the definition below). */
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);

#ifdef CONFIG_ENCODERS
/* Encoder-only forward/inverse quantization helpers (defined later in this
 * file). *overflow is an output flag set by the quantizers. */
static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
static int sse_mb(MpegEncContext *s);
static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
#endif //CONFIG_ENCODERS

/* XvMC hardware acceleration hooks, implemented outside this file. */
extern int XVMC_field_start(MpegEncContext *s, AVCodecContext *avctx);
extern void XVMC_field_end(MpegEncContext *s);
extern void XVMC_decode_mb(MpegEncContext *s);

/* Overridable edge-padding hook; defaults to the C implementation and may
 * be replaced by arch-specific init code. */
void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
75 /* enable all paranoid tests for rounding, overflows, etc... */
81 /* for jpeg fast DCT */
/* Post-scale factors for the AAN fast DCT (fdct_ifast / faandct), used by
 * convert_matrix() to fold the DCT's implicit scaling into the quantizer
 * reciprocals. The table is the symmetric 8x8 outer product of the AAN
 * 1-D scale factors, scaled up by 14 bits and stored in raster order.
 * The source fragment was missing the initializer's closing "};" — restored. */
static const uint16_t aanscales[64] = {
    /* precomputed values scaled up by 14 bits */
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
    21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
    19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
     8867, 12299, 11585, 10426,  8867,  6967,  4799,  2446,
     4520,  6270,  5906,  5315,  4520,  3552,  2446,  1247
};
/* H.263 chroma motion-vector rounding table: maps the low 4 bits (1/16-pel
 * remainder) of the summed luma MV components to a half-pel rounding value
 * — NOTE(review): semantics inferred from the name and H.263 usage; confirm
 * against the motion-compensation code that indexes it.
 * The source fragment was missing the closing "};" — restored. */
static const uint8_t h263_chroma_roundtab[16] = {
//  0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15
    0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
};
/* Default chroma qscale table: identity mapping (chroma QP == luma QP).
 * Codecs with a nonlinear chroma QP (e.g. H.263 modified quant, see
 * MPV_encode_init) install ff_h263_chroma_qscale_table instead.
 * The source fragment was missing the closing "};" — restored. */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
#ifdef CONFIG_ENCODERS
/* Shared default motion-vector penalty tables, one row per fcode; lazily
 * allocated in MPV_encode_init() with size (MAX_FCODE+1)*(2*MAX_MV+1). */
static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
/* Maps an MV component (biased by +MAX_MV) to its default fcode; filled in
 * by MPV_encode_init(). */
static uint8_t default_fcode_tab[MAX_MV*2+1];

/* -1-terminated pixel-format list: this encoder core handles YUV 4:2:0. */
enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
/*
 * Builds the quantization multiplier tables for one quant matrix over the
 * qscale range [qmin, qmax].
 *   qmat:   per-qscale 32-bit reciprocal table (generic quantizer)
 *   qmat16: per-qscale 16-bit reciprocal ([0]) and rounding bias ([1])
 *           tables for the 16-bit (MMX-style) quantizer
 * The reciprocal form depends on which fdct is in use: fdct_ifast (and
 * faandct without FAAN_POSTSCALE) leaves the AAN post-scale factors
 * (aanscales[]) folded into its output, so they are divided out here.
 * quant_matrix is indexed through dsp->idct_permutation so the tables match
 * the permuted coefficient order the (i)DCT uses.
 *
 * NOTE(review): this block appears truncated by extraction — the opening
 * brace, local declarations, the inner for(i=0;i<64;i++) loops, the #endif
 * of each #ifdef and several closing braces are missing from the visible
 * text. Code below is kept exactly as found; only comments were added.
 */
static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
                           const uint16_t *quant_matrix, int bias, int qmin, int qmax)
    for(qscale=qmin; qscale<=qmax; qscale++){
        if (dsp->fdct == ff_jpeg_fdct_islow
#ifdef FAAN_POSTSCALE
            || dsp->fdct == ff_faandct
            /* unscaled-DCT path: plain reciprocal of qscale*matrix entry */
                const int j= dsp->idct_permutation[i];
                /* 16 <= qscale * quant_matrix[i] <= 7905 */
                /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
                /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
                /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
                qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) /
                                        (qscale * quant_matrix[j]));
        } else if (dsp->fdct == fdct_ifast
#ifndef FAAN_POSTSCALE
                   || dsp->fdct == ff_faandct
            /* AAN path: divide the 14-bit aanscales post-scale out as well */
                const int j= dsp->idct_permutation[i];
                /* 16 <= qscale * quant_matrix[i] <= 7905 */
                /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
                /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
                /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
                qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) /
                                        (aanscales[i] * qscale * quant_matrix[j]));
            /* default path: additionally build the 16-bit tables */
                const int j= dsp->idct_permutation[i];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                   So 16 <= qscale * quant_matrix[i] <= 7905
                   so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
                   so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67 */
                qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
//                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);

                /* keep the 16-bit reciprocal strictly inside (0, 32768) —
                 * presumably so the signed 16-bit multiply path cannot
                 * overflow or divide by zero; confirm against the MMX
                 * quantizer */
                if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;
                qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);
168 static inline void update_qscale(MpegEncContext
*s
){
169 s
->qscale
= (s
->lambda
*139 + FF_LAMBDA_SCALE
*64) >> (FF_LAMBDA_SHIFT
+ 7);
170 s
->qscale
= clip(s
->qscale
, s
->avctx
->qmin
, s
->avctx
->qmax
);
172 s
->lambda2
= (s
->lambda
*s
->lambda
+ FF_LAMBDA_SCALE
/2) >> FF_LAMBDA_SHIFT
;
174 #endif //CONFIG_ENCODERS
176 void ff_init_scantable(uint8_t *permutation
, ScanTable
*st
, const uint8_t *src_scantable
){
180 st
->scantable
= src_scantable
;
184 j
= src_scantable
[i
];
185 st
->permutated
[i
] = permutation
[j
];
194 j
= st
->permutated
[i
];
196 st
->raster_end
[i
]= end
;
200 #ifdef CONFIG_ENCODERS
201 void ff_write_quant_matrix(PutBitContext
*pb
, int16_t *matrix
){
207 put_bits(pb
, 8, matrix
[ ff_zigzag_direct
[i
] ]);
212 #endif //CONFIG_ENCODERS
214 /* init common dct for both encoder and decoder */
215 int DCT_common_init(MpegEncContext
*s
)
217 s
->dct_unquantize_h263_intra
= dct_unquantize_h263_intra_c
;
218 s
->dct_unquantize_h263_inter
= dct_unquantize_h263_inter_c
;
219 s
->dct_unquantize_mpeg1_intra
= dct_unquantize_mpeg1_intra_c
;
220 s
->dct_unquantize_mpeg1_inter
= dct_unquantize_mpeg1_inter_c
;
221 s
->dct_unquantize_mpeg2_intra
= dct_unquantize_mpeg2_intra_c
;
222 s
->dct_unquantize_mpeg2_inter
= dct_unquantize_mpeg2_inter_c
;
224 #ifdef CONFIG_ENCODERS
225 s
->dct_quantize
= dct_quantize_c
;
226 s
->denoise_dct
= denoise_dct_c
;
230 MPV_common_init_mmx(s
);
233 MPV_common_init_axp(s
);
236 MPV_common_init_mlib(s
);
239 MPV_common_init_mmi(s
);
242 MPV_common_init_armv4l(s
);
245 MPV_common_init_ppc(s
);
248 #ifdef CONFIG_ENCODERS
249 s
->fast_dct_quantize
= s
->dct_quantize
;
251 if(s
->flags
&CODEC_FLAG_TRELLIS_QUANT
){
252 s
->dct_quantize
= dct_quantize_trellis_c
; //move before MPV_common_init_*
255 #endif //CONFIG_ENCODERS
257 /* load & permutate scantables
258 note: only wmv uses differnt ones
260 if(s
->alternate_scan
){
261 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->inter_scantable
, ff_alternate_vertical_scan
);
262 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_scantable
, ff_alternate_vertical_scan
);
264 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->inter_scantable
, ff_zigzag_direct
);
265 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_scantable
, ff_zigzag_direct
);
267 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_h_scantable
, ff_alternate_horizontal_scan
);
268 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_v_scantable
, ff_alternate_vertical_scan
);
270 s
->picture_structure
= PICT_FRAME
;
275 static void copy_picture(Picture
*dst
, Picture
*src
){
277 dst
->type
= FF_BUFFER_TYPE_COPY
;
280 static void copy_picture_attributes(AVFrame
*dst
, AVFrame
*src
){
281 dst
->pict_type
= src
->pict_type
;
282 dst
->quality
= src
->quality
;
283 dst
->coded_picture_number
= src
->coded_picture_number
;
284 dst
->display_picture_number
= src
->display_picture_number
;
285 // dst->reference = src->reference;
287 dst
->interlaced_frame
= src
->interlaced_frame
;
288 dst
->top_field_first
= src
->top_field_first
;
292 * allocates a Picture
293 * The pixels are allocated/set by calling get_buffer() if shared=0
295 static int alloc_picture(MpegEncContext
*s
, Picture
*pic
, int shared
){
296 const int big_mb_num
= s
->mb_stride
*(s
->mb_height
+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11
297 const int mb_array_size
= s
->mb_stride
*s
->mb_height
;
298 const int b8_array_size
= s
->b8_stride
*s
->mb_height
*2;
299 const int b4_array_size
= s
->b4_stride
*s
->mb_height
*4;
303 assert(pic
->data
[0]);
304 assert(pic
->type
== 0 || pic
->type
== FF_BUFFER_TYPE_SHARED
);
305 pic
->type
= FF_BUFFER_TYPE_SHARED
;
309 assert(!pic
->data
[0]);
311 r
= s
->avctx
->get_buffer(s
->avctx
, (AVFrame
*)pic
);
313 if(r
<0 || !pic
->age
|| !pic
->type
|| !pic
->data
[0]){
314 av_log(s
->avctx
, AV_LOG_ERROR
, "get_buffer() failed (%d %d %d %p)\n", r
, pic
->age
, pic
->type
, pic
->data
[0]);
318 if(s
->linesize
&& (s
->linesize
!= pic
->linesize
[0] || s
->uvlinesize
!= pic
->linesize
[1])){
319 av_log(s
->avctx
, AV_LOG_ERROR
, "get_buffer() failed (stride changed)\n");
323 if(pic
->linesize
[1] != pic
->linesize
[2]){
324 av_log(s
->avctx
, AV_LOG_ERROR
, "get_buffer() failed (uv stride missmatch)\n");
328 s
->linesize
= pic
->linesize
[0];
329 s
->uvlinesize
= pic
->linesize
[1];
332 if(pic
->qscale_table
==NULL
){
334 CHECKED_ALLOCZ(pic
->mb_var
, mb_array_size
* sizeof(int16_t))
335 CHECKED_ALLOCZ(pic
->mc_mb_var
, mb_array_size
* sizeof(int16_t))
336 CHECKED_ALLOCZ(pic
->mb_mean
, mb_array_size
* sizeof(int8_t))
339 CHECKED_ALLOCZ(pic
->mbskip_table
, mb_array_size
* sizeof(uint8_t)+2) //the +2 is for the slice end check
340 CHECKED_ALLOCZ(pic
->qscale_table
, mb_array_size
* sizeof(uint8_t))
341 CHECKED_ALLOCZ(pic
->mb_type_base
, big_mb_num
* sizeof(uint32_t))
342 pic
->mb_type
= pic
->mb_type_base
+ s
->mb_stride
+1;
343 if(s
->out_format
== FMT_H264
){
345 CHECKED_ALLOCZ(pic
->motion_val_base
[i
], 2 * (b4_array_size
+1) * sizeof(int16_t))
346 pic
->motion_val
[i
]= pic
->motion_val_base
[i
]+1;
347 CHECKED_ALLOCZ(pic
->ref_index
[i
] , b8_array_size
* sizeof(uint8_t))
349 pic
->motion_subsample_log2
= 2;
350 }else if(s
->out_format
== FMT_H263
|| s
->encoding
|| (s
->avctx
->debug
&FF_DEBUG_MV
) || (s
->avctx
->debug_mv
)){
352 CHECKED_ALLOCZ(pic
->motion_val_base
[i
], 2 * (b8_array_size
+1) * sizeof(int16_t)*2) //FIXME
353 pic
->motion_val
[i
]= pic
->motion_val_base
[i
]+1;
355 pic
->motion_subsample_log2
= 3;
357 pic
->qstride
= s
->mb_stride
;
358 CHECKED_ALLOCZ(pic
->pan_scan
, 1 * sizeof(AVPanScan
))
361 //it might be nicer if the application would keep track of these but it would require a API change
362 memmove(s
->prev_pict_types
+1, s
->prev_pict_types
, PREV_PICT_TYPES_BUFFER_SIZE
-1);
363 s
->prev_pict_types
[0]= s
->pict_type
;
364 if(pic
->age
< PREV_PICT_TYPES_BUFFER_SIZE
&& s
->prev_pict_types
[pic
->age
] == B_TYPE
)
365 pic
->age
= INT_MAX
; // skiped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway
368 fail
: //for the CHECKED_ALLOCZ macro
373 * deallocates a picture
375 static void free_picture(MpegEncContext
*s
, Picture
*pic
){
378 if(pic
->data
[0] && pic
->type
!=FF_BUFFER_TYPE_SHARED
){
379 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)pic
);
382 av_freep(&pic
->mb_var
);
383 av_freep(&pic
->mc_mb_var
);
384 av_freep(&pic
->mb_mean
);
385 av_freep(&pic
->mbskip_table
);
386 av_freep(&pic
->qscale_table
);
387 av_freep(&pic
->mb_type_base
);
388 av_freep(&pic
->pan_scan
);
391 av_freep(&pic
->motion_val_base
[i
]);
392 av_freep(&pic
->ref_index
[i
]);
395 if(pic
->type
== FF_BUFFER_TYPE_SHARED
){
404 static int init_duplicate_context(MpegEncContext
*s
, MpegEncContext
*base
){
407 CHECKED_ALLOCZ(s
->allocated_edge_emu_buffer
, (s
->width
+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
408 s
->edge_emu_buffer
= s
->allocated_edge_emu_buffer
+ (s
->width
+64)*2*17;
410 //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
411 CHECKED_ALLOCZ(s
->me
.scratchpad
, s
->width
*2*16*2*sizeof(uint8_t))
412 s
->rd_scratchpad
= s
->me
.scratchpad
;
413 s
->b_scratchpad
= s
->me
.scratchpad
;
414 s
->obmc_scratchpad
= s
->me
.scratchpad
+ 16;
416 CHECKED_ALLOCZ(s
->me
.map
, ME_MAP_SIZE
*sizeof(uint32_t))
417 CHECKED_ALLOCZ(s
->me
.score_map
, ME_MAP_SIZE
*sizeof(uint32_t))
418 if(s
->avctx
->noise_reduction
){
419 CHECKED_ALLOCZ(s
->dct_error_sum
, 2 * 64 * sizeof(int))
422 CHECKED_ALLOCZ(s
->blocks
, 64*6*2 * sizeof(DCTELEM
))
423 s
->block
= s
->blocks
[0];
426 s
->pblocks
[i
] = (short *)(&s
->block
[i
]);
430 return -1; //free() through MPV_common_end()
433 static void free_duplicate_context(MpegEncContext
*s
){
436 av_freep(&s
->allocated_edge_emu_buffer
); s
->edge_emu_buffer
= NULL
;
437 av_freep(&s
->me
.scratchpad
);
440 s
->obmc_scratchpad
= NULL
;
442 av_freep(&s
->dct_error_sum
);
443 av_freep(&s
->me
.map
);
444 av_freep(&s
->me
.score_map
);
445 av_freep(&s
->blocks
);
449 static void backup_duplicate_context(MpegEncContext
*bak
, MpegEncContext
*src
){
450 #define COPY(a) bak->a= src->a
451 COPY(allocated_edge_emu_buffer
);
452 COPY(edge_emu_buffer
);
456 COPY(obmc_scratchpad
);
463 COPY(me
.map_generation
);
471 void ff_update_duplicate_context(MpegEncContext
*dst
, MpegEncContext
*src
){
474 //FIXME copy only needed parts
476 backup_duplicate_context(&bak
, dst
);
477 memcpy(dst
, src
, sizeof(MpegEncContext
));
478 backup_duplicate_context(dst
, &bak
);
480 dst
->pblocks
[i
] = (short *)(&dst
->block
[i
]);
482 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
485 static void update_duplicate_context_after_me(MpegEncContext
*dst
, MpegEncContext
*src
){
486 #define COPY(a) dst->a= src->a
488 COPY(current_picture
);
494 COPY(picture_in_gop_number
);
495 COPY(gop_picture_number
);
496 COPY(frame_pred_frame_dct
); //FIXME dont set in encode_header
497 COPY(progressive_frame
); //FIXME dont set in encode_header
498 COPY(partitioned_frame
); //FIXME dont set in encode_header
502 /* init common structure for both encoder and decoder */
503 int MPV_common_init(MpegEncContext
*s
)
505 int y_size
, c_size
, yc_size
, i
, mb_array_size
, mv_table_size
, x
, y
;
507 dsputil_init(&s
->dsp
, s
->avctx
);
510 s
->flags
= s
->avctx
->flags
;
511 s
->flags2
= s
->avctx
->flags2
;
513 s
->mb_width
= (s
->width
+ 15) / 16;
514 s
->mb_height
= (s
->height
+ 15) / 16;
515 s
->mb_stride
= s
->mb_width
+ 1;
516 s
->b8_stride
= s
->mb_width
*2 + 1;
517 s
->b4_stride
= s
->mb_width
*4 + 1;
518 mb_array_size
= s
->mb_height
* s
->mb_stride
;
519 mv_table_size
= (s
->mb_height
+2) * s
->mb_stride
+ 1;
521 /* set default edge pos, will be overriden in decode_header if needed */
522 s
->h_edge_pos
= s
->mb_width
*16;
523 s
->v_edge_pos
= s
->mb_height
*16;
525 s
->mb_num
= s
->mb_width
* s
->mb_height
;
530 s
->block_wrap
[3]= s
->mb_width
*2 + 2;
532 s
->block_wrap
[5]= s
->mb_width
+ 2;
535 s
->c_dc_scale_table
= ff_mpeg1_dc_scale_table
;
536 s
->chroma_qscale_table
= ff_default_chroma_qscale_table
;
538 s
->progressive_sequence
= 1;
539 s
->progressive_frame
= 1;
540 s
->coded_picture_number
= 0;
542 y_size
= (2 * s
->mb_width
+ 2) * (2 * s
->mb_height
+ 2);
543 c_size
= (s
->mb_width
+ 2) * (s
->mb_height
+ 2);
544 yc_size
= y_size
+ 2 * c_size
;
546 /* convert fourcc to upper case */
547 s
->avctx
->codec_tag
= toupper( s
->avctx
->codec_tag
&0xFF)
548 + (toupper((s
->avctx
->codec_tag
>>8 )&0xFF)<<8 )
549 + (toupper((s
->avctx
->codec_tag
>>16)&0xFF)<<16)
550 + (toupper((s
->avctx
->codec_tag
>>24)&0xFF)<<24);
552 s
->avctx
->stream_codec_tag
= toupper( s
->avctx
->stream_codec_tag
&0xFF)
553 + (toupper((s
->avctx
->stream_codec_tag
>>8 )&0xFF)<<8 )
554 + (toupper((s
->avctx
->stream_codec_tag
>>16)&0xFF)<<16)
555 + (toupper((s
->avctx
->stream_codec_tag
>>24)&0xFF)<<24);
557 s
->avctx
->coded_frame
= (AVFrame
*)&s
->current_picture
;
559 CHECKED_ALLOCZ(s
->mb_index2xy
, (s
->mb_num
+1)*sizeof(int)) //error ressilience code looks cleaner with this
560 for(y
=0; y
<s
->mb_height
; y
++){
561 for(x
=0; x
<s
->mb_width
; x
++){
562 s
->mb_index2xy
[ x
+ y
*s
->mb_width
] = x
+ y
*s
->mb_stride
;
565 s
->mb_index2xy
[ s
->mb_height
*s
->mb_width
] = (s
->mb_height
-1)*s
->mb_stride
+ s
->mb_width
; //FIXME really needed?
568 /* Allocate MV tables */
569 CHECKED_ALLOCZ(s
->p_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
570 CHECKED_ALLOCZ(s
->b_forw_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
571 CHECKED_ALLOCZ(s
->b_back_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
572 CHECKED_ALLOCZ(s
->b_bidir_forw_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
573 CHECKED_ALLOCZ(s
->b_bidir_back_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
574 CHECKED_ALLOCZ(s
->b_direct_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
575 s
->p_mv_table
= s
->p_mv_table_base
+ s
->mb_stride
+ 1;
576 s
->b_forw_mv_table
= s
->b_forw_mv_table_base
+ s
->mb_stride
+ 1;
577 s
->b_back_mv_table
= s
->b_back_mv_table_base
+ s
->mb_stride
+ 1;
578 s
->b_bidir_forw_mv_table
= s
->b_bidir_forw_mv_table_base
+ s
->mb_stride
+ 1;
579 s
->b_bidir_back_mv_table
= s
->b_bidir_back_mv_table_base
+ s
->mb_stride
+ 1;
580 s
->b_direct_mv_table
= s
->b_direct_mv_table_base
+ s
->mb_stride
+ 1;
582 if(s
->msmpeg4_version
){
583 CHECKED_ALLOCZ(s
->ac_stats
, 2*2*(MAX_LEVEL
+1)*(MAX_RUN
+1)*2*sizeof(int));
585 CHECKED_ALLOCZ(s
->avctx
->stats_out
, 256);
587 /* Allocate MB type table */
588 CHECKED_ALLOCZ(s
->mb_type
, mb_array_size
* sizeof(uint16_t)) //needed for encoding
590 CHECKED_ALLOCZ(s
->lambda_table
, mb_array_size
* sizeof(int))
592 CHECKED_ALLOCZ(s
->q_intra_matrix
, 64*32 * sizeof(int))
593 CHECKED_ALLOCZ(s
->q_inter_matrix
, 64*32 * sizeof(int))
594 CHECKED_ALLOCZ(s
->q_intra_matrix16
, 64*32*2 * sizeof(uint16_t))
595 CHECKED_ALLOCZ(s
->q_inter_matrix16
, 64*32*2 * sizeof(uint16_t))
596 CHECKED_ALLOCZ(s
->input_picture
, MAX_PICTURE_COUNT
* sizeof(Picture
*))
597 CHECKED_ALLOCZ(s
->reordered_input_picture
, MAX_PICTURE_COUNT
* sizeof(Picture
*))
599 if(s
->avctx
->noise_reduction
){
600 CHECKED_ALLOCZ(s
->dct_offset
, 2 * 64 * sizeof(uint16_t))
603 CHECKED_ALLOCZ(s
->picture
, MAX_PICTURE_COUNT
* sizeof(Picture
))
605 CHECKED_ALLOCZ(s
->error_status_table
, mb_array_size
*sizeof(uint8_t))
607 if(s
->codec_id
==CODEC_ID_MPEG4
|| (s
->flags
& CODEC_FLAG_INTERLACED_ME
)){
608 /* interlaced direct mode decoding tables */
613 CHECKED_ALLOCZ(s
->b_field_mv_table_base
[i
][j
][k
] , mv_table_size
* 2 * sizeof(int16_t))
614 s
->b_field_mv_table
[i
][j
][k
] = s
->b_field_mv_table_base
[i
][j
][k
] + s
->mb_stride
+ 1;
616 CHECKED_ALLOCZ(s
->b_field_select_table
[i
][j
] , mb_array_size
* 2 * sizeof(uint8_t))
617 CHECKED_ALLOCZ(s
->p_field_mv_table_base
[i
][j
] , mv_table_size
* 2 * sizeof(int16_t))
618 s
->p_field_mv_table
[i
][j
] = s
->p_field_mv_table_base
[i
][j
] + s
->mb_stride
+ 1;
620 CHECKED_ALLOCZ(s
->p_field_select_table
[i
] , mb_array_size
* 2 * sizeof(uint8_t))
623 if (s
->out_format
== FMT_H263
) {
625 CHECKED_ALLOCZ(s
->ac_val
[0], yc_size
* sizeof(int16_t) * 16);
626 s
->ac_val
[1] = s
->ac_val
[0] + y_size
;
627 s
->ac_val
[2] = s
->ac_val
[1] + c_size
;
630 CHECKED_ALLOCZ(s
->coded_block
, y_size
);
632 /* divx501 bitstream reorder buffer */
633 CHECKED_ALLOCZ(s
->bitstream_buffer
, BITSTREAM_BUFFER_SIZE
);
635 /* cbp, ac_pred, pred_dir */
636 CHECKED_ALLOCZ(s
->cbp_table
, mb_array_size
* sizeof(uint8_t))
637 CHECKED_ALLOCZ(s
->pred_dir_table
, mb_array_size
* sizeof(uint8_t))
640 if (s
->h263_pred
|| s
->h263_plus
|| !s
->encoding
) {
642 //MN: we need these for error resilience of intra-frames
643 CHECKED_ALLOCZ(s
->dc_val
[0], yc_size
* sizeof(int16_t));
644 s
->dc_val
[1] = s
->dc_val
[0] + y_size
;
645 s
->dc_val
[2] = s
->dc_val
[1] + c_size
;
646 for(i
=0;i
<yc_size
;i
++)
647 s
->dc_val
[0][i
] = 1024;
650 /* which mb is a intra block */
651 CHECKED_ALLOCZ(s
->mbintra_table
, mb_array_size
);
652 memset(s
->mbintra_table
, 1, mb_array_size
);
654 /* default structure is frame */
655 s
->picture_structure
= PICT_FRAME
;
657 /* init macroblock skip table */
658 CHECKED_ALLOCZ(s
->mbskip_table
, mb_array_size
+2);
659 //Note the +1 is for a quicker mpeg4 slice_end detection
660 CHECKED_ALLOCZ(s
->prev_pict_types
, PREV_PICT_TYPES_BUFFER_SIZE
);
662 s
->parse_context
.state
= -1;
663 if((s
->avctx
->debug
&(FF_DEBUG_VIS_QP
|FF_DEBUG_VIS_MB_TYPE
)) || (s
->avctx
->debug_mv
)){
664 s
->visualization_buffer
[0] = av_malloc((s
->mb_width
*16 + 2*EDGE_WIDTH
) * s
->mb_height
*16 + 2*EDGE_WIDTH
);
665 s
->visualization_buffer
[1] = av_malloc((s
->mb_width
*8 + EDGE_WIDTH
) * s
->mb_height
*8 + EDGE_WIDTH
);
666 s
->visualization_buffer
[2] = av_malloc((s
->mb_width
*8 + EDGE_WIDTH
) * s
->mb_height
*8 + EDGE_WIDTH
);
669 s
->context_initialized
= 1;
671 s
->thread_context
[0]= s
;
672 for(i
=1; i
<s
->avctx
->thread_count
; i
++){
673 s
->thread_context
[i
]= av_malloc(sizeof(MpegEncContext
));
674 memcpy(s
->thread_context
[i
], s
, sizeof(MpegEncContext
));
677 for(i
=0; i
<s
->avctx
->thread_count
; i
++){
678 if(init_duplicate_context(s
->thread_context
[i
], s
) < 0)
680 s
->thread_context
[i
]->start_mb_y
= (s
->mb_height
*(i
) + s
->avctx
->thread_count
/2) / s
->avctx
->thread_count
;
681 s
->thread_context
[i
]->end_mb_y
= (s
->mb_height
*(i
+1) + s
->avctx
->thread_count
/2) / s
->avctx
->thread_count
;
690 /* init common structure for both encoder and decoder */
691 void MPV_common_end(MpegEncContext
*s
)
695 for(i
=0; i
<s
->avctx
->thread_count
; i
++){
696 free_duplicate_context(s
->thread_context
[i
]);
698 for(i
=1; i
<s
->avctx
->thread_count
; i
++){
699 av_freep(&s
->thread_context
[i
]);
702 av_freep(&s
->parse_context
.buffer
);
703 s
->parse_context
.buffer_size
=0;
705 av_freep(&s
->mb_type
);
706 av_freep(&s
->p_mv_table_base
);
707 av_freep(&s
->b_forw_mv_table_base
);
708 av_freep(&s
->b_back_mv_table_base
);
709 av_freep(&s
->b_bidir_forw_mv_table_base
);
710 av_freep(&s
->b_bidir_back_mv_table_base
);
711 av_freep(&s
->b_direct_mv_table_base
);
713 s
->b_forw_mv_table
= NULL
;
714 s
->b_back_mv_table
= NULL
;
715 s
->b_bidir_forw_mv_table
= NULL
;
716 s
->b_bidir_back_mv_table
= NULL
;
717 s
->b_direct_mv_table
= NULL
;
721 av_freep(&s
->b_field_mv_table_base
[i
][j
][k
]);
722 s
->b_field_mv_table
[i
][j
][k
]=NULL
;
724 av_freep(&s
->b_field_select_table
[i
][j
]);
725 av_freep(&s
->p_field_mv_table_base
[i
][j
]);
726 s
->p_field_mv_table
[i
][j
]=NULL
;
728 av_freep(&s
->p_field_select_table
[i
]);
731 av_freep(&s
->dc_val
[0]);
732 av_freep(&s
->ac_val
[0]);
733 av_freep(&s
->coded_block
);
734 av_freep(&s
->mbintra_table
);
735 av_freep(&s
->cbp_table
);
736 av_freep(&s
->pred_dir_table
);
738 av_freep(&s
->mbskip_table
);
739 av_freep(&s
->prev_pict_types
);
740 av_freep(&s
->bitstream_buffer
);
741 av_freep(&s
->avctx
->stats_out
);
742 av_freep(&s
->ac_stats
);
743 av_freep(&s
->error_status_table
);
744 av_freep(&s
->mb_index2xy
);
745 av_freep(&s
->lambda_table
);
746 av_freep(&s
->q_intra_matrix
);
747 av_freep(&s
->q_inter_matrix
);
748 av_freep(&s
->q_intra_matrix16
);
749 av_freep(&s
->q_inter_matrix16
);
750 av_freep(&s
->input_picture
);
751 av_freep(&s
->reordered_input_picture
);
752 av_freep(&s
->dct_offset
);
755 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
756 free_picture(s
, &s
->picture
[i
]);
759 av_freep(&s
->picture
);
760 avcodec_default_free_buffers(s
->avctx
);
761 s
->context_initialized
= 0;
764 s
->current_picture_ptr
= NULL
;
766 if (s
->visualization_buffer
[i
])
767 av_free(s
->visualization_buffer
[i
]);
770 #ifdef CONFIG_ENCODERS
772 /* init video encoder */
773 int MPV_encode_init(AVCodecContext
*avctx
)
775 MpegEncContext
*s
= avctx
->priv_data
;
777 int chroma_h_shift
, chroma_v_shift
;
779 avctx
->pix_fmt
= PIX_FMT_YUV420P
; // FIXME
781 s
->bit_rate
= avctx
->bit_rate
;
782 s
->width
= avctx
->width
;
783 s
->height
= avctx
->height
;
784 if(avctx
->gop_size
> 600){
785 av_log(avctx
, AV_LOG_ERROR
, "Warning keyframe interval too large! reducing it ...\n");
788 s
->gop_size
= avctx
->gop_size
;
790 s
->flags
= avctx
->flags
;
791 s
->flags2
= avctx
->flags2
;
792 s
->max_b_frames
= avctx
->max_b_frames
;
793 s
->codec_id
= avctx
->codec
->id
;
794 s
->luma_elim_threshold
= avctx
->luma_elim_threshold
;
795 s
->chroma_elim_threshold
= avctx
->chroma_elim_threshold
;
796 s
->strict_std_compliance
= avctx
->strict_std_compliance
;
797 s
->data_partitioning
= avctx
->flags
& CODEC_FLAG_PART
;
798 s
->quarter_sample
= (avctx
->flags
& CODEC_FLAG_QPEL
)!=0;
799 s
->mpeg_quant
= avctx
->mpeg_quant
;
800 s
->rtp_mode
= !!avctx
->rtp_payload_size
;
802 if (s
->gop_size
<= 1) {
809 s
->me_method
= avctx
->me_method
;
812 s
->fixed_qscale
= !!(avctx
->flags
& CODEC_FLAG_QSCALE
);
814 s
->adaptive_quant
= ( s
->avctx
->lumi_masking
815 || s
->avctx
->dark_masking
816 || s
->avctx
->temporal_cplx_masking
817 || s
->avctx
->spatial_cplx_masking
818 || s
->avctx
->p_masking
819 || (s
->flags
&CODEC_FLAG_QP_RD
))
822 s
->obmc
= !!(s
->flags
& CODEC_FLAG_OBMC
);
823 s
->loop_filter
= !!(s
->flags
& CODEC_FLAG_LOOP_FILTER
);
824 s
->alternate_scan
= !!(s
->flags
& CODEC_FLAG_ALT_SCAN
);
826 if(avctx
->rc_max_rate
&& !avctx
->rc_buffer_size
){
827 av_log(avctx
, AV_LOG_ERROR
, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
831 if(avctx
->rc_min_rate
&& avctx
->rc_max_rate
!= avctx
->rc_min_rate
){
832 av_log(avctx
, AV_LOG_INFO
, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n");
835 if((s
->flags
& CODEC_FLAG_4MV
) && s
->codec_id
!= CODEC_ID_MPEG4
836 && s
->codec_id
!= CODEC_ID_H263
&& s
->codec_id
!= CODEC_ID_H263P
&& s
->codec_id
!= CODEC_ID_FLV1
){
837 av_log(avctx
, AV_LOG_ERROR
, "4MV not supported by codec\n");
841 if(s
->obmc
&& s
->avctx
->mb_decision
!= FF_MB_DECISION_SIMPLE
){
842 av_log(avctx
, AV_LOG_ERROR
, "OBMC is only supported with simple mb decission\n");
846 if(s
->obmc
&& s
->codec_id
!= CODEC_ID_H263
&& s
->codec_id
!= CODEC_ID_H263P
){
847 av_log(avctx
, AV_LOG_ERROR
, "OBMC is only supported with H263(+)\n");
851 if(s
->quarter_sample
&& s
->codec_id
!= CODEC_ID_MPEG4
){
852 av_log(avctx
, AV_LOG_ERROR
, "qpel not supported by codec\n");
856 if(s
->data_partitioning
&& s
->codec_id
!= CODEC_ID_MPEG4
){
857 av_log(avctx
, AV_LOG_ERROR
, "data partitioning not supported by codec\n");
861 if(s
->max_b_frames
&& s
->codec_id
!= CODEC_ID_MPEG4
&& s
->codec_id
!= CODEC_ID_MPEG1VIDEO
&& s
->codec_id
!= CODEC_ID_MPEG2VIDEO
){
862 av_log(avctx
, AV_LOG_ERROR
, "b frames not supported by codec\n");
866 if(s
->mpeg_quant
&& s
->codec_id
!= CODEC_ID_MPEG4
){ //FIXME mpeg2 uses that too
867 av_log(avctx
, AV_LOG_ERROR
, "mpeg2 style quantization not supporetd by codec\n");
871 if((s
->flags
& CODEC_FLAG_CBP_RD
) && !(s
->flags
& CODEC_FLAG_TRELLIS_QUANT
)){
872 av_log(avctx
, AV_LOG_ERROR
, "CBP RD needs trellis quant\n");
876 if((s
->flags
& CODEC_FLAG_QP_RD
) && s
->avctx
->mb_decision
!= FF_MB_DECISION_RD
){
877 av_log(avctx
, AV_LOG_ERROR
, "QP RD needs mbd=2\n");
881 if(s
->avctx
->scenechange_threshold
< 1000000000 && (s
->flags
& CODEC_FLAG_CLOSED_GOP
)){
882 av_log(avctx
, AV_LOG_ERROR
, "closed gop with scene change detection arent supported yet\n");
886 if(s
->avctx
->thread_count
> 1 && s
->codec_id
!= CODEC_ID_MPEG4
887 && s
->codec_id
!= CODEC_ID_MPEG1VIDEO
&& s
->codec_id
!= CODEC_ID_MPEG2VIDEO
888 && (s
->codec_id
!= CODEC_ID_H263P
|| !(s
->flags
& CODEC_FLAG_H263P_SLICE_STRUCT
))){
889 av_log(avctx
, AV_LOG_ERROR
, "multi threaded encoding not supported by codec\n");
893 if(s
->avctx
->thread_count
> MAX_THREADS
|| 16*s
->avctx
->thread_count
> s
->height
){
894 av_log(avctx
, AV_LOG_ERROR
, "too many threads\n");
898 if(s
->avctx
->thread_count
> 1)
901 i
= ff_gcd(avctx
->frame_rate
, avctx
->frame_rate_base
);
903 av_log(avctx
, AV_LOG_INFO
, "removing common factors from framerate\n");
904 avctx
->frame_rate
/= i
;
905 avctx
->frame_rate_base
/= i
;
909 if(s
->codec_id
==CODEC_ID_MJPEG
){
910 s
->intra_quant_bias
= 1<<(QUANT_BIAS_SHIFT
-1); //(a + x/2)/x
911 s
->inter_quant_bias
= 0;
912 }else if(s
->mpeg_quant
|| s
->codec_id
==CODEC_ID_MPEG1VIDEO
|| s
->codec_id
==CODEC_ID_MPEG2VIDEO
){
913 s
->intra_quant_bias
= 3<<(QUANT_BIAS_SHIFT
-3); //(a + x*3/8)/x
914 s
->inter_quant_bias
= 0;
916 s
->intra_quant_bias
=0;
917 s
->inter_quant_bias
=-(1<<(QUANT_BIAS_SHIFT
-2)); //(a - x/4)/x
920 if(avctx
->intra_quant_bias
!= FF_DEFAULT_QUANT_BIAS
)
921 s
->intra_quant_bias
= avctx
->intra_quant_bias
;
922 if(avctx
->inter_quant_bias
!= FF_DEFAULT_QUANT_BIAS
)
923 s
->inter_quant_bias
= avctx
->inter_quant_bias
;
925 avcodec_get_chroma_sub_sample(avctx
->pix_fmt
, &chroma_h_shift
, &chroma_v_shift
);
927 av_reduce(&s
->time_increment_resolution
, &dummy
, s
->avctx
->frame_rate
, s
->avctx
->frame_rate_base
, (1<<16)-1);
928 s
->time_increment_bits
= av_log2(s
->time_increment_resolution
- 1) + 1;
930 switch(avctx
->codec
->id
) {
931 case CODEC_ID_MPEG1VIDEO
:
932 s
->out_format
= FMT_MPEG1
;
933 s
->low_delay
= 0; //s->max_b_frames ? 0 : 1;
934 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
936 case CODEC_ID_MPEG2VIDEO
:
937 s
->out_format
= FMT_MPEG1
;
938 s
->low_delay
= 0; //s->max_b_frames ? 0 : 1;
939 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
944 s
->out_format
= FMT_MJPEG
;
945 s
->intra_only
= 1; /* force intra only for jpeg */
946 s
->mjpeg_write_tables
= 1; /* write all tables */
947 s
->mjpeg_data_only_frames
= 0; /* write all the needed headers */
948 s
->mjpeg_vsample
[0] = 1<<chroma_v_shift
;
949 s
->mjpeg_vsample
[1] = 1;
950 s
->mjpeg_vsample
[2] = 1;
951 s
->mjpeg_hsample
[0] = 1<<chroma_h_shift
;
952 s
->mjpeg_hsample
[1] = 1;
953 s
->mjpeg_hsample
[2] = 1;
954 if (mjpeg_init(s
) < 0)
961 if (h263_get_picture_format(s
->width
, s
->height
) == 7) {
962 av_log(avctx
, AV_LOG_INFO
, "Input picture size isn't suitable for h263 codec! try h263+\n");
965 s
->out_format
= FMT_H263
;
966 s
->obmc
= (avctx
->flags
& CODEC_FLAG_OBMC
) ?
1:0;
971 s
->out_format
= FMT_H263
;
974 s
->umvplus
= (avctx
->flags
& CODEC_FLAG_H263P_UMV
) ?
1:0;
975 s
->h263_aic
= (avctx
->flags
& CODEC_FLAG_H263P_AIC
) ?
1:0;
976 s
->modified_quant
= s
->h263_aic
;
977 s
->alt_inter_vlc
= (avctx
->flags
& CODEC_FLAG_H263P_AIV
) ?
1:0;
978 s
->obmc
= (avctx
->flags
& CODEC_FLAG_OBMC
) ?
1:0;
979 s
->loop_filter
= (avctx
->flags
& CODEC_FLAG_LOOP_FILTER
) ?
1:0;
980 s
->unrestricted_mv
= s
->obmc
|| s
->loop_filter
|| s
->umvplus
;
981 s
->h263_slice_structured
= (s
->flags
& CODEC_FLAG_H263P_SLICE_STRUCT
) ?
1:0;
984 /* These are just to be sure */
989 s
->out_format
= FMT_H263
;
990 s
->h263_flv
= 2; /* format = 1; 11-bit codes */
991 s
->unrestricted_mv
= 1;
992 s
->rtp_mode
=0; /* don't allow GOB */
997 s
->out_format
= FMT_H263
;
1001 case CODEC_ID_MPEG4
:
1002 s
->out_format
= FMT_H263
;
1004 s
->unrestricted_mv
= 1;
1005 s
->low_delay
= s
->max_b_frames ?
0 : 1;
1006 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
1008 case CODEC_ID_MSMPEG4V1
:
1009 s
->out_format
= FMT_H263
;
1010 s
->h263_msmpeg4
= 1;
1012 s
->unrestricted_mv
= 1;
1013 s
->msmpeg4_version
= 1;
1017 case CODEC_ID_MSMPEG4V2
:
1018 s
->out_format
= FMT_H263
;
1019 s
->h263_msmpeg4
= 1;
1021 s
->unrestricted_mv
= 1;
1022 s
->msmpeg4_version
= 2;
1026 case CODEC_ID_MSMPEG4V3
:
1027 s
->out_format
= FMT_H263
;
1028 s
->h263_msmpeg4
= 1;
1030 s
->unrestricted_mv
= 1;
1031 s
->msmpeg4_version
= 3;
1032 s
->flipflop_rounding
=1;
1037 s
->out_format
= FMT_H263
;
1038 s
->h263_msmpeg4
= 1;
1040 s
->unrestricted_mv
= 1;
1041 s
->msmpeg4_version
= 4;
1042 s
->flipflop_rounding
=1;
1047 s
->out_format
= FMT_H263
;
1048 s
->h263_msmpeg4
= 1;
1050 s
->unrestricted_mv
= 1;
1051 s
->msmpeg4_version
= 5;
1052 s
->flipflop_rounding
=1;
1061 { /* set up some save defaults, some codecs might override them later */
1067 default_mv_penalty
= av_mallocz( sizeof(uint8_t)*(MAX_FCODE
+1)*(2*MAX_MV
+1) );
1068 memset(default_mv_penalty
, 0, sizeof(uint8_t)*(MAX_FCODE
+1)*(2*MAX_MV
+1));
1069 memset(default_fcode_tab
, 0, sizeof(uint8_t)*(2*MAX_MV
+1));
1071 for(i
=-16; i
<16; i
++){
1072 default_fcode_tab
[i
+ MAX_MV
]= 1;
1076 s
->me
.mv_penalty
= default_mv_penalty
;
1077 s
->fcode_tab
= default_fcode_tab
;
1079 /* dont use mv_penalty table for crap MV as it would be confused */
1080 //FIXME remove after fixing / removing old ME
1081 if (s
->me_method
< ME_EPZS
) s
->me
.mv_penalty
= default_mv_penalty
;
1086 if (MPV_common_init(s
) < 0)
1089 if(s
->modified_quant
)
1090 s
->chroma_qscale_table
= ff_h263_chroma_qscale_table
;
1091 s
->progressive_frame
=
1092 s
->progressive_sequence
= !(avctx
->flags
& (CODEC_FLAG_INTERLACED_DCT
|CODEC_FLAG_INTERLACED_ME
));
1093 s
->quant_precision
=5;
1095 ff_set_cmp(&s
->dsp
, s
->dsp
.ildct_cmp
, s
->avctx
->ildct_cmp
);
1099 #ifdef CONFIG_ENCODERS
1101 if (s
->out_format
== FMT_H263
)
1102 h263_encode_init(s
);
1103 if(s
->msmpeg4_version
)
1104 ff_msmpeg4_encode_init(s
);
1106 if (s
->out_format
== FMT_MPEG1
)
1107 ff_mpeg1_encode_init(s
);
1110 /* init default q matrix */
1112 int j
= s
->dsp
.idct_permutation
[i
];
1114 if(s
->codec_id
==CODEC_ID_MPEG4
&& s
->mpeg_quant
){
1115 s
->intra_matrix
[j
] = ff_mpeg4_default_intra_matrix
[i
];
1116 s
->inter_matrix
[j
] = ff_mpeg4_default_non_intra_matrix
[i
];
1117 }else if(s
->out_format
== FMT_H263
){
1118 s
->intra_matrix
[j
] =
1119 s
->inter_matrix
[j
] = ff_mpeg1_default_non_intra_matrix
[i
];
1123 s
->intra_matrix
[j
] = ff_mpeg1_default_intra_matrix
[i
];
1124 s
->inter_matrix
[j
] = ff_mpeg1_default_non_intra_matrix
[i
];
1126 if(s
->avctx
->intra_matrix
)
1127 s
->intra_matrix
[j
] = s
->avctx
->intra_matrix
[i
];
1128 if(s
->avctx
->inter_matrix
)
1129 s
->inter_matrix
[j
] = s
->avctx
->inter_matrix
[i
];
1132 /* precompute matrix */
1133 /* for mjpeg, we do include qscale in the matrix */
1134 if (s
->out_format
!= FMT_MJPEG
) {
1135 convert_matrix(&s
->dsp
, s
->q_intra_matrix
, s
->q_intra_matrix16
,
1136 s
->intra_matrix
, s
->intra_quant_bias
, 1, 31);
1137 convert_matrix(&s
->dsp
, s
->q_inter_matrix
, s
->q_inter_matrix16
,
1138 s
->inter_matrix
, s
->inter_quant_bias
, 1, 31);
1141 if(ff_rate_control_init(s
) < 0)
1144 s
->picture_number
= 0;
1145 s
->input_picture_number
= 0;
1146 s
->picture_in_gop_number
= 0;
1147 /* motion detector init */
/* Encoder teardown: frees encoder-side state that MPV_encode_init set up.
 * NOTE(review): this extraction is missing several interior lines of the
 * function (e.g. the body of the FMT_MJPEG branch and the return); the
 * comments below cover only the statements that are visible. */
1154 int MPV_encode_end(AVCodecContext
*avctx
)
/* the codec's private context lives in avctx->priv_data */
1156 MpegEncContext
*s
= avctx
->priv_data
;
/* release rate-control state (counterpart of ff_rate_control_init) */
1162 ff_rate_control_uninit(s
);
/* mjpeg keeps extra per-codec state; its cleanup call sits on a line not
   visible in this extraction -- TODO confirm against the full file */
1165 if (s
->out_format
== FMT_MJPEG
)
/* free any extradata buffer the encoder attached to the context */
1168 av_freep(&avctx
->extradata
);
1173 #endif //CONFIG_ENCODERS
/* Build the run/level lookup arrays for one RLTable: for each of the two
 * "last" classes it fills max_level[run], max_run[level] and
 * index_run[run] from the table_run/table_level pairs, then stores heap
 * copies of those scratch arrays in the RLTable itself.
 * NOTE(review): the extraction is missing the lines that set `start`/`end`
 * for each `last` iteration and the statement guarded by the `if` at
 * original line 1197 (presumably index_run[run]=i) -- confirm against the
 * full file. */
1175 void init_rl(RLTable
*rl
)
/* scratch arrays, sized for the largest run/level the tables may hold */
1177 int8_t max_level
[MAX_RUN
+1], max_run
[MAX_LEVEL
+1];
1178 uint8_t index_run
[MAX_RUN
+1];
1179 int last
, run
, level
, start
, end
, i
;
1181 /* compute max_level[], max_run[] and index_run[] */
1182 for(last
=0;last
<2;last
++) {
/* reset the scratch tables; rl->n marks "no entry yet" in index_run */
1191 memset(max_level
, 0, MAX_RUN
+ 1);
1192 memset(max_run
, 0, MAX_LEVEL
+ 1);
1193 memset(index_run
, rl
->n
, MAX_RUN
+ 1);
/* scan this class's slice [start, end) of the VLC tables */
1194 for(i
=start
;i
<end
;i
++) {
1195 run
= rl
->table_run
[i
];
1196 level
= rl
->table_level
[i
];
/* first entry seen for this run -- the guarded statement is on a line
   not visible in this extraction */
1197 if (index_run
[run
] == rl
->n
)
/* track the extreme level per run and the extreme run per level */
1199 if (level
> max_level
[run
])
1200 max_level
[run
] = level
;
1201 if (run
> max_run
[level
])
1202 max_run
[level
] = run
;
/* publish heap copies of the scratch arrays on the table itself;
 * av_malloc results are used unchecked, as elsewhere in this file */
1204 rl
->max_level
[last
] = av_malloc(MAX_RUN
+ 1);
1205 memcpy(rl
->max_level
[last
], max_level
, MAX_RUN
+ 1);
1206 rl
->max_run
[last
] = av_malloc(MAX_LEVEL
+ 1);
1207 memcpy(rl
->max_run
[last
], max_run
, MAX_LEVEL
+ 1);
1208 rl
->index_run
[last
] = av_malloc(MAX_RUN
+ 1);
1209 memcpy(rl
->index_run
[last
], index_run
, MAX_RUN
+ 1);
/* Replicate the border pixels of an image outward to build an edge margin
 * of width `w` on all four sides.  The caller must have allocated the
 * picture with at least `w` extra rows above/below and `w` extra columns
 * left/right of `buf` (the writes below go outside the visible area by
 * construction).  This lets motion compensation read slightly outside the
 * picture without per-pixel clipping.
 *
 * @param buf    top-left pixel of the visible image
 * @param wrap   line size of the underlying allocation, in bytes
 * @param width  visible width in pixels
 * @param height visible height in pixels
 * @param w      edge width to generate, in pixels
 */
//FIXME check that this is ok for mpeg4 interlaced
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    /* last_line points at the first pixel of the bottom visible row */
    last_line = buf + (height - 1) * wrap;
    for(i=0;i<w;i++) {
        /* top and bottom: copy the first/last visible row outward */
        memcpy(buf - (i + 1) * wrap, buf, width);
        memcpy(last_line + (i + 1) * wrap, last_line, width);
    }
    /* left and right: smear the outermost pixel of every visible row */
    ptr = buf;
    for(i=0;i<height;i++) {
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
        ptr += wrap;
    }
    /* corners, filled from the four corner pixels (the two bottom-corner
     * comments were previously copy-pasted as "top left"/"top right") */
    for(i=0;i<w;i++) {
        memset(buf - (i + 1) * wrap - w, buf[0], w);                       /* top left */
        memset(buf - (i + 1) * wrap + width, buf[width-1], w);             /* top right */
        memset(last_line + (i + 1) * wrap - w, last_line[0], w);           /* bottom left */
        memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
    }
}
/* Return the index of a free slot in s->picture[].
 * shared!=0 requests a slot that is completely untouched (no data and
 * buffer type 0); otherwise empty-but-previously-used slots are preferred,
 * falling back to any slot without allocated data.
 * NOTE(review): the if(shared){...}else{...} bracketing lines and the
 * final failure path are missing from this extraction -- confirm against
 * the full file. */
1242 int ff_find_unused_picture(MpegEncContext
*s
, int shared
){
/* pass 1: slot never handed out at all (data NULL, type 0) */
1246 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1247 if(s
->picture
[i
].data
[0]==NULL
&& s
->picture
[i
].type
==0) return i
;
/* pass 2: empty slot that was used before (non-zero buffer type) */
1250 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1251 if(s
->picture
[i
].data
[0]==NULL
&& s
->picture
[i
].type
!=0) return i
; //FIXME
/* pass 3: any slot without allocated data */
1253 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1254 if(s
->picture
[i
].data
[0]==NULL
) return i
;
/* Refresh the per-coefficient noise-reduction offsets from the running DCT
 * error statistics, separately for intra (1) and inter (0) blocks.
 * NOTE(review): the declarations of `i`/`intra` and several loop-closing
 * lines are missing from this extraction. */
1262 static void update_noise_reduction(MpegEncContext
*s
){
1265 for(intra
=0; intra
<2; intra
++){
/* keep the accumulators bounded: once more than 2^16 blocks have been
 * counted, halve both the error sums and the count (preserves ratios) */
1266 if(s
->dct_count
[intra
] > (1<<16)){
1267 for(i
=0; i
<64; i
++){
1268 s
->dct_error_sum
[intra
][i
] >>=1;
1270 s
->dct_count
[intra
] >>= 1;
/* offset[i] = noise_reduction * count / error_sum[i], computed with
 * rounding (+sum/2) and +1 in the divisor to avoid division by zero */
1273 for(i
=0; i
<64; i
++){
1274 s
->dct_offset
[intra
][i
]= (s
->avctx
->noise_reduction
* s
->dct_count
[intra
] + s
->dct_error_sum
[intra
][i
]/2) / (s
->dct_error_sum
[intra
][i
]+1);
1280 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1282 int MPV_frame_start(MpegEncContext
*s
, AVCodecContext
*avctx
)
1288 assert(s
->last_picture_ptr
==NULL
|| s
->out_format
!= FMT_H264
|| s
->codec_id
== CODEC_ID_SVQ3
);
1290 /* mark&release old frames */
1291 if (s
->pict_type
!= B_TYPE
&& s
->last_picture_ptr
&& s
->last_picture_ptr
->data
[0]) {
1292 avctx
->release_buffer(avctx
, (AVFrame
*)s
->last_picture_ptr
);
1294 /* release forgotten pictures */
1295 /* if(mpeg124/h263) */
1297 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1298 if(s
->picture
[i
].data
[0] && &s
->picture
[i
] != s
->next_picture_ptr
&& s
->picture
[i
].reference
){
1299 av_log(avctx
, AV_LOG_ERROR
, "releasing zombie picture\n");
1300 avctx
->release_buffer(avctx
, (AVFrame
*)&s
->picture
[i
]);
1307 /* release non refernce frames */
1308 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1309 if(s
->picture
[i
].data
[0] && !s
->picture
[i
].reference
/*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1310 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)&s
->picture
[i
]);
1314 if(s
->current_picture_ptr
&& s
->current_picture_ptr
->data
[0]==NULL
)
1315 pic
= (AVFrame
*)s
->current_picture_ptr
; //we allready have a unused image (maybe it was set before reading the header)
1317 i
= ff_find_unused_picture(s
, 0);
1318 pic
= (AVFrame
*)&s
->picture
[i
];
1321 pic
->reference
= s
->pict_type
!= B_TYPE ?
3 : 0;
1323 pic
->coded_picture_number
= s
->coded_picture_number
++;
1325 if( alloc_picture(s
, (Picture
*)pic
, 0) < 0)
1328 s
->current_picture_ptr
= (Picture
*)pic
;
1329 s
->current_picture_ptr
->top_field_first
= s
->top_field_first
; //FIXME use only the vars from current_pic
1330 s
->current_picture_ptr
->interlaced_frame
= !s
->progressive_frame
&& !s
->progressive_sequence
;
1333 s
->current_picture_ptr
->pict_type
= s
->pict_type
;
1334 // if(s->flags && CODEC_FLAG_QSCALE)
1335 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1336 s
->current_picture_ptr
->key_frame
= s
->pict_type
== I_TYPE
;
1338 copy_picture(&s
->current_picture
, s
->current_picture_ptr
);
1340 if(s
->out_format
!= FMT_H264
|| s
->codec_id
== CODEC_ID_SVQ3
){
1341 if (s
->pict_type
!= B_TYPE
) {
1342 s
->last_picture_ptr
= s
->next_picture_ptr
;
1343 s
->next_picture_ptr
= s
->current_picture_ptr
;
1346 if(s
->last_picture_ptr
) copy_picture(&s
->last_picture
, s
->last_picture_ptr
);
1347 if(s
->next_picture_ptr
) copy_picture(&s
->next_picture
, s
->next_picture_ptr
);
1349 if(s
->pict_type
!= I_TYPE
&& (s
->last_picture_ptr
==NULL
|| s
->last_picture_ptr
->data
[0]==NULL
)){
1350 av_log(avctx
, AV_LOG_ERROR
, "warning: first frame is no keyframe\n");
1351 assert(s
->pict_type
!= B_TYPE
); //these should have been dropped if we dont have a reference
1355 assert(s
->pict_type
== I_TYPE
|| (s
->last_picture_ptr
&& s
->last_picture_ptr
->data
[0]));
1357 if(s
->picture_structure
!=PICT_FRAME
){
1360 if(s
->picture_structure
== PICT_BOTTOM_FIELD
){
1361 s
->current_picture
.data
[i
] += s
->current_picture
.linesize
[i
];
1363 s
->current_picture
.linesize
[i
] *= 2;
1364 s
->last_picture
.linesize
[i
] *=2;
1365 s
->next_picture
.linesize
[i
] *=2;
1370 s
->hurry_up
= s
->avctx
->hurry_up
;
1371 s
->error_resilience
= avctx
->error_resilience
;
1373 /* set dequantizer, we cant do it during init as it might change for mpeg4
1374 and we cant do it in the header decode as init isnt called for mpeg4 there yet */
1375 if(s
->mpeg_quant
|| s
->codec_id
== CODEC_ID_MPEG2VIDEO
){
1376 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg2_intra
;
1377 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg2_inter
;
1378 }else if(s
->out_format
== FMT_H263
){
1379 s
->dct_unquantize_intra
= s
->dct_unquantize_h263_intra
;
1380 s
->dct_unquantize_inter
= s
->dct_unquantize_h263_inter
;
1382 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg1_intra
;
1383 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg1_inter
;
1386 if(s
->dct_error_sum
){
1387 assert(s
->avctx
->noise_reduction
&& s
->encoding
);
1389 update_noise_reduction(s
);
1393 if(s
->avctx
->xvmc_acceleration
)
1394 return XVMC_field_start(s
, avctx
);
1399 /* generic function for encode/decode called after a frame has been coded/decoded */
1400 void MPV_frame_end(MpegEncContext
*s
)
1403 /* draw edge for correct motion prediction if outside */
1405 //just to make sure that all data is rendered.
1406 if(s
->avctx
->xvmc_acceleration
){
1410 if(s
->unrestricted_mv
&& s
->pict_type
!= B_TYPE
&& !s
->intra_only
&& !(s
->flags
&CODEC_FLAG_EMU_EDGE
)) {
1411 draw_edges(s
->current_picture
.data
[0], s
->linesize
, s
->h_edge_pos
, s
->v_edge_pos
, EDGE_WIDTH
);
1412 draw_edges(s
->current_picture
.data
[1], s
->uvlinesize
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1, EDGE_WIDTH
/2);
1413 draw_edges(s
->current_picture
.data
[2], s
->uvlinesize
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1, EDGE_WIDTH
/2);
1417 s
->last_pict_type
= s
->pict_type
;
1418 if(s
->pict_type
!=B_TYPE
){
1419 s
->last_non_b_pict_type
= s
->pict_type
;
1422 /* copy back current_picture variables */
1423 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1424 if(s
->picture
[i
].data
[0] == s
->current_picture
.data
[0]){
1425 s
->picture
[i
]= s
->current_picture
;
1429 assert(i
<MAX_PICTURE_COUNT
);
1433 /* release non refernce frames */
1434 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1435 if(s
->picture
[i
].data
[0] && !s
->picture
[i
].reference
/*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1436 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)&s
->picture
[i
]);
1440 // clear copies, to avoid confusion
1442 memset(&s
->last_picture
, 0, sizeof(Picture
));
1443 memset(&s
->next_picture
, 0, sizeof(Picture
));
1444 memset(&s
->current_picture
, 0, sizeof(Picture
));
1449 * Draws a line from (ex, ey) to (sx, sy), adding `color` to every pixel
 * it crosses (fixed-point DDA: a 16.16 slope, rounded at each step).
1450 * @param w width of the image
1451 * @param h height of the image
1452 * @param stride stride/linesize of the image
1453 * @param color value added to each pixel of the line
/* NOTE(review): the endpoint-swap lines and the local declarations inside
 * this function are missing from this extraction; the comments below cover
 * only the visible statements. */
1455 static void draw_line(uint8_t *buf
, int sx
, int sy
, int ex
, int ey
, int w
, int h
, int stride
, int color
){
/* clamp both endpoints inside the image */
1458 sx
= clip(sx
, 0, w
-1);
1459 sy
= clip(sy
, 0, h
-1);
1460 ex
= clip(ex
, 0, w
-1);
1461 ey
= clip(ey
, 0, h
-1);
/* plot the start point */
1463 buf
[sy
*stride
+ sx
]+= color
;
/* mostly-horizontal line: step in x, derive y from the 16.16 slope */
1465 if(ABS(ex
- sx
) > ABS(ey
- sy
)){
1470 buf
+= sx
+ sy
*stride
;
1472 f
= ((ey
-sy
)<<16)/ex
;
1473 for(x
= 0; x
<= ex
; x
++){
/* +(1<<15) rounds the 16.16 product to the nearest integer row */
1474 y
= ((x
*f
) + (1<<15))>>16;
1475 buf
[y
*stride
+ x
]+= color
;
/* mostly-vertical line: step in y, derive x */
1482 buf
+= sx
+ sy
*stride
;
/* guard the slope division against ey == 0 */
1484 if(ey
) f
= ((ex
-sx
)<<16)/ey
;
1486 for(y
= 0; y
<= ey
; y
++){
1487 x
= ((y
*f
) + (1<<15))>>16;
1488 buf
[y
*stride
+ x
]+= color
;
1494 * Draws an arrow from (ex, ey) to (sx, sy): the shaft plus, when the
 * vector is long enough, two short head strokes at the (sx, sy) end.
1495 * @param w width of the image
1496 * @param h height of the image
1497 * @param stride stride/linesize of the image
1498 * @param color color of the arrow
/* NOTE(review): the lines computing dx/dy and rx/ry are missing from this
 * extraction; the comments below cover only the visible statements. */
1500 static void draw_arrow(uint8_t *buf
, int sx
, int sy
, int ex
, int ey
, int w
, int h
, int stride
, int color
){
/* clamp endpoints to a generous band around the image; draw_line clips
 * exactly to the picture afterwards */
1503 sx
= clip(sx
, -100, w
+100);
1504 sy
= clip(sy
, -100, h
+100);
1505 ex
= clip(ex
, -100, w
+100);
1506 ey
= clip(ey
, -100, h
+100);
/* only draw a head when the vector is longer than 3 pixels */
1511 if(dx
*dx
+ dy
*dy
> 3*3){
/* vector length with 4 fractional bits (the <<8 under the sqrt) */
1514 int length
= ff_sqrt((rx
*rx
+ ry
*ry
)<<8);
1516 //FIXME subpixel accuracy
/* normalize (rx, ry) to a 3-pixel head vector, rounded */
1517 rx
= ROUNDED_DIV(rx
*3<<4, length
);
1518 ry
= ROUNDED_DIV(ry
*3<<4, length
);
/* two head strokes: the scaled vector and its perpendicular */
1520 draw_line(buf
, sx
, sy
, sx
+ rx
, sy
+ ry
, w
, h
, stride
, color
);
1521 draw_line(buf
, sx
, sy
, sx
- ry
, sy
+ rx
, w
, h
, stride
, color
);
/* the shaft */
1523 draw_line(buf
, sx
, sy
, ex
, ey
, w
, h
, stride
, color
);
1527 * prints debuging info for the given picture.
1529 void ff_print_debug_info(MpegEncContext
*s
, AVFrame
*pict
){
1531 if(!pict
|| !pict
->mb_type
) return;
1533 if(s
->avctx
->debug
&(FF_DEBUG_SKIP
| FF_DEBUG_QP
| FF_DEBUG_MB_TYPE
)){
1536 av_log(s
->avctx
,AV_LOG_DEBUG
,"New frame, type: ");
1537 switch (pict
->pict_type
) {
1538 case FF_I_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"I\n"); break;
1539 case FF_P_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"P\n"); break;
1540 case FF_B_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"B\n"); break;
1541 case FF_S_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"S\n"); break;
1542 case FF_SI_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"SI\n"); break;
1543 case FF_SP_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"SP\n"); break;
1545 for(y
=0; y
<s
->mb_height
; y
++){
1546 for(x
=0; x
<s
->mb_width
; x
++){
1547 if(s
->avctx
->debug
&FF_DEBUG_SKIP
){
1548 int count
= s
->mbskip_table
[x
+ y
*s
->mb_stride
];
1549 if(count
>9) count
=9;
1550 av_log(s
->avctx
, AV_LOG_DEBUG
, "%1d", count
);
1552 if(s
->avctx
->debug
&FF_DEBUG_QP
){
1553 av_log(s
->avctx
, AV_LOG_DEBUG
, "%2d", pict
->qscale_table
[x
+ y
*s
->mb_stride
]);
1555 if(s
->avctx
->debug
&FF_DEBUG_MB_TYPE
){
1556 int mb_type
= pict
->mb_type
[x
+ y
*s
->mb_stride
];
1557 //Type & MV direction
1559 av_log(s
->avctx
, AV_LOG_DEBUG
, "P");
1560 else if(IS_INTRA(mb_type
) && IS_ACPRED(mb_type
))
1561 av_log(s
->avctx
, AV_LOG_DEBUG
, "A");
1562 else if(IS_INTRA4x4(mb_type
))
1563 av_log(s
->avctx
, AV_LOG_DEBUG
, "i");
1564 else if(IS_INTRA16x16(mb_type
))
1565 av_log(s
->avctx
, AV_LOG_DEBUG
, "I");
1566 else if(IS_DIRECT(mb_type
) && IS_SKIP(mb_type
))
1567 av_log(s
->avctx
, AV_LOG_DEBUG
, "d");
1568 else if(IS_DIRECT(mb_type
))
1569 av_log(s
->avctx
, AV_LOG_DEBUG
, "D");
1570 else if(IS_GMC(mb_type
) && IS_SKIP(mb_type
))
1571 av_log(s
->avctx
, AV_LOG_DEBUG
, "g");
1572 else if(IS_GMC(mb_type
))
1573 av_log(s
->avctx
, AV_LOG_DEBUG
, "G");
1574 else if(IS_SKIP(mb_type
))
1575 av_log(s
->avctx
, AV_LOG_DEBUG
, "S");
1576 else if(!USES_LIST(mb_type
, 1))
1577 av_log(s
->avctx
, AV_LOG_DEBUG
, ">");
1578 else if(!USES_LIST(mb_type
, 0))
1579 av_log(s
->avctx
, AV_LOG_DEBUG
, "<");
1581 assert(USES_LIST(mb_type
, 0) && USES_LIST(mb_type
, 1));
1582 av_log(s
->avctx
, AV_LOG_DEBUG
, "X");
1587 av_log(s
->avctx
, AV_LOG_DEBUG
, "+");
1588 else if(IS_16X8(mb_type
))
1589 av_log(s
->avctx
, AV_LOG_DEBUG
, "-");
1590 else if(IS_8X16(mb_type
))
1591 av_log(s
->avctx
, AV_LOG_DEBUG
, "¦");
1592 else if(IS_INTRA(mb_type
) || IS_16X16(mb_type
))
1593 av_log(s
->avctx
, AV_LOG_DEBUG
, " ");
1595 av_log(s
->avctx
, AV_LOG_DEBUG
, "?");
1598 if(IS_INTERLACED(mb_type
) && s
->codec_id
== CODEC_ID_H264
)
1599 av_log(s
->avctx
, AV_LOG_DEBUG
, "=");
1601 av_log(s
->avctx
, AV_LOG_DEBUG
, " ");
1603 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1605 av_log(s
->avctx
, AV_LOG_DEBUG
, "\n");
1609 if((s
->avctx
->debug
&(FF_DEBUG_VIS_QP
|FF_DEBUG_VIS_MB_TYPE
)) || (s
->avctx
->debug_mv
)){
1610 const int shift
= 1 + s
->quarter_sample
;
1614 int h_chroma_shift
, v_chroma_shift
;
1615 s
->low_delay
=0; //needed to see the vectors without trashing the buffers
1617 avcodec_get_chroma_sub_sample(s
->avctx
->pix_fmt
, &h_chroma_shift
, &v_chroma_shift
);
1619 memcpy(s
->visualization_buffer
[i
], pict
->data
[i
], (i
==0) ? pict
->linesize
[i
]*s
->height
:pict
->linesize
[i
]*s
->height
>> v_chroma_shift
);
1620 pict
->data
[i
]= s
->visualization_buffer
[i
];
1622 pict
->type
= FF_BUFFER_TYPE_COPY
;
1625 for(mb_y
=0; mb_y
<s
->mb_height
; mb_y
++){
1627 for(mb_x
=0; mb_x
<s
->mb_width
; mb_x
++){
1628 const int mb_index
= mb_x
+ mb_y
*s
->mb_stride
;
1629 if((s
->avctx
->debug_mv
) && pict
->motion_val
){
1631 for(type
=0; type
<3; type
++){
1634 case 0: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_P_FOR
)) || (pict
->pict_type
!=FF_P_TYPE
))
1638 case 1: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_B_FOR
)) || (pict
->pict_type
!=FF_B_TYPE
))
1642 case 2: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_B_BACK
)) || (pict
->pict_type
!=FF_B_TYPE
))
1647 if(!USES_LIST(pict
->mb_type
[mb_index
], direction
))
1650 if(IS_8X8(pict
->mb_type
[mb_index
])){
1653 int sx
= mb_x
*16 + 4 + 8*(i
&1);
1654 int sy
= mb_y
*16 + 4 + 8*(i
>>1);
1655 int xy
= 1 + mb_x
*2 + (i
&1) + (mb_y
*2 + 1 + (i
>>1))*(s
->mb_width
*2 + 2);
1656 int mx
= (pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1657 int my
= (pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1658 draw_arrow(ptr
, sx
, sy
, mx
, my
, s
->width
, s
->height
, s
->linesize
, 100);
1660 }else if(IS_16X8(pict
->mb_type
[mb_index
])){
1664 int sy
=mb_y
*16 + 4 + 8*i
;
1665 int xy
=1 + mb_x
*2 + (mb_y
*2 + 1 + i
)*(s
->mb_width
*2 + 2);
1666 int mx
=(pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1667 int my
=(pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1668 draw_arrow(ptr
, sx
, sy
, mx
, my
, s
->width
, s
->height
, s
->linesize
, 100);
1671 int sx
= mb_x
*16 + 8;
1672 int sy
= mb_y
*16 + 8;
1673 int xy
= 1 + mb_x
*2 + (mb_y
*2 + 1)*(s
->mb_width
*2 + 2);
1674 int mx
= (pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1675 int my
= (pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1676 draw_arrow(ptr
, sx
, sy
, mx
, my
, s
->width
, s
->height
, s
->linesize
, 100);
1680 if((s
->avctx
->debug
&FF_DEBUG_VIS_QP
) && pict
->motion_val
){
1681 uint64_t c
= (pict
->qscale_table
[mb_index
]*128/31) * 0x0101010101010101ULL
;
1684 *(uint64_t*)(pict
->data
[1] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[1])= c
;
1685 *(uint64_t*)(pict
->data
[2] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[2])= c
;
1688 if((s
->avctx
->debug
&FF_DEBUG_VIS_MB_TYPE
) && pict
->motion_val
){
1689 int mb_type
= pict
->mb_type
[mb_index
];
1692 #define COLOR(theta, r)\
1693 u= (int)(128 + r*cos(theta*3.141592/180));\
1694 v= (int)(128 + r*sin(theta*3.141592/180));
1698 if(IS_PCM(mb_type
)){
1700 }else if((IS_INTRA(mb_type
) && IS_ACPRED(mb_type
)) || IS_INTRA16x16(mb_type
)){
1702 }else if(IS_INTRA4x4(mb_type
)){
1704 }else if(IS_DIRECT(mb_type
) && IS_SKIP(mb_type
)){
1706 }else if(IS_DIRECT(mb_type
)){
1708 }else if(IS_GMC(mb_type
) && IS_SKIP(mb_type
)){
1710 }else if(IS_GMC(mb_type
)){
1712 }else if(IS_SKIP(mb_type
)){
1714 }else if(!USES_LIST(mb_type
, 1)){
1716 }else if(!USES_LIST(mb_type
, 0)){
1719 assert(USES_LIST(mb_type
, 0) && USES_LIST(mb_type
, 1));
1723 u
*= 0x0101010101010101ULL
;
1724 v
*= 0x0101010101010101ULL
;
1726 *(uint64_t*)(pict
->data
[1] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[1])= u
;
1727 *(uint64_t*)(pict
->data
[2] + 8*mb_x
+ (8*mb_y
+ y
)*pict
->linesize
[2])= v
;
1731 if(IS_8X8(mb_type
) || IS_16X8(mb_type
)){
1732 *(uint64_t*)(pict
->data
[0] + 16*mb_x
+ 0 + (16*mb_y
+ 8)*pict
->linesize
[0])^= 0x8080808080808080ULL
;
1733 *(uint64_t*)(pict
->data
[0] + 16*mb_x
+ 8 + (16*mb_y
+ 8)*pict
->linesize
[0])^= 0x8080808080808080ULL
;
1735 if(IS_8X8(mb_type
) || IS_8X16(mb_type
)){
1737 pict
->data
[0][16*mb_x
+ 8 + (16*mb_y
+ y
)*pict
->linesize
[0]]^= 0x80;
1740 if(IS_INTERLACED(mb_type
) && s
->codec_id
== CODEC_ID_H264
){
1744 s
->mbskip_table
[mb_index
]=0;
1750 #ifdef CONFIG_ENCODERS
/**
 * Compute the sum of absolute errors (SAE) of a 16x16 pixel block against
 * a constant reference value -- used by get_intra_count() below as a proxy
 * for "how well is this block described by its mean alone".
 *
 * @param src    top-left pixel of the 16x16 block
 * @param ref    value subtracted from every pixel (typically the block mean)
 * @param stride distance in bytes between vertically adjacent pixels
 * @return sum over all 256 pixels of |src[x + y*stride] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride){
    int x, y;
    int acc = 0;

    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++) {
            /* open-coded absolute value avoids the multiple-evaluation
             * hazard of the ABS() function-like macro */
            int diff = src[x + y * stride] - ref;
            acc += diff < 0 ? -diff : diff;
        }
    }

    return acc;
}
/* Estimate how many 16x16 blocks of `src` look cheaper to intra-code:
 * a block is counted when its inter SAD against `ref` exceeds its own
 * deviation-from-mean (SAE) by more than 500.
 * NOTE(review): the declarations of acc/x/y/w/h and the final return are
 * missing from this extraction -- presumably w/h are the picture size
 * rounded to whole macroblocks; confirm against the full file. */
1765 static int get_intra_count(MpegEncContext
*s
, uint8_t *src
, uint8_t *ref
, int stride
){
/* walk the picture in 16x16 steps */
1772 for(y
=0; y
<h
; y
+=16){
1773 for(x
=0; x
<w
; x
+=16){
1774 int offset
= x
+ y
*stride
;
/* inter cost: SAD of the block against the co-located reference block */
1775 int sad
= s
->dsp
.sad
[0](NULL
, src
+ offset
, ref
+ offset
, stride
, 16);
/* block mean: pix_sum over 256 pixels, rounded (+128, >>8) */
1776 int mean
= (s
->dsp
.pix_sum(src
+ offset
, stride
) + 128)>>8;
/* intra proxy: deviation of the block from its own mean */
1777 int sae
= get_sae(src
+ offset
, mean
, stride
);
/* count the block when intra looks clearly cheaper than inter */
1779 acc
+= sae
+ 500 < sad
;
1786 static int load_input_picture(MpegEncContext
*s
, AVFrame
*pic_arg
){
1789 const int encoding_delay
= s
->max_b_frames
;
1793 if(encoding_delay
&& !(s
->flags
&CODEC_FLAG_INPUT_PRESERVED
)) direct
=0;
1794 if(pic_arg
->linesize
[0] != s
->linesize
) direct
=0;
1795 if(pic_arg
->linesize
[1] != s
->uvlinesize
) direct
=0;
1796 if(pic_arg
->linesize
[2] != s
->uvlinesize
) direct
=0;
1798 // av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1801 i
= ff_find_unused_picture(s
, 1);
1803 pic
= (AVFrame
*)&s
->picture
[i
];
1807 pic
->data
[i
]= pic_arg
->data
[i
];
1808 pic
->linesize
[i
]= pic_arg
->linesize
[i
];
1810 alloc_picture(s
, (Picture
*)pic
, 1);
1813 i
= ff_find_unused_picture(s
, 0);
1815 pic
= (AVFrame
*)&s
->picture
[i
];
1818 alloc_picture(s
, (Picture
*)pic
, 0);
1820 if( pic
->data
[0] + offset
== pic_arg
->data
[0]
1821 && pic
->data
[1] + offset
== pic_arg
->data
[1]
1822 && pic
->data
[2] + offset
== pic_arg
->data
[2]){
1825 int h_chroma_shift
, v_chroma_shift
;
1826 avcodec_get_chroma_sub_sample(s
->avctx
->pix_fmt
, &h_chroma_shift
, &v_chroma_shift
);
1829 int src_stride
= pic_arg
->linesize
[i
];
1830 int dst_stride
= i ? s
->uvlinesize
: s
->linesize
;
1831 int h_shift
= i ? h_chroma_shift
: 0;
1832 int v_shift
= i ? v_chroma_shift
: 0;
1833 int w
= s
->width
>>h_shift
;
1834 int h
= s
->height
>>v_shift
;
1835 uint8_t *src
= pic_arg
->data
[i
];
1836 uint8_t *dst
= pic
->data
[i
] + offset
;
1838 if(src_stride
==dst_stride
)
1839 memcpy(dst
, src
, src_stride
*h
);
1842 memcpy(dst
, src
, w
);
1850 copy_picture_attributes(pic
, pic_arg
);
1852 pic
->display_picture_number
= s
->input_picture_number
++;
1853 if(pic
->pts
!= AV_NOPTS_VALUE
){
1854 s
->user_specified_pts
= pic
->pts
;
1856 if(s
->user_specified_pts
){
1857 pic
->pts
= s
->user_specified_pts
+ AV_TIME_BASE
*(int64_t)s
->avctx
->frame_rate_base
/ s
->avctx
->frame_rate
;
1858 av_log(s
->avctx
, AV_LOG_INFO
, "Warning: AVFrame.pts=? trying to guess (%Ld)\n", pic
->pts
);
1860 pic
->pts
= av_rescale(pic
->display_picture_number
*(int64_t)s
->avctx
->frame_rate_base
, AV_TIME_BASE
, s
->avctx
->frame_rate
);
1865 /* shift buffer entries */
1866 for(i
=1; i
<MAX_PICTURE_COUNT
/*s->encoding_delay+1*/; i
++)
1867 s
->input_picture
[i
-1]= s
->input_picture
[i
];
1869 s
->input_picture
[encoding_delay
]= (Picture
*)pic
;
1874 static void select_input_picture(MpegEncContext
*s
){
1877 for(i
=1; i
<MAX_PICTURE_COUNT
; i
++)
1878 s
->reordered_input_picture
[i
-1]= s
->reordered_input_picture
[i
];
1879 s
->reordered_input_picture
[MAX_PICTURE_COUNT
-1]= NULL
;
1881 /* set next picture types & ordering */
1882 if(s
->reordered_input_picture
[0]==NULL
&& s
->input_picture
[0]){
1883 if(/*s->picture_in_gop_number >= s->gop_size ||*/ s
->next_picture_ptr
==NULL
|| s
->intra_only
){
1884 s
->reordered_input_picture
[0]= s
->input_picture
[0];
1885 s
->reordered_input_picture
[0]->pict_type
= I_TYPE
;
1886 s
->reordered_input_picture
[0]->coded_picture_number
= s
->coded_picture_number
++;
1890 if(s
->flags
&CODEC_FLAG_PASS2
){
1891 for(i
=0; i
<s
->max_b_frames
+1; i
++){
1892 int pict_num
= s
->input_picture
[0]->display_picture_number
+ i
;
1893 int pict_type
= s
->rc_context
.entry
[pict_num
].new_pict_type
;
1894 s
->input_picture
[i
]->pict_type
= pict_type
;
1896 if(i
+ 1 >= s
->rc_context
.num_entries
) break;
1900 if(s
->input_picture
[0]->pict_type
){
1901 /* user selected pict_type */
1902 for(b_frames
=0; b_frames
<s
->max_b_frames
+1; b_frames
++){
1903 if(s
->input_picture
[b_frames
]->pict_type
!=B_TYPE
) break;
1906 if(b_frames
> s
->max_b_frames
){
1907 av_log(s
->avctx
, AV_LOG_ERROR
, "warning, too many bframes in a row\n");
1908 b_frames
= s
->max_b_frames
;
1910 }else if(s
->avctx
->b_frame_strategy
==0){
1911 b_frames
= s
->max_b_frames
;
1912 while(b_frames
&& !s
->input_picture
[b_frames
]) b_frames
--;
1913 }else if(s
->avctx
->b_frame_strategy
==1){
1914 for(i
=1; i
<s
->max_b_frames
+1; i
++){
1915 if(s
->input_picture
[i
] && s
->input_picture
[i
]->b_frame_score
==0){
1916 s
->input_picture
[i
]->b_frame_score
=
1917 get_intra_count(s
, s
->input_picture
[i
]->data
[0],
1918 s
->input_picture
[i
-1]->data
[0], s
->linesize
) + 1;
1921 for(i
=0; i
<s
->max_b_frames
; i
++){
1922 if(s
->input_picture
[i
]==NULL
|| s
->input_picture
[i
]->b_frame_score
- 1 > s
->mb_num
/40) break;
1925 b_frames
= FFMAX(0, i
-1);
1928 for(i
=0; i
<b_frames
+1; i
++){
1929 s
->input_picture
[i
]->b_frame_score
=0;
1932 av_log(s
->avctx
, AV_LOG_ERROR
, "illegal b frame strategy\n");
1937 //static int b_count=0;
1938 //b_count+= b_frames;
1939 //av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
1940 if(s
->picture_in_gop_number
+ b_frames
>= s
->gop_size
){
1941 if(s
->flags
& CODEC_FLAG_CLOSED_GOP
)
1943 s
->input_picture
[b_frames
]->pict_type
= I_TYPE
;
1946 if( (s
->flags
& CODEC_FLAG_CLOSED_GOP
)
1948 && s
->input_picture
[b_frames
]->pict_type
== I_TYPE
)
1951 s
->reordered_input_picture
[0]= s
->input_picture
[b_frames
];
1952 if(s
->reordered_input_picture
[0]->pict_type
!= I_TYPE
)
1953 s
->reordered_input_picture
[0]->pict_type
= P_TYPE
;
1954 s
->reordered_input_picture
[0]->coded_picture_number
= s
->coded_picture_number
++;
1955 for(i
=0; i
<b_frames
; i
++){
1956 s
->reordered_input_picture
[i
+1]= s
->input_picture
[i
];
1957 s
->reordered_input_picture
[i
+1]->pict_type
= B_TYPE
;
1958 s
->reordered_input_picture
[i
+1]->coded_picture_number
= s
->coded_picture_number
++;
1963 if(s
->reordered_input_picture
[0]){
1964 s
->reordered_input_picture
[0]->reference
= s
->reordered_input_picture
[0]->pict_type
!=B_TYPE ?
3 : 0;
1966 copy_picture(&s
->new_picture
, s
->reordered_input_picture
[0]);
1968 if(s
->reordered_input_picture
[0]->type
== FF_BUFFER_TYPE_SHARED
){
1969 // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable
1971 int i
= ff_find_unused_picture(s
, 0);
1972 Picture
*pic
= &s
->picture
[i
];
1974 /* mark us unused / free shared pic */
1976 s
->reordered_input_picture
[0]->data
[i
]= NULL
;
1977 s
->reordered_input_picture
[0]->type
= 0;
1979 copy_picture_attributes((AVFrame
*)pic
, (AVFrame
*)s
->reordered_input_picture
[0]);
1980 pic
->reference
= s
->reordered_input_picture
[0]->reference
;
1982 alloc_picture(s
, pic
, 0);
1984 s
->current_picture_ptr
= pic
;
1986 // input is not a shared pix -> reuse buffer for current_pix
1988 assert( s
->reordered_input_picture
[0]->type
==FF_BUFFER_TYPE_USER
1989 || s
->reordered_input_picture
[0]->type
==FF_BUFFER_TYPE_INTERNAL
);
1991 s
->current_picture_ptr
= s
->reordered_input_picture
[0];
1993 s
->new_picture
.data
[i
]+=16;
1996 copy_picture(&s
->current_picture
, s
->current_picture_ptr
);
1998 s
->picture_number
= s
->new_picture
.display_picture_number
;
1999 //printf("dpn:%d\n", s->picture_number);
2001 memset(&s
->new_picture
, 0, sizeof(Picture
));
2005 int MPV_encode_picture(AVCodecContext
*avctx
,
2006 unsigned char *buf
, int buf_size
, void *data
)
2008 MpegEncContext
*s
= avctx
->priv_data
;
2009 AVFrame
*pic_arg
= data
;
2010 int i
, stuffing_count
;
2012 if(avctx
->pix_fmt
!= PIX_FMT_YUV420P
){
2013 av_log(avctx
, AV_LOG_ERROR
, "this codec supports only YUV420P\n");
2017 for(i
=0; i
<avctx
->thread_count
; i
++){
2018 int y
= s
->thread_context
[i
]->start_mb_y
;
2019 int h
= s
->mb_height
;
2020 uint8_t *start
= buf
+ buf_size
* y
/h
;
2021 uint8_t *end
= buf
+ buf_size
*(y
+1)/h
;
2023 init_put_bits(&s
->thread_context
[i
]->pb
, start
, end
- start
);
2026 s
->picture_in_gop_number
++;
2028 load_input_picture(s
, pic_arg
);
2030 select_input_picture(s
);
2033 if(s
->new_picture
.data
[0]){
2034 s
->pict_type
= s
->new_picture
.pict_type
;
2036 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
2037 MPV_frame_start(s
, avctx
);
2039 encode_picture(s
, s
->picture_number
);
2041 avctx
->real_pict_num
= s
->picture_number
;
2042 avctx
->header_bits
= s
->header_bits
;
2043 avctx
->mv_bits
= s
->mv_bits
;
2044 avctx
->misc_bits
= s
->misc_bits
;
2045 avctx
->i_tex_bits
= s
->i_tex_bits
;
2046 avctx
->p_tex_bits
= s
->p_tex_bits
;
2047 avctx
->i_count
= s
->i_count
;
2048 avctx
->p_count
= s
->mb_num
- s
->i_count
- s
->skip_count
; //FIXME f/b_count in avctx
2049 avctx
->skip_count
= s
->skip_count
;
2053 if (s
->out_format
== FMT_MJPEG
)
2054 mjpeg_picture_trailer(s
);
2056 if(s
->flags
&CODEC_FLAG_PASS1
)
2057 ff_write_pass1_stats(s
);
2060 avctx
->error
[i
] += s
->current_picture_ptr
->error
[i
];
2063 flush_put_bits(&s
->pb
);
2064 s
->frame_bits
= put_bits_count(&s
->pb
);
2066 stuffing_count
= ff_vbv_update(s
, s
->frame_bits
);
2068 switch(s
->codec_id
){
2069 case CODEC_ID_MPEG1VIDEO
:
2070 case CODEC_ID_MPEG2VIDEO
:
2071 while(stuffing_count
--){
2072 put_bits(&s
->pb
, 8, 0);
2075 case CODEC_ID_MPEG4
:
2076 put_bits(&s
->pb
, 16, 0);
2077 put_bits(&s
->pb
, 16, 0x1C3);
2078 stuffing_count
-= 4;
2079 while(stuffing_count
--){
2080 put_bits(&s
->pb
, 8, 0xFF);
2084 av_log(s
->avctx
, AV_LOG_ERROR
, "vbv buffer overflow\n");
2086 flush_put_bits(&s
->pb
);
2087 s
->frame_bits
= put_bits_count(&s
->pb
);
2090 /* update mpeg1/2 vbv_delay for CBR */
2091 if(s
->avctx
->rc_max_rate
&& s
->avctx
->rc_min_rate
== s
->avctx
->rc_max_rate
){
2094 assert(s
->repeat_first_field
==0);
2096 vbv_delay
= lrintf(90000 * s
->rc_context
.buffer_index
/ s
->avctx
->rc_max_rate
);
2097 assert(vbv_delay
< 0xFFFF);
2099 s
->vbv_delay_ptr
[0] &= 0xF8;
2100 s
->vbv_delay_ptr
[0] |= vbv_delay
>>13;
2101 s
->vbv_delay_ptr
[1] = vbv_delay
>>5;
2102 s
->vbv_delay_ptr
[2] &= 0x07;
2103 s
->vbv_delay_ptr
[2] |= vbv_delay
<<3;
2105 s
->total_bits
+= s
->frame_bits
;
2106 avctx
->frame_bits
= s
->frame_bits
;
2108 assert((pbBufPtr(&s
->pb
) == s
->pb
.buf
));
2111 assert((s
->frame_bits
&7)==0);
2113 return s
->frame_bits
/8;
2116 #endif //CONFIG_ENCODERS
2118 static inline void gmc1_motion(MpegEncContext
*s
,
2119 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2121 uint8_t **ref_picture
, int src_offset
)
2124 int offset
, src_x
, src_y
, linesize
, uvlinesize
;
2125 int motion_x
, motion_y
;
2128 motion_x
= s
->sprite_offset
[0][0];
2129 motion_y
= s
->sprite_offset
[0][1];
2130 src_x
= s
->mb_x
* 16 + (motion_x
>> (s
->sprite_warping_accuracy
+1));
2131 src_y
= s
->mb_y
* 16 + (motion_y
>> (s
->sprite_warping_accuracy
+1));
2132 motion_x
<<=(3-s
->sprite_warping_accuracy
);
2133 motion_y
<<=(3-s
->sprite_warping_accuracy
);
2134 src_x
= clip(src_x
, -16, s
->width
);
2135 if (src_x
== s
->width
)
2137 src_y
= clip(src_y
, -16, s
->height
);
2138 if (src_y
== s
->height
)
2141 linesize
= s
->linesize
;
2142 uvlinesize
= s
->uvlinesize
;
2144 ptr
= ref_picture
[0] + (src_y
* linesize
) + src_x
+ src_offset
;
2146 dest_y
+=dest_offset
;
2147 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2148 if( (unsigned)src_x
>= s
->h_edge_pos
- 17
2149 || (unsigned)src_y
>= s
->v_edge_pos
- 17){
2150 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, linesize
, 17, 17, src_x
, src_y
, s
->h_edge_pos
, s
->v_edge_pos
);
2151 ptr
= s
->edge_emu_buffer
;
2155 if((motion_x
|motion_y
)&7){
2156 s
->dsp
.gmc1(dest_y
, ptr
, linesize
, 16, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2157 s
->dsp
.gmc1(dest_y
+8, ptr
+8, linesize
, 16, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2161 dxy
= ((motion_x
>>3)&1) | ((motion_y
>>2)&2);
2162 if (s
->no_rounding
){
2163 s
->dsp
.put_no_rnd_pixels_tab
[0][dxy
](dest_y
, ptr
, linesize
, 16);
2165 s
->dsp
.put_pixels_tab
[0][dxy
](dest_y
, ptr
, linesize
, 16);
2169 if(s
->flags
&CODEC_FLAG_GRAY
) return;
2171 motion_x
= s
->sprite_offset
[1][0];
2172 motion_y
= s
->sprite_offset
[1][1];
2173 src_x
= s
->mb_x
* 8 + (motion_x
>> (s
->sprite_warping_accuracy
+1));
2174 src_y
= s
->mb_y
* 8 + (motion_y
>> (s
->sprite_warping_accuracy
+1));
2175 motion_x
<<=(3-s
->sprite_warping_accuracy
);
2176 motion_y
<<=(3-s
->sprite_warping_accuracy
);
2177 src_x
= clip(src_x
, -8, s
->width
>>1);
2178 if (src_x
== s
->width
>>1)
2180 src_y
= clip(src_y
, -8, s
->height
>>1);
2181 if (src_y
== s
->height
>>1)
2184 offset
= (src_y
* uvlinesize
) + src_x
+ (src_offset
>>1);
2185 ptr
= ref_picture
[1] + offset
;
2186 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2187 if( (unsigned)src_x
>= (s
->h_edge_pos
>>1) - 9
2188 || (unsigned)src_y
>= (s
->v_edge_pos
>>1) - 9){
2189 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2190 ptr
= s
->edge_emu_buffer
;
2194 s
->dsp
.gmc1(dest_cb
+ (dest_offset
>>1), ptr
, uvlinesize
, 8, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2196 ptr
= ref_picture
[2] + offset
;
2198 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2199 ptr
= s
->edge_emu_buffer
;
2201 s
->dsp
.gmc1(dest_cr
+ (dest_offset
>>1), ptr
, uvlinesize
, 8, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
2206 static inline void gmc_motion(MpegEncContext
*s
,
2207 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2209 uint8_t **ref_picture
, int src_offset
)
2212 int linesize
, uvlinesize
;
2213 const int a
= s
->sprite_warping_accuracy
;
2216 linesize
= s
->linesize
;
2217 uvlinesize
= s
->uvlinesize
;
2219 ptr
= ref_picture
[0] + src_offset
;
2221 dest_y
+=dest_offset
;
2223 ox
= s
->sprite_offset
[0][0] + s
->sprite_delta
[0][0]*s
->mb_x
*16 + s
->sprite_delta
[0][1]*s
->mb_y
*16;
2224 oy
= s
->sprite_offset
[0][1] + s
->sprite_delta
[1][0]*s
->mb_x
*16 + s
->sprite_delta
[1][1]*s
->mb_y
*16;
2226 s
->dsp
.gmc(dest_y
, ptr
, linesize
, 16,
2229 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2230 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2231 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2232 s
->h_edge_pos
, s
->v_edge_pos
);
2233 s
->dsp
.gmc(dest_y
+8, ptr
, linesize
, 16,
2234 ox
+ s
->sprite_delta
[0][0]*8,
2235 oy
+ s
->sprite_delta
[1][0]*8,
2236 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2237 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2238 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2239 s
->h_edge_pos
, s
->v_edge_pos
);
2241 if(s
->flags
&CODEC_FLAG_GRAY
) return;
2244 dest_cb
+=dest_offset
>>1;
2245 dest_cr
+=dest_offset
>>1;
2247 ox
= s
->sprite_offset
[1][0] + s
->sprite_delta
[0][0]*s
->mb_x
*8 + s
->sprite_delta
[0][1]*s
->mb_y
*8;
2248 oy
= s
->sprite_offset
[1][1] + s
->sprite_delta
[1][0]*s
->mb_x
*8 + s
->sprite_delta
[1][1]*s
->mb_y
*8;
2250 ptr
= ref_picture
[1] + (src_offset
>>1);
2251 s
->dsp
.gmc(dest_cb
, ptr
, uvlinesize
, 8,
2254 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2255 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2256 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2257 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2259 ptr
= ref_picture
[2] + (src_offset
>>1);
2260 s
->dsp
.gmc(dest_cr
, ptr
, uvlinesize
, 8,
2263 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
2264 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
2265 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
2266 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2270 * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
2271 * @param buf destination buffer
2272 * @param src source buffer
2273 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
2274 * @param block_w width of block
2275 * @param block_h height of block
2276 * @param src_x x coordinate of the top left sample of the block in the source buffer
2277 * @param src_y y coordinate of the top left sample of the block in the source buffer
2278 * @param w width of the source buffer
2279 * @param h height of the source buffer
2281 void ff_emulated_edge_mc(uint8_t *buf
, uint8_t *src
, int linesize
, int block_w
, int block_h
,
2282 int src_x
, int src_y
, int w
, int h
){
2284 int start_y
, start_x
, end_y
, end_x
;
2287 src
+= (h
-1-src_y
)*linesize
;
2289 }else if(src_y
<=-block_h
){
2290 src
+= (1-block_h
-src_y
)*linesize
;
2296 }else if(src_x
<=-block_w
){
2297 src
+= (1-block_w
-src_x
);
2301 start_y
= FFMAX(0, -src_y
);
2302 start_x
= FFMAX(0, -src_x
);
2303 end_y
= FFMIN(block_h
, h
-src_y
);
2304 end_x
= FFMIN(block_w
, w
-src_x
);
2306 // copy existing part
2307 for(y
=start_y
; y
<end_y
; y
++){
2308 for(x
=start_x
; x
<end_x
; x
++){
2309 buf
[x
+ y
*linesize
]= src
[x
+ y
*linesize
];
2314 for(y
=0; y
<start_y
; y
++){
2315 for(x
=start_x
; x
<end_x
; x
++){
2316 buf
[x
+ y
*linesize
]= buf
[x
+ start_y
*linesize
];
2321 for(y
=end_y
; y
<block_h
; y
++){
2322 for(x
=start_x
; x
<end_x
; x
++){
2323 buf
[x
+ y
*linesize
]= buf
[x
+ (end_y
-1)*linesize
];
2327 for(y
=0; y
<block_h
; y
++){
2329 for(x
=0; x
<start_x
; x
++){
2330 buf
[x
+ y
*linesize
]= buf
[start_x
+ y
*linesize
];
2334 for(x
=end_x
; x
<block_w
; x
++){
2335 buf
[x
+ y
*linesize
]= buf
[end_x
- 1 + y
*linesize
];
2340 static inline int hpel_motion(MpegEncContext
*s
,
2341 uint8_t *dest
, uint8_t *src
,
2342 int src_x
, int src_y
,
2343 int width
, int height
, int stride
,
2344 int h_edge_pos
, int v_edge_pos
,
2345 int w
, int h
, op_pixels_func
*pix_op
,
2346 int motion_x
, int motion_y
)
2351 dxy
= ((motion_y
& 1) << 1) | (motion_x
& 1);
2352 src_x
+= motion_x
>> 1;
2353 src_y
+= motion_y
>> 1;
2355 /* WARNING: do not forget half pels */
2356 src_x
= clip(src_x
, -16, width
); //FIXME unneeded for emu?
2359 src_y
= clip(src_y
, -16, height
);
2360 if (src_y
== height
)
2362 src
+= src_y
* stride
+ src_x
;
2364 if(s
->unrestricted_mv
&& (s
->flags
&CODEC_FLAG_EMU_EDGE
)){
2365 if( (unsigned)src_x
> h_edge_pos
- (motion_x
&1) - w
2366 || (unsigned)src_y
> v_edge_pos
- (motion_y
&1) - h
){
2367 ff_emulated_edge_mc(s
->edge_emu_buffer
, src
, stride
, w
+1, h
+1,
2368 src_x
, src_y
, h_edge_pos
, v_edge_pos
);
2369 src
= s
->edge_emu_buffer
;
2373 pix_op
[dxy
](dest
, src
, stride
, h
);
2377 /* apply one mpeg motion vector to the three components */
2378 static inline void mpeg_motion(MpegEncContext
*s
,
2379 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2381 uint8_t **ref_picture
, int src_offset
,
2382 int field_based
, op_pixels_func (*pix_op
)[4],
2383 int motion_x
, int motion_y
, int h
)
2386 int dxy
, offset
, mx
, my
, src_x
, src_y
, height
, v_edge_pos
, uvlinesize
;
2389 if(s
->quarter_sample
)
2396 height
= s
->height
>> field_based
;
2397 v_edge_pos
= s
->v_edge_pos
>> field_based
;
2398 uvlinesize
= s
->current_picture
.linesize
[1] << field_based
;
2401 dest_y
+ dest_offset
, ref_picture
[0] + src_offset
,
2402 s
->mb_x
* 16, s
->mb_y
* (16 >> field_based
),
2403 s
->width
, height
, s
->current_picture
.linesize
[0] << field_based
,
2404 s
->h_edge_pos
, v_edge_pos
,
2406 motion_x
, motion_y
);
2409 if(s
->flags
&CODEC_FLAG_GRAY
) return;
2411 if (s
->out_format
== FMT_H263
) {
2413 if ((motion_x
& 3) != 0)
2415 if ((motion_y
& 3) != 0)
2422 dxy
= ((my
& 1) << 1) | (mx
& 1);
2427 src_x
= s
->mb_x
* 8 + mx
;
2428 src_y
= s
->mb_y
* (8 >> field_based
) + my
;
2429 src_x
= clip(src_x
, -8, s
->width
>> 1);
2430 if (src_x
== (s
->width
>> 1))
2432 src_y
= clip(src_y
, -8, height
>> 1);
2433 if (src_y
== (height
>> 1))
2435 offset
= (src_y
* uvlinesize
) + src_x
+ (src_offset
>> 1);
2436 ptr
= ref_picture
[1] + offset
;
2438 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
- (src_offset
>> 1), s
->uvlinesize
, 9, 9+field_based
,
2439 src_x
, src_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2440 ptr
= s
->edge_emu_buffer
+ (src_offset
>> 1);
2442 pix_op
[1][dxy
](dest_cb
+ (dest_offset
>> 1), ptr
, uvlinesize
, h
>> 1);
2444 ptr
= ref_picture
[2] + offset
;
2446 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
- (src_offset
>> 1), s
->uvlinesize
, 9, 9+field_based
,
2447 src_x
, src_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2448 ptr
= s
->edge_emu_buffer
+ (src_offset
>> 1);
2450 pix_op
[1][dxy
](dest_cr
+ (dest_offset
>> 1), ptr
, uvlinesize
, h
>> 1);
2452 //FIXME move to dsputil, avg variant, 16x16 version
2453 static inline void put_obmc(uint8_t *dst
, uint8_t *src
[5], int stride
){
2455 uint8_t * const top
= src
[1];
2456 uint8_t * const left
= src
[2];
2457 uint8_t * const mid
= src
[0];
2458 uint8_t * const right
= src
[3];
2459 uint8_t * const bottom
= src
[4];
2460 #define OBMC_FILTER(x, t, l, m, r, b)\
2461 dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
2462 #define OBMC_FILTER4(x, t, l, m, r, b)\
2463 OBMC_FILTER(x , t, l, m, r, b);\
2464 OBMC_FILTER(x+1 , t, l, m, r, b);\
2465 OBMC_FILTER(x +stride, t, l, m, r, b);\
2466 OBMC_FILTER(x+1+stride, t, l, m, r, b);
2469 OBMC_FILTER (x
, 2, 2, 4, 0, 0);
2470 OBMC_FILTER (x
+1, 2, 1, 5, 0, 0);
2471 OBMC_FILTER4(x
+2, 2, 1, 5, 0, 0);
2472 OBMC_FILTER4(x
+4, 2, 0, 5, 1, 0);
2473 OBMC_FILTER (x
+6, 2, 0, 5, 1, 0);
2474 OBMC_FILTER (x
+7, 2, 0, 4, 2, 0);
2476 OBMC_FILTER (x
, 1, 2, 5, 0, 0);
2477 OBMC_FILTER (x
+1, 1, 2, 5, 0, 0);
2478 OBMC_FILTER (x
+6, 1, 0, 5, 2, 0);
2479 OBMC_FILTER (x
+7, 1, 0, 5, 2, 0);
2481 OBMC_FILTER4(x
, 1, 2, 5, 0, 0);
2482 OBMC_FILTER4(x
+2, 1, 1, 6, 0, 0);
2483 OBMC_FILTER4(x
+4, 1, 0, 6, 1, 0);
2484 OBMC_FILTER4(x
+6, 1, 0, 5, 2, 0);
2486 OBMC_FILTER4(x
, 0, 2, 5, 0, 1);
2487 OBMC_FILTER4(x
+2, 0, 1, 6, 0, 1);
2488 OBMC_FILTER4(x
+4, 0, 0, 6, 1, 1);
2489 OBMC_FILTER4(x
+6, 0, 0, 5, 2, 1);
2491 OBMC_FILTER (x
, 0, 2, 5, 0, 1);
2492 OBMC_FILTER (x
+1, 0, 2, 5, 0, 1);
2493 OBMC_FILTER4(x
+2, 0, 1, 5, 0, 2);
2494 OBMC_FILTER4(x
+4, 0, 0, 5, 1, 2);
2495 OBMC_FILTER (x
+6, 0, 0, 5, 2, 1);
2496 OBMC_FILTER (x
+7, 0, 0, 5, 2, 1);
2498 OBMC_FILTER (x
, 0, 2, 4, 0, 2);
2499 OBMC_FILTER (x
+1, 0, 1, 5, 0, 2);
2500 OBMC_FILTER (x
+6, 0, 0, 5, 1, 2);
2501 OBMC_FILTER (x
+7, 0, 0, 4, 2, 2);
2504 /* obmc for 1 8x8 luma block */
2505 static inline void obmc_motion(MpegEncContext
*s
,
2506 uint8_t *dest
, uint8_t *src
,
2507 int src_x
, int src_y
,
2508 op_pixels_func
*pix_op
,
2509 int16_t mv
[5][2]/* mid top left right bottom*/)
2515 assert(s
->quarter_sample
==0);
2518 if(i
&& mv
[i
][0]==mv
[MID
][0] && mv
[i
][1]==mv
[MID
][1]){
2521 ptr
[i
]= s
->obmc_scratchpad
+ 8*(i
&1) + s
->linesize
*8*(i
>>1);
2522 hpel_motion(s
, ptr
[i
], src
,
2524 s
->width
, s
->height
, s
->linesize
,
2525 s
->h_edge_pos
, s
->v_edge_pos
,
2527 mv
[i
][0], mv
[i
][1]);
2531 put_obmc(dest
, ptr
, s
->linesize
);
2534 static inline void qpel_motion(MpegEncContext
*s
,
2535 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2537 uint8_t **ref_picture
, int src_offset
,
2538 int field_based
, op_pixels_func (*pix_op
)[4],
2539 qpel_mc_func (*qpix_op
)[16],
2540 int motion_x
, int motion_y
, int h
)
2543 int dxy
, offset
, mx
, my
, src_x
, src_y
, height
, v_edge_pos
, linesize
, uvlinesize
;
2546 dxy
= ((motion_y
& 3) << 2) | (motion_x
& 3);
2547 src_x
= s
->mb_x
* 16 + (motion_x
>> 2);
2548 src_y
= s
->mb_y
* (16 >> field_based
) + (motion_y
>> 2);
2550 height
= s
->height
>> field_based
;
2551 v_edge_pos
= s
->v_edge_pos
>> field_based
;
2552 src_x
= clip(src_x
, -16, s
->width
);
2553 if (src_x
== s
->width
)
2555 src_y
= clip(src_y
, -16, height
);
2556 if (src_y
== height
)
2558 linesize
= s
->linesize
<< field_based
;
2559 uvlinesize
= s
->uvlinesize
<< field_based
;
2560 ptr
= ref_picture
[0] + (src_y
* linesize
) + src_x
+ src_offset
;
2561 dest_y
+= dest_offset
;
2562 //printf("%d %d %d\n", src_x, src_y, dxy);
2564 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2565 if( (unsigned)src_x
> s
->h_edge_pos
- (motion_x
&3) - 16
2566 || (unsigned)src_y
> v_edge_pos
- (motion_y
&3) - h
){
2567 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
- src_offset
, s
->linesize
, 17, 17+field_based
,
2568 src_x
, src_y
<<field_based
, s
->h_edge_pos
, s
->v_edge_pos
);
2569 ptr
= s
->edge_emu_buffer
+ src_offset
;
2574 qpix_op
[0][dxy
](dest_y
, ptr
, linesize
);
2576 //damn interlaced mode
2577 //FIXME boundary mirroring is not exactly correct here
2578 qpix_op
[1][dxy
](dest_y
, ptr
, linesize
);
2579 qpix_op
[1][dxy
](dest_y
+8, ptr
+8, linesize
);
2582 if(s
->flags
&CODEC_FLAG_GRAY
) return;
2587 }else if(s
->workaround_bugs
&FF_BUG_QPEL_CHROMA2
){
2588 static const int rtab
[8]= {0,0,1,1,0,0,0,1};
2589 mx
= (motion_x
>>1) + rtab
[motion_x
&7];
2590 my
= (motion_y
>>1) + rtab
[motion_y
&7];
2591 }else if(s
->workaround_bugs
&FF_BUG_QPEL_CHROMA
){
2592 mx
= (motion_x
>>1)|(motion_x
&1);
2593 my
= (motion_y
>>1)|(motion_y
&1);
2601 dxy
= (mx
&1) | ((my
&1)<<1);
2605 src_x
= s
->mb_x
* 8 + mx
;
2606 src_y
= s
->mb_y
* (8 >> field_based
) + my
;
2607 src_x
= clip(src_x
, -8, s
->width
>> 1);
2608 if (src_x
== (s
->width
>> 1))
2610 src_y
= clip(src_y
, -8, height
>> 1);
2611 if (src_y
== (height
>> 1))
2614 offset
= (src_y
* uvlinesize
) + src_x
+ (src_offset
>> 1);
2615 ptr
= ref_picture
[1] + offset
;
2617 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
- (src_offset
>> 1), s
->uvlinesize
, 9, 9 + field_based
,
2618 src_x
, src_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2619 ptr
= s
->edge_emu_buffer
+ (src_offset
>> 1);
2621 pix_op
[1][dxy
](dest_cb
+ (dest_offset
>> 1), ptr
, uvlinesize
, h
>> 1);
2623 ptr
= ref_picture
[2] + offset
;
2625 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
- (src_offset
>> 1), s
->uvlinesize
, 9, 9 + field_based
,
2626 src_x
, src_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2627 ptr
= s
->edge_emu_buffer
+ (src_offset
>> 1);
2629 pix_op
[1][dxy
](dest_cr
+ (dest_offset
>> 1), ptr
, uvlinesize
, h
>> 1);
2632 inline int ff_h263_round_chroma(int x
){
2634 return (h263_chroma_roundtab
[x
& 0xf] + ((x
>> 3) & ~1));
2637 return -(h263_chroma_roundtab
[x
& 0xf] + ((x
>> 3) & ~1));
2642 * h263 chroma 4mv motion compensation.
2644 static inline void chroma_4mv_motion(MpegEncContext
*s
,
2645 uint8_t *dest_cb
, uint8_t *dest_cr
,
2646 uint8_t **ref_picture
,
2647 op_pixels_func
*pix_op
,
2649 int dxy
, emu
=0, src_x
, src_y
, offset
;
2652 /* In case of 8X8, we construct a single chroma motion vector
2653 with a special rounding */
2654 mx
= ff_h263_round_chroma(mx
);
2655 my
= ff_h263_round_chroma(my
);
2657 dxy
= ((my
& 1) << 1) | (mx
& 1);
2661 src_x
= s
->mb_x
* 8 + mx
;
2662 src_y
= s
->mb_y
* 8 + my
;
2663 src_x
= clip(src_x
, -8, s
->width
/2);
2664 if (src_x
== s
->width
/2)
2666 src_y
= clip(src_y
, -8, s
->height
/2);
2667 if (src_y
== s
->height
/2)
2670 offset
= (src_y
* (s
->uvlinesize
)) + src_x
;
2671 ptr
= ref_picture
[1] + offset
;
2672 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2673 if( (unsigned)src_x
> (s
->h_edge_pos
>>1) - (dxy
&1) - 8
2674 || (unsigned)src_y
> (s
->v_edge_pos
>>1) - (dxy
>>1) - 8){
2675 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, s
->uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2676 ptr
= s
->edge_emu_buffer
;
2680 pix_op
[dxy
](dest_cb
, ptr
, s
->uvlinesize
, 8);
2682 ptr
= ref_picture
[2] + offset
;
2684 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, s
->uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
2685 ptr
= s
->edge_emu_buffer
;
2687 pix_op
[dxy
](dest_cr
, ptr
, s
->uvlinesize
, 8);
2691 * motion compensation of a single macroblock
2693 * @param dest_y luma destination pointer
2694 * @param dest_cb chroma cb/u destination pointer
2695 * @param dest_cr chroma cr/v destination pointer
2696 * @param dir direction (0->forward, 1->backward)
2697 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2698 * @param pix_op halfpel motion compensation function (average or put normally)
2699 * @param qpix_op qpel motion compensation function (average or put normally)
2700 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2702 static inline void MPV_motion(MpegEncContext
*s
,
2703 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
2704 int dir
, uint8_t **ref_picture
,
2705 op_pixels_func (*pix_op
)[4], qpel_mc_func (*qpix_op
)[16])
2707 int dxy
, mx
, my
, src_x
, src_y
, motion_x
, motion_y
;
2709 uint8_t *ptr
, *dest
;
2714 if(s
->obmc
&& s
->pict_type
!= B_TYPE
){
2715 int16_t mv_cache
[4][4][2];
2716 const int xy
= s
->mb_x
+ s
->mb_y
*s
->mb_stride
;
2717 const int mot_stride
= s
->mb_width
*2 + 2;
2718 const int mot_xy
= 1 + mb_x
*2 + (mb_y
*2 + 1)*mot_stride
;
2720 assert(!s
->mb_skiped
);
2722 memcpy(mv_cache
[1][1], s
->current_picture
.motion_val
[0][mot_xy
], sizeof(int16_t)*4);
2723 memcpy(mv_cache
[2][1], s
->current_picture
.motion_val
[0][mot_xy
+mot_stride
], sizeof(int16_t)*4);
2724 memcpy(mv_cache
[3][1], s
->current_picture
.motion_val
[0][mot_xy
+mot_stride
], sizeof(int16_t)*4);
2726 if(mb_y
==0 || IS_INTRA(s
->current_picture
.mb_type
[xy
-s
->mb_stride
])){
2727 memcpy(mv_cache
[0][1], mv_cache
[1][1], sizeof(int16_t)*4);
2729 memcpy(mv_cache
[0][1], s
->current_picture
.motion_val
[0][mot_xy
-mot_stride
], sizeof(int16_t)*4);
2732 if(mb_x
==0 || IS_INTRA(s
->current_picture
.mb_type
[xy
-1])){
2733 *(int32_t*)mv_cache
[1][0]= *(int32_t*)mv_cache
[1][1];
2734 *(int32_t*)mv_cache
[2][0]= *(int32_t*)mv_cache
[2][1];
2736 *(int32_t*)mv_cache
[1][0]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
-1];
2737 *(int32_t*)mv_cache
[2][0]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
-1+mot_stride
];
2740 if(mb_x
+1>=s
->mb_width
|| IS_INTRA(s
->current_picture
.mb_type
[xy
+1])){
2741 *(int32_t*)mv_cache
[1][3]= *(int32_t*)mv_cache
[1][2];
2742 *(int32_t*)mv_cache
[2][3]= *(int32_t*)mv_cache
[2][2];
2744 *(int32_t*)mv_cache
[1][3]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
+2];
2745 *(int32_t*)mv_cache
[2][3]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
+2+mot_stride
];
2751 const int x
= (i
&1)+1;
2752 const int y
= (i
>>1)+1;
2754 {mv_cache
[y
][x
][0], mv_cache
[y
][x
][1]},
2755 {mv_cache
[y
-1][x
][0], mv_cache
[y
-1][x
][1]},
2756 {mv_cache
[y
][x
-1][0], mv_cache
[y
][x
-1][1]},
2757 {mv_cache
[y
][x
+1][0], mv_cache
[y
][x
+1][1]},
2758 {mv_cache
[y
+1][x
][0], mv_cache
[y
+1][x
][1]}};
2760 obmc_motion(s
, dest_y
+ ((i
& 1) * 8) + (i
>> 1) * 8 * s
->linesize
,
2762 mb_x
* 16 + (i
& 1) * 8, mb_y
* 16 + (i
>>1) * 8,
2769 if(!(s
->flags
&CODEC_FLAG_GRAY
))
2770 chroma_4mv_motion(s
, dest_cb
, dest_cr
, ref_picture
, pix_op
[1], mx
, my
);
2775 switch(s
->mv_type
) {
2779 if(s
->real_sprite_warping_points
==1){
2780 gmc1_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2783 gmc_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2786 }else if(s
->quarter_sample
){
2787 qpel_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2790 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
2792 ff_mspel_motion(s
, dest_y
, dest_cb
, dest_cr
,
2793 ref_picture
, pix_op
,
2794 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
2798 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2801 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
2807 if(s
->quarter_sample
){
2809 motion_x
= s
->mv
[dir
][i
][0];
2810 motion_y
= s
->mv
[dir
][i
][1];
2812 dxy
= ((motion_y
& 3) << 2) | (motion_x
& 3);
2813 src_x
= mb_x
* 16 + (motion_x
>> 2) + (i
& 1) * 8;
2814 src_y
= mb_y
* 16 + (motion_y
>> 2) + (i
>>1) * 8;
2816 /* WARNING: do not forget half pels */
2817 src_x
= clip(src_x
, -16, s
->width
);
2818 if (src_x
== s
->width
)
2820 src_y
= clip(src_y
, -16, s
->height
);
2821 if (src_y
== s
->height
)
2824 ptr
= ref_picture
[0] + (src_y
* s
->linesize
) + (src_x
);
2825 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
2826 if( (unsigned)src_x
> s
->h_edge_pos
- (motion_x
&3) - 8
2827 || (unsigned)src_y
> s
->v_edge_pos
- (motion_y
&3) - 8 ){
2828 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, s
->linesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
, s
->v_edge_pos
);
2829 ptr
= s
->edge_emu_buffer
;
2832 dest
= dest_y
+ ((i
& 1) * 8) + (i
>> 1) * 8 * s
->linesize
;
2833 qpix_op
[1][dxy
](dest
, ptr
, s
->linesize
);
2835 mx
+= s
->mv
[dir
][i
][0]/2;
2836 my
+= s
->mv
[dir
][i
][1]/2;
2840 hpel_motion(s
, dest_y
+ ((i
& 1) * 8) + (i
>> 1) * 8 * s
->linesize
,
2842 mb_x
* 16 + (i
& 1) * 8, mb_y
* 16 + (i
>>1) * 8,
2843 s
->width
, s
->height
, s
->linesize
,
2844 s
->h_edge_pos
, s
->v_edge_pos
,
2846 s
->mv
[dir
][i
][0], s
->mv
[dir
][i
][1]);
2848 mx
+= s
->mv
[dir
][i
][0];
2849 my
+= s
->mv
[dir
][i
][1];
2853 if(!(s
->flags
&CODEC_FLAG_GRAY
))
2854 chroma_4mv_motion(s
, dest_cb
, dest_cr
, ref_picture
, pix_op
[1], mx
, my
);
2857 if (s
->picture_structure
== PICT_FRAME
) {
2858 if(s
->quarter_sample
){
2860 qpel_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2861 ref_picture
, s
->field_select
[dir
][0] ? s
->linesize
: 0,
2863 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 8);
2865 qpel_motion(s
, dest_y
, dest_cb
, dest_cr
, s
->linesize
,
2866 ref_picture
, s
->field_select
[dir
][1] ? s
->linesize
: 0,
2868 s
->mv
[dir
][1][0], s
->mv
[dir
][1][1], 8);
2871 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2872 ref_picture
, s
->field_select
[dir
][0] ? s
->linesize
: 0,
2874 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 8);
2876 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
, s
->linesize
,
2877 ref_picture
, s
->field_select
[dir
][1] ? s
->linesize
: 0,
2879 s
->mv
[dir
][1][0], s
->mv
[dir
][1][1], 8);
2883 if(s
->picture_structure
== s
->field_select
[dir
][0] + 1 || s
->pict_type
== B_TYPE
|| s
->first_field
){
2884 offset
= s
->field_select
[dir
][0] ? s
->linesize
: 0;
2886 ref_picture
= s
->current_picture
.data
;
2887 offset
= s
->field_select
[dir
][0] ? s
->linesize
: -s
->linesize
;
2890 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2891 ref_picture
, offset
,
2893 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
2898 uint8_t ** ref2picture
;
2900 if(s
->picture_structure
== s
->field_select
[dir
][0] + 1 || s
->pict_type
== B_TYPE
|| s
->first_field
){
2901 ref2picture
= ref_picture
;
2902 offset
= s
->field_select
[dir
][0] ? s
->linesize
: 0;
2904 ref2picture
= s
->current_picture
.data
;
2905 offset
= s
->field_select
[dir
][0] ? s
->linesize
: -s
->linesize
;
2908 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2909 ref2picture
, offset
,
2911 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 8);
2914 if(s
->picture_structure
== s
->field_select
[dir
][1] + 1 || s
->pict_type
== B_TYPE
|| s
->first_field
){
2915 ref2picture
= ref_picture
;
2916 offset
= s
->field_select
[dir
][1] ? s
->linesize
: 0;
2918 ref2picture
= s
->current_picture
.data
;
2919 offset
= s
->field_select
[dir
][1] ? s
->linesize
: -s
->linesize
;
2921 // I know it is ugly but this is the only way to fool emu_edge without rewriting mpeg_motion
2922 mpeg_motion(s
, dest_y
+16*s
->linesize
, dest_cb
+8*s
->uvlinesize
, dest_cr
+8*s
->uvlinesize
,
2924 ref2picture
, offset
,
2926 s
->mv
[dir
][1][0], s
->mv
[dir
][1][1]+16, 8);
2932 op_pixels_func (*dmv_pix_op
)[4];
2935 dmv_pix_op
= s
->dsp
.put_pixels_tab
;
2937 if(s
->picture_structure
== PICT_FRAME
){
2938 //put top field from top field
2939 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2942 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 8);
2943 //put bottom field from bottom field
2944 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
, s
->linesize
,
2945 ref_picture
, s
->linesize
,
2947 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 8);
2949 dmv_pix_op
= s
->dsp
.avg_pixels_tab
;
2951 //avg top field from bottom field
2952 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2953 ref_picture
, s
->linesize
,
2955 s
->mv
[dir
][2][0], s
->mv
[dir
][2][1], 8);
2956 //avg bottom field from top field
2957 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
, s
->linesize
,
2960 s
->mv
[dir
][3][0], s
->mv
[dir
][3][1], 8);
2963 offset
=(s
->picture_structure
== PICT_BOTTOM_FIELD
)?
2966 //put field from the same parity
2967 //same parity is never in the same frame
2968 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2971 s
->mv
[dir
][0][0],s
->mv
[dir
][0][1],16);
2973 // after put we make avg of the same block
2974 dmv_pix_op
=s
->dsp
.avg_pixels_tab
;
2976 //opposite parity is always in the same frame if this is second field
2977 if(!s
->first_field
){
2978 ref_picture
= s
->current_picture
.data
;
2979 //top field is one linesize from frame beginning
2980 offset
=(s
->picture_structure
== PICT_BOTTOM_FIELD
)?
2981 -s
->linesize
: s
->linesize
;
2983 offset
=(s
->picture_structure
== PICT_BOTTOM_FIELD
)?
2986 //avg field from the opposite parity
2987 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,0,
2988 ref_picture
, offset
,
2990 s
->mv
[dir
][2][0],s
->mv
[dir
][2][1],16);
2999 /* put block[] to dest[] */
3000 static inline void put_dct(MpegEncContext
*s
,
3001 DCTELEM
*block
, int i
, uint8_t *dest
, int line_size
, int qscale
)
3003 s
->dct_unquantize_intra(s
, block
, i
, qscale
);
3004 s
->dsp
.idct_put (dest
, line_size
, block
);
3007 /* add block[] to dest[] */
3008 static inline void add_dct(MpegEncContext
*s
,
3009 DCTELEM
*block
, int i
, uint8_t *dest
, int line_size
)
3011 if (s
->block_last_index
[i
] >= 0) {
3012 s
->dsp
.idct_add (dest
, line_size
, block
);
3016 static inline void add_dequant_dct(MpegEncContext
*s
,
3017 DCTELEM
*block
, int i
, uint8_t *dest
, int line_size
, int qscale
)
3019 if (s
->block_last_index
[i
] >= 0) {
3020 s
->dct_unquantize_inter(s
, block
, i
, qscale
);
3022 s
->dsp
.idct_add (dest
, line_size
, block
);
3027 * cleans dc, ac, coded_block for the current non intra MB
3029 void ff_clean_intra_table_entries(MpegEncContext
*s
)
3031 int wrap
= s
->block_wrap
[0];
3032 int xy
= s
->block_index
[0];
3035 s
->dc_val
[0][xy
+ 1 ] =
3036 s
->dc_val
[0][xy
+ wrap
] =
3037 s
->dc_val
[0][xy
+ 1 + wrap
] = 1024;
3039 memset(s
->ac_val
[0][xy
], 0, 32 * sizeof(int16_t));
3040 memset(s
->ac_val
[0][xy
+ wrap
], 0, 32 * sizeof(int16_t));
3041 if (s
->msmpeg4_version
>=3) {
3042 s
->coded_block
[xy
] =
3043 s
->coded_block
[xy
+ 1 ] =
3044 s
->coded_block
[xy
+ wrap
] =
3045 s
->coded_block
[xy
+ 1 + wrap
] = 0;
3048 wrap
= s
->block_wrap
[4];
3049 xy
= s
->mb_x
+ 1 + (s
->mb_y
+ 1) * wrap
;
3051 s
->dc_val
[2][xy
] = 1024;
3053 memset(s
->ac_val
[1][xy
], 0, 16 * sizeof(int16_t));
3054 memset(s
->ac_val
[2][xy
], 0, 16 * sizeof(int16_t));
3056 s
->mbintra_table
[s
->mb_x
+ s
->mb_y
*s
->mb_stride
]= 0;
3059 /* generic function called after a macroblock has been parsed by the
3060 decoder or after it has been encoded by the encoder.
3062 Important variables used:
3063 s->mb_intra : true if intra macroblock
3064 s->mv_dir : motion vector direction
3065 s->mv_type : motion vector type
3066 s->mv : motion vector
3067 s->interlaced_dct : true if interlaced dct used (mpeg2)
3069 void MPV_decode_mb(MpegEncContext
*s
, DCTELEM block
[6][64])
3072 const int mb_xy
= s
->mb_y
* s
->mb_stride
+ s
->mb_x
;
3074 if(s
->avctx
->xvmc_acceleration
){
3075 XVMC_decode_mb(s
);//xvmc uses pblocks
3083 s
->current_picture
.qscale_table
[mb_xy
]= s
->qscale
;
3085 /* update DC predictors for P macroblocks */
3087 if (s
->h263_pred
|| s
->h263_aic
) {
3088 if(s
->mbintra_table
[mb_xy
])
3089 ff_clean_intra_table_entries(s
);
3093 s
->last_dc
[2] = 128 << s
->intra_dc_precision
;
3096 else if (s
->h263_pred
|| s
->h263_aic
)
3097 s
->mbintra_table
[mb_xy
]=1;
3099 if ((s
->flags
&CODEC_FLAG_PSNR
) || !(s
->encoding
&& (s
->intra_only
|| s
->pict_type
==B_TYPE
))) { //FIXME precalc
3100 uint8_t *dest_y
, *dest_cb
, *dest_cr
;
3101 int dct_linesize
, dct_offset
;
3102 op_pixels_func (*op_pix
)[4];
3103 qpel_mc_func (*op_qpix
)[16];
3104 const int linesize
= s
->current_picture
.linesize
[0]; //not s->linesize as this woulnd be wrong for field pics
3105 const int uvlinesize
= s
->current_picture
.linesize
[1];
3106 const int readable
= s
->pict_type
!= B_TYPE
|| s
->encoding
|| s
->avctx
->draw_horiz_band
;
3108 /* avoid copy if macroblock skipped in last frame too */
3109 /* skip only during decoding as we might trash the buffers during encoding a bit */
3111 uint8_t *mbskip_ptr
= &s
->mbskip_table
[mb_xy
];
3112 const int age
= s
->current_picture
.age
;
3118 assert(s
->pict_type
!=I_TYPE
);
3120 (*mbskip_ptr
) ++; /* indicate that this time we skiped it */
3121 if(*mbskip_ptr
>99) *mbskip_ptr
= 99;
3123 /* if previous was skipped too, then nothing to do ! */
3124 if (*mbskip_ptr
>= age
&& s
->current_picture
.reference
){
3127 } else if(!s
->current_picture
.reference
){
3128 (*mbskip_ptr
) ++; /* increase counter so the age can be compared cleanly */
3129 if(*mbskip_ptr
>99) *mbskip_ptr
= 99;
3131 *mbskip_ptr
= 0; /* not skipped */
3135 if (s
->interlaced_dct
) {
3136 dct_linesize
= linesize
* 2;
3137 dct_offset
= linesize
;