kill a bunch of compiler warnings
[libav.git] / libavcodec / mpegvideo.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
21 */
22
23 /**
24 * @file mpegvideo.c
25 * The simplest mpeg encoder (well, it was the simplest!).
26 */
27
28 #include "avcodec.h"
29 #include "dsputil.h"
30 #include "mpegvideo.h"
31 #include "faandct.h"
32 #include <limits.h>
33
34 #ifdef USE_FASTMEMCPY
35 #include "fastmemcpy.h"
36 #endif
37
38 //#undef NDEBUG
39 //#include <assert.h>
40
41 #ifdef CONFIG_ENCODERS
42 static void encode_picture(MpegEncContext *s, int picture_number);
43 #endif //CONFIG_ENCODERS
44 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
45 DCTELEM *block, int n, int qscale);
46 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
47 DCTELEM *block, int n, int qscale);
48 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
49 DCTELEM *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
51 DCTELEM *block, int n, int qscale);
52 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
53 DCTELEM *block, int n, int qscale);
54 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
55 DCTELEM *block, int n, int qscale);
56 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
57 #ifdef CONFIG_ENCODERS
58 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
59 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
60 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
61 static int sse_mb(MpegEncContext *s);
62 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
63 #endif //CONFIG_ENCODERS
64
65 #ifdef HAVE_XVMC
66 extern int XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx);
67 extern void XVMC_field_end(MpegEncContext *s);
68 extern void XVMC_decode_mb(MpegEncContext *s);
69 #endif
70
71 void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
72
73
74 /* enable all paranoid tests for rounding, overflows, etc... */
75 //#define PARANOID
76
77 //#define DEBUG
78
79
80 /* for jpeg fast DCT */
81 #define CONST_BITS 14
82
/* AAN (Arai/Agui/Nakajima) fast-DCT scale factors, folded into the
   quantizer tables by convert_matrix() for the fdct_ifast /
   non-postscaled faandct paths. */
static const uint16_t aanscales[64] = {
    /* precomputed values scaled up by 14 bits */
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
    21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
    19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
    8867 , 12299, 11585, 10426,  8867,  6967,  4799,  2446,
    4520 ,  6270,  5906,  5315,  4520,  3552,  2446,  1247
};

/* rounding table for h.263 chroma motion vectors
   (presumably indexed by the luma MV sum mod 16 — confirm in the
    h.263 motion code) */
static const uint8_t h263_chroma_roundtab[16] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
};

/* identity luma->chroma qscale mapping, used when a codec does not
   install its own chroma_qscale_table */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
104
105 #ifdef CONFIG_ENCODERS
106 static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
107 static uint8_t default_fcode_tab[MAX_MV*2+1];
108
109 enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
110
/**
 * Builds the quantizer multiplier tables for every qscale in [qmin,qmax].
 *
 * qmat   receives (1<<QMAT_SHIFT)/divisor reciprocals for the C quantizer;
 * qmat16 receives 16 bit reciprocals plus a rounding bias (only filled in
 *        the generic-fdct branch below).
 * The divisor folds in the scaling of the selected forward DCT:
 *  - ff_jpeg_fdct_islow / postscaled faandct emit unscaled coefficients,
 *    so only qscale*quant_matrix is divided out;
 *  - fdct_ifast / non-postscaled faandct leave the AAN factors in the
 *    coefficients, so aanscales[] is folded into the divisor too.
 * Finally verifies that max_level * qmat cannot overflow an int and warns
 * if QMAT_SHIFT is effectively too large.
 */
static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
                           const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
{
    int qscale;
    int shift=0;

    for(qscale=qmin; qscale<=qmax; qscale++){
        int i;
        if (dsp->fdct == ff_jpeg_fdct_islow
#ifdef FAAN_POSTSCALE
            || dsp->fdct == ff_faandct
#endif
            ) {
            for(i=0;i<64;i++) {
                /* j undoes the IDCT permutation so qmat is indexed in
                   coefficient order */
                const int j= dsp->idct_permutation[i];
                /* 16 <= qscale * quant_matrix[i] <= 7905 */
                /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
                /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
                /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */

                qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) /
                                (qscale * quant_matrix[j]));
            }
        } else if (dsp->fdct == fdct_ifast
#ifndef FAAN_POSTSCALE
                   || dsp->fdct == ff_faandct
#endif
                   ) {
            for(i=0;i<64;i++) {
                const int j= dsp->idct_permutation[i];
                /* 16 <= qscale * quant_matrix[i] <= 7905 */
                /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
                /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
                /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */

                /* extra +14 bits compensate the 14 bit aanscales factor */
                qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) /
                                (aanscales[i] * qscale * quant_matrix[j]));
            }
        } else {
            for(i=0;i<64;i++) {
                const int j= dsp->idct_permutation[i];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                   So 16 <= qscale * quant_matrix[i] <= 7905
                   so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
                   so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
                */
                qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
//                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);

                /* keep the 16 bit reciprocal in (0, 32768) so the MMX
                   multiply cannot misbehave */
                if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;
                qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);
            }
        }

        /* overflow check: for the surviving coefficients (skip DC when
           intra), make sure max_level*qmat fits in an int */
        for(i=intra; i<64; i++){
            int64_t max= 8191;
            if (dsp->fdct == fdct_ifast
#ifndef FAAN_POSTSCALE
                || dsp->fdct == ff_faandct
#endif
                ) {
                max= (8191LL*aanscales[i]) >> 14;
            }
            while(((max * qmat[qscale][i]) >> shift) > INT_MAX){
                shift++;
            }
        }
    }
    if(shift){
        av_log(NULL, AV_LOG_INFO, "Warning, QMAT_SHIFT is larger then %d, overflows possible\n", QMAT_SHIFT - shift);
    }
}
184
185 static inline void update_qscale(MpegEncContext *s){
186 s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
187 s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
188
189 s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
190 }
191 #endif //CONFIG_ENCODERS
192
193 void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
194 int i;
195 int end;
196
197 st->scantable= src_scantable;
198
199 for(i=0; i<64; i++){
200 int j;
201 j = src_scantable[i];
202 st->permutated[i] = permutation[j];
203 #ifdef ARCH_POWERPC
204 st->inverse[j] = i;
205 #endif
206 }
207
208 end=-1;
209 for(i=0; i<64; i++){
210 int j;
211 j = st->permutated[i];
212 if(j>end) end=j;
213 st->raster_end[i]= end;
214 }
215 }
216
217 #ifdef CONFIG_ENCODERS
218 void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){
219 int i;
220
221 if(matrix){
222 put_bits(pb, 1, 1);
223 for(i=0;i<64;i++) {
224 put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
225 }
226 }else
227 put_bits(pb, 1, 0);
228 }
229 #endif //CONFIG_ENCODERS
230
/* init common dct for both encoder and decoder */
int DCT_common_init(MpegEncContext *s)
{
    /* install the portable C (de)quantizers; the arch specific
       MPV_common_init_* calls below may override them */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#ifdef CONFIG_ENCODERS
    s->dct_quantize= dct_quantize_c;
    s->denoise_dct= denoise_dct_c;
#endif //CONFIG_ENCODERS

    /* architecture specific initialisation */
#ifdef HAVE_MMX
    MPV_common_init_mmx(s);
#endif
#ifdef ARCH_ALPHA
    MPV_common_init_axp(s);
#endif
#ifdef HAVE_MLIB
    MPV_common_init_mlib(s);
#endif
#ifdef HAVE_MMI
    MPV_common_init_mmi(s);
#endif
#ifdef ARCH_ARMV4L
    MPV_common_init_armv4l(s);
#endif
#ifdef ARCH_POWERPC
    MPV_common_init_ppc(s);
#endif

#ifdef CONFIG_ENCODERS
    /* remember the (possibly arch optimized) quantizer before trellis
       quantization replaces the default one */
    s->fast_dct_quantize= s->dct_quantize;

    if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
        s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
    }

#endif //CONFIG_ENCODERS

    /* load & permutate scantables
       note: only wmv uses different ones
    */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
289
/* shallow-copies a Picture and tags the copy as FF_BUFFER_TYPE_COPY so
   later code can distinguish it from the buffer-owning original */
static void copy_picture(Picture *dst, Picture *src){
    *dst = *src;
    dst->type= FF_BUFFER_TYPE_COPY;
}
294
295 static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src){
296 int i;
297
298 dst->pict_type = src->pict_type;
299 dst->quality = src->quality;
300 dst->coded_picture_number = src->coded_picture_number;
301 dst->display_picture_number = src->display_picture_number;
302 // dst->reference = src->reference;
303 dst->pts = src->pts;
304 dst->interlaced_frame = src->interlaced_frame;
305 dst->top_field_first = src->top_field_first;
306
307 if(s->avctx->me_threshold){
308 if(!src->motion_val[0])
309 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
310 if(!src->mb_type)
311 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
312 if(!src->ref_index[0])
313 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
314 if(src->motion_subsample_log2 != dst->motion_subsample_log2)
315 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
316 src->motion_subsample_log2, dst->motion_subsample_log2);
317
318 memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
319
320 for(i=0; i<2; i++){
321 int stride= ((16*s->mb_width )>>src->motion_subsample_log2) + 1;
322 int height= ((16*s->mb_height)>>src->motion_subsample_log2);
323
324 if(src->motion_val[i] && src->motion_val[i] != dst->motion_val[i]){
325 memcpy(dst->motion_val[i], src->motion_val[i], 2*stride*height*sizeof(int16_t));
326 }
327 if(src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]){
328 memcpy(dst->ref_index[i], src->ref_index[i], s->b8_stride*2*s->mb_height*sizeof(int8_t));
329 }
330 }
331 }
332 }
333
/**
 * allocates a Picture
 * The pixels are allocated/set by calling get_buffer() if shared=0.
 * Side data tables (qscale, mb_type, motion vectors, ...) are allocated
 * lazily the first time (pic->qscale_table==NULL) and reused afterwards.
 * @return 0 on success, -1 on failure (already allocated tables are
 *         left in pic for the regular free path to release)
 */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11
    const int mb_array_size= s->mb_stride*s->mb_height;
    const int b8_array_size= s->b8_stride*s->mb_height*2;
    const int b4_array_size= s->b4_stride*s->mb_height*4;
    int i;

    if(shared){
        /* caller supplied the pixel buffers, just mark them shared */
        assert(pic->data[0]);
        assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
        pic->type= FF_BUFFER_TYPE_SHARED;
    }else{
        int r;

        assert(!pic->data[0]);

        r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);

        if(r<0 || !pic->age || !pic->type || !pic->data[0]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
            return -1;
        }

        /* the stride must stay constant over the whole sequence */
        if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
            return -1;
        }

        if(pic->linesize[1] != pic->linesize[2]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
            return -1;
        }

        s->linesize = pic->linesize[0];
        s->uvlinesize= pic->linesize[1];
    }

    if(pic->qscale_table==NULL){
        if (s->encoding) {
            /* per-MB statistics used by rate control / ME */
            CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))
        }

        CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
        CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num    * sizeof(uint32_t))
        pic->mb_type= pic->mb_type_base + s->mb_stride+1;
        if(s->out_format == FMT_H264){
            /* h264 stores MVs at 4x4 granularity */
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 2;
        }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
            /* 8x8 granularity for the other codecs / debug visualisation */
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 3;
        }
        if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
            CHECKED_ALLOCZ(pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6)
        }
        pic->qstride= s->mb_stride;
        CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
    }

    //it might be nicer if the application would keep track of these but it would require a API change
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
    s->prev_pict_types[0]= s->pict_type;
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
        pic->age= INT_MAX; // skipped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway

    return 0;
fail: //for the CHECKED_ALLOCZ macro
    return -1;
}
418
419 /**
420 * deallocates a picture
421 */
422 static void free_picture(MpegEncContext *s, Picture *pic){
423 int i;
424
425 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
426 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
427 }
428
429 av_freep(&pic->mb_var);
430 av_freep(&pic->mc_mb_var);
431 av_freep(&pic->mb_mean);
432 av_freep(&pic->mbskip_table);
433 av_freep(&pic->qscale_table);
434 av_freep(&pic->mb_type_base);
435 av_freep(&pic->dct_coeff);
436 av_freep(&pic->pan_scan);
437 pic->mb_type= NULL;
438 for(i=0; i<2; i++){
439 av_freep(&pic->motion_val_base[i]);
440 av_freep(&pic->ref_index[i]);
441 }
442
443 if(pic->type == FF_BUFFER_TYPE_SHARED){
444 for(i=0; i<4; i++){
445 pic->base[i]=
446 pic->data[i]= NULL;
447 }
448 pic->type= 0;
449 }
450 }
451
/**
 * Allocates the per-thread scratch buffers of a (duplicated) context.
 * @param base unused here — the body never reads it; presumably kept for
 *             caller symmetry (TODO confirm against other trees)
 * @return 0 on success, -1 on allocation failure (partially allocated
 *         buffers are freed later through MPV_common_end())
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int i;

    // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
    CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;

    //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
    CHECKED_ALLOCZ(s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t))
    /* the RD / b-frame / OBMC scratchpads alias the ME scratchpad —
       they are never live at the same time */
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
        CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))
        }
    }
    /* 12 blocks of 64 coefficients, double buffered */
    CHECKED_ALLOCZ(s->blocks, 64*12*2 * sizeof(DCTELEM))
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = (short *)(&s->block[i]);
    }
    return 0;
fail:
    return -1; //free() through MPV_common_end()
}
481
482 static void free_duplicate_context(MpegEncContext *s){
483 if(s==NULL) return;
484
485 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
486 av_freep(&s->me.scratchpad);
487 s->rd_scratchpad=
488 s->b_scratchpad=
489 s->obmc_scratchpad= NULL;
490
491 av_freep(&s->dct_error_sum);
492 av_freep(&s->me.map);
493 av_freep(&s->me.score_map);
494 av_freep(&s->blocks);
495 s->block= NULL;
496 }
497
498 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
499 #define COPY(a) bak->a= src->a
500 COPY(allocated_edge_emu_buffer);
501 COPY(edge_emu_buffer);
502 COPY(me.scratchpad);
503 COPY(rd_scratchpad);
504 COPY(b_scratchpad);
505 COPY(obmc_scratchpad);
506 COPY(me.map);
507 COPY(me.score_map);
508 COPY(blocks);
509 COPY(block);
510 COPY(start_mb_y);
511 COPY(end_mb_y);
512 COPY(me.map_generation);
513 COPY(pb);
514 COPY(dct_error_sum);
515 COPY(dct_count[0]);
516 COPY(dct_count[1]);
517 #undef COPY
518 }
519
520 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
521 MpegEncContext bak;
522 int i;
523 //FIXME copy only needed parts
524 //START_TIMER
525 backup_duplicate_context(&bak, dst);
526 memcpy(dst, src, sizeof(MpegEncContext));
527 backup_duplicate_context(dst, &bak);
528 for(i=0;i<12;i++){
529 dst->pblocks[i] = (short *)(&dst->block[i]);
530 }
531 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
532 }
533
534 static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src){
535 #define COPY(a) dst->a= src->a
536 COPY(pict_type);
537 COPY(current_picture);
538 COPY(f_code);
539 COPY(b_code);
540 COPY(qscale);
541 COPY(lambda);
542 COPY(lambda2);
543 COPY(picture_in_gop_number);
544 COPY(gop_picture_number);
545 COPY(frame_pred_frame_dct); //FIXME don't set in encode_header
546 COPY(progressive_frame); //FIXME don't set in encode_header
547 COPY(partitioned_frame); //FIXME don't set in encode_header
548 #undef COPY
549 }
550
551 /**
552 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
553 * the changed fields will not depend upon the prior state of the MpegEncContext.
554 */
555 static void MPV_common_defaults(MpegEncContext *s){
556 s->y_dc_scale_table=
557 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
558 s->chroma_qscale_table= ff_default_chroma_qscale_table;
559 s->progressive_frame= 1;
560 s->progressive_sequence= 1;
561 s->picture_structure= PICT_FRAME;
562
563 s->coded_picture_number = 0;
564 s->picture_number = 0;
565 s->input_picture_number = 0;
566
567 s->picture_in_gop_number = 0;
568
569 s->f_code = 1;
570 s->b_code = 1;
571 }
572
/**
 * sets the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
void MPV_decode_defaults(MpegEncContext *s){
    /* decoding currently needs no defaults beyond the shared ones */
    MPV_common_defaults(s);
}
580
581 /**
582 * sets the given MpegEncContext to defaults for encoding.
583 * the changed fields will not depend upon the prior state of the MpegEncContext.
584 */
585
586 #ifdef CONFIG_ENCODERS
587 static void MPV_encode_defaults(MpegEncContext *s){
588 static int done=0;
589
590 MPV_common_defaults(s);
591
592 if(!done){
593 int i;
594 done=1;
595
596 default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
597 memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
598
599 for(i=-16; i<16; i++){
600 default_fcode_tab[i + MAX_MV]= 1;
601 }
602 }
603 s->me.mv_penalty= default_mv_penalty;
604 s->fcode_tab= default_fcode_tab;
605 }
606 #endif //CONFIG_ENCODERS
607
/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 * @return 0 on success, -1 on failure (everything allocated so far is
 *         released through MPV_common_end())
 */
int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    /* one slice context per thread; each thread needs at least one
       16-pixel MB row */
    if(s->avctx->thread_count > MAX_THREADS || (16*s->avctx->thread_count > s->height && s->height)){
        av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
        return -1;

    dsputil_init(&s->dsp, s->avctx);
    DCT_common_init(s);

    s->flags= s->avctx->flags;
    s->flags2= s->avctx->flags2;

    /* macroblock / 8x8-block / 4x4-block grid dimensions; the strides
       carry one extra column to simplify edge handling */
    s->mb_width  = (s->width  + 15) / 16;
    s->mb_height = (s->height + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width*2 + 1;
    s->b4_stride = s->mb_width*4 + 1;
    mb_array_size= s->mb_height * s->mb_stride;
    mv_table_size= (s->mb_height+2) * s->mb_stride + 1;

    /* set chroma shifts */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                                    &(s->chroma_y_shift) );

    /* set default edge pos, will be overriden in decode_header if needed */
    s->h_edge_pos= s->mb_width*16;
    s->v_edge_pos= s->mb_height*16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0]=
    s->block_wrap[1]=
    s->block_wrap[2]=
    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[4]=
    s->block_wrap[5]= s->mb_stride;

    y_size = s->b8_stride * (2 * s->mb_height + 1);
    c_size = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    /* convert fourcc to upper case */
    s->avctx->codec_tag=   toupper( s->avctx->codec_tag     &0xFF)
                        + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
                        + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
                        + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);

    s->avctx->stream_codec_tag=   toupper( s->avctx->stream_codec_tag     &0xFF)
                               + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
                               + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
                               + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);

    s->avctx->coded_frame= (AVFrame*)&s->current_picture;

    /* map from linear MB index to position in the (strided) MB array */
    CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error ressilience code looks cleaner with this
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
        }
    }
    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        CHECKED_ALLOCZ(s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t))
        /* offset past the extra top row/column of the strided layout */
        s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

        if(s->msmpeg4_version){
            CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
        }
        CHECKED_ALLOCZ(s->avctx->stats_out, 256);

        /* Allocate MB type table */
        CHECKED_ALLOCZ(s->mb_type  , mb_array_size * sizeof(uint16_t)) //needed for encoding

        CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))

        /* quantizer multiplier tables, one set of 64 per qscale (0..31) */
        CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int))
        CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int))
        CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
        CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))

        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t))
        }
    }
    CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))

    CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))

    if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
        /* interlaced direct mode decoding tables */
        for(i=0; i<2; i++){
            int j, k;
            for(j=0; j<2; j++){
                for(k=0; k<2; k++){
                    CHECKED_ALLOCZ(s->b_field_mv_table_base[i][j][k]     , mv_table_size * 2 * sizeof(int16_t))
                    s->b_field_mv_table[i][j][k]    = s->b_field_mv_table_base[i][j][k]     + s->mb_stride + 1;
                }
                CHECKED_ALLOCZ(s->b_field_select_table[i][j]     , mb_array_size * 2 * sizeof(uint8_t))
                CHECKED_ALLOCZ(s->p_field_mv_table_base[i][j]     , mv_table_size * 2 * sizeof(int16_t))
                s->p_field_mv_table[i][j]    = s->p_field_mv_table_base[i][j]     + s->mb_stride + 1;
            }
            CHECKED_ALLOCZ(s->p_field_select_table[i]      , mb_array_size * 2 * sizeof(uint8_t))
        }
    }
    if (s->out_format == FMT_H263) {
        /* ac values */
        CHECKED_ALLOCZ(s->ac_val_base, yc_size * sizeof(int16_t) * 16);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;

        /* cbp values */
        CHECKED_ALLOCZ(s->coded_block_base, y_size);
        s->coded_block= s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        CHECKED_ALLOCZ(s->cbp_table  , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        //MN: we need these for error resilience of intra-frames
        CHECKED_ALLOCZ(s->dc_val_base, yc_size * sizeof(int16_t));
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 is the neutral DC predictor value */
        for(i=0;i<yc_size;i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is a intra block */
    CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
    //Note the +1 is for a quicker mpeg4 slice_end detection
    CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);

    s->parse_context.state= -1;
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
       s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[1] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
       s->visualization_buffer[2] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
    }

    s->context_initialized = 1;

    /* thread 0 uses the master context directly; the others get a
       memcpy'd clone plus their own scratch buffers and MB row range */
    s->thread_context[0]= s;
    for(i=1; i<s->avctx->thread_count; i++){
        s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
        memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
    }

    for(i=0; i<s->avctx->thread_count; i++){
        if(init_duplicate_context(s->thread_context[i], s) < 0)
           goto fail;
        s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
        s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
    }

    return 0;
 fail:
    MPV_common_end(s);
    return -1;
}
799
/* frees everything allocated by MPV_common_init() (and the picture
   buffers); safe to call on a partially initialized context */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* per-thread scratch buffers and clone contexts (index 0 is the
       master context itself, so only indices >=1 are freed) */
    for(i=0; i<s->avctx->thread_count; i++){
        free_duplicate_context(s->thread_context[i]);
    }
    for(i=1; i<s->avctx->thread_count; i++){
        av_freep(&s->thread_context[i]);
    }

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    /* encoder MV / MB type tables */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    /* prediction / error resilience tables */
    av_freep(&s->dc_val_base);
    av_freep(&s->ac_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    if(s->picture){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    avcodec_default_free_buffers(s->avctx);
}
883
884 #ifdef CONFIG_ENCODERS
885
886 /* init video encoder */
/**
 * Initializes the video encoder.
 * Validates the user-supplied settings in avctx, copies them into the
 * MpegEncContext, configures codec-specific modes via the codec-id switch,
 * sets up quantizer biases and quantization matrices, and starts the rate
 * controller.
 *
 * @param avctx codec context whose priv_data is a MpegEncContext
 * @return 0 on success, -1 on any unsupported or inconsistent setting
 */
int MPV_encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i;
    int chroma_h_shift, chroma_v_shift;

    MPV_encode_defaults(s);

    /* only 4:2:0 input is handled by this encoder family */
    if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUV420P){
        av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
        return -1;
    }

    /* jpeg wants full-range (YUVJ) input unless strictness is relaxed */
    if(avctx->codec_id == CODEC_ID_MJPEG || avctx->codec_id == CODEC_ID_LJPEG){
        if(avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL && avctx->pix_fmt != PIX_FMT_YUVJ420P){
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
            return -1;
        }
    }else{
        if(avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL && avctx->pix_fmt != PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported\n");
            return -1;
        }
    }

    /* copy user settings into the encoder context */
    s->bit_rate = avctx->bit_rate;
    s->width = avctx->width;
    s->height = avctx->height;
    if(avctx->gop_size > 600){
        av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...\n");
        avctx->gop_size=600;
    }
    s->gop_size = avctx->gop_size;
    s->avctx = avctx;
    s->flags= avctx->flags;
    s->flags2= avctx->flags2;
    s->max_b_frames= avctx->max_b_frames;
    s->codec_id= avctx->codec->id;
    s->luma_elim_threshold = avctx->luma_elim_threshold;
    s->chroma_elim_threshold= avctx->chroma_elim_threshold;
    s->strict_std_compliance= avctx->strict_std_compliance;
    s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
    s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
    s->mpeg_quant= avctx->mpeg_quant;
    s->rtp_mode= !!avctx->rtp_payload_size;
    s->intra_dc_precision= avctx->intra_dc_precision;
    s->user_specified_pts = AV_NOPTS_VALUE;

    /* gop_size <= 1 means every frame is a keyframe; the 12 is only used
       internally and never produces an inter frame in that mode */
    if (s->gop_size <= 1) {
        s->intra_only = 1;
        s->gop_size = 12;
    } else {
        s->intra_only = 0;
    }

    s->me_method = avctx->me_method;

    /* Fixed QSCALE */
    s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);

    /* adaptive quantization is enabled when any masking factor or QP RD is
       requested, but is meaningless with a fixed qscale */
    s->adaptive_quant= (   s->avctx->lumi_masking
                        || s->avctx->dark_masking
                        || s->avctx->temporal_cplx_masking
                        || s->avctx->spatial_cplx_masking
                        || s->avctx->p_masking
                        || s->avctx->border_masking
                        || (s->flags&CODEC_FLAG_QP_RD))
                       && !s->fixed_qscale;

    s->obmc= !!(s->flags & CODEC_FLAG_OBMC);
    s->loop_filter= !!(s->flags & CODEC_FLAG_LOOP_FILTER);
    s->alternate_scan= !!(s->flags & CODEC_FLAG_ALT_SCAN);

    /* ---- consistency checks of rate-control settings ---- */
    if(avctx->rc_max_rate && !avctx->rc_buffer_size){
        av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
        return -1;
    }

    if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){
        av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
    }

    if(avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate){
        av_log(avctx, AV_LOG_INFO, "bitrate below min bitrate\n");
        return -1;
    }

    if(avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate){
        av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
        return -1;
    }

    /* CBR mpeg1/2: warn when the vbv buffer cannot be represented in the
       16-bit vbv_delay field at this rate */
    if(   s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate
       && (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO)
       && 90000LL * (avctx->rc_buffer_size-1) > s->avctx->rc_max_rate*0xFFFFLL){

        av_log(avctx, AV_LOG_INFO, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the specified vbv buffer is too large for the given bitrate!\n");
    }

    /* ---- reject feature/codec combinations that are not implemented ---- */
    if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4
       && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P && s->codec_id != CODEC_ID_FLV1){
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
        return -1;
    }

    if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){
        av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decision\n");
        return -1;
    }

    if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
        av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
        return -1;
    }

    if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
        return -1;
    }

    if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
        av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n");
        return -1;
    }

    if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
        av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
        return -1;
    }

    if((s->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN))
       && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO){
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
        return -1;
    }

    if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
        av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supported by codec\n");
        return -1;
    }

    if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
        return -1;
    }

    if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
        return -1;
    }

    if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){
        av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection arent supported yet\n");
        return -1;
    }

    if(s->avctx->thread_count > 1 && s->codec_id != CODEC_ID_MPEG4
       && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO
       && (s->codec_id != CODEC_ID_H263P || !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT))){
        av_log(avctx, AV_LOG_ERROR, "multi threaded encoding not supported by codec\n");
        return -1;
    }

    /* slice-level threading needs restartable slices */
    if(s->avctx->thread_count > 1)
        s->rtp_mode= 1;

    if(!avctx->time_base.den || !avctx->time_base.num){
        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
        return -1;
    }

    /* thresholds are later scaled by <<8; cap them so that cannot overflow */
    i= (INT_MAX/2+128)>>8;
    if(avctx->me_threshold >= i){
        av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n", i - 1);
        return -1;
    }
    if(avctx->mb_threshold >= i){
        av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n", i - 1);
        return -1;
    }

    if(avctx->b_frame_strategy && (avctx->flags&CODEC_FLAG_PASS2)){
        av_log(avctx, AV_LOG_ERROR, "b_frame_strategy must be 0 on the second pass");
        return -1;
    }

    /* reduce the timebase to lowest terms */
    i= ff_gcd(avctx->time_base.den, avctx->time_base.num);
    if(i > 1){
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;
//        return -1;
    }

    /* default quantizer rounding biases per codec family */
    if(s->codec_id==CODEC_ID_MJPEG){
        s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
        s->inter_quant_bias= 0;
    }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){
        s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
        s->inter_quant_bias= 0;
    }else{
        s->intra_quant_bias=0;
        s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
    }

    /* explicit user biases override the defaults above */
    if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias= avctx->intra_quant_bias;
    if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias= avctx->inter_quant_bias;

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);

    /* mpeg4 stores the timebase denominator in a 16-bit field */
    if(avctx->codec_id == CODEC_ID_MPEG4 && s->avctx->time_base.den > (1<<16)-1){
        av_log(avctx, AV_LOG_ERROR, "timebase not supported by mpeg 4 standard\n");
        return -1;
    }
    s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;

    /* codec specific setup: output format, delay and feature flags */
    switch(avctx->codec->id) {
    case CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay= 0; //s->max_b_frames ? 0 : 1;
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay= 0; //s->max_b_frames ? 0 : 1;
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
        s->rtp_mode= 1;
        break;
    case CODEC_ID_LJPEG:
    case CODEC_ID_MJPEG:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        s->mjpeg_write_tables = 1; /* write all tables */
        s->mjpeg_data_only_frames = 0; /* write all the needed headers */
        /* sampling factors derived from the chroma subsampling shifts */
        s->mjpeg_vsample[0] = 1<<chroma_v_shift;
        s->mjpeg_vsample[1] = 1;
        s->mjpeg_vsample[2] = 1;
        s->mjpeg_hsample[0] = 1<<chroma_h_shift;
        s->mjpeg_hsample[1] = 1;
        s->mjpeg_hsample[2] = 1;
        if (mjpeg_init(s) < 0)
            return -1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_H261:
        s->out_format = FMT_H261;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_H263:
        /* plain h263 only allows a fixed set of picture sizes */
        if (h263_get_picture_format(s->width, s->height) == 7) {
            av_log(avctx, AV_LOG_INFO, "Input picture size isn't suitable for h263 codec! try h263+\n");
            return -1;
        }
        s->out_format = FMT_H263;
        s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->h263_plus = 1;
        /* Fx */
        s->umvplus = (avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
        s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
        s->modified_quant= s->h263_aic;
        s->alt_inter_vlc= (avctx->flags & CODEC_FLAG_H263P_AIV) ? 1:0;
        s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
        s->loop_filter= (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1:0;
        s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
        s->h263_slice_structured= (s->flags & CODEC_FLAG_H263P_SLICE_STRUCT) ? 1:0;

        /* /Fx */
        /* These are just to be sure */
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_FLV1:
        s->out_format = FMT_H263;
        s->h263_flv = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode=0; /* don't allow GOB */
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_RV10:
        s->out_format = FMT_H263;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_RV20:
        s->out_format = FMT_H263;
        avctx->delay=0;
        s->low_delay=1;
        s->modified_quant=1;
        s->h263_aic=1;
        s->h263_plus=1;
        s->loop_filter=1;
        s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
        break;
    case CODEC_ID_MPEG4:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->low_delay= s->max_b_frames ? 0 : 1;
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case CODEC_ID_MSMPEG4V1:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_MSMPEG4V2:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 2;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_MSMPEG4V3:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 3;
        s->flipflop_rounding=1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_WMV1:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 4;
        s->flipflop_rounding=1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_WMV2:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 5;
        s->flipflop_rounding=1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    default:
        return -1;
    }

    avctx->has_b_frames= !s->low_delay;

    s->encoding = 1;

    /* init */
    if (MPV_common_init(s) < 0)
        return -1;

    if(s->modified_quant)
        s->chroma_qscale_table= ff_h263_chroma_qscale_table;
    s->progressive_frame=
    s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME));
    s->quant_precision=5;

    ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
    ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);

#ifdef CONFIG_H261_ENCODER
    if (s->out_format == FMT_H261)
        ff_h261_encode_init(s);
#endif
    if (s->out_format == FMT_H263)
        h263_encode_init(s);
    if(s->msmpeg4_version)
        ff_msmpeg4_encode_init(s);
    if (s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);

    /* init q matrix */
    /* matrices are stored in the idct permutation order expected by the DSP code */
    for(i=0;i<64;i++) {
        int j= s->dsp.idct_permutation[i];
        if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
            s->intra_matrix[j] =
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        }else
        { /* mpeg1/2 */
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        }
        if(s->avctx->intra_matrix)
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
        if(s->avctx->inter_matrix)
            s->inter_matrix[j] = s->avctx->inter_matrix[i];
    }

    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                       s->intra_matrix, s->intra_quant_bias, avctx->qmin, 31, 1);
        convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
                       s->inter_matrix, s->inter_quant_bias, avctx->qmin, 31, 0);
    }

    if(ff_rate_control_init(s) < 0)
        return -1;

    return 0;
}
1311
1312 int MPV_encode_end(AVCodecContext *avctx)
1313 {
1314 MpegEncContext *s = avctx->priv_data;
1315
1316 #ifdef STATS
1317 print_stats();
1318 #endif
1319
1320 ff_rate_control_uninit(s);
1321
1322 MPV_common_end(s);
1323 if (s->out_format == FMT_MJPEG)
1324 mjpeg_close(s);
1325
1326 av_freep(&avctx->extradata);
1327
1328 return 0;
1329 }
1330
1331 #endif //CONFIG_ENCODERS
1332
1333 void init_rl(RLTable *rl, int use_static)
1334 {
1335 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
1336 uint8_t index_run[MAX_RUN+1];
1337 int last, run, level, start, end, i;
1338
1339 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1340 if(use_static && rl->max_level[0])
1341 return;
1342
1343 /* compute max_level[], max_run[] and index_run[] */
1344 for(last=0;last<2;last++) {
1345 if (last == 0) {
1346 start = 0;
1347 end = rl->last;
1348 } else {
1349 start = rl->last;
1350 end = rl->n;
1351 }
1352
1353 memset(max_level, 0, MAX_RUN + 1);
1354 memset(max_run, 0, MAX_LEVEL + 1);
1355 memset(index_run, rl->n, MAX_RUN + 1);
1356 for(i=start;i<end;i++) {
1357 run = rl->table_run[i];
1358 level = rl->table_level[i];
1359 if (index_run[run] == rl->n)
1360 index_run[run] = i;
1361 if (level > max_level[run])
1362 max_level[run] = level;
1363 if (run > max_run[level])
1364 max_run[level] = run;
1365 }
1366 if(use_static)
1367 rl->max_level[last] = av_mallocz_static(MAX_RUN + 1);
1368 else
1369 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1370 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1371 if(use_static)
1372 rl->max_run[last] = av_mallocz_static(MAX_LEVEL + 1);
1373 else
1374 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1375 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1376 if(use_static)
1377 rl->index_run[last] = av_mallocz_static(MAX_RUN + 1);
1378 else
1379 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1380 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1381 }
1382 }
1383
1384 /* draw the edges of width 'w' of an image of size width, height */
1385 //FIXME check that this is ok for mpeg4 interlaced
/**
 * Pads an image by replicating its border pixels outward.
 * Fills a margin of 'w' pixels on all four sides of the width x height
 * image at 'buf' (the caller must have allocated that margin around it):
 * top/bottom rows, left/right columns, and the four corner squares.
 */
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *bottom_row = buf + (height - 1) * wrap;
    uint8_t *row;
    int k;

    /* replicate the first and last rows into the margins above and below */
    for (k = 1; k <= w; k++) {
        memcpy(buf - k * wrap, buf, width);
        memcpy(bottom_row + k * wrap, bottom_row, width);
    }

    /* extend the leftmost and rightmost pixel of every row sideways */
    row = buf;
    for (k = 0; k < height; k++) {
        memset(row - w, row[0], w);
        memset(row + width, row[width - 1], w);
        row += wrap;
    }

    /* fill the four w x w corner areas with the nearest corner pixel */
    for (k = 1; k <= w; k++) {
        memset(buf - k * wrap - w,              buf[0],              w); /* top left */
        memset(buf - k * wrap + width,          buf[width - 1],      w); /* top right */
        memset(bottom_row + k * wrap - w,       bottom_row[0],       w); /* bottom left */
        memset(bottom_row + k * wrap + width,   bottom_row[width-1], w); /* bottom right */
    }
}
1412
1413 int ff_find_unused_picture(MpegEncContext *s, int shared){
1414 int i;
1415
1416 if(shared){
1417 for(i=0; i<MAX_PICTURE_COUNT; i++){
1418 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
1419 }
1420 }else{
1421 for(i=0; i<MAX_PICTURE_COUNT; i++){
1422 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
1423 }
1424 for(i=0; i<MAX_PICTURE_COUNT; i++){
1425 if(s->picture[i].data[0]==NULL) return i;
1426 }
1427 }
1428
1429 assert(0);
1430 return -1;
1431 }
1432
1433 static void update_noise_reduction(MpegEncContext *s){
1434 int intra, i;
1435
1436 for(intra=0; intra<2; intra++){
1437 if(s->dct_count[intra] > (1<<16)){
1438 for(i=0; i<64; i++){
1439 s->dct_error_sum[intra][i] >>=1;
1440 }
1441 s->dct_count[intra] >>= 1;
1442 }
1443
1444 for(i=0; i<64; i++){
1445 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1446 }
1447 }
1448 }
1449
1450 /**
1451 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1452 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    AVFrame *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark&release old frames */
    if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
        avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);

        /* release forgotten pictures */
        /* if(mpeg124/h263) */
        if(!s->encoding){
            for(i=0; i<MAX_PICTURE_COUNT; i++){
                /* referenced pictures that are neither last nor next should
                   not exist anymore at this point */
                if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                    av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                    avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
                }
            }
        }
    }
alloc:
    if(!s->encoding){
        /* release non reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }

        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
            pic= (AVFrame*)s->current_picture_ptr; //we allready have a unused image (maybe it was set before reading the header)
        else{
            /* otherwise grab a free slot from the picture pool */
            i= ff_find_unused_picture(s, 0);
            pic= (AVFrame*)&s->picture[i];
        }

        /* B frames (except in h264) are never used as references */
        pic->reference= (s->pict_type != B_TYPE || s->codec_id == CODEC_ID_H264)
                        && !s->dropable ? 3 : 0;

        pic->coded_picture_number= s->coded_picture_number++;

        if( alloc_picture(s, (Picture*)pic, 0) < 0)
            return -1;

        s->current_picture_ptr= (Picture*)pic;
        s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
    }

    s->current_picture_ptr->pict_type= s->pict_type;
//    if(s->flags && CODEC_FLAG_QSCALE)
  //      s->current_picture_ptr->quality= s->new_picture_ptr->quality;
    s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;

    copy_picture(&s->current_picture, s->current_picture_ptr);

    if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
        /* rotate the reference pictures: next becomes last, current becomes
           next (unless this frame is dropable) */
        if (s->pict_type != B_TYPE) {
            s->last_picture_ptr= s->next_picture_ptr;
            if(!s->dropable)
                s->next_picture_ptr= s->current_picture_ptr;
        }
/*        av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
            s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
            s->pict_type, s->dropable);*/

        if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
        if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);

        /* an inter frame without a reference: allocate a fake one and retry */
        if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL)){
            av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
            assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
            goto alloc;
        }

        assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

        /* field pictures: address only every second line of the frame */
        if(s->picture_structure!=PICT_FRAME){
            int i;
            for(i=0; i<4; i++){
                if(s->picture_structure == PICT_BOTTOM_FIELD){
                    s->current_picture.data[i] += s->current_picture.linesize[i];
                }
                s->current_picture.linesize[i] *= 2;
                s->last_picture.linesize[i] *=2;
                s->next_picture.linesize[i] *=2;
            }
        }
    }

    s->hurry_up= s->avctx->hurry_up;
    s->error_resilience= avctx->error_resilience;

    /* set dequantizer, we can't do it during init as it might change for mpeg4
       and we can't do it in the header decode as init isnt called for mpeg4 there yet */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

#ifdef HAVE_XVMC
    if(s->avctx->xvmc_acceleration)
        return XVMC_field_start(s, avctx);
#endif
    return 0;
}
1576
1577 /* generic function for encode/decode called after a frame has been coded/decoded */
/* generic function for encode/decode called after a frame has been coded/decoded */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* draw edge for correct motion prediction if outside */
#ifdef HAVE_XVMC
//just to make sure that all data is rendered.
    if(s->avctx->xvmc_acceleration){
        XVMC_field_end(s);
    }else
#endif
    /* pad the borders of reference frames so motion vectors may point
       outside the picture (unless edge emulation is requested) */
    if(s->unrestricted_mv && s->current_picture.reference && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
            draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
            draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
            draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
    }
    /* leave the FPU in a clean state after possible MMX usage */
    emms_c();

    s->last_pict_type    = s->pict_type;
    if(s->pict_type!=B_TYPE){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
        /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release non-reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
}
1626
1627 /**
1628 * draws an line from (ex, ey) -> (sx, sy).
1629 * @param w width of the image
1630 * @param h height of the image
1631 * @param stride stride/linesize of the image
1632 * @param color color of the arrow
1633 */
/**
 * draws an line from (ex, ey) -> (sx, sy).
 * Anti-aliased via 16.16 fixed-point interpolation: the color is split
 * between the two pixels nearest to the ideal line position.
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int tmp, x, y, frac, slope;

    /* clamp both endpoints into the picture */
    sx = clip(sx, 0, w - 1);
    sy = clip(sy, 0, h - 1);
    ex = clip(ex, 0, w - 1);
    ey = clip(ey, 0, h - 1);

    buf[sy * stride + sx] += color;

    if (ABS(ex - sx) > ABS(ey - sy)) {
        /* x-major: walk along x, interpolating y */
        if (sx > ex) {
            tmp = sx; sx = ex; ex = tmp;
            tmp = sy; sy = ey; ey = tmp;
        }
        buf += sx + sy * stride;
        ex  -= sx;
        slope = ((ey - sy) << 16) / ex;
        for (x = 0; x <= ex; x++) {
            y    = (x * slope) >> 16;
            frac = (x * slope) & 0xFFFF;
            /* split the color between the two vertically adjacent pixels */
            buf[ y      * stride + x] += (color * (0x10000 - frac)) >> 16;
            buf[(y + 1) * stride + x] += (color *            frac ) >> 16;
        }
    } else {
        /* y-major: walk along y, interpolating x */
        if (sy > ey) {
            tmp = sx; sx = ex; ex = tmp;
            tmp = sy; sy = ey; ey = tmp;
        }
        buf += sx + sy * stride;
        ey  -= sy;
        if (ey)
            slope = ((ex - sx) << 16) / ey;
        else
            slope = 0;
        for (y = 0; y <= ey; y++) {
            x    = (y * slope) >> 16;
            frac = (y * slope) & 0xFFFF;
            /* split the color between the two horizontally adjacent pixels */
            buf[y * stride + x    ] += (color * (0x10000 - frac)) >> 16;
            buf[y * stride + x + 1] += (color *            frac ) >> 16;
        }
    }
}
1675
1676 /**
1677 * draws an arrow from (ex, ey) -> (sx, sy).
1678 * @param w width of the image
1679 * @param h height of the image
1680 * @param stride stride/linesize of the image
1681 * @param color color of the arrow
1682 */
/**
 * draws an arrow from (ex, ey) -> (sx, sy).
 * The shaft is always drawn; an arrow head (two short strokes at the
 * start point) is only added when the vector is long enough.
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int vx, vy;

    /* allow a margin so vectors pointing slightly off-screen still show */
    sx = clip(sx, -100, w + 100);
    sy = clip(sy, -100, h + 100);
    ex = clip(ex, -100, w + 100);
    ey = clip(ey, -100, h + 100);

    vx = ex - sx;
    vy = ey - sy;

    if (vx * vx + vy * vy > 3 * 3) {
        /* head strokes: the vector rotated by +/-45 degrees, scaled to a
           fixed length */
        int rx = vx + vy;
        int ry = -vx + vy;
        int length = ff_sqrt((rx * rx + ry * ry) << 8);

        //FIXME subpixel accuracy
        rx = ROUNDED_DIV(rx * 3 << 4, length);
        ry = ROUNDED_DIV(ry * 3 << 4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1708
1709 /**
1710 * prints debuging info for the given picture.
1711 */
1712 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1713
1714 if(!pict || !pict->mb_type) return;
1715
1716 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1717 int x,y;
1718
1719 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1720 switch (pict->pict_type) {
1721 case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1722 case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1723 case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1724 case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1725 case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1726 case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1727 }
1728 for(y=0; y<s->mb_height; y++){
1729 for(x=0; x<s->mb_width; x++){
1730 if(s->avctx->debug&FF_DEBUG_SKIP){
1731 int count= s->mbskip_table[x + y*s->mb_stride];
1732 if(count>9) count=9;
1733 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1734 }
1735 if(s->avctx->debug&FF_DEBUG_QP){
1736 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1737 }
1738 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1739 int mb_type= pict->mb_type[x + y*s->mb_stride];
1740 //Type & MV direction
1741 if(IS_PCM(mb_type))
1742 av_log(s->avctx, AV_LOG_DEBUG, "P");
1743 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1744 av_log(s->avctx, AV_LOG_DEBUG, "A");
1745 else if(IS_INTRA4x4(mb_type))
1746 av_log(s->avctx, AV_LOG_DEBUG, "i");
1747 else if(IS_INTRA16x16(mb_type))
1748 av_log(s->avctx, AV_LOG_DEBUG, "I");
1749 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1750 av_log(s->avctx, AV_LOG_DEBUG, "d");
1751 else if(IS_DIRECT(mb_type))
1752 av_log(s->avctx, AV_LOG_DEBUG, "D");
1753 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1754 av_log(s->avctx, AV_LOG_DEBUG, "g");
1755 else if(IS_GMC(mb_type))
1756 av_log(s->avctx, AV_LOG_DEBUG, "G");
1757 else if(IS_SKIP(mb_type))
1758 av_log(s->avctx, AV_LOG_DEBUG, "S");
1759 else if(!USES_LIST(mb_type, 1))
1760 av_log(s->avctx, AV_LOG_DEBUG, ">");
1761 else if(!USES_LIST(mb_type, 0))
1762 av_log(s->avctx, AV_LOG_DEBUG, "<");
1763 else{
1764 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1765 av_log(s->avctx, AV_LOG_DEBUG, "X");
1766 }
1767
1768 //segmentation
1769 if(IS_8X8(mb_type))
1770 av_log(s->avctx, AV_LOG_DEBUG, "+");
1771 else if(IS_16X8(mb_type))
1772 av_log(s->avctx, AV_LOG_DEBUG, "-");
1773 else if(IS_8X16(mb_type))
1774 av_log(s->avctx, AV_LOG_DEBUG, "|");
1775 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1776 av_log(s->avctx, AV_LOG_DEBUG, " ");
1777 else
1778 av_log(s->avctx, AV_LOG_DEBUG, "?");
1779
1780
1781 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
1782 av_log(s->avctx, AV_LOG_DEBUG, "=");
1783 else
1784 av_log(s->avctx, AV_LOG_DEBUG, " ");
1785 }
1786 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1787 }
1788 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1789 }
1790 }
1791
1792 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1793 const int shift= 1 + s->quarter_sample;
1794 int mb_y;
1795 uint8_t *ptr;
1796 int i;
1797 int h_chroma_shift, v_chroma_shift;
1798 const int width = s->avctx->width;
1799 const int height= s->avctx->height;
1800 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1801 const int mv_stride= (s->mb_width << mv_sample_log2) + 1;
1802 s->low_delay=0; //needed to see the vectors without trashing the buffers
1803
1804 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1805 for(i=0; i<3; i++){
1806 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1807 pict->data[i]= s->visualization_buffer[i];
1808 }
1809 pict->type= FF_BUFFER_TYPE_COPY;
1810 ptr= pict->data[0];
1811
1812 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1813 int mb_x;
1814 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1815 const int mb_index= mb_x + mb_y*s->mb_stride;
1816 if((s->avctx->debug_mv) && pict->motion_val){
1817 int type;
1818 for(type=0; type<3; type++){
1819 int direction = 0;
1820 switch (type) {
1821 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
1822 continue;
1823 direction = 0;
1824 break;
1825 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
1826 continue;
1827 direction = 0;
1828 break;
1829 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
1830 continue;
1831 direction = 1;
1832 break;
1833 }
1834 if(!USES_LIST(pict->mb_type[mb_index], direction))
1835 continue;
1836
1837 if(IS_8X8(pict->mb_type[mb_index])){
1838 int i;
1839 for(i=0; i<4; i++){
1840 int sx= mb_x*16 + 4 + 8*(i&1);
1841 int sy= mb_y*16 + 4 + 8*(i>>1);
1842 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1843 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1844 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1845 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1846 }
1847 }else if(IS_16X8(pict->mb_type[mb_index])){
1848 int i;
1849 for(i=0; i<2; i++){
1850 int sx=mb_x*16 + 8;
1851 int sy=mb_y*16 + 4 + 8*i;
1852 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1853 int mx=(pict->motion_val[direction][xy][0]>>shift);
1854 int my=(pict->motion_val[direction][xy][1]>>shift);
1855
1856 if(IS_INTERLACED(pict->mb_type[mb_index]))
1857 my*=2;
1858
1859 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1860 }
1861 }else if(IS_8X16(pict->mb_type[mb_index])){
1862 int i;
1863 for(i=0; i<2; i++){
1864 int sx=mb_x*16 + 4 + 8*i;
1865 int sy=mb_y*16 + 8;
1866 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1867 int mx=(pict->motion_val[direction][xy][0]>>shift);
1868 int my=(pict->motion_val[direction][xy][1]>>shift);
1869
1870 if(IS_INTERLACED(pict->mb_type[mb_index]))
1871 my*=2;
1872
1873 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1874 }
1875 }else{
1876 int sx= mb_x*16 + 8;
1877 int sy= mb_y*16 + 8;
1878 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1879 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1880 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1881 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1882 }
1883 }
1884 }
1885 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1886 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1887 int y;
1888 for(y=0; y<8; y++){
1889 *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= c;
1890 *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= c;
1891 }
1892 }
1893 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1894 int mb_type= pict->mb_type[mb_index];
1895 uint64_t u,v;
1896 int y;
1897 #define COLOR(theta, r)\
1898 u= (int)(128 + r*cos(theta*3.141592/180));\
1899 v= (int)(128 + r*sin(theta*3.141592/180));
1900
1901
1902 u=v=128;
1903 if(IS_PCM(mb_type)){
1904 COLOR(120,48)
1905 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1906 COLOR(30,48)
1907 }else if(IS_INTRA4x4(mb_type)){
1908 COLOR(90,48)
1909 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1910 // COLOR(120,48)
1911 }else if(IS_DIRECT(mb_type)){
1912 COLOR(150,48)
1913 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1914 COLOR(170,48)
1915 }else if(IS_GMC(mb_type)){
1916 COLOR(190,48)
1917 }else if(IS_SKIP(mb_type)){
1918 // COLOR(180,48)
1919 }else if(!USES_LIST(mb_type, 1)){
1920 COLOR(240,48)
1921 }else if(!USES_LIST(mb_type, 0)){
1922 COLOR(0,48)
1923 }else{
1924 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1925 COLOR(300,48)
1926 }
1927
1928 u*= 0x0101010101010101ULL;
1929 v*= 0x0101010101010101ULL;
1930 for(y=0; y<8; y++){
1931 *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= u;
1932 *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= v;
1933 }
1934
1935 //segmentation
1936 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1937 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1938 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1939 }
1940 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1941 for(y=0; y<16; y++)
1942 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1943 }
1944 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1945 int dm= 1 << (mv_sample_log2-2);
1946 for(i=0; i<4; i++){
1947 int sx= mb_x*16 + 8*(i&1);
1948 int sy= mb_y*16 + 8*(i>>1);
1949 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1950 //FIXME bidir
1951 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1952 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1953 for(y=0; y<8; y++)
1954 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1955 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1956 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1957 }
1958 }
1959
1960 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1961 // hmm
1962 }
1963 }
1964 s->mbskip_table[mb_index]=0;
1965 }
1966 }
1967 }
1968 }
1969
1970 #ifdef CONFIG_ENCODERS
1971
/**
 * Sum of absolute differences between a 16x16 block and a constant value.
 *
 * @param src top-left sample of the 16x16 block
 * @param ref constant reference value subtracted from every sample
 * @param stride number of bytes between vertically adjacent samples
 * @return sum over all 256 samples of |src[sample] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride){
    int sum = 0;
    int row, col;

    for (row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            sum += (diff < 0) ? -diff : diff;
        }
    }

    return sum;
}
1984
1985 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
1986 int x, y, w, h;
1987 int acc=0;
1988
1989 w= s->width &~15;
1990 h= s->height&~15;
1991
1992 for(y=0; y<h; y+=16){
1993 for(x=0; x<w; x+=16){
1994 int offset= x + y*stride;
1995 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride, 16);
1996 int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
1997 int sae = get_sae(src + offset, mean, stride);
1998
1999 acc+= sae + 500 < sad;
2000 }
2001 }
2002 return acc;
2003 }
2004
2005
/**
 * Submits one raw input frame to the encoder's input queue
 * (s->input_picture[]), from which select_input_picture() later picks
 * frames for B-frame reordering.
 *
 * @param s       encoder context
 * @param pic_arg frame to submit, or NULL to only shift the queue (flush)
 * @return 0 on success, -1 on non-monotonic timestamps
 */
static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
    AVFrame *pic=NULL;
    int64_t pts;
    int i;
    const int encoding_delay= s->max_b_frames;
    int direct=1; /* try to reference the caller's buffer instead of copying */

    if(pic_arg){
        pts= pic_arg->pts;
        pic_arg->display_picture_number= s->input_picture_number++;

        if(pts != AV_NOPTS_VALUE){
            if(s->user_specified_pts != AV_NOPTS_VALUE){
                int64_t time= pts;
                int64_t last= s->user_specified_pts;

                /* timestamps must be strictly increasing */
                if(time <= last){
                    av_log(s->avctx, AV_LOG_ERROR, "Error, Invalid timestamp=%Ld, last=%Ld\n", pts, s->user_specified_pts);
                    return -1;
                }
            }
            s->user_specified_pts= pts;
        }else{
            /* no pts supplied: extrapolate from the previous one, or fall
               back to the display picture number */
            if(s->user_specified_pts != AV_NOPTS_VALUE){
                s->user_specified_pts=
                pts= s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%Ld)\n", pts);
            }else{
                pts= pic_arg->display_picture_number;
            }
        }
    }

    if(pic_arg){
        /* direct mode needs the input to stay valid (INPUT_PRESERVED or no
           delay) and its strides to match the encoder's internal strides */
        if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
        if(pic_arg->linesize[0] != s->linesize) direct=0;
        if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
        if(pic_arg->linesize[2] != s->uvlinesize) direct=0;

//        av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);

        if(direct){
            i= ff_find_unused_picture(s, 1);

            pic= (AVFrame*)&s->picture[i];
            pic->reference= 3;

            /* take over the caller's plane pointers directly */
            for(i=0; i<4; i++){
                pic->data[i]= pic_arg->data[i];
                pic->linesize[i]= pic_arg->linesize[i];
            }
            alloc_picture(s, (Picture*)pic, 1);
        }else{
            int offset= 16; /* left-edge padding of the internal buffer */
            i= ff_find_unused_picture(s, 0);

            pic= (AVFrame*)&s->picture[i];
            pic->reference= 3;

            alloc_picture(s, (Picture*)pic, 0);

            if(   pic->data[0] + offset == pic_arg->data[0]
               && pic->data[1] + offset == pic_arg->data[1]
               && pic->data[2] + offset == pic_arg->data[2]){
       // empty: data already lives in our internal buffer
            }else{
                /* copy plane by plane, honouring possibly different strides */
                int h_chroma_shift, v_chroma_shift;
                avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);

                for(i=0; i<3; i++){
                    int src_stride= pic_arg->linesize[i];
                    int dst_stride= i ? s->uvlinesize : s->linesize;
                    int h_shift= i ? h_chroma_shift : 0;
                    int v_shift= i ? v_chroma_shift : 0;
                    int w= s->width >>h_shift;
                    int h= s->height>>v_shift;
                    uint8_t *src= pic_arg->data[i];
                    uint8_t *dst= pic->data[i] + offset;

                    if(src_stride==dst_stride)
                        memcpy(dst, src, src_stride*h);
                    else{
                        while(h--){
                            memcpy(dst, src, w);
                            dst += dst_stride;
                            src += src_stride;
                        }
                    }
                }
            }
        }
        copy_picture_attributes(s, pic, pic_arg);
        pic->pts= pts; //we set this here to avoid modifiying pic_arg
    }

    /* shift buffer entries */
    for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
        s->input_picture[i-1]= s->input_picture[i];

    s->input_picture[encoding_delay]= (Picture*)pic;

    return 0;
}
2109
2110 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){
2111 int x, y, plane;
2112 int score=0;
2113 int64_t score64=0;
2114
2115 for(plane=0; plane<3; plane++){
2116 const int stride= p->linesize[plane];
2117 const int bw= plane ? 1 : 2;
2118 for(y=0; y<s->mb_height*bw; y++){
2119 for(x=0; x<s->mb_width*bw; x++){
2120 int v= s->dsp.frame_skip_cmp[1](s, p->data[plane] + 8*(x + y*stride), ref->data[plane] + 8*(x + y*stride), stride, 8);
2121
2122 switch(s->avctx->frame_skip_exp){
2123 case 0: score= FFMAX(score, v); break;
2124 case 1: score+= ABS(v);break;
2125 case 2: score+= v*v;break;
2126 case 3: score64+= ABS(v*v*(int64_t)v);break;
2127 case 4: score64+= v*v*(int64_t)(v*v);break;
2128 }
2129 }
2130 }
2131 }
2132
2133 if(score) score64= score;
2134
2135 if(score64 < s->avctx->frame_skip_threshold)
2136 return 1;
2137 if(score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda)>>8))
2138 return 1;
2139 return 0;
2140 }
2141
/**
 * Picks the next picture(s) to code from s->input_picture[]:
 *  - may drop the frame entirely via skip_check()
 *  - honours two-pass (PASS2) picture types from the rate-control log
 *  - decides the number of preceding B frames per b_frame_strategy
 *  - enforces gop-size and closed-gop constraints
 * The chosen coding order is written to s->reordered_input_picture[] and the
 * frame to code next is installed as s->new_picture / s->current_picture.
 */
static void select_input_picture(MpegEncContext *s){
    int i;

    /* shift the reorder queue by one */
    for(i=1; i<MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;

    /* set next picture type & ordering */
    if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
        if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
            /* no reference available yet (or intra-only): force an I frame */
            s->reordered_input_picture[0]= s->input_picture[0];
            s->reordered_input_picture[0]->pict_type= I_TYPE;
            s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
        }else{
            int b_frames;

            if(s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor){
                if(skip_check(s, s->input_picture[0], s->next_picture_ptr)){
//av_log(NULL, AV_LOG_DEBUG, "skip %p %Ld\n", s->input_picture[0]->data[0], s->input_picture[0]->pts);

                    /* frame is skipped: unlink or release its buffer */
                    if(s->input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
                        for(i=0; i<4; i++)
                            s->input_picture[0]->data[i]= NULL;
                        s->input_picture[0]->type= 0;
                    }else{
                        assert(   s->input_picture[0]->type==FF_BUFFER_TYPE_USER
                               || s->input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);

                        s->avctx->release_buffer(s->avctx, (AVFrame*)s->input_picture[0]);
                    }

                    goto no_output_pic;
                }
            }

            if(s->flags&CODEC_FLAG_PASS2){
                /* second pass: take picture types recorded during pass one */
                for(i=0; i<s->max_b_frames+1; i++){
                    int pict_num= s->input_picture[0]->display_picture_number + i;

                    if(pict_num >= s->rc_context.num_entries)
                        break;
                    if(!s->input_picture[i]){
                        s->rc_context.entry[pict_num-1].new_pict_type = P_TYPE;
                        break;
                    }

                    s->input_picture[i]->pict_type=
                        s->rc_context.entry[pict_num].new_pict_type;
                }
            }

            if(s->avctx->b_frame_strategy==0){
                /* strategy 0: always use the maximum number of B frames
                   actually available in the queue */
                b_frames= s->max_b_frames;
                while(b_frames && !s->input_picture[b_frames]) b_frames--;
            }else if(s->avctx->b_frame_strategy==1){
                /* strategy 1: score frames by intra-block count and end the
                   B run at the first frame that looks too intra-heavy */
                for(i=1; i<s->max_b_frames+1; i++){
                    if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
                        s->input_picture[i]->b_frame_score=
                            get_intra_count(s, s->input_picture[i  ]->data[0],
                                               s->input_picture[i-1]->data[0], s->linesize) + 1;
                    }
                }
                for(i=0; i<s->max_b_frames+1; i++){
                    if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
                }

                b_frames= FFMAX(0, i-1);

                /* reset scores */
                for(i=0; i<b_frames+1; i++){
                    s->input_picture[i]->b_frame_score=0;
                }
            }else{
                av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
                b_frames=0;
            }

            emms_c();
//static int b_count=0;
//b_count+= b_frames;
//av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);

            /* a frame already typed as non-B (e.g. by PASS2) ends the run */
            for(i= b_frames - 1; i>=0; i--){
                int type= s->input_picture[i]->pict_type;
                if(type && type != B_TYPE)
                    b_frames= i;
            }
            if(s->input_picture[b_frames]->pict_type == B_TYPE && b_frames == s->max_b_frames){
                av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n");
            }

            if(s->picture_in_gop_number + b_frames >= s->gop_size){
                if((s->flags2 & CODEC_FLAG2_STRICT_GOP) && s->gop_size > s->picture_in_gop_number){
                    b_frames= s->gop_size - s->picture_in_gop_number - 1;
                }else{
                    if(s->flags & CODEC_FLAG_CLOSED_GOP)
                        b_frames=0;
                    s->input_picture[b_frames]->pict_type= I_TYPE;
                }
            }

            /* a closed gop must not end with a B run leading into the I */
            if(   (s->flags & CODEC_FLAG_CLOSED_GOP)
               && b_frames
               && s->input_picture[b_frames]->pict_type== I_TYPE)
                b_frames--;

            s->reordered_input_picture[0]= s->input_picture[b_frames];
            if(s->reordered_input_picture[0]->pict_type != I_TYPE)
                s->reordered_input_picture[0]->pict_type= P_TYPE;
            s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
            for(i=0; i<b_frames; i++){
                s->reordered_input_picture[i+1]= s->input_picture[i];
                s->reordered_input_picture[i+1]->pict_type= B_TYPE;
                s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
            }
        }
    }
no_output_pic:
    if(s->reordered_input_picture[0]){
        /* B frames are never used as references */
        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;

        copy_picture(&s->new_picture, s->reordered_input_picture[0]);

        if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
            // input is a shared pix, so we can't modifiy it -> alloc a new one & ensure that the shared one is reuseable

            int i= ff_find_unused_picture(s, 0);
            Picture *pic= &s->picture[i];

            /* mark us unused / free shared pic */
            for(i=0; i<4; i++)
                s->reordered_input_picture[0]->data[i]= NULL;
            s->reordered_input_picture[0]->type= 0;

            pic->reference = s->reordered_input_picture[0]->reference;

            alloc_picture(s, pic, 0);

            copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);

            s->current_picture_ptr= pic;
        }else{
            // input is not a shared pix -> reuse buffer for current_pix

            assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
                   || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);

            s->current_picture_ptr= s->reordered_input_picture[0];
            for(i=0; i<4; i++){
                s->new_picture.data[i]+=16; /* skip the left edge padding */
            }
        }
        copy_picture(&s->current_picture, s->current_picture_ptr);

        s->picture_number= s->new_picture.display_picture_number;
//printf("dpn:%d\n", s->picture_number);
    }else{
        memset(&s->new_picture, 0, sizeof(Picture));
    }
}
2302
/**
 * Encodes one video frame.
 *
 * @param avctx    codec context
 * @param buf      output buffer for the coded bitstream
 * @param buf_size size of buf in bytes
 * @param data     input AVFrame, or NULL to drain delayed (B) frames
 * @return number of bytes written, or -1 on error
 */
int MPV_encode_picture(AVCodecContext *avctx,
                       unsigned char *buf, int buf_size, void *data)
{
    MpegEncContext *s = avctx->priv_data;
    AVFrame *pic_arg = data;
    int i, stuffing_count;

    if(avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUVJ420P){
        av_log(avctx, AV_LOG_ERROR, "this codec supports only YUV420P\n");
        return -1;
    }

    /* give each slice thread a share of the output buffer proportional to
       its share of the macroblock rows */
    for(i=0; i<avctx->thread_count; i++){
        int start_y= s->thread_context[i]->start_mb_y;
        int end_y= s->thread_context[i]-> end_mb_y;
        int h= s->mb_height;
        uint8_t *start= buf + (size_t)(((int64_t) buf_size)*start_y/h);
        uint8_t *end  = buf + (size_t)(((int64_t) buf_size)* end_y/h);

        init_put_bits(&s->thread_context[i]->pb, start, end - start);
    }

    s->picture_in_gop_number++;

    if(load_input_picture(s, pic_arg) < 0)
        return -1;

    select_input_picture(s);

    /* output? */
    if(s->new_picture.data[0]){
        s->pict_type= s->new_picture.pict_type;
//emms_c();
//printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
        MPV_frame_start(s, avctx);

        encode_picture(s, s->picture_number);

        /* export per-frame bit statistics to the public context */
        avctx->real_pict_num  = s->picture_number;
        avctx->header_bits = s->header_bits;
        avctx->mv_bits     = s->mv_bits;
        avctx->misc_bits   = s->misc_bits;
        avctx->i_tex_bits  = s->i_tex_bits;
        avctx->p_tex_bits  = s->p_tex_bits;
        avctx->i_count     = s->i_count;
        avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
        avctx->skip_count  = s->skip_count;

        MPV_frame_end(s);

        if (s->out_format == FMT_MJPEG)
            mjpeg_picture_trailer(s);

        if(s->flags&CODEC_FLAG_PASS1)
            ff_write_pass1_stats(s);

        for(i=0; i<4; i++){
            avctx->error[i] += s->current_picture_ptr->error[i];
        }

        if(s->flags&CODEC_FLAG_PASS1)
            assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits + avctx->i_tex_bits + avctx->p_tex_bits == put_bits_count(&s->pb));
        flush_put_bits(&s->pb);
        s->frame_bits  = put_bits_count(&s->pb);

        /* pad the frame to satisfy the VBV model: mpeg1/2 append zero
           bytes, mpeg4 a stuffing startcode followed by 0xFF bytes */
        stuffing_count= ff_vbv_update(s, s->frame_bits);
        if(stuffing_count){
            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < stuffing_count + 50){
                av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch(s->codec_id){
            case CODEC_ID_MPEG1VIDEO:
            case CODEC_ID_MPEG2VIDEO:
                while(stuffing_count--){
                    put_bits(&s->pb, 8, 0);
                }
            break;
            case CODEC_ID_MPEG4:
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while(stuffing_count--){
                    put_bits(&s->pb, 8, 0xFF);
                }
            break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
            }
            flush_put_bits(&s->pb);
            s->frame_bits  = put_bits_count(&s->pb);
        }

        /* update mpeg1/2 vbv_delay for CBR */
        if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && s->out_format == FMT_MPEG1
           && 90000LL * (avctx->rc_buffer_size-1) <= s->avctx->rc_max_rate*0xFFFFLL){
            int vbv_delay;

            assert(s->repeat_first_field==0);

            vbv_delay= lrintf(90000 * s->rc_context.buffer_index / s->avctx->rc_max_rate);
            assert(vbv_delay < 0xFFFF);

            /* patch the 16-bit vbv_delay field in the already-written
               picture header (spread over three bytes) */
            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay>>13;
            s->vbv_delay_ptr[1]  = vbv_delay>>5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay<<3;
        }
        s->total_bits += s->frame_bits;
        avctx->frame_bits  = s->frame_bits;
    }else{
        /* nothing to code (queue still filling, or flushed empty) */
        assert((pbBufPtr(&s->pb) == s->pb.buf));
        s->frame_bits=0;
    }
    assert((s->frame_bits&7)==0);

    return s->frame_bits/8;
}
2423
2424 #endif //CONFIG_ENCODERS
2425
/**
 * Single-vector global motion compensation (MPEG-4 "gmc1" fast path):
 * applies the sprite offset to the luma plane and, unless CODEC_FLAG_GRAY is
 * set, to both chroma planes.
 */
static inline void gmc1_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               uint8_t **ref_picture)
{
    uint8_t *ptr;
    int offset, src_x, src_y, linesize, uvlinesize;
    int motion_x, motion_y;
    int emu=0;

    /* luma */
    motion_x= s->sprite_offset[0][0];
    motion_y= s->sprite_offset[0][1];
    src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
    /* normalize the fractional part to 1/16 pel regardless of accuracy */
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    /* clamp to the padded picture; at the far edge the fraction is dropped
       so the interpolation does not read past the buffer */
    src_x = clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x =0;
    src_y = clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y =0;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + (src_y * linesize) + src_x;

    if(s->flags&CODEC_FLAG_EMU_EDGE){
        /* unsigned compare catches both negative and too-large positions */
        if(   (unsigned)src_x >= s->h_edge_pos - 17
           || (unsigned)src_y >= s->v_edge_pos - 17){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
            ptr= s->edge_emu_buffer;
        }
    }

    if((motion_x|motion_y)&7){
        /* true sub-pel fraction: use the dedicated gmc1 interpolation,
           16 pixels wide done as two 8-wide halves */
        s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
        s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
    }else{
        int dxy;

        /* integer or half-pel position: a plain (no-)rounding put suffices */
        dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
        if (s->no_rounding){
            s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }else{
            s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if(s->flags&CODEC_FLAG_GRAY) return;

    /* chroma: same procedure at half resolution */
    motion_x= s->sprite_offset[1][0];
    motion_y= s->sprite_offset[1][1];
    src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = clip(src_x, -8, s->width>>1);
    if (src_x == s->width>>1)
        motion_x =0;
    src_y = clip(src_y, -8, s->height>>1);
    if (src_y == s->height>>1)
        motion_y =0;

    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x >= (s->h_edge_pos>>1) - 9
           || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
            emu=1; /* remember so cr takes the same emulation path */
        }
    }
    s->dsp.gmc1(dest_cb, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    s->dsp.gmc1(dest_cr, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);

    return;
}
2511
2512 static inline void gmc_motion(MpegEncContext *s,
2513 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2514 uint8_t **ref_picture)
2515 {
2516 uint8_t *ptr;
2517 int linesize, uvlinesize;
2518 const int a= s->sprite_warping_accuracy;
2519 int ox, oy;
2520
2521 linesize = s->linesize;
2522 uvlinesize = s->uvlinesize;
2523
2524 ptr = ref_picture[0];
2525
2526 ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
2527 oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
2528
2529 s->dsp.gmc(dest_y, ptr, linesize, 16,
2530 ox,
2531 oy,
2532 s->sprite_delta[0][0], s->sprite_delta[0][1],
2533 s->sprite_delta[1][0], s->sprite_delta[1][1],
2534 a+1, (1<<(2*a+1)) - s->no_rounding,
2535 s->h_edge_pos, s->v_edge_pos);
2536 s->dsp.gmc(dest_y+8, ptr, linesize, 16,
2537 ox + s->sprite_delta[0][0]*8,
2538 oy + s->sprite_delta[1][0]*8,
2539 s->sprite_delta[0][0], s->sprite_delta[0][1],
2540 s->sprite_delta[1][0], s->sprite_delta[1][1],
2541 a+1, (1<<(2*a+1)) - s->no_rounding,
2542 s->h_edge_pos, s->v_edge_pos);
2543
2544 if(s->flags&CODEC_FLAG_GRAY) return;
2545
2546 ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
2547 oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
2548
2549 ptr = ref_picture[1];
2550 s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
2551 ox,
2552 oy,
2553 s->sprite_delta[0][0], s->sprite_delta[0][1],
2554 s->sprite_delta[1][0], s->sprite_delta[1][1],
2555 a+1, (1<<(2*a+1)) - s->no_rounding,
2556 s->h_edge_pos>>1, s->v_edge_pos>>1);
2557
2558 ptr = ref_picture[2];
2559 s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
2560 ox,
2561 oy,
2562 s->sprite_delta[0][0], s->sprite_delta[0][1],
2563 s->sprite_delta[1][0], s->sprite_delta[1][1],
2564 a+1, (1<<(2*a+1)) - s->no_rounding,
2565 s->h_edge_pos>>1, s->v_edge_pos>>1);
2566 }
2567
/**
 * Copies a rectangular block of samples into a temporary buffer, replicating
 * the border samples for any part that lies outside the source picture.
 *
 * @param buf destination buffer
 * @param src source position; must already point at (src_x, src_y) — the
 *            caller applies the offset, so src may address outside the picture
 * @param linesize bytes between vertically adjacent samples in src and buf
 * @param block_w width of the block
 * @param block_h height of the block
 * @param src_x x coordinate of the block's top-left sample in the source
 * @param src_y y coordinate of the block's top-left sample in the source
 * @param w width of the source picture
 * @param h height of the source picture
 */
void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
                         int src_x, int src_y, int w, int h){
    int start_x, start_y, end_x, end_y, x, y;

    /* If the block lies entirely outside the picture, move its origin back
       to the nearest position where one source row/column still overlaps. */
    if (src_y >= h) {
        src   += (h - 1 - src_y) * linesize;
        src_y  = h - 1;
    } else if (src_y <= -block_h) {
        src   += (1 - block_h - src_y) * linesize;
        src_y  = 1 - block_h;
    }
    if (src_x >= w) {
        src   += w - 1 - src_x;
        src_x  = w - 1;
    } else if (src_x <= -block_w) {
        src   += 1 - block_w - src_x;
        src_x  = 1 - block_w;
    }

    /* Part of the block actually covered by the source picture. */
    start_y = -src_y > 0 ? -src_y : 0;
    start_x = -src_x > 0 ? -src_x : 0;
    end_y   = h - src_y < block_h ? h - src_y : block_h;
    end_x   = w - src_x < block_w ? w - src_x : block_w;

    /* copy the valid region */
    for (y = start_y; y < end_y; y++)
        memcpy(buf + y * linesize + start_x,
               src + y * linesize + start_x,
               end_x - start_x);

    /* replicate the topmost valid row upwards */
    for (y = 0; y < start_y; y++)
        memcpy(buf + y * linesize + start_x,
               buf + start_y * linesize + start_x,
               end_x - start_x);

    /* replicate the bottommost valid row downwards */
    for (y = end_y; y < block_h; y++)
        memcpy(buf + y * linesize + start_x,
               buf + (end_y - 1) * linesize + start_x,
               end_x - start_x);

    /* replicate the border columns sideways over the full block height */
    for (y = 0; y < block_h; y++) {
        uint8_t *row = buf + y * linesize;
        for (x = 0; x < start_x; x++)
            row[x] = row[start_x];
        for (x = end_x; x < block_w; x++)
            row[x] = row[end_x - 1];
    }
}
2638
/**
 * Half-pel precision motion compensation for one w x h block of a single
 * component plane.
 *
 * @param dest destination block
 * @param src  source plane origin (offset applied inside from src_x/src_y)
 * @param field_based 1 for field (interlaced) prediction, 0 for frame
 * @param field_select selects the bottom field when non-zero
 * @param motion_x, motion_y motion vector in half-pel units
 * @return 1 if the edge emulation buffer had to be used, 0 otherwise
 */
static inline int hpel_motion(MpegEncContext *s,
                              uint8_t *dest, uint8_t *src,
                              int field_based, int field_select,
                              int src_x, int src_y,
                              int width, int height, int stride,
                              int h_edge_pos, int v_edge_pos,
                              int w, int h, op_pixels_func *pix_op,
                              int motion_x, int motion_y)
{
    int dxy;
    int emu=0;

    /* dxy selects one of the four half-pel interpolation variants */
    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do no forget half pels */
    src_x = clip(src_x, -16, width); //FIXME unneeded for emu?
    if (src_x == width)
        dxy &= ~1; /* clamped to the right edge: drop the horizontal half pel */
    src_y = clip(src_y, -16, height);
    if (src_y == height)
        dxy &= ~2; /* clamped to the bottom edge: drop the vertical half pel */
    src += src_y * stride + src_x;

    if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
        /* the unsigned cast makes negative positions compare as huge, so a
           single comparison per axis catches both picture borders */
        if(   (unsigned)src_x > h_edge_pos - (motion_x&1) - w
           || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
            ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                                src_x, src_y<<field_based, h_edge_pos, s->v_edge_pos);
            src= s->edge_emu_buffer;
            emu=1;
        }
    }
    if(field_select)
        src += s->linesize; /* bottom field starts one line down */
    pix_op[dxy](dest, src, stride, h);
    return emu;
}
2678
/**
 * Motion compensation for a w x h block when decoding at reduced resolution
 * (s->avctx->lowres). Motion vectors keep their full-resolution precision;
 * the leftover sub-pel fraction is handled by the h264 chroma MC functions.
 *
 * @return 1 if the edge emulation buffer had to be used, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, int stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres= s->avctx->lowres;
    const int s_mask= (2<<lowres)-1; /* mask of the sub-pel fraction bits */
    int emu=0;
    int sx, sy;

    if(s->quarter_sample){
        /* convert qpel vectors to hpel before splitting off the fraction */
        motion_x/=2;
        motion_y/=2;
    }

    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x += motion_x >> (lowres+1);
    src_y += motion_y >> (lowres+1);

    src += src_y * stride + src_x;

    /* unsigned compare catches both negative and too-large positions */
    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
        ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                            src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
        src= s->edge_emu_buffer;
        emu=1;
    }

    /* rescale the fraction to the units pix_op expects (presumably eighth
       pel for the h264 chroma MC — confirm against dsputil) */
    sx <<= 2 - lowres;
    sy <<= 2 - lowres;
    if(field_select)
        src += s->linesize; /* bottom field starts one line down */
    pix_op[lowres](dest, src, stride, h, sx, sy);
    return emu;
}
2720
2721 /* apply one mpeg motion vector to the three components */
2722 static always_inline void mpeg_motion(MpegEncContext *s,
2723 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2724 int field_based, int bottom_field, int field_select,
2725 uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
2726 int motion_x, int motion_y, int h)
2727 {
2728 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2729 int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, uvlinesize, linesize;
2730
2731 #if 0
2732 if(s->quarter_sample)
2733 {
2734 motion_x>>=1;
2735 motion_y>>=1;
2736 }
2737 #endif
2738
2739 v_edge_pos = s->v_edge_pos >> field_based;
2740 linesize = s->current_picture.linesize[0] << field_based;
2741 uvlinesize = s->current_picture.linesize[1] << field_based;
2742
2743 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
2744 src_x = s->mb_x* 16 + (motion_x >> 1);
2745 src_y =(s->mb_y<<(4-field_based)) + (motion_y >> 1);
2746
2747 if (s->out_format == FMT_H263) {
2748 if((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based){
2749 mx = (motion_x>>1)|(motion_x&1);
2750 my = motion_y >>1;
2751 uvdxy = ((my & 1) << 1) | (mx & 1);
2752 uvsrc_x = s->mb_x* 8 + (mx >> 1);
2753 uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1);
2754 }else{
2755 uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
2756 uvsrc_x = src_x>>1;
2757 uvsrc_y = src_y>>1;
2758 }
2759 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
2760 mx = motion_x / 4;
2761 my = motion_y / 4;
2762 uvdxy = 0;
2763 uvsrc_x = s->mb_x*8 + mx;
2764 uvsrc_y = s->mb_y*8 + my;
2765 } else {
2766 if(s->chroma_y_shift){
2767 mx = motion_x / 2;
2768 my = motion_y / 2;
2769 uvdxy = ((my & 1) << 1) | (mx & 1);
2770 uvsrc_x = s->mb_x* 8 + (mx >> 1);
2771 uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1);
2772 } else {
2773 if(s->chroma_x_shift){
2774 //Chroma422
2775 mx = motion_x / 2;
2776 uvdxy = ((motion_y & 1) << 1) | (mx & 1);
2777 uvsrc_x = s->mb_x* 8 + (mx >> 1);
2778 uvsrc_y = src_y;
2779 } else {
2780 //Chroma444
2781 uvdxy = dxy;
2782 uvsrc_x = src_x;
2783 uvsrc_y = src_y;
2784 }
2785 }
2786 }
2787
2788 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2789 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2790 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
2791
2792 if( (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16
2793 || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
2794 if(s->codec_id == CODEC_ID_MPEG2VIDEO ||
2795 s->codec_id == CODEC_ID_MPEG1VIDEO){
2796 av_log(s->avctx,AV_LOG_DEBUG,"MPEG motion vector out of boundary\n");
2797 return ;
2798 }
2799 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
2800 src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
2801 ptr_y = s->edge_emu_buffer;