069194d18216134a58ecbf6355e0ebe4765a1a0d
[libav.git] / libavcodec / mpegvideo.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
21 */
22
23 /**
24 * @file mpegvideo.c
25 * The simplest mpeg encoder (well, it was the simplest!).
26 */
27
28 #include "avcodec.h"
29 #include "dsputil.h"
30 #include "mpegvideo.h"
31 #include "faandct.h"
32 #include <limits.h>
33
34 #ifdef USE_FASTMEMCPY
35 #include "fastmemcpy.h"
36 #endif
37
38 //#undef NDEBUG
39 //#include <assert.h>
40
41 #ifdef CONFIG_ENCODERS
42 static void encode_picture(MpegEncContext *s, int picture_number);
43 #endif //CONFIG_ENCODERS
44 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
45 DCTELEM *block, int n, int qscale);
46 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
47 DCTELEM *block, int n, int qscale);
48 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
49 DCTELEM *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
51 DCTELEM *block, int n, int qscale);
52 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
53 DCTELEM *block, int n, int qscale);
54 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
55 DCTELEM *block, int n, int qscale);
56 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
57 #ifdef CONFIG_ENCODERS
58 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
59 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
60 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
61 static int sse_mb(MpegEncContext *s);
62 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
63 #endif //CONFIG_ENCODERS
64
65 #ifdef HAVE_XVMC
66 extern int XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx);
67 extern void XVMC_field_end(MpegEncContext *s);
68 extern void XVMC_decode_mb(MpegEncContext *s);
69 #endif
70
/* global function pointer for edge drawing; defaults to the C version —
   NOTE(review): non-static, presumably replaceable by arch-specific init — confirm */
void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
72
73
74 /* enable all paranoid tests for rounding, overflows, etc... */
75 //#define PARANOID
76
77 //#define DEBUG
78
79
80 /* for jpeg fast DCT */
81 #define CONST_BITS 14
82
/* AAN DCT post-scale factors, used by convert_matrix() for the ifast fdct path */
static const uint16_t aanscales[64] = {
    /* precomputed values scaled up by 14 bits */
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
    21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
    19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
    8867 , 12299, 11585, 10426,  8867,  6967,  4799,  2446,
    4520 ,  6270,  5906,  5315,  4520,  3552,  2446,  1247
};
94
/* rounding table for h263 chroma motion vector scaling (index is the MV remainder) */
static const uint8_t h263_chroma_roundtab[16] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
};
99
/* identity mapping: chroma qscale equals luma qscale by default */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
104
105 #ifdef CONFIG_ENCODERS
/* lazily allocated in MPV_encode_defaults(); shared by all encoder instances */
static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
/* filled in MPV_encode_defaults(); 1 for |MV| < 16, 0 elsewhere */
static uint8_t default_fcode_tab[MAX_MV*2+1];

/* pixel formats supported by the default encoders; -1 terminated */
enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
110
/**
 * Builds quantization multiplier tables for every qscale in [qmin, qmax].
 *
 * qmat[qscale][i] holds (1<<QMAT_SHIFT)/(qscale*matrix) style factors in the
 * layout required by the active idct permutation; qmat16 additionally holds
 * the 16-bit factors and rounding-bias terms (derived from @bias) used by the
 * QMAT_SHIFT_MMX quantizer path. The scaling differs per fdct: ifast (and
 * non-postscaled faandct) leave the AAN scale factors in the coefficients, so
 * aanscales must be folded into the divisor there.
 */
static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
                           const uint16_t *quant_matrix, int bias, int qmin, int qmax)
{
    int qscale;

    for(qscale=qmin; qscale<=qmax; qscale++){
        int i;
        if (dsp->fdct == ff_jpeg_fdct_islow
#ifdef FAAN_POSTSCALE
            || dsp->fdct == ff_faandct
#endif
            ) {
            for(i=0;i<64;i++) {
                const int j= dsp->idct_permutation[i];
                /* 16 <= qscale * quant_matrix[i] <= 7905 */
                /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
                /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
                /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */

                qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) /
                                (qscale * quant_matrix[j]));
            }
        } else if (dsp->fdct == fdct_ifast
#ifndef FAAN_POSTSCALE
                   || dsp->fdct == ff_faandct
#endif
                   ) {
            for(i=0;i<64;i++) {
                const int j= dsp->idct_permutation[i];
                /* 16 <= qscale * quant_matrix[i] <= 7905 */
                /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
                /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
                /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */

                qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) /
                                (aanscales[i] * qscale * quant_matrix[j]));
            }
        } else {
            for(i=0;i<64;i++) {
                const int j= dsp->idct_permutation[i];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                   So 16           <= qscale * quant_matrix[i]             <= 7905
                   so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
                   so 32768        >= (1<<19) / (qscale * quant_matrix[i]) >= 67
                */
                qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
//                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);

                /* keep the 16-bit factor in (0, 128*256) so the MMX code never overflows */
                if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;
                qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);
            }
        }
    }
}
166
167 static inline void update_qscale(MpegEncContext *s){
168 s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
169 s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
170
171 s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
172 }
173 #endif //CONFIG_ENCODERS
174
175 void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
176 int i;
177 int end;
178
179 st->scantable= src_scantable;
180
181 for(i=0; i<64; i++){
182 int j;
183 j = src_scantable[i];
184 st->permutated[i] = permutation[j];
185 #ifdef ARCH_POWERPC
186 st->inverse[j] = i;
187 #endif
188 }
189
190 end=-1;
191 for(i=0; i<64; i++){
192 int j;
193 j = st->permutated[i];
194 if(j>end) end=j;
195 st->raster_end[i]= end;
196 }
197 }
198
199 #ifdef CONFIG_ENCODERS
200 void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){
201 int i;
202
203 if(matrix){
204 put_bits(pb, 1, 1);
205 for(i=0;i<64;i++) {
206 put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
207 }
208 }else
209 put_bits(pb, 1, 0);
210 }
211 #endif //CONFIG_ENCODERS
212
/* init common dct for both encoder and decoder */
int DCT_common_init(MpegEncContext *s)
{
    /* install the portable C implementations first ... */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#ifdef CONFIG_ENCODERS
    s->dct_quantize= dct_quantize_c;
    s->denoise_dct= denoise_dct_c;
#endif

    /* ... then let platform-specific init override them where available */
#ifdef HAVE_MMX
    MPV_common_init_mmx(s);
#endif
#ifdef ARCH_ALPHA
    MPV_common_init_axp(s);
#endif
#ifdef HAVE_MLIB
    MPV_common_init_mlib(s);
#endif
#ifdef HAVE_MMI
    MPV_common_init_mmi(s);
#endif
#ifdef ARCH_ARMV4L
    MPV_common_init_armv4l(s);
#endif
#ifdef ARCH_POWERPC
    MPV_common_init_ppc(s);
#endif

#ifdef CONFIG_ENCODERS
    /* remember the (possibly platform-optimized) quantizer before the trellis
       override below, so fast_dct_quantize stays the non-trellis version */
    s->fast_dct_quantize= s->dct_quantize;

    if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
        s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
    }

#endif //CONFIG_ENCODERS

    /* load & permutate scantables
       note: only wmv uses different ones
    */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
271
272 static void copy_picture(Picture *dst, Picture *src){
273 *dst = *src;
274 dst->type= FF_BUFFER_TYPE_COPY;
275 }
276
277 static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src){
278 int i;
279
280 dst->pict_type = src->pict_type;
281 dst->quality = src->quality;
282 dst->coded_picture_number = src->coded_picture_number;
283 dst->display_picture_number = src->display_picture_number;
284 // dst->reference = src->reference;
285 dst->pts = src->pts;
286 dst->interlaced_frame = src->interlaced_frame;
287 dst->top_field_first = src->top_field_first;
288
289 if(s->avctx->me_threshold){
290 if(!src->motion_val[0])
291 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
292 if(!src->mb_type)
293 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
294 if(!src->ref_index[0])
295 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
296 if(src->motion_subsample_log2 != dst->motion_subsample_log2)
297 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesnt match! (%d!=%d)\n",
298 src->motion_subsample_log2, dst->motion_subsample_log2);
299
300 memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
301
302 for(i=0; i<2; i++){
303 int stride= ((16*s->mb_width )>>src->motion_subsample_log2) + 1;
304 int height= ((16*s->mb_height)>>src->motion_subsample_log2);
305
306 if(src->motion_val[i] && src->motion_val[i] != dst->motion_val[i]){
307 memcpy(dst->motion_val[i], src->motion_val[i], 2*stride*height*sizeof(int16_t));
308 }
309 if(src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]){
310 memcpy(dst->ref_index[i], src->ref_index[i], s->b8_stride*2*s->mb_height*sizeof(int8_t));
311 }
312 }
313 }
314 }
315
/**
 * allocates a Picture
 * The pixels are allocated/set by calling get_buffer() if shared=0.
 * Returns 0 on success, -1 on failure (partially allocated side tables are
 * released later through free_picture()/MPV_common_end()).
 */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11
    const int mb_array_size= s->mb_stride*s->mb_height;
    const int b8_array_size= s->b8_stride*s->mb_height*2;
    const int b4_array_size= s->b4_stride*s->mb_height*4;
    int i;

    if(shared){
        /* pixels are owned by the caller; we only tag the picture */
        assert(pic->data[0]);
        assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
        pic->type= FF_BUFFER_TYPE_SHARED;
    }else{
        int r;

        assert(!pic->data[0]);

        r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);

        if(r<0 || !pic->age || !pic->type || !pic->data[0]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
            return -1;
        }

        /* the rest of the context assumes strides never change after init */
        if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
            return -1;
        }

        if(pic->linesize[1] != pic->linesize[2]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride missmatch)\n");
            return -1;
        }

        s->linesize  = pic->linesize[0];
        s->uvlinesize= pic->linesize[1];
    }

    /* side tables are only allocated once per Picture (qscale_table doubles
       as the "already allocated" flag) */
    if(pic->qscale_table==NULL){
        if (s->encoding) {
            CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))
        }

        CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
        CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num    * sizeof(uint32_t))
        pic->mb_type= pic->mb_type_base + s->mb_stride+1;
        if(s->out_format == FMT_H264){
            /* H.264 stores motion at 4x4 granularity (subsample_log2 == 2) */
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+2)  * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+2;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 2;
        }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
            /* other codecs use 8x8 granularity (subsample_log2 == 3) */
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+2) * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+2;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 3;
        }
        if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
            CHECKED_ALLOCZ(pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6)
        }
        pic->qstride= s->mb_stride;
        CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
    }

    //it might be nicer if the application would keep track of these but it would require a API change
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
    s->prev_pict_types[0]= s->pict_type;
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
        pic->age= INT_MAX; // skipped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway

    return 0;
fail: //for the CHECKED_ALLOCZ macro
    return -1;
}
400
401 /**
402 * deallocates a picture
403 */
404 static void free_picture(MpegEncContext *s, Picture *pic){
405 int i;
406
407 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
408 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
409 }
410
411 av_freep(&pic->mb_var);
412 av_freep(&pic->mc_mb_var);
413 av_freep(&pic->mb_mean);
414 av_freep(&pic->mbskip_table);
415 av_freep(&pic->qscale_table);
416 av_freep(&pic->mb_type_base);
417 av_freep(&pic->dct_coeff);
418 av_freep(&pic->pan_scan);
419 pic->mb_type= NULL;
420 for(i=0; i<2; i++){
421 av_freep(&pic->motion_val_base[i]);
422 av_freep(&pic->ref_index[i]);
423 }
424
425 if(pic->type == FF_BUFFER_TYPE_SHARED){
426 for(i=0; i<4; i++){
427 pic->base[i]=
428 pic->data[i]= NULL;
429 }
430 pic->type= 0;
431 }
432 }
433
/**
 * Allocates the per-thread scratch buffers of a (possibly duplicated)
 * context. Returns 0 on success, -1 on allocation failure.
 * NOTE(review): the base parameter is currently unused — confirm.
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int i;

    // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
    CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;

    //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
    CHECKED_ALLOCZ(s->me.scratchpad,  (s->width+64)*2*16*2*sizeof(uint8_t))
    /* the three scratchpads alias the same allocation */
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
        CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))
        }
    }
    CHECKED_ALLOCZ(s->blocks, 64*12*2 * sizeof(DCTELEM))
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = (short *)(&s->block[i]);
    }
    return 0;
fail:
    return -1; //free() through MPV_common_end()
}
463
464 static void free_duplicate_context(MpegEncContext *s){
465 if(s==NULL) return;
466
467 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
468 av_freep(&s->me.scratchpad);
469 s->rd_scratchpad=
470 s->b_scratchpad=
471 s->obmc_scratchpad= NULL;
472
473 av_freep(&s->dct_error_sum);
474 av_freep(&s->me.map);
475 av_freep(&s->me.score_map);
476 av_freep(&s->blocks);
477 s->block= NULL;
478 }
479
480 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
481 #define COPY(a) bak->a= src->a
482 COPY(allocated_edge_emu_buffer);
483 COPY(edge_emu_buffer);
484 COPY(me.scratchpad);
485 COPY(rd_scratchpad);
486 COPY(b_scratchpad);
487 COPY(obmc_scratchpad);
488 COPY(me.map);
489 COPY(me.score_map);
490 COPY(blocks);
491 COPY(block);
492 COPY(start_mb_y);
493 COPY(end_mb_y);
494 COPY(me.map_generation);
495 COPY(pb);
496 COPY(dct_error_sum);
497 COPY(dct_count[0]);
498 COPY(dct_count[1]);
499 #undef COPY
500 }
501
502 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
503 MpegEncContext bak;
504 int i;
505 //FIXME copy only needed parts
506 //START_TIMER
507 backup_duplicate_context(&bak, dst);
508 memcpy(dst, src, sizeof(MpegEncContext));
509 backup_duplicate_context(dst, &bak);
510 for(i=0;i<12;i++){
511 dst->pblocks[i] = (short *)(&dst->block[i]);
512 }
513 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
514 }
515
516 static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src){
517 #define COPY(a) dst->a= src->a
518 COPY(pict_type);
519 COPY(current_picture);
520 COPY(f_code);
521 COPY(b_code);
522 COPY(qscale);
523 COPY(lambda);
524 COPY(lambda2);
525 COPY(picture_in_gop_number);
526 COPY(gop_picture_number);
527 COPY(frame_pred_frame_dct); //FIXME dont set in encode_header
528 COPY(progressive_frame); //FIXME dont set in encode_header
529 COPY(partitioned_frame); //FIXME dont set in encode_header
530 #undef COPY
531 }
532
533 /**
534 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
535 * the changed fields will not depend upon the prior state of the MpegEncContext.
536 */
537 static void MPV_common_defaults(MpegEncContext *s){
538 s->y_dc_scale_table=
539 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
540 s->chroma_qscale_table= ff_default_chroma_qscale_table;
541 s->progressive_frame= 1;
542 s->progressive_sequence= 1;
543 s->picture_structure= PICT_FRAME;
544
545 s->coded_picture_number = 0;
546 s->picture_number = 0;
547 s->input_picture_number = 0;
548
549 s->picture_in_gop_number = 0;
550
551 s->f_code = 1;
552 s->b_code = 1;
553 }
554
/**
 * sets the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 * Currently identical to the common defaults; decoder-specific defaults would go here.
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}
562
563 /**
564 * sets the given MpegEncContext to defaults for encoding.
565 * the changed fields will not depend upon the prior state of the MpegEncContext.
566 */
567
568 #ifdef CONFIG_ENCODERS
569 static void MPV_encode_defaults(MpegEncContext *s){
570 static int done=0;
571
572 MPV_common_defaults(s);
573
574 if(!done){
575 int i;
576 done=1;
577
578 default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
579 memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
580 memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
581
582 for(i=-16; i<16; i++){
583 default_fcode_tab[i + MAX_MV]= 1;
584 }
585 }
586 s->me.mv_penalty= default_mv_penalty;
587 s->fcode_tab= default_fcode_tab;
588 }
589 #endif //CONFIG_ENCODERS
590
/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set.
 * Returns 0 on success, -1 on failure (everything allocated so far is
 * released via MPV_common_end()).
 */
int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    /* each slice thread needs at least one MB row (16 lines) to work on */
    if(s->avctx->thread_count > MAX_THREADS || (16*s->avctx->thread_count > s->height && s->height)){
        av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    dsputil_init(&s->dsp, s->avctx);
    DCT_common_init(s);

    s->flags= s->avctx->flags;
    s->flags2= s->avctx->flags2;

    /* macroblock geometry; strides carry one extra column so neighbour
       accesses at the right edge stay in bounds */
    s->mb_width  = (s->width  + 15) / 16;
    s->mb_height = (s->height + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width*2 + 1;
    s->b4_stride = s->mb_width*4 + 1;
    mb_array_size= s->mb_height * s->mb_stride;
    mv_table_size= (s->mb_height+2) * s->mb_stride + 1;

    /* set default edge pos, will be overridden in decode_header if needed */
    s->h_edge_pos= s->mb_width*16;
    s->v_edge_pos= s->mb_height*16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0]=
    s->block_wrap[1]=
    s->block_wrap[2]=
    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[4]=
    s->block_wrap[5]= s->mb_stride;

    y_size = s->b8_stride * (2 * s->mb_height + 1);
    c_size = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    /* convert fourcc to upper case */
    s->avctx->codec_tag=   toupper( s->avctx->codec_tag     &0xFF)
                        + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
                        + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
                        + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);

    s->avctx->stream_codec_tag=   toupper( s->avctx->stream_codec_tag     &0xFF)
                               + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
                               + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
                               + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);

    s->avctx->coded_frame= (AVFrame*)&s->current_picture;

    CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error resilience code looks cleaner with this
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
        }
    }
    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        CHECKED_ALLOCZ(s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t))
        /* the usable table pointers skip the top/left border row/column */
        s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

        if(s->msmpeg4_version){
            CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
        }
        CHECKED_ALLOCZ(s->avctx->stats_out, 256);

        /* Allocate MB type table */
        CHECKED_ALLOCZ(s->mb_type  , mb_array_size * sizeof(uint16_t)) //needed for encoding

        CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))

        /* quantizer tables for all 32 qscale values, filled by convert_matrix() */
        CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int))
        CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int))
        CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
        CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))

        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t))
        }
    }
    CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))

    CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))

    if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
        /* interlaced direct mode decoding tables */
        for(i=0; i<2; i++){
            int j, k;
            for(j=0; j<2; j++){
                for(k=0; k<2; k++){
                    CHECKED_ALLOCZ(s->b_field_mv_table_base[i][j][k]     , mv_table_size * 2 * sizeof(int16_t))
                    s->b_field_mv_table[i][j][k]    = s->b_field_mv_table_base[i][j][k]     + s->mb_stride + 1;
                }
                CHECKED_ALLOCZ(s->b_field_select_table[i][j]     , mb_array_size * 2 * sizeof(uint8_t))
                CHECKED_ALLOCZ(s->p_field_mv_table_base[i][j]     , mv_table_size * 2 * sizeof(int16_t))
                s->p_field_mv_table[i][j]    = s->p_field_mv_table_base[i][j]     + s->mb_stride + 1;
            }
            CHECKED_ALLOCZ(s->p_field_select_table[i]      , mb_array_size * 2 * sizeof(uint8_t))
        }
    }
    if (s->out_format == FMT_H263) {
        /* ac values */
        CHECKED_ALLOCZ(s->ac_val_base, yc_size * sizeof(int16_t) * 16);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;

        /* cbp values */
        CHECKED_ALLOCZ(s->coded_block_base, y_size);
        s->coded_block= s->coded_block_base + s->b8_stride + 1;

        /* divx501 bitstream reorder buffer */
        CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);

        /* cbp, ac_pred, pred_dir */
        CHECKED_ALLOCZ(s->cbp_table  , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        //MN: we need these for error resilience of intra-frames
        CHECKED_ALLOCZ(s->dc_val_base, yc_size * sizeof(int16_t));
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 is presumably the DC prediction reset value — confirm against the prediction code */
        for(i=0;i<yc_size;i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is a intra block */
    CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
    //Note the +1 is for a quicker mpeg4 slice_end detection
    CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);

    s->parse_context.state= -1;
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
       s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[1] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
       s->visualization_buffer[2] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
    }

    s->context_initialized = 1;

    /* thread 0 reuses the main context, the others get full copies */
    s->thread_context[0]= s;
    for(i=1; i<s->avctx->thread_count; i++){
        s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
        memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
    }

    /* give each thread its own scratch buffers and MB-row range */
    for(i=0; i<s->avctx->thread_count; i++){
        if(init_duplicate_context(s->thread_context[i], s) < 0)
           goto fail;
        s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
        s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
    }

    return 0;
 fail:
    MPV_common_end(s);
    return -1;
}
778
/* frees the common structure for both encoder and decoder
   (also used as the error-path cleanup of MPV_common_init, so every pointer
   freed here must be NULL or valid after a partial init) */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* free per-thread scratch buffers first, then the duplicated contexts
       themselves (thread 0 is the main context and must not be freed) */
    for(i=0; i<s->avctx->thread_count; i++){
        free_duplicate_context(s->thread_context[i]);
    }
    for(i=1; i<s->avctx->thread_count; i++){
        av_freep(&s->thread_context[i]);
    }

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    /* the non-base pointers are offsets into the freed base arrays */
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->ac_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    if(s->picture){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);
}
857
858 #ifdef CONFIG_ENCODERS
859
860 /* init video encoder */
/**
 * Initialize a video encoder.
 * Copies and validates the user settings from the AVCodecContext, rejects
 * option combinations the selected codec cannot produce, configures the
 * per-codec output format, the quantization matrices and rate control, and
 * runs the common MPEG context initialization.
 * @param avctx codec context whose priv_data is an MpegEncContext
 * @return 0 on success, -1 on invalid settings or initialization failure
 */
int MPV_encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i, dummy;
    int chroma_h_shift, chroma_v_shift;

    MPV_encode_defaults(s);

    avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME

    /* copy user settings into the private context */
    s->bit_rate = avctx->bit_rate;
    s->width = avctx->width;
    s->height = avctx->height;
    if(avctx->gop_size > 600){
        av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...\n");
        avctx->gop_size=600;
    }
    s->gop_size = avctx->gop_size;
    s->avctx = avctx;
    s->flags= avctx->flags;
    s->flags2= avctx->flags2;
    s->max_b_frames= avctx->max_b_frames;
    s->codec_id= avctx->codec->id;
    s->luma_elim_threshold  = avctx->luma_elim_threshold;
    s->chroma_elim_threshold= avctx->chroma_elim_threshold;
    s->strict_std_compliance= avctx->strict_std_compliance;
    s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
    s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
    s->mpeg_quant= avctx->mpeg_quant;
    s->rtp_mode= !!avctx->rtp_payload_size;
    s->intra_dc_precision= avctx->intra_dc_precision;

    /* gop_size <= 1 means every frame is a keyframe */
    if (s->gop_size <= 1) {
        s->intra_only = 1;
        s->gop_size = 12;
    } else {
        s->intra_only = 0;
    }

    s->me_method = avctx->me_method;

    /* Fixed QSCALE */
    s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);

    /* adaptive quantization is only meaningful when qscale is not fixed */
    s->adaptive_quant= (   s->avctx->lumi_masking
                        || s->avctx->dark_masking
                        || s->avctx->temporal_cplx_masking
                        || s->avctx->spatial_cplx_masking
                        || s->avctx->p_masking
                        || (s->flags&CODEC_FLAG_QP_RD))
                       && !s->fixed_qscale;

    s->obmc= !!(s->flags & CODEC_FLAG_OBMC);
    s->loop_filter= !!(s->flags & CODEC_FLAG_LOOP_FILTER);
    s->alternate_scan= !!(s->flags & CODEC_FLAG_ALT_SCAN);

    /* ---- reject option combinations the codecs cannot handle ---- */
    if(avctx->rc_max_rate && !avctx->rc_buffer_size){
        av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
        return -1;
    }

    if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){
        av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n");
    }

    if(   s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate
       && (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO)
       && 90000LL * (avctx->rc_buffer_size-1) > s->avctx->rc_max_rate*0xFFFFLL){

        av_log(avctx, AV_LOG_INFO, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the specified vbv buffer is too large for the given bitrate!\n");
    }

    if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4
       && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P && s->codec_id != CODEC_ID_FLV1){
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
        return -1;
    }

    if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){
        av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decission\n");
        return -1;
    }

    if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
        av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
        return -1;
    }

    if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
        return -1;
    }

    if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
        av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n");
        return -1;
    }

    if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
        av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
        return -1;
    }

    if((s->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN))
       && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO){
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
        return -1;
    }

    if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
        av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supporetd by codec\n");
        return -1;
    }

    if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
        return -1;
    }

    if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
        return -1;
    }

    if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){
        av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection arent supported yet\n");
        return -1;
    }

    if(s->avctx->thread_count > 1 && s->codec_id != CODEC_ID_MPEG4
       && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO
       && (s->codec_id != CODEC_ID_H263P || !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT))){
        av_log(avctx, AV_LOG_ERROR, "multi threaded encoding not supported by codec\n");
        return -1;
    }

    /* slicing is required so each thread can work on its own region */
    if(s->avctx->thread_count > 1)
        s->rtp_mode= 1;

    i= ff_gcd(avctx->frame_rate, avctx->frame_rate_base);
    if(i > 1){
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->frame_rate /= i;
        avctx->frame_rate_base /= i;
//        return -1;
    }

    /* default quantizer rounding bias, per codec family */
    if(s->codec_id==CODEC_ID_MJPEG){
        s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
        s->inter_quant_bias= 0;
    }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){
        s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
        s->inter_quant_bias= 0;
    }else{
        s->intra_quant_bias=0;
        s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
    }

    /* user-supplied biases override the defaults */
    if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias= avctx->intra_quant_bias;
    if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias= avctx->inter_quant_bias;

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);

    av_reduce(&s->time_increment_resolution, &dummy, s->avctx->frame_rate, s->avctx->frame_rate_base, (1<<16)-1);
    s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1;

    /* ---- per-codec output format and delay setup ---- */
    switch(avctx->codec->id) {
    case CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay= 0; //s->max_b_frames ? 0 : 1;
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay= 0; //s->max_b_frames ? 0 : 1;
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
        s->rtp_mode= 1;
        break;
    case CODEC_ID_LJPEG:
    case CODEC_ID_MJPEG:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        s->mjpeg_write_tables = 1; /* write all tables */
        s->mjpeg_data_only_frames = 0; /* write all the needed headers */
        s->mjpeg_vsample[0] = 1<<chroma_v_shift;
        s->mjpeg_vsample[1] = 1;
        s->mjpeg_vsample[2] = 1;
        s->mjpeg_hsample[0] = 1<<chroma_h_shift;
        s->mjpeg_hsample[1] = 1;
        s->mjpeg_hsample[2] = 1;
        if (mjpeg_init(s) < 0)
            return -1;
        avctx->delay=0;
        s->low_delay=1;
        break;
#ifdef CONFIG_RISKY
    case CODEC_ID_H263:
        if (h263_get_picture_format(s->width, s->height) == 7) {
            av_log(avctx, AV_LOG_INFO, "Input picture size isn't suitable for h263 codec! try h263+\n");
            return -1;
        }
        s->out_format = FMT_H263;
        s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->h263_plus = 1;
        /* Fx */
        s->umvplus = (avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
        s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
        s->modified_quant= s->h263_aic;
        s->alt_inter_vlc= (avctx->flags & CODEC_FLAG_H263P_AIV) ? 1:0;
        s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
        s->loop_filter= (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1:0;
        s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
        s->h263_slice_structured= (s->flags & CODEC_FLAG_H263P_SLICE_STRUCT) ? 1:0;

        /* /Fx */
        /* These are just to be sure */
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_FLV1:
        s->out_format = FMT_H263;
        s->h263_flv = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode=0; /* don't allow GOB */
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_RV10:
        s->out_format = FMT_H263;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_MPEG4:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->low_delay= s->max_b_frames ? 0 : 1;
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case CODEC_ID_MSMPEG4V1:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_MSMPEG4V2:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 2;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_MSMPEG4V3:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 3;
        s->flipflop_rounding=1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_WMV1:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 4;
        s->flipflop_rounding=1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_WMV2:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 5;
        s->flipflop_rounding=1;
        avctx->delay=0;
        s->low_delay=1;
        break;
#endif
    default:
        return -1;
    }

    avctx->has_b_frames= !s->low_delay;

    s->encoding = 1;

    /* init */
    if (MPV_common_init(s) < 0)
        return -1;

    if(s->modified_quant)
        s->chroma_qscale_table= ff_h263_chroma_qscale_table;
    s->progressive_frame=
    s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME));
    s->quant_precision=5;

    ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);

#ifdef CONFIG_ENCODERS
#ifdef CONFIG_RISKY
    if (s->out_format == FMT_H263)
        h263_encode_init(s);
    if(s->msmpeg4_version)
        ff_msmpeg4_encode_init(s);
#endif
    if (s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);
#endif

    /* init q matrix */
    /* note: the default matrices are stored in natural order but the context
       matrices must follow the IDCT permutation, hence the j remap */
    for(i=0;i<64;i++) {
        int j= s->dsp.idct_permutation[i];
#ifdef CONFIG_RISKY
        if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        }else if(s->out_format == FMT_H263){
            s->intra_matrix[j] =
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        }else
#endif
        { /* mpeg1/2 */
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        }
        if(s->avctx->intra_matrix)
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
        if(s->avctx->inter_matrix)
            s->inter_matrix[j] = s->avctx->inter_matrix[i];
    }

    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                       s->intra_matrix, s->intra_quant_bias, 1, 31);
        convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
                       s->inter_matrix, s->inter_quant_bias, 1, 31);
    }

    if(ff_rate_control_init(s) < 0)
        return -1;

    return 0;
}
1223
/**
 * Free all encoder resources.
 * Teardown order matters: rate control state first, then the common MPEG
 * context, then codec-specific state (mjpeg tables), then the extradata
 * buffer attached to the codec context.
 * @return always 0
 */
int MPV_encode_end(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

#ifdef STATS
    print_stats();
#endif

    ff_rate_control_uninit(s);

    MPV_common_end(s);
    if (s->out_format == FMT_MJPEG)
        mjpeg_close(s);

    av_freep(&avctx->extradata);

    return 0;
}
1242
1243 #endif //CONFIG_ENCODERS
1244
1245 void init_rl(RLTable *rl)
1246 {
1247 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
1248 uint8_t index_run[MAX_RUN+1];
1249 int last, run, level, start, end, i;
1250
1251 /* compute max_level[], max_run[] and index_run[] */
1252 for(last=0;last<2;last++) {
1253 if (last == 0) {
1254 start = 0;
1255 end = rl->last;
1256 } else {
1257 start = rl->last;
1258 end = rl->n;
1259 }
1260
1261 memset(max_level, 0, MAX_RUN + 1);
1262 memset(max_run, 0, MAX_LEVEL + 1);
1263 memset(index_run, rl->n, MAX_RUN + 1);
1264 for(i=start;i<end;i++) {
1265 run = rl->table_run[i];
1266 level = rl->table_level[i];
1267 if (index_run[run] == rl->n)
1268 index_run[run] = i;
1269 if (level > max_level[run])
1270 max_level[run] = level;
1271 if (run > max_run[level])
1272 max_run[level] = run;
1273 }
1274 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1275 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1276 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1277 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1278 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1279 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1280 }
1281 }
1282
1283 /* draw the edges of width 'w' of an image of size width, height */
1284 //FIXME check that this is ok for mpeg4 interlaced
/* Replicate the border pixels of a width x height image into a surrounding
 * margin of w pixels on every side (so motion vectors may reference samples
 * just outside the picture). 'wrap' is the line size of buf. */
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *last_line = buf + (height - 1) * wrap;
    uint8_t *row = buf;
    int i;

    /* replicate the first/last image rows above and below the picture */
    for (i = 0; i < w; i++) {
        memcpy(buf - (i + 1) * wrap, buf, width);
        memcpy(last_line + (i + 1) * wrap, last_line, width);
    }
    /* extend each row's first/last pixel to the left and right */
    for (i = 0; i < height; i++) {
        memset(row - w, row[0], w);
        memset(row + width, row[width - 1], w);
        row += wrap;
    }
    /* fill the four corner blocks with the corner pixels */
    for (i = 0; i < w; i++) {
        memset(buf - (i + 1) * wrap - w, buf[0], w);                         /* top left */
        memset(buf - (i + 1) * wrap + width, buf[width - 1], w);             /* top right */
        memset(last_line + (i + 1) * wrap - w, last_line[0], w);             /* bottom left */
        memset(last_line + (i + 1) * wrap + width, last_line[width - 1], w); /* bottom right */
    }
}
1311
1312 int ff_find_unused_picture(MpegEncContext *s, int shared){
1313 int i;
1314
1315 if(shared){
1316 for(i=0; i<MAX_PICTURE_COUNT; i++){
1317 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
1318 }
1319 }else{
1320 for(i=0; i<MAX_PICTURE_COUNT; i++){
1321 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
1322 }
1323 for(i=0; i<MAX_PICTURE_COUNT; i++){
1324 if(s->picture[i].data[0]==NULL) return i;
1325 }
1326 }
1327
1328 assert(0);
1329 return -1;
1330 }
1331
1332 static void update_noise_reduction(MpegEncContext *s){
1333 int intra, i;
1334
1335 for(intra=0; intra<2; intra++){
1336 if(s->dct_count[intra] > (1<<16)){
1337 for(i=0; i<64; i++){
1338 s->dct_error_sum[intra][i] >>=1;
1339 }
1340 s->dct_count[intra] >>= 1;
1341 }
1342
1343 for(i=0; i<64; i++){
1344 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1345 }
1346 }
1347 }
1348
1349 /**
1350 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1351 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    AVFrame *pic;
    s->mb_skiped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark&release old frames */
    if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
        avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);

        /* release forgotten pictures */
        /* if(mpeg124/h263) */
        if(!s->encoding){
            for(i=0; i<MAX_PICTURE_COUNT; i++){
                /* a referenced picture that is neither last nor next should not exist */
                if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                    av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                    avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
                }
            }
        }
    }
alloc:
    if(!s->encoding){
        /* release non-reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }

        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
            pic= (AVFrame*)s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
        else{
            i= ff_find_unused_picture(s, 0);
            pic= (AVFrame*)&s->picture[i];
        }

        /* only non-dropable non-B pictures serve as references */
        pic->reference= s->pict_type != B_TYPE && !s->dropable ? 3 : 0;

        pic->coded_picture_number= s->coded_picture_number++;

        if( alloc_picture(s, (Picture*)pic, 0) < 0)
            return -1;

        s->current_picture_ptr= (Picture*)pic;
        s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
    }

    s->current_picture_ptr->pict_type= s->pict_type;
//    if(s->flags && CODEC_FLAG_QSCALE)
//        s->current_picture_ptr->quality= s->new_picture_ptr->quality;
    s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;

    copy_picture(&s->current_picture, s->current_picture_ptr);

    if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
        /* rotate the reference pictures: the previous "next" becomes "last"
           and the current picture becomes the new "next" */
        if (s->pict_type != B_TYPE) {
            s->last_picture_ptr= s->next_picture_ptr;
            if(!s->dropable)
                s->next_picture_ptr= s->current_picture_ptr;
        }
/*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p  L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
        s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
        s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
        s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
        s->pict_type, s->dropable);*/

        if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
        if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);

        /* a P-frame without a reference: fake one by allocating again */
        if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL)){
            av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
            assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
            goto alloc;
        }

        assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

        /* for field pictures, double the line sizes so each field is
           addressed as a half-height frame */
        if(s->picture_structure!=PICT_FRAME){
            int i;
            for(i=0; i<4; i++){
                if(s->picture_structure == PICT_BOTTOM_FIELD){
                    s->current_picture.data[i] += s->current_picture.linesize[i];
                }
                s->current_picture.linesize[i] *= 2;
                s->last_picture.linesize[i] *=2;
                s->next_picture.linesize[i] *=2;
            }
        }
    }

    s->hurry_up= s->avctx->hurry_up;
    s->error_resilience= avctx->error_resilience;

    /* set dequantizer, we can't do it during init as it might change for mpeg4
       and we can't do it in the header decode as init isn't called for mpeg4 there yet */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

#ifdef HAVE_XVMC
    if(s->avctx->xvmc_acceleration)
        return XVMC_field_start(s, avctx);
#endif
    return 0;
}
1474
1475 /* generic function for encode/decode called after a frame has been coded/decoded */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* draw edge for correct motion prediction if outside */
#ifdef HAVE_XVMC
//just to make sure that all data is rendered.
    if(s->avctx->xvmc_acceleration){
        XVMC_field_end(s);
    }else
#endif
    /* only reference pictures need the replicated border; EMU_EDGE means the
       motion compensation emulates the edge itself */
    if(s->unrestricted_mv && s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
            draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
            draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
            draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
    }
    emms_c();

    s->last_pict_type    = s->pict_type;
    if(s->pict_type!=B_TYPE){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
        /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release non-reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
}
1523
1524 /**
 * draws a line from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the line
1530 */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int x, y, frac, step;

    /* clamp both endpoints into the picture */
    sx= clip(sx, 0, w-1);
    sy= clip(sy, 0, h-1);
    ex= clip(ex, 0, w-1);
    ey= clip(ey, 0, h-1);

    buf[sy*stride + sx]+= color;

    if(ABS(ex - sx) > ABS(ey - sy)){
        /* x-major: step along x, interpolate y in 16.16 fixed point */
        if(sx > ex){ /* make the walk go left -> right */
            int tmp;
            tmp=sx; sx=ex; ex=tmp;
            tmp=sy; sy=ey; ey=tmp;
        }
        buf+= sx + sy*stride;
        ex-= sx;
        step= ((ey-sy)<<16)/ex;
        for(x= 0; x <= ex; x++){
            y   = (x*step)>>16;
            frac= (x*step)&0xFFFF;
            /* distribute 'color' over the two vertically adjacent pixels */
            buf[ y   *stride + x]+= (color*(0x10000-frac))>>16;
            buf[(y+1)*stride + x]+= (color*         frac )>>16;
        }
    }else{
        /* y-major: step along y, interpolate x in 16.16 fixed point */
        if(sy > ey){ /* make the walk go top -> bottom */
            int tmp;
            tmp=sx; sx=ex; ex=tmp;
            tmp=sy; sy=ey; ey=tmp;
        }
        buf+= sx + sy*stride;
        ey-= sy;
        step= ey ? ((ex-sx)<<16)/ey : 0;
        for(y= 0; y <= ey; y++){
            x   = (y*step)>>16;
            frac= (y*step)&0xFFFF;
            /* distribute 'color' over the two horizontally adjacent pixels */
            buf[y*stride + x  ]+= (color*(0x10000-frac))>>16;
            buf[y*stride + x+1]+= (color*         frac )>>16;
        }
    }
}
1572
1573 /**
1574 * draws an arrow from (ex, ey) -> (sx, sy).
1575 * @param w width of the image
1576 * @param h height of the image
1577 * @param stride stride/linesize of the image
1578 * @param color color of the arrow
1579 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int vx, vy;

    /* allow some slack outside the picture; draw_line clips exactly */
    sx= clip(sx, -100, w+100);
    sy= clip(sy, -100, h+100);
    ex= clip(ex, -100, w+100);
    ey= clip(ey, -100, h+100);

    vx= ex - sx;
    vy= ey - sy;

    if(vx*vx + vy*vy > 3*3){
        /* (hx, hy) is the vector rotated by 45 degrees, scaled to a fixed
           head length, giving the two head strokes at the (sx, sy) end */
        int hx=  vx + vy;
        int hy= -vx + vy;
        int length= ff_sqrt((hx*hx + hy*hy)<<8);

        //FIXME subpixel accuracy
        hx= ROUNDED_DIV(hx*3<<4, length);
        hy= ROUNDED_DIV(hy*3<<4, length);

        draw_line(buf, sx, sy, sx + hx, sy + hy, w, h, stride, color);
        draw_line(buf, sx, sy, sx - hy, sy + hx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1605
1606 /**
1607 * prints debuging info for the given picture.
1608 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){

    if(!pict || !pict->mb_type) return;

    /* textual per-macroblock dump: skip count, QP and/or MB type */
    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
            case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
            case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
            case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
            case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
            case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
            case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
        }
        for(y=0; y<s->mb_height; y++){
            for(x=0; x<s->mb_width; x++){
                if(s->avctx->debug&FF_DEBUG_SKIP){
                    int count= s->mbskip_table[x + y*s->mb_stride];
                    if(count>9) count=9; /* single digit output */
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if(s->avctx->debug&FF_DEBUG_QP){
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
                }
                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
                    int mb_type= pict->mb_type[x + y*s->mb_stride];
                    //Type & MV direction
                    if(IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if(IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if(IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if(IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if(IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if(IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if(!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if(!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    //segmentation
                    if(IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if(IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if(IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "¦");
                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");


                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
//                av_log(s->avctx, AV_LOG_DEBUG, " ");
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* graphical overlay: motion vectors, QP and/or MB types drawn onto a
       private copy of the picture so the real buffers stay intact */
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
        const int shift= 1 + s->quarter_sample;
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift;
        s->low_delay=0; //needed to see the vectors without trashing the buffers

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
        for(i=0; i<3; i++){
            memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*s->height:pict->linesize[i]*s->height >> v_chroma_shift);
            pict->data[i]= s->visualization_buffer[i];
        }
        pict->type= FF_BUFFER_TYPE_COPY;
        ptr= pict->data[0];

        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            int mb_x;
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                const int mb_index= mb_x + mb_y*s->mb_stride;
                if((s->avctx->debug_mv) && pict->motion_val){
                  int type;
                  /* type 0: P forward, 1: B forward, 2: B backward */
                  for(type=0; type<3; type++){
                    int direction = 0;
                    switch (type) {
                      case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
                                continue;
                              direction = 0;
                              break;
                      case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
                                continue;
                              direction = 0;
                              break;
                      case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
                                continue;
                              direction = 1;
                              break;
                    }
                    if(!USES_LIST(pict->mb_type[mb_index], direction))
                        continue;

                    //FIXME for h264
                    if(IS_8X8(pict->mb_type[mb_index])){
                      int i;
                      /* one arrow per 8x8 block, anchored at the block center */
                      for(i=0; i<4; i++){
                        int sx= mb_x*16 + 4 + 8*(i&1);
                        int sy= mb_y*16 + 4 + 8*(i>>1);
                        int xy= mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*s->b8_stride;
                        int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                        int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                        draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
                      }
                    }else if(IS_16X8(pict->mb_type[mb_index])){
                      int i;
                      for(i=0; i<2; i++){
                        int sx=mb_x*16 + 8;
                        int sy=mb_y*16 + 4 + 8*i;
                        int xy= mb_x*2 + (mb_y*2 + i)*s->b8_stride;
                        int mx=(pict->motion_val[direction][xy][0]>>shift);
                        int my=(pict->motion_val[direction][xy][1]>>shift);

                        if(IS_INTERLACED(pict->mb_type[mb_index]))
                            my*=2;

                        draw_arrow(ptr, sx, sy, mx+sx, my+sy, s->width, s->height, s->linesize, 100);
                      }
                    }else{
                      int sx= mb_x*16 + 8;
                      int sy= mb_y*16 + 8;
                      int xy= mb_x*2 + mb_y*2*s->b8_stride;
                      int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                      int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                      draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
                    }
                  }
                }
                if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
                    /* map QP (1..31) to chroma brightness, replicated over 8 bytes */
                    uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
                    int y;
                    for(y=0; y<8; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= c;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= c;
                    }
                }
                if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
                    int mb_type= pict->mb_type[mb_index];
                    uint64_t u,v;
                    int y;
/* pick a chroma (u,v) pair on a circle of radius r at angle theta degrees */
#define COLOR(theta, r)\
u= (int)(128 + r*cos(theta*3.141592/180));\
v= (int)(128 + r*sin(theta*3.141592/180));


                    u=v=128;
                    if(IS_PCM(mb_type)){
                        COLOR(120,48)
                    }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
                        COLOR(30,48)
                    }else if(IS_INTRA4x4(mb_type)){
                        COLOR(90,48)
                    }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
//                        COLOR(120,48)
                    }else if(IS_DIRECT(mb_type)){
                        COLOR(150,48)
                    }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
                        COLOR(170,48)
                    }else if(IS_GMC(mb_type)){
                        COLOR(190,48)
                    }else if(IS_SKIP(mb_type)){
//                        COLOR(180,48)
                    }else if(!USES_LIST(mb_type, 1)){
                        COLOR(240,48)
                    }else if(!USES_LIST(mb_type, 0)){
                        COLOR(0,48)
                    }else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300,48)
                    }

                    u*= 0x0101010101010101ULL;
                    v*= 0x0101010101010101ULL;
                    for(y=0; y<8; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= u;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= v;
                    }

                    //segmentation
                    if(IS_8X8(mb_type) || IS_16X8(mb_type)){
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                    }
                    if(IS_8X8(mb_type) || IS_8X16(mb_type)){
                        for(y=0; y<16; y++)
                            pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
                    }

                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
                        // hmm
                    }
                }
                s->mbskip_table[mb_index]=0;
            }
        }
    }
}
1834
1835 #ifdef CONFIG_ENCODERS
1836
/**
 * Sum of absolute differences between a 16x16 block and a constant value.
 * @param src top-left sample of the 16x16 block
 * @param ref constant reference value compared against every sample
 * @param stride line size of src in bytes
 * @return the accumulated absolute error
 */
static int get_sae(uint8_t *src, int ref, int stride){
    int x, y;
    int acc = 0;

    for(y=0; y<16; y++){
        for(x=0; x<16; x++){
            const int diff = src[x + y*stride] - ref;
            acc += diff >= 0 ? diff : -diff;
        }
    }

    return acc;
}
1849
1850 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
1851 int x, y, w, h;
1852 int acc=0;
1853
1854 w= s->width &~15;
1855 h= s->height&~15;
1856
1857 for(y=0; y<h; y+=16){
1858 for(x=0; x<w; x+=16){
1859 int offset= x + y*stride;
1860 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride, 16);
1861 int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
1862 int sae = get_sae(src + offset, mean, stride);
1863
1864 acc+= sae + 500 < sad;
1865 }
1866 }
1867 return acc;
1868 }
1869
1870
/**
 * Queues one user-supplied frame for encoding.
 * Either wraps the caller's buffers directly ("direct" mode, when strides
 * match and the input is preserved across the B-frame delay) or copies the
 * frame into an internally allocated Picture, then appends it to
 * s->input_picture[] at index encoding_delay.
 * @param s encoder context
 * @param pic_arg frame from the user, or NULL while flushing delayed frames
 * @return always 0
 */
static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
    AVFrame *pic=NULL;
    int i;
    /* with B-frames the input must survive until it is actually coded */
    const int encoding_delay= s->max_b_frames;
    int direct=1;

    if(pic_arg){
        /* direct use of the user's buffers is only possible if they are
           preserved long enough and the strides match ours exactly */
        if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
        if(pic_arg->linesize[0] != s->linesize) direct=0;
        if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
        if(pic_arg->linesize[2] != s->uvlinesize) direct=0;

//    av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);

        if(direct){
            i= ff_find_unused_picture(s, 1);

            pic= (AVFrame*)&s->picture[i];
            pic->reference= 3;

            /* borrow the user's plane pointers instead of copying */
            for(i=0; i<4; i++){
                pic->data[i]= pic_arg->data[i];
                pic->linesize[i]= pic_arg->linesize[i];
            }
            alloc_picture(s, (Picture*)pic, 1);
        }else{
            int offset= 16; /* skip the left edge padding of internal buffers */
            i= ff_find_unused_picture(s, 0);

            pic= (AVFrame*)&s->picture[i];
            pic->reference= 3;

            alloc_picture(s, (Picture*)pic, 0);

            if(   pic->data[0] + offset == pic_arg->data[0]
               && pic->data[1] + offset == pic_arg->data[1]
               && pic->data[2] + offset == pic_arg->data[2]){
       // empty: the user handed back our own buffer, nothing to copy
            }else{
                int h_chroma_shift, v_chroma_shift;
                avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);

                /* copy all 3 planes, line by line if the strides differ */
                for(i=0; i<3; i++){
                    int src_stride= pic_arg->linesize[i];
                    int dst_stride= i ? s->uvlinesize : s->linesize;
                    int h_shift= i ? h_chroma_shift : 0;
                    int v_shift= i ? v_chroma_shift : 0;
                    int w= s->width >>h_shift;
                    int h= s->height>>v_shift;
                    uint8_t *src= pic_arg->data[i];
                    uint8_t *dst= pic->data[i] + offset;

                    if(src_stride==dst_stride)
                        memcpy(dst, src, src_stride*h);
                    else{
                        while(h--){
                            memcpy(dst, src, w);
                            dst += dst_stride;
                            src += src_stride;
                        }
                    }
                }
            }
        }
        copy_picture_attributes(s, pic, pic_arg);

        pic->display_picture_number= s->input_picture_number++;
        if(pic->pts != AV_NOPTS_VALUE){
            /* remember the last user-provided pts as a base for guessing */
            s->user_specified_pts= pic->pts;
        }else{
            if(s->user_specified_pts){
                /* extrapolate from the last known pts by one frame duration */
                pic->pts= s->user_specified_pts + AV_TIME_BASE*(int64_t)s->avctx->frame_rate_base / s->avctx->frame_rate;
                /* NOTE(review): "%Ld" is a non-standard printf length modifier — verify against the project's av_log conventions */
                av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%Ld)\n", pic->pts);
            }else{
                /* no pts seen yet: synthesize one from the display order */
                pic->pts= av_rescale(pic->display_picture_number*(int64_t)s->avctx->frame_rate_base, AV_TIME_BASE, s->avctx->frame_rate);
            }
        }
    }

    /* shift buffer entries */
    for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
        s->input_picture[i-1]= s->input_picture[i];

    s->input_picture[encoding_delay]= (Picture*)pic;

    return 0;
}
1958
/**
 * Selects the next picture to encode and reorders input into coded order.
 * Decides the pict_type (I/P/B) of the buffered input frames — honouring
 * 2-pass stats (CODEC_FLAG_PASS2), user-forced types, b_frame_strategy,
 * gop_size and CODEC_FLAG_CLOSED_GOP — then prepares s->new_picture and
 * s->current_picture(_ptr) for the actual encode.
 */
static void select_input_picture(MpegEncContext *s){
    int i;

    /* advance the coded-order queue by one slot */
    for(i=1; i<MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;

    /* set next picture types & ordering */
    if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
        if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
            /* no reference available yet (or intra-only mode): code an I-frame */
            s->reordered_input_picture[0]= s->input_picture[0];
            s->reordered_input_picture[0]->pict_type= I_TYPE;
            s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
        }else{
            int b_frames;

            if(s->flags&CODEC_FLAG_PASS2){
                /* 2nd pass: take the picture types recorded in the 1st pass */
                for(i=0; i<s->max_b_frames+1; i++){
                    int pict_num= s->input_picture[0]->display_picture_number + i;
                    int pict_type= s->rc_context.entry[pict_num].new_pict_type;
                    s->input_picture[i]->pict_type= pict_type;

                    if(i + 1 >= s->rc_context.num_entries) break;
                }
            }

            if(s->input_picture[0]->pict_type){
                /* user selected pict_type: count the leading run of B-frames */
                for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
                    if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
                }

                if(b_frames > s->max_b_frames){
                    av_log(s->avctx, AV_LOG_ERROR, "warning, too many bframes in a row\n");
                    b_frames = s->max_b_frames;
                }
            }else if(s->avctx->b_frame_strategy==0){
                /* fixed strategy: always use the maximum number of B-frames */
                b_frames= s->max_b_frames;
                while(b_frames && !s->input_picture[b_frames]) b_frames--;
            }else if(s->avctx->b_frame_strategy==1){
                /* adaptive strategy: score candidate B-frames by how "intra"
                   they look relative to their predecessor */
                for(i=1; i<s->max_b_frames+1; i++){
                    if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
                        s->input_picture[i]->b_frame_score=
                            get_intra_count(s, s->input_picture[i  ]->data[0],
                                               s->input_picture[i-1]->data[0], s->linesize) + 1;
                    }
                }
                for(i=0; i<s->max_b_frames; i++){
                    if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
                }

                b_frames= FFMAX(0, i-1);

                /* reset scores */
                for(i=0; i<b_frames+1; i++){
                    s->input_picture[i]->b_frame_score=0;
                }
            }else{
                av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
                b_frames=0;
            }

            emms_c();
//static int b_count=0;
//b_count+= b_frames;
//av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
            /* force an I-frame at the GOP boundary */
            if(s->picture_in_gop_number + b_frames >= s->gop_size){
                if(s->flags & CODEC_FLAG_CLOSED_GOP)
                    b_frames=0;
                s->input_picture[b_frames]->pict_type= I_TYPE;
            }

            /* in a closed GOP no B-frame may reference across an I-frame */
            if(   (s->flags & CODEC_FLAG_CLOSED_GOP)
               && b_frames
               && s->input_picture[b_frames]->pict_type== I_TYPE)
                b_frames--;

            /* emit the anchor frame first, then its preceding B-frames */
            s->reordered_input_picture[0]= s->input_picture[b_frames];
            if(s->reordered_input_picture[0]->pict_type != I_TYPE)
                s->reordered_input_picture[0]->pict_type= P_TYPE;
            s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
            for(i=0; i<b_frames; i++){
                s->reordered_input_picture[i+1]= s->input_picture[i];
                s->reordered_input_picture[i+1]->pict_type= B_TYPE;
                s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
            }
        }
    }

    if(s->reordered_input_picture[0]){
        /* B-frames are never used as references */
        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;

        copy_picture(&s->new_picture, s->reordered_input_picture[0]);

        if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
            // input is a shared pix, so we can't modify it -> alloc a new one & ensure that the shared one is reusable

            int i= ff_find_unused_picture(s, 0);
            Picture *pic= &s->picture[i];

            /* mark us unused / free shared pic */
            for(i=0; i<4; i++)
                s->reordered_input_picture[0]->data[i]= NULL;
            s->reordered_input_picture[0]->type= 0;

            pic->reference              = s->reordered_input_picture[0]->reference;

            alloc_picture(s, pic, 0);

            copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);

            s->current_picture_ptr= pic;
        }else{
            // input is not a shared pix -> reuse buffer for current_pix

            assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
                   || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);

            s->current_picture_ptr= s->reordered_input_picture[0];
            /* skip the 16-byte edge offset of the internal buffer */
            for(i=0; i<4; i++){
                s->new_picture.data[i]+=16;
            }
        }
        copy_picture(&s->current_picture, s->current_picture_ptr);

        s->picture_number= s->new_picture.display_picture_number;
//printf("dpn:%d\n", s->picture_number);
    }else{
        /* nothing left to encode */
        memset(&s->new_picture, 0, sizeof(Picture));
    }
}
2090
/**
 * Encodes one video frame.
 * Queues the input, selects/reorders the next picture, runs the actual
 * encode, performs VBV stuffing and updates the mpeg1/2 vbv_delay for CBR.
 * @param avctx codec context
 * @param buf output buffer for the coded bitstream
 * @param buf_size size of buf in bytes
 * @param data input AVFrame, or NULL while flushing delayed frames
 * @return number of bytes written, or -1 on unsupported pixel format
 */
int MPV_encode_picture(AVCodecContext *avctx,
                       unsigned char *buf, int buf_size, void *data)
{
    MpegEncContext *s = avctx->priv_data;
    AVFrame *pic_arg = data;
    int i, stuffing_count;

    if(avctx->pix_fmt != PIX_FMT_YUV420P){
        av_log(avctx, AV_LOG_ERROR, "this codec supports only YUV420P\n");
        return -1;
    }

    /* split the output buffer between slice threads, proportionally to
       the number of macroblock rows each thread encodes */
    for(i=0; i<avctx->thread_count; i++){
        int start_y= s->thread_context[i]->start_mb_y;
        int end_y= s->thread_context[i]-> end_mb_y;
        int h= s->mb_height;
        uint8_t *start= buf + buf_size*start_y/h;
        uint8_t *end  = buf + buf_size* end_y/h;

        init_put_bits(&s->thread_context[i]->pb, start, end - start);
    }

    s->picture_in_gop_number++;

    load_input_picture(s, pic_arg);

    select_input_picture(s);

    /* output? */
    if(s->new_picture.data[0]){
        s->pict_type= s->new_picture.pict_type;
//emms_c();
//printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
        MPV_frame_start(s, avctx);

        encode_picture(s, s->picture_number);

        /* export per-frame statistics */
        avctx->real_pict_num  = s->picture_number;
        avctx->header_bits = s->header_bits;
        avctx->mv_bits     = s->mv_bits;
        avctx->misc_bits   = s->misc_bits;
        avctx->i_tex_bits  = s->i_tex_bits;
        avctx->p_tex_bits  = s->p_tex_bits;
        avctx->i_count     = s->i_count;
        avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
        avctx->skip_count  = s->skip_count;

        MPV_frame_end(s);

        if (s->out_format == FMT_MJPEG)
            mjpeg_picture_trailer(s);

        if(s->flags&CODEC_FLAG_PASS1)
            ff_write_pass1_stats(s);

        for(i=0; i<4; i++){
            avctx->error[i] += s->current_picture_ptr->error[i];
        }

        flush_put_bits(&s->pb);
        s->frame_bits  = put_bits_count(&s->pb);

        /* pad the frame with codec-specific stuffing if the VBV model
           requires it (buffer would otherwise overflow) */
        stuffing_count= ff_vbv_update(s, s->frame_bits);
        if(stuffing_count){
            switch(s->codec_id){
            case CODEC_ID_MPEG1VIDEO:
            case CODEC_ID_MPEG2VIDEO:
                /* mpeg1/2 stuffing: plain zero bytes */
                while(stuffing_count--){
                    put_bits(&s->pb, 8, 0);
                }
            break;
            case CODEC_ID_MPEG4:
                /* mpeg4 stuffing: a 4-byte stuffing start code + 0xFF bytes */
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while(stuffing_count--){
                    put_bits(&s->pb, 8, 0xFF);
                }
            break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
            }
            flush_put_bits(&s->pb);
            s->frame_bits  = put_bits_count(&s->pb);
        }

        /* update mpeg1/2 vbv_delay for CBR */
        if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && s->out_format == FMT_MPEG1
           && 90000LL * (avctx->rc_buffer_size-1) <= s->avctx->rc_max_rate*0xFFFFLL){
            int vbv_delay;

            assert(s->repeat_first_field==0);

            /* vbv_delay in 90kHz clock ticks */
            vbv_delay= lrintf(90000 * s->rc_context.buffer_index / s->avctx->rc_max_rate);
            assert(vbv_delay < 0xFFFF);

            /* patch the 16-bit vbv_delay field already written in the
               picture header (it straddles 3 bytes) */
            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay>>13;
            s->vbv_delay_ptr[1]  = vbv_delay>>5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay<<3;
        }
        s->total_bits += s->frame_bits;
        avctx->frame_bits  = s->frame_bits;
    }else{
        /* nothing was coded: the bitstream writer must be untouched */
        assert((pbBufPtr(&s->pb) == s->pb.buf));
        s->frame_bits=0;
    }
    assert((s->frame_bits&7)==0);

    return s->frame_bits/8;
}
2203
2204 #endif //CONFIG_ENCODERS
2205
/**
 * Global motion compensation with a single warp point (purely translational
 * sprite offset). Applies s->sprite_offset to the luma plane and both
 * chroma planes of one macroblock.
 * @param s codec context (sprite offsets, warping accuracy, edge positions)
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param ref_picture array[3] of pointers to the reference picture planes
 */
static inline void gmc1_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               uint8_t **ref_picture)
{
    uint8_t *ptr;
    int offset, src_x, src_y, linesize, uvlinesize;
    int motion_x, motion_y;
    int emu=0;

    /* integer part of the offset selects the source position; the
       remaining bits are rescaled to 1/16-pel for the gmc1 filter */
    motion_x= s->sprite_offset[0][0];
    motion_y= s->sprite_offset[0][1];
    src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x =0;
    src_y = clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y =0;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + (src_y * linesize) + src_x;

    if(s->flags&CODEC_FLAG_EMU_EDGE){
        /* 17x17 source area needed because of the fractional offset */
        if(   (unsigned)src_x >= s->h_edge_pos - 17
           || (unsigned)src_y >= s->v_edge_pos - 17){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
            ptr= s->edge_emu_buffer;
        }
    }

    if((motion_x|motion_y)&7){
        /* fractional offset: run the gmc1 interpolation filter on both
           8-pixel-wide halves of the macroblock */
        s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
        s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
    }else{
        int dxy;

        /* offset is a multiple of 8 in 1/16-pel, i.e. halfpel: use the
           normal halfpel copy functions */
        dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
        if (s->no_rounding){
            s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }else{
            s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if(s->flags&CODEC_FLAG_GRAY) return;

    /* same procedure for chroma, with the second sprite offset and
       half-resolution coordinates */
    motion_x= s->sprite_offset[1][0];
    motion_y= s->sprite_offset[1][1];
    src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = clip(src_x, -8, s->width>>1);
    if (src_x == s->width>>1)
        motion_x =0;
    src_y = clip(src_y, -8, s->height>>1);
    if (src_y == s->height>>1)
        motion_y =0;

    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x >= (s->h_edge_pos>>1) - 9
           || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
            emu=1; /* cr needs the same emulation as cb */
        }
    }
    s->dsp.gmc1(dest_cb, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    s->dsp.gmc1(dest_cr, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);

    return;
}
2291
/**
 * Global motion compensation with a full affine warp (more than one warp
 * point). Evaluates the affine transform given by s->sprite_offset and
 * s->sprite_delta at the current macroblock position and lets the dsp.gmc
 * function do the per-pixel warping for luma and both chroma planes.
 * @param s codec context (sprite parameters, warping accuracy, edges)
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param ref_picture array[3] of pointers to the reference picture planes
 */
static inline void gmc_motion(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              uint8_t **ref_picture)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a= s->sprite_warping_accuracy;
    int ox, oy;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    /* warped source coordinates of the macroblock's top-left luma sample */
    ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
    oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;

    /* luma is warped in two 8-pixel-wide halves; the second call advances
       the origin by 8 samples along the x axis of the transform */
    s->dsp.gmc(dest_y, ptr, linesize, 16,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos, s->v_edge_pos);
    s->dsp.gmc(dest_y+8, ptr, linesize, 16,
           ox + s->sprite_delta[0][0]*8,
           oy + s->sprite_delta[1][0]*8,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos, s->v_edge_pos);

    if(s->flags&CODEC_FLAG_GRAY) return;

    /* chroma uses the second sprite offset and half-resolution positions */
    ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
    oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;

    ptr = ref_picture[1];
    s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos>>1, s->v_edge_pos>>1);

    ptr = ref_picture[2];
    s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos>>1, s->v_edge_pos>>1);
}
2347
/**
 * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
 * The source pointer must already point at the (possibly out-of-bounds)
 * top-left sample of the requested block; samples outside the w x h source
 * image are filled by replicating the nearest edge sample.
 * @param buf destination buffer
 * @param src source buffer
 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
 * @param block_w width of block
 * @param block_h height of block
 * @param src_x x coordinate of the top left sample of the block in the source buffer
 * @param src_y y coordinate of the top left sample of the block in the source buffer
 * @param w width of the source buffer
 * @param h height of the source buffer
 */
void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
                                    int src_x, int src_y, int w, int h){
    int x, y;
    int y0, x0, y1, x1;

    /* if the block lies completely outside the image, move the origin so
       that at least one sample row/column overlaps */
    if (src_y >= h) {
        src  += (h - 1 - src_y) * linesize;
        src_y = h - 1;
    } else if (src_y <= -block_h) {
        src  += (1 - block_h - src_y) * linesize;
        src_y = 1 - block_h;
    }
    if (src_x >= w) {
        src  += w - 1 - src_x;
        src_x = w - 1;
    } else if (src_x <= -block_w) {
        src  += 1 - block_w - src_x;
        src_x = 1 - block_w;
    }

    /* part of the block that is covered by real source samples */
    y0 = -src_y > 0 ? -src_y : 0;
    x0 = -src_x > 0 ? -src_x : 0;
    y1 = h - src_y < block_h ? h - src_y : block_h;
    x1 = w - src_x < block_w ? w - src_x : block_w;

    /* copy the existing part */
    for (y = y0; y < y1; y++) {
        for (x = x0; x < x1; x++) {
            buf[x + y * linesize] = src[x + y * linesize];
        }
    }

    /* replicate the first valid row upwards */
    for (y = 0; y < y0; y++) {
        for (x = x0; x < x1; x++) {
            buf[x + y * linesize] = buf[x + y0 * linesize];
        }
    }

    /* replicate the last valid row downwards */
    for (y = y1; y < block_h; y++) {
        for (x = x0; x < x1; x++) {
            buf[x + y * linesize] = buf[x + (y1 - 1) * linesize];
        }
    }

    /* replicate the leftmost/rightmost valid columns sideways */
    for (y = 0; y < block_h; y++) {
        for (x = 0; x < x0; x++) {
            buf[x + y * linesize] = buf[x0 + y * linesize];
        }
        for (x = x1; x < block_w; x++) {
            buf[x + y * linesize] = buf[x1 - 1 + y * linesize];
        }
    }
}
2418
/**
 * Halfpel motion compensation of one w x h block.
 * @param s codec context
 * @param dest destination pointer
 * @param src top-left of the reference plane
 * @param field_based 1 if motion refers to one field of an interlaced frame
 * @param field_select which field of the source to use
 * @param src_x/src_y block position in full-pel units (motion added on top)
 * @param width/height clipping bounds for the source position
 * @param stride line size used for both source and destination
 * @param h_edge_pos/v_edge_pos edge positions for edge emulation
 * @param w/h block dimensions
 * @param pix_op halfpel copy/average functions indexed by the dxy subpel code
 * @param motion_x/motion_y motion vector in halfpel units
 * @return 1 if the edge emulation buffer was used, 0 otherwise
 */
static inline int hpel_motion(MpegEncContext *s,
                              uint8_t *dest, uint8_t *src,
                              int field_based, int field_select,
                              int src_x, int src_y,
                              int width, int height, int stride,
                              int h_edge_pos, int v_edge_pos,
                              int w, int h, op_pixels_func *pix_op,
                              int motion_x, int motion_y)
{
    int dxy;
    int emu=0;

    /* dxy: bit0 = horizontal halfpel, bit1 = vertical halfpel */
    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do no forget half pels */
    src_x = clip(src_x, -16, width); //FIXME unneeded for emu?
    if (src_x == width)
        dxy &= ~1; /* clipped to the right edge: drop horizontal halfpel */
    src_y = clip(src_y, -16, height);
    if (src_y == height)
        dxy &= ~2; /* clipped to the bottom edge: drop vertical halfpel */
    src += src_y * stride + src_x;

    if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
        if(   (unsigned)src_x > h_edge_pos - (motion_x&1) - w
           || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
            /* NOTE(review): this passes the h_edge_pos parameter but the
               s->v_edge_pos field (not the v_edge_pos parameter) — confirm
               this asymmetry is intended for field-based calls */
            ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                             src_x, src_y<<field_based, h_edge_pos, s->v_edge_pos);
            src= s->edge_emu_buffer;
            emu=1;
        }
    }
    if(field_select)
        src += s->linesize;
    pix_op[dxy](dest, src, stride, h);
    return emu;
}
2458
/* apply one mpeg motion vector to the three components */
/**
 * Halfpel motion compensation of one macroblock (luma + both chroma planes).
 * Chroma subpel position is derived from the luma vector in a
 * codec-dependent way (H.263-style rounding vs mpeg-style division).
 * @param s codec context
 * @param dest_y/dest_cb/dest_cr destination pointers
 * @param field_based 1 when compensating one field of an interlaced frame
 * @param bottom_field write to the bottom field of the destination
 * @param field_select read from the bottom field of the source
 * @param ref_picture array[3] of pointers to the reference planes
 * @param pix_op halfpel copy/average function tables ([plane size][dxy])
 * @param motion_x/motion_y luma motion vector in halfpel units
 * @param h height of the block in lines
 */
static inline void mpeg_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, uvlinesize, linesize;

#if 0
if(s->quarter_sample)
{
    motion_x>>=1;
    motion_y>>=1;
}
#endif

    /* field-based MC works on every other line */
    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x* 16               + (motion_x >> 1);
    src_y = s->mb_y*(16>>field_based) + (motion_y >> 1);

    if (s->out_format == FMT_H263) {
        if((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based){
            /* buggy encoders rounded the chroma vector like this */
            mx = (motion_x>>1)|(motion_x&1);
            my = motion_y >>1;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x* 8               + (mx >> 1);
            uvsrc_y = s->mb_y*(8>>field_based) + (my >> 1);
        }else{
            /* H.263: chroma subpel position copied straight from luma */
            uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x>>1;
            uvsrc_y = src_y>>1;
        }
    } else {
        /* mpeg-style: chroma vector is the luma vector halved */
        mx = motion_x / 2;
        my = motion_y / 2;
        uvdxy = ((my & 1) << 1) | (mx & 1);
        uvsrc_x = s->mb_x* 8               + (mx >> 1);
        uvsrc_y = s->mb_y*(8>>field_based) + (my >> 1);
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* source area (17x(17+field) for luma) reaches outside the picture:
       build an edge-replicated copy and read from that instead */
    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16
       || (unsigned)src_y >    v_edge_pos - (motion_y&1) - h){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                ff_emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
                ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if(!(s->flags&CODEC_FLAG_GRAY)){
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
    }
}
//FIXME move to dsputil, avg variant, 16x16 version
/**
 * Overlapped block motion compensation for one 8x8 block: every output
 * sample is a fixed weighted average of the predictions obtained with the
 * mid, top, left, right and bottom block motion vectors.
 * @param dst destination of the blended 8x8 block
 * @param src array of 5 predictions: {mid, top, left, right, bottom}
 * @param stride line size of dst and all 5 predictions
 */
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
    /* weight table entries are {top, left, mid, right, bottom}; each set
       sums to 8 and the result is rounded with +4 and shifted by 3 */
    static const uint8_t obmc_weight[8][8][5]={
        {{2,2,4,0,0},{2,1,5,0,0},{2,1,5,0,0},{2,1,5,0,0},{2,0,5,1,0},{2,0,5,1,0},{2,0,5,1,0},{2,0,4,2,0}},
        {{1,2,5,0,0},{1,2,5,0,0},{2,1,5,0,0},{2,1,5,0,0},{2,0,5,1,0},{2,0,5,1,0},{1,0,5,2,0},{1,0,5,2,0}},
        {{1,2,5,0,0},{1,2,5,0,0},{1,1,6,0,0},{1,1,6,0,0},{1,0,6,1,0},{1,0,6,1,0},{1,0,5,2,0},{1,0,5,2,0}},
        {{1,2,5,0,0},{1,2,5,0,0},{1,1,6,0,0},{1,1,6,0,0},{1,0,6,1,0},{1,0,6,1,0},{1,0,5,2,0},{1,0,5,2,0}},
        {{0,2,5,0,1},{0,2,5,0,1},{0,1,6,0,1},{0,1,6,0,1},{0,0,6,1,1},{0,0,6,1,1},{0,0,5,2,1},{0,0,5,2,1}},
        {{0,2,5,0,1},{0,2,5,0,1},{0,1,6,0,1},{0,1,6,0,1},{0,0,6,1,1},{0,0,6,1,1},{0,0,5,2,1},{0,0,5,2,1}},
        {{0,2,5,0,1},{0,2,5,0,1},{0,1,5,0,2},{0,1,5,0,2},{0,0,5,1,2},{0,0,5,1,2},{0,0,5,2,1},{0,0,5,2,1}},
        {{0,2,4,0,2},{0,1,5,0,2},{0,1,5,0,2},{0,1,5,0,2},{0,0,5,1,2},{0,0,5,1,2},{0,0,5,1,2},{0,0,4,2,2}},
    };
    uint8_t * const mid   = src[0];
    uint8_t * const top   = src[1];
    uint8_t * const left  = src[2];
    uint8_t * const right = src[3];
    uint8_t * const bottom= src[4];
    int x, y;

    for(y=0; y<8; y++){
        for(x=0; x<8; x++){
            const uint8_t *w= obmc_weight[y][x];
            const int i= x + y*stride;

            dst[i]= (w[0]*top[i] + w[1]*left[i] + w[2]*mid[i] + w[3]*right[i] + w[4]*bottom[i] + 4)>>3;
        }
    }
}
2595
/* obmc for 1 8x8 luma block */
/**
 * Builds the 5 halfpel predictions (mid, top, left, right, bottom MV) for
 * one 8x8 luma block and blends them with put_obmc(). Predictions whose
 * vector equals the mid vector reuse the mid prediction buffer.
 * @param s codec context (provides the obmc scratchpad and edge positions)
 * @param dest destination of the blended block
 * @param src reference luma plane
 * @param src_x/src_y full-pel position of the block
 * @param pix_op halfpel copy functions
 * @param mv the 5 motion vectors, order: mid top left right bottom
 */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
                               int16_t mv[5][2]/* mid top left right bottom*/)
#define MID    0
{
    int i;
    uint8_t *ptr[5];

    assert(s->quarter_sample==0);

    for(i=0; i<5; i++){
        if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){
            /* same vector as the mid block: reuse its prediction */
            ptr[i]= ptr[MID];
        }else{
            /* each prediction gets an 8x8 slot in the scratchpad */
            ptr[i]= s->obmc_scratchpad + 8*(i&1) + s->linesize*8*(i>>1);
            hpel_motion(s, ptr[i], src, 0, 0,
                        src_x, src_y,
                        s->width, s->height, s->linesize,
                        s->h_edge_pos, s->v_edge_pos,
                        8, 8, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}
2625
/**
 * Quarterpel motion compensation of one macroblock (luma qpel, chroma
 * halfpel derived from the luma vector, with optional buggy-encoder
 * rounding workarounds).
 * @param s codec context
 * @param dest_y/dest_cb/dest_cr destination pointers
 * @param field_based 1 when compensating one field of an interlaced frame
 * @param bottom_field write to the bottom field of the destination
 * @param field_select read from the bottom field of the source
 * @param ref_picture array[3] of pointers to the reference planes
 * @param pix_op halfpel functions (used for chroma)
 * @param qpix_op quarterpel functions (used for luma)
 * @param motion_x/motion_y luma motion vector in quarterpel units
 * @param h height of the block in lines
 */
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, linesize, uvlinesize;

    /* dxy: 4-bit quarterpel sub-position code */
    dxy = ((motion_y & 3) << 2) | (motion_x & 3);
    src_x = s->mb_x *  16                 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;

    /* derive the halfpel chroma vector, honouring known encoder bugs */
    if(field_based){
        mx= motion_x/2;
        my= motion_y>>1;
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
        static const int rtab[8]= {0,0,1,1,0,0,0,1};
        mx= (motion_x>>1) + rtab[motion_x&7];
        my= (motion_y>>1) + rtab[motion_y&7];
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
        mx= (motion_x>>1)|(motion_x&1);
        my= (motion_y>>1)|(motion_y&1);
    }else{
        mx= motion_x/2;
        my= motion_y/2;
    }
    mx= (mx>>1)|(mx&1);
    my= (my>>1)|(my&1);

    uvdxy= (mx&1) | ((my&1)<<1);
    mx>>=1;
    my>>=1;

    uvsrc_x = s->mb_x *  8                 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] +   src_y *   linesize +   src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* source area reaches outside the picture: edge-emulate all planes */
    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16
       || (unsigned)src_y >    v_edge_pos - (motion_y&3) - h  ){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                         src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
        ptr_y= s->edge_emu_buffer;
        if(!(s->flags&CODEC_FLAG_GRAY)){
            uint8_t *uvbuf= s->edge_emu_buffer + 18*s->linesize;
            ff_emulated_edge_mc(uvbuf, ptr_cb, s->uvlinesize, 9, 9 + field_based,
                             uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ff_emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9, 9 + field_based,
                             uvsrc_x, uvsrc_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr_cb= uvbuf;
            ptr_cr= uvbuf + 16;
        }
    }

    if(!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else{
        if(bottom_field){
            dest_y += s->linesize;
            dest_cb+= s->uvlinesize;
            dest_cr+= s->uvlinesize;
        }

        if(field_select){
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        //damn interlaced mode
        //FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y  , ptr_y  , linesize);
        qpix_op[1][dxy](dest_y+8, ptr_y+8, linesize);
    }
    /* note: cr is written before cb here; the two calls are independent */
    if(!(s->flags&CODEC_FLAG_GRAY)){
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}
2712
2713 inline int ff_h263_round_chroma(int x){
2714 if (x >= 0)
2715 return (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2716 else {
2717 x = -x;
2718 return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2719 }
2720 }
2721
/**
 * h263 chroma 4mv motion compensation.
 * Averages the four 8x8 luma vectors into one chroma vector using the
 * H.263 rounding table, then compensates both chroma planes.
 * @param s codec context
 * @param dest_cb/dest_cr chroma destination pointers
 * @param ref_picture array[3] of pointers to the reference planes
 * @param pix_op halfpel copy/average functions
 * @param mx/my sum of the four luma motion vector components
 */
static inline void chroma_4mv_motion(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     op_pixels_func *pix_op,
                                     int mx, int my){
    int dxy, emu=0, src_x, src_y, offset;
    uint8_t *ptr;

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx= ff_h263_round_chroma(mx);
    my= ff_h263_round_chroma(my);

    dxy = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = clip(src_x, -8, s->width/2);
    if (src_x == s->width/2)
        dxy &= ~1; /* clipped to the right edge: drop horizontal halfpel */
    src_y = clip(src_y, -8, s->height/2);
    if (src_y == s->height/2)
        dxy &= ~2; /* clipped to the bottom edge: drop vertical halfpel */

    offset = (src_y * (s->uvlinesize)) + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8
           || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
            emu=1; /* cr will need the same emulation as cb */
        }
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}
2770
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
{
    int dxy, mx, my, src_x, src_y, motion_x, motion_y;
    int mb_x, mb_y, i;
    uint8_t *ptr, *dest;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    if(s->obmc && s->pict_type != B_TYPE){
        /* overlapped block motion compensation: build a 4x4 cache of the
           neighbouring 8x8 motion vectors around this macroblock (rows/cols
           0 and 3 hold the neighbours, 1-2 the MB's own vectors), replicating
           edge or intra neighbours, then blend each 8x8 luma block via
           obmc_motion(). */
        int16_t mv_cache[4][4][2];
        const int xy= s->mb_x + s->mb_y*s->mb_stride;
        const int mot_stride= s->b8_stride;
        const int mot_xy= mb_x*2 + mb_y*2*mot_stride;

        assert(!s->mb_skiped);

        memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy           ], sizeof(int16_t)*4);
        memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
        /* row 3 re-uses the MB's own bottom row (NOTE(review): presumably
           because the MB below is not yet decoded — confirm) */
        memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);

        /* top neighbour row: replicate own top row at picture edge or when
           the MB above is intra */
        if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
            memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
        }else{
            memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
        }

        /* left neighbour column (same edge/intra replication rule) */
        if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
            *(int32_t*)mv_cache[1][0]= *(int32_t*)mv_cache[1][1];
            *(int32_t*)mv_cache[2][0]= *(int32_t*)mv_cache[2][1];
        }else{
            *(int32_t*)mv_cache[1][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1];
            *(int32_t*)mv_cache[2][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1+mot_stride];
        }

        /* right neighbour column */
        if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
            *(int32_t*)mv_cache[1][3]= *(int32_t*)mv_cache[1][2];
            *(int32_t*)mv_cache[2][3]= *(int32_t*)mv_cache[2][2];
        }else{
            *(int32_t*)mv_cache[1][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2];
            *(int32_t*)mv_cache[2][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2+mot_stride];
        }

        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            const int x= (i&1)+1;
            const int y= (i>>1)+1;
            /* mv[0] = own vector, mv[1..4] = top/left/right/bottom neighbours */
            int16_t mv[5][2]= {
                {mv_cache[y][x  ][0], mv_cache[y][x  ][1]},
                {mv_cache[y-1][x][0], mv_cache[y-1][x][1]},
                {mv_cache[y][x-1][0], mv_cache[y][x-1][1]},
                {mv_cache[y][x+1][0], mv_cache[y][x+1][1]},
                {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}};
            //FIXME cleanup
            obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
                        pix_op[1],
                        mv);

            mx += mv[0][0];
            my += mv[0][1];
        }
        /* chroma MC uses the sum of the four luma vectors */
        if(!(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);

        return;
    }

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
#ifdef CONFIG_RISKY
        if(s->mcsel){
            /* global motion compensation */
            if(s->real_sprite_warping_points==1){
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            }else{
                gmc_motion(s, dest_y, dest_cb, dest_cr,
                           ref_picture);
            }
        }else if(s->quarter_sample){
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }else if(s->mspel){
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }else
#endif
        {
            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }
        break;
    case MV_TYPE_8X8:
        /* four vectors, one per 8x8 luma block */
        mx = 0;
        my = 0;
        if(s->quarter_sample){
            for(i=0;i<4;i++) {
                motion_x = s->mv[dir][i][0];
                motion_y = s->mv[dir][i][1];

                dxy = ((motion_y & 3) << 2) | (motion_x & 3);
                src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
                src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;

                /* WARNING: do not forget half pels */
                src_x = clip(src_x, -16, s->width);
                if (src_x == s->width)
                    dxy &= ~3;
                src_y = clip(src_y, -16, s->height);
                if (src_y == s->height)
                    dxy &= ~12;

                ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
                if(s->flags&CODEC_FLAG_EMU_EDGE){
                    /* the 9x9 source patch may reach outside the decoded
                       area; copy it through the edge emulation buffer */
                    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 8
                       || (unsigned)src_y > s->v_edge_pos - (motion_y&3) - 8 ){
                        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
                        ptr= s->edge_emu_buffer;
                    }
                }
                dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
                qpix_op[1][dxy](dest, ptr, s->linesize);

                /* accumulate in halfpel units for the chroma MC below */
                mx += s->mv[dir][i][0]/2;
                my += s->mv[dir][i][1]/2;
            }
        }else{
            for(i=0;i<4;i++) {
                hpel_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                            ref_picture[0], 0, 0,
                            mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
                            s->width, s->height, s->linesize,
                            s->h_edge_pos, s->v_edge_pos,
                            8, 8, pix_op[1],
                            s->mv[dir][i][0], s->mv[dir][i][1]);

                mx += s->mv[dir][i][0];
                my += s->mv[dir][i][1];
            }
        }

        if(!(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
        break;
    case MV_TYPE_FIELD:
        /* one vector per field */
        if (s->picture_structure == PICT_FRAME) {
            if(s->quarter_sample){
                for(i=0; i<2; i++){
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
                }
            }else{
                /* top field */
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            1, 0, s->field_select[dir][0],
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 8);
                /* bottom field */
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            1, 1, s->field_select[dir][1],
                            ref_picture, pix_op,
                            s->mv[dir][1][0], s->mv[dir][1][1], 8);
            }
        } else {
            /* field picture: when the selected field has the same parity as
               the current (second, non-B) field, it lives in the current
               picture rather than in ref_picture */
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != B_TYPE && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, upper and lower 16x8 halves (field pictures) */
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            /* same parity-based reference selection as MV_TYPE_FIELD above */
            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == B_TYPE || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 16*i, 8);

            dest_y += 16*s->linesize;
            dest_cb+= 8*s->uvlinesize;
            dest_cr+= 8*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual prime: put the first prediction, then average in the
           opposite-parity one (pix_op is switched to avg after each put) */
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], 8);
                }
                pix_op = s->dsp.avg_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],16);

                // after put we make avg of the same block
                pix_op=s->dsp.avg_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
        break;
    default: assert(0);
    }
}
3021
3022
/* put block[] to dest[]: dequantize the intra block in place, then
   inverse-transform it and store (overwrite) the result at dest */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
3030
3031 /* add block[] to dest[] */
3032 static inline void add_dct(MpegEncContext *s,
3033 DCTELEM *block, int i, uint8_t *dest, int line_size)
3034 {
3035 if (s->block_last_index[i] >= 0) {
3036 s->dsp.idct_add (dest, line_size, block);
3037 }
3038 }
3039
3040 static inline void add_dequant_dct(MpegEncContext *s,
3041 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
3042 {
3043 if (s->block_last_index[i] >= 0) {
3044 s->dct_unquantize_inter(s, block, i, qscale);
3045
3046 s->dsp.idct_add (dest, line_size, block);
3047 }
3048 }
3049
3050 /**
3051 * cleans dc, ac, coded_block for the current non intra MB
3052 */
3053 void ff_clean_intra_table_entries(MpegEncContext *s)
3054 {
3055 int wrap = s->b8_stride;
3056 int xy = s->block_index[0];
3057
3058 s->dc_val[0][xy ] =
3059 s->dc_val[0][xy + 1 ] =
3060 s->dc_val[0][xy + wrap] =
3061 s->dc_val[0][xy + 1 + wrap] = 1024;
3062 /* ac pred */
3063 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
3064 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
3065 if (s->msmpeg4_version>=3) {
3066 s->coded_block[xy ] =
3067 s->coded_block[xy + 1 ] =
3068 s->coded_block[xy + wrap] =
3069 s->coded_block[xy + 1 + wrap] = 0;
3070 }
3071 /* chroma */
3072 wrap = s->mb_stride;
3073 xy = s->mb_x + s->mb_y * wrap;
3074 s->dc_val[1][xy] =
3075 s->dc_val[2][xy] = 1024;
3076 /* ac pred */
3077 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
3078 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
3079
3080 s->mbintra_table[xy]= 0;
3081 }
3082
3083 /* generic function called after a macroblock has been parsed by the
3084 decoder or after it has been encoded by the encoder.
3085
3086 Important variables used:
3087 s->mb_intra : true if intra macroblock
3088 s->mv_dir : motion vector direction
3089 s->mv_type : motion vector type
3090 s->mv : motion vector
3091 s->interlaced_dct : true if interlaced dct used (mpeg2)
3092 */
3093 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
3094 {
3095 int mb_x, mb_y;
3096 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
3097 #ifdef HAVE_XVMC
3098 if(s->avctx->xvmc_acceleration){
3099 XVMC_decode_mb(s);//xvmc uses pblocks
3100 return;
3101 }
3102 #endif
3103
3104 mb_x = s->mb_x;
3105 mb_y = s->mb_y;
3106
3107 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
3108 /* save DCT coefficients */
3109 int i,j;
3110 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
3111 for(i=0; i<6; i++)
3112 for(j=0; j<64; j++)
3113 *dct++ = block[i][s->dsp.idct_permutation[j]];
3114 }
3115
3116 s->current_picture.qscale_table[mb_xy]= s->qscale;
3117
3118 /* update DC predictors for P macroblocks */
3119 if (!s->mb_intra) {
3120 if (s->h263_pred || s->h263_aic) {
3121 if(s->mbintra_table[mb_xy])
3122 ff_clean_intra_table_entries(s);
3123 } else {
3124 s->last_dc[0] =
3125 s->last_dc[1] =
3126 s->last_dc[2] = 128 << s->intra_dc_precision;
3127 }
3128 }
3129 else if (s->h263_pred || s->h263_aic)
3130 s->mbintra_table[mb_xy]=1;
3131
3132 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
3133 uint8_t *dest_y, *dest_cb, *dest_cr;
3134 int dct_linesize, dct_offset;
3135 op_pixels_func (*op_pix)[4];
3136 qpel_mc_func (*op_qpix)[16];
3137 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics
3138 const int uvlinesize= s->current_picture.linesize[1];
3139 const int readable= s->pict_type