cosmetics: drop some completely pointless parentheses
[libav.git] / libavcodec / mpegvideo.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
32 #include "avcodec.h"
33 #include "dsputil.h"
34 #include "internal.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
37 #include "mjpegenc.h"
38 #include "msmpeg4.h"
39 #include "faandct.h"
40 #include "xvmc_internal.h"
41 #include "thread.h"
42 #include <limits.h>
43
44 //#undef NDEBUG
45 //#include <assert.h>
46
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
61
62
63 /* enable all paranoid tests for rounding, overflows, etc... */
64 //#define PARANOID
65
66 //#define DEBUG
67
68
/* Default luma-to-chroma qscale mapping: the identity, i.e. chroma uses the
 * same quantizer scale as luma (codecs with a different mapping install
 * their own table). */
static const uint8_t ff_default_chroma_qscale_table[32] = {
// 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
74
/* DC coefficient scale for MPEG-1: a constant 8 regardless of qscale
 * (the table is indexed by qscale, hence 128 entries). */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
86
/* Constant DC scale 4 — selected via ff_mpeg2_dc_scale_table[1]
 * (presumably MPEG-2 intra_dc_precision == 1; see that table below). */
static const uint8_t mpeg2_dc_scale_table1[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
98
/* Constant DC scale 2 — selected via ff_mpeg2_dc_scale_table[2]. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
110
/* Constant DC scale 1 — selected via ff_mpeg2_dc_scale_table[3]. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
122
/* DC scale tables with scales 8, 4, 2, 1 — the 4-entry index matches the
 * MPEG-2 intra_dc_precision range 0..3 (entry 0 reuses the MPEG-1 table). */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
129
/* Pixel formats offered by plain software 4:2:0 decoders (terminated by
 * PIX_FMT_NONE, as required by get_format()). */
const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
134
/* Same as ff_pixfmt_list_420, but with the hardware-accelerated formats
 * (DXVA2, VA-API, VDA) listed first so they are preferred when available. */
const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_DXVA2_VLD,
    PIX_FMT_VAAPI_VLD,
    PIX_FMT_VDA_VLD,
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
142
/**
 * Scan [p, end) for an MPEG start code prefix (00 00 01 xx).
 *
 * @param p     current read position
 * @param end   one past the last readable byte
 * @param state in/out: the last 4 bytes seen, so a start code that straddles
 *              the previous buffer is still found on the next call
 * @return pointer just past the start code byte, or end if none was found
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                          const uint8_t *end,
                                          uint32_t * restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* Feed up to 3 bytes one at a time through *state first, so a prefix
     * partially contained in the carried-over state is detected. */
    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    /* Fast scan: look at every 3rd byte and skip as far as the byte values
     * allow; a 00 00 01 prefix cannot be jumped over by these strides. */
    while (p < end) {
        if      (p[-1] > 1      ) p += 3; /* p[-1] can be neither 00 nor 01 */
        else if (p[-2]          ) p += 2; /* no 00 00 run ends at p[-1] */
        else if (p[-3]|(p[-1]-1)) p++;    /* not exactly 00 00 01 yet */
        else {
            p++;                          /* found 00 00 01 at p[-3..-1] */
            break;
        }
    }

    /* Record the last 4 bytes before the return position in *state for
     * the next call (the fast scan may have stepped past end). */
    p = FFMIN(p, end) - 4;
    *state = AV_RB32(p);

    return p + 4;
}
175
/**
 * Initialize the DCT/IDCT related parts of the context, shared by encoder
 * and decoder: DSP function pointers, dequantizers and scan tables.
 *
 * @return 0 (cannot fail)
 */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    dsputil_init(&s->dsp, s->avctx);

    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    /* the bitexact variant trades speed for reproducible output */
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* let exactly one architecture-specific init override the C defaults */
#if HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
221
/**
 * Shallow-copy all fields of src into dst and tag dst as a copy, so the
 * shared underlying buffers are not released through the copy.
 */
void ff_copy_picture(Picture *dst, Picture *src)
{
    *dst = *src;
    dst->f.type = FF_BUFFER_TYPE_COPY;
}
227
228 /**
229 * Release a frame buffer
230 */
231 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
232 {
233 /* Windows Media Image codecs allocate internal buffers with different
234 * dimensions; ignore user defined callbacks for these
235 */
236 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
237 ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
238 else
239 avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
240 av_freep(&pic->f.hwaccel_picture_private);
241 }
242
/**
 * Allocate a frame buffer for a picture via (ff_thread_)get_buffer() and
 * validate the returned strides.
 *
 * @return 0 on success, -1 on failure (nothing left allocated)
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    /* allocate per-picture hwaccel private data first, if the hwaccel
     * declares any */
    if (s->avctx->hwaccel) {
        assert(!pic->f.hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->f.hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    /* WMV3IMAGE/VC1IMAGE manage buffers of differing dimensions internally;
     * skip the (possibly user-provided) threaded get_buffer for them */
    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
        r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
    else
        r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);

    if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
               r, pic->f.age, pic->f.type, pic->f.data[0]);
        av_freep(&pic->f.hwaccel_picture_private);
        return -1;
    }

    /* strides must stay constant for the whole sequence */
    if (s->linesize && (s->linesize != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    /* both chroma planes must share one stride */
    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}
290
/**
 * Allocate a Picture together with its side-data tables (qscale, mb_type,
 * motion vectors, ...).
 * The pixels are allocated/set by calling get_buffer() if shared = 0.
 *
 * @param shared if nonzero, pic->f.data[] is caller-owned and only tagged
 *               as shared; otherwise a frame buffer is allocated here
 * @return 0 on success, -1 on failure
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;

    // the + 1 is needed so memset(,,stride*height) does not sig11

    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    const int b4_array_size = s->b4_stride * s->mb_height * 4;
    int i;
    int r = -1;

    if (shared) {
        assert(pic->f.data[0]);
        assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
        pic->f.type = FF_BUFFER_TYPE_SHARED;
    } else {
        assert(!pic->f.data[0]);

        if (alloc_frame_buffer(s, pic) < 0)
            return -1;

        s->linesize   = pic->f.linesize[0];
        s->uvlinesize = pic->f.linesize[1];
    }

    /* side-data tables are allocated only once and reused across reuses of
     * this Picture slot */
    if (pic->f.qscale_table == NULL) {
        if (s->encoding) {
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
                              mb_array_size * sizeof(int16_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
                              mb_array_size * sizeof(int16_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
                              mb_array_size * sizeof(int8_t ), fail)
        }

        FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
                          mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
                          (big_mb_num + s->mb_stride) * sizeof(uint8_t),
                          fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
                          (big_mb_num + s->mb_stride) * sizeof(uint32_t),
                          fail)
        /* offset into the base arrays so that index -1 rows/cols are valid */
        pic->f.mb_type      = pic->mb_type_base + 2 * s->mb_stride + 1;
        pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
        if (s->out_format == FMT_H264) {
            /* H.264: 4x4 motion granularity -> b4-sized tables */
            for (i = 0; i < 2; i++) {
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
                                  2 * (b4_array_size + 4) * sizeof(int16_t),
                                  fail)
                pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
                                  4 * mb_array_size * sizeof(uint8_t), fail)
            }
            pic->f.motion_subsample_log2 = 2;
        } else if (s->out_format == FMT_H263 || s->encoding ||
                   (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
            /* 8x8 motion granularity -> b8-sized tables */
            for (i = 0; i < 2; i++) {
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
                                  2 * (b8_array_size + 4) * sizeof(int16_t),
                                  fail)
                pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
                                  4 * mb_array_size * sizeof(uint8_t), fail)
            }
            pic->f.motion_subsample_log2 = 3;
        }
        if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
                              64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
        }
        pic->f.qstride = s->mb_stride;
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
                          1 * sizeof(AVPanScan), fail)
    }

    /* It might be nicer if the application would keep track of these
     * but it would require an API change. */
    memmove(s->prev_pict_types + 1, s->prev_pict_types,
            PREV_PICT_TYPES_BUFFER_SIZE-1);
    s->prev_pict_types[0] = s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
    if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE &&
        s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
        pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2
                              // and it is a bit tricky to skip them anyway.
    pic->owner2 = s;

    return 0;
fail: // for the FF_ALLOCZ_OR_GOTO macro
    /* NOTE(review): r is initialized to -1 and never assigned afterwards,
     * so free_frame_buffer() below is unreachable; a frame buffer obtained
     * via alloc_frame_buffer() above appears to stay allocated if a later
     * table allocation fails — confirm whether that is intentional. */
    if (r >= 0)
        free_frame_buffer(s, pic);
    return -1;
}
389
390 /**
391 * deallocates a picture
392 */
393 static void free_picture(MpegEncContext *s, Picture *pic)
394 {
395 int i;
396
397 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
398 free_frame_buffer(s, pic);
399 }
400
401 av_freep(&pic->mb_var);
402 av_freep(&pic->mc_mb_var);
403 av_freep(&pic->mb_mean);
404 av_freep(&pic->f.mbskip_table);
405 av_freep(&pic->qscale_table_base);
406 av_freep(&pic->mb_type_base);
407 av_freep(&pic->f.dct_coeff);
408 av_freep(&pic->f.pan_scan);
409 pic->f.mb_type = NULL;
410 for (i = 0; i < 2; i++) {
411 av_freep(&pic->motion_val_base[i]);
412 av_freep(&pic->f.ref_index[i]);
413 }
414
415 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
416 for (i = 0; i < 4; i++) {
417 pic->f.base[i] =
418 pic->f.data[i] = NULL;
419 }
420 pic->f.type = 0;
421 }
422 }
423
/**
 * Allocate the per-thread scratch buffers of a (possibly duplicated)
 * context: edge emulation buffer, motion-estimation scratchpads, DCT
 * blocks and, for H.263-style codecs, the AC prediction values.
 *
 * @return 0 on success, -1 on failure (partial allocations are left for
 *         MPV_common_end() to free)
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
{
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for  halfpel / 21x21 for  h264)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
                      (s->width + 64) * 2 * 21 * 2, fail);    // (width + edge + align)*interlaced*MBsize*tolerance

    // FIXME should be linesize instead of s->width * 2
    // but that is not known before get_buffer()
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
                      (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
    /* the scratchpads all alias one allocation (obmc offset by 16) */
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;
    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through MPV_common_end()
}
474
475 static void free_duplicate_context(MpegEncContext *s)
476 {
477 if (s == NULL)
478 return;
479
480 av_freep(&s->edge_emu_buffer);
481 av_freep(&s->me.scratchpad);
482 s->me.temp =
483 s->rd_scratchpad =
484 s->b_scratchpad =
485 s->obmc_scratchpad = NULL;
486
487 av_freep(&s->dct_error_sum);
488 av_freep(&s->me.map);
489 av_freep(&s->me.score_map);
490 av_freep(&s->blocks);
491 av_freep(&s->ac_val_base);
492 s->block = NULL;
493 }
494
/**
 * Copy only the thread-local fields (scratch buffers, per-slice state)
 * from src into bak; used by ff_update_duplicate_context() to preserve a
 * thread's own buffers across a bulk memcpy of the whole context.
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
521
522 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
523 {
524 MpegEncContext bak;
525 int i;
526 // FIXME copy only needed parts
527 // START_TIMER
528 backup_duplicate_context(&bak, dst);
529 memcpy(dst, src, sizeof(MpegEncContext));
530 backup_duplicate_context(dst, &bak);
531 for (i = 0; i < 12; i++) {
532 dst->pblocks[i] = &dst->block[i];
533 }
534 // STOP_TIMER("update_duplicate_context")
535 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
536 }
537
/**
 * Transfer decoding state between two frame-thread contexts.
 *
 * On the first call for a destination context it is lazily initialized
 * as a copy of the source (with its own picture range and bitstream
 * buffer). On every call, picture pointers (rebased into dst's picture
 * array), timing, interlacing and B-frame state are copied from src.
 *
 * @return 0
 */
int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src || !s1->context_initialized)
        return 0;

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        /* each thread context owns a disjoint slice of the picture pool */
        s->picture_range_start  += MAX_PICTURE_COUNT;
        s->picture_range_end    += MAX_PICTURE_COUNT;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        MPV_common_init(s);
    }

    s->avctx->coded_height = s1->avctx->coded_height;
    s->avctx->coded_width  = s1->avctx->coded_width;
    s->avctx->width        = s1->avctx->width;
    s->avctx->height       = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    /* copy the picture array and the contiguous field range from
     * last_picture up to (but not including) last_picture_ptr */
    memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
    memcpy(&s->last_picture, &s1->last_picture,
           (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);

    /* pointers into s1's picture array must be rebased into s's */
    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    memcpy(s->prev_pict_types, s1->prev_pict_types,
           PREV_PICT_TYPES_BUFFER_SIZE);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;

    // MPEG4 timing info
    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
           (char *) &s1->shape - (char *) &s1->time_increment_bits);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->dropable     = s1->dropable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        /* NOTE(review): the av_fast_malloc() result is not checked; on
         * allocation failure s->bitstream_buffer is NULL and the memcpy
         * below would crash — confirm against upstream fixes. */
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);
    }

    // MPEG2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;

        if (s1->pict_type != AV_PICTURE_TYPE_B) {
            s->last_non_b_pict_type = s1->pict_type;
        }
    }

    return 0;
}
625
626 /**
627 * sets the given MpegEncContext to common defaults
628 * (same for encoding and decoding).
629 * the changed fields will not depend upon the
630 * prior state of the MpegEncContext.
631 */
632 void MPV_common_defaults(MpegEncContext *s)
633 {
634 s->y_dc_scale_table =
635 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
636 s->chroma_qscale_table = ff_default_chroma_qscale_table;
637 s->progressive_frame = 1;
638 s->progressive_sequence = 1;
639 s->picture_structure = PICT_FRAME;
640
641 s->coded_picture_number = 0;
642 s->picture_number = 0;
643 s->input_picture_number = 0;
644
645 s->picture_in_gop_number = 0;
646
647 s->f_code = 1;
648 s->b_code = 1;
649
650 s->picture_range_start = 0;
651 s->picture_range_end = MAX_PICTURE_COUNT;
652 }
653
/**
 * Set the given MpegEncContext to defaults for decoding.
 * The changed fields will not depend upon the prior state of the
 * MpegEncContext. (Currently identical to the common defaults; kept as a
 * separate entry point so decoding-only defaults can be added.)
 */
void MPV_decode_defaults(MpegEncContext *s)
{
    MPV_common_defaults(s);
}
663
/**
 * Init common structure for both encoder and decoder.
 * This assumes that some variables like width/height are already set;
 * allocates all size-dependent tables and the per-thread contexts.
 *
 * @return 0 on success, -1 on failure (everything freed via MPV_common_end())
 */
av_cold int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
        threads = (s->encoding ||
                   (HAVE_THREADS &&
                    s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
                  s->avctx->thread_count : 1;

    /* interlaced MPEG-2 needs an even mb_height in 32-pixel units;
     * H.264 computes mb_height itself */
    if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    /* clamp the slice-thread count to one thread per macroblock row */
    if ((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
        (s->avctx->thread_count > MAX_THREADS ||
         (s->avctx->thread_count > s->mb_height && s->mb_height))) {
        int max_threads = FFMIN(MAX_THREADS, s->mb_height);
        av_log(s->avctx, AV_LOG_WARNING,
               "too many threads (%d), reducing to %d\n",
               s->avctx->thread_count, max_threads);
        threads = max_threads;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    ff_dct_common_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    /* dimensions may be 0 here (e.g. before the first header); the
     * size-dependent tables are then allocated on a later call */
    if (s->width && s->height) {
        s->mb_width   = (s->width + 15) / 16;
        s->mb_stride  = s->mb_width + 1;
        s->b8_stride  = s->mb_width * 2 + 1;
        s->b4_stride  = s->mb_width * 4 + 1;
        mb_array_size = s->mb_height * s->mb_stride;
        mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

        /* set chroma shifts */
        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                      &(s->chroma_y_shift) );

        /* set default edge pos, will be overriden
         * in decode_header if needed */
        s->h_edge_pos = s->mb_width * 16;
        s->v_edge_pos = s->mb_height * 16;

        s->mb_num     = s->mb_width * s->mb_height;

        s->block_wrap[0] =
        s->block_wrap[1] =
        s->block_wrap[2] =
        s->block_wrap[3] = s->b8_stride;
        s->block_wrap[4] =
        s->block_wrap[5] = s->mb_stride;

        y_size  = s->b8_stride * (2 * s->mb_height + 1);
        c_size  = s->mb_stride * (s->mb_height + 1);
        yc_size = y_size + 2   * c_size;

        /* convert fourcc to upper case */
        s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);

        s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);

        s->avctx->coded_frame = (AVFrame *)&s->current_picture;

        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                          fail); // error ressilience code looks cleaner with this
        for (y = 0; y < s->mb_height; y++)
            for (x = 0; x < s->mb_width; x++)
                s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

        s->mb_index2xy[s->mb_height * s->mb_width] =
            (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

        if (s->encoding) {
            /* Allocate MV tables; each *_mv_table points past the first
             * stride+1 entries so index -1 neighbors are addressable */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            s->p_mv_table            = s->p_mv_table_base +
                                       s->mb_stride + 1;
            s->b_forw_mv_table       = s->b_forw_mv_table_base +
                                       s->mb_stride + 1;
            s->b_back_mv_table       = s->b_back_mv_table_base +
                                       s->mb_stride + 1;
            s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
                                       s->mb_stride + 1;
            s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
                                       s->mb_stride + 1;
            s->b_direct_mv_table     = s->b_direct_mv_table_base +
                                       s->mb_stride + 1;

            if (s->msmpeg4_version) {
                FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                                  2 * 2 * (MAX_LEVEL + 1) *
                                  (MAX_RUN + 1) * 2 * sizeof(int), fail);
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

            /* Allocate MB type table */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
                              sizeof(uint16_t), fail); // needed for encoding

            FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
                              sizeof(int), fail);

            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
                              64 * 32   * sizeof(int), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
                              64 * 32   * sizeof(int), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
                              64 * 32 * 2 * sizeof(uint16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
                              64 * 32 * 2 * sizeof(uint16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);

            if (s->avctx->noise_reduction) {
                FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                                  2 * 64 * sizeof(uint16_t), fail);
            }
        }
    }

    /* one picture pool slice per thread */
    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      s->picture_count * sizeof(Picture), fail);
    for (i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
    }

    if (s->width && s->height) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
                          mb_array_size * sizeof(uint8_t), fail);

        if (s->codec_id == CODEC_ID_MPEG4 ||
            (s->flags & CODEC_FLAG_INTERLACED_ME)) {
            /* interlaced direct mode decoding tables */
            for (i = 0; i < 2; i++) {
                int j, k;
                for (j = 0; j < 2; j++) {
                    for (k = 0; k < 2; k++) {
                        FF_ALLOCZ_OR_GOTO(s->avctx,
                                          s->b_field_mv_table_base[i][j][k],
                                          mv_table_size * 2 * sizeof(int16_t),
                                          fail);
                        s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                       s->mb_stride + 1;
                    }
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
                                      mb_array_size * 2 * sizeof(uint8_t),
                                      fail);
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
                                                + s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
                                  mb_array_size * 2 * sizeof(uint8_t),
                                  fail);
            }
        }
        if (s->out_format == FMT_H263) {
            /* cbp values */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
            s->coded_block = s->coded_block_base + s->b8_stride + 1;

            /* cbp, ac_pred, pred_dir */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
                              mb_array_size * sizeof(uint8_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
                              mb_array_size * sizeof(uint8_t), fail);
        }

        if (s->h263_pred || s->h263_plus || !s->encoding) {
            /* dc values */
            // MN: we need these for  error resilience of intra-frames
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
                              yc_size * sizeof(int16_t), fail);
            s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
            s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
            s->dc_val[2] = s->dc_val[1] + c_size;
            for (i = 0; i < yc_size; i++)
                s->dc_val_base[i] = 1024;
        }

        /* which mb is a intra block */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
        memset(s->mbintra_table, 1, mb_array_size);

        /* init macroblock skip table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
        // Note the + 1 is for  a quicker mpeg4 slice_end detection
        FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types,
                          PREV_PICT_TYPES_BUFFER_SIZE, fail);

        s->parse_context.state = -1;
        if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
            s->avctx->debug_mv) {
            s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
            s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
            s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
        }
    }

    s->context_initialized = 1;
    s->thread_context[0]   = s;

    if (s->width && s->height) {
        if (s->encoding || (HAVE_THREADS &&
            s->avctx->active_thread_type&FF_THREAD_SLICE)) {
            /* one duplicated context per slice thread, each covering a
             * contiguous band of macroblock rows */
            for (i = 1; i < threads; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
            }

            for (i = 0; i < threads; i++) {
                if (init_duplicate_context(s->thread_context[i], s) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + s->avctx->thread_count / 2) /
                    s->avctx->thread_count;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + s->avctx->thread_count / 2) /
                    s->avctx->thread_count;
            }
        } else {
            if (init_duplicate_context(s, s) < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
    }

    return 0;
 fail:
    MPV_common_end(s);
    return -1;
}
932
/* Free everything allocated by MPV_common_init(), for both encoder and
 * decoder; safe to call on a partially initialized context. */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* per-thread scratch buffers (thread_context[0] aliases s itself) */
    if (s->encoding || (HAVE_THREADS &&
        s->avctx->active_thread_type & FF_THREAD_SLICE)) {
        for (i = 0; i < s->avctx->thread_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->avctx->thread_count; i++) {
            av_freep(&s->thread_context[i]);
        }
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    /* the *_mv_table pointers are offsets into the freed bases */
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* frame-thread copies share the pictures; only the owner frees them */
    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized      = 0;
    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;
    s->linesize = s->uvlinesize = 0;

    for (i = 0; i < 3; i++)
        av_freep(&s->visualization_buffer[i]);

    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);
}
1019
1020 void init_rl(RLTable *rl,
1021 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1022 {
1023 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1024 uint8_t index_run[MAX_RUN + 1];
1025 int last, run, level, start, end, i;
1026
1027 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1028 if (static_store && rl->max_level[0])
1029 return;
1030
1031 /* compute max_level[], max_run[] and index_run[] */
1032 for (last = 0; last < 2; last++) {
1033 if (last == 0) {
1034 start = 0;
1035 end = rl->last;
1036 } else {
1037 start = rl->last;
1038 end = rl->n;
1039 }
1040
1041 memset(max_level, 0, MAX_RUN + 1);
1042 memset(max_run, 0, MAX_LEVEL + 1);
1043 memset(index_run, rl->n, MAX_RUN + 1);
1044 for (i = start; i < end; i++) {
1045 run = rl->table_run[i];
1046 level = rl->table_level[i];
1047 if (index_run[run] == rl->n)
1048 index_run[run] = i;
1049 if (level > max_level[run])
1050 max_level[run] = level;
1051 if (run > max_run[level])
1052 max_run[level] = run;
1053 }
1054 if (static_store)
1055 rl->max_level[last] = static_store[last];
1056 else
1057 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1058 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1059 if (static_store)
1060 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1061 else
1062 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1063 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1064 if (static_store)
1065 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1066 else
1067 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1068 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1069 }
1070 }
1071
1072 void init_vlc_rl(RLTable *rl)
1073 {
1074 int i, q;
1075
1076 for (q = 0; q < 32; q++) {
1077 int qmul = q * 2;
1078 int qadd = (q - 1) | 1;
1079
1080 if (q == 0) {
1081 qmul = 1;
1082 qadd = 0;
1083 }
1084 for (i = 0; i < rl->vlc.table_size; i++) {
1085 int code = rl->vlc.table[i][0];
1086 int len = rl->vlc.table[i][1];
1087 int level, run;
1088
1089 if (len == 0) { // illegal code
1090 run = 66;
1091 level = MAX_LEVEL;
1092 } else if (len < 0) { // more bits needed
1093 run = 0;
1094 level = code;
1095 } else {
1096 if (code == rl->n) { // esc
1097 run = 66;
1098 level = 0;
1099 } else {
1100 run = rl->table_run[code] + 1;
1101 level = rl->table_level[code] * qmul + qadd;
1102 if (code >= rl->last) run += 192;
1103 }
1104 }
1105 rl->rl_vlc[q][i].len = len;
1106 rl->rl_vlc[q][i].level = level;
1107 rl->rl_vlc[q][i].run = run;
1108 }
1109 }
1110 }
1111
1112 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1113 {
1114 int i;
1115
1116 /* release non reference frames */
1117 for (i = 0; i < s->picture_count; i++) {
1118 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1119 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1120 (remove_current || &s->picture[i] != s->current_picture_ptr)
1121 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1122 free_frame_buffer(s, &s->picture[i]);
1123 }
1124 }
1125 }
1126
1127 int ff_find_unused_picture(MpegEncContext *s, int shared)
1128 {
1129 int i;
1130
1131 if (shared) {
1132 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1133 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1134 return i;
1135 }
1136 } else {
1137 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1138 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1139 return i; // FIXME
1140 }
1141 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1142 if (s->picture[i].f.data[0] == NULL)
1143 return i;
1144 }
1145 }
1146
1147 av_log(s->avctx, AV_LOG_FATAL,
1148 "Internal error, picture buffer overflow\n");
1149 /* We could return -1, but the codec would crash trying to draw into a
1150 * non-existing frame anyway. This is safer than waiting for a random crash.
1151 * Also the return of this is never useful, an encoder must only allocate
1152 * as much as allowed in the specification. This has no relationship to how
1153 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1154 * enough for such valid streams).
1155 * Plus, a decoder has to check stream validity and remove frames if too
1156 * many reference frames are around. Waiting for "OOM" is not correct at
1157 * all. Similarly, missing reference frames have to be replaced by
1158 * interpolated/MC frames, anything else is a bug in the codec ...
1159 */
1160 abort();
1161 return -1;
1162 }
1163
1164 static void update_noise_reduction(MpegEncContext *s){
1165 int intra, i;
1166
1167 for(intra=0; intra<2; intra++){
1168 if(s->dct_count[intra] > (1<<16)){
1169 for(i=0; i<64; i++){
1170 s->dct_error_sum[intra][i] >>=1;
1171 }
1172 s->dct_count[intra] >>= 1;
1173 }
1174
1175 for(i=0; i<64; i++){
1176 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1177 }
1178 }
1179 }
1180
1181 /**
1182 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1183 */
1184 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1185 {
1186 int i;
1187 Picture *pic;
1188 s->mb_skipped = 0;
1189
1190 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1191
1192 /* mark&release old frames */
1193 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
1194 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1195 if (s->last_picture_ptr->owner2 == s)
1196 free_frame_buffer(s, s->last_picture_ptr);
1197
1198 /* release forgotten pictures */
1199 /* if(mpeg124/h263) */
1200 if(!s->encoding){
1201 for(i=0; i<s->picture_count; i++){
1202 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
1203 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1204 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1205 free_frame_buffer(s, &s->picture[i]);
1206 }
1207 }
1208 }
1209 }
1210 }
1211
1212 if(!s->encoding){
1213 ff_release_unused_pictures(s, 1);
1214
1215 if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
1216 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
1217 else{
1218 i= ff_find_unused_picture(s, 0);
1219 pic= &s->picture[i];
1220 }
1221
1222 pic->f.reference = 0;
1223 if (!s->dropable){
1224 if (s->codec_id == CODEC_ID_H264)
1225 pic->f.reference = s->picture_structure;
1226 else if (s->pict_type != AV_PICTURE_TYPE_B)
1227 pic->f.reference = 3;
1228 }
1229
1230 pic->f.coded_picture_number = s->coded_picture_number++;
1231
1232 if(ff_alloc_picture(s, pic, 0) < 0)
1233 return -1;
1234
1235 s->current_picture_ptr= pic;
1236 //FIXME use only the vars from current_pic
1237 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1238 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1239 if(s->picture_structure != PICT_FRAME)
1240 s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1241 }
1242 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
1243 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1244 }
1245
1246 s->current_picture_ptr->f.pict_type = s->pict_type;
1247 // if(s->flags && CODEC_FLAG_QSCALE)
1248 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1249 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1250
1251 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1252
1253 if (s->pict_type != AV_PICTURE_TYPE_B) {
1254 s->last_picture_ptr= s->next_picture_ptr;
1255 if(!s->dropable)
1256 s->next_picture_ptr= s->current_picture_ptr;
1257 }
1258 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1259 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1260 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1261 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1262 s->pict_type, s->dropable);*/
1263
1264 if(s->codec_id != CODEC_ID_H264){
1265 if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
1266 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1267 if (s->pict_type != AV_PICTURE_TYPE_I)
1268 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1269 else if (s->picture_structure != PICT_FRAME)
1270 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1271
1272 /* Allocate a dummy frame */
1273 i= ff_find_unused_picture(s, 0);
1274 s->last_picture_ptr= &s->picture[i];
1275 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1276 return -1;
1277 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1278 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1279 }
1280 if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
1281 /* Allocate a dummy frame */
1282 i= ff_find_unused_picture(s, 0);
1283 s->next_picture_ptr= &s->picture[i];
1284 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1285 return -1;
1286 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1287 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1288 }
1289 }
1290
1291 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1292 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1293
1294 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
1295
1296 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1297 int i;
1298 for(i=0; i<4; i++){
1299 if(s->picture_structure == PICT_BOTTOM_FIELD){
1300 s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
1301 }
1302 s->current_picture.f.linesize[i] *= 2;
1303 s->last_picture.f.linesize[i] *= 2;
1304 s->next_picture.f.linesize[i] *= 2;
1305 }
1306 }
1307
1308 s->error_recognition= avctx->error_recognition;
1309
1310 /* set dequantizer, we can't do it during init as it might change for mpeg4
1311 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1312 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1313 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1314 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1315 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1316 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1317 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1318 }else{
1319 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1320 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1321 }
1322
1323 if(s->dct_error_sum){
1324 assert(s->avctx->noise_reduction && s->encoding);
1325
1326 update_noise_reduction(s);
1327 }
1328
1329 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1330 return ff_xvmc_field_start(s, avctx);
1331
1332 return 0;
1333 }
1334
/* generic function for encode/decode called after a frame has been coded/decoded */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* redraw edges for the frame if decoding didn't complete */
    //just to make sure that all data is rendered.
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_field_end(s);
    }else if((s->error_count || s->encoding)
       && !s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.f.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        /* pad luma at full resolution, chroma at the subsampled one */
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                          s->h_edge_pos             , s->v_edge_pos,
                          EDGE_WIDTH        , EDGE_WIDTH        , EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                          s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                          s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* remember picture type / lambda for rate control of the next frame */
    s->last_pict_type    = s->pict_type;
    s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
    if(s->pict_type!=AV_PICTURE_TYPE_B){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release non-reference frames */
        for(i=0; i<s->picture_count; i++){
            if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;

    /* signal frame-threading consumers that this picture is fully decoded */
    if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
        ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
    }
}
1401
1402 /**
1403 * draws an line from (ex, ey) -> (sx, sy).
1404 * @param w width of the image
1405 * @param h height of the image
1406 * @param stride stride/linesize of the image
1407 * @param color color of the arrow
1408 */
1409 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1410 int x, y, fr, f;
1411
1412 sx= av_clip(sx, 0, w-1);
1413 sy= av_clip(sy, 0, h-1);
1414 ex= av_clip(ex, 0, w-1);
1415 ey= av_clip(ey, 0, h-1);
1416
1417 buf[sy*stride + sx]+= color;
1418
1419 if(FFABS(ex - sx) > FFABS(ey - sy)){
1420 if(sx > ex){
1421 FFSWAP(int, sx, ex);
1422 FFSWAP(int, sy, ey);
1423 }
1424 buf+= sx + sy*stride;
1425 ex-= sx;
1426 f= ((ey-sy)<<16)/ex;
1427 for(x= 0; x <= ex; x++){
1428 y = (x*f)>>16;
1429 fr= (x*f)&0xFFFF;
1430 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1431 buf[(y+1)*stride + x]+= (color* fr )>>16;
1432 }
1433 }else{
1434 if(sy > ey){
1435 FFSWAP(int, sx, ex);
1436 FFSWAP(int, sy, ey);
1437 }
1438 buf+= sx + sy*stride;
1439 ey-= sy;
1440 if(ey) f= ((ex-sx)<<16)/ey;
1441 else f= 0;
1442 for(y= 0; y <= ey; y++){
1443 x = (y*f)>>16;
1444 fr= (y*f)&0xFFFF;
1445 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1446 buf[y*stride + x+1]+= (color* fr )>>16;
1447 }
1448 }
1449 }
1450
1451 /**
1452 * draws an arrow from (ex, ey) -> (sx, sy).
1453 * @param w width of the image
1454 * @param h height of the image
1455 * @param stride stride/linesize of the image
1456 * @param color color of the arrow
1457 */
1458 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1459 int dx,dy;
1460
1461 sx= av_clip(sx, -100, w+100);
1462 sy= av_clip(sy, -100, h+100);
1463 ex= av_clip(ex, -100, w+100);
1464 ey= av_clip(ey, -100, h+100);
1465
1466 dx= ex - sx;
1467 dy= ey - sy;
1468
1469 if(dx*dx + dy*dy > 3*3){
1470 int rx= dx + dy;
1471 int ry= -dx + dy;
1472 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1473
1474 //FIXME subpixel accuracy
1475 rx= ROUNDED_DIV(rx*3<<4, length);
1476 ry= ROUNDED_DIV(ry*3<<4, length);
1477
1478 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1479 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1480 }
1481 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1482 }
1483
1484 /**
1485 * prints debuging info for the given picture.
1486 */
1487 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1488
1489 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1490
1491 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1492 int x,y;
1493
1494 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1495 switch (pict->pict_type) {
1496 case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1497 case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1498 case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1499 case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1500 case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1501 case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1502 }
1503 for(y=0; y<s->mb_height; y++){
1504 for(x=0; x<s->mb_width; x++){
1505 if(s->avctx->debug&FF_DEBUG_SKIP){
1506 int count= s->mbskip_table[x + y*s->mb_stride];
1507 if(count>9) count=9;
1508 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1509 }
1510 if(s->avctx->debug&FF_DEBUG_QP){
1511 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1512 }
1513 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1514 int mb_type= pict->mb_type[x + y*s->mb_stride];
1515 //Type & MV direction
1516 if(IS_PCM(mb_type))
1517 av_log(s->avctx, AV_LOG_DEBUG, "P");
1518 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1519 av_log(s->avctx, AV_LOG_DEBUG, "A");
1520 else if(IS_INTRA4x4(mb_type))
1521 av_log(s->avctx, AV_LOG_DEBUG, "i");
1522 else if(IS_INTRA16x16(mb_type))
1523 av_log(s->avctx, AV_LOG_DEBUG, "I");
1524 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1525 av_log(s->avctx, AV_LOG_DEBUG, "d");
1526 else if(IS_DIRECT(mb_type))
1527 av_log(s->avctx, AV_LOG_DEBUG, "D");
1528 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1529 av_log(s->avctx, AV_LOG_DEBUG, "g");
1530 else if(IS_GMC(mb_type))
1531 av_log(s->avctx, AV_LOG_DEBUG, "G");
1532 else if(IS_SKIP(mb_type))
1533 av_log(s->avctx, AV_LOG_DEBUG, "S");
1534 else if(!USES_LIST(mb_type, 1))
1535 av_log(s->avctx, AV_LOG_DEBUG, ">");
1536 else if(!USES_LIST(mb_type, 0))
1537 av_log(s->avctx, AV_LOG_DEBUG, "<");
1538 else{
1539 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1540 av_log(s->avctx, AV_LOG_DEBUG, "X");
1541 }
1542
1543 //segmentation
1544 if(IS_8X8(mb_type))
1545 av_log(s->avctx, AV_LOG_DEBUG, "+");
1546 else if(IS_16X8(mb_type))
1547 av_log(s->avctx, AV_LOG_DEBUG, "-");
1548 else if(IS_8X16(mb_type))
1549 av_log(s->avctx, AV_LOG_DEBUG, "|");
1550 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1551 av_log(s->avctx, AV_LOG_DEBUG, " ");
1552 else
1553 av_log(s->avctx, AV_LOG_DEBUG, "?");
1554
1555
1556 if(IS_INTERLACED(mb_type))
1557 av_log(s->avctx, AV_LOG_DEBUG, "=");
1558 else
1559 av_log(s->avctx, AV_LOG_DEBUG, " ");
1560 }
1561 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1562 }
1563 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1564 }
1565 }
1566
1567 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1568 s->avctx->debug_mv) {
1569 const int shift= 1 + s->quarter_sample;
1570 int mb_y;
1571 uint8_t *ptr;
1572 int i;
1573 int h_chroma_shift, v_chroma_shift, block_height;
1574 const int width = s->avctx->width;
1575 const int height= s->avctx->height;
1576 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1577 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1578 s->low_delay=0; //needed to see the vectors without trashing the buffers
1579
1580 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1581 for(i=0; i<3; i++){
1582 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1583 pict->data[i]= s->visualization_buffer[i];
1584 }
1585 pict->type= FF_BUFFER_TYPE_COPY;
1586 ptr= pict->data[0];
1587 block_height = 16>>v_chroma_shift;
1588
1589 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1590 int mb_x;
1591 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1592 const int mb_index= mb_x + mb_y*s->mb_stride;
1593 if (s->avctx->debug_mv && pict->motion_val) {
1594 int type;
1595 for(type=0; type<3; type++){
1596 int direction = 0;
1597 switch (type) {
1598 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1599 continue;
1600 direction = 0;
1601 break;
1602 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1603 continue;
1604 direction = 0;
1605 break;
1606 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1607 continue;
1608 direction = 1;
1609 break;
1610 }
1611 if(!USES_LIST(pict->mb_type[mb_index], direction))
1612 continue;
1613
1614 if(IS_8X8(pict->mb_type[mb_index])){
1615 int i;
1616 for(i=0; i<4; i++){
1617 int sx= mb_x*16 + 4 + 8*(i&1);
1618 int sy= mb_y*16 + 4 + 8*(i>>1);
1619 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1620 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1621 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1622 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1623 }
1624 }else if(IS_16X8(pict->mb_type[mb_index])){
1625 int i;
1626 for(i=0; i<2; i++){
1627 int sx=mb_x*16 + 8;
1628 int sy=mb_y*16 + 4 + 8*i;
1629 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1630 int mx=(pict->motion_val[direction][xy][0]>>shift);
1631 int my=(pict->motion_val[direction][xy][1]>>shift);
1632
1633 if(IS_INTERLACED(pict->mb_type[mb_index]))
1634 my*=2;
1635
1636 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1637 }
1638 }else if(IS_8X16(pict->mb_type[mb_index])){
1639 int i;
1640 for(i=0; i<2; i++){
1641 int sx=mb_x*16 + 4 + 8*i;
1642 int sy=mb_y*16 + 8;
1643 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1644 int mx=(pict->motion_val[direction][xy][0]>>shift);
1645 int my=(pict->motion_val[direction][xy][1]>>shift);
1646
1647 if(IS_INTERLACED(pict->mb_type[mb_index]))
1648 my*=2;
1649
1650 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1651 }
1652 }else{
1653 int sx= mb_x*16 + 8;
1654 int sy= mb_y*16 + 8;
1655 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1656 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1657 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1658 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1659 }
1660 }
1661 }
1662 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1663 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1664 int y;
1665 for(y=0; y<block_height; y++){
1666 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1667 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
1668 }
1669 }
1670 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1671 int mb_type= pict->mb_type[mb_index];
1672 uint64_t u,v;
1673 int y;
1674 #define COLOR(theta, r)\
1675 u= (int)(128 + r*cos(theta*3.141592/180));\
1676 v= (int)(128 + r*sin(theta*3.141592/180));
1677
1678
1679 u=v=128;
1680 if(IS_PCM(mb_type)){
1681 COLOR(120,48)
1682 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1683 COLOR(30,48)
1684 }else if(IS_INTRA4x4(mb_type)){
1685 COLOR(90,48)
1686 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1687 // COLOR(120,48)
1688 }else if(IS_DIRECT(mb_type)){
1689 COLOR(150,48)
1690 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1691 COLOR(170,48)
1692 }else if(IS_GMC(mb_type)){
1693 COLOR(190,48)
1694 }else if(IS_SKIP(mb_type)){
1695 // COLOR(180,48)
1696 }else if(!USES_LIST(mb_type, 1)){
1697 COLOR(240,48)
1698 }else if(!USES_LIST(mb_type, 0)){
1699 COLOR(0,48)
1700 }else{
1701 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1702 COLOR(300,48)
1703 }
1704
1705 u*= 0x0101010101010101ULL;
1706 v*= 0x0101010101010101ULL;
1707 for(y=0; y<block_height; y++){
1708 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1709 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
1710 }
1711
1712 //segmentation
1713 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1714 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1715 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1716 }
1717 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1718 for(y=0; y<16; y++)
1719 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1720 }
1721 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1722 int dm= 1 << (mv_sample_log2-2);
1723 for(i=0; i<4; i++){
1724 int sx= mb_x*16 + 8*(i&1);
1725 int sy= mb_y*16 + 8*(i>>1);
1726 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1727 //FIXME bidir
1728 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1729 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1730 for(y=0; y<8; y++)
1731 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1732 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1733 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1734 }
1735 }
1736
1737 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1738 // hmm
1739 }
1740 }
1741 s->mbskip_table[mb_index]=0;
1742 }
1743 }
1744 }
1745 }
1746
/**
 * Half-pel motion compensation for a single plane in lowres mode.
 * Falls back to the edge-emulation buffer when the source block would
 * read outside the picture.
 *
 * @return 1 if the edge-emulation buffer was used, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, int stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);
    /* sub-pel fraction mask at this lowres level */
    const int s_mask= (2<<lowres)-1;
    int emu=0;
    int sx, sy;

    if(s->quarter_sample){
        motion_x/=2;
        motion_y/=2;
    }

    /* split the motion vector into integer and fractional parts */
    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x += motion_x >> (lowres+1);
    src_y += motion_y >> (lowres+1);

    src += src_y * stride + src_x;

    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                                src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
        src= s->edge_emu_buffer;
        emu=1;
    }

    /* rescale the fraction to the 1/8-pel range the mc functions expect */
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    if(field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
1789
1790 /* apply one mpeg motion vector to the three components */
/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);
    const int block_s= 8>>lowres;
    /* sub-pel fraction mask at this lowres level */
    const int s_mask= (2<<lowres)-1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    /* luma source position: integer and fractional parts */
    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
    src_y =(   mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    /* chroma source position depends on the chroma-mv convention of the
     * output format */
    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> lowres);
        uvsrc_y =    mb_y*block_s               + (my >> lowres);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
        uvsrc_y =(   mb_y*block_s>>field_based) + (my >> (lowres+1));
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* reads outside the picture go through the edge-emulation buffer */
    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                s->dsp.emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    /* rescale the fractions to the 1/8-pel range the mc functions expect */
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);

    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        uvsx= (uvsx << 2) >> lowres;
        uvsy= (uvsy << 2) >> lowres;
        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
    }
    //FIXME h261 lowres loop filter
}
1887
/**
 * Chroma motion compensation for 4MV (8x8) macroblocks in lowres decoding.
 * The four luma vectors have already been accumulated by the caller into a
 * single (mx,my); one chroma vector derived from it is applied to both the
 * Cb and the Cr plane.
 *
 * @param dest_cb    Cb destination pointer
 * @param dest_cr    Cr destination pointer
 * @param ref_picture array[3] of pointers to the planes of the reference picture
 * @param pix_op     chroma MC function table (put or avg)
 * @param mx, my     sum of the four luma motion vectors (half- or quarter-pel)
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                    uint8_t *dest_cb, uint8_t *dest_cr,
                                    uint8_t **ref_picture,
                                    h264_chroma_mc_func *pix_op,
                                    int mx, int my){
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);           // pix_op table only has 3 sizes
    const int block_s= 8>>lowres;                   // chroma block size at this lowres level
    const int s_mask= (2<<lowres)-1;                // mask of subpel bits kept for interpolation
    const int h_edge_pos = s->h_edge_pos >> (lowres+1);
    const int v_edge_pos = s->v_edge_pos >> (lowres+1);
    int emu=0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    if(s->quarter_sample){
        // reduce quarter-pel vectors to half-pel before chroma rounding
        mx/=2;
        my/=2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx= ff_h263_round_chroma(mx);
    my= ff_h263_round_chroma(my);

    sx= mx & s_mask;                                // subpel fraction
    sy= my & s_mask;
    src_x = s->mb_x*block_s + (mx >> (lowres+1));   // integer source position
    src_y = s->mb_y*block_s + (my >> (lowres+1));

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        // reference block touches outside the padded picture: copy through
        // the edge emulation buffer instead
        if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
           || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr= s->edge_emu_buffer;
            emu=1;                                  // remember for the Cr plane below
        }
    }
    sx= (sx << 2) >> lowres;                        // rescale subpel fraction for the MC function
    sy= (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if(emu){
        // same source position, so the same edge emulation is needed for Cr
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
        ptr= s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
1938
1939 /**
1940 * motion compensation of a single macroblock
1941 * @param s context
1942 * @param dest_y luma destination pointer
1943 * @param dest_cb chroma cb/u destination pointer
1944 * @param dest_cr chroma cr/v destination pointer
1945 * @param dir direction (0->forward, 1->backward)
1946 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1947 * @param pix_op halfpel motion compensation function (average or put normally)
1948 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1949 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;   // 8x8 luma block size at this lowres level

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        // one vector for the whole macroblock
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                    0, 0, 0,
                    ref_picture, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        // four luma vectors; chroma gets one vector derived from their sum
        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
                        ref_picture[0], 0, 0,
                        (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
                        s->width, s->height, s->linesize,
                        s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                        block_s, block_s, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            // frame picture: independent vectors for the two fields
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 1, s->field_select[dir][1],
                        ref_picture, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
        } else {
            // field picture: when the selected field belongs to the frame
            // currently being decoded, reference the current picture instead
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
                ref_picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
        }
        break;
    case MV_TYPE_16X8:
        // two vectors, each covering a 16x8 half of the macroblock
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            // same logic as above: fields of the current frame are taken from
            // the current picture, not from the reference list
            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture= s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);

            dest_y += 2*block_s*s->linesize;
            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        // dual prime: predictions from both parities are averaged
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);

                // after put we make avg of the same block
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default: assert(0);
    }
}
2064
2065 /**
2066 * find the lowest MB row referenced in the MVs
2067 */
2068 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2069 {
2070 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2071 int my, off, i, mvs;
2072
2073 if (s->picture_structure != PICT_FRAME) goto unhandled;
2074
2075 switch (s->mv_type) {
2076 case MV_TYPE_16X16:
2077 mvs = 1;
2078 break;
2079 case MV_TYPE_16X8:
2080 mvs = 2;
2081 break;
2082 case MV_TYPE_8X8:
2083 mvs = 4;
2084 break;
2085 default:
2086 goto unhandled;
2087 }
2088
2089 for (i = 0; i < mvs; i++) {
2090 my = s->mv[dir][i][1]<<qpel_shift;
2091 my_max = FFMAX(my_max, my);
2092 my_min = FFMIN(my_min, my);
2093 }
2094
2095 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2096
2097 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2098 unhandled:
2099 return s->mb_height-1;
2100 }
2101
2102 /* put block[] to dest[] */
2103 static inline void put_dct(MpegEncContext *s,
2104 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2105 {
2106 s->dct_unquantize_intra(s, block, i, qscale);
2107 s->dsp.idct_put (dest, line_size, block);
2108 }
2109
2110 /* add block[] to dest[] */
2111 static inline void add_dct(MpegEncContext *s,
2112 DCTELEM *block, int i, uint8_t *dest, int line_size)
2113 {
2114 if (s->block_last_index[i] >= 0) {
2115 s->dsp.idct_add (dest, line_size, block);
2116 }
2117 }
2118
2119 static inline void add_dequant_dct(MpegEncContext *s,
2120 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2121 {
2122 if (s->block_last_index[i] >= 0) {
2123 s->dct_unquantize_inter(s, block, i, qscale);
2124
2125 s->dsp.idct_add (dest, line_size, block);
2126 }
2127 }
2128
/**
 * cleans dc, ac, coded_block for the current non intra MB
 * Resets the DC predictors to the mid value (1024), clears the stored AC
 * prediction coefficients and (for MSMPEG4 v3+) the coded_block flags for
 * the four luma 8x8 blocks and the two chroma blocks of the current MB.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
{
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    // the four luma 8x8 blocks: top-left, top-right, bottom-left, bottom-right
    s->dc_val[0][xy           ] =
    s->dc_val[0][xy + 1       ] =
    s->dc_val[0][xy     + wrap] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version>=3) {
        s->coded_block[xy           ] =
        s->coded_block[xy + 1       ] =
        s->coded_block[xy     + wrap] =
        s->coded_block[xy + 1 + wrap] = 0;
    }
    /* chroma */
    // chroma tables are indexed per macroblock, not per 8x8 block
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[1][xy] =
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    s->mbintra_table[xy]= 0;    // mark the MB as no longer intra
}
2161
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir : motion vector direction
   s->mv_type : motion vector type
   s->mv : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)

   lowres_flag and is_mpeg12 are compile-time constants at each call site
   (the function is av_always_inline), so each combination produces a
   specialized code path.
 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* save DCT coefficients */
       int i,j;
       DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               // store in raster order (undo the IDCT permutation)
               *dct++ = block[i][s->dsp.idct_permutation[j]];
               av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.f.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    // skip reconstruction entirely when encoding and the pixels will never
    // be referenced (intra-only / B frames without RD decisions, no PSNR)
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
            const int age = s->current_picture.f.age;

            assert(age);

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=AV_PICTURE_TYPE_I);

                (*mbskip_ptr) ++; /* indicate that this time we skipped it */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;

                /* if previous was skipped too, then nothing to do ! */
                if (*mbskip_ptr >= age && s->current_picture.f.reference){
                    return;
                }
            } else if(!s->current_picture.f.reference) {
                (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        // interlaced DCT: rows of a block come from alternating picture lines
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            // reconstruct into a scratch buffer; copied out below (skip_idct:)
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){

                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    // frame threading: wait until the referenced rows of the
                    // reference pictures have been decoded
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
                    }
                }

                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
                        // bidirectional: second prediction is averaged in
                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                        op_pix = s->dsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            // codecs with their own dequantizer baked into the bitstream
            // decode (msmpeg4, mpeg1/2, mpeg4 H.263-quant) ship already
            // dequantized blocks and take the add_dct path below instead
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        // 4:2:2 - chroma has twice the vertical resolution
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                // mpeg1/2 intra blocks are already dequantized: plain IDCT put
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        if(!readable){
            // copy the scratch reconstruction to the real destination
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
2422
2423 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2424 #if !CONFIG_SMALL
2425 if(s->out_format == FMT_MPEG1) {
2426 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2427 else MPV_decode_mb_internal(s, block, 0, 1);
2428 } else
2429 #endif
2430 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2431 else MPV_decode_mb_internal(s, block, 0, 0);
2432 }
2433
/**
 * Draw the edge padding for a finished slice and invoke the user's
 * draw_horiz_band callback, if any.
 *
 * @param y row (in luma pixels) at which the band starts
 * @param h the normal height, this will be reduced automatically if needed for the last row
 */
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
    const int field_pic= s->picture_structure != PICT_FRAME;
    if(field_pic){
        // field pictures: convert field coordinates to frame coordinates
        h <<= 1;
        y <<= 1;
    }

    // extend the borders of reference frames so unrestricted MVs can read
    // outside the picture (skipped for hwaccel and EMU_EDGE)
    if (!s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.f.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        int sides = 0, edge_h;
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;

        edge_h= FFMIN(h, s->v_edge_pos - y);

        s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
                          s->linesize,           s->h_edge_pos,         edge_h,
                          EDGE_WIDTH,            EDGE_WIDTH,            sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
    }

    // clip the band to the visible picture height
    h= FFMIN(h, s->avctx->height - y);

    if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (s->avctx->draw_horiz_band) {
        AVFrame *src;
        int offset[AV_NUM_DATA_POINTERS];
        int i;

        // pick the frame to present: B frames / low delay show the current
        // picture, otherwise the last reference picture
        if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
            src= (AVFrame*)s->current_picture_ptr;
        else if(s->last_picture_ptr)
            src= (AVFrame*)s->last_picture_ptr;
        else
            return;

        if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }else{
            offset[0]= y * s->linesize;
            offset[1]=
            offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }

        // reset FPU/MMX state before calling back into user code
        emms_c();

        s->avctx->draw_horiz_band(s->avctx, src, offset,
                                  y, s->picture_structure, h);
    }
}
2503
/**
 * Set up s->block_index[] (positions of the current MB's six blocks in the
 * per-8x8 prediction tables) and s->dest[] (pixel destination pointers for
 * the three planes) for the macroblock at (s->mb_x, s->mb_y).
 */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    const int mb_size= 4 - s->avctx->lowres;    // log2 of the MB size in pixels

    // luma: four 8x8 blocks in the b8-granular tables (offset -2/-1 so the
    // caller can advance by 2 per MB)
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    // chroma: one entry per MB, stored after the luma area
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y *   linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            // field picture: mb_y counts field rows, hence the >>1
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
2535
2536 void ff_mpeg_flush(AVCodecContext *avctx){
2537 int i;
2538 MpegEncContext *s = avctx->priv_data;
2539
2540 if(s==NULL || s->picture==NULL)
2541 return;
2542
2543 for(i=0; i<s->picture_count; i++){
2544 if (s->picture[i].f.data[0] &&
2545 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2546 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2547 free_frame_buffer(s, &s->picture[i]);
2548 }
2549 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2550
2551 s->mb_x= s->mb_y= 0;
2552
2553 s->parse_context.state= -1;
2554 s->parse_context.frame_start_found= 0;
2555 s->parse_context.overread= 0;
2556 s->parse_context.overread_index= 0;
2557 s->parse_context.index= 0;
2558 s->parse_context.last_index= 0;
2559 s->bitstream_buffer_size=0;
2560 s->pp_time=0;
2561 }
2562
2563 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2564 DCTELEM *block, int n, int qscale)
2565 {
2566 int i, level, nCoeffs;
2567 const uint16_t *quant_matrix;
2568
2569 nCoeffs= s->block_last_index[n];
2570
2571 if (n < 4)
2572 block[0] = block[0] * s->y_dc_scale;
2573 else
2574 block[0] = block[0] * s->c_dc_scale;
2575 /* XXX: only mpeg1 */
2576 quant_matrix = s->intra_matrix;
2577 for(i=1;i<=nCoeffs;i++) {
2578 int j= s->intra_scantable.permutated[i];
2579 level = block[j];
2580 if (level) {
2581 if (level < 0) {
2582 level = -level;
2583 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2584 level = (level - 1) | 1;
2585 level = -level;
2586 } else {
2587 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2588 level = (level - 1) | 1;
2589 }
2590 block[j] = level;
2591 }
2592 }
2593 }
2594
2595 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2596 DCTELEM *block, int n, int qscale)
2597 {
2598 int i, level, nCoeffs;
2599 const uint16_t *quant_matrix;
2600
2601 nCoeffs= s->block_last_index[n];
2602
2603 quant_matrix = s->inter_matrix;
2604 for(i=0; i<=nCoeffs; i++) {
2605 int j= s->intra_scantable.permutated[i];
2606 level = block[j];
2607 if (level) {
2608 if (level < 0) {
2609 level = -level;
2610 level = (((level << 1) + 1) * qscale *
2611 ((int) (quant_matrix[j]))) >> 4;
2612 level = (level - 1) | 1;
2613 level = -level;
2614 } else {
2615 level = (((level << 1) + 1) * qscale *
2616 ((int) (quant_matrix[j]))) >> 4;
2617 level = (level - 1) | 1;
2618 }
2619 block[j] = level;
2620 }
2621 }
2622 }
2623
2624 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2625 DCTELEM *block, int n, int qscale)
2626 {
2627 int i, level, nCoeffs;
2628 const uint16_t *quant_matrix;
2629
2630 if(s->alternate_scan) nCoeffs= 63;
2631 else nCoeffs= s->block_last_index[n];
2632
2633 if (n < 4)
2634 block[0] = block[0] * s->y_dc_scale;
2635 else
2636 block[0] = block[0] * s->c_dc_scale;
2637 quant_matrix = s->intra_matrix;
2638 for(i=1;i<=nCoeffs;i++) {
2639 int j= s->intra_scantable.permutated[i];
2640 level = block[j];
2641 if (level) {
2642 if (level < 0) {
2643 level = -level;
2644 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2645 level = -level;
2646 } else {
2647 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2648 }
2649 block[j] = level;
2650 }
2651 }
2652 }
2653
2654 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2655 DCTELEM *block, int n, int qscale)
2656 {
2657 int i, level, nCoeffs;
2658 const uint16_t *quant_matrix;
2659 int sum=-1;
2660
2661 if(s->alternate_scan) nCoeffs= 63;
2662 else nCoeffs= s->block_last_index[n];
2663
2664 if (n < 4)
2665 block[0] = block[0] * s->y_dc_scale;
2666 else
2667 block[0] = block[0] * s->c_dc_scale;
2668 quant_matrix = s->intra_matrix;
2669 for(i=1;i<=nCoeffs;i++) {
2670 int j= s->intra_scantable.permutated[i];
2671 level = block[j];
2672 if (level) {
2673 if (level < 0) {
2674 level = -level;
2675 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2676 level = -level;
2677 } else {
2678 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2679 }
2680 block[j] = level;
2681 sum+=level;
2682 }
2683 }
2684 block[63]^=sum&1;
2685 }
2686
2687 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2688 DCTELEM *block, int n, int qscale)
2689 {
2690 int i, level, nCoeffs;
2691 const uint16_t *quant_matrix;
2692 int sum=-1;
2693
2694 if(s->alternate_scan) nCoeffs= 63;
2695 else nCoeffs= s->block_last_index[n];
2696
2697 quant_matrix = s->inter_matrix;
2698 for(i=0; i<=nCoeffs; i++) {
2699 int j= s->intra_scantable.permutated[i];
2700 level = block[j];
2701 if (level) {
2702 if (level < 0) {
2703 level = -level;
2704 level = (((level << 1) + 1) * qscale *
2705 ((int) (quant_matrix[j]))) >> 4;
2706 level = -level;
2707 } else {
2708 level = (((level << 1) + 1) * qscale *
2709 ((int) (quant_matrix[j]))) >> 4;
2710 }
2711 block[j] = level;
2712 sum+=level;
2713 }
2714 }
2715 block[63]^=sum&1;
2716 }
2717
2718 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2719 DCTELEM *block, int n, int qscale)
2720 {
2721 int i, level, qmul, qadd;
2722 int nCoeffs;
2723
2724 assert(s->block_last_index[n]>=0);
2725
2726 qmul = qscale << 1;
2727
2728 if (!s->h263_aic) {
2729 if (n < 4)
2730 block[0] = block[0] * s->y_dc_scale;
2731 else
2732 block[0] = block[0] * s->c_dc_scale;
2733 qadd = (qscale - 1) | 1;
2734 }else{
2735 qadd = 0;
2736 }
2737 if(s->ac_pred)
2738 nCoeffs=63;
2739 else
2740 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2741
2742 for(i=1; i<=nCoeffs; i++) {
2743 level = block[i];
2744 if (level) {
2745 if (level