78fae1026de7508c03d1af8599b1f9807f57c98e
[libav.git] / libavcodec / mpegvideo.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
32 #include "avcodec.h"
33 #include "dsputil.h"
34 #include "internal.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
37 #include "mjpegenc.h"
38 #include "msmpeg4.h"
39 #include "faandct.h"
40 #include "xvmc_internal.h"
41 #include "thread.h"
42 #include <limits.h>
43
44 //#undef NDEBUG
45 //#include <assert.h>
46
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
61
62
63 /* enable all paranoid tests for rounding, overflows, etc... */
64 //#define PARANOID
65
66 //#define DEBUG
67
68
/* Default chroma qscale table: the identity mapping — chroma qscale equals
 * luma qscale for every one of the 32 possible qscale indices. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
//  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};

/* MPEG-1 intra DC scale: constant 8 for every qscale index. */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
//  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
};

/* MPEG-2 DC scale, intra_dc_precision == 1: constant 4. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
};

/* MPEG-2 DC scale, intra_dc_precision == 2: constant 2. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
};

/* MPEG-2 DC scale, intra_dc_precision == 3: constant 1 (no scaling). */
static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
};

/* DC scale tables indexed by intra_dc_precision (0..3); index 0 reuses the
 * MPEG-1 table (all 8s). */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
129
/* PIX_FMT_NONE-terminated pixel format list: plain software YUV 4:2:0. */
const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};

/* PIX_FMT_NONE-terminated list of hwaccel formats, tried before falling
 * back to software YUV 4:2:0. */
const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_DXVA2_VLD,
    PIX_FMT_VAAPI_VLD,
    PIX_FMT_VDA_VLD,
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
142
/**
 * Scan [p, end) for an MPEG start code (0x000001xx).
 *
 * *state carries the most recently read bytes (up to 4, big-endian) so a
 * start code straddling two consecutive buffers is still found: the first
 * loop feeds up to 3 bytes into *state and returns early when the previous
 * bytes already ended in 00 00 01 (tmp == 0x100).
 *
 * @return pointer just past the last byte of the start code (with *state
 *         holding the full 32-bit code, see the AV_RB32 at the end), or
 *         `end` if no start code was found.
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                          const uint8_t *end,
                                          uint32_t * restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* Resume across a buffer boundary: shift in up to 3 new bytes. */
    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    /* Main scan: inspect every third byte and step back as needed —
     * the stride depends on which of p[-3..-1] can still be part of
     * a 00 00 01 pattern. */
    while (p < end) {
        if      (p[-1] > 1      ) p += 3;
        else if (p[-2]          ) p += 2;
        else if (p[-3]|(p[-1]-1)) p++;
        else {
            p++;
            break;
        }
    }

    /* Reload *state with the 4 bytes of the (possibly clamped) match. */
    p = FFMIN(p, end) - 4;
    *state = AV_RB32(p);

    return p + 4;
}
175
176 /* init common dct for both encoder and decoder */
/**
 * Initialize the DCT/IDCT-related state shared by encoder and decoder:
 * DSP utils, the dct_unquantize_* function pointers (C versions, possibly
 * overridden by the per-architecture init below), and the scantables.
 *
 * @return 0 (cannot fail)
 */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    dsputil_init(&s->dsp, s->avctx);

    /* default C implementations; arch-specific init may replace them */
    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#if HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
221
/**
 * Shallow-copy src into dst (struct assignment — pointers are shared,
 * not duplicated) and mark dst as a copy so its buffers are not freed
 * through it.
 */
void ff_copy_picture(Picture *dst, Picture *src)
{
    *dst = *src;
    dst->f.type = FF_BUFFER_TYPE_COPY;
}
227
228 /**
229 * Release a frame buffer
230 */
/**
 * Release a frame buffer and the hwaccel private data attached to it.
 */
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
    /* Windows Media Image codecs allocate internal buffers with different
     * dimensions; ignore user defined callbacks for these
     */
    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
        ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
    else
        avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
    av_freep(&pic->f.hwaccel_picture_private);
}
242
243 /**
244 * Allocate a frame buffer
245 */
/**
 * Allocate a frame buffer via the (thread-aware) get_buffer path and
 * sanity-check the result.
 *
 * Also allocates the hwaccel private data when a hwaccel is active.
 * On any failure the hwaccel private data is freed again and -1 returned;
 * on success returns 0.
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    if (s->avctx->hwaccel) {
        assert(!pic->f.hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->f.hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    /* WMV3IMAGE/VC1IMAGE use internal buffers with different dimensions,
     * so user callbacks are bypassed (see free_frame_buffer). */
    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
        r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
    else
        r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);

    if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
               r, pic->f.age, pic->f.type, pic->f.data[0]);
        av_freep(&pic->f.hwaccel_picture_private);
        return -1;
    }

    /* strides must stay constant for the whole sequence */
    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    /* both chroma planes must share one stride */
    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}
290
291 /**
292 * allocates a Picture
293 * The pixels are allocated/set by calling get_buffer() if shared = 0
294 */
295 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
296 {
297 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
298
299 // the + 1 is needed so memset(,,stride*height) does not sig11
300
301 const int mb_array_size = s->mb_stride * s->mb_height;
302 const int b8_array_size = s->b8_stride * s->mb_height * 2;
303 const int b4_array_size = s->b4_stride * s->mb_height * 4;
304 int i;
305 int r = -1;
306
307 if (shared) {
308 assert(pic->f.data[0]);
309 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
310 pic->f.type = FF_BUFFER_TYPE_SHARED;
311 } else {
312 assert(!pic->f.data[0]);
313
314 if (alloc_frame_buffer(s, pic) < 0)
315 return -1;
316
317 s->linesize = pic->f.linesize[0];
318 s->uvlinesize = pic->f.linesize[1];
319 }
320
321 if (pic->f.qscale_table == NULL) {
322 if (s->encoding) {
323 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
324 mb_array_size * sizeof(int16_t), fail)
325 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
326 mb_array_size * sizeof(int16_t), fail)
327 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
328 mb_array_size * sizeof(int8_t ), fail)
329 }
330
331 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
332 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
333 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
334 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
335 fail)
336 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
337 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
338 fail)
339 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
340 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
341 if (s->out_format == FMT_H264) {
342 for (i = 0; i < 2; i++) {
343 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
344 2 * (b4_array_size + 4) * sizeof(int16_t),
345 fail)
346 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
347 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
348 4 * mb_array_size * sizeof(uint8_t), fail)
349 }
350 pic->f.motion_subsample_log2 = 2;
351 } else if (s->out_format == FMT_H263 || s->encoding ||
352 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
353 for (i = 0; i < 2; i++) {
354 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
355 2 * (b8_array_size + 4) * sizeof(int16_t),
356 fail)
357 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
358 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
359 4 * mb_array_size * sizeof(uint8_t), fail)
360 }
361 pic->f.motion_subsample_log2 = 3;
362 }
363 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
364 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
365 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
366 }
367 pic->f.qstride = s->mb_stride;
368 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
369 1 * sizeof(AVPanScan), fail)
370 }
371
372 /* It might be nicer if the application would keep track of these
373 * but it would require an API change. */
374 memmove(s->prev_pict_types + 1, s->prev_pict_types,
375 PREV_PICT_TYPES_BUFFER_SIZE-1);
376 s->prev_pict_types[0] = s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
377 if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE &&
378 s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
379 pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2
380 // and it is a bit tricky to skip them anyway.
381 pic->owner2 = s;
382
383 return 0;
384 fail: // for the FF_ALLOCZ_OR_GOTO macro
385 if (r >= 0)
386 free_frame_buffer(s, pic);
387 return -1;
388 }
389
390 /**
391 * deallocates a picture
392 */
/**
 * Deallocate a Picture: releases the frame buffer (unless the pixels are
 * externally shared) and frees all side-data tables.
 */
static void free_picture(MpegEncContext *s, Picture *pic)
{
    int i;

    /* only release pixels we obtained ourselves */
    if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
        free_frame_buffer(s, pic);
    }

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->f.mbskip_table);
    av_freep(&pic->qscale_table_base);
    av_freep(&pic->mb_type_base);
    av_freep(&pic->f.dct_coeff);
    av_freep(&pic->f.pan_scan);
    pic->f.mb_type = NULL;
    for (i = 0; i < 2; i++) {
        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->f.ref_index[i]);
    }

    /* shared pixels are not ours to free — just drop the pointers */
    if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
        for (i = 0; i < 4; i++) {
            pic->f.base[i] =
            pic->f.data[i] = NULL;
        }
        pic->f.type = 0;
    }
}
423
/**
 * Allocate the per-thread scratch buffers of a (possibly duplicated)
 * context: edge emulation buffer, ME scratchpads, DCT blocks and, for
 * H.263-family formats, the AC prediction values.
 *
 * On failure returns -1 and leaves partially allocated buffers for
 * MPV_common_end() to free.
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for  halfpel / 21x21 for  h264)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
                      (s->width + 64) * 2 * 21 * 2, fail);    // (width + edge + align)*interlaced*MBsize*tolerance

    // FIXME should be linesize instead of s->width * 2
    // but that is not known before get_buffer()
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
                      (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
    /* all scratchpads share one allocation */
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;
    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through MPV_common_end()
}
474
475 static void free_duplicate_context(MpegEncContext *s)
476 {
477 if (s == NULL)
478 return;
479
480 av_freep(&s->edge_emu_buffer);
481 av_freep(&s->me.scratchpad);
482 s->me.temp =
483 s->rd_scratchpad =
484 s->b_scratchpad =
485 s->obmc_scratchpad = NULL;
486
487 av_freep(&s->dct_error_sum);
488 av_freep(&s->me.map);
489 av_freep(&s->me.score_map);
490 av_freep(&s->blocks);
491 av_freep(&s->ac_val_base);
492 s->block = NULL;
493 }
494
495 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
496 {
497 #define COPY(a) bak->a = src->a
498 COPY(edge_emu_buffer);
499 COPY(me.scratchpad);
500 COPY(me.temp);
501 COPY(rd_scratchpad);
502 COPY(b_scratchpad);
503 COPY(obmc_scratchpad);
504 COPY(me.map);
505 COPY(me.score_map);
506 COPY(blocks);
507 COPY(block);
508 COPY(start_mb_y);
509 COPY(end_mb_y);
510 COPY(me.map_generation);
511 COPY(pb);
512 COPY(dct_error_sum);
513 COPY(dct_count[0]);
514 COPY(dct_count[1]);
515 COPY(ac_val_base);
516 COPY(ac_val[0]);
517 COPY(ac_val[1]);
518 COPY(ac_val[2]);
519 #undef COPY
520 }
521
/**
 * Synchronize a slice-thread context with the master: copy the whole
 * master context over dst, then restore dst's own thread-local fields
 * (saved in bak beforehand) and re-point pblocks into dst's own blocks.
 */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's block array, not src's */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
}
537
538 int ff_mpeg_update_thread_context(AVCodecContext *dst,
539 const AVCodecContext *src)
540 {
541 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
542
543 if (dst == src || !s1->context_initialized)
544 return 0;
545
546 // FIXME can parameters change on I-frames?
547 // in that case dst may need a reinit
548 if (!s->context_initialized) {
549 memcpy(s, s1, sizeof(MpegEncContext));
550
551 s->avctx = dst;
552 s->picture_range_start += MAX_PICTURE_COUNT;
553 s->picture_range_end += MAX_PICTURE_COUNT;
554 s->bitstream_buffer = NULL;
555 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
556
557 MPV_common_init(s);
558 }
559
560 s->avctx->coded_height = s1->avctx->coded_height;
561 s->avctx->coded_width = s1->avctx->coded_width;
562 s->avctx->width = s1->avctx->width;
563 s->avctx->height = s1->avctx->height;
564
565 s->coded_picture_number = s1->coded_picture_number;
566 s->picture_number = s1->picture_number;
567 s->input_picture_number = s1->input_picture_number;
568
569 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
570 memcpy(&s->last_picture, &s1->last_picture,
571 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
572
573 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
574 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
575 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
576
577 memcpy(s->prev_pict_types, s1->prev_pict_types,
578 PREV_PICT_TYPES_BUFFER_SIZE);
579
580 // Error/bug resilience
581 s->next_p_frame_damaged = s1->next_p_frame_damaged;
582 s->workaround_bugs = s1->workaround_bugs;
583
584 // MPEG4 timing info
585 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
586 (char *) &s1->shape - (char *) &s1->time_increment_bits);
587
588 // B-frame info
589 s->max_b_frames = s1->max_b_frames;
590 s->low_delay = s1->low_delay;
591 s->dropable = s1->dropable;
592
593 // DivX handling (doesn't work)
594 s->divx_packed = s1->divx_packed;
595
596 if (s1->bitstream_buffer) {
597 if (s1->bitstream_buffer_size +
598 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
599 av_fast_malloc(&s->bitstream_buffer,
600 &s->allocated_bitstream_buffer_size,
601 s1->allocated_bitstream_buffer_size);
602 s->bitstream_buffer_size = s1->bitstream_buffer_size;
603 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
604 s1->bitstream_buffer_size);
605 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
606 FF_INPUT_BUFFER_PADDING_SIZE);
607 }
608
609 // MPEG2/interlacing info
610 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
611 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
612
613 if (!s1->first_field) {
614 s->last_pict_type = s1->pict_type;
615 if (s1->current_picture_ptr)
616 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
617
618 if (s1->pict_type != AV_PICTURE_TYPE_B) {
619 s->last_non_b_pict_type = s1->pict_type;
620 }
621 }
622
623 return 0;
624 }
625
626 /**
627 * sets the given MpegEncContext to common defaults
628 * (same for encoding and decoding).
629 * the changed fields will not depend upon the
630 * prior state of the MpegEncContext.
631 */
632 void MPV_common_defaults(MpegEncContext *s)
633 {
634 s->y_dc_scale_table =
635 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
636 s->chroma_qscale_table = ff_default_chroma_qscale_table;
637 s->progressive_frame = 1;
638 s->progressive_sequence = 1;
639 s->picture_structure = PICT_FRAME;
640
641 s->coded_picture_number = 0;
642 s->picture_number = 0;
643 s->input_picture_number = 0;
644
645 s->picture_in_gop_number = 0;
646
647 s->f_code = 1;
648 s->b_code = 1;
649
650 s->picture_range_start = 0;
651 s->picture_range_end = MAX_PICTURE_COUNT;
652 }
653
654 /**
655 * sets the given MpegEncContext to defaults for decoding.
656 * the changed fields will not depend upon
657 * the prior state of the MpegEncContext.
658 */
/**
 * Set the given MpegEncContext to defaults for decoding.
 * Currently identical to the common defaults; exists so decoder-only
 * defaults can be added without touching encoders.
 */
void MPV_decode_defaults(MpegEncContext *s)
{
    MPV_common_defaults(s);
}
663
664 /**
665 * init common structure for both encoder and decoder.
666 * this assumes that some variables like width/height are already set
667 */
/**
 * Init common structure for both encoder and decoder.
 * This assumes that some variables like width/height are already set.
 *
 * Allocates everything whose size depends only on the frame dimensions:
 * MV tables (encoder), per-MB tables, DC/AC prediction arrays, skip
 * tables and the per-thread duplicate contexts.
 *
 * @return 0 on success; -1 on failure (MPV_common_end() is called to
 *         free any partial allocations before returning)
 */
av_cold int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
        threads = (s->encoding ||
                   (HAVE_THREADS &&
                    s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
                  s->avctx->thread_count : 1;

    /* interlaced MPEG-2 needs mb_height rounded to macroblock pairs */
    if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    /* clamp the slice-thread count to one slice per MB row */
    if ((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
        (s->avctx->thread_count > MAX_THREADS ||
         (s->avctx->thread_count > s->mb_height && s->mb_height))) {
        int max_threads = FFMIN(MAX_THREADS, s->mb_height);
        av_log(s->avctx, AV_LOG_WARNING,
               "too many threads (%d), reducing to %d\n",
               s->avctx->thread_count, max_threads);
        threads = max_threads;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    ff_dct_common_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    if (s->width && s->height) {
        s->mb_width   = (s->width + 15) / 16;
        s->mb_stride  = s->mb_width + 1;
        s->b8_stride  = s->mb_width * 2 + 1;
        s->b4_stride  = s->mb_width * 4 + 1;
        mb_array_size = s->mb_height * s->mb_stride;
        mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

        /* set chroma shifts */
        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                      &(s->chroma_y_shift) );

        /* set default edge pos, will be overriden
         * in decode_header if needed */
        s->h_edge_pos = s->mb_width  * 16;
        s->v_edge_pos = s->mb_height * 16;

        s->mb_num     = s->mb_width * s->mb_height;

        s->block_wrap[0] =
        s->block_wrap[1] =
        s->block_wrap[2] =
        s->block_wrap[3] = s->b8_stride;
        s->block_wrap[4] =
        s->block_wrap[5] = s->mb_stride;

        y_size  = s->b8_stride * (2 * s->mb_height + 1);
        c_size  = s->mb_stride * (s->mb_height + 1);
        yc_size = y_size + 2   * c_size;

        /* convert fourcc to upper case */
        s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);

        s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);

        s->avctx->coded_frame = (AVFrame *)&s->current_picture;

        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                          fail); // error resilience code looks cleaner with this
        for (y = 0; y < s->mb_height; y++)
            for (x = 0; x < s->mb_width; x++)
                s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

        s->mb_index2xy[s->mb_height * s->mb_width] =
                       (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

        if (s->encoding) {
            /* Allocate MV tables */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            /* usable tables start one row+column into the base allocation */
            s->p_mv_table            = s->p_mv_table_base +
                                       s->mb_stride + 1;
            s->b_forw_mv_table       = s->b_forw_mv_table_base +
                                       s->mb_stride + 1;
            s->b_back_mv_table       = s->b_back_mv_table_base +
                                       s->mb_stride + 1;
            s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
                                       s->mb_stride + 1;
            s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
                                       s->mb_stride + 1;
            s->b_direct_mv_table     = s->b_direct_mv_table_base +
                                       s->mb_stride + 1;

            if (s->msmpeg4_version) {
                FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                                  2 * 2 * (MAX_LEVEL + 1) *
                                  (MAX_RUN + 1) * 2 * sizeof(int), fail);
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

            /* Allocate MB type table */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
                              sizeof(uint16_t), fail); // needed for encoding

            FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
                              sizeof(int), fail);

            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
                              64 * 32   * sizeof(int), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
                              64 * 32   * sizeof(int), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
                              64 * 32 * 2 * sizeof(uint16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
                              64 * 32 * 2 * sizeof(uint16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);

            if (s->avctx->noise_reduction) {
                FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                                  2 * 64 * sizeof(uint16_t), fail);
            }
        }
    }

    /* one picture pool slice per thread */
    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      s->picture_count * sizeof(Picture), fail);
    for (i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
    }

    if (s->width && s->height) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
                          mb_array_size * sizeof(uint8_t), fail);

        if (s->codec_id == CODEC_ID_MPEG4 ||
            (s->flags & CODEC_FLAG_INTERLACED_ME)) {
            /* interlaced direct mode decoding tables */
            for (i = 0; i < 2; i++) {
                int j, k;
                for (j = 0; j < 2; j++) {
                    for (k = 0; k < 2; k++) {
                        FF_ALLOCZ_OR_GOTO(s->avctx,
                                          s->b_field_mv_table_base[i][j][k],
                                          mv_table_size * 2 * sizeof(int16_t),
                                          fail);
                        s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                       s->mb_stride + 1;
                    }
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
                                      mb_array_size * 2 * sizeof(uint8_t),
                                      fail);
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
                                                + s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
                                  mb_array_size * 2 * sizeof(uint8_t),
                                  fail);
            }
        }
        if (s->out_format == FMT_H263) {
            /* cbp values */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
            s->coded_block = s->coded_block_base + s->b8_stride + 1;

            /* cbp, ac_pred, pred_dir */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
                              mb_array_size * sizeof(uint8_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
                              mb_array_size * sizeof(uint8_t), fail);
        }

        if (s->h263_pred || s->h263_plus || !s->encoding) {
            /* dc values */
            // MN: we need these for  error resilience of intra-frames
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
                              yc_size * sizeof(int16_t), fail);
            s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
            s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
            s->dc_val[2] = s->dc_val[1] + c_size;
            for (i = 0; i < yc_size; i++)
                s->dc_val_base[i] = 1024;
        }

        /* which mb is a intra block */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
        memset(s->mbintra_table, 1, mb_array_size);

        /* init macroblock skip table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
        // Note the + 1 is for  a quicker mpeg4 slice_end detection
        FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types,
                          PREV_PICT_TYPES_BUFFER_SIZE, fail);

        s->parse_context.state = -1;
        if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
            (s->avctx->debug_mv)) {
            s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
                       2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
            s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
                       2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
            s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
                       2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
        }
    }

    s->context_initialized = 1;
    s->thread_context[0]   = s;

    if (s->width && s->height) {
        if (s->encoding || (HAVE_THREADS &&
            s->avctx->active_thread_type&FF_THREAD_SLICE)) {
            /* clone the context once per slice thread and assign each an
             * MB-row range */
            for (i = 1; i < threads; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
            }

            for (i = 0; i < threads; i++) {
                if (init_duplicate_context(s->thread_context[i], s) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + s->avctx->thread_count / 2) /
                    s->avctx->thread_count;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + s->avctx->thread_count / 2) /
                    s->avctx->thread_count;
            }
        } else {
            if (init_duplicate_context(s, s) < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
    }

    return 0;
 fail:
    MPV_common_end(s);
    return -1;
}
932
933 /* init common structure for both encoder and decoder */
/**
 * Free everything allocated by MPV_common_init() (safe to call on a
 * partially initialized context — all pointers are freed with av_freep
 * and reset). Counterpart of MPV_common_init for both encoder and decoder.
 */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* tear down the per-thread duplicate contexts first */
    if (s->encoding || (HAVE_THREADS &&
        s->avctx->active_thread_type & FF_THREAD_SLICE)) {
        for (i = 0; i < s->avctx->thread_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->avctx->thread_count; i++) {
            av_freep(&s->thread_context[i]);
        }
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    /* encoder MV tables: free the bases, clear the offset pointers */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* frame-thread copies share the pictures — only the owner frees them */
    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized      = 0;
    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;
    s->linesize = s->uvlinesize = 0;

    for (i = 0; i < 3; i++)
        av_freep(&s->visualization_buffer[i]);

    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);
}
1019
/**
 * Initialize the derived helper arrays of an RLTable (max_level, max_run
 * and index_run, one set for "not last" and one for "last" coefficients),
 * either into caller-provided static storage or into freshly allocated
 * buffers.
 * NOTE(review): the av_malloc() results in the dynamic path are not
 * checked before the following memcpy() — presumably accepted for these
 * tiny tables, but worth confirming against the project's OOM policy.
 */
void init_rl(RLTable *rl,
             uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        /* entries [0, rl->last) are "not last" codes,
         * entries [rl->last, rl->n) are "last" codes */
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        /* rl->n doubles as the "unset" marker in index_run */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)    /* first entry with this run */
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* static_store[last] layout:
         * [max_level: MAX_RUN+1][max_run: MAX_LEVEL+1][index_run: MAX_RUN+1] */
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
1071
1072 void init_vlc_rl(RLTable *rl)
1073 {
1074 int i, q;
1075
1076 for (q = 0; q < 32; q++) {
1077 int qmul = q * 2;
1078 int qadd = (q - 1) | 1;
1079
1080 if (q == 0) {
1081 qmul = 1;
1082 qadd = 0;
1083 }
1084 for (i = 0; i < rl->vlc.table_size; i++) {
1085 int code = rl->vlc.table[i][0];
1086 int len = rl->vlc.table[i][1];
1087 int level, run;
1088
1089 if (len == 0) { // illegal code
1090 run = 66;
1091 level = MAX_LEVEL;
1092 } else if (len < 0) { // more bits needed
1093 run = 0;
1094 level = code;
1095 } else {
1096 if (code == rl->n) { // esc
1097 run = 66;
1098 level = 0;
1099 } else {
1100 run = rl->table_run[code] + 1;
1101 level = rl->table_level[code] * qmul + qadd;
1102 if (code >= rl->last) run += 192;
1103 }
1104 }
1105 rl->rl_vlc[q][i].len = len;
1106 rl->rl_vlc[q][i].level = level;
1107 rl->rl_vlc[q][i].run = run;
1108 }
1109 }
1110 }
1111
1112 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1113 {
1114 int i;
1115
1116 /* release non reference frames */
1117 for (i = 0; i < s->picture_count; i++) {
1118 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1119 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1120 (remove_current || &s->picture[i] != s->current_picture_ptr)
1121 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1122 free_frame_buffer(s, &s->picture[i]);
1123 }
1124 }
1125 }
1126
1127 int ff_find_unused_picture(MpegEncContext *s, int shared)
1128 {
1129 int i;
1130
1131 if (shared) {
1132 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1133 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1134 return i;
1135 }
1136 } else {
1137 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1138 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1139 return i; // FIXME
1140 }
1141 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1142 if (s->picture[i].f.data[0] == NULL)
1143 return i;
1144 }
1145 }
1146
1147 av_log(s->avctx, AV_LOG_FATAL,
1148 "Internal error, picture buffer overflow\n");
1149 /* We could return -1, but the codec would crash trying to draw into a
1150 * non-existing frame anyway. This is safer than waiting for a random crash.
1151 * Also the return of this is never useful, an encoder must only allocate
1152 * as much as allowed in the specification. This has no relationship to how
1153 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1154 * enough for such valid streams).
1155 * Plus, a decoder has to check stream validity and remove frames if too
1156 * many reference frames are around. Waiting for "OOM" is not correct at
1157 * all. Similarly, missing reference frames have to be replaced by
1158 * interpolated/MC frames, anything else is a bug in the codec ...
1159 */
1160 abort();
1161 return -1;
1162 }
1163
1164 static void update_noise_reduction(MpegEncContext *s){
1165 int intra, i;
1166
1167 for(intra=0; intra<2; intra++){
1168 if(s->dct_count[intra] > (1<<16)){
1169 for(i=0; i<64; i++){
1170 s->dct_error_sum[intra][i] >>=1;
1171 }
1172 s->dct_count[intra] >>= 1;
1173 }
1174
1175 for(i=0; i<64; i++){
1176 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1177 }
1178 }
1179 }
1180
/**
 * Generic function for encode/decode called after coding/decoding the
 * header and before a frame is coded/decoded: releases stale reference
 * pictures, selects/allocates the current picture, manages the
 * last/next reference pointers (allocating dummy frames when references
 * are missing), and selects the dequantizer functions.
 * @return 0 on success, -1 if a picture could not be allocated
 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark&release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
        if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
            /* only release buffers this context owns (slice threading) */
            if (s->last_picture_ptr->owner2 == s)
                free_frame_buffer(s, s->last_picture_ptr);

            /* release forgotten pictures */
            /* if(mpeg124/h263) */
            if(!s->encoding){
                for(i=0; i<s->picture_count; i++){
                    if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
                        /* with frame threading such leftovers are expected,
                         * so only warn in the single-threaded case */
                        if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                            av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                        free_frame_buffer(s, &s->picture[i]);
                    }
                }
            }
        }
    }

    if(!s->encoding){
        ff_release_unused_pictures(s, 1);

        if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
            pic= s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
        else{
            i= ff_find_unused_picture(s, 0);
            pic= &s->picture[i];
        }

        /* reference value: H.264 stores the picture structure, other
         * codecs use 3 (both fields) for non-B frames */
        pic->f.reference = 0;
        if (!s->dropable){
            if (s->codec_id == CODEC_ID_H264)
                pic->f.reference = s->picture_structure;
            else if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->f.reference = 3;
        }

        pic->f.coded_picture_number = s->coded_picture_number++;

        if(ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr= pic;
        //FIXME use only the vars from current_pic
        s->current_picture_ptr->f.top_field_first = s->top_field_first;
        if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
            /* for field pictures, derive top_field_first from which field
             * came first in the bitstream */
            if(s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
        s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->f.pict_type = s->pict_type;
    // if(s->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality= s->new_picture_ptr->quality;
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* advance the reference chain: the old "next" becomes "last", and the
     * new picture becomes "next" (unless it is droppable) */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr= s->next_picture_ptr;
        if(!s->dropable)
            s->next_picture_ptr= s->current_picture_ptr;
    }
/*  av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
        s->last_picture_ptr    ? s->last_picture_ptr->f.data[0]    : NULL,
        s->next_picture_ptr    ? s->next_picture_ptr->f.data[0]    : NULL,
        s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
        s->pict_type, s->dropable);*/

    if(s->codec_id != CODEC_ID_H264){
        /* missing reference pictures are replaced by gray dummy frames so
         * motion compensation has something to read from */
        if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
           (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");

            /* Allocate a dummy frame */
            i= ff_find_unused_picture(s, 0);
            s->last_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
                return -1;
            /* mark both fields as fully decoded for frame threading */
            ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
            ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
        }
        if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
            /* Allocate a dummy frame */
            i= ff_find_unused_picture(s, 0);
            s->next_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
                return -1;
            ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
            ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
        }
    }

    if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));

    /* for field pictures, point data[] at the requested field and double
     * the linesizes so the code below sees a half-height frame */
    if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
        int i;
        for(i=0; i<4; i++){
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
            }
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i] *= 2;
            s->next_picture.f.linesize[i] *= 2;
        }
    }

    s->error_recognition= avctx->error_recognition;

    /* set dequantizer, we can't do it during init as it might change for mpeg4
       and we can't do it in the header decode as init is not called for mpeg4 there yet */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
1334
/* Generic function for encode/decode called after a frame has been
 * coded/decoded: pads the picture edges for unrestricted MV codecs,
 * updates the last-picture bookkeeping, releases non-reference frames
 * when encoding and reports decode progress for frame threading. */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* redraw edges for the frame if decoding didn't complete */
    //just to make sure that all data is rendered.
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_field_end(s);
    }else if((s->error_count || s->encoding)
             && !s->avctx->hwaccel
             && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
             && s->unrestricted_mv
             && s->current_picture.f.reference
             && !s->intra_only
             && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        /* chroma planes are padded at their (subsampled) resolution */
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                          s->h_edge_pos , s->v_edge_pos,
                          EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                          s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                          s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();   /* leave MMX state so float code after us is safe */

    s->last_pict_type = s->pict_type;
    s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
    if(s->pict_type!=AV_PICTURE_TYPE_B){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release non-reference frames */
        for(i=0; i<s->picture_count; i++){
            if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;

    /* tell waiting frame threads the whole picture is done */
    if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
        ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
    }
}
1401
/**
 * Draw a line between (sx, sy) and (ex, ey), additively blending the
 * color onto the buffer with 16.16 fixed-point anti-aliasing.
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color brightness added along the line
 */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int x, y, fr, f;

    /* clip both endpoints into the image */
    sx= av_clip(sx, 0, w-1);
    sy= av_clip(sy, 0, h-1);
    ex= av_clip(ex, 0, w-1);
    ey= av_clip(ey, 0, h-1);

    buf[sy*stride + sx]+= color;

    if(FFABS(ex - sx) > FFABS(ey - sy)){
        /* x-major: step along x, interpolate y (fr = fractional coverage) */
        if(sx > ex){
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ex-= sx;
        f= ((ey-sy)<<16)/ex;
        for(x= 0; x <= ex; x++){
            y = (x*f)>>16;
            fr= (x*f)&0xFFFF;
            /* split the color between the two adjacent rows;
             * NOTE(review): the y+1 write can touch one row past the clipped
             * endpoint — assumed covered by buffer slack, confirm at callers */
            buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
            buf[(y+1)*stride + x]+= (color* fr       )>>16;
        }
    }else{
        /* y-major: step along y, interpolate x */
        if(sy > ey){
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ey-= sy;
        if(ey) f= ((ex-sx)<<16)/ey;
        else   f= 0;       /* degenerate: single point */
        for(y= 0; y <= ey; y++){
            x = (y*f)>>16;
            fr= (y*f)&0xFFFF;
            buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
            buf[y*stride + x+1]+= (color* fr         )>>16;
        }
    }
}
1450
/**
 * Draw an arrow from (ex, ey) to (sx, sy); the two head strokes are
 * drawn at (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int dx,dy;

    /* loose pre-clip; draw_line() does the exact per-pixel clipping */
    sx= av_clip(sx, -100, w+100);
    sy= av_clip(sy, -100, h+100);
    ex= av_clip(ex, -100, w+100);
    ey= av_clip(ey, -100, h+100);

    dx= ex - sx;
    dy= ey - sy;

    /* only draw a head when the shaft is longer than 3 pixels */
    if(dx*dx + dy*dy > 3*3){
        /* (rx, ry) is the shaft direction rotated by 45 degrees */
        int rx= dx + dy;
        int ry= -dx + dy;
        int length= ff_sqrt((rx*rx + ry*ry)<<8);

        //FIXME subpixel accuracy
        /* normalize the head strokes to a fixed length */
        rx= ROUNDED_DIV(rx*3<<4, length);
        ry= ROUNDED_DIV(ry*3<<4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1483
/**
 * Print debugging info for the given picture: per-macroblock skip/QP/type
 * tables as text (FF_DEBUG_SKIP/QP/MB_TYPE), and motion vectors, QP or
 * MB-type coloring drawn into a copy of the picture
 * (debug_mv / FF_DEBUG_VIS_QP / FF_DEBUG_VIS_MB_TYPE).
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){

    if(s->avctx->hwaccel || !pict || !pict->mb_type) return;

    /* ---- textual per-MB dump ---- */
    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
        case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
        case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
        case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
        case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
        case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
        case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
        }
        for(y=0; y<s->mb_height; y++){
            for(x=0; x<s->mb_width; x++){
                if(s->avctx->debug&FF_DEBUG_SKIP){
                    int count= s->mbskip_table[x + y*s->mb_stride];
                    if(count>9) count=9;    /* single digit only */
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if(s->avctx->debug&FF_DEBUG_QP){
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
                }
                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
                    int mb_type= pict->mb_type[x + y*s->mb_stride];
                    /* first char: type & MV direction */
                    if(IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if(IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if(IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if(IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if(IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if(IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if(!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if(!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    /* second char: segmentation / partition shape */
                    if(IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if(IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if(IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    /* third char: interlacing */
                    if(IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
//                av_log(s->avctx, AV_LOG_DEBUG, " ");
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* ---- visual overlay (drawn into a copy of the picture) ---- */
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
        const int shift= 1 + s->quarter_sample;   /* MV units -> full pel */
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width = s->avctx->width;
        const int height= s->avctx->height;
        const int mv_sample_log2= 4 - pict->motion_subsample_log2;
        const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
        s->low_delay=0; //needed to see the vectors without trashing the buffers

        /* draw into a private copy so the real output is untouched */
        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
        for(i=0; i<3; i++){
            memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
            pict->data[i]= s->visualization_buffer[i];
        }
        pict->type= FF_BUFFER_TYPE_COPY;
        ptr= pict->data[0];
        block_height = 16>>v_chroma_shift;   /* MB height in chroma rows */

        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            int mb_x;
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                const int mb_index= mb_x + mb_y*s->mb_stride;
                /* motion vector arrows, one pass per requested MV type */
                if((s->avctx->debug_mv) && pict->motion_val){
                    int type;
                    for(type=0; type<3; type++){
                        /* type 0: P forward, 1: B forward, 2: B backward */
                        int direction = 0;
                        switch (type) {
                        case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
                                    continue;
                                direction = 0;
                                break;
                        case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
                                    continue;
                                direction = 0;
                                break;
                        case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
                                    continue;
                                direction = 1;
                                break;
                        }
                        if(!USES_LIST(pict->mb_type[mb_index], direction))
                            continue;

                        /* one arrow per partition, anchored at its center */
                        if(IS_8X8(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<4; i++){
                                int sx= mb_x*16 + 4 + 8*(i&1);
                                int sy= mb_y*16 + 4 + 8*(i>>1);
                                int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                                int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                                int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                            }
                        }else if(IS_16X8(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 8;
                                int sy=mb_y*16 + 4 + 8*i;
                                int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2;    /* field MVs are in field units */

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else if(IS_8X16(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 4 + 8*i;
                                int sy=mb_y*16 + 8;
                                int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2;

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else{
                            int sx= mb_x*16 + 8;
                            int sy= mb_y*16 + 8;
                            int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
                            int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                            int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                        }
                    }
                }
                /* QP visualization: tint chroma by quantizer */
                if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
                    uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
                    int y;
                    for(y=0; y<block_height; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
                    }
                }
                /* MB-type visualization: color chroma by prediction type */
                if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
                    int mb_type= pict->mb_type[mb_index];
                    uint64_t u,v;
                    int y;
/* pick a U/V pair on a hue circle of radius r at angle theta (degrees) */
#define COLOR(theta, r)\
u= (int)(128 + r*cos(theta*3.141592/180));\
v= (int)(128 + r*sin(theta*3.141592/180));


                    u=v=128;
                    if(IS_PCM(mb_type)){
                        COLOR(120,48)
                    }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
                        COLOR(30,48)
                    }else if(IS_INTRA4x4(mb_type)){
                        COLOR(90,48)
                    }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
//                        COLOR(120,48)
                    }else if(IS_DIRECT(mb_type)){
                        COLOR(150,48)
                    }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
                        COLOR(170,48)
                    }else if(IS_GMC(mb_type)){
                        COLOR(190,48)
                    }else if(IS_SKIP(mb_type)){
//                        COLOR(180,48)
                    }else if(!USES_LIST(mb_type, 1)){
                        COLOR(240,48)
                    }else if(!USES_LIST(mb_type, 0)){
                        COLOR(0,48)
                    }else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300,48)
                    }

                    u*= 0x0101010101010101ULL;
                    v*= 0x0101010101010101ULL;
                    for(y=0; y<block_height; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
                    }

                    /* segmentation: XOR luma lines at the partition borders */
                    if(IS_8X8(mb_type) || IS_16X8(mb_type)){
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                    }
                    if(IS_8X8(mb_type) || IS_8X16(mb_type)){
                        for(y=0; y<16; y++)
                            pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
                    }
                    if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
                        int dm= 1 << (mv_sample_log2-2);
                        for(i=0; i<4; i++){
                            int sx= mb_x*16 + 8*(i&1);
                            int sy= mb_y*16 + 8*(i>>1);
                            int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                            //FIXME bidir
                            int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
                            /* mark 4x4 sub-partition borders when the MVs differ */
                            if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
                                for(y=0; y<8; y++)
                                    pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
                            if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
                                *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
                        }
                    }

                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
                        // hmm
                    }
                }
                s->mbskip_table[mb_index]=0;
            }
        }
    }
}
1745
/**
 * Half-pel motion compensation of a single w x h block for lowres
 * decoding, with edge emulation when the source area leaves the
 * padded picture.
 * @return 1 if the edge emulation buffer was used, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, int stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);
    /* sub-pel fraction mask at this lowres level */
    const int s_mask= (2<<lowres)-1;
    int emu=0;
    int sx, sy;

    /* qpel vectors are approximated by hpel in lowres mode */
    if(s->quarter_sample){
        motion_x/=2;
        motion_y/=2;
    }

    /* split the vector into integer position and sub-pel fraction */
    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x += motion_x >> (lowres+1);
    src_y += motion_y >> (lowres+1);

    src += src_y * stride + src_x;

    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                                src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
        src= s->edge_emu_buffer;
        emu=1;
    }

    /* rescale the fraction into the 0..3 range the mc functions expect */
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    if(field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
1788
/* Apply one mpeg motion vector to the three components (luma + both
 * chroma planes) for lowres decoding, including field handling, chroma
 * vector derivation per output format, and edge emulation. */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);
    const int block_s= 8>>lowres;          /* chroma block size at this lowres */
    const int s_mask= (2<<lowres)-1;       /* sub-pel fraction mask */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
    src_y =(   mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    /* derive the chroma vector; the rule depends on the output format */
    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> lowres);
        uvsrc_y =    mb_y*block_s               + (my >> lowres);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
        uvsrc_y =(   mb_y*block_s>>field_based) + (my >> (lowres+1));
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                s->dsp.emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    /* NOTE(review): luma uses pix_op[lowres-1] because the luma block is
     * twice the chroma block size; presumably this path is only reached
     * with lowres >= 1 (otherwise the index would be -1) — confirm */
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);

    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        uvsx= (uvsx << 2) >> lowres;
        uvsy= (uvsy << 2) >> lowres;
        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
    }
    //FIXME h261 lowres loop filter
}
1886
/**
 * Chroma motion compensation for 4MV (four 8x8 luma vectors) macroblocks
 * at reduced ("lowres") resolution.
 *
 * @param s        codec context
 * @param dest_cb  Cb destination pointer
 * @param dest_cr  Cr destination pointer
 * @param ref_picture array[3] of pointers to the reference picture planes
 * @param pix_op   table of h264-style chroma MC functions (put or avg)
 * @param mx,my    sum of the four luma motion vectors (averaged below)
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op,
                                     int mx, int my){
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);     // MC function table has 3 entries
    const int block_s= 8>>lowres;             // chroma block size at this lowres
    const int s_mask= (2<<lowres)-1;          // mask for the subpel fraction bits
    const int h_edge_pos = s->h_edge_pos >> (lowres+1);
    const int v_edge_pos = s->v_edge_pos >> (lowres+1);
    int emu=0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    if(s->quarter_sample){
        // reduce qpel vectors to hpel precision first
        mx/=2;
        my/=2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx= ff_h263_round_chroma(mx);
    my= ff_h263_round_chroma(my);

    sx= mx & s_mask;                          // subpel fraction
    sy= my & s_mask;
    src_x = s->mb_x*block_s + (mx >> (lowres+1));
    src_y = s->mb_y*block_s + (my >> (lowres+1));

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        // fall back to edge emulation when the read would leave the padded area
        if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
           || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    sx= (sx << 2) >> lowres;                  // rescale fraction for the MC function
    sy= (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if(emu){
        // Cr needs the same emulation as Cb (same geometry)
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
        ptr= s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
1937
1938 /**
1939 * motion compensation of a single macroblock
1940 * @param s context
1941 * @param dest_y luma destination pointer
1942 * @param dest_cb chroma cb/u destination pointer
1943 * @param dest_cr chroma cr/v destination pointer
1944 * @param dir direction (0->forward, 1->backward)
1945 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1946 * @param pix_op halfpel motion compensation function (average or put normally)
1947 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1948 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;             // 8x8 block size at this lowres

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                    0, 0, 0,
                    ref_picture, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; chroma uses their rounded average */
        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
                        ref_picture[0], 0, 0,
                        (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
                        s->width, s->height, s->linesize,
                        s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                        block_s, block_s, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: one vector per field */
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 1, s->field_select[dir][1],
                        ref_picture, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
        } else {
            /* field picture: the opposite-parity field of the current frame may
             * be the reference (second field of the same frame) */
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
                ref_picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, each covering a 16x8 half of the macroblock */
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
                ref2picture= ref_picture;
            }else{
                /* reference is the first field of the current frame */
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);

            dest_y += 2*block_s*s->linesize;
            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: predictions from both parities are averaged */
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);

                // after put we make avg of the same block
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default: assert(0);
    }
}
2063
2064 /**
2065 * find the lowest MB row referenced in the MVs
2066 */
2067 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2068 {
2069 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2070 int my, off, i, mvs;
2071
2072 if (s->picture_structure != PICT_FRAME) goto unhandled;
2073
2074 switch (s->mv_type) {
2075 case MV_TYPE_16X16:
2076 mvs = 1;
2077 break;
2078 case MV_TYPE_16X8:
2079 mvs = 2;
2080 break;
2081 case MV_TYPE_8X8:
2082 mvs = 4;
2083 break;
2084 default:
2085 goto unhandled;
2086 }
2087
2088 for (i = 0; i < mvs; i++) {
2089 my = s->mv[dir][i][1]<<qpel_shift;
2090 my_max = FFMAX(my_max, my);
2091 my_min = FFMIN(my_min, my);
2092 }
2093
2094 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2095
2096 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2097 unhandled:
2098 return s->mb_height-1;
2099 }
2100
2101 /* put block[] to dest[] */
2102 static inline void put_dct(MpegEncContext *s,
2103 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2104 {
2105 s->dct_unquantize_intra(s, block, i, qscale);
2106 s->dsp.idct_put (dest, line_size, block);
2107 }
2108
2109 /* add block[] to dest[] */
2110 static inline void add_dct(MpegEncContext *s,
2111 DCTELEM *block, int i, uint8_t *dest, int line_size)
2112 {
2113 if (s->block_last_index[i] >= 0) {
2114 s->dsp.idct_add (dest, line_size, block);
2115 }
2116 }
2117
2118 static inline void add_dequant_dct(MpegEncContext *s,
2119 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2120 {
2121 if (s->block_last_index[i] >= 0) {
2122 s->dct_unquantize_inter(s, block, i, qscale);
2123
2124 s->dsp.idct_add (dest, line_size, block);
2125 }
2126 }
2127
2128 /**
2129 * cleans dc, ac, coded_block for the current non intra MB
2130 */
2131 void ff_clean_intra_table_entries(MpegEncContext *s)
2132 {
2133 int wrap = s->b8_stride;
2134 int xy = s->block_index[0];
2135
2136 s->dc_val[0][xy ] =
2137 s->dc_val[0][xy + 1 ] =
2138 s->dc_val[0][xy + wrap] =
2139 s->dc_val[0][xy + 1 + wrap] = 1024;
2140 /* ac pred */
2141 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2142 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2143 if (s->msmpeg4_version>=3) {
2144 s->coded_block[xy ] =
2145 s->coded_block[xy + 1 ] =
2146 s->coded_block[xy + wrap] =
2147 s->coded_block[xy + 1 + wrap] = 0;
2148 }
2149 /* chroma */
2150 wrap = s->mb_stride;
2151 xy = s->mb_x + s->mb_y * wrap;
2152 s->dc_val[1][xy] =
2153 s->dc_val[2][xy] = 1024;
2154 /* ac pred */
2155 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2156 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2157
2158 s->mbintra_table[xy]= 0;
2159 }
2160
2161 /* generic function called after a macroblock has been parsed by the
2162 decoder or after it has been encoded by the encoder.
2163
2164 Important variables used:
2165 s->mb_intra : true if intra macroblock
2166 s->mv_dir : motion vector direction
2167 s->mv_type : motion vector type
2168 s->mv : motion vector
2169 s->interlaced_dct : true if interlaced dct used (mpeg2)
2170 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* save DCT coefficients */
       int i,j;
       DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               // store in raster order (undo the IDCT permutation)
               *dct++ = block[i][s->dsp.idct_permutation[j]];
               av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.f.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* reconstruct the MB unless encoding can prove the pixels are unneeded */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
            const int age = s->current_picture.f.age;

            assert(age);

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=AV_PICTURE_TYPE_I);

                (*mbskip_ptr) ++; /* indicate that this time we skipped it */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;

                /* if previous was skipped too, then nothing to do !  */
                if (*mbskip_ptr >= age && s->current_picture.f.reference){
                    return;
                }
            } else if(!s->current_picture.f.reference) {
                (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        dct_linesize = linesize << s->interlaced_dct;
        dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* B frame whose pixels nobody reads: render to scratch */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){

                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    /* wait until the referenced rows of the other frame are decoded */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
                    }
                }

                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                        // forward was "put"; backward must average on top of it
                        op_pix = s->dsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
                /* coefficients were already dequantized by the decoder */
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* MPEG-1/2 intra blocks arrive already dequantized */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        if(!readable){
            /* copy the scratch reconstruction into the real destination */
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
2421
2422 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2423 #if !CONFIG_SMALL
2424 if(s->out_format == FMT_MPEG1) {
2425 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2426 else MPV_decode_mb_internal(s, block, 0, 1);
2427 } else
2428 #endif
2429 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2430 else MPV_decode_mb_internal(s, block, 0, 0);
2431 }
2432
/**
 * Pad the edges of the just-reconstructed band and pass it to the
 * application's draw_horiz_band callback, if any.
 *
 * @param y the top MB row of the band (field rows for field pictures)
 * @param h is the normal height, this will be reduced automatically if needed for the last row
 */
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
    const int field_pic= s->picture_structure != PICT_FRAME;
    if(field_pic){
        // convert field coordinates to frame coordinates
        h <<= 1;
        y <<= 1;
    }

    if (!s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.f.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        /* replicate the band's border pixels into the padding area so later
         * unrestricted-MV motion compensation can read outside the picture */
        int sides = 0, edge_h;
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;

        edge_h= FFMIN(h, s->v_edge_pos - y);

        s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
                          s->linesize, s->h_edge_pos, edge_h,
                          EDGE_WIDTH, EDGE_WIDTH, sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
    }

    h= FFMIN(h, s->avctx->height - y);

    if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (s->avctx->draw_horiz_band) {
        AVFrame *src;
        int offset[AV_NUM_DATA_POINTERS];
        int i;

        /* pick the frame to display: current in coded order, otherwise the
         * previous reference frame (display order) */
        if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
            src= (AVFrame*)s->current_picture_ptr;
        else if(s->last_picture_ptr)
            src= (AVFrame*)s->last_picture_ptr;
        else
            return;

        if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }else{
            offset[0]= y * s->linesize;
            offset[1]=
            offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }

        emms_c();  // leave MMX state clean before calling user code

        s->avctx->draw_horiz_band(s->avctx, src, offset,
                                  y, s->picture_structure, h);
    }
}
2502
/**
 * Initialize s->block_index[] and s->dest[] for the start of the current
 * macroblock row (positions are one MB left of column 0, to be advanced
 * per macroblock).
 */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    const int mb_size= 4 - s->avctx->lowres;  // log2 of the MB size in pixels

    /* luma: four 8x8 block positions in the b8 grid */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* chroma: one position per MB, stored after the luma plane entries */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
        s->dest[0] += s->mb_y *   linesize << mb_size;
        s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: mb_y counts field rows, halve it for the offset */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
2534
/**
 * Flush the decoder state: release all internally/user allocated picture
 * buffers, drop the reference picture pointers and reset the parser state.
 * Called on seek and similar discontinuities.
 */
void ff_mpeg_flush(AVCodecContext *avctx){
    int i;
    MpegEncContext *s = avctx->priv_data;

    if(s==NULL || s->picture==NULL)
        return;

    for(i=0; i<s->picture_count; i++){
       // only free buffers this codec allocated (or got from the user)
       if (s->picture[i].f.data[0] &&
           (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
            s->picture[i].f.type == FF_BUFFER_TYPE_USER))
        free_frame_buffer(s, &s->picture[i]);
    }
    s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;

    s->mb_x= s->mb_y= 0;

    /* reset the bitstream parser so leftover data is not reused */
    s->parse_context.state= -1;
    s->parse_context.frame_start_found= 0;
    s->parse_context.overread= 0;
    s->parse_context.overread_index= 0;
    s->parse_context.index= 0;
    s->parse_context.last_index= 0;
    s->bitstream_buffer_size=0;
    s->pp_time=0;
}
2561
2562 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2563 DCTELEM *block, int n, int qscale)
2564 {
2565 int i, level, nCoeffs;
2566 const uint16_t *quant_matrix;
2567
2568 nCoeffs= s->block_last_index[n];
2569
2570 if (n < 4)
2571 block[0] = block[0] * s->y_dc_scale;
2572 else
2573 block[0] = block[0] * s->c_dc_scale;
2574 /* XXX: only mpeg1 */
2575 quant_matrix = s->intra_matrix;
2576 for(i=1;i<=nCoeffs;i++) {
2577 int j= s->intra_scantable.permutated[i];
2578 level = block[j];
2579 if (level) {
2580 if (level < 0) {
2581 level = -level;
2582 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2583 level = (level - 1) | 1;
2584 level = -level;
2585 } else {
2586 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2587 level = (level - 1) | 1;
2588 }
2589 block[j] = level;
2590 }
2591 }
2592 }
2593
2594 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2595 DCTELEM *block, int n, int qscale)
2596 {
2597 int i, level, nCoeffs;
2598 const uint16_t *quant_matrix;
2599
2600 nCoeffs= s->block_last_index[n];
2601
2602 quant_matrix = s->inter_matrix;
2603 for(i=0; i<=nCoeffs; i++) {
2604 int j= s->intra_scantable.permutated[i];
2605 level = block[j];
2606 if (level) {
2607 if (level < 0) {
2608 level = -level;
2609 level = (((level << 1) + 1) * qscale *
2610 ((int) (quant_matrix[j]))) >> 4;
2611 level = (level - 1) | 1;
2612 level = -level;
2613 } else {
2614 level = (((level << 1) + 1) * qscale *
2615 ((int) (quant_matrix[j]))) >> 4;
2616 level = (level - 1) | 1;
2617 }
2618 block[j] = level;
2619 }
2620 }
2621 }
2622
2623 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2624 DCTELEM *block, int n, int qscale)
2625 {
2626 int i, level, nCoeffs;
2627 const uint16_t *quant_matrix;
2628
2629 if(s->alternate_scan) nCoeffs= 63;
2630 else nCoeffs= s->block_last_index[n];
2631
2632 if (n < 4)
2633 block[0] = block[0] * s->y_dc_scale;
2634 else
2635 block[0] = block[0] * s->c_dc_scale;
2636 quant_matrix = s->intra_matrix;
2637 for(i=1;i<=nCoeffs;i++) {
2638 int j= s->intra_scantable.permutated[i];
2639 level = block[j];
2640 if (level) {
2641 if (level < 0) {
2642 level = -level;
2643 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2644 level = -level;
2645 } else {
2646 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2647 }
2648 block[j] = level;
2649 }
2650 }
2651 }
2652
2653 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2654 DCTELEM *block, int n, int qscale)
2655 {
2656 int i, level, nCoeffs;
2657 const uint16_t *quant_matrix;
2658 int sum=-1;
2659
2660 if(s->alternate_scan) nCoeffs= 63;
2661 else nCoeffs= s->block_last_index[n];
2662
2663 if (n < 4)
2664 block[0] = block[0] * s->y_dc_scale;
2665 else
2666 block[0] = block[0] * s->c_dc_scale;
2667 quant_matrix = s->intra_matrix;
2668 for(i=1;i<=nCoeffs;i++) {
2669 int j= s->intra_scantable.permutated[i];
2670 level = block[j];
2671 if (level) {
2672 if (level < 0) {
2673 level = -level;
2674 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2675 level = -level;
2676 } else {
2677 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2678 }
2679 block[j] = level;
2680 sum+=level;
2681 }
2682 }
2683 block[63]^=sum&1;
2684 }
2685
2686 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2687 DCTELEM *block, int n, int qscale)
2688 {
2689 int i, level, nCoeffs;
2690 const uint16_t *quant_matrix;
2691 int sum=-1;
2692
2693 if(s->alternate_scan) nCoeffs= 63;
2694 else nCoeffs= s->block_last_index[n];
2695
2696 quant_matrix = s->inter_matrix;
2697 for(i=0; i<=nCoeffs; i++) {
2698 int j= s->intra_scantable.permutated[i];
2699 level = block[j];
2700 if (level) {
2701 if (level < 0) {
2702 level = -level;
2703 level = (((level << 1) + 1) * qscale *
2704 ((int) (quant_matrix[j]))) >> 4;
2705 level = -level;
2706 } else {
2707 level = (((level << 1) + 1) * qscale *
2708 ((int) (quant_matrix[j]))) >> 4;
2709 }
2710 block[j] = level;
2711 sum+=level;
2712 }
2713 }
2714 block[63]^=sum&1;
2715 }
2716
2717 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2718 DCTELEM *block, int n, int qscale)
2719 {
2720 int i, level, qmul, qadd;
2721 int nCoeffs;
2722
2723 assert(s->block_last_index[n]>=0);
2724
2725 qmul = qscale << 1;
2726
2727 if (!s->h263_aic) {
2728 if (n < 4)
2729 block[0] = block[0] * s->y_dc_scale;
2730 else
2731 block[0] = block[0] * s->c_dc_scale;
2732 qadd = (qscale - 1) | 1;
2733 }else{
2734 qadd = 0;
2735 }
2736 if(s->ac_pred)
2737 nCoeffs=63;
2738 else
2739 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2740
2741 for(i=1; i<=nCoeffs; i++) {
2742 level = block[i];
2743 if (level) {
2744 if (level < 0) {
2745 level = level * qmul - qadd;
2746 } else {
2747 level = level * qmul + qadd;
2748 }
2749 block[i] = level;
2750 }
2751 }
2752 }
2753
2754 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2755 DCTELEM *block, int n, int qscale)
2756 {
2757 int i, level, qmul, qadd;
2758 int nCoeffs;
2759
2760 assert(s->block_last_index[n]>=0);
2761
2762 qadd = (qscale - 1) | 1;
2763 qmul = qscale << 1;
2764
2765 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2766
2767 for(i=0; i<=nCoeffs; i++) {
2768 level = block[i];
2769 if (level) {
2770 if (level < 0) {
2771 level = level * qmul - qadd;
2772 } else {
2773 level = level * qmul + qadd;
2774 }
2775 block[i] = level;
2776 }
2777 }
2778 }
2779
2780 /**
2781 * set qscale and update qscale dependent variables.
2782 */
2783 void ff_set_qscale(MpegEncContext * s, int qscale)
2784 {
2785 if (qscale < 1)
2786 qscale = 1;
2787 else if (qscale > 31)
2788 qscale = 31;
2789
2790 s->qscale = qscale;
2791 s->chroma_qscale= s->chroma_qscale_table[qscale];
2792
2793 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2794 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2795 }
2796
2797 void MPV_report_decode_progress(MpegEncContext *s)
2798 {
2799 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2800 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
2801 }