/* error_resilience: decouple ER from MpegEncContext
 * [libav.git] / libavcodec / mpegvideo.c */
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include "libavutil/imgutils.h"
31 #include "avcodec.h"
32 #include "dsputil.h"
33 #include "internal.h"
34 #include "mathops.h"
35 #include "mpegvideo.h"
36 #include "mjpegenc.h"
37 #include "msmpeg4.h"
38 #include "xvmc_internal.h"
39 #include "thread.h"
40 #include <limits.h>
41
42 //#undef NDEBUG
43 //#include <assert.h>
44
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
59
60
61 /* enable all paranoid tests for rounding, overflows, etc... */
62 //#define PARANOID
63
64 //#define DEBUG
65
66
67 static const uint8_t ff_default_chroma_qscale_table[32] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
70 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
71 };
72
73 const uint8_t ff_mpeg1_dc_scale_table[128] = {
74 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 };
84
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 };
96
97 static const uint8_t mpeg2_dc_scale_table2[128] = {
98 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 };
108
109 static const uint8_t mpeg2_dc_scale_table3[128] = {
110 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 };
120
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
122 ff_mpeg1_dc_scale_table,
123 mpeg2_dc_scale_table1,
124 mpeg2_dc_scale_table2,
125 mpeg2_dc_scale_table3,
126 };
127
128 const enum AVPixelFormat ff_pixfmt_list_420[] = {
129 AV_PIX_FMT_YUV420P,
130 AV_PIX_FMT_NONE
131 };
132
133 const enum AVPixelFormat ff_hwaccel_pixfmt_list_420[] = {
134 #if CONFIG_DXVA2
135 AV_PIX_FMT_DXVA2_VLD,
136 #endif
137 #if CONFIG_VAAPI
138 AV_PIX_FMT_VAAPI_VLD,
139 #endif
140 #if CONFIG_VDA
141 AV_PIX_FMT_VDA_VLD,
142 #endif
143 #if CONFIG_VDPAU
144 AV_PIX_FMT_VDPAU,
145 #endif
146 AV_PIX_FMT_YUV420P,
147 AV_PIX_FMT_NONE
148 };
149
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    /* Error-resilience callback (installed by init_er()): reconstruct one
     * macroblock from the concealment parameters supplied by the ER code. */
    MpegEncContext *s = opaque;

    /* Load the macroblock state that ff_MPV_decode_mb() reads. */
    s->mv_dir     = mv_dir;
    s->mv_type    = mv_type;
    s->mb_intra   = mb_intra;
    s->mb_skipped = mb_skipped;
    s->mb_x       = mb_x;
    s->mb_y       = mb_y;
    memcpy(s->mv, mv, sizeof(*mv));

    ff_init_block_index(s);
    ff_update_block_index(s);

    s->dsp.clear_blocks(s->block[0]);

    /* Point dest[] at the current 16x16 macroblock in the current picture;
     * chroma offsets are scaled down by the chroma subsampling shifts. */
    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    /* Only concealment from reference 0 is supported here. */
    assert(ref == 0);
    ff_MPV_decode_mb(s, s->block);
}
176
/**
 * Find the next MPEG start code (byte-aligned 00 00 01 xx) in [p, end).
 * *state carries the last bytes seen across calls, so a start code split
 * between two buffers is still found. On return, *state holds the 32-bit
 * big-endian value ending at the returned position; the return value
 * points just past the start code, or to end if none was found.
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                          const uint8_t *end,
                                          uint32_t * restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* First, finish any start code partially held in *state. */
    for (i = 0; i < 3; i++) {
        uint32_t shifted = *state << 8;
        *state = shifted + *p++;
        if (shifted == 0x100 || p == end)
            return p;
    }

    /* Scan up to three bytes per step for the 00 00 01 pattern. */
    while (p < end) {
        if (p[-1] > 1) {
            p += 3;
        } else if (p[-2]) {
            p += 2;
        } else if (p[-3] | (p[-1] - 1)) {
            p++;
        } else {
            p++;
            break;
        }
    }

    /* Clamp the (possibly overshot) cursor, then reload *state with the
     * four bytes ending at the current position (big-endian). */
    if (p > end)
        p = end;
    p -= 4;
    *state = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
             ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];

    return p + 4;
}
209
/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    ff_dsputil_init(&s->dsp, s->avctx);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    /* Install the portable C dequantizers first; the per-architecture
     * init calls below run afterwards and may replace them. */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    /* The bitexact variant keeps output identical across platforms. */
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#if ARCH_X86
    ff_MPV_common_init_x86(s);
#elif ARCH_ALPHA
    ff_MPV_common_init_axp(s);
#elif ARCH_ARM
    ff_MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    ff_MPV_common_init_altivec(s);
#elif ARCH_BFIN
    ff_MPV_common_init_bfin(s);
#endif

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
252
253 void ff_copy_picture(Picture *dst, Picture *src)
254 {
255 *dst = *src;
256 dst->f.type = FF_BUFFER_TYPE_COPY;
257 }
258
/**
 * Release a frame buffer
 */
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->f);
    else
        avcodec_default_release_buffer(s->avctx, &pic->f);
    /* Also free any hwaccel private data attached in alloc_frame_buffer(). */
    av_freep(&pic->f.hwaccel_picture_private);
}
274
/**
 * Allocate the linesize-dependent scratch buffers: the edge emulation
 * buffer and a single scratchpad shared by ME / RD / B / OBMC.
 * @return 0 on success, AVERROR(ENOMEM) on failure (edge buffer freed).
 */
int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
{
    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for halfpel / 21x21 for h264)
    // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
                      fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 2,
                      fail)
    /* All scratch pointers alias the single allocation above;
     * OBMC starts at a 16-byte offset into it. */
    s->me.temp = s->me.scratchpad;
    s->rd_scratchpad = s->me.scratchpad;
    s->b_scratchpad = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}
299
/**
 * Allocate a frame buffer
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r, ret;

    /* Hardware acceleration may require per-picture private data. */
    if (s->avctx->hwaccel) {
        assert(!pic->f.hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->f.hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    /* WM Image / Screen codecs use internal buffers of non-standard
     * dimensions; bypass user callbacks for them (mirrors free_frame_buffer). */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE &&
        s->codec_id != AV_CODEC_ID_MSS2)
        r = ff_thread_get_buffer(s->avctx, &pic->f);
    else
        r = avcodec_default_get_buffer(s->avctx, &pic->f);

    if (r < 0 || !pic->f.type || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
               r, pic->f.type, pic->f.data[0]);
        av_freep(&pic->f.hwaccel_picture_private);
        return -1;
    }

    /* Strides must stay constant once established for the sequence. */
    if (s->linesize && (s->linesize != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    /* Linesize-dependent scratch buffers (edge emulation etc.) are
     * allocated lazily, once the real linesize is known. */
    if (!s->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        free_frame_buffer(s, pic);
        return ret;
    }

    return 0;
}
357
358 /**
359 * Allocate a Picture.
360 * The pixels are allocated/set by calling get_buffer() if shared = 0
361 */
362 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
363 {
364 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
365
366 // the + 1 is needed so memset(,,stride*height) does not sig11
367
368 const int mb_array_size = s->mb_stride * s->mb_height;
369 const int b8_array_size = s->b8_stride * s->mb_height * 2;
370 const int b4_array_size = s->b4_stride * s->mb_height * 4;
371 int i;
372 int r = -1;
373
374 if (shared) {
375 assert(pic->f.data[0]);
376 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
377 pic->f.type = FF_BUFFER_TYPE_SHARED;
378 } else {
379 assert(!pic->f.data[0]);
380
381 if (alloc_frame_buffer(s, pic) < 0)
382 return -1;
383
384 s->linesize = pic->f.linesize[0];
385 s->uvlinesize = pic->f.linesize[1];
386 }
387
388 if (pic->f.qscale_table == NULL) {
389 if (s->encoding) {
390 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
391 mb_array_size * sizeof(int16_t), fail)
392 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
393 mb_array_size * sizeof(int16_t), fail)
394 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
395 mb_array_size * sizeof(int8_t ), fail)
396 }
397
398 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
399 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
400 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
401 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
402 fail)
403 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
404 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
405 fail)
406 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
407 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
408 if (s->out_format == FMT_H264) {
409 for (i = 0; i < 2; i++) {
410 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
411 2 * (b4_array_size + 4) * sizeof(int16_t),
412 fail)
413 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
414 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
415 4 * mb_array_size * sizeof(uint8_t), fail)
416 }
417 pic->f.motion_subsample_log2 = 2;
418 } else if (s->out_format == FMT_H263 || s->encoding ||
419 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
420 for (i = 0; i < 2; i++) {
421 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
422 2 * (b8_array_size + 4) * sizeof(int16_t),
423 fail)
424 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
425 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
426 4 * mb_array_size * sizeof(uint8_t), fail)
427 }
428 pic->f.motion_subsample_log2 = 3;
429 }
430 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
431 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
432 64 * mb_array_size * sizeof(int16_t) * 6, fail)
433 }
434 pic->f.qstride = s->mb_stride;
435 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
436 1 * sizeof(AVPanScan), fail)
437 }
438
439 pic->owner2 = s;
440
441 return 0;
442 fail: // for the FF_ALLOCZ_OR_GOTO macro
443 if (r >= 0)
444 free_frame_buffer(s, pic);
445 return -1;
446 }
447
448 /**
449 * Deallocate a picture.
450 */
451 static void free_picture(MpegEncContext *s, Picture *pic)
452 {
453 int i;
454
455 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
456 free_frame_buffer(s, pic);
457 }
458
459 av_freep(&pic->mb_var);
460 av_freep(&pic->mc_mb_var);
461 av_freep(&pic->mb_mean);
462 av_freep(&pic->f.mbskip_table);
463 av_freep(&pic->qscale_table_base);
464 pic->f.qscale_table = NULL;
465 av_freep(&pic->mb_type_base);
466 pic->f.mb_type = NULL;
467 av_freep(&pic->f.dct_coeff);
468 av_freep(&pic->f.pan_scan);
469 pic->f.mb_type = NULL;
470 for (i = 0; i < 2; i++) {
471 av_freep(&pic->motion_val_base[i]);
472 av_freep(&pic->f.ref_index[i]);
473 pic->f.motion_val[i] = NULL;
474 }
475
476 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
477 for (i = 0; i < 4; i++) {
478 pic->f.base[i] =
479 pic->f.data[i] = NULL;
480 }
481 pic->f.type = 0;
482 }
483 }
484
/**
 * Allocate the per-thread buffers of an MpegEncContext: block storage,
 * encoder ME maps, and H.263 AC prediction values. The linesize-dependent
 * scratch buffers are deliberately left NULL here; they are allocated
 * later by ff_mpv_frame_size_alloc() once the linesize is known.
 */
static int init_duplicate_context(MpegEncContext *s)
{
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    s->edge_emu_buffer =
    s->me.scratchpad =
    s->me.temp =
    s->rd_scratchpad =
    s->b_scratchpad =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        /* Motion-estimation maps (and optional noise-reduction stats)
         * are only needed by the encoder. */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    /* Two sets of 12 blocks of 64 coefficients each. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        /* Offsets skip the padding row/column: [0] = luma plane,
         * [1]/[2] = the two chroma planes. */
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_MPV_common_end()
}
529
530 static void free_duplicate_context(MpegEncContext *s)
531 {
532 if (s == NULL)
533 return;
534
535 av_freep(&s->edge_emu_buffer);
536 av_freep(&s->me.scratchpad);
537 s->me.temp =
538 s->rd_scratchpad =
539 s->b_scratchpad =
540 s->obmc_scratchpad = NULL;
541
542 av_freep(&s->dct_error_sum);
543 av_freep(&s->me.map);
544 av_freep(&s->me.score_map);
545 av_freep(&s->blocks);
546 av_freep(&s->ac_val_base);
547 s->block = NULL;
548 }
549
/**
 * Save the per-thread fields of src into bak (scratch buffers, block
 * storage, slice range, bit writer, stats). Used by
 * ff_update_duplicate_context() so these fields survive the wholesale
 * memcpy of the context.
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
576
/**
 * Copy src into dst while keeping dst's own per-thread buffers and
 * slice-range fields (saved and restored via backup_duplicate_context()).
 */
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into this context's own block array, not src's. */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    /* Scratch buffers are per-thread; allocate them if dst has none. */
    if (!dst->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
    return 0;
}
599
/**
 * Synchronize the destination decoder context with the source context
 * for frame threading: picture state, bitstream buffer, timing and
 * interlacing info are carried over; per-thread buffers are kept.
 */
int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    int i;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src || !s1->context_initialized)
        return 0;

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        /* First call for this thread: clone the whole source context,
         * then fix up the fields that must remain per-thread. */
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx = dst;
        /* Each thread context uses a disjoint slice of the picture array. */
        s->picture_range_start += MAX_PICTURE_COUNT;
        s->picture_range_end += MAX_PICTURE_COUNT;
        s->bitstream_buffer = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        ff_MPV_common_init(s);
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        int err;
        s->context_reinit = 0;
        s->height = s1->height;
        s->width = s1->width;
        if ((err = ff_MPV_common_frame_size_change(s)) < 0)
            return err;
    }

    s->avctx->coded_height = s1->avctx->coded_height;
    s->avctx->coded_width = s1->avctx->coded_width;
    s->avctx->width = s1->avctx->width;
    s->avctx->height = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    /* NOTE(review): the range-memcpy calls below copy from one struct
     * member up to a sentinel member and therefore depend on the exact
     * field order inside MpegEncContext; reordering the struct breaks them. */
    memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
    memcpy(&s->last_picture, &s1->last_picture,
           (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);

    // reset s->picture[].f.extended_data to s->picture[].f.data
    for (i = 0; i < s->picture_count; i++)
        s->picture[i].f.extended_data = s->picture[i].f.data;

    /* Translate the picture pointers into this context's own array. */
    s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs = s1->workaround_bugs;

    // MPEG4 timing info
    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
           (char *) &s1->shape - (char *) &s1->time_increment_bits);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay = s1->low_delay;
    s->droppable = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            /* NOTE(review): av_fast_malloc() can fail and leave
             * s->bitstream_buffer NULL; the memcpy below would then
             * dereference NULL — consider an OOM check here. */
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);
    }

    // linesize dependend scratch buffer allocation
    if (!s->edge_emu_buffer)
        if (s1->linesize) {
            if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
            return AVERROR_BUG;
        }

    // MPEG2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    /* Carry over the picture-type history once the frame is complete. */
    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;

        if (s1->pict_type != AV_PICTURE_TYPE_B) {
            s->last_non_b_pict_type = s1->pict_type;
        }
    }

    return 0;
}
712
713 /**
714 * Set the given MpegEncContext to common defaults
715 * (same for encoding and decoding).
716 * The changed fields will not depend upon the
717 * prior state of the MpegEncContext.
718 */
719 void ff_MPV_common_defaults(MpegEncContext *s)
720 {
721 s->y_dc_scale_table =
722 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
723 s->chroma_qscale_table = ff_default_chroma_qscale_table;
724 s->progressive_frame = 1;
725 s->progressive_sequence = 1;
726 s->picture_structure = PICT_FRAME;
727
728 s->coded_picture_number = 0;
729 s->picture_number = 0;
730 s->input_picture_number = 0;
731
732 s->picture_in_gop_number = 0;
733
734 s->f_code = 1;
735 s->b_code = 1;
736
737 s->picture_range_start = 0;
738 s->picture_range_end = MAX_PICTURE_COUNT;
739
740 s->slice_context_count = 1;
741 }
742
/**
 * Set the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    /* Decoding currently needs no defaults beyond the common ones. */
    ff_MPV_common_defaults(s);
}
752
/**
 * Initialize the error-resilience context from the MpegEncContext:
 * mirror the macroblock geometry, share the tables both sides need,
 * and allocate the ER-only status buffers.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
static int init_er(MpegEncContext *s)
{
    ERContext *er = &s->er;
    int mb_array_size = s->mb_height * s->mb_stride;
    int i;

    er->avctx = s->avctx;
    er->dsp = &s->dsp;

    /* Mirror the macroblock geometry of the owning context. */
    er->mb_index2xy = s->mb_index2xy;
    er->mb_num = s->mb_num;
    er->mb_width = s->mb_width;
    er->mb_height = s->mb_height;
    er->mb_stride = s->mb_stride;
    er->b8_stride = s->b8_stride;

    /* ER-private buffers, one byte per macroblock position. */
    er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
    er->error_status_table = av_mallocz(mb_array_size);
    if (!er->er_temp_buffer || !er->error_status_table)
        goto fail;

    /* Shared with the decoder proper; not owned by the ER context. */
    er->mbskip_table = s->mbskip_table;
    er->mbintra_table = s->mbintra_table;

    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
        er->dc_val[i] = s->dc_val[i];

    /* ER reconstructs damaged macroblocks through this callback. */
    er->decode_mb = mpeg_er_decode_mb;
    er->opaque = s;

    return 0;
fail:
    av_freep(&er->er_temp_buffer);
    av_freep(&er->error_status_table);
    return AVERROR(ENOMEM);
}
789
/**
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
 * Note: s->mb_height must already be set by the caller
 * (ff_MPV_common_init() computes it from the height and codec).
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    /* Derive the macroblock / 8x8 / 4x4 block grid from the width.
     * Strides are one unit wider than the frame so edge blocks have
     * addressable neighbours. */
    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overriden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    /* Maps a linear macroblock index to its x + y*stride position. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                      fail); // error ressilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] =
        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        /* Working pointers skip the padding row/column at the start. */
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
                                   s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
                                   s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
                          sizeof(uint16_t), fail); // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
                          sizeof(int), fail);

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);

    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
                                  mb_array_size * 2 * sizeof(uint8_t), fail);
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
                                  mv_table_size * 2 * sizeof(int16_t), fail);
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
                                            + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
                              mb_array_size * 2 * sizeof(uint8_t), fail);
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
                          mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
                          mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
                          yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* Initial DC prediction value. */
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for a quicker mpeg4 slice_end detection

    /* Extra planes for drawing motion-vector / MB-type visualization. */
    if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
        s->avctx->debug_mv) {
        s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
        s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
        s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
    }

    /* Finally wire up the error-resilience context. */
    return init_er(s);
fail:
    return AVERROR(ENOMEM);
}
939
940 /**
941 * init common structure for both encoder and decoder.
942 * this assumes that some variables like width/height are already set
943 */
944 av_cold int ff_MPV_common_init(MpegEncContext *s)
945 {
946 int i;
947 int nb_slices = (HAVE_THREADS &&
948 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
949 s->avctx->thread_count : 1;
950
951 if (s->encoding && s->avctx->slices)
952 nb_slices = s->avctx->slices;
953
954 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
955 s->mb_height = (s->height + 31) / 32 * 2;
956 else if (s->codec_id != AV_CODEC_ID_H264)
957 s->mb_height = (s->height + 15) / 16;
958
959 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
960 av_log(s->avctx, AV_LOG_ERROR,
961 "decoding to AV_PIX_FMT_NONE is not supported.\n");
962 return -1;
963 }
964
965 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
966 int max_slices;
967 if (s->mb_height)
968 max_slices = FFMIN(MAX_THREADS, s->mb_height);
969 else
970 max_slices = MAX_THREADS;
971 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
972 " reducing to %d\n", nb_slices, max_slices);
973 nb_slices = max_slices;
974 }
975
976 if ((s->width || s->height) &&
977 av_image_check_size(s->width, s->height, 0, s->avctx))
978 return -1;
979
980 ff_dct_common_init(s);
981
982 s->flags = s->avctx->flags;
983 s->flags2 = s->avctx->flags2;
984
985 if (s->width && s->height) {
986 /* set chroma shifts */
987 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
988 &s->chroma_x_shift,
989 &s->chroma_y_shift);
990
991 /* convert fourcc to upper case */
992 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
993
994 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
995
996 s->avctx->coded_frame = &s->current_picture.f;
997
998 if (s->encoding) {
999 if (s->msmpeg4_version) {
1000 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1001 2 * 2 * (MAX_LEVEL + 1) *
1002 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1003 }
1004 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1005
1006 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
1007 64 * 32 * sizeof(int), fail);
1008 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
1009 64 * 32 * sizeof(int), fail);
1010 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
1011 64 * 32 * 2 * sizeof(uint16_t), fail);
1012 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
1013 64 * 32 * 2 * sizeof(uint16_t), fail);
1014 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
1015 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1016 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
1017 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1018
1019 if (s->avctx->noise_reduction) {
1020 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
1021 2 * 64 * sizeof(uint16_t), fail);
1022 }
1023 }
1024 }
1025
1026 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
1027 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1028 s->picture_count * sizeof(Picture), fail);
1029 for (i = 0; i < s->picture_count; i++) {
1030 avcodec_get_frame_defaults(&s->picture[i].f);
1031 }
1032
1033 if (s->width && s->height) {
1034 if (init_context_frame(s))
1035 goto fail;
1036
1037 s->parse_context.state = -1;
1038 }
1039
1040 s->context_initialized = 1;
1041 s->thread_context[0] = s;
1042
1043 if (s->width && s->height) {
1044 if (nb_slices > 1) {
1045 for (i = 1; i < nb_slices; i++) {
1046 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1047 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1048 }
1049
1050 for (i = 0; i < nb_slices; i++) {
1051 if (init_duplicate_context(s->thread_context[i]) < 0)
1052 goto fail;
1053 s->thread_context[i]->start_mb_y =
1054 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1055 s->thread_context[i]->end_mb_y =
1056 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1057 }
1058 } else {
1059 if (init_duplicate_context(s) < 0)
1060 goto fail;
1061 s->start_mb_y = 0;
1062 s->end_mb_y = s->mb_height;
1063 }
1064 s->slice_context_count = nb_slices;
1065 }
1066
1067 return 0;
1068 fail:
1069 ff_MPV_common_end(s);
1070 return -1;
1071 }
1072
1073 /**
1074 * Frees and resets MpegEncContext fields depending on the resolution.
1075 * Is used during resolution changes to avoid a full reinitialization of the
1076 * codec.
1077 */
1078 static int free_context_frame(MpegEncContext *s)
1079 {
1080 int i, j, k;
1081
1082 av_freep(&s->mb_type);
1083 av_freep(&s->p_mv_table_base);
1084 av_freep(&s->b_forw_mv_table_base);
1085 av_freep(&s->b_back_mv_table_base);
1086 av_freep(&s->b_bidir_forw_mv_table_base);
1087 av_freep(&s->b_bidir_back_mv_table_base);
1088 av_freep(&s->b_direct_mv_table_base);
1089 s->p_mv_table = NULL;
1090 s->b_forw_mv_table = NULL;
1091 s->b_back_mv_table = NULL;
1092 s->b_bidir_forw_mv_table = NULL;
1093 s->b_bidir_back_mv_table = NULL;
1094 s->b_direct_mv_table = NULL;
1095 for (i = 0; i < 2; i++) {
1096 for (j = 0; j < 2; j++) {
1097 for (k = 0; k < 2; k++) {
1098 av_freep(&s->b_field_mv_table_base[i][j][k]);
1099 s->b_field_mv_table[i][j][k] = NULL;
1100 }
1101 av_freep(&s->b_field_select_table[i][j]);
1102 av_freep(&s->p_field_mv_table_base[i][j]);
1103 s->p_field_mv_table[i][j] = NULL;
1104 }
1105 av_freep(&s->p_field_select_table[i]);
1106 }
1107
1108 av_freep(&s->dc_val_base);
1109 av_freep(&s->coded_block_base);
1110 av_freep(&s->mbintra_table);
1111 av_freep(&s->cbp_table);
1112 av_freep(&s->pred_dir_table);
1113
1114 av_freep(&s->mbskip_table);
1115
1116 av_freep(&s->er.error_status_table);
1117 av_freep(&s->er.er_temp_buffer);
1118 av_freep(&s->mb_index2xy);
1119 av_freep(&s->lambda_table);
1120 av_freep(&s->cplx_tab);
1121 av_freep(&s->bits_tab);
1122
1123 s->linesize = s->uvlinesize = 0;
1124
1125 for (i = 0; i < 3; i++)
1126 av_freep(&s->visualization_buffer[i]);
1127
1128 return 0;
1129 }
1130
1131 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1132 {
1133 int i, err = 0;
1134
1135 if (s->slice_context_count > 1) {
1136 for (i = 0; i < s->slice_context_count; i++) {
1137 free_duplicate_context(s->thread_context[i]);
1138 }
1139 for (i = 1; i < s->slice_context_count; i++) {
1140 av_freep(&s->thread_context[i]);
1141 }
1142 } else
1143 free_duplicate_context(s);
1144
1145 free_context_frame(s);
1146
1147 if (s->picture)
1148 for (i = 0; i < s->picture_count; i++) {
1149 s->picture[i].needs_realloc = 1;
1150 }
1151
1152 s->last_picture_ptr =
1153 s->next_picture_ptr =
1154 s->current_picture_ptr = NULL;
1155
1156 // init
1157 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1158 s->mb_height = (s->height + 31) / 32 * 2;
1159 else if (s->codec_id != AV_CODEC_ID_H264)
1160 s->mb_height = (s->height + 15) / 16;
1161
1162 if ((s->width || s->height) &&
1163 av_image_check_size(s->width, s->height, 0, s->avctx))
1164 return AVERROR_INVALIDDATA;
1165
1166 if ((err = init_context_frame(s)))
1167 goto fail;
1168
1169 s->thread_context[0] = s;
1170
1171 if (s->width && s->height) {
1172 int nb_slices = s->slice_context_count;
1173 if (nb_slices > 1) {
1174 for (i = 1; i < nb_slices; i++) {
1175 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1176 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1177 }
1178
1179 for (i = 0; i < nb_slices; i++) {
1180 if (init_duplicate_context(s->thread_context[i]) < 0)
1181 goto fail;
1182 s->thread_context[i]->start_mb_y =
1183 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1184 s->thread_context[i]->end_mb_y =
1185 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1186 }
1187 } else {
1188 if (init_duplicate_context(s) < 0)
1189 goto fail;
1190 s->start_mb_y = 0;
1191 s->end_mb_y = s->mb_height;
1192 }
1193 s->slice_context_count = nb_slices;
1194 }
1195
1196 return 0;
1197 fail:
1198 ff_MPV_common_end(s);
1199 return err;
1200 }
1201
/* free everything allocated by ff_MPV_common_init() and reset the context
 * to an uninitialized state (common for both encoder and decoder) */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        /* slot 0 aliases s itself, so only slots 1.. are heap-allocated */
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    /* encoder-side allocations (NULL and thus no-ops for a decoder) */
    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* release per-picture buffers before freeing the array itself; this is
     * skipped when the context is a frame-threading copy (is_copy), which
     * does not own the buffers */
    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);

    free_context_frame(s);

    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);

    s->context_initialized = 0;
    s->last_picture_ptr =
    s->next_picture_ptr =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;
}
1252
/**
 * Initialize the derived lookup tables of an RLTable:
 * max_level[last][run], max_run[last][level] and index_run[last][run],
 * computed separately for the "not last" (last == 0) and "last"
 * (last == 1) halves of the run/level table.
 *
 * @param rl           table whose table_run/table_level/last/n fields are
 *                     already filled in
 * @param static_store if non-NULL, backing storage for the three tables;
 *                     per half the layout is: MAX_RUN+1 bytes max_level,
 *                     MAX_LEVEL+1 bytes max_run, MAX_RUN+1 bytes index_run.
 *                     If NULL, each table is av_malloc()ed instead.
 */
void ff_init_rl(RLTable *rl,
                uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        /* entries [0, rl->last) are "not last", [rl->last, rl->n) are "last" */
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        /* rl->n in index_run[] marks "no entry for this run" */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n) /* first entry with this run value */
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* commit the scratch arrays into static_store or fresh buffers */
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
1304
/**
 * Precompute the per-qscale RL VLC tables (rl->rl_vlc[q]) from an already
 * initialized rl->vlc and the RLTable's run/level arrays, folding the
 * dequantization (level * qmul + qadd) into the stored level.
 */
void ff_init_vlc_rl(RLTable *rl)
{
    int i, q;

    for (q = 0; q < 32; q++) {
        /* dequant factors: level' = level * 2q + ((q - 1) | 1);
         * q == 0 is special-cased below to the identity mapping */
        int qmul = q * 2;
        int qadd = (q - 1) | 1;

        if (q == 0) {
            qmul = 1;
            qadd = 0;
        }
        for (i = 0; i < rl->vlc.table_size; i++) {
            int code = rl->vlc.table[i][0];
            int len = rl->vlc.table[i][1];
            int level, run;

            if (len == 0) { // illegal code
                run = 66;
                level = MAX_LEVEL;
            } else if (len < 0) { // more bits needed
                run = 0;
                level = code;
            } else {
                if (code == rl->n) { // esc
                    run = 66;
                    level = 0;
                } else {
                    run = rl->table_run[code] + 1;
                    level = rl->table_level[code] * qmul + qadd;
                    /* NOTE(review): the +192 offset appears to flag "last
                     * coefficient in block" entries for the consumer of
                     * rl_vlc — confirm against the RL decoding loop */
                    if (code >= rl->last) run += 192;
                }
            }
            rl->rl_vlc[q][i].len = len;
            rl->rl_vlc[q][i].level = level;
            rl->rl_vlc[q][i].run = run;
        }
    }
}
1344
1345 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1346 {
1347 int i;
1348
1349 /* release non reference frames */
1350 for (i = 0; i < s->picture_count; i++) {
1351 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1352 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1353 (remove_current || &s->picture[i] != s->current_picture_ptr)
1354 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1355 free_frame_buffer(s, &s->picture[i]);
1356 }
1357 }
1358 }
1359
1360 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1361 {
1362 if (pic->f.data[0] == NULL)
1363 return 1;
1364 if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
1365 if (!pic->owner2 || pic->owner2 == s)
1366 return 1;
1367 return 0;
1368 }
1369
1370 static int find_unused_picture(MpegEncContext *s, int shared)
1371 {
1372 int i;
1373
1374 if (shared) {
1375 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1376 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1377 return i;
1378 }
1379 } else {
1380 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1381 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1382 return i; // FIXME
1383 }
1384 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1385 if (pic_is_unused(s, &s->picture[i]))
1386 return i;
1387 }
1388 }
1389
1390 return AVERROR_INVALIDDATA;
1391 }
1392
1393 int ff_find_unused_picture(MpegEncContext *s, int shared)
1394 {
1395 int ret = find_unused_picture(s, shared);
1396
1397 if (ret >= 0 && ret < s->picture_range_end) {
1398 if (s->picture[ret].needs_realloc) {
1399 s->picture[ret].needs_realloc = 0;
1400 free_picture(s, &s->picture[ret]);
1401 avcodec_get_frame_defaults(&s->picture[ret].f);
1402 }
1403 }
1404 return ret;
1405 }
1406
1407 static void update_noise_reduction(MpegEncContext *s)
1408 {
1409 int intra, i;
1410
1411 for (intra = 0; intra < 2; intra++) {
1412 if (s->dct_count[intra] > (1 << 16)) {
1413 for (i = 0; i < 64; i++) {
1414 s->dct_error_sum[intra][i] >>= 1;
1415 }
1416 s->dct_count[intra] >>= 1;
1417 }
1418
1419 for (i = 0; i < 64; i++) {
1420 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1421 s->dct_count[intra] +
1422 s->dct_error_sum[intra][i] / 2) /
1423 (s->dct_error_sum[intra][i] + 1);
1424 }
1425 }
1426 }
1427
1428 /**
1429 * generic function for encode/decode called after coding/decoding
1430 * the header and before a frame is coded/decoded.
1431 */
1432 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1433 {
1434 int i;
1435 Picture *pic;
1436 s->mb_skipped = 0;
1437
1438 /* mark & release old frames */
1439 if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
1440 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1441 s->last_picture_ptr != s->next_picture_ptr &&
1442 s->last_picture_ptr->f.data[0]) {
1443 if (s->last_picture_ptr->owner2 == s)
1444 free_frame_buffer(s, s->last_picture_ptr);
1445 }
1446
1447 /* release forgotten pictures */
1448 /* if (mpeg124/h263) */
1449 if (!s->encoding) {
1450 for (i = 0; i < s->picture_count; i++) {
1451 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1452 &s->picture[i] != s->last_picture_ptr &&
1453 &s->picture[i] != s->next_picture_ptr &&
1454 s->picture[i].f.reference && !s->picture[i].needs_realloc) {
1455 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1456 av_log(avctx, AV_LOG_ERROR,
1457 "releasing zombie picture\n");
1458 free_frame_buffer(s, &s->picture[i]);
1459 }
1460 }
1461 }
1462 }
1463
1464 if (!s->encoding) {
1465 ff_release_unused_pictures(s, 1);
1466
1467 if (s->current_picture_ptr &&
1468 s->current_picture_ptr->f.data[0] == NULL) {
1469 // we already have a unused image
1470 // (maybe it was set before reading the header)
1471 pic = s->current_picture_ptr;
1472 } else {
1473 i = ff_find_unused_picture(s, 0);
1474 if (i < 0) {
1475 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1476 return i;
1477 }
1478 pic = &s->picture[i];
1479 }
1480
1481 pic->f.reference = 0;
1482 if (!s->droppable) {
1483 if (s->codec_id == AV_CODEC_ID_H264)
1484 pic->f.reference = s->picture_structure;
1485 else if (s->pict_type != AV_PICTURE_TYPE_B)
1486 pic->f.reference = 3;
1487 }
1488
1489 pic->f.coded_picture_number = s->coded_picture_number++;
1490
1491 if (ff_alloc_picture(s, pic, 0) < 0)
1492 return -1;
1493
1494 s->current_picture_ptr = pic;
1495 // FIXME use only the vars from current_pic
1496 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1497 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1498 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1499 if (s->picture_structure != PICT_FRAME)
1500 s->current_picture_ptr->f.top_field_first =
1501 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1502 }
1503 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1504 !s->progressive_sequence;
1505 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1506 }
1507
1508 s->current_picture_ptr->f.pict_type = s->pict_type;
1509 // if (s->flags && CODEC_FLAG_QSCALE)
1510 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1511 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1512
1513 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1514
1515 if (s->pict_type != AV_PICTURE_TYPE_B) {
1516 s->last_picture_ptr = s->next_picture_ptr;
1517 if (!s->droppable)
1518 s->next_picture_ptr = s->current_picture_ptr;
1519 }
1520 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1521 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1522 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1523 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1524 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1525 s->pict_type, s->droppable);
1526
1527 if (s->codec_id != AV_CODEC_ID_H264) {
1528 if ((s->last_picture_ptr == NULL ||
1529 s->last_picture_ptr->f.data[0] == NULL) &&
1530 (s->pict_type != AV_PICTURE_TYPE_I ||
1531 s->picture_structure != PICT_FRAME)) {
1532 int h_chroma_shift, v_chroma_shift;
1533 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1534 &h_chroma_shift, &v_chroma_shift);
1535 if (s->pict_type != AV_PICTURE_TYPE_I)
1536 av_log(avctx, AV_LOG_ERROR,
1537 "warning: first frame is no keyframe\n");
1538 else if (s->picture_structure != PICT_FRAME)
1539 av_log(avctx, AV_LOG_INFO,
1540 "allocate dummy last picture for field based first keyframe\n");
1541
1542 /* Allocate a dummy frame */
1543 i = ff_find_unused_picture(s, 0);
1544 if (i < 0) {
1545 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1546 return i;
1547 }
1548 s->last_picture_ptr = &s->picture[i];
1549 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1550 s->last_picture_ptr = NULL;
1551 return -1;
1552 }
1553
1554 memset(s->last_picture_ptr->f.data[0], 0,
1555 avctx->height * s->last_picture_ptr->f.linesize[0]);
1556 memset(s->last_picture_ptr->f.data[1], 0x80,
1557 (avctx->height >> v_chroma_shift) *
1558 s->last_picture_ptr->f.linesize[1]);
1559 memset(s->last_picture_ptr->f.data[2], 0x80,
1560 (avctx->height >> v_chroma_shift) *
1561 s->last_picture_ptr->f.linesize[2]);
1562
1563 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1564 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1565 s->last_picture_ptr->f.reference = 3;
1566 }
1567 if ((s->next_picture_ptr == NULL ||
1568 s->next_picture_ptr->f.data[0] == NULL) &&
1569 s->pict_type == AV_PICTURE_TYPE_B) {
1570 /* Allocate a dummy frame */
1571 i = ff_find_unused_picture(s, 0);
1572 if (i < 0) {
1573 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1574 return i;
1575 }
1576 s->next_picture_ptr = &s->picture[i];
1577 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1578 s->next_picture_ptr = NULL;
1579 return -1;
1580 }
1581 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1582 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1583 s->next_picture_ptr->f.reference = 3;
1584 }
1585 }
1586
1587 if (s->last_picture_ptr)
1588 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1589 if (s->next_picture_ptr)
1590 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1591
1592 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
1593 if (s->next_picture_ptr)
1594 s->next_picture_ptr->owner2 = s;
1595 if (s->last_picture_ptr)
1596 s->last_picture_ptr->owner2 = s;
1597 }
1598
1599 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1600 s->last_picture_ptr->f.data[0]));
1601
1602 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1603 int i;
1604 for (i = 0; i < 4; i++) {
1605 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1606 s->current_picture.f.data[i] +=
1607 s->current_picture.f.linesize[i];
1608 }
1609 s->current_picture.f.linesize[i] *= 2;
1610 s->last_picture.f.linesize[i] *= 2;
1611 s->next_picture.f.linesize[i] *= 2;
1612 }
1613 }
1614
1615 s->err_recognition = avctx->err_recognition;
1616
1617 /* set dequantizer, we can't do it during init as
1618 * it might change for mpeg4 and we can't do it in the header
1619 * decode as init is not called for mpeg4 there yet */
1620 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1621 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1622 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1623 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1624 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1625 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1626 } else {
1627 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1628 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1629 }
1630
1631 if (s->dct_error_sum) {
1632 assert(s->avctx->noise_reduction && s->encoding);
1633 update_noise_reduction(s);
1634 }
1635
1636 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1637 return ff_xvmc_field_start(s, avctx);
1638
1639 return 0;
1640 }
1641
/* generic function for encode/decode called after a
 * frame has been coded/decoded. */
void ff_MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* redraw edges for the frame if decoding didn't complete */
    // just to make sure that all data is rendered.
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else if ((s->er.error_count || s->encoding) &&
               !s->avctx->hwaccel &&
               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
               s->unrestricted_mv &&
               s->current_picture.f.reference &&
               !s->intra_only &&
               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        /* pad the luma plane and both (subsampled) chroma planes out to
         * the allocated EDGE_WIDTH borders */
        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                          s->h_edge_pos, s->v_edge_pos,
                          EDGE_WIDTH, EDGE_WIDTH,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* remember statistics of the frame just finished */
    s->last_pict_type = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
            break;
        }
    }
    assert(i < MAX_PICTURE_COUNT);
#endif

    if (s->encoding) {
        /* release non-reference frames */
        for (i = 0; i < s->picture_count; i++) {
            if (s->picture[i].f.data[0] && !s->picture[i].f.reference
                /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame = &s->current_picture_ptr->f;

    /* with frame threading, signal that the whole frame is available */
    if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
    }
}
1714
1715 /**
1716 * Draw a line from (ex, ey) -> (sx, sy).
1717 * @param w width of the image
1718 * @param h height of the image
1719 * @param stride stride/linesize of the image
1720 * @param color color of the arrow
1721 */
1722 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1723 int w, int h, int stride, int color)
1724 {
1725 int x, y, fr, f;
1726
1727 sx = av_clip(sx, 0, w - 1);
1728 sy = av_clip(sy, 0, h - 1);
1729 ex = av_clip(ex, 0, w - 1);
1730 ey = av_clip(ey, 0, h - 1);
1731
1732 buf[sy * stride + sx] += color;
1733
1734 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1735 if (sx > ex) {
1736 FFSWAP(int, sx, ex);
1737 FFSWAP(int, sy, ey);
1738 }
1739 buf += sx + sy * stride;
1740 ex -= sx;
1741 f = ((ey - sy) << 16) / ex;
1742 for (x = 0; x <= ex; x++) {
1743 y = (x * f) >> 16;
1744 fr = (x * f) & 0xFFFF;
1745 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1746 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1747 }
1748 } else {
1749 if (sy > ey) {
1750 FFSWAP(int, sx, ex);
1751 FFSWAP(int, sy, ey);
1752 }
1753 buf += sx + sy * stride;
1754 ey -= sy;
1755 if (ey)
1756 f = ((ex - sx) << 16) / ey;
1757 else
1758 f = 0;
1759 for (y = 0; y = ey; y++) {
1760 x = (y * f) >> 16;
1761 fr = (y * f) & 0xFFFF;
1762 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1763 buf[y * stride + x + 1] += (color * fr ) >> 16;
1764 }
1765 }
1766 }
1767
1768 /**
1769 * Draw an arrow from (ex, ey) -> (sx, sy).
1770 * @param w width of the image
1771 * @param h height of the image
1772 * @param stride stride/linesize of the image
1773 * @param color color of the arrow
1774 */
1775 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1776 int ey, int w, int h, int stride, int color)
1777 {
1778 int dx,dy;
1779
1780 sx = av_clip(sx, -100, w + 100);
1781 sy = av_clip(sy, -100, h + 100);
1782 ex = av_clip(ex, -100, w + 100);
1783 ey = av_clip(ey, -100, h + 100);
1784
1785 dx = ex - sx;
1786 dy = ey - sy;
1787
1788 if (dx * dx + dy * dy > 3 * 3) {
1789 int rx = dx + dy;
1790 int ry = -dx + dy;
1791 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1792
1793 // FIXME subpixel accuracy
1794 rx = ROUNDED_DIV(rx * 3 << 4, length);
1795 ry = ROUNDED_DIV(ry * 3 << 4, length);
1796
1797 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1798 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1799 }
1800 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1801 }
1802
1803 /**
1804 * Print debugging info for the given picture.
1805 */
1806 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1807 {
1808 if (s->avctx->hwaccel || !pict || !pict->mb_type)
1809 return;
1810
1811 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1812 int x,y;
1813
1814 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1815 switch (pict->pict_type) {
1816 case AV_PICTURE_TYPE_I:
1817 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1818 break;
1819 case AV_PICTURE_TYPE_P:
1820 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1821 break;
1822 case AV_PICTURE_TYPE_B:
1823 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1824 break;
1825 case AV_PICTURE_TYPE_S:
1826 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1827 break;
1828 case AV_PICTURE_TYPE_SI:
1829 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1830 break;
1831 case AV_PICTURE_TYPE_SP:
1832 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1833 break;
1834 }
1835 for (y = 0; y < s->mb_height; y++) {
1836 for (x = 0; x < s->mb_width; x++) {
1837 if (s->avctx->debug & FF_DEBUG_SKIP) {
1838 int count = s->mbskip_table[x + y * s->mb_stride];
1839 if (count > 9)
1840 count = 9;
1841 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1842 }
1843 if (s->avctx->debug & FF_DEBUG_QP) {
1844 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1845 pict->qscale_table[x + y * s->mb_stride]);
1846 }
1847 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1848 int mb_type = pict->mb_type[x + y * s->mb_stride];
1849 // Type & MV direction
1850 if (IS_PCM(mb_type))
1851 av_log(s->avctx, AV_LOG_DEBUG, "P");
1852 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1853 av_log(s->avctx, AV_LOG_DEBUG, "A");
1854 else if (IS_INTRA4x4(mb_type))
1855 av_log(s->avctx, AV_LOG_DEBUG, "i");
1856 else if (IS_INTRA16x16(mb_type))
1857 av_log(s->avctx, AV_LOG_DEBUG, "I");
1858 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1859 av_log(s->avctx, AV_LOG_DEBUG, "d");
1860 else if (IS_DIRECT(mb_type))
1861 av_log(s->avctx, AV_LOG_DEBUG, "D");
1862 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1863 av_log(s->avctx, AV_LOG_DEBUG, "g");
1864 else if (IS_GMC(mb_type))
1865 av_log(s->avctx, AV_LOG_DEBUG, "G");
1866 else if (IS_SKIP(mb_type))
1867 av_log(s->avctx, AV_LOG_DEBUG, "S");
1868 else if (!USES_LIST(mb_type, 1))
1869 av_log(s->avctx, AV_LOG_DEBUG, ">");
1870 else if (!USES_LIST(mb_type, 0))
1871 av_log(s->avctx, AV_LOG_DEBUG, "<");
1872 else {
1873 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1874 av_log(s->avctx, AV_LOG_DEBUG, "X");
1875 }
1876
1877 // segmentation
1878 if (IS_8X8(mb_type))
1879 av_log(s->avctx, AV_LOG_DEBUG, "+");
1880 else if (IS_16X8(mb_type))
1881 av_log(s->avctx, AV_LOG_DEBUG, "-");
1882 else if (IS_8X16(mb_type))
1883 av_log(s->avctx, AV_LOG_DEBUG, "|");
1884 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1885 av_log(s->avctx, AV_LOG_DEBUG, " ");
1886 else
1887 av_log(s->avctx, AV_LOG_DEBUG, "?");
1888
1889
1890 if (IS_INTERLACED(mb_type))
1891 av_log(s->avctx, AV_LOG_DEBUG, "=");
1892 else
1893 av_log(s->avctx, AV_LOG_DEBUG, " ");
1894 }
1895 }
1896 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1897 }
1898 }
1899
1900 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1901 (s->avctx->debug_mv)) {
1902 const int shift = 1 + s->quarter_sample;
1903 int mb_y;
1904 uint8_t *ptr;
1905 int i;
1906 int h_chroma_shift, v_chroma_shift, block_height;
1907 const int width = s->avctx->width;
1908 const int height = s->avctx->height;
1909 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1910 const int mv_stride = (s->mb_width << mv_sample_log2) +
1911 (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1912 s->low_delay = 0; // needed to see the vectors without trashing the buffers
1913
1914 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1915 &h_chroma_shift, &v_chroma_shift);
1916 for (i = 0; i < 3; i++) {
1917 memcpy(s->visualization_buffer[i], pict->data[i],
1918 (i == 0) ? pict->linesize[i] * height:
1919 pict->linesize[i] * height >> v_chroma_shift);
1920 pict->data[i] = s->visualization_buffer[i];
1921 }
1922 pict->type = FF_BUFFER_TYPE_COPY;
1923 ptr = pict->data[0];
1924 block_height = 16 >> v_chroma_shift;
1925
1926 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1927 int mb_x;
1928 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1929 const int mb_index = mb_x + mb_y * s->mb_stride;
1930 if ((s->avctx->debug_mv) && pict->motion_val) {
1931 int type;
1932 for (type = 0; type < 3; type++) {
1933 int direction = 0;
1934 switch (type) {
1935 case 0:
1936 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1937 (pict->pict_type!= AV_PICTURE_TYPE_P))
1938 continue;
1939 direction = 0;
1940 break;
1941 case 1:
1942 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1943 (pict->pict_type!= AV_PICTURE_TYPE_B))
1944 continue;
1945 direction = 0;
1946 break;
1947 case 2:
1948 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1949 (pict->pict_type!= AV_PICTURE_TYPE_B))
1950 continue;
1951 direction = 1;
1952 break;
1953 }
1954 if (!USES_LIST(pict->mb_type[mb_index], direction))
1955 continue;
1956
1957 if (IS_8X8(pict->mb_type[mb_index])) {
1958 int i;
1959 for (i = 0; i < 4; i++) {
1960 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1961 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1962 int xy = (mb_x * 2 + (i & 1) +
1963 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1964 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1965 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1966 draw_arrow(ptr, sx, sy, mx, my, width,
1967 height, s->linesize, 100);
1968 }
1969 } else if (IS_16X8(pict->mb_type[mb_index])) {
1970 int i;
1971 for (i = 0; i < 2; i++) {
1972 int sx = mb_x * 16 + 8;
1973 int sy = mb_y * 16 + 4 + 8 * i;
1974 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1975 int mx = (pict->motion_val[direction][xy][0] >> shift);
1976 int my = (pict->motion_val[direction][xy][1] >> shift);
1977
1978 if (IS_INTERLACED(pict->mb_type[mb_index]))
1979 my *= 2;
1980
1981 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1982 height, s->linesize, 100);
1983 }
1984 } else if (IS_8X16(pict->mb_type[mb_index])) {
1985 int i;
1986 for (i = 0; i < 2; i++) {
1987 int sx = mb_x * 16 + 4 + 8 * i;
1988 int sy = mb_y * 16 + 8;
1989 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1990 int mx = pict->motion_val[direction][xy][0] >> shift;
1991 int my = pict->motion_val[direction][xy][1] >> shift;
1992
1993 if (IS_INTERLACED(pict->mb_type[mb_index]))
1994 my *= 2;
1995
1996 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1997 height, s->linesize, 100);
1998 }
1999 } else {
2000 int sx = mb_x * 16 + 8;
2001 int sy = mb_y * 16 + 8;
2002 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
2003 int mx = pict->motion_val[direction][xy][0] >> shift + sx;
2004 int my = pict->motion_val[direction][xy][1] >> shift + sy;
2005 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
2006 }
2007 }
2008 }
2009 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
2010 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
2011 0x0101010101010101ULL;
2012 int y;
2013 for (y = 0; y < block_height; y++) {
2014 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2015 (block_height * mb_y + y) *
2016 pict->linesize[1]) = c;
2017 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2018 (block_height * mb_y + y) *
2019 pict->linesize[2]) = c;
2020 }
2021 }
2022 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2023 pict->motion_val) {
2024 int mb_type = pict->mb_type[mb_index];
2025 uint64_t u,v;
2026 int y;
2027 #define COLOR(theta, r) \
2028 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2029 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2030
2031
2032 u = v = 128;
2033 if (IS_PCM(mb_type)) {
2034 COLOR(120, 48)
2035 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2036 IS_INTRA16x16(mb_type)) {
2037 COLOR(30, 48)
2038 } else if (IS_INTRA4x4(mb_type)) {
2039 COLOR(90, 48)
2040 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2041 // COLOR(120, 48)
2042 } else if (IS_DIRECT(mb_type)) {
2043 COLOR(150, 48)
2044 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2045 COLOR(170, 48)
2046 } else if (IS_GMC(mb_type)) {
2047 COLOR(190, 48)
2048 } else if (IS_SKIP(mb_type)) {
2049 // COLOR(180, 48)
2050 } else if (!USES_LIST(mb_type, 1)) {
2051 COLOR(240, 48)
2052 } else if (!USES_LIST(mb_type, 0)) {
2053 COLOR(0, 48)
2054 } else {
2055 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2056 COLOR(300,48)
2057 }
2058
2059 u *= 0x0101010101010101ULL;
2060 v *= 0x0101010101010101ULL;
2061 for (y = 0; y < block_height; y++) {
2062 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2063 (block_height * mb_y + y) * pict->linesize[1]) = u;
2064 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2065 (block_height * mb_y + y) * pict->linesize[2]) = v;
2066 }
2067
2068 // segmentation
2069 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2070 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2071 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2072 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2073 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2074 }
2075 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2076 for (y = 0; y < 16; y++)
2077 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2078 pict->linesize[0]] ^= 0x80;
2079 }
2080 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2081 int dm = 1 << (mv_sample_log2 - 2);
2082 for (i = 0; i < 4; i++) {
2083 int sx = mb_x * 16 + 8 * (i & 1);
2084 int sy = mb_y * 16 + 8 * (i >> 1);
2085 int xy = (mb_x * 2 + (i & 1) +
2086 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2087 // FIXME bidir
2088 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
2089 if (mv[0] != mv[dm] ||
2090 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2091 for (y = 0; y < 8; y++)
2092 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2093 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2094 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2095 pict->linesize[0]) ^= 0x8080808080808080ULL;
2096 }
2097 }
2098
2099 if (IS_INTERLACED(mb_type) &&
2100 s->codec_id == AV_CODEC_ID_H264) {
2101 // hmm
2102 }
2103 }
2104 s->mbskip_table[mb_index] = 0;
2105 }
2106 }
2107 }
2108 }
2109
2110 /**
2111 * find the lowest MB row referenced in the MVs
2112 */
2113 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2114 {
2115 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2116 int my, off, i, mvs;
2117
2118 if (s->picture_structure != PICT_FRAME || s->mcsel)
2119 goto unhandled;
2120
2121 switch (s->mv_type) {
2122 case MV_TYPE_16X16:
2123 mvs = 1;
2124 break;
2125 case MV_TYPE_16X8:
2126 mvs = 2;
2127 break;
2128 case MV_TYPE_8X8:
2129 mvs = 4;
2130 break;
2131 default:
2132 goto unhandled;
2133 }
2134
2135 for (i = 0; i < mvs; i++) {
2136 my = s->mv[dir][i][1]<<qpel_shift;
2137 my_max = FFMAX(my_max, my);
2138 my_min = FFMIN(my_min, my);
2139 }
2140
2141 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2142
2143 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2144 unhandled:
2145 return s->mb_height-1;
2146 }
2147
2148 /* put block[] to dest[] */
2149 static inline void put_dct(MpegEncContext *s,
2150 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2151 {
2152 s->dct_unquantize_intra(s, block, i, qscale);
2153 s->dsp.idct_put (dest, line_size, block);
2154 }
2155
2156 /* add block[] to dest[] */
2157 static inline void add_dct(MpegEncContext *s,
2158 int16_t *block, int i, uint8_t *dest, int line_size)
2159 {
2160 if (s->block_last_index[i] >= 0) {
2161 s->dsp.idct_add (dest, line_size, block);
2162 }
2163 }
2164
2165 static inline void add_dequant_dct(MpegEncContext *s,
2166 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2167 {
2168 if (s->block_last_index[i] >= 0) {
2169 s->dct_unquantize_inter(s, block, i, qscale);
2170
2171 s->dsp.idct_add (dest, line_size, block);
2172 }
2173 }
2174
2175 /**
2176 * Clean dc, ac, coded_block for the current non-intra MB.
2177 */
2178 void ff_clean_intra_table_entries(MpegEncContext *s)
2179 {
2180 int wrap = s->b8_stride;
2181 int xy = s->block_index[0];
2182
2183 s->dc_val[0][xy ] =
2184 s->dc_val[0][xy + 1 ] =
2185 s->dc_val[0][xy + wrap] =
2186 s->dc_val[0][xy + 1 + wrap] = 1024;
2187 /* ac pred */
2188 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2189 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2190 if (s->msmpeg4_version>=3) {
2191 s->coded_block[xy ] =
2192 s->coded_block[xy + 1 ] =
2193 s->coded_block[xy + wrap] =
2194 s->coded_block[xy + 1 + wrap] = 0;
2195 }
2196 /* chroma */
2197 wrap = s->mb_stride;
2198 xy = s->mb_x + s->mb_y * wrap;
2199 s->dc_val[1][xy] =
2200 s->dc_val[2][xy] = 1024;
2201 /* ac pred */
2202 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2203 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2204
2205 s->mbintra_table[xy]= 0;
2206 }
2207
2208 /* generic function called after a macroblock has been parsed by the
2209 decoder or after it has been encoded by the encoder.
2210
2211 Important variables used:
2212 s->mb_intra : true if intra macroblock
2213 s->mv_dir : motion vector direction
2214 s->mv_type : motion vector type
2215 s->mv : motion vector
2216 s->interlaced_dct : true if interlaced dct used (mpeg2)
2217 */
/* Reconstruct one macroblock: optional motion compensation, dequant/IDCT of
 * the residual, and write-back into the current picture (or a scratch
 * buffer for the non-readable B-frame case, copied out at the end). */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                            int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

    /* XvMC acceleration handles the MB in hardware; nothing to do here. */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* save DCT coefficients */
       int i,j;
       int16_t *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               /* store undoing the IDCT permutation, and log each value */
               *dct++ = block[i][s->dsp.idct_permutation[j]];
               av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.f.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            /* H.263-style prediction: reset the intra tables only if the
             * co-located MB of the previous pass was intra coded */
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* Skip the pixel-domain reconstruction while encoding unless the
     * reconstructed frame is actually needed (PSNR, non-intra/non-B
     * reference frames, or RD macroblock decision). */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
        const int block_size = 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.f.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        /* Interlaced DCT addresses one field, i.e. every second line. */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* NOTE(review): non-readable B frames are reconstructed into a
             * scratch buffer and copied to s->dest after skip_idct below. */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){

                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    /* Frame threading: wait until the reference rows we are
                     * about to read have been decoded by the other thread. */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->f,
                                                 ff_MPV_lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->f,
                                                 ff_MPV_lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                op_qpix= s->me.qpel_put;
                if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                    op_pix = s->dsp.put_pixels_tab;
                }else{
                    op_pix = s->dsp.put_no_rnd_pixels_tab;
                }
                if (s->mv_dir & MV_DIR_FORWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                    /* a following backward prediction is averaged on top */
                    op_pix = s->dsp.avg_pixels_tab;
                    op_qpix= s->me.qpel_avg;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            /* First branch: coefficients still need dequantization here;
             * second branch: coefficients arrive already dequantized. */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* NOTE(review): MPEG-1/2 decoding reaches here with blocks
                 * that are not run through put_dct, i.e. dequantization is
                 * presumably done during parsing — only the IDCT runs. */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        if(!readable){
            /* copy the reconstructed MB out of the scratch buffer */
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
2449
2450 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2451 #if !CONFIG_SMALL
2452 if(s->out_format == FMT_MPEG1) {
2453 MPV_decode_mb_internal(s, block, 1);
2454 } else
2455 #endif
2456 MPV_decode_mb_internal(s, block, 0);
2457 }
2458
2459 /**
2460 * @param h is the normal height, this will be reduced automatically if needed for the last row
2461 */
2462 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2463 const int field_pic= s->picture_structure != PICT_FRAME;
2464 if(field_pic){
2465 h <<= 1;
2466 y <<= 1;
2467 }
2468
2469 if (!s->avctx->hwaccel
2470 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2471 && s->unrestricted_mv
2472 && s->current_picture.f.reference
2473 && !s->intra_only
2474 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2475 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
2476 int sides = 0, edge_h;
2477 int hshift = desc->log2_chroma_w;
2478 int vshift = desc->log2_chroma_h;
2479 if (y==0) sides |= EDGE_TOP;
2480 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2481
2482 edge_h= FFMIN(h, s->v_edge_pos - y);
2483
2484 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2485 s->linesize, s->h_edge_pos, edge_h,
2486 EDGE_WIDTH, EDGE_WIDTH, sides);
2487 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2488 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2489 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2490 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2491 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2492 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2493 }
2494
2495 h= FFMIN(h, s->avctx->height - y);
2496
2497 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2498
2499 if (s->avctx->draw_horiz_band) {
2500 AVFrame *src;
2501 int offset[AV_NUM_DATA_POINTERS];
2502 int i;
2503
2504 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2505 src = &s->current_picture_ptr->f;
2506 else if(s->last_picture_ptr)
2507 src = &s->last_picture_ptr->f;
2508 else
2509 return;
2510
2511 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2512 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2513 offset[i] = 0;
2514 }else{
2515 offset[0]= y * s->linesize;
2516 offset[1]=
2517 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2518 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2519 offset[i] = 0;
2520 }
2521
2522 emms_c();
2523
2524 s->avctx->draw_horiz_band(s->avctx, src, offset,
2525 y, s->picture_structure, h);
2526 }
2527 }
2528
/* Set up block_index[] and the dest[] pointers for the current MB row.
 * NOTE(review): the -2/-1 and (mb_x - 1) biases position everything one MB
 * to the left; the per-MB advance appears to happen in the caller — confirm. */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    const int mb_size= 4;   // log2 of the luma MB size (16 pixels)

    /* four 8x8 luma block indices, two per b8 row */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* chroma indices, offset past the luma part of the tables */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y *   linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: only every second MB row belongs to this field */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
2560
2561 /**
2562 * Permute an 8x8 block.
2563 * @param block the block which will be permuted according to the given permutation vector
2564 * @param permutation the permutation vector
2565 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
2566 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
2567 * (inverse) permutated to scantable order!
2568 */
2569 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2570 {
2571 int i;
2572 int16_t temp[64];
2573
2574 if(last<=0) return;
2575 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
2576
2577 for(i=0; i<=last; i++){
2578 const int j= scantable[i];
2579 temp[j]= block[j];
2580 block[j]=0;
2581 }
2582
2583 for(i=0; i<=last; i++){
2584 const int j= scantable[i];
2585 const int perm_j= permutation[j];
2586 block[perm_j]= temp[j];
2587 }
2588 }
2589
2590 void ff_mpeg_flush(AVCodecContext *avctx){
2591 int i;
2592 MpegEncContext *s = avctx->priv_data;
2593
2594 if(s==NULL || s->picture==NULL)
2595 return;
2596
2597 for(i=0; i<s->picture_count; i++){
2598 if (s->picture[i].f.data[0] &&
2599 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2600 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2601 free_frame_buffer(s, &s->picture[i]);
2602 }
2603 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2604
2605 s->mb_x= s->mb_y= 0;
2606
2607 s->parse_context.state= -1;
2608 s->parse_context.frame_start_found= 0;
2609 s->parse_context.overread= 0;
2610 s->parse_context.overread_index= 0;
2611 s->parse_context.index= 0;
2612 s->parse_context.last_index= 0;
2613 s->bitstream_buffer_size=0;
2614 s->pp_time=0;
2615 }
2616
2617 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2618 int16_t *block, int n, int qscale)
2619 {
2620 int i, level, nCoeffs;
2621 const uint16_t *quant_matrix;
2622
2623 nCoeffs= s->block_last_index[n];
2624
2625 if (n < 4)
2626 block[0] = block[0] * s->y_dc_scale;
2627 else
2628 block[0] = block[0] * s->c_dc_scale;
2629 /* XXX: only mpeg1 */
2630 quant_matrix = s->intra_matrix;
2631 for(i=1;i<=nCoeffs;i++) {
2632 int j= s->intra_scantable.permutated[i];
2633 level = block[j];
2634 if (level) {
2635 if (level < 0) {
2636 level = -level;
2637 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2638 level = (level - 1) | 1;
2639 level = -level;
2640 } else {
2641 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2642 level = (level - 1) | 1;
2643 }
2644 block[j] = level;
2645 }
2646 }
2647 }
2648
2649 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2650 int16_t *block, int n, int qscale)
2651 {
2652 int i, level, nCoeffs;
2653 const uint16_t *quant_matrix;
2654
2655 nCoeffs= s->block_last_index[n];
2656
2657 quant_matrix = s->inter_matrix;
2658 for(i=0; i<=nCoeffs; i++) {
2659 int j= s->intra_scantable.permutated[i];
2660 level = block[j];
2661 if (level) {
2662 if (level < 0) {
2663 level = -level;
2664 level = (((level << 1) + 1) * qscale *
2665 ((int) (quant_matrix[j]))) >> 4;
2666 level = (level - 1) | 1;
2667 level = -level;
2668 } else {
2669 level = (((level << 1) + 1) * qscale *
2670 ((int) (quant_matrix[j]))) >> 4;
2671 level = (level - 1) | 1;
2672 }
2673 block[j] = level;
2674 }
2675 }
2676 }
2677
2678 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2679 int16_t *block, int n, int qscale)
2680 {
2681 int i, level, nCoeffs;
2682 const uint16_t *quant_matrix;
2683
2684 if(s->alternate_scan) nCoeffs= 63;
2685 else nCoeffs= s->block_last_index[n];
2686
2687 if (n < 4)
2688 block[0] = block[0] * s->y_dc_scale;
2689 else
2690 block[0] = block[0] * s->c_dc_scale;
2691 quant_matrix = s->intra_matrix;
2692 for(i=1;i<=nCoeffs;i++) {
2693 int j= s->intra_scantable.permutated[i];
2694 level = block[j];
2695 if (level) {
2696 if (level < 0) {
2697 level = -level;
2698 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2699 level = -level;
2700 } else {
2701 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2702 }
2703 block[j] = level;
2704 }
2705 }
2706 }
2707
2708 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2709 int16_t *block, int n, int qscale)
2710 {
2711 int i, level, nCoeffs;
2712 const uint16_t *quant_matrix;
2713 int sum=-1;
2714
2715 if(s->alternate_scan) nCoeffs= 63;
2716 else nCoeffs= s->block_last_index[n];
2717
2718 if (n < 4)
2719 block[0] = block[0] * s->y_dc_scale;
2720 else
2721 block[0] = block[0] * s->c_dc_scale;
2722 quant_matrix = s->intra_matrix;
2723 for(i=1;i<=nCoeffs;i++) {
2724 int j= s->intra_scantable.permutated[i];
2725 level = block[j];
2726 if (level) {
2727 if (level < 0) {
2728 level = -level;
2729 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2730 level = -level;
2731 } else {
2732 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2733 }
2734 block[j] = level;
2735 sum+=level;
2736 }
2737 }
2738 block[63]^=sum&1;
2739 }
2740
2741 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2742 int16_t *block, int n, int qscale)
2743 {
2744 int i, level, nCoeffs;
2745 const uint16_t *quant_matrix;
2746 int sum=-1;
2747
2748 if(s->alternate_scan) nCoeffs= 63;
2749 else nCoeffs= s->block_last_index[n];
2750
2751 quant_matrix = s->inter_matrix;
2752 for(i=0; i<=nCoeffs; i++) {
2753 int j= s->intra_scantable.permutated[i];
2754 level = block[j];
2755 if (level) {
2756 if (level < 0) {
2757 level = -level;
2758 level = (((level << 1) + 1) * qscale *
2759 ((int) (quant_matrix[j]))) >> 4;
2760 level = -level;
2761 } else {
2762 level = (((level << 1) + 1) * qscale *
2763 ((int) (quant_matrix[j]))) >> 4;
2764 }
2765 block[j] = level;
2766 sum+=level;
2767 }
2768 }
2769 block[63]^=sum&1;
2770 }
2771
2772 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2773 int16_t *block, int n, int qscale)
2774 {
2775 int i, level, qmul, qadd;
2776 int nCoeffs;
2777
2778 assert(s->block_last_index[n]>=0);
2779
2780 qmul = qscale << 1;
2781
2782 if (!s->h263_aic) {
2783 if (n < 4)
2784 block[0] = block[0] * s->y_dc_scale;
2785 else
2786 block[0] = block[0] * s->c_dc_scale;
2787 qadd = (qscale - 1) | 1;
2788 }else{
2789 qadd = 0;
2790 }
2791 if(s->ac_pred)
2792 nCoeffs=63;
2793 else
2794 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2795
2796 for(i=1; i<=nCoeffs; i++) {
2797 level = block[i];
2798 if (level) {
2799 if (level < 0) {
2800 level = level * qmul - qadd;
2801 } else {
2802 level = level * qmul + qadd;
2803 }
2804 block[i] = level;
2805 }
2806 }
2807 }
2808
2809 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2810 int16_t *block, int n, int qscale)