lavc: Split out ff_hwaccel_pixfmt_list_420[] over individual codecs
[libav.git] / libavcodec / mpegvideo.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include "libavutil/imgutils.h"
31 #include "avcodec.h"
32 #include "dsputil.h"
33 #include "internal.h"
34 #include "mathops.h"
35 #include "mpegvideo.h"
36 #include "mjpegenc.h"
37 #include "msmpeg4.h"
38 #include "xvmc_internal.h"
39 #include "thread.h"
40 #include <limits.h>
41
42 //#undef NDEBUG
43 //#include <assert.h>
44
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
59
60
61 /* enable all paranoid tests for rounding, overflows, etc... */
62 //#define PARANOID
63
64 //#define DEBUG
65
66
/* Default luma-qscale -> chroma-qscale mapping: the identity (entry i == i),
 * i.e. chroma uses the same quantizer as luma.  Installed in
 * ff_MPV_common_defaults() as s->chroma_qscale_table. */
67 static const uint8_t ff_default_chroma_qscale_table[32] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
70 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
71 };
72
/* DC scale table: every one of the 128 entries is 8 (fixed DC scale,
 * MPEG-1 style).  Also reused as entry 0 of ff_mpeg2_dc_scale_table[]
 * below and as the default y/c DC scale table in ff_MPV_common_defaults(). */
73 const uint8_t ff_mpeg1_dc_scale_table[128] = {
74 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 };
84
/* DC scale table with every entry equal to 4; selected via entry 1 of
 * ff_mpeg2_dc_scale_table[] below. */
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 };
96
/* DC scale table with every entry equal to 2; selected via entry 2 of
 * ff_mpeg2_dc_scale_table[] below. */
97 static const uint8_t mpeg2_dc_scale_table2[128] = {
98 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 };
108
/* DC scale table with every entry equal to 1 (no DC scaling); selected via
 * entry 3 of ff_mpeg2_dc_scale_table[] below. */
109 static const uint8_t mpeg2_dc_scale_table3[128] = {
110 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 };
120
/* Selector over the four constant DC scale tables above (values 8, 4, 2, 1).
 * The index presumably corresponds to the MPEG-2 intra_dc_precision field
 * (0..3) -- confirm at the call sites outside this file. */
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
122 ff_mpeg1_dc_scale_table,
123 mpeg2_dc_scale_table1,
124 mpeg2_dc_scale_table2,
125 mpeg2_dc_scale_table3,
126 };
127
/* AV_PIX_FMT_NONE-terminated pixel-format list containing only planar
 * YUV 4:2:0; shared by the codecs that output exactly that format. */
128 const enum AVPixelFormat ff_pixfmt_list_420[] = {
129 AV_PIX_FMT_YUV420P,
130 AV_PIX_FMT_NONE
131 };
132
/* Error-resilience callback, installed as er->decode_mb in init_er():
 * re-decodes/conceals one macroblock.  Loads the given MB parameters into
 * the context, recomputes block indices and the destination pointers for
 * the three planes, then hands the block to ff_MPV_decode_mb(). */
133 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
134 int (*mv)[2][4][2],
135 int mb_x, int mb_y, int mb_intra, int mb_skipped)
136 {
137 MpegEncContext *s = opaque;
138
139 s->mv_dir = mv_dir;
140 s->mv_type = mv_type;
141 s->mb_intra = mb_intra;
142 s->mb_skipped = mb_skipped;
143 s->mb_x = mb_x;
144 s->mb_y = mb_y;
145 memcpy(s->mv, mv, sizeof(*mv));
146
147 ff_init_block_index(s);
148 ff_update_block_index(s);
149
150 s->dsp.clear_blocks(s->block[0]);
151
/* destination = top-left of this 16x16 MB; chroma offsets are scaled by
 * the chroma subsampling shifts set up in ff_MPV_common_init() */
152 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
153 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
154 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
155
/* only a single reference is supported by this concealment path */
156 assert(ref == 0);
157 ff_MPV_decode_mb(s, s->block);
158 }
159
/* Find the next MPEG start code prefix (0x000001xx) in [p, end).
 * *state is a sliding big-endian window of the most recent bytes and is
 * carried across calls, so a start code split between two input buffers
 * is still detected.  Returns a pointer just past the start code byte,
 * or end if no start code was found. */
160 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
161 const uint8_t *end,
162 uint32_t * restrict state)
163 {
164 int i;
165
166 assert(p <= end);
167 if (p >= end)
168 return end;
169
/* warm-up: feed up to 3 bytes through *state so a prefix carried over
 * from the previous buffer is completed */
170 for (i = 0; i < 3; i++) {
171 uint32_t tmp = *state << 8;
172 *state = tmp + *(p++);
173 if (tmp == 0x100 || p == end)
174 return p;
175 }
176
/* Fast scan.  The warm-up loop above either returned or advanced p by 3,
 * so p[-1], p[-2], p[-3] never read before the buffer.  Each probe rules
 * out positions that cannot terminate a 00 00 01 prefix and skips ahead
 * by 3, 2 or 1 bytes; the final else means a full prefix ends at p. */
177 while (p < end) {
178 if (p[-1] > 1 ) p += 3;
179 else if (p[-2] ) p += 2;
180 else if (p[-3]|(p[-1]-1)) p++;
181 else {
182 p++;
183 break;
184 }
185 }
186
/* reload *state with the 4 bytes ending at the match (or at end); the
 * loop guarantees p >= start of buffer + 4 here */
187 p = FFMIN(p, end) - 4;
188 *state = AV_RB32(p);
189
190 return p + 4;
191 }
192
193 /* init common dct for both encoder and decoder */
/* Sets up the DSP/videodsp contexts, installs the C dequantizers (with a
 * bit-exact MPEG-2 intra variant under CODEC_FLAG_BITEXACT), lets the
 * architecture-specific init override them, and builds the permuted
 * scantables.  Always returns 0. */
194 av_cold int ff_dct_common_init(MpegEncContext *s)
195 {
196 ff_dsputil_init(&s->dsp, s->avctx);
197 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
198
199 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
200 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
201 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
202 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
203 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
204 if (s->flags & CODEC_FLAG_BITEXACT)
205 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
206 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
207
/* per-architecture overrides; must run after the C defaults above */
208 #if ARCH_X86
209 ff_MPV_common_init_x86(s);
210 #elif ARCH_ALPHA
211 ff_MPV_common_init_axp(s);
212 #elif ARCH_ARM
213 ff_MPV_common_init_arm(s);
214 #elif HAVE_ALTIVEC
215 ff_MPV_common_init_altivec(s);
216 #elif ARCH_BFIN
217 ff_MPV_common_init_bfin(s);
218 #endif
219
220 /* load & permutate scantables
221 * note: only wmv uses different ones
222 */
223 if (s->alternate_scan) {
224 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
225 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
226 } else {
227 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
228 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
229 }
230 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
231 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
232
233 return 0;
234 }
235
236 void ff_copy_picture(Picture *dst, Picture *src)
237 {
238 *dst = *src;
239 dst->f.type = FF_BUFFER_TYPE_COPY;
240 }
241
242 /**
243 * Release a frame buffer
244 */
245 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
246 {
247 /* WM Image / Screen codecs allocate internal buffers with different
248 * dimensions / colorspaces; ignore user-defined callbacks for these. */
249 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
250 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
251 s->codec_id != AV_CODEC_ID_MSS2)
252 ff_thread_release_buffer(s->avctx, &pic->f);
253 else
254 avcodec_default_release_buffer(s->avctx, &pic->f);
255 av_freep(&pic->f.hwaccel_picture_private);
256 }
257
/* Allocate the linesize-dependent scratch buffers: the edge-emulation
 * buffer plus one shared motion-estimation/RD/B-frame scratchpad.
 * me.temp, rd_scratchpad and b_scratchpad all alias the same allocation;
 * obmc_scratchpad points 16 bytes into it.
 * Returns 0 on success, AVERROR(ENOMEM) on failure (with
 * edge_emu_buffer freed again). */
258 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
259 {
260 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
261
262 // edge emu needs blocksize + filter length - 1
263 // (= 17x17 for halfpel / 21x21 for h264)
264 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
265 // at uvlinesize. It supports only YUV420 so 24x24 is enough
266 // linesize * interlaced * MBsize
267 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
268 fail);
269
270 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 2,
271 fail)
272 s->me.temp = s->me.scratchpad;
273 s->rd_scratchpad = s->me.scratchpad;
274 s->b_scratchpad = s->me.scratchpad;
275 s->obmc_scratchpad = s->me.scratchpad + 16;
276
277 return 0;
278 fail:
279 av_freep(&s->edge_emu_buffer);
280 return AVERROR(ENOMEM);
281 }
282
283 /**
284 * Allocate a frame buffer
285 */
286 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
287 {
288 int r, ret;
289
290 if (s->avctx->hwaccel) {
291 assert(!pic->f.hwaccel_picture_private);
292 if (s->avctx->hwaccel->priv_data_size) {
293 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
294 if (!pic->f.hwaccel_picture_private) {
295 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
296 return -1;
297 }
298 }
299 }
300
301 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
302 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
303 s->codec_id != AV_CODEC_ID_MSS2)
304 r = ff_thread_get_buffer(s->avctx, &pic->f);
305 else
306 r = avcodec_default_get_buffer(s->avctx, &pic->f);
307
308 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
309 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
310 r, pic->f.type, pic->f.data[0]);
311 av_freep(&pic->f.hwaccel_picture_private);
312 return -1;
313 }
314
315 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
316 s->uvlinesize != pic->f.linesize[1])) {
317 av_log(s->avctx, AV_LOG_ERROR,
318 "get_buffer() failed (stride changed)\n");
319 free_frame_buffer(s, pic);
320 return -1;
321 }
322
323 if (pic->f.linesize[1] != pic->f.linesize[2]) {
324 av_log(s->avctx, AV_LOG_ERROR,
325 "get_buffer() failed (uv stride mismatch)\n");
326 free_frame_buffer(s, pic);
327 return -1;
328 }
329
330 if (!s->edge_emu_buffer &&
331 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
332 av_log(s->avctx, AV_LOG_ERROR,
333 "get_buffer() failed to allocate context scratch buffers.\n");
334 free_frame_buffer(s, pic);
335 return ret;
336 }
337
338 return 0;
339 }
340
341 /**
342 * Allocate a Picture.
343 * The pixels are allocated/set by calling get_buffer() if shared = 0
344 */
345 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
346 {
347 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
348
349 // the + 1 is needed so memset(,,stride*height) does not sig11
350
351 const int mb_array_size = s->mb_stride * s->mb_height;
352 const int b8_array_size = s->b8_stride * s->mb_height * 2;
353 const int b4_array_size = s->b4_stride * s->mb_height * 4;
354 int i;
355 int r = -1;
356
357 if (shared) {
358 assert(pic->f.data[0]);
359 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
360 pic->f.type = FF_BUFFER_TYPE_SHARED;
361 } else {
362 assert(!pic->f.data[0]);
363
364 if (alloc_frame_buffer(s, pic) < 0)
365 return -1;
366
367 s->linesize = pic->f.linesize[0];
368 s->uvlinesize = pic->f.linesize[1];
369 }
370
371 if (pic->f.qscale_table == NULL) {
372 if (s->encoding) {
373 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
374 mb_array_size * sizeof(int16_t), fail)
375 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
376 mb_array_size * sizeof(int16_t), fail)
377 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
378 mb_array_size * sizeof(int8_t ), fail)
379 }
380
381 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
382 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
383 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
384 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
385 fail)
386 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
387 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
388 fail)
389 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
390 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
391 if (s->out_format == FMT_H264) {
392 for (i = 0; i < 2; i++) {
393 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
394 2 * (b4_array_size + 4) * sizeof(int16_t),
395 fail)
396 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
397 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
398 4 * mb_array_size * sizeof(uint8_t), fail)
399 }
400 pic->f.motion_subsample_log2 = 2;
401 } else if (s->out_format == FMT_H263 || s->encoding ||
402 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
403 for (i = 0; i < 2; i++) {
404 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
405 2 * (b8_array_size + 4) * sizeof(int16_t),
406 fail)
407 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
408 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
409 4 * mb_array_size * sizeof(uint8_t), fail)
410 }
411 pic->f.motion_subsample_log2 = 3;
412 }
413 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
414 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
415 64 * mb_array_size * sizeof(int16_t) * 6, fail)
416 }
417 pic->f.qstride = s->mb_stride;
418 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
419 1 * sizeof(AVPanScan), fail)
420 }
421
422 pic->owner2 = s;
423
424 return 0;
425 fail: // for the FF_ALLOCZ_OR_GOTO macro
426 if (r >= 0)
427 free_frame_buffer(s, pic);
428 return -1;
429 }
430
431 /**
432 * Deallocate a picture.
433 */
434 static void free_picture(MpegEncContext *s, Picture *pic)
435 {
436 int i;
437
438 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
439 free_frame_buffer(s, pic);
440 }
441
442 av_freep(&pic->mb_var);
443 av_freep(&pic->mc_mb_var);
444 av_freep(&pic->mb_mean);
445 av_freep(&pic->f.mbskip_table);
446 av_freep(&pic->qscale_table_base);
447 pic->f.qscale_table = NULL;
448 av_freep(&pic->mb_type_base);
449 pic->f.mb_type = NULL;
450 av_freep(&pic->f.dct_coeff);
451 av_freep(&pic->f.pan_scan);
452 pic->f.mb_type = NULL;
453 for (i = 0; i < 2; i++) {
454 av_freep(&pic->motion_val_base[i]);
455 av_freep(&pic->f.ref_index[i]);
456 pic->f.motion_val[i] = NULL;
457 }
458
459 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
460 for (i = 0; i < 4; i++) {
461 pic->f.base[i] =
462 pic->f.data[i] = NULL;
463 }
464 pic->f.type = 0;
465 }
466 }
467
/* Allocate the per-slice/per-thread parts of a (duplicated) context:
 * ME maps and optional noise-reduction accumulator for the encoder, the
 * block arrays, and the H.263-family AC prediction values.  The
 * linesize-dependent scratch buffers are deliberately left NULL here and
 * allocated later by ff_mpv_frame_size_alloc().
 * Returns 0 on success, -1 on failure (partial allocations are freed by
 * ff_MPV_common_end()). */
468 static int init_duplicate_context(MpegEncContext *s)
469 {
470 int y_size = s->b8_stride * (2 * s->mb_height + 1);
471 int c_size = s->mb_stride * (s->mb_height + 1);
472 int yc_size = y_size + 2 * c_size;
473 int i;
474
475 s->edge_emu_buffer =
476 s->me.scratchpad =
477 s->me.temp =
478 s->rd_scratchpad =
479 s->b_scratchpad =
480 s->obmc_scratchpad = NULL;
481
482 if (s->encoding) {
483 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
484 ME_MAP_SIZE * sizeof(uint32_t), fail)
485 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
486 ME_MAP_SIZE * sizeof(uint32_t), fail)
487 if (s->avctx->noise_reduction) {
488 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
489 2 * 64 * sizeof(int), fail)
490 }
491 }
/* 12 blocks of 64 coefficients, double-buffered */
492 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
493 s->block = s->blocks[0];
494
495 for (i = 0; i < 12; i++) {
496 s->pblocks[i] = &s->block[i];
497 }
498
499 if (s->out_format == FMT_H263) {
500 /* ac values */
/* layout: luma plane (y_size), then the two chroma planes; each access
 * pointer skips one stride + 1 of padding */
501 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
502 yc_size * sizeof(int16_t) * 16, fail);
503 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
504 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
505 s->ac_val[2] = s->ac_val[1] + c_size;
506 }
507
508 return 0;
509 fail:
510 return -1; // free() through ff_MPV_common_end()
511 }
512
513 static void free_duplicate_context(MpegEncContext *s)
514 {
515 if (s == NULL)
516 return;
517
518 av_freep(&s->edge_emu_buffer);
519 av_freep(&s->me.scratchpad);
520 s->me.temp =
521 s->rd_scratchpad =
522 s->b_scratchpad =
523 s->obmc_scratchpad = NULL;
524
525 av_freep(&s->dct_error_sum);
526 av_freep(&s->me.map);
527 av_freep(&s->me.score_map);
528 av_freep(&s->blocks);
529 av_freep(&s->ac_val_base);
530 s->block = NULL;
531 }
532
/* Save the per-slice/per-thread fields of src into bak.  Used by
 * ff_update_duplicate_context() to preserve a context's private pointers
 * and state across a whole-struct memcpy(). */
533 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
534 {
535 #define COPY(a) bak->a = src->a
536 COPY(edge_emu_buffer);
537 COPY(me.scratchpad);
538 COPY(me.temp);
539 COPY(rd_scratchpad);
540 COPY(b_scratchpad);
541 COPY(obmc_scratchpad);
542 COPY(me.map);
543 COPY(me.score_map);
544 COPY(blocks);
545 COPY(block);
546 COPY(start_mb_y);
547 COPY(end_mb_y);
548 COPY(me.map_generation);
549 COPY(pb);
550 COPY(dct_error_sum);
551 COPY(dct_count[0]);
552 COPY(dct_count[1]);
553 COPY(ac_val_base);
554 COPY(ac_val[0]);
555 COPY(ac_val[1]);
556 COPY(ac_val[2]);
557 #undef COPY
558 }
559
/* Copy src into dst while keeping dst's own per-thread buffers: the
 * private fields are saved, the whole struct is memcpy()ed, and the saved
 * fields are restored; pblocks is then re-pointed at dst's own block
 * array, and the scratch buffers are (re)allocated if missing.
 * Returns 0 on success, a negative AVERROR on allocation failure. */
560 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
561 {
562 MpegEncContext bak;
563 int i, ret;
564 // FIXME copy only needed parts
565 // START_TIMER
566 backup_duplicate_context(&bak, dst);
567 memcpy(dst, src, sizeof(MpegEncContext));
568 backup_duplicate_context(dst, &bak);
/* pblocks must reference dst's blocks, not src's */
569 for (i = 0; i < 12; i++) {
570 dst->pblocks[i] = &dst->block[i];
571 }
572 if (!dst->edge_emu_buffer &&
573 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
574 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
575 "scratch buffers.\n");
576 return ret;
577 }
578 // STOP_TIMER("update_duplicate_context")
579 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
580 return 0;
581 }
582
583 int ff_mpeg_update_thread_context(AVCodecContext *dst,
584 const AVCodecContext *src)
585 {
586 int i;
587 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
588
589 if (dst == src || !s1->context_initialized)
590 return 0;
591
592 // FIXME can parameters change on I-frames?
593 // in that case dst may need a reinit
594 if (!s->context_initialized) {
595 memcpy(s, s1, sizeof(MpegEncContext));
596
597 s->avctx = dst;
598 s->picture_range_start += MAX_PICTURE_COUNT;
599 s->picture_range_end += MAX_PICTURE_COUNT;
600 s->bitstream_buffer = NULL;
601 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
602
603 ff_MPV_common_init(s);
604 }
605
606 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
607 int err;
608 s->context_reinit = 0;
609 s->height = s1->height;
610 s->width = s1->width;
611 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
612 return err;
613 }
614
615 s->avctx->coded_height = s1->avctx->coded_height;
616 s->avctx->coded_width = s1->avctx->coded_width;
617 s->avctx->width = s1->avctx->width;
618 s->avctx->height = s1->avctx->height;
619
620 s->coded_picture_number = s1->coded_picture_number;
621 s->picture_number = s1->picture_number;
622 s->input_picture_number = s1->input_picture_number;
623
624 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
625 memcpy(&s->last_picture, &s1->last_picture,
626 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
627
628 // reset s->picture[].f.extended_data to s->picture[].f.data
629 for (i = 0; i < s->picture_count; i++)
630 s->picture[i].f.extended_data = s->picture[i].f.data;
631
632 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
633 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
634 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
635
636 // Error/bug resilience
637 s->next_p_frame_damaged = s1->next_p_frame_damaged;
638 s->workaround_bugs = s1->workaround_bugs;
639
640 // MPEG4 timing info
641 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
642 (char *) &s1->shape - (char *) &s1->time_increment_bits);
643
644 // B-frame info
645 s->max_b_frames = s1->max_b_frames;
646 s->low_delay = s1->low_delay;
647 s->droppable = s1->droppable;
648
649 // DivX handling (doesn't work)
650 s->divx_packed = s1->divx_packed;
651
652 if (s1->bitstream_buffer) {
653 if (s1->bitstream_buffer_size +
654 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
655 av_fast_malloc(&s->bitstream_buffer,
656 &s->allocated_bitstream_buffer_size,
657 s1->allocated_bitstream_buffer_size);
658 s->bitstream_buffer_size = s1->bitstream_buffer_size;
659 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
660 s1->bitstream_buffer_size);
661 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
662 FF_INPUT_BUFFER_PADDING_SIZE);
663 }
664
665 // linesize dependend scratch buffer allocation
666 if (!s->edge_emu_buffer)
667 if (s1->linesize) {
668 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
669 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
670 "scratch buffers.\n");
671 return AVERROR(ENOMEM);
672 }
673 } else {
674 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
675 "be allocated due to unknown size.\n");
676 return AVERROR_BUG;
677 }
678
679 // MPEG2/interlacing info
680 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
681 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
682
683 if (!s1->first_field) {
684 s->last_pict_type = s1->pict_type;
685 if (s1->current_picture_ptr)
686 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
687
688 if (s1->pict_type != AV_PICTURE_TYPE_B) {
689 s->last_non_b_pict_type = s1->pict_type;
690 }
691 }
692
693 return 0;
694 }
695
696 /**
697 * Set the given MpegEncContext to common defaults
698 * (same for encoding and decoding).
699 * The changed fields will not depend upon the
700 * prior state of the MpegEncContext.
701 */
702 void ff_MPV_common_defaults(MpegEncContext *s)
703 {
704 s->y_dc_scale_table =
705 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
706 s->chroma_qscale_table = ff_default_chroma_qscale_table;
707 s->progressive_frame = 1;
708 s->progressive_sequence = 1;
709 s->picture_structure = PICT_FRAME;
710
711 s->coded_picture_number = 0;
712 s->picture_number = 0;
713 s->input_picture_number = 0;
714
715 s->picture_in_gop_number = 0;
716
717 s->f_code = 1;
718 s->b_code = 1;
719
720 s->picture_range_start = 0;
721 s->picture_range_end = MAX_PICTURE_COUNT;
722
723 s->slice_context_count = 1;
724 }
725
726 /**
727 * Set the given MpegEncContext to defaults for decoding.
728 * the changed fields will not depend upon
729 * the prior state of the MpegEncContext.
730 */
731 void ff_MPV_decode_defaults(MpegEncContext *s)
732 {
733 ff_MPV_common_defaults(s);
734 }
735
/* Set up the error-resilience context: mirror the macroblock geometry from
 * the MpegEncContext, allocate the status/temp tables, share the skip/intra
 * and DC tables, and install mpeg_er_decode_mb() as the concealment
 * callback.  Returns 0 on success, AVERROR(ENOMEM) on failure. */
736 static int init_er(MpegEncContext *s)
737 {
738 ERContext *er = &s->er;
739 int mb_array_size = s->mb_height * s->mb_stride;
740 int i;
741
742 er->avctx = s->avctx;
743 er->dsp = &s->dsp;
744
745 er->mb_index2xy = s->mb_index2xy;
746 er->mb_num = s->mb_num;
747 er->mb_width = s->mb_width;
748 er->mb_height = s->mb_height;
749 er->mb_stride = s->mb_stride;
750 er->b8_stride = s->b8_stride;
751
752 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
753 er->error_status_table = av_mallocz(mb_array_size);
754 if (!er->er_temp_buffer || !er->error_status_table)
755 goto fail;
756
/* shared with (not owned by) the ER context */
757 er->mbskip_table = s->mbskip_table;
758 er->mbintra_table = s->mbintra_table;
759
760 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
761 er->dc_val[i] = s->dc_val[i];
762
763 er->decode_mb = mpeg_er_decode_mb;
764 er->opaque = s;
765
766 return 0;
767 fail:
768 av_freep(&er->er_temp_buffer);
769 av_freep(&er->error_status_table);
770 return AVERROR(ENOMEM);
771 }
772
773 /**
774 * Initialize and allocates MpegEncContext fields dependent on the resolution.
775 */
776 static int init_context_frame(MpegEncContext *s)
777 {
778 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
779
780 s->mb_width = (s->width + 15) / 16;
781 s->mb_stride = s->mb_width + 1;
782 s->b8_stride = s->mb_width * 2 + 1;
783 s->b4_stride = s->mb_width * 4 + 1;
784 mb_array_size = s->mb_height * s->mb_stride;
785 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
786
787 /* set default edge pos, will be overriden
788 * in decode_header if needed */
789 s->h_edge_pos = s->mb_width * 16;
790 s->v_edge_pos = s->mb_height * 16;
791
792 s->mb_num = s->mb_width * s->mb_height;
793
794 s->block_wrap[0] =
795 s->block_wrap[1] =
796 s->block_wrap[2] =
797 s->block_wrap[3] = s->b8_stride;
798 s->block_wrap[4] =
799 s->block_wrap[5] = s->mb_stride;
800
801 y_size = s->b8_stride * (2 * s->mb_height + 1);
802 c_size = s->mb_stride * (s->mb_height + 1);
803 yc_size = y_size + 2 * c_size;
804
805 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
806 fail); // error ressilience code looks cleaner with this
807 for (y = 0; y < s->mb_height; y++)
808 for (x = 0; x < s->mb_width; x++)
809 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
810
811 s->mb_index2xy[s->mb_height * s->mb_width] =
812 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
813
814 if (s->encoding) {
815 /* Allocate MV tables */
816 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
817 mv_table_size * 2 * sizeof(int16_t), fail);
818 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
819 mv_table_size * 2 * sizeof(int16_t), fail);
820 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
821 mv_table_size * 2 * sizeof(int16_t), fail);
822 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
823 mv_table_size * 2 * sizeof(int16_t), fail);
824 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
825 mv_table_size * 2 * sizeof(int16_t), fail);
826 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
827 mv_table_size * 2 * sizeof(int16_t), fail);
828 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
829 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
830 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
831 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
832 s->mb_stride + 1;
833 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
834 s->mb_stride + 1;
835 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
836
837 /* Allocate MB type table */
838 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
839 sizeof(uint16_t), fail); // needed for encoding
840
841 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
842 sizeof(int), fail);
843
844 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
845 mb_array_size * sizeof(float), fail);
846 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
847 mb_array_size * sizeof(float), fail);
848
849 }
850
851 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
852 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
853 /* interlaced direct mode decoding tables */
854 for (i = 0; i < 2; i++) {
855 int j, k;
856 for (j = 0; j < 2; j++) {
857 for (k = 0; k < 2; k++) {
858 FF_ALLOCZ_OR_GOTO(s->avctx,
859 s->b_field_mv_table_base[i][j][k],
860 mv_table_size * 2 * sizeof(int16_t),
861 fail);
862 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
863 s->mb_stride + 1;
864 }
865 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
866 mb_array_size * 2 * sizeof(uint8_t), fail);
867 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
868 mv_table_size * 2 * sizeof(int16_t), fail);
869 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
870 + s->mb_stride + 1;
871 }
872 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
873 mb_array_size * 2 * sizeof(uint8_t), fail);
874 }
875 }
876 if (s->out_format == FMT_H263) {
877 /* cbp values */
878 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
879 s->coded_block = s->coded_block_base + s->b8_stride + 1;
880
881 /* cbp, ac_pred, pred_dir */
882 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
883 mb_array_size * sizeof(uint8_t), fail);
884 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
885 mb_array_size * sizeof(uint8_t), fail);
886 }
887
888 if (s->h263_pred || s->h263_plus || !s->encoding) {
889 /* dc values */
890 // MN: we need these for error resilience of intra-frames
891 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
892 yc_size * sizeof(int16_t), fail);
893 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
894 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
895 s->dc_val[2] = s->dc_val[1] + c_size;
896 for (i = 0; i < yc_size; i++)
897 s->dc_val_base[i] = 1024;
898 }
899
900 /* which mb is a intra block */
901 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
902 memset(s->mbintra_table, 1, mb_array_size);
903
904 /* init macroblock skip table */
905 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
906 // Note the + 1 is for a quicker mpeg4 slice_end detection
907
908 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
909 s->avctx->debug_mv) {
910 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
911 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
912 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
913 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
914 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
915 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
916 }
917
918 return init_er(s);
919 fail:
920 return AVERROR(ENOMEM);
921 }
922
/**
 * Initialize the common structure used by both encoder and decoder.
 * Assumes that some variables like width/height (and, for decoding,
 * pix_fmt) are already set by the caller.
 *
 * @return 0 on success, a negative value on failure; on failure the
 *         context is torn down via ff_MPV_common_end()
 */
av_cold int ff_MPV_common_init(MpegEncContext *s)
{
    int i;
    /* one slice context per slice thread, otherwise a single context */
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    /* interlaced MPEG-2 needs an even number of macroblock rows
     * (field macroblocks come in pairs), hence the /32*2 rounding */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != AV_CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    /* clamp the slice count: at most one slice context per MB row */
    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    ff_dct_common_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    if (s->width && s->height) {
        /* set chroma shifts */
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &s->chroma_x_shift,
                                         &s->chroma_y_shift);

        /* convert fourcc to upper case */
        s->codec_tag        = avpriv_toupper4(s->avctx->codec_tag);

        s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);

        s->avctx->coded_frame = &s->current_picture.f;

        if (s->encoding) {
            /* MSMPEG4 AC statistics used for RL table selection */
            if (s->msmpeg4_version) {
                FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                                  2 * 2 * (MAX_LEVEL + 1) *
                                  (MAX_RUN + 1) * 2 * sizeof(int), fail);
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

            /* per-qscale quantization matrices (64 coeffs x 32 qscales) */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
                              64 * 32 * sizeof(int), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
                              64 * 32 * sizeof(int), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
                              64 * 32 * 2 * sizeof(uint16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
                              64 * 32 * 2 * sizeof(uint16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);

            if (s->avctx->noise_reduction) {
                FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                                  2 * 64 * sizeof(uint16_t), fail);
            }
        }
    }

    /* picture pool is scaled by the thread count so each frame thread
     * can own its share of slots */
    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      s->picture_count * sizeof(Picture), fail);
    for (i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults(&s->picture[i].f);
    }

    if (s->width && s->height) {
        if (init_context_frame(s))
            goto fail;

        s->parse_context.state = -1;
    }

    s->context_initialized = 1;
    s->thread_context[0]   = s;

    if (s->width && s->height) {
        if (nb_slices > 1) {
            /* clone the main context for each additional slice thread */
            for (i = 1; i < nb_slices; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
            }

            /* divide the MB rows evenly among the slice contexts */
            for (i = 0; i < nb_slices; i++) {
                if (init_duplicate_context(s->thread_context[i]) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
            }
        } else {
            if (init_duplicate_context(s) < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
        s->slice_context_count = nb_slices;
    }

    return 0;
 fail:
    ff_MPV_common_end(s);
    return -1;
}
1055
/**
 * Frees and resets MpegEncContext fields depending on the resolution.
 * Is used during resolution changes to avoid a full reinitialization of the
 * codec.
 *
 * Counterpart of init_context_frame(); every table allocated there is
 * freed here and the derived pointers into those tables are cleared.
 *
 * @return 0 (cannot fail)
 */
static int free_context_frame(MpegEncContext *s)
{
    int i, j, k;

    av_freep(&s->mb_type);
    /* the *_mv_table pointers below point into these base allocations,
     * so free the bases and NULL the derived pointers together */
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    /* field-mode motion vector / selection tables ([dir][field][list]) */
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    /* error-resilience scratch buffers */
    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    /* linesizes are resolution-dependent; force re-derivation */
    s->linesize = s->uvlinesize = 0;

    for (i = 0; i < 3; i++)
        av_freep(&s->visualization_buffer[i]);

    return 0;
}
1113
/**
 * Reinitialize the parts of the context that depend on the frame size,
 * keeping everything else (quant matrices, picture pool, ...) intact.
 * Used on in-stream resolution changes instead of a full
 * ff_MPV_common_end()/ff_MPV_common_init() cycle.
 *
 * @return 0 on success, a negative AVERROR on failure; on failure the
 *         whole context is freed via ff_MPV_common_end()
 */
int ff_MPV_common_frame_size_change(MpegEncContext *s)
{
    int i, err = 0;

    /* tear down the per-slice duplicate contexts first */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
    } else
        free_duplicate_context(s);

    free_context_frame(s);

    /* keep the picture pool, but force every slot to be reallocated
     * at the new dimensions on next use */
    if (s->picture)
        for (i = 0; i < s->picture_count; i++) {
            s->picture[i].needs_realloc = 1;
        }

    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;

    // init
    /* same mb_height derivation as in ff_MPV_common_init() */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != AV_CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR_INVALIDDATA;

    if ((err = init_context_frame(s)))
        goto fail;

    s->thread_context[0]   = s;

    if (s->width && s->height) {
        int nb_slices = s->slice_context_count;
        if (nb_slices > 1) {
            /* re-clone the main context for each slice thread */
            for (i = 1; i < nb_slices; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
            }

            for (i = 0; i < nb_slices; i++) {
                if (init_duplicate_context(s->thread_context[i]) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
            }
        } else {
            if (init_duplicate_context(s) < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
        s->slice_context_count = nb_slices;
    }

    return 0;
 fail:
    ff_MPV_common_end(s);
    return err;
}
1184
/* Free everything allocated by ff_MPV_common_init() (both encoder and
 * decoder side) and reset the context to the uninitialized state.
 * Safe to call on a partially initialized context (failure path). */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i;

    /* free per-slice duplicate contexts; index 0 is the main context
     * itself, so only indices >= 1 are freed */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    /* encoder-only allocations (no-ops when decoding) */
    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* frame-thread copies share the picture pool with the owner,
     * so only the original context releases the pictures */
    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);

    free_context_frame(s);

    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);

    s->context_initialized      = 0;
    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;
    s->linesize = s->uvlinesize = 0;
}
1235
/**
 * Initialize the derived tables (max_level[], max_run[], index_run[]) of
 * an RLTable from its table_run[]/table_level[] entries.
 *
 * @param rl           table to initialize; table_run/table_level/last/n
 *                     must already be set
 * @param static_store if non-NULL, one packed storage block per
 *                     "last" class is used instead of av_malloc()
 *                     (layout: max_level | max_run | index_run)
 */
void ff_init_rl(RLTable *rl,
                uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        /* entries [0, rl->last) are "not last coeff", the rest are "last" */
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        /* index_run entries default to rl->n, meaning "no code for this run" */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run   = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)  /* first code with this run value */
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* place the three tables either in the packed static storage
         * (at fixed offsets) or in individually malloc'ed buffers */
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
1287
/**
 * Build the per-qscale rl_vlc[] decode tables of an RLTable from its
 * already-initialized VLC table, pre-applying dequantization
 * (level * qmul + qadd) for each of the 32 qscale values.
 */
void ff_init_vlc_rl(RLTable *rl)
{
    int i, q;

    for (q = 0; q < 32; q++) {
        int qmul = q * 2;
        int qadd = (q - 1) | 1;

        /* qscale 0: store raw levels, no dequantization */
        if (q == 0) {
            qmul = 1;
            qadd = 0;
        }
        for (i = 0; i < rl->vlc.table_size; i++) {
            int code = rl->vlc.table[i][0];
            int len  = rl->vlc.table[i][1];
            int level, run;

            if (len == 0) { // illegal code
                run   = 66;
                level = MAX_LEVEL;
            } else if (len < 0) { // more bits needed
                run   = 0;
                level = code;
            } else {
                if (code == rl->n) { // esc
                    run   = 66;
                    level =  0;
                } else {
                    run   = rl->table_run[code] + 1;
                    level = rl->table_level[code] * qmul + qadd;
                    /* +192 flags "last coefficient" codes in the run value */
                    if (code >= rl->last) run += 192;
                }
            }
            rl->rl_vlc[q][i].len   = len;
            rl->rl_vlc[q][i].level = level;
            rl->rl_vlc[q][i].run   = run;
        }
    }
}
1327
1328 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1329 {
1330 int i;
1331
1332 /* release non reference frames */
1333 for (i = 0; i < s->picture_count; i++) {
1334 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1335 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1336 (remove_current || &s->picture[i] != s->current_picture_ptr)
1337 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1338 free_frame_buffer(s, &s->picture[i]);
1339 }
1340 }
1341 }
1342
1343 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1344 {
1345 if (pic->f.data[0] == NULL)
1346 return 1;
1347 if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
1348 if (!pic->owner2 || pic->owner2 == s)
1349 return 1;
1350 return 0;
1351 }
1352
1353 static int find_unused_picture(MpegEncContext *s, int shared)
1354 {
1355 int i;
1356
1357 if (shared) {
1358 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1359 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1360 return i;
1361 }
1362 } else {
1363 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1364 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1365 return i; // FIXME
1366 }
1367 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1368 if (pic_is_unused(s, &s->picture[i]))
1369 return i;
1370 }
1371 }
1372
1373 return AVERROR_INVALIDDATA;
1374 }
1375
1376 int ff_find_unused_picture(MpegEncContext *s, int shared)
1377 {
1378 int ret = find_unused_picture(s, shared);
1379
1380 if (ret >= 0 && ret < s->picture_range_end) {
1381 if (s->picture[ret].needs_realloc) {
1382 s->picture[ret].needs_realloc = 0;
1383 free_picture(s, &s->picture[ret]);
1384 avcodec_get_frame_defaults(&s->picture[ret].f);
1385 }
1386 }
1387 return ret;
1388 }
1389
1390 static void update_noise_reduction(MpegEncContext *s)
1391 {
1392 int intra, i;
1393
1394 for (intra = 0; intra < 2; intra++) {
1395 if (s->dct_count[intra] > (1 << 16)) {
1396 for (i = 0; i < 64; i++) {
1397 s->dct_error_sum[intra][i] >>= 1;
1398 }
1399 s->dct_count[intra] >>= 1;
1400 }
1401
1402 for (i = 0; i < 64; i++) {
1403 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1404 s->dct_count[intra] +
1405 s->dct_error_sum[intra][i] / 2) /
1406 (s->dct_error_sum[intra][i] + 1);
1407 }
1408 }
1409 }
1410
/**
 * generic function for encode/decode called after coding/decoding
 * the header and before a frame is coded/decoded.
 *
 * Selects/allocates the current picture, rotates last/next picture
 * pointers, allocates dummy reference frames when references are
 * missing, and selects the dequantizers for this frame.
 *
 * @return 0 on success, a negative value on failure
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    /* mark & release old frames */
    if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
        /* the previous "last" picture is no longer needed once it has been
         * replaced by "next" (non-B frames shift the reference window) */
        if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
            s->last_picture_ptr != s->next_picture_ptr &&
            s->last_picture_ptr->f.data[0]) {
            if (s->last_picture_ptr->owner2 == s)
                free_frame_buffer(s, s->last_picture_ptr);
        }

        /* release forgotten pictures */
        /* if (mpeg124/h263) */
        if (!s->encoding) {
            for (i = 0; i < s->picture_count; i++) {
                /* a reference picture that is neither last nor next should
                 * not exist anymore — free it (expected with frame threads,
                 * an error otherwise) */
                if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
                    &s->picture[i] != s->last_picture_ptr &&
                    &s->picture[i] != s->next_picture_ptr &&
                    s->picture[i].f.reference && !s->picture[i].needs_realloc) {
                    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                        av_log(avctx, AV_LOG_ERROR,
                               "releasing zombie picture\n");
                    free_frame_buffer(s, &s->picture[i]);
                }
            }
        }
    }

    if (!s->encoding) {
        ff_release_unused_pictures(s, 1);

        if (s->current_picture_ptr &&
            s->current_picture_ptr->f.data[0] == NULL) {
            // we already have a unused image
            // (maybe it was set before reading the header)
            pic = s->current_picture_ptr;
        } else {
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            pic = &s->picture[i];
        }

        /* reference == 3 means "both fields are references" for the
         * non-H.264 codecs; H.264 uses the picture structure directly */
        pic->f.reference = 0;
        if (!s->droppable) {
            if (s->codec_id == AV_CODEC_ID_H264)
                pic->f.reference = s->picture_structure;
            else if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->f.reference = 3;
        }

        pic->f.coded_picture_number = s->coded_picture_number++;

        if (ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr = pic;
        // FIXME use only the vars from current_pic
        s->current_picture_ptr->f.top_field_first = s->top_field_first;
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            if (s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->f.top_field_first =
                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                     !s->progressive_sequence;
        s->current_picture_ptr->field_picture      = s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->f.pict_type = s->pict_type;
    // if (s->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* rotate the reference window: next becomes last, current becomes next */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr    ? s->last_picture_ptr->f.data[0]    : NULL,
            s->next_picture_ptr    ? s->next_picture_ptr->f.data[0]    : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
            s->pict_type, s->droppable);

    if (s->codec_id != AV_CODEC_ID_H264) {
        /* missing last reference (e.g. stream starts on a non-keyframe or
         * a field picture): allocate a gray dummy frame to predict from */
        if ((s->last_picture_ptr == NULL ||
             s->last_picture_ptr->f.data[0] == NULL) &&
            (s->pict_type != AV_PICTURE_TYPE_I ||
             s->picture_structure != PICT_FRAME)) {
            int h_chroma_shift, v_chroma_shift;
            av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                             &h_chroma_shift, &v_chroma_shift);
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR,
                       "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO,
                       "allocate dummy last picture for field based first keyframe\n");

            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            s->last_picture_ptr = &s->picture[i];
            if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
                s->last_picture_ptr = NULL;
                return -1;
            }

            /* fill with black luma / neutral chroma */
            memset(s->last_picture_ptr->f.data[0], 0,
                   avctx->height * s->last_picture_ptr->f.linesize[0]);
            memset(s->last_picture_ptr->f.data[1], 0x80,
                   (avctx->height >> v_chroma_shift) *
                   s->last_picture_ptr->f.linesize[1]);
            memset(s->last_picture_ptr->f.data[2], 0x80,
                   (avctx->height >> v_chroma_shift) *
                   s->last_picture_ptr->f.linesize[2]);

            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
            s->last_picture_ptr->f.reference = 3;
        }
        /* B-frame with no forward reference: allocate a dummy next frame */
        if ((s->next_picture_ptr == NULL ||
             s->next_picture_ptr->f.data[0] == NULL) &&
            s->pict_type == AV_PICTURE_TYPE_B) {
            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            s->next_picture_ptr = &s->picture[i];
            if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
                s->next_picture_ptr = NULL;
                return -1;
            }
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
            s->next_picture_ptr->f.reference = 3;
        }
    }

    if (s->last_picture_ptr)
        ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if (s->next_picture_ptr)
        ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
        if (s->next_picture_ptr)
            s->next_picture_ptr->owner2 = s;
        if (s->last_picture_ptr)
            s->last_picture_ptr->owner2 = s;
    }

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                 s->last_picture_ptr->f.data[0]));

    /* field pictures: address only every other line by doubling the
     * linesize (and offsetting by one line for the bottom field) */
    if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            }
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i]    *= 2;
            s->next_picture.f.linesize[i]    *= 2;
        }
    }

    s->err_recognition = avctx->err_recognition;

    /* set dequantizer, we can't do it during init as
     * it might change for mpeg4 and we can't do it in the header
     * decode as init is not called for mpeg4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);
    }

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
1624
/* generic function for encode/decode called after a
 * frame has been coded/decoded.
 * Draws edge padding for motion estimation/compensation, releases
 * non-reference frames (encoder), and reports decode completion to
 * waiting frame threads. */
void ff_MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* redraw edges for the frame if decoding didn't complete */
    // just to make sure that all data is rendered.
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else if ((s->er.error_count || s->encoding) &&
               !s->avctx->hwaccel &&
               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
               s->unrestricted_mv &&
               s->current_picture.f.reference &&
               !s->intra_only &&
               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
        /* replicate border pixels into the EDGE_WIDTH margin of all
         * three planes, scaled by the chroma subsampling shifts */
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                          s->h_edge_pos, s->v_edge_pos,
                          EDGE_WIDTH, EDGE_WIDTH,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* remember this frame's type/lambda for rate control of the next one */
    s->last_pict_type                 = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
            break;
        }
    }
    assert(i < MAX_PICTURE_COUNT);
#endif

    if (s->encoding) {
        /* release non-reference frames */
        for (i = 0; i < s->picture_count; i++) {
            if (s->picture[i].f.data[0] && !s->picture[i].f.reference
                /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture,    0, sizeof(Picture));
    memset(&s->next_picture,    0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame = &s->current_picture_ptr->f;

    /* unblock frame threads waiting on this reference picture */
    if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
    }
}
1697
1698 /**
1699 * Draw a line from (ex, ey) -> (sx, sy).
1700 * @param w width of the image
1701 * @param h height of the image
1702 * @param stride stride/linesize of the image
1703 * @param color color of the arrow
1704 */
1705 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1706 int w, int h, int stride, int color)
1707 {
1708 int x, y, fr, f;
1709
1710 sx = av_clip(sx, 0, w - 1);
1711 sy = av_clip(sy, 0, h - 1);
1712 ex = av_clip(ex, 0, w - 1);
1713 ey = av_clip(ey, 0, h - 1);
1714
1715 buf[sy * stride + sx] += color;
1716
1717 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1718 if (sx > ex) {
1719 FFSWAP(int, sx, ex);
1720 FFSWAP(int, sy, ey);
1721 }
1722 buf += sx + sy * stride;
1723 ex -= sx;
1724 f = ((ey - sy) << 16) / ex;
1725 for (x = 0; x <= ex; x++) {
1726 y = (x * f) >> 16;
1727 fr = (x * f) & 0xFFFF;
1728 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1729 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1730 }
1731 } else {
1732 if (sy > ey) {
1733 FFSWAP(int, sx, ex);
1734 FFSWAP(int, sy, ey);
1735 }
1736 buf += sx + sy * stride;
1737 ey -= sy;
1738 if (ey)
1739 f = ((ex - sx) << 16) / ey;
1740 else
1741 f = 0;
1742 for (y = 0; y = ey; y++) {
1743 x = (y * f) >> 16;
1744 fr = (y * f) & 0xFFFF;
1745 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1746 buf[y * stride + x + 1] += (color * fr ) >> 16;
1747 }
1748 }
1749 }
1750
/**
 * Draw an arrow from (ex, ey) -> (sx, sy).
 * The shaft is always drawn; the two head strokes are added only when
 * the arrow is longer than 3 pixels.
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int delta_x, delta_y;

    /* allow endpoints slightly outside the image; draw_line() clips hard */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    delta_x = ex - sx;
    delta_y = ey - sy;

    if (delta_x * delta_x + delta_y * delta_y > 3 * 3) {
        /* head directions: the shaft vector rotated +/-45 degrees */
        int rx     =  delta_x + delta_y;
        int ry     = -delta_x + delta_y;
        int length = ff_sqrt((rx * rx + ry * ry) << 8);

        // FIXME subpixel accuracy
        rx = ROUNDED_DIV(rx * 3 << 4, length);
        ry = ROUNDED_DIV(ry * 3 << 4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1785
1786 /**
1787 * Print debugging info for the given picture.
1788 */
1789 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1790 {
1791 if (s->avctx->hwaccel || !pict || !pict->mb_type)
1792 return;
1793
1794 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1795 int x,y;
1796
1797 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1798 switch (pict->pict_type) {
1799 case AV_PICTURE_TYPE_I:
1800 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1801 break;
1802 case AV_PICTURE_TYPE_P:
1803 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1804 break;
1805 case AV_PICTURE_TYPE_B:
1806 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1807 break;
1808 case AV_PICTURE_TYPE_S:
1809 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1810 break;
1811 case AV_PICTURE_TYPE_SI:
1812 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1813 break;
1814 case AV_PICTURE_TYPE_SP:
1815 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1816 break;
1817 }
1818 for (y = 0; y < s->mb_height; y++) {
1819 for (x = 0; x < s->mb_width; x++) {
1820 if (s->avctx->debug & FF_DEBUG_SKIP) {
1821 int count = s->mbskip_table[x + y * s->mb_stride];
1822 if (count > 9)
1823 count = 9;
1824 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1825 }
1826 if (s->avctx->debug & FF_DEBUG_QP) {
1827 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1828 pict->qscale_table[x + y * s->mb_stride]);
1829 }
1830 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1831 int mb_type = pict->mb_type[x + y * s->mb_stride];
1832 // Type & MV direction
1833 if (IS_PCM(mb_type))
1834 av_log(s->avctx, AV_LOG_DEBUG, "P");
1835 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1836 av_log(s->avctx, AV_LOG_DEBUG, "A");
1837 else if (IS_INTRA4x4(mb_type))
1838 av_log(s->avctx, AV_LOG_DEBUG, "i");
1839 else if (IS_INTRA16x16(mb_type))
1840 av_log(s->avctx, AV_LOG_DEBUG, "I");
1841 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1842 av_log(s->avctx, AV_LOG_DEBUG, "d");
1843 else if (IS_DIRECT(mb_type))
1844 av_log(s->avctx, AV_LOG_DEBUG, "D");
1845 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1846 av_log(s->avctx, AV_LOG_DEBUG, "g");
1847 else if (IS_GMC(mb_type))
1848 av_log(s->avctx, AV_LOG_DEBUG, "G");
1849 else if (IS_SKIP(mb_type))
1850 av_log(s->avctx, AV_LOG_DEBUG, "S");
1851 else if (!USES_LIST(mb_type, 1))
1852 av_log(s->avctx, AV_LOG_DEBUG, ">");
1853 else if (!USES_LIST(mb_type, 0))
1854 av_log(s->avctx, AV_LOG_DEBUG, "<");
1855 else {
1856 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1857 av_log(s->avctx, AV_LOG_DEBUG, "X");
1858 }
1859
1860 // segmentation
1861 if (IS_8X8(mb_type))
1862 av_log(s->avctx, AV_LOG_DEBUG, "+");
1863 else if (IS_16X8(mb_type))
1864 av_log(s->avctx, AV_LOG_DEBUG, "-");
1865 else if (IS_8X16(mb_type))
1866 av_log(s->avctx, AV_LOG_DEBUG, "|");
1867 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1868 av_log(s->avctx, AV_LOG_DEBUG, " ");
1869 else
1870 av_log(s->avctx, AV_LOG_DEBUG, "?");
1871
1872
1873 if (IS_INTERLACED(mb_type))
1874 av_log(s->avctx, AV_LOG_DEBUG, "=");
1875 else
1876 av_log(s->avctx, AV_LOG_DEBUG, " ");
1877 }
1878 }
1879 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1880 }
1881 }
1882
1883 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1884 (s->avctx->debug_mv)) {
1885 const int shift = 1 + s->quarter_sample;
1886 int mb_y;
1887 uint8_t *ptr;
1888 int i;
1889 int h_chroma_shift, v_chroma_shift, block_height;
1890 const int width = s->avctx->width;
1891 const int height = s->avctx->height;
1892 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1893 const int mv_stride = (s->mb_width << mv_sample_log2) +
1894 (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1895 s->low_delay = 0; // needed to see the vectors without trashing the buffers
1896
1897 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1898 &h_chroma_shift, &v_chroma_shift);
1899 for (i = 0; i < 3; i++) {
1900 memcpy(s->visualization_buffer[i], pict->data[i],
1901 (i == 0) ? pict->linesize[i] * height:
1902 pict->linesize[i] * height >> v_chroma_shift);
1903 pict->data[i] = s->visualization_buffer[i];
1904 }
1905 pict->type = FF_BUFFER_TYPE_COPY;
1906 ptr = pict->data[0];
1907 block_height = 16 >> v_chroma_shift;
1908
1909 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1910 int mb_x;
1911 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1912 const int mb_index = mb_x + mb_y * s->mb_stride;
1913 if ((s->avctx->debug_mv) && pict->motion_val) {
1914 int type;
1915 for (type = 0; type < 3; type++) {
1916 int direction = 0;
1917 switch (type) {
1918 case 0:
1919 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1920 (pict->pict_type!= AV_PICTURE_TYPE_P))
1921 continue;
1922 direction = 0;
1923 break;
1924 case 1:
1925 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1926 (pict->pict_type!= AV_PICTURE_TYPE_B))
1927 continue;
1928 direction = 0;
1929 break;
1930 case 2:
1931 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1932 (pict->pict_type!= AV_PICTURE_TYPE_B))
1933 continue;
1934 direction = 1;
1935 break;
1936 }
1937 if (!USES_LIST(pict->mb_type[mb_index], direction))
1938 continue;
1939
1940 if (IS_8X8(pict->mb_type[mb_index])) {
1941 int i;
1942 for (i = 0; i < 4; i++) {
1943 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1944 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1945 int xy = (mb_x * 2 + (i & 1) +
1946 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1947 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1948 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1949 draw_arrow(ptr, sx, sy, mx, my, width,
1950 height, s->linesize, 100);
1951 }
1952 } else if (IS_16X8(pict->mb_type[mb_index])) {
1953 int i;
1954 for (i = 0; i < 2; i++) {
1955 int sx = mb_x * 16 + 8;
1956 int sy = mb_y * 16 + 4 + 8 * i;
1957 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1958 int mx = (pict->motion_val[direction][xy][0] >> shift);
1959 int my = (pict->motion_val[direction][xy][1] >> shift);
1960
1961 if (IS_INTERLACED(pict->mb_type[mb_index]))
1962 my *= 2;
1963
1964 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1965 height, s->linesize, 100);
1966 }
1967 } else if (IS_8X16(pict->mb_type[mb_index])) {
1968 int i;
1969 for (i = 0; i < 2; i++) {
1970 int sx = mb_x * 16 + 4 + 8 * i;
1971 int sy = mb_y * 16 + 8;
1972 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1973 int mx = pict->motion_val[direction][xy][0] >> shift;
1974 int my = pict->motion_val[direction][xy][1] >> shift;
1975
1976 if (IS_INTERLACED(pict->mb_type[mb_index]))
1977 my *= 2;
1978
1979 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1980 height, s->linesize, 100);
1981 }
1982 } else {
1983 int sx = mb_x * 16 + 8;
1984 int sy = mb_y * 16 + 8;
1985 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
1986 int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1987 int my = pict->motion_val[direction][xy][1] >> shift + sy;
1988 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1989 }
1990 }
1991 }
1992 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1993 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1994 0x0101010101010101ULL;
1995 int y;
1996 for (y = 0; y < block_height; y++) {
1997 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1998 (block_height * mb_y + y) *
1999 pict->linesize[1]) = c;
2000 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2001 (block_height * mb_y + y) *
2002 pict->linesize[2]) = c;
2003 }
2004 }
2005 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2006 pict->motion_val) {
2007 int mb_type = pict->mb_type[mb_index];
2008 uint64_t u,v;
2009 int y;
2010 #define COLOR(theta, r) \
2011 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2012 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2013
2014
2015 u = v = 128;
2016 if (IS_PCM(mb_type)) {
2017 COLOR(120, 48)
2018 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2019 IS_INTRA16x16(mb_type)) {
2020 COLOR(30, 48)
2021 } else if (IS_INTRA4x4(mb_type)) {
2022 COLOR(90, 48)
2023 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2024 // COLOR(120, 48)
2025 } else if (IS_DIRECT(mb_type)) {
2026 COLOR(150, 48)
2027 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2028 COLOR(170, 48)
2029 } else if (IS_GMC(mb_type)) {
2030 COLOR(190, 48)
2031 } else if (IS_SKIP(mb_type)) {
2032 // COLOR(180, 48)
2033 } else if (!USES_LIST(mb_type, 1)) {
2034 COLOR(240, 48)
2035 } else if (!USES_LIST(mb_type, 0)) {
2036 COLOR(0, 48)
2037 } else {
2038 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2039 COLOR(300,48)
2040 }
2041
2042 u *= 0x0101010101010101ULL;
2043 v *= 0x0101010101010101ULL;
2044 for (y = 0; y < block_height; y++) {
2045 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2046 (block_height * mb_y + y) * pict->linesize[1]) = u;
2047 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2048 (block_height * mb_y + y) * pict->linesize[2]) = v;
2049 }
2050
2051 // segmentation
2052 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2053 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2054 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2055 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2056 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2057 }
2058 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2059 for (y = 0; y < 16; y++)
2060 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2061 pict->linesize[0]] ^= 0x80;
2062 }
2063 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2064 int dm = 1 << (mv_sample_log2 - 2);
2065 for (i = 0; i < 4; i++) {
2066 int sx = mb_x * 16 + 8 * (i & 1);
2067 int sy = mb_y * 16 + 8 * (i >> 1);
2068 int xy = (mb_x * 2 + (i & 1) +
2069 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2070 // FIXME bidir
2071 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
2072 if (mv[0] != mv[dm] ||
2073 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2074 for (y = 0; y < 8; y++)
2075 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2076 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2077 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2078 pict->linesize[0]) ^= 0x8080808080808080ULL;
2079 }
2080 }
2081
2082 if (IS_INTERLACED(mb_type) &&
2083 s->codec_id == AV_CODEC_ID_H264) {
2084 // hmm
2085 }
2086 }
2087 s->mbskip_table[mb_index] = 0;
2088 }
2089 }
2090 }
2091 }
2092
2093 /**
2094 * find the lowest MB row referenced in the MVs
2095 */
2096 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2097 {
2098 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2099 int my, off, i, mvs;
2100
2101 if (s->picture_structure != PICT_FRAME || s->mcsel)
2102 goto unhandled;
2103
2104 switch (s->mv_type) {
2105 case MV_TYPE_16X16:
2106 mvs = 1;
2107 break;
2108 case MV_TYPE_16X8:
2109 mvs = 2;
2110 break;
2111 case MV_TYPE_8X8:
2112 mvs = 4;
2113 break;
2114 default:
2115 goto unhandled;
2116 }
2117
2118 for (i = 0; i < mvs; i++) {
2119 my = s->mv[dir][i][1]<<qpel_shift;
2120 my_max = FFMAX(my_max, my);
2121 my_min = FFMIN(my_min, my);
2122 }
2123
2124 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2125
2126 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2127 unhandled:
2128 return s->mb_height-1;
2129 }
2130
2131 /* put block[] to dest[] */
2132 static inline void put_dct(MpegEncContext *s,
2133 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2134 {
2135 s->dct_unquantize_intra(s, block, i, qscale);
2136 s->dsp.idct_put (dest, line_size, block);
2137 }
2138
2139 /* add block[] to dest[] */
2140 static inline void add_dct(MpegEncContext *s,
2141 int16_t *block, int i, uint8_t *dest, int line_size)
2142 {
2143 if (s->block_last_index[i] >= 0) {
2144 s->dsp.idct_add (dest, line_size, block);
2145 }
2146 }
2147
2148 static inline void add_dequant_dct(MpegEncContext *s,
2149 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2150 {
2151 if (s->block_last_index[i] >= 0) {
2152 s->dct_unquantize_inter(s, block, i, qscale);
2153
2154 s->dsp.idct_add (dest, line_size, block);
2155 }
2156 }
2157
2158 /**
2159 * Clean dc, ac, coded_block for the current non-intra MB.
2160 */
2161 void ff_clean_intra_table_entries(MpegEncContext *s)
2162 {
2163 int wrap = s->b8_stride;
2164 int xy = s->block_index[0];
2165
2166 s->dc_val[0][xy ] =
2167 s->dc_val[0][xy + 1 ] =
2168 s->dc_val[0][xy + wrap] =
2169 s->dc_val[0][xy + 1 + wrap] = 1024;
2170 /* ac pred */
2171 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2172 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2173 if (s->msmpeg4_version>=3) {
2174 s->coded_block[xy ] =
2175 s->coded_block[xy + 1 ] =
2176 s->coded_block[xy + wrap] =
2177 s->coded_block[xy + 1 + wrap] = 0;
2178 }
2179 /* chroma */
2180 wrap = s->mb_stride;
2181 xy = s->mb_x + s->mb_y * wrap;
2182 s->dc_val[1][xy] =
2183 s->dc_val[2][xy] = 1024;
2184 /* ac pred */
2185 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2186 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2187
2188 s->mbintra_table[xy]= 0;
2189 }
2190
2191 /* generic function called after a macroblock has been parsed by the
2192 decoder or after it has been encoded by the encoder.
2193
2194 Important variables used:
2195 s->mb_intra : true if intra macroblock
2196 s->mv_dir : motion vector direction
2197 s->mv_type : motion vector type
2198 s->mv : motion vector
2199 s->interlaced_dct : true if interlaced dct used (mpeg2)
2200 */
/* Reconstruct one macroblock into the current picture (or a scratch buffer).
 * is_mpeg12 is a compile-time constant in each instantiation so the
 * codec-specific branches are eliminated by the optimizer. */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                            int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    /* hardware (XvMC) path: hand the blocks over untouched */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* save DCT coefficients */
       int i,j;
       int16_t *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               *dct++ = block[i][s->dsp.idct_permutation[j]];
               av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.f.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            /* H.263-style AC/DC prediction: wipe stale intra state */
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            /* MPEG-style: reset DC predictors to the mid value */
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* Reconstruction can be skipped while encoding intra-only/B content
     * unless PSNR is requested or RD mb_decision needs the pixels. */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
        const int block_size = 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.f.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        /* interlaced DCT interleaves fields: double the stride, offset one row */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y= s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* B frames without draw_horiz_band: render to scratch first */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){

                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    /* frame-threading: wait until the referenced rows of the
                     * reference picture(s) are decoded */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->f,
                                                 ff_MPV_lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->f,
                                                 ff_MPV_lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                op_qpix= s->me.qpel_put;
                if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                    op_pix = s->dsp.put_pixels_tab;
                }else{
                    op_pix = s->dsp.put_no_rnd_pixels_tab;
                }
                if (s->mv_dir & MV_DIR_FORWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                    /* second direction averages onto the first prediction */
                    op_pix = s->dsp.avg_pixels_tab;
                    op_qpix= s->me.qpel_avg;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            /* codecs that dequantize during bitstream parsing take the plain
             * add_dct path; the others dequantize here */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* 4:2:2 chroma: two vertically stacked 8x8 blocks per plane */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                /* WMV2 has its own (overlap-filtered) block adding */
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* MPEG-1/2 blocks are already dequantized: IDCT directly */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        /* scratch buffer was used: copy the finished MB to its real place */
        if(!readable){
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
2432
2433 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2434 #if !CONFIG_SMALL
2435 if(s->out_format == FMT_MPEG1) {
2436 MPV_decode_mb_internal(s, block, 1);
2437 } else
2438 #endif
2439 MPV_decode_mb_internal(s, block, 0);
2440 }
2441
2442 /**
2443 * @param h is the normal height, this will be reduced automatically if needed for the last row
2444 */
2445 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2446 Picture *last, int y, int h, int picture_structure,
2447 int first_field, int draw_edges, int low_delay,
2448 int v_edge_pos, int h_edge_pos)
2449 {
2450 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2451 int hshift = desc->log2_chroma_w;
2452 int vshift = desc->log2_chroma_h;
2453 const int field_pic = picture_structure != PICT_FRAME;
2454 if(field_pic){
2455 h <<= 1;
2456 y <<= 1;
2457 }
2458
2459 if (!avctx->hwaccel &&
2460 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2461 draw_edges &&
2462 cur->f.reference &&
2463 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2464 int *linesize = cur->f.linesize;
2465 int sides = 0, edge_h;
2466 if (y==0) sides |= EDGE_TOP;
2467 if (y + h >= v_edge_pos)
2468 sides |= EDGE_BOTTOM;
2469
2470 edge_h= FFMIN(h, v_edge_pos - y);
2471
2472 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2473 linesize[0], h_edge_pos, edge_h,
2474 EDGE_WIDTH, EDGE_WIDTH, sides);
2475 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2476 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2477 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2478 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2479 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2480 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2481 }
2482
2483 h = FFMIN(h, avctx->height - y);
2484
2485 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2486
2487 if (avctx->draw_horiz_band) {
2488 AVFrame *src;
2489 int offset[AV_NUM_DATA_POINTERS];
2490 int i;
2491
2492 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2493 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2494 src = &cur->f;
2495 else if (last)
2496 src = &last->f;
2497 else
2498 return;
2499
2500 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2501 picture_structure == PICT_FRAME &&
2502 avctx->codec_id != AV_CODEC_ID_H264 &&
2503 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2504 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2505 offset[i] = 0;
2506 }else{
2507 offset[0]= y * src->linesize[0];
2508 offset[1]=
2509 offset[2]= (y >> vshift) * src->linesize[1];
2510 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2511 offset[i] = 0;
2512 }
2513
2514 emms_c();
2515
2516 avctx->draw_horiz_band(avctx, src, offset,
2517 y, picture_structure, h);
2518 }
2519 }
2520
2521 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2522 {
2523 int draw_edges = s->unrestricted_mv && !s->intra_only;
2524 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2525 &s->last_picture, y, h, s->picture_structure,
2526 s->first_field, draw_edges, s->low_delay,
2527 s->v_edge_pos, s->h_edge_pos);
2528 }
2529
/* Set up block_index[] and dest[] for the MB one to the left of
 * (s->mb_x, s->mb_y); callers advance them per MB from there. */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    const int mb_size= 4;

    /* luma: four 8x8 blocks on the b8 grid (2 per MB in each direction) */
    s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* chroma: one block per MB, stored after the luma area */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y * linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: rows alternate between the two fields */
            s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
2561
2562 /**
2563 * Permute an 8x8 block.
2564 * @param block the block which will be permuted according to the given permutation vector
2565 * @param permutation the permutation vector
2566 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
2567 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
2568 * (inverse) permutated to scantable order!
2569 */
2570 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2571 {
2572 int i;
2573 int16_t temp[64];
2574
2575 if(last<=0) return;
2576 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
2577
2578 for(i=0; i<=last; i++){
2579 const int j= scantable[i];
2580 temp[j]= block[j];
2581 block[j]=0;
2582 }
2583
2584 for(i=0; i<=last; i++){
2585 const int j= scantable[i];
2586 const int perm_j= permutation[j];
2587 block[perm_j]= temp[j];
2588 }
2589 }
2590
2591 void ff_mpeg_flush(AVCodecContext *avctx){
2592 int i;
2593 MpegEncContext *s = avctx->priv_data;
2594
2595 if(s==NULL || s->picture==NULL)
2596 return;
2597
2598 for(i=0; i<s->picture_count; i++){
2599 if (s->picture[i].f.data[0] &&
2600 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2601 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2602 free_frame_buffer(s, &s->picture[i]);
2603 }
2604 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2605
2606 s->mb_x= s->mb_y= 0;
2607
2608 s->parse_context.state= -1;
2609 s->parse_context.frame_start_found= 0;
2610 s->parse_context.overread= 0;
2611 s->parse_context.overread_index= 0;
2612 s->parse_context.index= 0;
2613 s->parse_context.last_index= 0;
2614 s->bitstream_buffer_size=0;
2615 s->pp_time=0;
2616 }
2617
2618 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2619 int16_t *block, int n, int qscale)
2620 {
2621 int i, level, nCoeffs;
2622 const uint16_t *quant_matrix;
2623
2624 nCoeffs= s->block_last_index[n];
2625
2626 if (n < 4)
2627 block[0] = block[0] * s->y_dc_scale;
2628 else
2629 block[0] = block[0] * s->c_dc_scale;
2630 /* XXX: only mpeg1 */
2631 quant_matrix = s->intra_matrix;
2632 for(i=1;i<=nCoeffs;i++) {
2633 int j= s->intra_scantable.permutated[i];
2634 level = block[j];
2635 if (level) {
2636 if (level < 0) {
2637 level = -level;
2638 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2639 level = (level - 1) | 1;
2640 level = -level;
2641 } else {
2642 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2643 level = (level - 1) | 1;
2644 }
2645 block[j] = level;
2646 }
2647 }
2648 }
2649
2650 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2651 int16_t *block, int n, int qscale)
2652 {
2653 int i, level, nCoeffs;
2654 const uint16_t *quant_matrix;
2655
2656 nCoeffs= s->block_last_index[n];
2657
2658 quant_matrix = s->inter_matrix;
2659 for(i=0; i<=nCoeffs; i++) {
2660 int j= s->intra_scantable.permutated[i];
2661 level = block[j];
2662 if (level) {
2663 if (level < 0) {
2664 level = -level;
2665 level = (((level << 1) + 1) * qscale *
2666 ((int) (quant_matrix[j]))) >> 4;
2667 level = (level - 1) | 1;
2668 level = -level;
2669 } else {
2670 level = (((level << 1) + 1) * qscale *
2671 ((int) (quant_matrix[j]))) >> 4;
2672 level = (level - 1) | 1;
2673 }
2674 block[j] = level;
2675 }
2676 }
2677 }
2678
2679 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2680 int16_t *block, int n, int qscale)
2681 {
2682 int i, level, nCoeffs;
2683 const uint16_t *quant_matrix;
2684
2685 if(s->alternate_scan) nCoeffs= 63;
2686 else nCoeffs= s->block_last_index[n];
2687
2688 if (n < 4)
2689 block[0] = block[0] * s->y_dc_scale;
2690 else
2691 block[0] = block[0] * s->c_dc_scale;
2692 quant_matrix = s->intra_matrix;
2693 for(i=1;i<=nCoeffs;i++) {
2694 int j= s->intra_scantable.permutated[i];
2695 level = block[j];
2696 if (level) {
2697 if (level < 0) {
2698 level = -level;
2699 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2700 level = -level;
2701 } else {
2702 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2703 }
2704 block[j] = level;
2705 }
2706 }
2707 }
2708
/* Bit-exact variant of dct_unquantize_mpeg2_intra_c: additionally folds the
 * parity of the coefficient sum into block[63] (MPEG-2 mismatch control).
 * NOTE(review): sum starts at -1 — presumably to account for the DC term
 * which is not added to the sum; confirm against the reference decoder. */
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    /* with alternate scan all 64 coefficients are processed */
    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    /* DC term: luma blocks (n < 4) and chroma blocks scale differently */
    if (n < 4)
        block[0] = block[0] * s->y_dc_scale;
    else
        block[0] = block[0] * s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
            sum+=level;
        }
    }
    /* mismatch control: toggle the LSB of the last coefficient */
    block[63]^=sum&1;
}
2741
2742 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2743 int16_t *block, int n, int qscale)
2744 {
2745 int i, level, nCoeffs;
2746 const uint16_t *quant_matrix;
2747 int sum=-1;
2748
2749 if(s->alternate_scan) nCoeffs= 63;
2750 else nCoeffs= s->block_last_index[n];
2751
2752 quant_matrix = s->inter_matrix;
2753 for(i=0; i<=nCoeffs; i++) {
2754 int j= s->intra_scantable.permutated[i];
2755 level = block[j];
2756 if (level) {
2757 if (level < 0) {
2758 level = -level;
2759 level = (((level << 1) + 1) * qscale *
2760 ((int) (quant_matrix[j]))) >> 4;
2761 level = -level;
2762 } else {
2763 level = (((level << 1) + 1) * qscale *
2764 ((int) (quant_matrix[j]))) >> 4;
2765 }
2766 block[j] = level;
2767 sum+=level;
2768 }
2769 }
2770 block[63]^=sum&1;
2771 }
2772
2773 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2774 int16_t *block, int n, int qscale)
2775 {
2776 int i, level, qmul, qadd;
2777 int nCoeffs;
2778
2779 assert(s->block_last_index[n]>=0);
2780
2781 qmul = qscale << 1;
2782
2783 if (!s->h263_aic) {
2784 if (n < 4)
2785 block[0] = block[0] * s->y_dc_scale;
2786 else
2787 block[0] = block[0] * s->c_dc_scale;
2788 qadd = (qscale - 1) | 1;
2789 }else{
2790 qadd = 0;
2791 }
2792 if(s->ac_pred)
2793 nCoeffs=63;
2794 else
2795 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2796
2797 for(i=1; i<=nCoeffs; i++) {
2798 level = block[i];
2799 if (level) {
2800 if (level < 0) {
2801 level = level * qmul - qadd;
2802 } else {
2803 level = level * qmul + qadd;
2804 }
2805 block[i] = level;
2806 }
2807 }