4b68fd56a0515031445ab7b9a8e368897f76ea09
[libav.git] / libavcodec / mpegvideo.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include "libavutil/imgutils.h"
31 #include "avcodec.h"
32 #include "dsputil.h"
33 #include "internal.h"
34 #include "mathops.h"
35 #include "mpegvideo.h"
36 #include "mjpegenc.h"
37 #include "msmpeg4.h"
38 #include "xvmc_internal.h"
39 #include "thread.h"
40 #include <limits.h>
41
42 //#undef NDEBUG
43 //#include <assert.h>
44
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
59
60
61 /* enable all paranoid tests for rounding, overflows, etc... */
62 //#define PARANOID
63
64 //#define DEBUG
65
66
/* Identity mapping: by default chroma uses the same qscale as luma.
 * Codecs with a nonlinear chroma quantizer install their own table. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};

/* MPEG-1: the intra DC coefficient is always scaled by 8, regardless of
 * qscale (the index into this table). */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
};

/* MPEG-2 intra DC scale for intra_dc_precision = 1 (9 bit): constant 4. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
};

/* MPEG-2 intra DC scale for intra_dc_precision = 2 (10 bit): constant 2. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
};

/* MPEG-2 intra DC scale for intra_dc_precision = 3 (11 bit): constant 1. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
};

/* Indexed by intra_dc_precision (0..3); entry 0 reuses the MPEG-1 table. */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};

/* Pixel format list for plain software YUV 4:2:0 decoders. */
const enum AVPixelFormat ff_pixfmt_list_420[] = {
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};

/* Same as above, but offering compiled-in hwaccel formats first so
 * get_format() can pick hardware decoding when available. */
const enum AVPixelFormat ff_hwaccel_pixfmt_list_420[] = {
#if CONFIG_DXVA2
    AV_PIX_FMT_DXVA2_VLD,
#endif
#if CONFIG_VAAPI
    AV_PIX_FMT_VAAPI_VLD,
#endif
#if CONFIG_VDA
    AV_PIX_FMT_VDA_VLD,
#endif
#if CONFIG_VDPAU
    AV_PIX_FMT_VDPAU,
#endif
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
149
/**
 * Find the next MPEG-style start code (0x000001xx) in [p, end).
 *
 * @param p     current read position
 * @param end   one past the last readable byte
 * @param state rolling 32-bit shift register of the last bytes seen; it is
 *              carried across calls so a start code split over a buffer
 *              boundary is still found. On success it holds the full
 *              0x000001xx code.
 * @return pointer just past the start code, or end if none was found
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                          const uint8_t *end,
                                          uint32_t * restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* Feed up to 3 bytes one at a time: this catches codes whose 00 00 01
     * prefix was (partially) consumed by a previous call. */
    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    /* Fast scan, stepping by the largest amount the last bytes allow:
     * p[-1] > 1   -> p[-1] can be neither 00 nor 01 of a prefix, skip 3
     * p[-2] != 0  -> p[-2] breaks the 00 00 run, skip 2
     * otherwise advance 1 unless p[-3..-1] is exactly 00 00 01 */
    while (p < end) {
        if      (p[-1] > 1      ) p += 3;
        else if (p[-2]          ) p += 2;
        else if (p[-3]|(p[-1]-1)) p++;
        else {
            p++;
            break;
        }
    }

    /* Reload *state with the 4 bytes ending at the match (or at end). */
    p = FFMIN(p, end) - 4;
    *state = AV_RB32(p);

    return p + 4;
}
182
183 /* init common dct for both encoder and decoder */
/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    ff_dsputil_init(&s->dsp, s->avctx);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    /* Install the C reference dequantizers; the arch-specific init calls
     * below may replace them with optimized versions. */
    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#if ARCH_X86
    ff_MPV_common_init_x86(s);
#elif ARCH_ALPHA
    ff_MPV_common_init_axp(s);
#elif ARCH_ARM
    ff_MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    ff_MPV_common_init_altivec(s);
#elif ARCH_BFIN
    ff_MPV_common_init_bfin(s);
#endif

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
225
/* Shallow-copy a Picture and mark the destination as a copy so its
 * buffers are not released through the regular ownership path. */
void ff_copy_picture(Picture *dst, Picture *src)
{
    *dst = *src;
    dst->f.type = FF_BUFFER_TYPE_COPY;
}
231
232 /**
233 * Release a frame buffer
234 */
/**
 * Release a frame buffer
 */
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->f);
    else
        avcodec_default_release_buffer(s->avctx, &pic->f);
    /* hwaccel private data always belongs to us, regardless of the path above */
    av_freep(&pic->f.hwaccel_picture_private);
}
247
/**
 * Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). Must be (re)run once the real linesize
 * is known.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
{
    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for  halfpel / 21x21 for  h264)
    // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
                      fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 2,
                      fail)
    /* the scratchpads below are aliases into the same allocation;
     * they are never live at the same time */
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}
272
273 /**
274 * Allocate a frame buffer
275 */
/**
 * Allocate a frame buffer
 *
 * Gets pixel memory via the (threaded or default) get_buffer path,
 * allocates hwaccel private data if needed, and validates the returned
 * strides against the context.
 *
 * @return 0 on success, a negative value on failure
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r, ret;

    if (s->avctx->hwaccel) {
        assert(!pic->f.hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->f.hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    /* same codec split as in free_frame_buffer(): the WM image/screen
     * codecs must not use user-provided buffer callbacks */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        r = ff_thread_get_buffer(s->avctx, &pic->f);
    else
        r = avcodec_default_get_buffer(s->avctx, &pic->f);

    if (r < 0 || !pic->f.type || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
               r, pic->f.type, pic->f.data[0]);
        av_freep(&pic->f.hwaccel_picture_private);
        return -1;
    }

    /* stride must stay constant for the whole sequence once established */
    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    /* first successful get_buffer(): linesize is now known, allocate the
     * linesize-dependent scratch buffers */
    if (!s->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        free_frame_buffer(s, pic);
        return ret;
    }

    return 0;
}
330
331 /**
332 * Allocate a Picture.
333 * The pixels are allocated/set by calling get_buffer() if shared = 0
334 */
335 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
336 {
337 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
338
339 // the + 1 is needed so memset(,,stride*height) does not sig11
340
341 const int mb_array_size = s->mb_stride * s->mb_height;
342 const int b8_array_size = s->b8_stride * s->mb_height * 2;
343 const int b4_array_size = s->b4_stride * s->mb_height * 4;
344 int i;
345 int r = -1;
346
347 if (shared) {
348 assert(pic->f.data[0]);
349 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
350 pic->f.type = FF_BUFFER_TYPE_SHARED;
351 } else {
352 assert(!pic->f.data[0]);
353
354 if (alloc_frame_buffer(s, pic) < 0)
355 return -1;
356
357 s->linesize = pic->f.linesize[0];
358 s->uvlinesize = pic->f.linesize[1];
359 }
360
361 if (pic->f.qscale_table == NULL) {
362 if (s->encoding) {
363 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
364 mb_array_size * sizeof(int16_t), fail)
365 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
366 mb_array_size * sizeof(int16_t), fail)
367 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
368 mb_array_size * sizeof(int8_t ), fail)
369 }
370
371 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
372 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
373 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
374 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
375 fail)
376 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
377 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
378 fail)
379 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
380 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
381 if (s->out_format == FMT_H264) {
382 for (i = 0; i < 2; i++) {
383 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
384 2 * (b4_array_size + 4) * sizeof(int16_t),
385 fail)
386 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
387 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
388 4 * mb_array_size * sizeof(uint8_t), fail)
389 }
390 pic->f.motion_subsample_log2 = 2;
391 } else if (s->out_format == FMT_H263 || s->encoding ||
392 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
393 for (i = 0; i < 2; i++) {
394 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
395 2 * (b8_array_size + 4) * sizeof(int16_t),
396 fail)
397 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
398 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
399 4 * mb_array_size * sizeof(uint8_t), fail)
400 }
401 pic->f.motion_subsample_log2 = 3;
402 }
403 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
404 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
405 64 * mb_array_size * sizeof(int16_t) * 6, fail)
406 }
407 pic->f.qstride = s->mb_stride;
408 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
409 1 * sizeof(AVPanScan), fail)
410 }
411
412 pic->owner2 = s;
413
414 return 0;
415 fail: // for the FF_ALLOCZ_OR_GOTO macro
416 if (r >= 0)
417 free_frame_buffer(s, pic);
418 return -1;
419 }
420
421 /**
422 * Deallocate a picture.
423 */
424 static void free_picture(MpegEncContext *s, Picture *pic)
425 {
426 int i;
427
428 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
429 free_frame_buffer(s, pic);
430 }
431
432 av_freep(&pic->mb_var);
433 av_freep(&pic->mc_mb_var);
434 av_freep(&pic->mb_mean);
435 av_freep(&pic->f.mbskip_table);
436 av_freep(&pic->qscale_table_base);
437 pic->f.qscale_table = NULL;
438 av_freep(&pic->mb_type_base);
439 pic->f.mb_type = NULL;
440 av_freep(&pic->f.dct_coeff);
441 av_freep(&pic->f.pan_scan);
442 pic->f.mb_type = NULL;
443 for (i = 0; i < 2; i++) {
444 av_freep(&pic->motion_val_base[i]);
445 av_freep(&pic->f.ref_index[i]);
446 pic->f.motion_val[i] = NULL;
447 }
448
449 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
450 for (i = 0; i < 4; i++) {
451 pic->f.base[i] =
452 pic->f.data[i] = NULL;
453 }
454 pic->f.type = 0;
455 }
456 }
457
/**
 * Initialize the per-slice-thread parts of an MpegEncContext:
 * ME maps (encoder), DCT blocks, and H.263 AC prediction values.
 * The linesize-dependent scratch buffers are only cleared here; they are
 * allocated later by ff_mpv_frame_size_alloc() once linesize is known.
 *
 * @return 0 on success, -1 on allocation failure
 */
static int init_duplicate_context(MpegEncContext *s)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    /* 12 blocks of 64 coefficients, double-buffered */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        /* + stride + 1 so the [-1]/[-stride] neighbours are addressable */
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_MPV_common_end()
}
502
503 static void free_duplicate_context(MpegEncContext *s)
504 {
505 if (s == NULL)
506 return;
507
508 av_freep(&s->edge_emu_buffer);
509 av_freep(&s->me.scratchpad);
510 s->me.temp =
511 s->rd_scratchpad =
512 s->b_scratchpad =
513 s->obmc_scratchpad = NULL;
514
515 av_freep(&s->dct_error_sum);
516 av_freep(&s->me.map);
517 av_freep(&s->me.score_map);
518 av_freep(&s->blocks);
519 av_freep(&s->ac_val_base);
520 s->block = NULL;
521 }
522
523 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
524 {
525 #define COPY(a) bak->a = src->a
526 COPY(edge_emu_buffer);
527 COPY(me.scratchpad);
528 COPY(me.temp);
529 COPY(rd_scratchpad);
530 COPY(b_scratchpad);
531 COPY(obmc_scratchpad);
532 COPY(me.map);
533 COPY(me.score_map);
534 COPY(blocks);
535 COPY(block);
536 COPY(start_mb_y);
537 COPY(end_mb_y);
538 COPY(me.map_generation);
539 COPY(pb);
540 COPY(dct_error_sum);
541 COPY(dct_count[0]);
542 COPY(dct_count[1]);
543 COPY(ac_val_base);
544 COPY(ac_val[0]);
545 COPY(ac_val[1]);
546 COPY(ac_val[2]);
547 #undef COPY
548 }
549
/**
 * Make dst a copy of src while preserving dst's own per-thread buffers
 * (scratchpads, blocks, ME maps, AC values, slice range).
 *
 * @return 0 on success, a negative AVERROR on scratch-buffer allocation failure
 */
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own block array, not src's */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (!dst->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
    return 0;
}
572
/**
 * Update the destination decoding thread's context from the source one
 * (frame-threading support): picture state, bitstream buffer, timing and
 * interlacing info.
 *
 * @return 0 on success, a negative AVERROR on failure
 */
int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    int i;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src || !s1->context_initialized)
        return 0;

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->picture_range_start  += MAX_PICTURE_COUNT;
        s->picture_range_end    += MAX_PICTURE_COUNT;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        /* NOTE(review): the return value of ff_MPV_common_init() is
         * ignored here — a failed init would go unnoticed; verify. */
        ff_MPV_common_init(s);
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        int err;
        s->context_reinit = 0;
        s->height = s1->height;
        s->width  = s1->width;
        if ((err = ff_MPV_common_frame_size_change(s)) < 0)
            return err;
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
    /* copy the struct range [last_picture, last_picture_ptr) in one go */
    memcpy(&s->last_picture, &s1->last_picture,
           (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);

    // reset s->picture[].f.extended_data to s->picture[].f.data
    for (i = 0; i < s->picture_count; i++)
        s->picture[i].f.extended_data = s->picture[i].f.data;

    /* pointers into s1->picture[] must be rebased into s->picture[] */
    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;

    // MPEG4 timing info
    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
           (char *) &s1->shape - (char *) &s1->time_increment_bits);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        /* NOTE(review): av_fast_malloc() can leave s->bitstream_buffer NULL
         * on OOM; the memcpy below does not check for that — verify. */
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);
    }

    // linesize dependend scratch buffer allocation
    if (!s->edge_emu_buffer)
        if (s1->linesize) {
            if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
            return AVERROR_BUG;
        }

    // MPEG2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;

        if (s1->pict_type != AV_PICTURE_TYPE_B) {
            s->last_non_b_pict_type = s1->pict_type;
        }
    }

    return 0;
}
685
686 /**
687 * Set the given MpegEncContext to common defaults
688 * (same for encoding and decoding).
689 * The changed fields will not depend upon the
690 * prior state of the MpegEncContext.
691 */
692 void ff_MPV_common_defaults(MpegEncContext *s)
693 {
694 s->y_dc_scale_table =
695 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
696 s->chroma_qscale_table = ff_default_chroma_qscale_table;
697 s->progressive_frame = 1;
698 s->progressive_sequence = 1;
699 s->picture_structure = PICT_FRAME;
700
701 s->coded_picture_number = 0;
702 s->picture_number = 0;
703 s->input_picture_number = 0;
704
705 s->picture_in_gop_number = 0;
706
707 s->f_code = 1;
708 s->b_code = 1;
709
710 s->picture_range_start = 0;
711 s->picture_range_end = MAX_PICTURE_COUNT;
712
713 s->slice_context_count = 1;
714 }
715
716 /**
717 * Set the given MpegEncContext to defaults for decoding.
718 * the changed fields will not depend upon
719 * the prior state of the MpegEncContext.
720 */
/**
 * Set the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    /* currently identical to the common defaults */
    ff_MPV_common_defaults(s);
}
725
726 /**
727 * Initialize and allocates MpegEncContext fields dependent on the resolution.
728 */
/**
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;      // + 1 column of guard MBs
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overriden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2   * c_size;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                      fail); // error ressilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] =
        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        /* working pointers skip the guard row/column of the *_base arrays */
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
                                   s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
                                   s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
                          sizeof(uint16_t), fail); // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
                          sizeof(int), fail);

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);

    }

    FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
                     mb_array_size * sizeof(uint8_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
                      mb_array_size * sizeof(uint8_t), fail);

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
                                  mb_array_size * 2 * sizeof(uint8_t), fail);
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
                                  mv_table_size * 2 * sizeof(int16_t), fail);
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
                                            + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
                              mb_array_size * 2 * sizeof(uint8_t), fail);
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
                          mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
                          mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for  error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
                          yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 is the neutral DC predictor value (128 << 3) */
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for  a quicker mpeg4 slice_end detection

    if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
        s->avctx->debug_mv) {
        /* NOTE(review): these av_malloc() results are not checked; a later
         * visualization pass presumably tolerates NULL — confirm. */
        s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
        s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
        s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
    }

    return 0;
fail:
    return AVERROR(ENOMEM);
}
880
881 /**
882 * init common structure for both encoder and decoder.
883 * this assumes that some variables like width/height are already set
884 */
/**
 * init common structure for  both encoder and decoder.
 * this assumes that some variables like width/height are already set
 */
av_cold int ff_MPV_common_init(MpegEncContext *s)
{
    int i;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    /* interlaced MPEG-2 needs an even number of MB rows (field macroblocks) */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != AV_CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    ff_dct_common_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    if (s->width && s->height) {
        /* set chroma shifts */
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &s->chroma_x_shift,
                                         &s->chroma_y_shift);

        /* convert fourcc to upper case */
        s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);

        s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);

        s->avctx->coded_frame = &s->current_picture.f;

        if (s->encoding) {
            if (s->msmpeg4_version) {
                FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                                  2 * 2 * (MAX_LEVEL + 1) *
                                  (MAX_RUN + 1) * 2 * sizeof(int), fail);
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

            /* 32 qscale levels x 64 coefficients per quant matrix */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
                              64 * 32   * sizeof(int), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
                              64 * 32   * sizeof(int), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
                              64 * 32 * 2 * sizeof(uint16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
                              64 * 32 * 2 * sizeof(uint16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);

            if (s->avctx->noise_reduction) {
                FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                                  2 * 64 * sizeof(uint16_t), fail);
            }
        }
    }

    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      s->picture_count * sizeof(Picture), fail);
    for (i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults(&s->picture[i].f);
    }

    if (s->width && s->height) {
        if (init_context_frame(s))
            goto fail;

        s->parse_context.state = -1;
    }

    s->context_initialized = 1;
    s->thread_context[0]   = s;

    if (s->width && s->height) {
        if (nb_slices > 1) {
            for (i = 1; i < nb_slices; i++) {
                /* NOTE(review): av_malloc() result is not checked before the
                 * memcpy below — an OOM here would crash; verify. */
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
            }

            for (i = 0; i < nb_slices; i++) {
                if (init_duplicate_context(s->thread_context[i]) < 0)
                    goto fail;
                /* split the MB rows as evenly as possible across slices */
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
            }
        } else {
            if (init_duplicate_context(s) < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
        s->slice_context_count = nb_slices;
    }

    return 0;
 fail:
    ff_MPV_common_end(s);
    return -1;
}
1013
/**
 * Frees and resets MpegEncContext fields depending on the resolution.
 * Is used during resolution changes to avoid a full reinitialization of the
 * codec. Safe to call on partially initialized contexts: av_freep() is a
 * no-op on NULL and resets every pointer it frees.
 */
static int free_context_frame(MpegEncContext *s)
{
    int i, j, k;

    av_freep(&s->mb_type);
    /* free the *_base allocations, then clear the derived pointers that
     * point into them so no dangling aliases survive */
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    /* field-prediction MV tables: indexed [list][field-pair][field] */
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    /* per-MB prediction state */
    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    /* error-resilience and rate-control side tables */
    av_freep(&s->error_status_table);
    av_freep(&s->er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    /* mark strides as unset so the next init recomputes them */
    s->linesize = s->uvlinesize = 0;

    for (i = 0; i < 3; i++)
        av_freep(&s->visualization_buffer[i]);

    return 0;
}
1071
/**
 * Reinitialize the resolution-dependent parts of an already initialized
 * MpegEncContext after a frame size change, avoiding a full codec reinit.
 *
 * Tears down the slice thread contexts and resolution-dependent tables,
 * flags existing picture buffers for lazy reallocation, recomputes
 * mb_height and rebuilds everything for the new dimensions.
 *
 * @return 0 on success, a negative error code on failure (the context is
 *         fully closed via ff_MPV_common_end() on failure)
 */
int ff_MPV_common_frame_size_change(MpegEncContext *s)
{
    int i, err = 0;

    /* free the duplicate contexts; index 0 is s itself, so only
     * indices >= 1 are av_freep()ed */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
    } else
        free_duplicate_context(s);

    free_context_frame(s);

    /* existing frame buffers have the old size; mark them so
     * ff_find_unused_picture() reallocates them on demand */
    if (s->picture)
        for (i = 0; i < s->picture_count; i++) {
            s->picture[i].needs_realloc = 1;
        }

    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;

    // init
    /* interlaced MPEG-2 needs mb_height rounded up to a whole number of
     * macroblock-pair rows */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != AV_CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR_INVALIDDATA;

    if ((err = init_context_frame(s)))
        goto fail;

    s->thread_context[0]   = s;

    if (s->width && s->height) {
        int nb_slices = s->slice_context_count;
        if (nb_slices > 1) {
            /* recreate the duplicate contexts for slice threading */
            for (i = 1; i < nb_slices; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
            }

            for (i = 0; i < nb_slices; i++) {
                if (init_duplicate_context(s->thread_context[i]) < 0)
                    goto fail;
                /* distribute MB rows evenly across the slice contexts */
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
            }
        } else {
            if (init_duplicate_context(s) < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
        s->slice_context_count = nb_slices;
    }

    return 0;
 fail:
    ff_MPV_common_end(s);
    return err;
}
1142
/* Free the common MpegEncContext state shared by encoder and decoder
 * (counterpart of the common init). */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i;

    /* free duplicate slice contexts; index 0 is s itself, so only
     * indices >= 1 are av_freep()ed */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    /* encoder-only allocations; av_freep() is a no-op on NULL */
    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* a threaded copy does not own the picture buffers; only the
     * original context frees them */
    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);

    /* free resolution-dependent tables last; pictures above may still
     * have referenced them */
    free_context_frame(s);

    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);

    s->context_initialized      = 0;
    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;
    s->linesize = s->uvlinesize = 0;
}
1193
/**
 * Initialize the derived lookup tables of an RLTable. For each of the two
 * classes (codes with index < rl->last are "not last coefficient", the
 * rest are "last coefficient") this builds:
 *   max_level[run] - largest level representable for a given run,
 *   max_run[level] - largest run representable for a given level,
 *   index_run[run] - index of the first code with a given run.
 *
 * @param rl           table to initialize; table_run, table_level, n and
 *                     last must already be set
 * @param static_store optional static backing storage. When non-NULL, the
 *                     three arrays per class are carved out of one buffer
 *                     laid out as [max_level | max_run | index_run]
 *                     (hence the 2 * MAX_RUN + MAX_LEVEL + 3 size) and the
 *                     function returns early if already initialized; when
 *                     NULL the arrays are av_malloc()ed.
 */
void ff_init_rl(RLTable *rl,
                uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        /* rl->n is used as the "unset" sentinel for index_run[] */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* max_level occupies bytes [0, MAX_RUN] of the static buffer */
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        /* max_run follows at offset MAX_RUN + 1 */
        if (static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        /* index_run follows at offset MAX_RUN + MAX_LEVEL + 2 */
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
1245
1246 void ff_init_vlc_rl(RLTable *rl)
1247 {
1248 int i, q;
1249
1250 for (q = 0; q < 32; q++) {
1251 int qmul = q * 2;
1252 int qadd = (q - 1) | 1;
1253
1254 if (q == 0) {
1255 qmul = 1;
1256 qadd = 0;
1257 }
1258 for (i = 0; i < rl->vlc.table_size; i++) {
1259 int code = rl->vlc.table[i][0];
1260 int len = rl->vlc.table[i][1];
1261 int level, run;
1262
1263 if (len == 0) { // illegal code
1264 run = 66;
1265 level = MAX_LEVEL;
1266 } else if (len < 0) { // more bits needed
1267 run = 0;
1268 level = code;
1269 } else {
1270 if (code == rl->n) { // esc
1271 run = 66;
1272 level = 0;
1273 } else {
1274 run = rl->table_run[code] + 1;
1275 level = rl->table_level[code] * qmul + qadd;
1276 if (code >= rl->last) run += 192;
1277 }
1278 }
1279 rl->rl_vlc[q][i].len = len;
1280 rl->rl_vlc[q][i].level = level;
1281 rl->rl_vlc[q][i].run = run;
1282 }
1283 }
1284 }
1285
1286 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1287 {
1288 int i;
1289
1290 /* release non reference frames */
1291 for (i = 0; i < s->picture_count; i++) {
1292 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1293 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1294 (remove_current || &s->picture[i] != s->current_picture_ptr)
1295 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1296 free_frame_buffer(s, &s->picture[i]);
1297 }
1298 }
1299 }
1300
1301 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1302 {
1303 if (pic->f.data[0] == NULL)
1304 return 1;
1305 if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
1306 if (!pic->owner2 || pic->owner2 == s)
1307 return 1;
1308 return 0;
1309 }
1310
1311 static int find_unused_picture(MpegEncContext *s, int shared)
1312 {
1313 int i;
1314
1315 if (shared) {
1316 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1317 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1318 return i;
1319 }
1320 } else {
1321 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1322 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1323 return i; // FIXME
1324 }
1325 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1326 if (pic_is_unused(s, &s->picture[i]))
1327 return i;
1328 }
1329 }
1330
1331 return AVERROR_INVALIDDATA;
1332 }
1333
1334 int ff_find_unused_picture(MpegEncContext *s, int shared)
1335 {
1336 int ret = find_unused_picture(s, shared);
1337
1338 if (ret >= 0 && ret < s->picture_range_end) {
1339 if (s->picture[ret].needs_realloc) {
1340 s->picture[ret].needs_realloc = 0;
1341 free_picture(s, &s->picture[ret]);
1342 avcodec_get_frame_defaults(&s->picture[ret].f);
1343 }
1344 }
1345 return ret;
1346 }
1347
1348 static void update_noise_reduction(MpegEncContext *s)
1349 {
1350 int intra, i;
1351
1352 for (intra = 0; intra < 2; intra++) {
1353 if (s->dct_count[intra] > (1 << 16)) {
1354 for (i = 0; i < 64; i++) {
1355 s->dct_error_sum[intra][i] >>= 1;
1356 }
1357 s->dct_count[intra] >>= 1;
1358 }
1359
1360 for (i = 0; i < 64; i++) {
1361 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1362 s->dct_count[intra] +
1363 s->dct_error_sum[intra][i] / 2) /
1364 (s->dct_error_sum[intra][i] + 1);
1365 }
1366 }
1367 }
1368
/**
 * Generic function for encode/decode called after coding/decoding
 * the header and before a frame is coded/decoded.
 *
 * Selects/allocates the current picture, releases stale reference
 * pictures, allocates dummy reference frames when references are missing,
 * adjusts linesizes for field pictures and selects the dequantizer
 * functions for this frame.
 *
 * @return 0 on success, a negative value on error
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    /* mark & release old frames */
    if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
        /* the previous "last" reference is no longer needed once a new
         * non-B frame arrives */
        if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
            s->last_picture_ptr != s->next_picture_ptr &&
            s->last_picture_ptr->f.data[0]) {
            if (s->last_picture_ptr->owner2 == s)
                free_frame_buffer(s, s->last_picture_ptr);
        }

        /* release forgotten pictures */
        /* if (mpeg124/h263) */
        if (!s->encoding) {
            for (i = 0; i < s->picture_count; i++) {
                if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
                    &s->picture[i] != s->last_picture_ptr &&
                    &s->picture[i] != s->next_picture_ptr &&
                    s->picture[i].f.reference && !s->picture[i].needs_realloc) {
                    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                        av_log(avctx, AV_LOG_ERROR,
                               "releasing zombie picture\n");
                    free_frame_buffer(s, &s->picture[i]);
                }
            }
        }
    }

    if (!s->encoding) {
        ff_release_unused_pictures(s, 1);

        if (s->current_picture_ptr &&
            s->current_picture_ptr->f.data[0] == NULL) {
            // we already have a unused image
            // (maybe it was set before reading the header)
            pic = s->current_picture_ptr;
        } else {
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            pic = &s->picture[i];
        }

        /* decide whether this picture will be referenced later; droppable
         * frames are never marked as references */
        pic->f.reference = 0;
        if (!s->droppable) {
            if (s->codec_id == AV_CODEC_ID_H264)
                pic->f.reference = s->picture_structure;
            else if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->f.reference = 3;
        }

        pic->f.coded_picture_number = s->coded_picture_number++;

        if (ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr = pic;
        // FIXME use only the vars from current_pic
        s->current_picture_ptr->f.top_field_first = s->top_field_first;
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* for field pictures, derive top_field_first from which field
             * came first in the bitstream */
            if (s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->f.top_field_first =
                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                     !s->progressive_sequence;
        s->current_picture_ptr->field_picture      = s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->f.pict_type = s->pict_type;
    // if (s->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* rotate the reference pictures: last <- next, next <- current */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
            s->pict_type, s->droppable);

    if (s->codec_id != AV_CODEC_ID_H264) {
        /* a non-I frame (or a field-based first keyframe) with no valid
         * last reference: synthesize one so motion compensation has
         * something to read from */
        if ((s->last_picture_ptr == NULL ||
             s->last_picture_ptr->f.data[0] == NULL) &&
            (s->pict_type != AV_PICTURE_TYPE_I ||
             s->picture_structure != PICT_FRAME)) {
            int h_chroma_shift, v_chroma_shift;
            av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                             &h_chroma_shift, &v_chroma_shift);
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR,
                       "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO,
                       "allocate dummy last picture for field based first keyframe\n");

            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            s->last_picture_ptr = &s->picture[i];
            if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
                s->last_picture_ptr = NULL;
                return -1;
            }

            /* fill with black luma (0) and neutral chroma (0x80) */
            memset(s->last_picture_ptr->f.data[0], 0,
                   avctx->height * s->last_picture_ptr->f.linesize[0]);
            memset(s->last_picture_ptr->f.data[1], 0x80,
                   (avctx->height >> v_chroma_shift) *
                   s->last_picture_ptr->f.linesize[1]);
            memset(s->last_picture_ptr->f.data[2], 0x80,
                   (avctx->height >> v_chroma_shift) *
                   s->last_picture_ptr->f.linesize[2]);

            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
            s->last_picture_ptr->f.reference = 3;
        }
        /* a B frame with no valid next reference: also needs a dummy */
        if ((s->next_picture_ptr == NULL ||
             s->next_picture_ptr->f.data[0] == NULL) &&
            s->pict_type == AV_PICTURE_TYPE_B) {
            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            s->next_picture_ptr = &s->picture[i];
            if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
                s->next_picture_ptr = NULL;
                return -1;
            }
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
            s->next_picture_ptr->f.reference = 3;
        }
    }

    if (s->last_picture_ptr)
        ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if (s->next_picture_ptr)
        ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    /* with frame threading, record which context owns the references */
    if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
        if (s->next_picture_ptr)
            s->next_picture_ptr->owner2 = s;
        if (s->last_picture_ptr)
            s->last_picture_ptr->owner2 = s;
    }

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                 s->last_picture_ptr->f.data[0]));

    /* field pictures: address the selected field by offsetting into the
     * frame and doubling the strides */
    if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            }
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i]    *= 2;
            s->next_picture.f.linesize[i]    *= 2;
        }
    }

    s->err_recognition = avctx->err_recognition;

    /* set dequantizer, we can't do it during init as
     * it might change for mpeg4 and we can't do it in the header
     * decode as init is not called for mpeg4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);
    }

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
1582
/* Generic function for encode/decode called after a frame has been
 * coded/decoded: extends the edges of reference frames, releases
 * non-reference buffers (encoder) and reports decoding completion. */
void ff_MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* redraw edges for the frame if decoding didn't complete */
    // just to make sure that all data is rendered.
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else if ((s->error_count || s->encoding) &&
               !s->avctx->hwaccel &&
               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
               s->unrestricted_mv &&
               s->current_picture.f.reference &&
               !s->intra_only &&
               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
        /* replicate the picture borders so unrestricted motion vectors
         * can read outside the visible area */
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                          s->h_edge_pos, s->v_edge_pos,
                          EDGE_WIDTH, EDGE_WIDTH,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* remember frame type and lambda for rate control of the next frame */
    s->last_pict_type                 = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
            break;
        }
    }
    assert(i < MAX_PICTURE_COUNT);
#endif

    if (s->encoding) {
        /* release non-reference frames */
        for (i = 0; i < s->picture_count; i++) {
            if (s->picture[i].f.data[0] && !s->picture[i].f.reference
                /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture,    0, sizeof(Picture));
    memset(&s->next_picture,    0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame = &s->current_picture_ptr->f;

    /* tell waiting frame threads this reference is fully decoded */
    if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
    }
}
1655
1656 /**
1657 * Draw a line from (ex, ey) -> (sx, sy).
1658 * @param w width of the image
1659 * @param h height of the image
1660 * @param stride stride/linesize of the image
1661 * @param color color of the arrow
1662 */
1663 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1664 int w, int h, int stride, int color)
1665 {
1666 int x, y, fr, f;
1667
1668 sx = av_clip(sx, 0, w - 1);
1669 sy = av_clip(sy, 0, h - 1);
1670 ex = av_clip(ex, 0, w - 1);
1671 ey = av_clip(ey, 0, h - 1);
1672
1673 buf[sy * stride + sx] += color;
1674
1675 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1676 if (sx > ex) {
1677 FFSWAP(int, sx, ex);
1678 FFSWAP(int, sy, ey);
1679 }
1680 buf += sx + sy * stride;
1681 ex -= sx;
1682 f = ((ey - sy) << 16) / ex;
1683 for (x = 0; x <= ex; x++) {
1684 y = (x * f) >> 16;
1685 fr = (x * f) & 0xFFFF;
1686 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1687 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1688 }
1689 } else {
1690 if (sy > ey) {
1691 FFSWAP(int, sx, ex);
1692 FFSWAP(int, sy, ey);
1693 }
1694 buf += sx + sy * stride;
1695 ey -= sy;
1696 if (ey)
1697 f = ((ex - sx) << 16) / ey;
1698 else
1699 f = 0;
1700 for (y = 0; y = ey; y++) {
1701 x = (y * f) >> 16;
1702 fr = (y * f) & 0xFFFF;
1703 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1704 buf[y * stride + x + 1] += (color * fr ) >> 16;
1705 }
1706 }
1707 }
1708
1709 /**
1710 * Draw an arrow from (ex, ey) -> (sx, sy).
1711 * @param w width of the image
1712 * @param h height of the image
1713 * @param stride stride/linesize of the image
1714 * @param color color of the arrow
1715 */
1716 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1717 int ey, int w, int h, int stride, int color)
1718 {
1719 int dx,dy;
1720
1721 sx = av_clip(sx, -100, w + 100);
1722 sy = av_clip(sy, -100, h + 100);
1723 ex = av_clip(ex, -100, w + 100);
1724 ey = av_clip(ey, -100, h + 100);
1725
1726 dx = ex - sx;
1727 dy = ey - sy;
1728
1729 if (dx * dx + dy * dy > 3 * 3) {
1730 int rx = dx + dy;
1731 int ry = -dx + dy;
1732 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1733
1734 // FIXME subpixel accuracy
1735 rx = ROUNDED_DIV(rx * 3 << 4, length);
1736 ry = ROUNDED_DIV(ry * 3 << 4, length);
1737
1738 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1739 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1740 }
1741 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1742 }
1743
1744 /**
1745 * Print debugging info for the given picture.
1746 */
1747 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1748 {
1749 if (s->avctx->hwaccel || !pict || !pict->mb_type)
1750 return;
1751
1752 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1753 int x,y;
1754
1755 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1756 switch (pict->pict_type) {
1757 case AV_PICTURE_TYPE_I:
1758 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1759 break;
1760 case AV_PICTURE_TYPE_P:
1761 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1762 break;
1763 case AV_PICTURE_TYPE_B:
1764 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1765 break;
1766 case AV_PICTURE_TYPE_S:
1767 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1768 break;
1769 case AV_PICTURE_TYPE_SI:
1770 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1771 break;
1772 case AV_PICTURE_TYPE_SP:
1773 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1774 break;
1775 }
1776 for (y = 0; y < s->mb_height; y++) {
1777 for (x = 0; x < s->mb_width; x++) {
1778 if (s->avctx->debug & FF_DEBUG_SKIP) {
1779 int count = s->mbskip_table[x + y * s->mb_stride];
1780 if (count > 9)
1781 count = 9;
1782 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1783 }
1784 if (s->avctx->debug & FF_DEBUG_QP) {
1785 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1786 pict->qscale_table[x + y * s->mb_stride]);
1787 }
1788 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1789 int mb_type = pict->mb_type[x + y * s->mb_stride];
1790 // Type & MV direction
1791 if (IS_PCM(mb_type))
1792 av_log(s->avctx, AV_LOG_DEBUG, "P");
1793 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1794 av_log(s->avctx, AV_LOG_DEBUG, "A");
1795 else if (IS_INTRA4x4(mb_type))
1796 av_log(s->avctx, AV_LOG_DEBUG, "i");
1797 else if (IS_INTRA16x16(mb_type))
1798 av_log(s->avctx, AV_LOG_DEBUG, "I");
1799 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1800 av_log(s->avctx, AV_LOG_DEBUG, "d");
1801 else if (IS_DIRECT(mb_type))
1802 av_log(s->avctx, AV_LOG_DEBUG, "D");
1803 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1804 av_log(s->avctx, AV_LOG_DEBUG, "g");
1805 else if (IS_GMC(mb_type))
1806 av_log(s->avctx, AV_LOG_DEBUG, "G");
1807 else if (IS_SKIP(mb_type))
1808 av_log(s->avctx, AV_LOG_DEBUG, "S");
1809 else if (!USES_LIST(mb_type, 1))
1810 av_log(s->avctx, AV_LOG_DEBUG, ">");
1811 else if (!USES_LIST(mb_type, 0))
1812 av_log(s->avctx, AV_LOG_DEBUG, "<");
1813 else {
1814 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1815 av_log(s->avctx, AV_LOG_DEBUG, "X");
1816 }
1817
1818 // segmentation
1819 if (IS_8X8(mb_type))
1820 av_log(s->avctx, AV_LOG_DEBUG, "+");
1821 else if (IS_16X8(mb_type))
1822 av_log(s->avctx, AV_LOG_DEBUG, "-");
1823 else if (IS_8X16(mb_type))
1824 av_log(s->avctx, AV_LOG_DEBUG, "|");
1825 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1826 av_log(s->avctx, AV_LOG_DEBUG, " ");
1827 else
1828 av_log(s->avctx, AV_LOG_DEBUG, "?");
1829
1830
1831 if (IS_INTERLACED(mb_type))
1832 av_log(s->avctx, AV_LOG_DEBUG, "=");
1833 else
1834 av_log(s->avctx, AV_LOG_DEBUG, " ");
1835 }
1836 }
1837 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1838 }
1839 }
1840
1841 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1842 (s->avctx->debug_mv)) {
1843 const int shift = 1 + s->quarter_sample;
1844 int mb_y;
1845 uint8_t *ptr;
1846 int i;
1847 int h_chroma_shift, v_chroma_shift, block_height;
1848 const int width = s->avctx->width;
1849 const int height = s->avctx->height;
1850 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1851 const int mv_stride = (s->mb_width << mv_sample_log2) +
1852 (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1853 s->low_delay = 0; // needed to see the vectors without trashing the buffers
1854
1855 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1856 &h_chroma_shift, &v_chroma_shift);
1857 for (i = 0; i < 3; i++) {
1858 memcpy(s->visualization_buffer[i], pict->data[i],
1859 (i == 0) ? pict->linesize[i] * height:
1860 pict->linesize[i] * height >> v_chroma_shift);
1861 pict->data[i] = s->visualization_buffer[i];
1862 }
1863 pict->type = FF_BUFFER_TYPE_COPY;
1864 ptr = pict->data[0];
1865 block_height = 16 >> v_chroma_shift;
1866
1867 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1868 int mb_x;
1869 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1870 const int mb_index = mb_x + mb_y * s->mb_stride;
1871 if ((s->avctx->debug_mv) && pict->motion_val) {
1872 int type;
1873 for (type = 0; type < 3; type++) {
1874 int direction = 0;
1875 switch (type) {
1876 case 0:
1877 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1878 (pict->pict_type!= AV_PICTURE_TYPE_P))
1879 continue;
1880 direction = 0;
1881 break;
1882 case 1:
1883 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1884 (pict->pict_type!= AV_PICTURE_TYPE_B))
1885 continue;
1886 direction = 0;
1887 break;
1888 case 2:
1889 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1890 (pict->pict_type!= AV_PICTURE_TYPE_B))
1891 continue;
1892 direction = 1;
1893 break;
1894 }
1895 if (!USES_LIST(pict->mb_type[mb_index], direction))
1896 continue;
1897
1898 if (IS_8X8(pict->mb_type[mb_index])) {
1899 int i;
1900 for (i = 0; i < 4; i++) {
1901 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1902 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1903 int xy = (mb_x * 2 + (i & 1) +
1904 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1905 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1906 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1907 draw_arrow(ptr, sx, sy, mx, my, width,
1908 height, s->linesize, 100);
1909 }
1910 } else if (IS_16X8(pict->mb_type[mb_index])) {
1911 int i;
1912 for (i = 0; i < 2; i++) {
1913 int sx = mb_x * 16 + 8;
1914 int sy = mb_y * 16 + 4 + 8 * i;
1915 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1916 int mx = (pict->motion_val[direction][xy][0] >> shift);
1917 int my = (pict->motion_val[direction][xy][1] >> shift);
1918
1919 if (IS_INTERLACED(pict->mb_type[mb_index]))
1920 my *= 2;
1921
1922 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1923 height, s->linesize, 100);
1924 }
1925 } else if (IS_8X16(pict->mb_type[mb_index])) {
1926 int i;
1927 for (i = 0; i < 2; i++) {
1928 int sx = mb_x * 16 + 4 + 8 * i;
1929 int sy = mb_y * 16 + 8;
1930 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1931 int mx = pict->motion_val[direction][xy][0] >> shift;
1932 int my = pict->motion_val[direction][xy][1] >> shift;
1933
1934 if (IS_INTERLACED(pict->mb_type[mb_index]))
1935 my *= 2;
1936
1937 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1938 height, s->linesize, 100);
1939 }
1940 } else {
1941 int sx = mb_x * 16 + 8;
1942 int sy = mb_y * 16 + 8;
1943 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
1944 int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1945 int my = pict->motion_val[direction][xy][1] >> shift + sy;
1946 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1947 }
1948 }
1949 }
1950 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1951 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1952 0x0101010101010101ULL;
1953 int y;
1954 for (y = 0; y < block_height; y++) {
1955 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1956 (block_height * mb_y + y) *
1957 pict->linesize[1]) = c;
1958 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1959 (block_height * mb_y + y) *
1960 pict->linesize[2]) = c;
1961 }
1962 }
1963 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1964 pict->motion_val) {
1965 int mb_type = pict->mb_type[mb_index];
1966 uint64_t u,v;
1967 int y;
1968 #define COLOR(theta, r) \
1969 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1970 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1971
1972
1973 u = v = 128;
1974 if (IS_PCM(mb_type)) {
1975 COLOR(120, 48)
1976 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1977 IS_INTRA16x16(mb_type)) {
1978 COLOR(30, 48)
1979 } else if (IS_INTRA4x4(mb_type)) {
1980 COLOR(90, 48)
1981 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1982 // COLOR(120, 48)
1983 } else if (IS_DIRECT(mb_type)) {
1984 COLOR(150, 48)
1985 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1986 COLOR(170, 48)
1987 } else if (IS_GMC(mb_type)) {
1988 COLOR(190, 48)
1989 } else if (IS_SKIP(mb_type)) {
1990 // COLOR(180, 48)
1991 } else if (!USES_LIST(mb_type, 1)) {
1992 COLOR(240, 48)
1993 } else if (!USES_LIST(mb_type, 0)) {
1994 COLOR(0, 48)
1995 } else {
1996 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1997 COLOR(300,48)
1998 }
1999
2000 u *= 0x0101010101010101ULL;
2001 v *= 0x0101010101010101ULL;
2002 for (y = 0; y < block_height; y++) {
2003 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2004 (block_height * mb_y + y) * pict->linesize[1]) = u;
2005 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2006 (block_height * mb_y + y) * pict->linesize[2]) = v;
2007 }
2008
2009 // segmentation
2010 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2011 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2012 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2013 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2014 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2015 }
2016 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2017 for (y = 0; y < 16; y++)
2018 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2019 pict->linesize[0]] ^= 0x80;
2020 }
2021 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2022 int dm = 1 << (mv_sample_log2 - 2);
2023 for (i = 0; i < 4; i++) {
2024 int sx = mb_x * 16 + 8 * (i & 1);
2025 int sy = mb_y * 16 + 8 * (i >> 1);
2026 int xy = (mb_x * 2 + (i & 1) +
2027 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2028 // FIXME bidir
2029 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
2030 if (mv[0] != mv[dm] ||
2031 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2032 for (y = 0; y < 8; y++)
2033 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2034 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2035 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2036 pict->linesize[0]) ^= 0x8080808080808080ULL;
2037 }
2038 }
2039
2040 if (IS_INTERLACED(mb_type) &&
2041 s->codec_id == AV_CODEC_ID_H264) {
2042 // hmm
2043 }
2044 }
2045 s->mbskip_table[mb_index] = 0;
2046 }
2047 }
2048 }
2049 }
2050
2051 /**
2052 * find the lowest MB row referenced in the MVs
2053 */
2054 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2055 {
2056 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2057 int my, off, i, mvs;
2058
2059 if (s->picture_structure != PICT_FRAME || s->mcsel)
2060 goto unhandled;
2061
2062 switch (s->mv_type) {
2063 case MV_TYPE_16X16:
2064 mvs = 1;
2065 break;
2066 case MV_TYPE_16X8:
2067 mvs = 2;
2068 break;
2069 case MV_TYPE_8X8:
2070 mvs = 4;
2071 break;
2072 default:
2073 goto unhandled;
2074 }
2075
2076 for (i = 0; i < mvs; i++) {
2077 my = s->mv[dir][i][1]<<qpel_shift;
2078 my_max = FFMAX(my_max, my);
2079 my_min = FFMIN(my_min, my);
2080 }
2081
2082 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2083
2084 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2085 unhandled:
2086 return s->mb_height-1;
2087 }
2088
/* put block[] to dest[]: dequantize the intra block in place, then write
 * (overwrite, not add) its IDCT to dest[] */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
2096
2097 /* add block[] to dest[] */
2098 static inline void add_dct(MpegEncContext *s,
2099 int16_t *block, int i, uint8_t *dest, int line_size)
2100 {
2101 if (s->block_last_index[i] >= 0) {
2102 s->dsp.idct_add (dest, line_size, block);
2103 }
2104 }
2105
2106 static inline void add_dequant_dct(MpegEncContext *s,
2107 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2108 {
2109 if (s->block_last_index[i] >= 0) {
2110 s->dct_unquantize_inter(s, block, i, qscale);
2111
2112 s->dsp.idct_add (dest, line_size, block);
2113 }
2114 }
2115
2116 /**
2117 * Clean dc, ac, coded_block for the current non-intra MB.
2118 */
2119 void ff_clean_intra_table_entries(MpegEncContext *s)
2120 {
2121 int wrap = s->b8_stride;
2122 int xy = s->block_index[0];
2123
2124 s->dc_val[0][xy ] =
2125 s->dc_val[0][xy + 1 ] =
2126 s->dc_val[0][xy + wrap] =
2127 s->dc_val[0][xy + 1 + wrap] = 1024;
2128 /* ac pred */
2129 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2130 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2131 if (s->msmpeg4_version>=3) {
2132 s->coded_block[xy ] =
2133 s->coded_block[xy + 1 ] =
2134 s->coded_block[xy + wrap] =
2135 s->coded_block[xy + 1 + wrap] = 0;
2136 }
2137 /* chroma */
2138 wrap = s->mb_stride;
2139 xy = s->mb_x + s->mb_y * wrap;
2140 s->dc_val[1][xy] =
2141 s->dc_val[2][xy] = 1024;
2142 /* ac pred */
2143 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2144 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2145
2146 s->mbintra_table[xy]= 0;
2147 }
2148
2149 /* generic function called after a macroblock has been parsed by the
2150 decoder or after it has been encoded by the encoder.
2151
2152 Important variables used:
2153 s->mb_intra : true if intra macroblock
2154 s->mv_dir : motion vector direction
2155 s->mv_type : motion vector type
2156 s->mv : motion vector
2157 s->interlaced_dct : true if interlaced dct used (mpeg2)
2158 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                            int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    /* hardware path: hand the MB to XvMC and skip all software work */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* save DCT coefficients (6 blocks of 64, in idct permutation order)
        * and dump them to the log */
       int i,j;
       int16_t *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               *dct++ = block[i][s->dsp.idct_permutation[j]];
               av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.f.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            /* h263-style prediction: wipe stale intra tables for this MB */
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            /* mpeg-style: reset the running DC predictors */
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        /* remember that this MB carries intra prediction data */
        s->mbintra_table[mb_xy]=1;

    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
        const int block_size = 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.f.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        /* interlaced DCT: fields are interleaved, so use double stride and
         * start the second block one line (not 8 lines) down */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* not yet displayable: render into the scratchpad and copy later */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){

                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    /* frame threading: wait until the referenced rows of the
                     * reference picture(s) have been decoded */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->f,
                                                 ff_MPV_lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->f,
                                                 ff_MPV_lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                op_qpix= s->me.qpel_put;
                if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                    op_pix = s->dsp.put_pixels_tab;
                }else{
                    op_pix = s->dsp.put_no_rnd_pixels_tab;
                }
                if (s->mv_dir & MV_DIR_FORWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                    /* a following backward MC must average into the result */
                    op_pix = s->dsp.avg_pixels_tab;
                    op_qpix= s->me.qpel_avg;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                /* codecs whose blocks still need dequantization here */
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                /* blocks were already dequantized by the decoder */
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                /* WMV2 has its own residual-add path */
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* MPEG-1/2: blocks are already dequantized, only IDCT+store */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        /* rendered into the scratchpad above: copy the MB to its real place */
        if(!readable){
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
2390
/* Public entry point: dispatch to a specialized MPEG-1/2 variant of
 * MPV_decode_mb_internal when not built for size, otherwise always use
 * the generic one. */
void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
#if !CONFIG_SMALL
    if(s->out_format == FMT_MPEG1) {
        MPV_decode_mb_internal(s, block, 1);
    } else
#endif
        MPV_decode_mb_internal(s, block, 0);
}
2399
2400 /**
2401 * @param h is the normal height, this will be reduced automatically if needed for the last row
2402 */
2403 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2404 const int field_pic= s->picture_structure != PICT_FRAME;
2405 if(field_pic){
2406 h <<= 1;
2407 y <<= 1;
2408 }
2409
2410 if (!s->avctx->hwaccel
2411 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2412 && s->unrestricted_mv
2413 && s->current_picture.f.reference
2414 && !s->intra_only
2415 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2416 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
2417 int sides = 0, edge_h;
2418 int hshift = desc->log2_chroma_w;
2419 int vshift = desc->log2_chroma_h;
2420 if (y==0) sides |= EDGE_TOP;
2421 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2422
2423 edge_h= FFMIN(h, s->v_edge_pos - y);
2424
2425 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2426 s->linesize, s->h_edge_pos, edge_h,
2427 EDGE_WIDTH, EDGE_WIDTH, sides);
2428 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2429 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2430 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2431 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2432 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2433 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2434 }
2435
2436 h= FFMIN(h, s->avctx->height - y);
2437
2438 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2439
2440 if (s->avctx->draw_horiz_band) {
2441 AVFrame *src;
2442 int offset[AV_NUM_DATA_POINTERS];
2443 int i;
2444
2445 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2446 src = &s->current_picture_ptr->f;
2447 else if(s->last_picture_ptr)
2448 src = &s->last_picture_ptr->f;
2449 else
2450 return;
2451
2452 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2453 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2454 offset[i] = 0;
2455 }else{
2456 offset[0]= y * s->linesize;
2457 offset[1]=
2458 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2459 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2460 offset[i] = 0;
2461 }
2462
2463 emms_c();
2464
2465 s->avctx->draw_horiz_band(s->avctx, src, offset,
2466 y, s->picture_structure, h);
2467 }
2468 }
2469
/* Initialize block_index[] (positions of this MB's 8x8 blocks in the
 * per-block tables) and dest[] (pixel pointers for the current MB) from
 * mb_x/mb_y. Called once per MB before decoding/encoding it. */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    const int mb_size= 4;  /* log2 of the 16-pixel MB size */

    /* four luma 8x8 blocks on the b8 grid; the -2/-1 bias points one MB
     * left so that prediction from the left neighbor works */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* two chroma blocks, stored after the luma plane of the tables */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
        s->dest[0] += s->mb_y *   linesize << mb_size;
        s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: only every second MB row belongs to this field */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
2501
2502 /**
2503 * Permute an 8x8 block.
2504 * @param block the block which will be permuted according to the given permutation vector
2505 * @param permutation the permutation vector
2506 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
2507 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
2508 * (inverse) permutated to scantable order!
2509 */
2510 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2511 {
2512 int i;
2513 int16_t temp[64];
2514
2515 if(last<=0) return;
2516 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
2517
2518 for(i=0; i<=last; i++){
2519 const int j= scantable[i];
2520 temp[j]= block[j];
2521 block[j]=0;
2522 }
2523
2524 for(i=0; i<=last; i++){
2525 const int j= scantable[i];
2526 const int perm_j= permutation[j];
2527 block[perm_j]= temp[j];
2528 }
2529 }
2530
2531 void ff_mpeg_flush(AVCodecContext *avctx){
2532 int i;
2533 MpegEncContext *s = avctx->priv_data;
2534
2535 if(s==NULL || s->picture==NULL)
2536 return;
2537
2538 for(i=0; i<s->picture_count; i++){
2539 if (s->picture[i].f.data[0] &&
2540 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2541 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2542 free_frame_buffer(s, &s->picture[i]);
2543 }
2544 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2545
2546 s->mb_x= s->mb_y= 0;
2547
2548 s->parse_context.state= -1;
2549 s->parse_context.frame_start_found= 0;
2550 s->parse_context.overread= 0;
2551 s->parse_context.overread_index= 0;
2552 s->parse_context.index= 0;
2553 s->parse_context.last_index= 0;
2554 s->bitstream_buffer_size=0;
2555 s->pp_time=0;
2556 }
2557
/* MPEG-1 intra dequantization: the DC coefficient is scaled by the
 * luma/chroma DC scale, each AC coefficient by qscale * quant matrix / 8
 * with the magnitude forced odd via (level - 1) | 1. */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    /* blocks 0..3 are luma, the rest chroma */
    if (n < 4)
        block[0] = block[0] * s->y_dc_scale;
    else
        block[0] = block[0] * s->c_dc_scale;
    /* XXX: only mpeg1 */
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                /* work on the magnitude so the odd-forcing rounds toward zero */
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;  /* force the result odd */
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}
2589
/* MPEG-1 inter dequantization: (2*level + 1) * qscale * matrix / 16 with
 * the magnitude forced odd; unlike intra, the DC term (i = 0) is handled
 * by the same formula. */
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        /* NOTE(review): intra_scantable is used for inter blocks too;
         * presumably both scantables carry the same permutation — confirm */
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;  /* force the result odd */
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}
2618
/* MPEG-2 intra dequantization: like the MPEG-1 variant but without the
 * odd-forcing step; with alternate scan all 63 AC positions may be coded,
 * so the full block is processed regardless of block_last_index. */
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    /* blocks 0..3 are luma, the rest chroma */
    if (n < 4)
        block[0] = block[0] * s->y_dc_scale;
    else
        block[0] = block[0] * s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
        }
    }
}
2648
/* Bit-exact variant of dct_unquantize_mpeg2_intra_c: additionally tracks
 * the parity of the coefficient sum and toggles the lowest bit of
 * block[63] with it (sum starts at -1 so the toggle fires when the total
 * sum including DC is even). */
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    /* blocks 0..3 are luma, the rest chroma */
    if (n < 4)
        block[0] = block[0] * s->y_dc_scale;
    else
        block[0] = block[0] * s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
            sum+=level;
        }
    }
    /* parity correction on the last coefficient */
    block[63]^=sum&1;
}
2681
/* MPEG-2 inter dequantization: (2*level + 1) * qscale * matrix / 16,
 * no odd-forcing, with the same block[63] parity correction as the
 * bitexact intra variant. */
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        /* NOTE(review): intra_scantable is used for inter blocks too;
         * presumably both scantables carry the same permutation — confirm */
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
            }
            block[j] = level;
            sum+=level;
        }
    }
    /* parity correction on the last coefficient */
    block[63]^=sum&1;
}
2712
2713 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2714 int16_t *block, int n, int qscale)
2715 {
2716 int i, level, qmul, qadd;
2717 int nCoeffs;
2718
2719 assert(s->block_last_index[n]>=0);
2720
2721 qmul = qscale << 1;
2722
2723 if (!s->h263_aic) {
2724 if (n < 4)
2725 block[0] = block[0] * s->y_dc_scale;
2726 else
2727 block[0] = block[0] * s->c_dc_scale;
2728 qadd = (qscale - 1) | 1;
2729 }else{
2730 qadd = 0;
2731 }
2732 if(s->ac_pred)
2733 nCoeffs=63;
2734 else
2735 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2736
2737 for(i=1; i<=nCoeffs; i++) {
2738 level = block[i];
2739 if (level) {
2740 if (level < 0) {
2741 level = level * qmul - qadd;
2742 } else {
2743 level = level * qmul + qadd;
2744 }
2745 block[i] = level;
2746 }
2747 }
2748 }
2749
2750 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2751 int16_t *block, int n, int qscale)
2752 {
2753 int i, level, qmul, qadd;
2754 int nCoeffs;
2755
2756 assert(s->block_last_index[n]>=0);
2757
2758 qadd = (qscale - 1) | 1;
2759 qmul = qscale << 1;
2760
2761 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2762
2763 for(i=0; i<=nCoeffs; i++) {
2764 level = block[i];
2765 if (level) {
2766 if (level < 0) {
2767 level = level * qmul - qadd;
2768 } else {
2769 level = level * qmul + qadd;
2770 }
2771 block[i] = level;
2772 }
2773 }
2774 }
2775
2776 /**
2777 * set qscale and update qscale dependent variables.
2778 */
2779 void ff_set_qscale(MpegEncContext * s, int qscale)
2780 {
2781 if (qscale < 1)
2782 qscale = 1;
2783 else if (qscale > 31)
2784 qscale = 31;
2785
2786 s->qscale = qscale;
2787 s->chroma_qscale= s->chroma_qscale_table[qscale];
2788
2789 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2790 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2791 }
2792
/* Report decode progress (the current MB row) on the current picture so
 * that other frame threads waiting on it can proceed. Not reported for
 * B-frames, partitioned frames, or after an error. */
void ff_MPV_report_decode_progress(MpegEncContext *s)
{
    if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
        ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
}