49f2e661773ab5dcb59c250a1e461a7d39bfb9b2
[libav.git] / libavcodec / mpegvideo.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
35 #include "avcodec.h"
36 #include "blockdsp.h"
37 #include "idctdsp.h"
38 #include "internal.h"
39 #include "mathops.h"
40 #include "mpegutils.h"
41 #include "mpegvideo.h"
42 #include "mjpegenc.h"
43 #include "msmpeg4.h"
44 #include "qpeldsp.h"
45 #include "xvmc_internal.h"
46 #include "thread.h"
47 #include "wmv2.h"
48 #include <limits.h>
49
/* Default luma->chroma qscale mapping: the identity, i.e. chroma uses the
 * same quantizer scale as luma unless a codec installs its own table. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
55
/* MPEG-1 DC scale: a constant 8 for every qscale value (index is qscale). */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
};
67
/* MPEG-2 DC scale for intra_dc_precision == 1 (9-bit DC): constant 4. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
};
79
/* MPEG-2 DC scale for intra_dc_precision == 2 (10-bit DC): constant 2. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
};
91
/* MPEG-2 DC scale for intra_dc_precision == 3 (11-bit DC): constant 1. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
};
103
/* DC scale tables indexed by intra_dc_precision (0..3); entry 0 reuses the
 * MPEG-1 table since precision 0 means 8-bit DC, identical to MPEG-1. */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
110
/* Alternate horizontal scan order (raster positions in coding order),
 * used by codecs that scan AC coefficients row-dominantly. */
const uint8_t ff_alternate_horizontal_scan[64] = {
     0,  1,  2,  3,  8,  9, 16, 17,
    10, 11,  4,  5,  6,  7, 15, 14,
    13, 12, 19, 18, 24, 25, 32, 33,
    26, 27, 20, 21, 22, 23, 28, 29,
    30, 31, 34, 35, 40, 41, 48, 49,
    42, 43, 36, 37, 38, 39, 44, 45,
    46, 47, 50, 51, 56, 57, 58, 59,
    52, 53, 54, 55, 60, 61, 62, 63,
};
121
/* Alternate vertical scan order (raster positions in coding order),
 * selected via alternate_scan, e.g. for interlaced MPEG-2 material. */
const uint8_t ff_alternate_vertical_scan[64] = {
     0,  8, 16, 24,  1,  9,  2, 10,
    17, 25, 32, 40, 48, 56, 57, 49,
    41, 33, 26, 18,  3, 11,  4, 12,
    19, 27, 34, 42, 50, 58, 35, 43,
    51, 59, 20, 28,  5, 13,  6, 14,
    21, 29, 36, 44, 52, 60, 37, 45,
    53, 61, 22, 30,  7, 15, 23, 31,
    38, 46, 54, 62, 39, 47, 55, 63,
};
132
133 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
134 int16_t *block, int n, int qscale)
135 {
136 int i, level, nCoeffs;
137 const uint16_t *quant_matrix;
138
139 nCoeffs= s->block_last_index[n];
140
141 if (n < 4)
142 block[0] = block[0] * s->y_dc_scale;
143 else
144 block[0] = block[0] * s->c_dc_scale;
145 /* XXX: only mpeg1 */
146 quant_matrix = s->intra_matrix;
147 for(i=1;i<=nCoeffs;i++) {
148 int j= s->intra_scantable.permutated[i];
149 level = block[j];
150 if (level) {
151 if (level < 0) {
152 level = -level;
153 level = (int)(level * qscale * quant_matrix[j]) >> 3;
154 level = (level - 1) | 1;
155 level = -level;
156 } else {
157 level = (int)(level * qscale * quant_matrix[j]) >> 3;
158 level = (level - 1) | 1;
159 }
160 block[j] = level;
161 }
162 }
163 }
164
165 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
166 int16_t *block, int n, int qscale)
167 {
168 int i, level, nCoeffs;
169 const uint16_t *quant_matrix;
170
171 nCoeffs= s->block_last_index[n];
172
173 quant_matrix = s->inter_matrix;
174 for(i=0; i<=nCoeffs; i++) {
175 int j= s->intra_scantable.permutated[i];
176 level = block[j];
177 if (level) {
178 if (level < 0) {
179 level = -level;
180 level = (((level << 1) + 1) * qscale *
181 ((int) (quant_matrix[j]))) >> 4;
182 level = (level - 1) | 1;
183 level = -level;
184 } else {
185 level = (((level << 1) + 1) * qscale *
186 ((int) (quant_matrix[j]))) >> 4;
187 level = (level - 1) | 1;
188 }
189 block[j] = level;
190 }
191 }
192 }
193
194 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
195 int16_t *block, int n, int qscale)
196 {
197 int i, level, nCoeffs;
198 const uint16_t *quant_matrix;
199
200 if(s->alternate_scan) nCoeffs= 63;
201 else nCoeffs= s->block_last_index[n];
202
203 if (n < 4)
204 block[0] = block[0] * s->y_dc_scale;
205 else
206 block[0] = block[0] * s->c_dc_scale;
207 quant_matrix = s->intra_matrix;
208 for(i=1;i<=nCoeffs;i++) {
209 int j= s->intra_scantable.permutated[i];
210 level = block[j];
211 if (level) {
212 if (level < 0) {
213 level = -level;
214 level = (int)(level * qscale * quant_matrix[j]) >> 3;
215 level = -level;
216 } else {
217 level = (int)(level * qscale * quant_matrix[j]) >> 3;
218 }
219 block[j] = level;
220 }
221 }
222 }
223
224 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
225 int16_t *block, int n, int qscale)
226 {
227 int i, level, nCoeffs;
228 const uint16_t *quant_matrix;
229 int sum=-1;
230
231 if(s->alternate_scan) nCoeffs= 63;
232 else nCoeffs= s->block_last_index[n];
233
234 if (n < 4)
235 block[0] = block[0] * s->y_dc_scale;
236 else
237 block[0] = block[0] * s->c_dc_scale;
238 quant_matrix = s->intra_matrix;
239 for(i=1;i<=nCoeffs;i++) {
240 int j= s->intra_scantable.permutated[i];
241 level = block[j];
242 if (level) {
243 if (level < 0) {
244 level = -level;
245 level = (int)(level * qscale * quant_matrix[j]) >> 3;
246 level = -level;
247 } else {
248 level = (int)(level * qscale * quant_matrix[j]) >> 3;
249 }
250 block[j] = level;
251 sum+=level;
252 }
253 }
254 block[63]^=sum&1;
255 }
256
257 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
258 int16_t *block, int n, int qscale)
259 {
260 int i, level, nCoeffs;
261 const uint16_t *quant_matrix;
262 int sum=-1;
263
264 if(s->alternate_scan) nCoeffs= 63;
265 else nCoeffs= s->block_last_index[n];
266
267 quant_matrix = s->inter_matrix;
268 for(i=0; i<=nCoeffs; i++) {
269 int j= s->intra_scantable.permutated[i];
270 level = block[j];
271 if (level) {
272 if (level < 0) {
273 level = -level;
274 level = (((level << 1) + 1) * qscale *
275 ((int) (quant_matrix[j]))) >> 4;
276 level = -level;
277 } else {
278 level = (((level << 1) + 1) * qscale *
279 ((int) (quant_matrix[j]))) >> 4;
280 }
281 block[j] = level;
282 sum+=level;
283 }
284 }
285 block[63]^=sum&1;
286 }
287
288 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
289 int16_t *block, int n, int qscale)
290 {
291 int i, level, qmul, qadd;
292 int nCoeffs;
293
294 assert(s->block_last_index[n]>=0);
295
296 qmul = qscale << 1;
297
298 if (!s->h263_aic) {
299 if (n < 4)
300 block[0] = block[0] * s->y_dc_scale;
301 else
302 block[0] = block[0] * s->c_dc_scale;
303 qadd = (qscale - 1) | 1;
304 }else{
305 qadd = 0;
306 }
307 if(s->ac_pred)
308 nCoeffs=63;
309 else
310 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
311
312 for(i=1; i<=nCoeffs; i++) {
313 level = block[i];
314 if (level) {
315 if (level < 0) {
316 level = level * qmul - qadd;
317 } else {
318 level = level * qmul + qadd;
319 }
320 block[i] = level;
321 }
322 }
323 }
324
325 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
326 int16_t *block, int n, int qscale)
327 {
328 int i, level, qmul, qadd;
329 int nCoeffs;
330
331 assert(s->block_last_index[n]>=0);
332
333 qadd = (qscale - 1) | 1;
334 qmul = qscale << 1;
335
336 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
337
338 for(i=0; i<=nCoeffs; i++) {
339 level = block[i];
340 if (level) {
341 if (level < 0) {
342 level = level * qmul - qadd;
343 } else {
344 level = level * qmul + qadd;
345 }
346 block[i] = level;
347 }
348 }
349 }
350
/**
 * Error-resilience callback: reconstruct one macroblock from the motion
 * parameters chosen by the error concealment code, then decode it via the
 * normal MB path. Installed in ERContext.decode_mb by init_er().
 */
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    MpegEncContext *s = opaque;

    /* load the concealment parameters into the decoding context */
    s->mv_dir     = mv_dir;
    s->mv_type    = mv_type;
    s->mb_intra   = mb_intra;
    s->mb_skipped = mb_skipped;
    s->mb_x       = mb_x;
    s->mb_y       = mb_y;
    memcpy(s->mv, mv, sizeof(*mv));

    ff_init_block_index(s);
    ff_update_block_index(s);

    s->bdsp.clear_blocks(s->block[0]);

    /* point dest at this MB in the current picture; chroma shifted per pix_fmt */
    s->dest[0] = s->current_picture.f->data[0] + (s->mb_y *  16                       * s->linesize)   + s->mb_x *  16;
    s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    if (ref)
        av_log(s->avctx, AV_LOG_DEBUG,
               "Interlaced error concealment is not fully implemented\n");
    ff_mpv_decode_mb(s, s->block);
}
379
/* init common dct for both encoder and decoder */
static av_cold int dct_init(MpegEncContext *s)
{
    ff_blockdsp_init(&s->bdsp, s->avctx);
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_mpegvideodsp_init(&s->mdsp);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    /* install the portable C dequantizers; arch-specific init below may
     * override them with optimized versions */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->avctx->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    if (HAVE_INTRINSICS_NEON)
        ff_mpv_common_init_neon(s);

    if (ARCH_ARM)
        ff_mpv_common_init_arm(s);
    if (ARCH_PPC)
        ff_mpv_common_init_ppc(s);
    if (ARCH_X86)
        ff_mpv_common_init_x86(s);

    return 0;
}
409
/**
 * Initialize the IDCT DSP context and build the permuted scan tables
 * matching the chosen IDCT's coefficient permutation.
 */
av_cold void ff_mpv_idct_init(MpegEncContext *s)
{
    ff_idctdsp_init(&s->idsp, s->avctx);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
}
427
/**
 * Allocate the linesize-dependent scratch buffers (edge emulation and the
 * shared ME/RD/OBMC scratchpad).
 *
 * @param s        context receiving the buffers
 * @param linesize luma stride the buffers must accommodate
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int frame_size_alloc(MpegEncContext *s, int linesize)
{
    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for  halfpel / 21x21 for  h264)
    // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
                      fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
                      fail)
    /* the scratchpads deliberately alias one allocation; they are never
     * live at the same time */
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}
452
/**
 * Allocate a frame buffer
 *
 * For encoders the frame is over-allocated by EDGE_WIDTH on every side and
 * the data pointers are then offset so pic->f->data[] points at the visible
 * area. WMV3IMAGE/VC1IMAGE/MSS2 bypass the thread-aware buffer path because
 * they allocate internal buffers with different dimensions.
 *
 * @return 0 on success, a negative value on failure
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int edges_needed = av_codec_is_encoder(s->avctx->codec);
    int r, ret;

    pic->tf.f = pic->f;
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2) {
        if (edges_needed) {
            /* request a frame enlarged by the edge size; trimmed back below */
            pic->f->width  = s->avctx->width  + 2 * EDGE_WIDTH;
            pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
        }

        r = ff_thread_get_buffer(s->avctx, &pic->tf,
                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
    } else {
        pic->f->width  = s->avctx->width;
        pic->f->height = s->avctx->height;
        pic->f->format = s->avctx->pix_fmt;
        r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
    }

    if (r < 0 || !pic->f->buf[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
               r, pic->f->data[0]);
        return -1;
    }

    if (edges_needed) {
        int i;
        /* shift data[] past the top/left edge so coordinates address the
         * visible picture, then restore the nominal dimensions */
        for (i = 0; pic->f->data[i]; i++) {
            int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
                         pic->f->linesize[i] +
                         (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
            pic->f->data[i] += offset;
        }
        pic->f->width  = s->avctx->width;
        pic->f->height = s->avctx->height;
    }

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->frame_priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
            if (!pic->hwaccel_priv_buf) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }

    /* strides must stay constant across the sequence once established */
    if (s->linesize && (s->linesize   != pic->f->linesize[0] ||
                        s->uvlinesize != pic->f->linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        ff_mpeg_unref_picture(s->avctx, pic);
        return -1;
    }

    if (pic->f->linesize[1] != pic->f->linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        ff_mpeg_unref_picture(s->avctx, pic);
        return -1;
    }

    /* lazily allocate the stride-dependent scratch buffers now that the
     * real linesize is known */
    if (!s->edge_emu_buffer &&
        (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        ff_mpeg_unref_picture(s->avctx, pic);
        return ret;
    }

    return 0;
}
534
/**
 * Release all per-picture side-data table buffers (refcounted; the actual
 * memory is freed when the last reference goes away).
 */
void ff_free_picture_tables(Picture *pic)
{
    int i;

    av_buffer_unref(&pic->mb_var_buf);
    av_buffer_unref(&pic->mc_mb_var_buf);
    av_buffer_unref(&pic->mb_mean_buf);
    av_buffer_unref(&pic->mbskip_table_buf);
    av_buffer_unref(&pic->qscale_table_buf);
    av_buffer_unref(&pic->mb_type_buf);

    for (i = 0; i < 2; i++) {
        av_buffer_unref(&pic->motion_val_buf[i]);
        av_buffer_unref(&pic->ref_index_buf[i]);
    }
}
551
/**
 * Allocate the refcounted per-picture side-data tables (qscale, mb_type,
 * skip flags; plus variance tables when encoding and motion vectors for
 * H.263-family output or encoding).
 *
 * @return 0 on success, AVERROR(ENOMEM) on any allocation failure; partially
 *         allocated buffers are left for the caller to release.
 */
static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
{
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    int i;


    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
                                             sizeof(uint32_t));
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);

    if (s->encoding) {
        /* rate-control statistics, only needed by the encoder */
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
            return AVERROR(ENOMEM);
    }

    if (s->out_format == FMT_H263 || s->encoding) {
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
        }
    }

    return 0;
}
589
/**
 * Ensure every existing per-picture table buffer is writable (copies any
 * buffer that is shared with another reference).
 *
 * @return 0 on success, a negative AVERROR code on allocation failure
 */
static int make_tables_writable(Picture *pic)
{
    int ret, i;
#define MAKE_WRITABLE(table) \
do {\
    if (pic->table &&\
       (ret = av_buffer_make_writable(&pic->table)) < 0)\
    return ret;\
} while (0)

    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);

    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
    }

    return 0;
}
614
615 /**
616 * Allocate a Picture.
617 * The pixels are allocated/set by calling get_buffer() if shared = 0
618 */
619 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
620 {
621 int i, ret;
622
623 if (shared) {
624 assert(pic->f->data[0]);
625 pic->shared = 1;
626 } else {
627 assert(!pic->f->buf[0]);
628
629 if (alloc_frame_buffer(s, pic) < 0)
630 return -1;
631
632 s->linesize = pic->f->linesize[0];
633 s->uvlinesize = pic->f->linesize[1];
634 }
635
636 if (!pic->qscale_table_buf)
637 ret = alloc_picture_tables(s, pic);
638 else
639 ret = make_tables_writable(pic);
640 if (ret < 0)
641 goto fail;
642
643 if (s->encoding) {
644 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
645 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
646 pic->mb_mean = pic->mb_mean_buf->data;
647 }
648
649 pic->mbskip_table = pic->mbskip_table_buf->data;
650 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
651 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
652
653 if (pic->motion_val_buf[0]) {
654 for (i = 0; i < 2; i++) {
655 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
656 pic->ref_index[i] = pic->ref_index_buf[i]->data;
657 }
658 }
659
660 return 0;
661 fail:
662 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
663 ff_mpeg_unref_picture(s->avctx, pic);
664 ff_free_picture_tables(pic);
665 return AVERROR(ENOMEM);
666 }
667
/**
 * Deallocate a picture.
 *
 * Releases the frame data (via the thread-aware path for normal codecs) and
 * the hwaccel private buffer; side-data tables are only freed when the
 * picture is flagged for reallocation.
 */
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
{
    pic->tf.f = pic->f;
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (avctx->codec->id != AV_CODEC_ID_WMV3IMAGE &&
        avctx->codec->id != AV_CODEC_ID_VC1IMAGE  &&
        avctx->codec->id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(avctx, &pic->tf);
    else if (pic->f)
        av_frame_unref(pic->f);

    av_buffer_unref(&pic->hwaccel_priv_buf);

    if (pic->needs_realloc)
        ff_free_picture_tables(pic);
}
688
/**
 * Make dst's side-data tables reference the same buffers as src's (taking
 * new AVBuffer references where the underlying buffers differ) and copy the
 * derived data pointers.
 *
 * @return 0 on success, AVERROR(ENOMEM) if taking a reference failed (dst's
 *         tables are freed in that case)
 */
static int update_picture_tables(Picture *dst, Picture *src)
{
     int i;

#define UPDATE_TABLE(table)\
do {\
    if (src->table &&\
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
        av_buffer_unref(&dst->table);\
        dst->table = av_buffer_ref(src->table);\
        if (!dst->table) {\
            ff_free_picture_tables(dst);\
            return AVERROR(ENOMEM);\
        }\
    }\
} while (0)

    UPDATE_TABLE(mb_var_buf);
    UPDATE_TABLE(mc_mb_var_buf);
    UPDATE_TABLE(mb_mean_buf);
    UPDATE_TABLE(mbskip_table_buf);
    UPDATE_TABLE(qscale_table_buf);
    UPDATE_TABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        UPDATE_TABLE(motion_val_buf[i]);
        UPDATE_TABLE(ref_index_buf[i]);
    }

    /* the plain pointers are derived from the buffers; copy them directly */
    dst->mb_var        = src->mb_var;
    dst->mc_mb_var     = src->mc_mb_var;
    dst->mb_mean       = src->mb_mean;
    dst->mbskip_table  = src->mbskip_table;
    dst->qscale_table  = src->qscale_table;
    dst->mb_type       = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    return 0;
}
730
731 int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
732 {
733 int ret;
734
735 av_assert0(!dst->f->buf[0]);
736 av_assert0(src->f->buf[0]);
737
738 src->tf.f = src->f;
739 dst->tf.f = dst->f;
740 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
741 if (ret < 0)
742 goto fail;
743
744 ret = update_picture_tables(dst, src);
745 if (ret < 0)
746 goto fail;
747
748 if (src->hwaccel_picture_private) {
749 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
750 if (!dst->hwaccel_priv_buf)
751 goto fail;
752 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
753 }
754
755 dst->field_picture = src->field_picture;
756 dst->mb_var_sum = src->mb_var_sum;
757 dst->mc_mb_var_sum = src->mc_mb_var_sum;
758 dst->b_frame_score = src->b_frame_score;
759 dst->needs_realloc = src->needs_realloc;
760 dst->reference = src->reference;
761 dst->shared = src->shared;
762
763 return 0;
764 fail:
765 ff_mpeg_unref_picture(avctx, dst);
766 return ret;
767 }
768
/**
 * Allocate the per-slice-thread buffers of a context: ME maps and noise
 * reduction state (encoder only), the DCT block arrays, and the AC
 * prediction values for H.263-family output.
 *
 * @return 0 on success, -1 on allocation failure (partially allocated
 *         buffers are freed later through ff_mpv_common_end())
 */
static int init_duplicate_context(MpegEncContext *s)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    /* scratch buffers are stride-dependent and allocated lazily by
     * frame_size_alloc() once the linesize is known */
    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }
    if (s->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        int16_t (*tmp)[64];
        tmp           = s->pblocks[4];
        s->pblocks[4] = s->pblocks[5];
        s->pblocks[5] = tmp;
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        /* offset past the guard row so [-1] accesses stay in bounds */
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_mpv_common_end()
}
820
/**
 * Free the per-slice-thread buffers allocated by init_duplicate_context()
 * and frame_size_alloc(); safe to call with s == NULL.
 */
static void free_duplicate_context(MpegEncContext *s)
{
    if (!s)
        return;

    av_freep(&s->edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    /* these aliased the scratchpad allocation just freed above */
    s->me.temp =
    s->rd_scratchpad =
    s->b_scratchpad =
    s->obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}
840
/**
 * Copy the per-thread fields (buffers, MB range, bit writer, statistics)
 * from src into bak so they survive the full-struct memcpy done by
 * ff_update_duplicate_context().
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
867
/**
 * Synchronize a slice-thread context with the main context: copy the whole
 * struct from src, then restore dst's own per-thread buffers and fix up the
 * pointers that must reference dst's storage.
 *
 * @return 0 on success, a negative error code on scratch-buffer allocation
 *         failure
 */
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own block array, not src's */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        int16_t (*tmp)[64];
        tmp             = dst->pblocks[4];
        dst->pblocks[4] = dst->pblocks[5];
        dst->pblocks[5] = tmp;
    }
    if (!dst->edge_emu_buffer &&
        (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
    return 0;
}
897
898 int ff_mpeg_update_thread_context(AVCodecContext *dst,
899 const AVCodecContext *src)
900 {
901 int i, ret;
902 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
903
904 if (dst == src || !s1->context_initialized)
905 return 0;
906
907 // FIXME can parameters change on I-frames?
908 // in that case dst may need a reinit
909 if (!s->context_initialized) {
910 int err;
911 memcpy(s, s1, sizeof(MpegEncContext));
912
913 s->avctx = dst;
914 s->bitstream_buffer = NULL;
915 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
916
917 ff_mpv_idct_init(s);
918 if ((err = ff_mpv_common_init(s)) < 0)
919 return err;
920 }
921
922 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
923 int err;
924 s->context_reinit = 0;
925 s->height = s1->height;
926 s->width = s1->width;
927 if ((err = ff_mpv_common_frame_size_change(s)) < 0)
928 return err;
929 }
930
931 s->avctx->coded_height = s1->avctx->coded_height;
932 s->avctx->coded_width = s1->avctx->coded_width;
933 s->avctx->width = s1->avctx->width;
934 s->avctx->height = s1->avctx->height;
935
936 s->coded_picture_number = s1->coded_picture_number;
937 s->picture_number = s1->picture_number;
938
939 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
940 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
941 if (s1->picture[i].f->buf[0] &&
942 (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
943 return ret;
944 }
945
946 #define UPDATE_PICTURE(pic)\
947 do {\
948 ff_mpeg_unref_picture(s->avctx, &s->pic);\
949 if (s1->pic.f->buf[0])\
950 ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
951 else\
952 ret = update_picture_tables(&s->pic, &s1->pic);\
953 if (ret < 0)\
954 return ret;\
955 } while (0)
956
957 UPDATE_PICTURE(current_picture);
958 UPDATE_PICTURE(last_picture);
959 UPDATE_PICTURE(next_picture);
960
961 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
962 ((pic && pic >= old_ctx->picture && \
963 pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
964 &new_ctx->picture[pic - old_ctx->picture] : NULL)
965
966 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
967 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
968 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
969
970 // Error/bug resilience
971 s->next_p_frame_damaged = s1->next_p_frame_damaged;
972 s->workaround_bugs = s1->workaround_bugs;
973
974 // MPEG4 timing info
975 memcpy(&s->last_time_base, &s1->last_time_base,
976 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
977 (char *) &s1->last_time_base);
978
979 // B-frame info
980 s->max_b_frames = s1->max_b_frames;
981 s->low_delay = s1->low_delay;
982 s->droppable = s1->droppable;
983
984 // DivX handling (doesn't work)
985 s->divx_packed = s1->divx_packed;
986
987 if (s1->bitstream_buffer) {
988 if (s1->bitstream_buffer_size +
989 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
990 av_fast_malloc(&s->bitstream_buffer,
991 &s->allocated_bitstream_buffer_size,
992 s1->allocated_bitstream_buffer_size);
993 s->bitstream_buffer_size = s1->bitstream_buffer_size;
994 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
995 s1->bitstream_buffer_size);
996 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
997 FF_INPUT_BUFFER_PADDING_SIZE);
998 }
999
1000 // linesize dependend scratch buffer allocation
1001 if (!s->edge_emu_buffer)
1002 if (s1->linesize) {
1003 if (frame_size_alloc(s, s1->linesize) < 0) {
1004 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1005 "scratch buffers.\n");
1006 return AVERROR(ENOMEM);
1007 }
1008 } else {
1009 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1010 "be allocated due to unknown size.\n");
1011 return AVERROR_BUG;
1012 }
1013
1014 // MPEG2/interlacing info
1015 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1016 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1017
1018 if (!s1->first_field) {
1019 s->last_pict_type = s1->pict_type;
1020 if (s1->current_picture_ptr)
1021 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1022 }
1023
1024 return 0;
1025 }
1026
/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
void ff_mpv_common_defaults(MpegEncContext *s)
{
    /* MPEG-1 style DC scaling and identity chroma qscale by default;
     * codec-specific init overrides these where needed */
    s->y_dc_scale_table      =
    s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table   = ff_default_chroma_qscale_table;
    s->progressive_frame     = 1;
    s->progressive_sequence  = 1;
    s->picture_structure     = PICT_FRAME;

    s->coded_picture_number  = 0;
    s->picture_number        = 0;

    s->f_code                = 1;
    s->b_code                = 1;

    s->slice_context_count   = 1;
}
1050
/**
 * Set the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_mpv_decode_defaults(MpegEncContext *s)
{
    /* currently identical to the common defaults */
    ff_mpv_common_defaults(s);
}
1060
/**
 * Initialize the error-resilience context from the current geometry and
 * tables of s, allocate its working buffers and install the macroblock
 * reconstruction callback.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int init_er(MpegEncContext *s)
{
    ERContext *er = &s->er;
    int mb_array_size = s->mb_height * s->mb_stride;
    int i;

    er->avctx       = s->avctx;

    /* mirror the geometry of the decoding context */
    er->mb_index2xy = s->mb_index2xy;
    er->mb_num      = s->mb_num;
    er->mb_width    = s->mb_width;
    er->mb_height   = s->mb_height;
    er->mb_stride   = s->mb_stride;
    er->b8_stride   = s->b8_stride;

    er->er_temp_buffer     = av_malloc(s->mb_height * s->mb_stride);
    er->error_status_table = av_mallocz(mb_array_size);
    if (!er->er_temp_buffer || !er->error_status_table)
        goto fail;

    er->mbskip_table  = s->mbskip_table;
    er->mbintra_table = s->mbintra_table;

    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
        er->dc_val[i] = s->dc_val[i];

    er->decode_mb = mpeg_er_decode_mb;
    er->opaque    = s;

    return 0;
fail:
    av_freep(&er->er_temp_buffer);
    av_freep(&er->error_status_table);
    return AVERROR(ENOMEM);
}
1096
/**
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
 *
 * Derives the macroblock geometry from s->width/s->height (s->mb_height must
 * already be set by the caller) and allocates all per-resolution tables.
 * On failure the partially allocated tables are left for the caller to free
 * (ff_mpv_common_end() / free_context_frame() release everything).
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    /* strides are one element wider than the picture so that predictors
     * left of column 0 can be addressed without special-casing */
    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    /* MV tables carry two guard rows plus one extra element */
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overriden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num = s->mb_width * s->mb_height;

    /* wrap (row stride) used when walking block_index[]: 8x8 luma blocks
     * use the b8 stride, the two chroma blocks the macroblock stride */
    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    /* element counts for the luma (8x8 granularity) and chroma planes */
    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                      fail); // error ressilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] =
        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables; the usable pointers are offset past the
         * guard row/column so index (0,0) is a valid macroblock. */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
                                   s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
                                   s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
                          sizeof(uint16_t), fail); // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
                          sizeof(int), fail);

        /* rate-control scratch; plain FF_ALLOC (no zeroing needed) */
        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);

    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->avctx->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
                                  mb_array_size * 2 * sizeof(uint8_t), fail);
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
                                  mv_table_size * 2 * sizeof(int16_t), fail);
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
                                            + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
                              mb_array_size * 2 * sizeof(uint8_t), fail);
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
                          mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
                          mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
                          yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 is the DC predictor reset value (128 << 3) */
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for a quicker mpeg4 slice_end detection
    // NOTE(review): the allocation above over-allocates by 2, not 1 —
    // confirm whether the extra byte is intentional slack.

    return init_er(s);
fail:
    return AVERROR(ENOMEM);
}
1235
1236 /**
1237 * init common structure for both encoder and decoder.
1238 * this assumes that some variables like width/height are already set
1239 */
1240 av_cold int ff_mpv_common_init(MpegEncContext *s)
1241 {
1242 int i;
1243 int nb_slices = (HAVE_THREADS &&
1244 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1245 s->avctx->thread_count : 1;
1246
1247 if (s->encoding && s->avctx->slices)
1248 nb_slices = s->avctx->slices;
1249
1250 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1251 s->mb_height = (s->height + 31) / 32 * 2;
1252 else
1253 s->mb_height = (s->height + 15) / 16;
1254
1255 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1256 av_log(s->avctx, AV_LOG_ERROR,
1257 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1258 return -1;
1259 }
1260
1261 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1262 int max_slices;
1263 if (s->mb_height)
1264 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1265 else
1266 max_slices = MAX_THREADS;
1267 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1268 " reducing to %d\n", nb_slices, max_slices);
1269 nb_slices = max_slices;
1270 }
1271
1272 if ((s->width || s->height) &&
1273 av_image_check_size(s->width, s->height, 0, s->avctx))
1274 return -1;
1275
1276 dct_init(s);
1277
1278 /* set chroma shifts */
1279 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1280 &s->chroma_x_shift,
1281 &s->chroma_y_shift);
1282
1283 /* convert fourcc to upper case */
1284 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1285
1286 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1287 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1288 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1289 s->picture[i].f = av_frame_alloc();
1290 if (!s->picture[i].f)
1291 goto fail;
1292 }
1293 memset(&s->next_picture, 0, sizeof(s->next_picture));
1294 memset(&s->last_picture, 0, sizeof(s->last_picture));
1295 memset(&s->current_picture, 0, sizeof(s->current_picture));
1296 memset(&s->new_picture, 0, sizeof(s->new_picture));
1297 s->next_picture.f = av_frame_alloc();
1298 if (!s->next_picture.f)
1299 goto fail;
1300 s->last_picture.f = av_frame_alloc();
1301 if (!s->last_picture.f)
1302 goto fail;
1303 s->current_picture.f = av_frame_alloc();
1304 if (!s->current_picture.f)
1305 goto fail;
1306 s->new_picture.f = av_frame_alloc();
1307 if (!s->new_picture.f)
1308 goto fail;
1309
1310 if (s->width && s->height) {
1311 if (init_context_frame(s))
1312 goto fail;
1313
1314 s->parse_context.state = -1;
1315 }
1316
1317 s->context_initialized = 1;
1318 s->thread_context[0] = s;
1319
1320 if (s->width && s->height) {
1321 if (nb_slices > 1) {
1322 for (i = 1; i < nb_slices; i++) {
1323 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1324 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1325 }
1326
1327 for (i = 0; i < nb_slices; i++) {
1328 if (init_duplicate_context(s->thread_context[i]) < 0)
1329 goto fail;
1330 s->thread_context[i]->start_mb_y =
1331 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1332 s->thread_context[i]->end_mb_y =
1333 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1334 }
1335 } else {
1336 if (init_duplicate_context(s) < 0)
1337 goto fail;
1338 s->start_mb_y = 0;
1339 s->end_mb_y = s->mb_height;
1340 }
1341 s->slice_context_count = nb_slices;
1342 }
1343
1344 return 0;
1345 fail:
1346 ff_mpv_common_end(s);
1347 return -1;
1348 }
1349
1350 /**
1351 * Frees and resets MpegEncContext fields depending on the resolution.
1352 * Is used during resolution changes to avoid a full reinitialization of the
1353 * codec.
1354 */
1355 static void free_context_frame(MpegEncContext *s)
1356 {
1357 int i, j, k;
1358
1359 av_freep(&s->mb_type);
1360 av_freep(&s->p_mv_table_base);
1361 av_freep(&s->b_forw_mv_table_base);
1362 av_freep(&s->b_back_mv_table_base);
1363 av_freep(&s->b_bidir_forw_mv_table_base);
1364 av_freep(&s->b_bidir_back_mv_table_base);
1365 av_freep(&s->b_direct_mv_table_base);
1366 s->p_mv_table = NULL;
1367 s->b_forw_mv_table = NULL;
1368 s->b_back_mv_table = NULL;
1369 s->b_bidir_forw_mv_table = NULL;
1370 s->b_bidir_back_mv_table = NULL;
1371 s->b_direct_mv_table = NULL;
1372 for (i = 0; i < 2; i++) {
1373 for (j = 0; j < 2; j++) {
1374 for (k = 0; k < 2; k++) {
1375 av_freep(&s->b_field_mv_table_base[i][j][k]);
1376 s->b_field_mv_table[i][j][k] = NULL;
1377 }
1378 av_freep(&s->b_field_select_table[i][j]);
1379 av_freep(&s->p_field_mv_table_base[i][j]);
1380 s->p_field_mv_table[i][j] = NULL;
1381 }
1382 av_freep(&s->p_field_select_table[i]);
1383 }
1384
1385 av_freep(&s->dc_val_base);
1386 av_freep(&s->coded_block_base);
1387 av_freep(&s->mbintra_table);
1388 av_freep(&s->cbp_table);
1389 av_freep(&s->pred_dir_table);
1390
1391 av_freep(&s->mbskip_table);
1392
1393 av_freep(&s->er.error_status_table);
1394 av_freep(&s->er.er_temp_buffer);
1395 av_freep(&s->mb_index2xy);
1396 av_freep(&s->lambda_table);
1397 av_freep(&s->cplx_tab);
1398 av_freep(&s->bits_tab);
1399
1400 s->linesize = s->uvlinesize = 0;
1401 }
1402
1403 int ff_mpv_common_frame_size_change(MpegEncContext *s)
1404 {
1405 int i, err = 0;
1406
1407 if (s->slice_context_count > 1) {
1408 for (i = 0; i < s->slice_context_count; i++) {
1409 free_duplicate_context(s->thread_context[i]);
1410 }
1411 for (i = 1; i < s->slice_context_count; i++) {
1412 av_freep(&s->thread_context[i]);
1413 }
1414 } else
1415 free_duplicate_context(s);
1416
1417 free_context_frame(s);
1418
1419 if (s->picture)
1420 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1421 s->picture[i].needs_realloc = 1;
1422 }
1423
1424 s->last_picture_ptr =
1425 s->next_picture_ptr =
1426 s->current_picture_ptr = NULL;
1427
1428 // init
1429 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1430 s->mb_height = (s->height + 31) / 32 * 2;
1431 else
1432 s->mb_height = (s->height + 15) / 16;
1433
1434 if ((s->width || s->height) &&
1435 (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1436 goto fail;
1437
1438 if ((err = init_context_frame(s)))
1439 goto fail;
1440
1441 s->thread_context[0] = s;
1442
1443 if (s->width && s->height) {
1444 int nb_slices = s->slice_context_count;
1445 if (nb_slices > 1) {
1446 for (i = 1; i < nb_slices; i++) {
1447 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1448 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1449 }
1450
1451 for (i = 0; i < nb_slices; i++) {
1452 if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1453 goto fail;
1454 s->thread_context[i]->start_mb_y =
1455 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1456 s->thread_context[i]->end_mb_y =
1457 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1458 }
1459 } else {
1460 if (init_duplicate_context(s) < 0)
1461 goto fail;
1462 s->start_mb_y = 0;
1463 s->end_mb_y = s->mb_height;
1464 }
1465 s->slice_context_count = nb_slices;
1466 }
1467
1468 return 0;
1469 fail:
1470 ff_mpv_common_end(s);
1471 return err;
1472 }
1473
1474 /* init common structure for both encoder and decoder */
1475 void ff_mpv_common_end(MpegEncContext *s)
1476 {
1477 int i;
1478
1479 if (s->slice_context_count > 1) {
1480 for (i = 0; i < s->slice_context_count; i++) {
1481 free_duplicate_context(s->thread_context[i]);
1482 }
1483 for (i = 1; i < s->slice_context_count; i++) {
1484 av_freep(&s->thread_context[i]);
1485 }
1486 s->slice_context_count = 1;
1487 } else free_duplicate_context(s);
1488
1489 av_freep(&s->parse_context.buffer);
1490 s->parse_context.buffer_size = 0;
1491
1492 av_freep(&s->bitstream_buffer);
1493 s->allocated_bitstream_buffer_size = 0;
1494
1495 if (s->picture) {
1496 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1497 ff_free_picture_tables(&s->picture[i]);
1498 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1499 av_frame_free(&s->picture[i].f);
1500 }
1501 }
1502 av_freep(&s->picture);
1503 ff_free_picture_tables(&s->last_picture);
1504 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1505 av_frame_free(&s->last_picture.f);
1506 ff_free_picture_tables(&s->current_picture);
1507 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1508 av_frame_free(&s->current_picture.f);
1509 ff_free_picture_tables(&s->next_picture);
1510 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1511 av_frame_free(&s->next_picture.f);
1512 ff_free_picture_tables(&s->new_picture);
1513 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1514 av_frame_free(&s->new_picture.f);
1515
1516 free_context_frame(s);
1517
1518 s->context_initialized = 0;
1519 s->last_picture_ptr =
1520 s->next_picture_ptr =
1521 s->current_picture_ptr = NULL;
1522 s->linesize = s->uvlinesize = 0;
1523 }
1524
1525 static void release_unused_pictures(AVCodecContext *avctx, Picture *picture)
1526 {
1527 int i;
1528
1529 /* release non reference frames */
1530 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1531 if (!picture[i].reference)
1532 ff_mpeg_unref_picture(avctx, &picture[i]);
1533 }
1534 }
1535
1536 static inline int pic_is_unused(Picture *pic)
1537 {
1538 if (!pic->f->buf[0])
1539 return 1;
1540 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1541 return 1;
1542 return 0;
1543 }
1544
1545 static int find_unused_picture(Picture *picture, int shared)
1546 {
1547 int i;
1548
1549 if (shared) {
1550 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1551 if (!picture[i].f->buf[0])
1552 return i;
1553 }
1554 } else {
1555 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1556 if (pic_is_unused(&picture[i]))
1557 return i;
1558 }
1559 }
1560
1561 return AVERROR_INVALIDDATA;
1562 }
1563
1564 int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
1565 {
1566 int ret = find_unused_picture(picture, shared);
1567
1568 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1569 if (picture[ret].needs_realloc) {
1570 picture[ret].needs_realloc = 0;
1571 ff_free_picture_tables(&picture[ret]);
1572 ff_mpeg_unref_picture(avctx, &picture[ret]);
1573 }
1574 }
1575 return ret;
1576 }
1577
/**
 * generic function called after decoding
 * the header and before a frame is decoded.
 *
 * Selects or allocates the Picture the frame will be decoded into,
 * allocates dummy reference frames when needed, and picks the dequantizer
 * functions for the codec in use.
 * @return 0 on success, a negative value on error.
 */
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i, ret;
    Picture *pic;
    s->mb_skipped = 0;

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    /* release forgotten pictures */
    /* if (mpeg124/h263) */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (&s->picture[i] != s->last_picture_ptr &&
            &s->picture[i] != s->next_picture_ptr &&
            s->picture[i].reference && !s->picture[i].needs_realloc) {
            /* with frame threading another thread may legitimately still
             * reference the picture, so only warn in the single-thread case */
            if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                av_log(avctx, AV_LOG_ERROR,
                       "releasing zombie picture\n");
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
        }
    }

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);

    release_unused_pictures(s->avctx, s->picture);

    if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
        // we already have a unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
    } else {
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        pic = &s->picture[i];
    }

    /* non-droppable, non-B pictures become references (both fields) */
    pic->reference = 0;
    if (!s->droppable) {
        if (s->pict_type != AV_PICTURE_TYPE_B)
            pic->reference = 3;
    }

    pic->f->coded_picture_number = s->coded_picture_number++;

    if (ff_alloc_picture(s, pic, 0) < 0)
        return -1;

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f->top_field_first = s->top_field_first;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        /* for field pictures, derive field order from which field we
         * decoded first */
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f->top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    }
    s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
                                                 !s->progressive_sequence;
    s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f->pict_type = s->pict_type;
    // if (s->avctx->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* advance the reference chain: next becomes last, current becomes next */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr    ? s->last_picture_ptr->f->data[0]    : NULL,
            s->next_picture_ptr    ? s->next_picture_ptr->f->data[0]    : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
            s->pict_type, s->droppable);

    /* missing last reference: synthesize a gray dummy frame so motion
     * compensation has something to read from */
    if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
        (s->pict_type != AV_PICTURE_TYPE_I ||
         s->picture_structure != PICT_FRAME)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");
        else if (s->picture_structure != PICT_FRAME)
            av_log(avctx, AV_LOG_INFO,
                   "allocate dummy last picture for field based first keyframe\n");

        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->last_picture_ptr = &s->picture[i];

        s->last_picture_ptr->reference   = 3;
        s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;

        if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }

        /* luma 0 / chroma 0x80 gives a mid-gray frame */
        memset(s->last_picture_ptr->f->data[0], 0,
               avctx->height * s->last_picture_ptr->f->linesize[0]);
        memset(s->last_picture_ptr->f->data[1], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f->linesize[1]);
        memset(s->last_picture_ptr->f->data[2], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f->linesize[2]);

        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    }
    /* B frames additionally need a forward (next) reference */
    if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->next_picture_ptr = &s->picture[i];

        s->next_picture_ptr->reference   = 3;
        s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;

        if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
    }

    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->last_picture);
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->next_picture);
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    if (s->pict_type != AV_PICTURE_TYPE_I &&
        !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
        av_log(s, AV_LOG_ERROR,
               "Non-reference picture received and no reference available\n");
        return AVERROR_INVALIDDATA;
    }

    /* field pictures: offset into the correct field and double the
     * line strides so one field is addressed as a frame */
    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i]    *= 2;
            s->next_picture.f->linesize[i]    *= 2;
        }
    }

    /* set dequantizer, we can't do it during init as
     * it might change for mpeg4 and we can't do it in the header
     * decode as init is not called for mpeg4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */

    return 0;
}
1790
/* called after a frame has been decoded. */
void ff_mpv_frame_end(MpegEncContext *s)
{
#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
    /* redraw edges for the frame if decoding didn't complete */
    // just to make sure that all data is rendered.
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */
        /* NOTE: when XVMC is compiled in, the dangling "else" above attaches
         * to this call, so emms_c() is skipped for XVMC-accelerated frames */

    emms_c();

    /* tell waiting frame threads that this reference is fully decoded */
    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}
1809
/**
 * Print debugging info for the given picture.
 *
 * Depending on avctx->debug flags, dumps per-macroblock skip counts
 * (FF_DEBUG_SKIP), quantizer values (FF_DEBUG_QP) and a two-character
 * macroblock-type legend (FF_DEBUG_MB_TYPE) as an ASCII grid.
 * Does nothing for hwaccel-decoded pictures or when the MB type table
 * is missing.
 */
void ff_print_debug_info(MpegEncContext *s, Picture *p)
{
    AVFrame *pict;
    if (s->avctx->hwaccel || !p || !p->mb_type)
        return;
    pict = p->f;

    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
        case AV_PICTURE_TYPE_I:
            av_log(s->avctx,AV_LOG_DEBUG,"I\n");
            break;
        case AV_PICTURE_TYPE_P:
            av_log(s->avctx,AV_LOG_DEBUG,"P\n");
            break;
        case AV_PICTURE_TYPE_B:
            av_log(s->avctx,AV_LOG_DEBUG,"B\n");
            break;
        case AV_PICTURE_TYPE_S:
            av_log(s->avctx,AV_LOG_DEBUG,"S\n");
            break;
        case AV_PICTURE_TYPE_SI:
            av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
            break;
        case AV_PICTURE_TYPE_SP:
            av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
            break;
        }
        for (y = 0; y < s->mb_height; y++) {
            for (x = 0; x < s->mb_width; x++) {
                if (s->avctx->debug & FF_DEBUG_SKIP) {
                    /* consecutive-skip count, clamped to a single digit */
                    int count = s->mbskip_table[x + y * s->mb_stride];
                    if (count > 9)
                        count = 9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if (s->avctx->debug & FF_DEBUG_QP) {
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
                           p->qscale_table[x + y * s->mb_stride]);
                }
                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = p->mb_type[x + y * s->mb_stride];
                    // Type & MV direction
                    // P=PCM A=intra+acpred i=intra4x4 I=intra16x16
                    // d/D=direct g/G=GMC S=skip >=fwd <=bwd X=bidir
                    if (IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else {
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    // segmentation
                    // +=8x8 -=16x8 |=8x16 (space)=16x16/intra ?=other
                    if (IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");


                    // "=" marks interlaced DCT/MC macroblocks
                    if (IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }
}
1909
1910 /**
1911 * find the lowest MB row referenced in the MVs
1912 */
1913 int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
1914 {
1915 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1916 int my, off, i, mvs;
1917
1918 if (s->picture_structure != PICT_FRAME || s->mcsel)
1919 goto unhandled;
1920
1921 switch (s->mv_type) {
1922 case MV_TYPE_16X16:
1923 mvs = 1;
1924 break;
1925 case MV_TYPE_16X8:
1926 mvs = 2;
1927 break;
1928 case MV_TYPE_8X8:
1929 mvs = 4;
1930 break;
1931 default:
1932 goto unhandled;
1933 }
1934
1935 for (i = 0; i < mvs; i++) {
1936 my = s->mv[dir][i][1]<<qpel_shift;
1937 my_max = FFMAX(my_max, my);
1938 my_min = FFMIN(my_min, my);
1939 }
1940
1941 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1942
1943 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1944 unhandled:
1945 return s->mb_height-1;
1946 }
1947
/* put block[] to dest[] */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    /* dequantize the intra block in place, then overwrite (not add to)
     * the destination with the IDCT result */
    s->dct_unquantize_intra(s, block, i, qscale);
    s->idsp.idct_put(dest, line_size, block);
}
1955
1956 /* add block[] to dest[] */
1957 static inline void add_dct(MpegEncContext *s,
1958 int16_t *block, int i, uint8_t *dest, int line_size)
1959 {
1960 if (s->block_last_index[i] >= 0) {
1961 s->idsp.idct_add(dest, line_size, block);
1962 }
1963 }
1964
1965 static inline void add_dequant_dct(MpegEncContext *s,
1966 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1967 {
1968 if (s->block_last_index[i] >= 0) {
1969 s->dct_unquantize_inter(s, block, i, qscale);
1970
1971 s->idsp.idct_add(dest, line_size, block);
1972 }
1973 }
1974
1975 /**
1976 * Clean dc, ac, coded_block for the current non-intra MB.
1977 */
1978 void ff_clean_intra_table_entries(MpegEncContext *s)
1979 {
1980 int wrap = s->b8_stride;
1981 int xy = s->block_index[0];
1982
1983 s->dc_val[0][xy ] =
1984 s->dc_val[0][xy + 1 ] =
1985 s->dc_val[0][xy + wrap] =
1986 s->dc_val[0][xy + 1 + wrap] = 1024;
1987 /* ac pred */
1988 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1989 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1990 if (s->msmpeg4_version>=3) {
1991 s->coded_block[xy ] =
1992 s->coded_block[xy + 1 ] =
1993 s->coded_block[xy + wrap] =
1994 s->coded_block[xy + 1 + wrap] = 0;
1995 }
1996 /* chroma */
1997 wrap = s->mb_stride;
1998 xy = s->mb_x + s->mb_y * wrap;
1999 s->dc_val[1][xy] =
2000 s->dc_val[2][xy] = 1024;
2001 /* ac pred */
2002 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2003 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2004
2005 s->mbintra_table[xy]= 0;
2006 }
2007
2008 /* generic function called after a macroblock has been parsed by the
2009 decoder or after it has been encoded by the encoder.
2010
2011 Important variables used:
2012 s->mb_intra : true if intra macroblock
2013 s->mv_dir : motion vector direction
2014 s->mv_type : motion vector type
2015 s->mv : motion vector
2016 s->interlaced_dct : true if interlaced dct used (mpeg2)
2017 */
static av_always_inline
void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                            int is_mpeg12)
{
    /* Reconstruct one macroblock into the current picture (or a
     * scratch buffer for non-displayed B frames): motion compensation
     * plus dequant/IDCT of the residual, then optional copy-back.
     * is_mpeg12 is a compile-time constant in the !CONFIG_SMALL build
     * so non-MPEG-1/2 branches can be eliminated by the compiler. */
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
    /* XvMC hardware acceleration renders the macroblock itself;
     * nothing to do in software. */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* print DCT coefficients */
       int i,j;
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                      block[i][s->idsp.idct_permutation[j]]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            /* H.263-style prediction: only reset if the co-located MB
             * of the previous frame was intra-coded. */
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            /* reset DC predictors to the mid-range value for the
             * configured DC precision */
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        /* remember that this MB was intra so the predictors get
         * cleaned when it is inter-coded in a later frame */
        s->mbintra_table[mb_xy]=1;

    /* Reconstruct pixels unless we are encoding and the pixels are not
     * needed (no PSNR requested, intra-only/B picture, non-RD mode
     * decision). */
    if ((s->avctx->flags & CODEC_FLAG_PSNR) ||
        !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
          s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f->linesize[1];
        /* B pictures without draw_horiz_band are never displayed from
         * here, so they can be reconstructed into a scratch buffer */
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
        const int block_size = 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        /* With interlaced DCT the 8x8 blocks cover alternating lines:
         * double the stride and offset the lower blocks by one line
         * instead of eight. */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){

                /* with frame threading, wait until the reference rows
                 * this MB's vectors point at have been decoded */
                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->tf,
                                                 ff_mpv_lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->tf,
                                                 ff_mpv_lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                op_qpix= s->me.qpel_put;
                if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                    op_pix = s->hdsp.put_pixels_tab;
                }else{
                    op_pix = s->hdsp.put_no_rnd_pixels_tab;
                }
                if (s->mv_dir & MV_DIR_FORWARD) {
                    ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
                    /* if a backward pass follows, it must average with
                     * the forward prediction, not overwrite it */
                    op_pix = s->hdsp.avg_pixels_tab;
                    op_qpix= s->me.qpel_avg;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            /* Path 1: codecs whose coefficients still need dequantization
             * at this point (e.g. H.263-family, MPEG-4 with MPEG quant off). */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* 4:2:2: chroma has full vertical resolution,
                         * so it uses half the luma dct stride/offset */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            /* Path 2: coefficients are already dequantized, just IDCT+add. */
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            /* Path 3: WMV2 has its own overlap-aware block adder. */
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            /* intra MB: write (not add) the IDCT output; same
             * dequant-needed vs. already-dequantized split as above */
            if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
                s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
                s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
                    if(s->chroma_y_shift){
                        s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->idsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->idsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->idsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->idsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        /* if we reconstructed into the scratchpad, copy back to the
         * real destination */
        if(!readable){
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
2255
2256 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
2257 {
2258 #if !CONFIG_SMALL
2259 if(s->out_format == FMT_MPEG1) {
2260 mpv_decode_mb_internal(s, block, 1);
2261 } else
2262 #endif
2263 mpv_decode_mb_internal(s, block, 0);
2264 }
2265
/* Forward a band of h freshly decoded rows starting at line y to the
 * shared ff_draw_horiz_band() helper, passing this context's current
 * and last frames together with its field/low-delay state. */
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
{
    ff_draw_horiz_band(s->avctx, s->current_picture.f,
                       s->last_picture.f, y, h, s->picture_structure,
                       s->first_field, s->low_delay);
}
2272
2273 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2274 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2275 const int uvlinesize = s->current_picture.f->linesize[1];
2276 const int mb_size= 4;
2277
2278 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2279 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2280 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2281 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2282 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2283 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2284 //block_index is not used by mpeg2, so it is not affected by chroma_format
2285
2286 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
2287 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2288 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2289
2290 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2291 {
2292 if(s->picture_structure==PICT_FRAME){
2293 s->dest[0] += s->mb_y * linesize << mb_size;
2294 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2295 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2296 }else{
2297 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2298 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2299 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2300 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2301 }
2302 }
2303 }
2304
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param scantable the used scantable; this is only used to speed the permutation up, the
 *                  block is not (inverse-)permuted into scantable order!
 * @param last the index of the last non-zero coefficient in scantable order, used to speed
 *             the permutation up
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t saved[64];
    int idx;

    /* nothing beyond a (trivially placed) DC coefficient: no work */
    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* Gather the coefficients touched by scantable[0..last] and clear
     * them in place; positions are distinct, so order is irrelevant. */
    for (idx = last; idx >= 0; idx--) {
        const int pos = scantable[idx];
        saved[pos]    = block[pos];
        block[pos]    = 0;
    }

    /* Scatter them back through the permutation vector. */
    for (idx = last; idx >= 0; idx--) {
        const int pos = scantable[idx];
        block[permutation[pos]] = saved[pos];
    }
}
2333
2334 void ff_mpeg_flush(AVCodecContext *avctx){
2335 int i;
2336 MpegEncContext *s = avctx->priv_data;
2337
2338 if (!s || !s->picture)
2339 return;
2340
2341 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2342 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2343 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2344
2345 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
2346 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
2347 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
2348
2349 s->mb_x= s->mb_y= 0;
2350
2351 s->parse_context.state= -1;
2352 s->parse_context.frame_start_found= 0;
2353 s->parse_context.overread= 0;
2354 s->parse_context.overread_index= 0;
2355 s->parse_context.index= 0;
2356 s->parse_context.last_index= 0;
2357 s->bitstream_buffer_size=0;
2358 s->pp_time=0;
2359 }
2360
/**
 * Set the quantizer scale and update the qscale-dependent variables
 * (chroma qscale and the luma/chroma DC scale factors).
 */
2364 void ff_set_qscale(MpegEncContext * s, int qscale)
2365 {
2366 if (qscale < 1)
2367 qscale = 1;
2368 else if (qscale > 31)
2369 qscale = 31;
2370
2371 s->qscale = qscale;
2372 s->chroma_qscale= s->chroma_qscale_table[qscale];
2373
2374 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2375 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2376 }
2377
2378 void ff_mpv_report_decode_progress(MpegEncContext *s)
2379 {
2380 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2381 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2382 }