mpegvideo: move ff_draw_horiz_band() to mpegutils.c
[libav.git] / libavcodec / mpegvideo.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
35 #include "avcodec.h"
36 #include "dsputil.h"
37 #include "internal.h"
38 #include "mathops.h"
39 #include "mpegutils.h"
40 #include "mpegvideo.h"
41 #include "mjpegenc.h"
42 #include "msmpeg4.h"
43 #include "xvmc_internal.h"
44 #include "thread.h"
45 #include <limits.h>
46
/* Default luma-to-chroma qscale mapping: the identity, i.e. chroma uses
 * the same quantizer scale as luma unless a codec installs its own table. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
// 0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
   0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
52
/* MPEG-1 DC scale, indexed by qscale: a constant 8 for every quantizer
 * value (MPEG-1 does not vary the DC step size with qscale). */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
64
/* MPEG-2 DC scale for intra_dc_precision == 1 (9-bit DC): constant 4. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
   4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
   4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
   4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
   4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
   4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
   4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
   4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
   4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
76
/* MPEG-2 DC scale for intra_dc_precision == 2 (10-bit DC): constant 2. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
88
/* MPEG-2 DC scale for intra_dc_precision == 3 (11-bit DC): constant 1. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
100
/* DC scale tables indexed by intra_dc_precision (0..3); entry 0 reuses
 * the MPEG-1 table (scale 8), the others scale by 4, 2 and 1. */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
107
/* Pixel-format list for codecs that only support YUV 4:2:0. */
const enum AVPixelFormat ff_pixfmt_list_420[] = {
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
112
113 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
114 int16_t *block, int n, int qscale)
115 {
116 int i, level, nCoeffs;
117 const uint16_t *quant_matrix;
118
119 nCoeffs= s->block_last_index[n];
120
121 if (n < 4)
122 block[0] = block[0] * s->y_dc_scale;
123 else
124 block[0] = block[0] * s->c_dc_scale;
125 /* XXX: only mpeg1 */
126 quant_matrix = s->intra_matrix;
127 for(i=1;i<=nCoeffs;i++) {
128 int j= s->intra_scantable.permutated[i];
129 level = block[j];
130 if (level) {
131 if (level < 0) {
132 level = -level;
133 level = (int)(level * qscale * quant_matrix[j]) >> 3;
134 level = (level - 1) | 1;
135 level = -level;
136 } else {
137 level = (int)(level * qscale * quant_matrix[j]) >> 3;
138 level = (level - 1) | 1;
139 }
140 block[j] = level;
141 }
142 }
143 }
144
145 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
146 int16_t *block, int n, int qscale)
147 {
148 int i, level, nCoeffs;
149 const uint16_t *quant_matrix;
150
151 nCoeffs= s->block_last_index[n];
152
153 quant_matrix = s->inter_matrix;
154 for(i=0; i<=nCoeffs; i++) {
155 int j= s->intra_scantable.permutated[i];
156 level = block[j];
157 if (level) {
158 if (level < 0) {
159 level = -level;
160 level = (((level << 1) + 1) * qscale *
161 ((int) (quant_matrix[j]))) >> 4;
162 level = (level - 1) | 1;
163 level = -level;
164 } else {
165 level = (((level << 1) + 1) * qscale *
166 ((int) (quant_matrix[j]))) >> 4;
167 level = (level - 1) | 1;
168 }
169 block[j] = level;
170 }
171 }
172 }
173
174 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
175 int16_t *block, int n, int qscale)
176 {
177 int i, level, nCoeffs;
178 const uint16_t *quant_matrix;
179
180 if(s->alternate_scan) nCoeffs= 63;
181 else nCoeffs= s->block_last_index[n];
182
183 if (n < 4)
184 block[0] = block[0] * s->y_dc_scale;
185 else
186 block[0] = block[0] * s->c_dc_scale;
187 quant_matrix = s->intra_matrix;
188 for(i=1;i<=nCoeffs;i++) {
189 int j= s->intra_scantable.permutated[i];
190 level = block[j];
191 if (level) {
192 if (level < 0) {
193 level = -level;
194 level = (int)(level * qscale * quant_matrix[j]) >> 3;
195 level = -level;
196 } else {
197 level = (int)(level * qscale * quant_matrix[j]) >> 3;
198 }
199 block[j] = level;
200 }
201 }
202 }
203
204 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
205 int16_t *block, int n, int qscale)
206 {
207 int i, level, nCoeffs;
208 const uint16_t *quant_matrix;
209 int sum=-1;
210
211 if(s->alternate_scan) nCoeffs= 63;
212 else nCoeffs= s->block_last_index[n];
213
214 if (n < 4)
215 block[0] = block[0] * s->y_dc_scale;
216 else
217 block[0] = block[0] * s->c_dc_scale;
218 quant_matrix = s->intra_matrix;
219 for(i=1;i<=nCoeffs;i++) {
220 int j= s->intra_scantable.permutated[i];
221 level = block[j];
222 if (level) {
223 if (level < 0) {
224 level = -level;
225 level = (int)(level * qscale * quant_matrix[j]) >> 3;
226 level = -level;
227 } else {
228 level = (int)(level * qscale * quant_matrix[j]) >> 3;
229 }
230 block[j] = level;
231 sum+=level;
232 }
233 }
234 block[63]^=sum&1;
235 }
236
237 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
238 int16_t *block, int n, int qscale)
239 {
240 int i, level, nCoeffs;
241 const uint16_t *quant_matrix;
242 int sum=-1;
243
244 if(s->alternate_scan) nCoeffs= 63;
245 else nCoeffs= s->block_last_index[n];
246
247 quant_matrix = s->inter_matrix;
248 for(i=0; i<=nCoeffs; i++) {
249 int j= s->intra_scantable.permutated[i];
250 level = block[j];
251 if (level) {
252 if (level < 0) {
253 level = -level;
254 level = (((level << 1) + 1) * qscale *
255 ((int) (quant_matrix[j]))) >> 4;
256 level = -level;
257 } else {
258 level = (((level << 1) + 1) * qscale *
259 ((int) (quant_matrix[j]))) >> 4;
260 }
261 block[j] = level;
262 sum+=level;
263 }
264 }
265 block[63]^=sum&1;
266 }
267
268 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
269 int16_t *block, int n, int qscale)
270 {
271 int i, level, qmul, qadd;
272 int nCoeffs;
273
274 assert(s->block_last_index[n]>=0);
275
276 qmul = qscale << 1;
277
278 if (!s->h263_aic) {
279 if (n < 4)
280 block[0] = block[0] * s->y_dc_scale;
281 else
282 block[0] = block[0] * s->c_dc_scale;
283 qadd = (qscale - 1) | 1;
284 }else{
285 qadd = 0;
286 }
287 if(s->ac_pred)
288 nCoeffs=63;
289 else
290 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
291
292 for(i=1; i<=nCoeffs; i++) {
293 level = block[i];
294 if (level) {
295 if (level < 0) {
296 level = level * qmul - qadd;
297 } else {
298 level = level * qmul + qadd;
299 }
300 block[i] = level;
301 }
302 }
303 }
304
305 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
306 int16_t *block, int n, int qscale)
307 {
308 int i, level, qmul, qadd;
309 int nCoeffs;
310
311 assert(s->block_last_index[n]>=0);
312
313 qadd = (qscale - 1) | 1;
314 qmul = qscale << 1;
315
316 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
317
318 for(i=0; i<=nCoeffs; i++) {
319 level = block[i];
320 if (level) {
321 if (level < 0) {
322 level = level * qmul - qadd;
323 } else {
324 level = level * qmul + qadd;
325 }
326 block[i] = level;
327 }
328 }
329 }
330
/**
 * ERContext callback used during error concealment: load one macroblock's
 * parameters into the MpegEncContext, set up the destination pointers and
 * run the normal macroblock decode path.
 */
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    MpegEncContext *s = opaque;

    s->mv_dir     = mv_dir;
    s->mv_type    = mv_type;
    s->mb_intra   = mb_intra;
    s->mb_skipped = mb_skipped;
    s->mb_x       = mb_x;
    s->mb_y       = mb_y;
    memcpy(s->mv, mv, sizeof(*mv));

    ff_init_block_index(s);
    ff_update_block_index(s);

    s->dsp.clear_blocks(s->block[0]);

    /* dest pointers: 16x16 luma MBs, chroma scaled by the subsampling shifts */
    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    /* error resilience never references a second picture here */
    assert(ref == 0);
    ff_MPV_decode_mb(s, s->block);
}
357
/**
 * Initialize the DCT-related state shared by encoder and decoder:
 * DSP/HPel/VideoDSP contexts, the C inverse quantizers (possibly
 * replaced by arch-specific versions) and the permutated scantables.
 * Always returns 0.
 */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    ff_dsputil_init(&s->dsp, s->avctx);
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    /* default C implementations; arch init below may override them */
    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    if (ARCH_ARM)
        ff_MPV_common_init_arm(s);
    if (ARCH_PPC)
        ff_MPV_common_init_ppc(s);
    if (ARCH_X86)
        ff_MPV_common_init_x86(s);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
396
/**
 * Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). me.temp / rd / b / obmc scratchpads all
 * alias me.scratchpad, so only the two owning pointers are allocated.
 *
 * @return 0 on success, AVERROR(ENOMEM) on failure
 */
static int frame_size_alloc(MpegEncContext *s, int linesize)
{
    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for halfpel / 21x21 for h264)
    // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
                      fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
                      fail)
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}
421
/**
 * Allocate a frame buffer for the given Picture, either through the
 * (possibly threaded) get_buffer path or, for the WM image/screen codecs,
 * through the default allocator. Also lazily allocates the context
 * scratch buffers once the stride is known.
 *
 * @return 0 on success, a negative value on failure
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    /* encoders need an EDGE_WIDTH border around the visible frame */
    int edges_needed = av_codec_is_encoder(s->avctx->codec);
    int r, ret;

    pic->tf.f = &pic->f;
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2) {
        if (edges_needed) {
            /* over-allocate so the edges fit inside the buffer */
            pic->f.width  = s->avctx->width  + 2 * EDGE_WIDTH;
            pic->f.height = s->avctx->height + 2 * EDGE_WIDTH;
        }

        r = ff_thread_get_buffer(s->avctx, &pic->tf,
                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
    } else {
        /* WM image/screen codecs use internal buffer dimensions; bypass
         * user-supplied get_buffer callbacks */
        pic->f.width  = s->avctx->width;
        pic->f.height = s->avctx->height;
        pic->f.format = s->avctx->pix_fmt;
        r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
    }

    if (r < 0 || !pic->f.buf[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
               r, pic->f.data[0]);
        return -1;
    }

    if (edges_needed) {
        int i;
        /* point data[] at the visible area inside the padded buffer and
         * restore the visible dimensions */
        for (i = 0; pic->f.data[i]; i++) {
            int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
                         pic->f.linesize[i] +
                         (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
            pic->f.data[i] += offset;
        }
        pic->f.width  = s->avctx->width;
        pic->f.height = s->avctx->height;
    }

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_priv_buf) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }

    /* all pictures must share the strides the context was set up with */
    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    /* first successful allocation also sizes the scratch buffers */
    if (!s->edge_emu_buffer &&
        (ret = frame_size_alloc(s, pic->f.linesize[0])) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        ff_mpeg_unref_picture(s, pic);
        return ret;
    }

    return 0;
}
503
504 void ff_free_picture_tables(Picture *pic)
505 {
506 int i;
507
508 av_buffer_unref(&pic->mb_var_buf);
509 av_buffer_unref(&pic->mc_mb_var_buf);
510 av_buffer_unref(&pic->mb_mean_buf);
511 av_buffer_unref(&pic->mbskip_table_buf);
512 av_buffer_unref(&pic->qscale_table_buf);
513 av_buffer_unref(&pic->mb_type_buf);
514
515 for (i = 0; i < 2; i++) {
516 av_buffer_unref(&pic->motion_val_buf[i]);
517 av_buffer_unref(&pic->ref_index_buf[i]);
518 }
519 }
520
/**
 * Allocate the reference-counted side-data buffers for a Picture:
 * always the mbskip/qscale/MB-type tables, plus encoder statistics when
 * encoding and motion vectors / reference indices for H.263-family
 * formats and encoding.
 *
 * @return 0 on success, AVERROR(ENOMEM) on failure (partially allocated
 *         buffers are left for the caller to free)
 */
static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
{
    /* one extra row plus one entry of padding around the MB tables */
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    int i;


    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
                                             sizeof(uint32_t));
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);

    if (s->encoding) {
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
            return AVERROR(ENOMEM);
    }

    if (s->out_format == FMT_H263 || s->encoding) {
        /* 2 components per vector, 4 entries of padding */
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
        }
    }

    return 0;
}
558
/**
 * Make every side-data buffer of the Picture writable (copying shared
 * buffers as needed) so the tables can be updated in place.
 *
 * @return 0 on success, a negative AVERROR on allocation failure
 */
static int make_tables_writable(Picture *pic)
{
    int ret, i;
#define MAKE_WRITABLE(table) \
do {\
    if (pic->table &&\
       (ret = av_buffer_make_writable(&pic->table)) < 0)\
    return ret;\
} while (0)

    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);

    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
    }

    return 0;
}
583
/**
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0.
 * Also (re)allocates or makes writable the per-picture tables and sets
 * up the convenience pointers into them.
 *
 * @return 0 on success, a negative value on failure
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    int i, ret;

    if (shared) {
        /* pixels are provided by the caller */
        assert(pic->f.data[0]);
        pic->shared = 1;
    } else {
        assert(!pic->f.buf[0]);

        if (alloc_frame_buffer(s, pic) < 0)
            return -1;

        /* the context strides are taken from the first allocation */
        s->linesize   = pic->f.linesize[0];
        s->uvlinesize = pic->f.linesize[1];
    }

    if (!pic->qscale_table_buf)
        ret = alloc_picture_tables(s, pic);
    else
        ret = make_tables_writable(pic);
    if (ret < 0)
        goto fail;

    if (s->encoding) {
        pic->mb_var    = (uint16_t*)pic->mb_var_buf->data;
        pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
        pic->mb_mean   = pic->mb_mean_buf->data;
    }

    pic->mbskip_table = pic->mbskip_table_buf->data;
    /* skip the padding row plus one entry (see alloc_picture_tables()) */
    pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;

    if (pic->motion_val_buf[0]) {
        for (i = 0; i < 2; i++) {
            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
        }
    }

    return 0;
fail:
    av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
    ff_mpeg_unref_picture(s, pic);
    ff_free_picture_tables(pic);
    return AVERROR(ENOMEM);
}
636
/**
 * Deallocate a picture: release the frame buffer, the hwaccel private
 * buffer and (when flagged for realloc) the side-data tables, then zero
 * every Picture field located after mb_mean; fields up to and including
 * mb_mean keep their values.
 */
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
{
    /* first byte to clear: everything past mb_mean in the struct layout */
    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);

    pic->tf.f = &pic->f;
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->tf);
    else
        av_frame_unref(&pic->f);

    av_buffer_unref(&pic->hwaccel_priv_buf);

    if (pic->needs_realloc)
        ff_free_picture_tables(pic);

    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}
661
/**
 * Make dst's side-data buffers reference the same underlying buffers as
 * src's (re-referencing only when they differ), then copy the derived
 * table pointers.
 *
 * @return 0 on success, AVERROR(ENOMEM) if a new reference could not be
 *         created (dst's tables are freed in that case)
 */
static int update_picture_tables(Picture *dst, Picture *src)
{
    int i;

#define UPDATE_TABLE(table)\
do {\
    if (src->table &&\
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
        av_buffer_unref(&dst->table);\
        dst->table = av_buffer_ref(src->table);\
        if (!dst->table) {\
            ff_free_picture_tables(dst);\
            return AVERROR(ENOMEM);\
        }\
    }\
} while (0)

    UPDATE_TABLE(mb_var_buf);
    UPDATE_TABLE(mc_mb_var_buf);
    UPDATE_TABLE(mb_mean_buf);
    UPDATE_TABLE(mbskip_table_buf);
    UPDATE_TABLE(qscale_table_buf);
    UPDATE_TABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        UPDATE_TABLE(motion_val_buf[i]);
        UPDATE_TABLE(ref_index_buf[i]);
    }

    /* the plain pointers point into the buffers just referenced */
    dst->mb_var        = src->mb_var;
    dst->mc_mb_var     = src->mc_mb_var;
    dst->mb_mean       = src->mb_mean;
    dst->mbskip_table  = src->mbskip_table;
    dst->qscale_table  = src->qscale_table;
    dst->mb_type       = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    return 0;
}
703
704 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
705 {
706 int ret;
707
708 av_assert0(!dst->f.buf[0]);
709 av_assert0(src->f.buf[0]);
710
711 src->tf.f = &src->f;
712 dst->tf.f = &dst->f;
713 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
714 if (ret < 0)
715 goto fail;
716
717 ret = update_picture_tables(dst, src);
718 if (ret < 0)
719 goto fail;
720
721 if (src->hwaccel_picture_private) {
722 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
723 if (!dst->hwaccel_priv_buf)
724 goto fail;
725 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
726 }
727
728 dst->field_picture = src->field_picture;
729 dst->mb_var_sum = src->mb_var_sum;
730 dst->mc_mb_var_sum = src->mc_mb_var_sum;
731 dst->b_frame_score = src->b_frame_score;
732 dst->needs_realloc = src->needs_realloc;
733 dst->reference = src->reference;
734 dst->shared = src->shared;
735
736 return 0;
737 fail:
738 ff_mpeg_unref_picture(s, dst);
739 return ret;
740 }
741
742 static void exchange_uv(MpegEncContext *s)
743 {
744 int16_t (*tmp)[64];
745
746 tmp = s->pblocks[4];
747 s->pblocks[4] = s->pblocks[5];
748 s->pblocks[5] = tmp;
749 }
750
/**
 * Allocate the per-slice-context buffers: motion-estimation maps (when
 * encoding), the DCT block storage and, for H.263-family formats, the AC
 * prediction values. Linesize-dependent scratch buffers are only zeroed
 * here and allocated lazily by frame_size_alloc().
 *
 * @return 0 on success, -1 on failure (partial allocations are freed
 *         through ff_MPV_common_end())
 */
static int init_duplicate_context(MpegEncContext *s)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }
    /* VCR2 streams use swapped U/V block ordering */
    if (s->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(s);

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_MPV_common_end()
}
797
/**
 * Free the per-slice-context buffers allocated by init_duplicate_context()
 * and frame_size_alloc(). Safe to call with NULL.
 */
static void free_duplicate_context(MpegEncContext *s)
{
    if (s == NULL)
        return;

    av_freep(&s->edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    /* these aliased me.scratchpad; clear the now-dangling copies */
    s->me.temp =
    s->rd_scratchpad =
    s->b_scratchpad =
    s->obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}
817
/**
 * Copy the per-thread fields (scratch buffers, block storage, slice range,
 * bitstream writer and AC-prediction state) from *src into *bak, so that
 * ff_update_duplicate_context() can overwrite a context wholesale and then
 * restore these fields.
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
844
/**
 * Synchronize a slice-thread context with the master: copy *src into *dst
 * wholesale while preserving dst's own per-thread buffers, then fix up the
 * block pointers and (re)allocate scratch buffers if needed.
 *
 * @return 0 on success, a negative AVERROR on scratch allocation failure
 */
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own block storage, not src's */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (dst->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(dst);
    if (!dst->edge_emu_buffer &&
        (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
    return 0;
}
869
870 int ff_mpeg_update_thread_context(AVCodecContext *dst,
871 const AVCodecContext *src)
872 {
873 int i, ret;
874 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
875
876 if (dst == src || !s1->context_initialized)
877 return 0;
878
879 // FIXME can parameters change on I-frames?
880 // in that case dst may need a reinit
881 if (!s->context_initialized) {
882 memcpy(s, s1, sizeof(MpegEncContext));
883
884 s->avctx = dst;
885 s->bitstream_buffer = NULL;
886 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
887
888 ff_MPV_common_init(s);
889 }
890
891 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
892 int err;
893 s->context_reinit = 0;
894 s->height = s1->height;
895 s->width = s1->width;
896 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
897 return err;
898 }
899
900 s->avctx->coded_height = s1->avctx->coded_height;
901 s->avctx->coded_width = s1->avctx->coded_width;
902 s->avctx->width = s1->avctx->width;
903 s->avctx->height = s1->avctx->height;
904
905 s->coded_picture_number = s1->coded_picture_number;
906 s->picture_number = s1->picture_number;
907
908 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
909 ff_mpeg_unref_picture(s, &s->picture[i]);
910 if (s1->picture[i].f.buf[0] &&
911 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
912 return ret;
913 }
914
915 #define UPDATE_PICTURE(pic)\
916 do {\
917 ff_mpeg_unref_picture(s, &s->pic);\
918 if (s1->pic.f.buf[0])\
919 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
920 else\
921 ret = update_picture_tables(&s->pic, &s1->pic);\
922 if (ret < 0)\
923 return ret;\
924 } while (0)
925
926 UPDATE_PICTURE(current_picture);
927 UPDATE_PICTURE(last_picture);
928 UPDATE_PICTURE(next_picture);
929
930 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
931 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
932 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
933
934 // Error/bug resilience
935 s->next_p_frame_damaged = s1->next_p_frame_damaged;
936 s->workaround_bugs = s1->workaround_bugs;
937
938 // MPEG4 timing info
939 memcpy(&s->last_time_base, &s1->last_time_base,
940 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
941 (char *) &s1->last_time_base);
942
943 // B-frame info
944 s->max_b_frames = s1->max_b_frames;
945 s->low_delay = s1->low_delay;
946 s->droppable = s1->droppable;
947
948 // DivX handling (doesn't work)
949 s->divx_packed = s1->divx_packed;
950
951 if (s1->bitstream_buffer) {
952 if (s1->bitstream_buffer_size +
953 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
954 av_fast_malloc(&s->bitstream_buffer,
955 &s->allocated_bitstream_buffer_size,
956 s1->allocated_bitstream_buffer_size);
957 s->bitstream_buffer_size = s1->bitstream_buffer_size;
958 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
959 s1->bitstream_buffer_size);
960 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
961 FF_INPUT_BUFFER_PADDING_SIZE);
962 }
963
964 // linesize dependend scratch buffer allocation
965 if (!s->edge_emu_buffer)
966 if (s1->linesize) {
967 if (frame_size_alloc(s, s1->linesize) < 0) {
968 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
969 "scratch buffers.\n");
970 return AVERROR(ENOMEM);
971 }
972 } else {
973 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
974 "be allocated due to unknown size.\n");
975 return AVERROR_BUG;
976 }
977
978 // MPEG2/interlacing info
979 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
980 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
981
982 if (!s1->first_field) {
983 s->last_pict_type = s1->pict_type;
984 if (s1->current_picture_ptr)
985 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
986 }
987
988 return 0;
989 }
990
991 /**
992 * Set the given MpegEncContext to common defaults
993 * (same for encoding and decoding).
994 * The changed fields will not depend upon the
995 * prior state of the MpegEncContext.
996 */
997 void ff_MPV_common_defaults(MpegEncContext *s)
998 {
999 s->y_dc_scale_table =
1000 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1001 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1002 s->progressive_frame = 1;
1003 s->progressive_sequence = 1;
1004 s->picture_structure = PICT_FRAME;
1005
1006 s->coded_picture_number = 0;
1007 s->picture_number = 0;
1008
1009 s->f_code = 1;
1010 s->b_code = 1;
1011
1012 s->slice_context_count = 1;
1013 }
1014
/**
 * Set the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    /* currently identical to the shared encoder/decoder defaults */
    ff_MPV_common_defaults(s);
}
1024
/**
 * Initialize the error-resilience context from the MpegEncContext:
 * mirror the geometry and shared tables, allocate the ER work buffers
 * and install the macroblock-decode callback.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int init_er(MpegEncContext *s)
{
    ERContext *er = &s->er;
    int mb_array_size = s->mb_height * s->mb_stride;
    int i;

    er->avctx = s->avctx;
    er->dsp   = &s->dsp;

    er->mb_index2xy = s->mb_index2xy;
    er->mb_num      = s->mb_num;
    er->mb_width    = s->mb_width;
    er->mb_height   = s->mb_height;
    er->mb_stride   = s->mb_stride;
    er->b8_stride   = s->b8_stride;

    er->er_temp_buffer     = av_malloc(s->mb_height * s->mb_stride);
    er->error_status_table = av_mallocz(mb_array_size);
    if (!er->er_temp_buffer || !er->error_status_table)
        goto fail;

    er->mbskip_table  = s->mbskip_table;
    er->mbintra_table = s->mbintra_table;

    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
        er->dc_val[i] = s->dc_val[i];

    er->decode_mb = mpeg_er_decode_mb;
    er->opaque    = s;

    return 0;
fail:
    av_freep(&er->er_temp_buffer);
    av_freep(&er->error_status_table);
    return AVERROR(ENOMEM);
}
1061
/**
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
 *
 * Derives the macroblock geometry (strides, counts, edge positions) from
 * s->width / s->height (s->mb_height must already be set by the caller),
 * then allocates every per-resolution table: encoder MV tables,
 * interlaced direct-mode tables, H.263-family cbp/pred tables, DC
 * prediction values and the intra/skip bookkeeping tables. Finishes by
 * setting up the error resilience context.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure; partially
 *         allocated tables are left for the caller's cleanup path to free.
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;      // one spare column for edge MBs
    s->b8_stride  = s->mb_width * 2 + 1;  // stride in 8x8 block units
    s->b4_stride  = s->mb_width * 4 + 1;  // stride in 4x4 block units
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overriden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    /* luma 8x8 blocks use the b8 stride, chroma blocks the MB stride */
    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                      fail); // error resilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] =
        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables; each *_base table has a border, the usable
         * pointer is offset by one MB row plus one column below */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
                                   s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
                                   s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
                          sizeof(uint16_t), fail); // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
                          sizeof(int), fail);

        /* rate-control scratch tables (not zero-initialized) */
        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);

    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
                                  mb_array_size * 2 * sizeof(uint8_t), fail);
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
                                  mv_table_size * 2 * sizeof(int16_t), fail);
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
                                            + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
                              mb_array_size * 2 * sizeof(uint8_t), fail);
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
                          mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
                          mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
                          yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        // 1024 is the neutral DC predictor (128 << 3)
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // the extra bytes allow a quicker mpeg4 slice_end detection
    // NOTE(review): code allocates +2 while the historical comment said
    // "+ 1" -- confirm why two extra bytes are needed

    return init_er(s);
fail:
    return AVERROR(ENOMEM);
}
1201
1202 /**
1203 * init common structure for both encoder and decoder.
1204 * this assumes that some variables like width/height are already set
1205 */
1206 av_cold int ff_MPV_common_init(MpegEncContext *s)
1207 {
1208 int i;
1209 int nb_slices = (HAVE_THREADS &&
1210 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1211 s->avctx->thread_count : 1;
1212
1213 if (s->encoding && s->avctx->slices)
1214 nb_slices = s->avctx->slices;
1215
1216 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1217 s->mb_height = (s->height + 31) / 32 * 2;
1218 else
1219 s->mb_height = (s->height + 15) / 16;
1220
1221 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1222 av_log(s->avctx, AV_LOG_ERROR,
1223 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1224 return -1;
1225 }
1226
1227 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1228 int max_slices;
1229 if (s->mb_height)
1230 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1231 else
1232 max_slices = MAX_THREADS;
1233 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1234 " reducing to %d\n", nb_slices, max_slices);
1235 nb_slices = max_slices;
1236 }
1237
1238 if ((s->width || s->height) &&
1239 av_image_check_size(s->width, s->height, 0, s->avctx))
1240 return -1;
1241
1242 ff_dct_common_init(s);
1243
1244 s->flags = s->avctx->flags;
1245 s->flags2 = s->avctx->flags2;
1246
1247 /* set chroma shifts */
1248 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1249 &s->chroma_x_shift,
1250 &s->chroma_y_shift);
1251
1252 /* convert fourcc to upper case */
1253 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1254
1255 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1256
1257 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1258 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1259 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1260 av_frame_unref(&s->picture[i].f);
1261 }
1262 memset(&s->next_picture, 0, sizeof(s->next_picture));
1263 memset(&s->last_picture, 0, sizeof(s->last_picture));
1264 memset(&s->current_picture, 0, sizeof(s->current_picture));
1265 av_frame_unref(&s->next_picture.f);
1266 av_frame_unref(&s->last_picture.f);
1267 av_frame_unref(&s->current_picture.f);
1268
1269 if (s->width && s->height) {
1270 if (init_context_frame(s))
1271 goto fail;
1272
1273 s->parse_context.state = -1;
1274 }
1275
1276 s->context_initialized = 1;
1277 s->thread_context[0] = s;
1278
1279 if (s->width && s->height) {
1280 if (nb_slices > 1) {
1281 for (i = 1; i < nb_slices; i++) {
1282 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1283 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1284 }
1285
1286 for (i = 0; i < nb_slices; i++) {
1287 if (init_duplicate_context(s->thread_context[i]) < 0)
1288 goto fail;
1289 s->thread_context[i]->start_mb_y =
1290 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1291 s->thread_context[i]->end_mb_y =
1292 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1293 }
1294 } else {
1295 if (init_duplicate_context(s) < 0)
1296 goto fail;
1297 s->start_mb_y = 0;
1298 s->end_mb_y = s->mb_height;
1299 }
1300 s->slice_context_count = nb_slices;
1301 }
1302
1303 return 0;
1304 fail:
1305 ff_MPV_common_end(s);
1306 return -1;
1307 }
1308
1309 /**
1310 * Frees and resets MpegEncContext fields depending on the resolution.
1311 * Is used during resolution changes to avoid a full reinitialization of the
1312 * codec.
1313 */
1314 static int free_context_frame(MpegEncContext *s)
1315 {
1316 int i, j, k;
1317
1318 av_freep(&s->mb_type);
1319 av_freep(&s->p_mv_table_base);
1320 av_freep(&s->b_forw_mv_table_base);
1321 av_freep(&s->b_back_mv_table_base);
1322 av_freep(&s->b_bidir_forw_mv_table_base);
1323 av_freep(&s->b_bidir_back_mv_table_base);
1324 av_freep(&s->b_direct_mv_table_base);
1325 s->p_mv_table = NULL;
1326 s->b_forw_mv_table = NULL;
1327 s->b_back_mv_table = NULL;
1328 s->b_bidir_forw_mv_table = NULL;
1329 s->b_bidir_back_mv_table = NULL;
1330 s->b_direct_mv_table = NULL;
1331 for (i = 0; i < 2; i++) {
1332 for (j = 0; j < 2; j++) {
1333 for (k = 0; k < 2; k++) {
1334 av_freep(&s->b_field_mv_table_base[i][j][k]);
1335 s->b_field_mv_table[i][j][k] = NULL;
1336 }
1337 av_freep(&s->b_field_select_table[i][j]);
1338 av_freep(&s->p_field_mv_table_base[i][j]);
1339 s->p_field_mv_table[i][j] = NULL;
1340 }
1341 av_freep(&s->p_field_select_table[i]);
1342 }
1343
1344 av_freep(&s->dc_val_base);
1345 av_freep(&s->coded_block_base);
1346 av_freep(&s->mbintra_table);
1347 av_freep(&s->cbp_table);
1348 av_freep(&s->pred_dir_table);
1349
1350 av_freep(&s->mbskip_table);
1351
1352 av_freep(&s->er.error_status_table);
1353 av_freep(&s->er.er_temp_buffer);
1354 av_freep(&s->mb_index2xy);
1355 av_freep(&s->lambda_table);
1356 av_freep(&s->cplx_tab);
1357 av_freep(&s->bits_tab);
1358
1359 s->linesize = s->uvlinesize = 0;
1360
1361 return 0;
1362 }
1363
1364 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1365 {
1366 int i, err = 0;
1367
1368 if (s->slice_context_count > 1) {
1369 for (i = 0; i < s->slice_context_count; i++) {
1370 free_duplicate_context(s->thread_context[i]);
1371 }
1372 for (i = 1; i < s->slice_context_count; i++) {
1373 av_freep(&s->thread_context[i]);
1374 }
1375 } else
1376 free_duplicate_context(s);
1377
1378 if ((err = free_context_frame(s)) < 0)
1379 return err;
1380
1381 if (s->picture)
1382 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1383 s->picture[i].needs_realloc = 1;
1384 }
1385
1386 s->last_picture_ptr =
1387 s->next_picture_ptr =
1388 s->current_picture_ptr = NULL;
1389
1390 // init
1391 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1392 s->mb_height = (s->height + 31) / 32 * 2;
1393 else
1394 s->mb_height = (s->height + 15) / 16;
1395
1396 if ((s->width || s->height) &&
1397 av_image_check_size(s->width, s->height, 0, s->avctx))
1398 return AVERROR_INVALIDDATA;
1399
1400 if ((err = init_context_frame(s)))
1401 goto fail;
1402
1403 s->thread_context[0] = s;
1404
1405 if (s->width && s->height) {
1406 int nb_slices = s->slice_context_count;
1407 if (nb_slices > 1) {
1408 for (i = 1; i < nb_slices; i++) {
1409 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1410 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1411 }
1412
1413 for (i = 0; i < nb_slices; i++) {
1414 if (init_duplicate_context(s->thread_context[i]) < 0)
1415 goto fail;
1416 s->thread_context[i]->start_mb_y =
1417 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1418 s->thread_context[i]->end_mb_y =
1419 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1420 }
1421 } else {
1422 if (init_duplicate_context(s) < 0)
1423 goto fail;
1424 s->start_mb_y = 0;
1425 s->end_mb_y = s->mb_height;
1426 }
1427 s->slice_context_count = nb_slices;
1428 }
1429
1430 return 0;
1431 fail:
1432 ff_MPV_common_end(s);
1433 return err;
1434 }
1435
1436 /* init common structure for both encoder and decoder */
1437 void ff_MPV_common_end(MpegEncContext *s)
1438 {
1439 int i;
1440
1441 if (s->slice_context_count > 1) {
1442 for (i = 0; i < s->slice_context_count; i++) {
1443 free_duplicate_context(s->thread_context[i]);
1444 }
1445 for (i = 1; i < s->slice_context_count; i++) {
1446 av_freep(&s->thread_context[i]);
1447 }
1448 s->slice_context_count = 1;
1449 } else free_duplicate_context(s);
1450
1451 av_freep(&s->parse_context.buffer);
1452 s->parse_context.buffer_size = 0;
1453
1454 av_freep(&s->bitstream_buffer);
1455 s->allocated_bitstream_buffer_size = 0;
1456
1457 if (s->picture) {
1458 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1459 ff_free_picture_tables(&s->picture[i]);
1460 ff_mpeg_unref_picture(s, &s->picture[i]);
1461 }
1462 }
1463 av_freep(&s->picture);
1464 ff_free_picture_tables(&s->last_picture);
1465 ff_mpeg_unref_picture(s, &s->last_picture);
1466 ff_free_picture_tables(&s->current_picture);
1467 ff_mpeg_unref_picture(s, &s->current_picture);
1468 ff_free_picture_tables(&s->next_picture);
1469 ff_mpeg_unref_picture(s, &s->next_picture);
1470
1471 free_context_frame(s);
1472
1473 s->context_initialized = 0;
1474 s->last_picture_ptr =
1475 s->next_picture_ptr =
1476 s->current_picture_ptr = NULL;
1477 s->linesize = s->uvlinesize = 0;
1478 }
1479
1480 av_cold void ff_init_rl(RLTable *rl,
1481 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1482 {
1483 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1484 uint8_t index_run[MAX_RUN + 1];
1485 int last, run, level, start, end, i;
1486
1487 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1488 if (static_store && rl->max_level[0])
1489 return;
1490
1491 /* compute max_level[], max_run[] and index_run[] */
1492 for (last = 0; last < 2; last++) {
1493 if (last == 0) {
1494 start = 0;
1495 end = rl->last;
1496 } else {
1497 start = rl->last;
1498 end = rl->n;
1499 }
1500
1501 memset(max_level, 0, MAX_RUN + 1);
1502 memset(max_run, 0, MAX_LEVEL + 1);
1503 memset(index_run, rl->n, MAX_RUN + 1);
1504 for (i = start; i < end; i++) {
1505 run = rl->table_run[i];
1506 level = rl->table_level[i];
1507 if (index_run[run] == rl->n)
1508 index_run[run] = i;
1509 if (level > max_level[run])
1510 max_level[run] = level;
1511 if (run > max_run[level])
1512 max_run[level] = run;
1513 }
1514 if (static_store)
1515 rl->max_level[last] = static_store[last];
1516 else
1517 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1518 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1519 if (static_store)
1520 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1521 else
1522 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1523 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1524 if (static_store)
1525 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1526 else
1527 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1528 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1529 }
1530 }
1531
1532 av_cold void ff_init_vlc_rl(RLTable *rl)
1533 {
1534 int i, q;
1535
1536 for (q = 0; q < 32; q++) {
1537 int qmul = q * 2;
1538 int qadd = (q - 1) | 1;
1539
1540 if (q == 0) {
1541 qmul = 1;
1542 qadd = 0;
1543 }
1544 for (i = 0; i < rl->vlc.table_size; i++) {
1545 int code = rl->vlc.table[i][0];
1546 int len = rl->vlc.table[i][1];
1547 int level, run;
1548
1549 if (len == 0) { // illegal code
1550 run = 66;
1551 level = MAX_LEVEL;
1552 } else if (len < 0) { // more bits needed
1553 run = 0;
1554 level = code;
1555 } else {
1556 if (code == rl->n) { // esc
1557 run = 66;
1558 level = 0;
1559 } else {
1560 run = rl->table_run[code] + 1;
1561 level = rl->table_level[code] * qmul + qadd;
1562 if (code >= rl->last) run += 192;
1563 }
1564 }
1565 rl->rl_vlc[q][i].len = len;
1566 rl->rl_vlc[q][i].level = level;
1567 rl->rl_vlc[q][i].run = run;
1568 }
1569 }
1570 }
1571
1572 static void release_unused_pictures(MpegEncContext *s)
1573 {
1574 int i;
1575
1576 /* release non reference frames */
1577 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1578 if (!s->picture[i].reference)
1579 ff_mpeg_unref_picture(s, &s->picture[i]);
1580 }
1581 }
1582
1583 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1584 {
1585 if (pic->f.buf[0] == NULL)
1586 return 1;
1587 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1588 return 1;
1589 return 0;
1590 }
1591
1592 static int find_unused_picture(MpegEncContext *s, int shared)
1593 {
1594 int i;
1595
1596 if (shared) {
1597 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1598 if (s->picture[i].f.buf[0] == NULL)
1599 return i;
1600 }
1601 } else {
1602 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1603 if (pic_is_unused(s, &s->picture[i]))
1604 return i;
1605 }
1606 }
1607
1608 return AVERROR_INVALIDDATA;
1609 }
1610
1611 int ff_find_unused_picture(MpegEncContext *s, int shared)
1612 {
1613 int ret = find_unused_picture(s, shared);
1614
1615 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1616 if (s->picture[ret].needs_realloc) {
1617 s->picture[ret].needs_realloc = 0;
1618 ff_free_picture_tables(&s->picture[ret]);
1619 ff_mpeg_unref_picture(s, &s->picture[ret]);
1620 }
1621 }
1622 return ret;
1623 }
1624
/**
 * generic function called after decoding
 * the header and before a frame is decoded.
 *
 * Selects/allocates the current Picture, updates last/next reference
 * pointers, allocates grey dummy reference frames when the stream starts
 * on a non-keyframe or a field-based keyframe, doubles the linesizes for
 * field pictures and selects the dequantizer functions for the codec.
 *
 * @return 0 on success, -1 or a negative AVERROR on failure.
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i, ret;
    Picture *pic;
    s->mb_skipped = 0;

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f.buf[0]) {
        ff_mpeg_unref_picture(s, s->last_picture_ptr);
    }

    /* release forgotten pictures */
    /* if (mpeg124/h263) */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (&s->picture[i] != s->last_picture_ptr &&
            &s->picture[i] != s->next_picture_ptr &&
            s->picture[i].reference && !s->picture[i].needs_realloc) {
            /* with frame threading leftover references are expected,
             * so only warn in the single-threaded case */
            if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                av_log(avctx, AV_LOG_ERROR,
                       "releasing zombie picture\n");
            ff_mpeg_unref_picture(s, &s->picture[i]);
        }
    }

    ff_mpeg_unref_picture(s, &s->current_picture);

    release_unused_pictures(s);

    if (s->current_picture_ptr &&
        s->current_picture_ptr->f.buf[0] == NULL) {
        // we already have a unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
    } else {
        i = ff_find_unused_picture(s, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        pic = &s->picture[i];
    }

    /* reference == 3 marks a frame referenced by both fields */
    pic->reference = 0;
    if (!s->droppable) {
        if (s->pict_type != AV_PICTURE_TYPE_B)
            pic->reference = 3;
    }

    pic->f.coded_picture_number = s->coded_picture_number++;

    if (ff_alloc_picture(s, pic, 0) < 0)
        return -1;

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f.top_field_first = s->top_field_first;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        /* for field pictures, derive top_field_first from which field
         * came first in the bitstream */
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f.top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    }
    s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                 !s->progressive_sequence;
    s->current_picture_ptr->field_picture      = s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f.pict_type = s->pict_type;
    // if (s->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* rotate the reference pointers: the previous "next" becomes "last",
     * and this frame becomes the new "next" unless it is droppable */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
            s->pict_type, s->droppable);

    /* no usable backward reference: synthesize a grey dummy frame so
     * motion compensation has something to read from */
    if ((s->last_picture_ptr == NULL ||
         s->last_picture_ptr->f.buf[0] == NULL) &&
        (s->pict_type != AV_PICTURE_TYPE_I ||
         s->picture_structure != PICT_FRAME)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");
        else if (s->picture_structure != PICT_FRAME)
            av_log(avctx, AV_LOG_INFO,
                   "allocate dummy last picture for field based first keyframe\n");

        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->last_picture_ptr = &s->picture[i];

        s->last_picture_ptr->reference   = 3;
        s->last_picture_ptr->f.pict_type = AV_PICTURE_TYPE_I;

        if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }

        /* fill with black luma and neutral (0x80) chroma */
        memset(s->last_picture_ptr->f.data[0], 0,
               avctx->height * s->last_picture_ptr->f.linesize[0]);
        memset(s->last_picture_ptr->f.data[1], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f.linesize[1]);
        memset(s->last_picture_ptr->f.data[2], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f.linesize[2]);

        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    }
    /* a B frame also needs a forward reference; synthesize one if missing */
    if ((s->next_picture_ptr == NULL ||
         s->next_picture_ptr->f.buf[0] == NULL) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->next_picture_ptr = &s->picture[i];

        s->next_picture_ptr->reference   = 3;
        s->next_picture_ptr->f.pict_type = AV_PICTURE_TYPE_I;

        if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
    }

    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->last_picture);
        if (s->last_picture_ptr->f.buf[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->next_picture);
        if (s->next_picture_ptr->f.buf[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    if (s->pict_type != AV_PICTURE_TYPE_I &&
        !(s->last_picture_ptr && s->last_picture_ptr->f.buf[0])) {
        av_log(s, AV_LOG_ERROR,
               "Non-reference picture received and no reference available\n");
        return AVERROR_INVALIDDATA;
    }

    /* field pictures: offset to the correct field and double the strides
     * so the per-field code sees a half-height frame */
    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            }
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i]    *= 2;
            s->next_picture.f.linesize[i]    *= 2;
        }
    }

    s->err_recognition = avctx->err_recognition;

    /* set dequantizer, we can't do it during init as
     * it might change for mpeg4 and we can't do it in the header
     * decode as init is not called for mpeg4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */

    return 0;
}
1842
/* Called after a frame has been decoded: finishes XvMC rendering when
 * active, clears the x86 FPU/MMX state and reports full decode progress
 * for reference frames (frame threading). */
void ff_MPV_frame_end(MpegEncContext *s)
{
#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
    /* redraw edges for the frame if decoding didn't complete */
    // just to make sure that all data is rendered.
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else
    /* NOTE: when XvMC support is compiled in, this dangling "else"
     * intentionally attaches to the emms_c() call below, making it the
     * non-XvMC branch */
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */

    emms_c();

    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}
1861
/**
 * Print debugging info for the given picture.
 *
 * Depending on avctx->debug flags, dumps a per-macroblock character map
 * of skip counts (FF_DEBUG_SKIP), quantizers (FF_DEBUG_QP) and/or
 * macroblock types (FF_DEBUG_MB_TYPE) to the log. No-op for hwaccel
 * pictures or when no mb_type table is present.
 */
void ff_print_debug_info(MpegEncContext *s, Picture *p)
{
    AVFrame *pict;
    if (s->avctx->hwaccel || !p || !p->mb_type)
        return;
    pict = &p->f;

    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
        case AV_PICTURE_TYPE_I:
            av_log(s->avctx,AV_LOG_DEBUG,"I\n");
            break;
        case AV_PICTURE_TYPE_P:
            av_log(s->avctx,AV_LOG_DEBUG,"P\n");
            break;
        case AV_PICTURE_TYPE_B:
            av_log(s->avctx,AV_LOG_DEBUG,"B\n");
            break;
        case AV_PICTURE_TYPE_S:
            av_log(s->avctx,AV_LOG_DEBUG,"S\n");
            break;
        case AV_PICTURE_TYPE_SI:
            av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
            break;
        case AV_PICTURE_TYPE_SP:
            av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
            break;
        }
        for (y = 0; y < s->mb_height; y++) {
            for (x = 0; x < s->mb_width; x++) {
                if (s->avctx->debug & FF_DEBUG_SKIP) {
                    int count = s->mbskip_table[x + y * s->mb_stride];
                    if (count > 9)  // clamp to a single digit
                        count = 9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if (s->avctx->debug & FF_DEBUG_QP) {
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
                           p->qscale_table[x + y * s->mb_stride]);
                }
                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = p->mb_type[x + y * s->mb_stride];
                    /* three characters per MB: type & MV direction,
                     * segmentation, interlacing */
                    // Type & MV direction
                    if (IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else {
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    // segmentation
                    if (IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");


                    if (IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }
}
1961
1962 /**
1963 * find the lowest MB row referenced in the MVs
1964 */
1965 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1966 {
1967 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1968 int my, off, i, mvs;
1969
1970 if (s->picture_structure != PICT_FRAME || s->mcsel)
1971 goto unhandled;
1972
1973 switch (s->mv_type) {
1974 case MV_TYPE_16X16:
1975 mvs = 1;
1976 break;
1977 case MV_TYPE_16X8:
1978 mvs = 2;
1979 break;
1980 case MV_TYPE_8X8:
1981 mvs = 4;
1982 break;
1983 default:
1984 goto unhandled;
1985 }
1986
1987 for (i = 0; i < mvs; i++) {
1988 my = s->mv[dir][i][1]<<qpel_shift;
1989 my_max = FFMAX(my_max, my);
1990 my_min = FFMIN(my_min, my);
1991 }
1992
1993 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1994
1995 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1996 unhandled:
1997 return s->mb_height-1;
1998 }
1999
/* put block[] to dest[]: dequantize with the intra unquantizer, then
 * inverse-transform and store (overwrite) into dest */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
2007
2008 /* add block[] to dest[] */
2009 static inline void add_dct(MpegEncContext *s,
2010 int16_t *block, int i, uint8_t *dest, int line_size)
2011 {
2012 if (s->block_last_index[i] >= 0) {
2013 s->dsp.idct_add (dest, line_size, block);
2014 }
2015 }
2016
2017 static inline void add_dequant_dct(MpegEncContext *s,
2018 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2019 {
2020 if (s->block_last_index[i] >= 0) {
2021 s->dct_unquantize_inter(s, block, i, qscale);
2022
2023 s->dsp.idct_add (dest, line_size, block);
2024 }
2025 }
2026
2027 /**
2028 * Clean dc, ac, coded_block for the current non-intra MB.
2029 */
2030 void ff_clean_intra_table_entries(MpegEncContext *s)
2031 {
2032 int wrap = s->b8_stride;
2033 int xy = s->block_index[0];
2034
2035 s->dc_val[0][xy ] =
2036 s->dc_val[0][xy + 1 ] =
2037 s->dc_val[0][xy + wrap] =
2038 s->dc_val[0][xy + 1 + wrap] = 1024;
2039 /* ac pred */
2040 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2041 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2042 if (s->msmpeg4_version>=3) {
2043 s->coded_block[xy ] =
2044 s->coded_block[xy + 1 ] =
2045 s->coded_block[xy + wrap] =
2046 s->coded_block[xy + 1 + wrap] = 0;
2047 }
2048 /* chroma */
2049 wrap = s->mb_stride;
2050 xy = s->mb_x + s->mb_y * wrap;
2051 s->dc_val[1][xy] =
2052 s->dc_val[2][xy] = 1024;
2053 /* ac pred */
2054 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2055 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2056
2057 s->mbintra_table[xy]= 0;
2058 }
2059
2060 /* generic function called after a macroblock has been parsed by the
2061 decoder or after it has been encoded by the encoder.
2062
2063 Important variables used:
2064 s->mb_intra : true if intra macroblock
2065 s->mv_dir : motion vector direction
2066 s->mv_type : motion vector type
2067 s->mv : motion vector
2068 s->interlaced_dct : true if interlaced dct used (mpeg2)
2069 */
/* Reconstruct one macroblock into the current picture: motion compensation
 * (for inter MBs) followed by dequant/IDCT of the residual. Templated via
 * is_mpeg12 so the MPEG-1/2 build gets the h263-specific branches compiled
 * out (see ff_MPV_decode_mb below). */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                            int is_mpeg12)
{
    /* raster-order index of the current macroblock */
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
    /* Hardware (XvMC) acceleration path: the accelerator consumes the
     * pblocks itself, so no software reconstruction is done here. */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* print DCT coefficients */
       int i,j;
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            /* h263-style prediction: reset the intra prediction state of
             * this position only if it was previously coded intra */
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            /* MPEG-1/2 style: reset the running DC predictors to mid-level */
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* Reconstruction can be skipped while encoding non-reference data
     * (intra-only or B pictures without RD mb decision) unless PSNR
     * computation needs the decoded picture. */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
        const int block_size = 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.reference) {
                /* non-reference pictures never need a copy next time */
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        /* for interlaced DCT, blocks cover alternating lines of a field */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* B-frame data that nobody will read: reconstruct into a
             * scratch buffer (copied back below only if !readable) */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){

                /* with frame threading, wait until the reference rows this
                 * MB's vectors point at have been decoded */
                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->tf,
                                                 ff_MPV_lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->tf,
                                                 ff_MPV_lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                op_qpix= s->me.qpel_put;
                if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                    op_pix = s->hdsp.put_pixels_tab;
                }else{
                    op_pix = s->hdsp.put_no_rnd_pixels_tab;
                }
                if (s->mv_dir & MV_DIR_FORWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                    /* a following backward prediction must average on top */
                    op_pix = s->hdsp.avg_pixels_tab;
                    op_qpix= s->me.qpel_avg;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            /* codecs whose coefficients are already dequantized at parse
             * time take the plain add_dct path below instead */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        /* 4:2:0: one 8x8 block per chroma plane */
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* 4:2:2: two vertically stacked chroma blocks */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                /* WMV2 has its own per-MB reconstruction */
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            /* intra MBs overwrite the destination (put) instead of adding */
            if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* MPEG-1/2 intra blocks are dequantized during parsing,
                 * so only the IDCT remains */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        /* reconstruction went to the scratchpad; copy it into place */
        if(!readable){
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
2304
2305 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2306 #if !CONFIG_SMALL
2307 if(s->out_format == FMT_MPEG1) {
2308 MPV_decode_mb_internal(s, block, 1);
2309 } else
2310 #endif
2311 MPV_decode_mb_internal(s, block, 0);
2312 }
2313
2314 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2315 {
2316 ff_draw_horiz_band(s->avctx, &s->current_picture.f,
2317 &s->last_picture.f, y, h, s->picture_structure,
2318 s->first_field, s->low_delay);
2319 }
2320
2321 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2322 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2323 const int uvlinesize = s->current_picture.f.linesize[1];
2324 const int mb_size= 4;
2325
2326 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2327 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2328 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2329 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2330 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2331 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2332 //block_index is not used by mpeg2, so it is not affected by chroma_format
2333
2334 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2335 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2336 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2337
2338 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2339 {
2340 if(s->picture_structure==PICT_FRAME){
2341 s->dest[0] += s->mb_y * linesize << mb_size;
2342 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2343 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2344 }else{
2345 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2346 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2347 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2348 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2349 }
2350 }
2351 }
2352
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t saved[64];
    int idx;

    /* nothing to do without at least one coefficient past the DC */
    if(last<=0) return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* lift the non-zero coefficients out of the block, clearing them */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* drop each coefficient back at its permuted position */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = saved[pos];
    }
}
2381
2382 void ff_mpeg_flush(AVCodecContext *avctx){
2383 int i;
2384 MpegEncContext *s = avctx->priv_data;
2385
2386 if(s==NULL || s->picture==NULL)
2387 return;
2388
2389 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2390 ff_mpeg_unref_picture(s, &s->picture[i]);
2391 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2392
2393 ff_mpeg_unref_picture(s, &s->current_picture);
2394 ff_mpeg_unref_picture(s, &s->last_picture);
2395 ff_mpeg_unref_picture(s, &s->next_picture);
2396
2397 s->mb_x= s->mb_y= 0;
2398
2399 s->parse_context.state= -1;
2400 s->parse_context.frame_start_found= 0;
2401 s->parse_context.overread= 0;
2402 s->parse_context.overread_index= 0;
2403 s->parse_context.index= 0;
2404 s->parse_context.last_index= 0;
2405 s->bitstream_buffer_size=0;
2406 s->pp_time=0;
2407 }
2408
2409 /**
2410 * set qscale and update qscale dependent variables.
2411 */
2412 void ff_set_qscale(MpegEncContext * s, int qscale)
2413 {
2414 if (qscale < 1)
2415 qscale = 1;
2416 else if (qscale > 31)
2417 qscale = 31;
2418
2419 s->qscale = qscale;
2420 s->chroma_qscale= s->chroma_qscale_table[qscale];
2421
2422 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2423 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2424 }
2425
2426 void ff_MPV_report_decode_progress(MpegEncContext *s)
2427 {
2428 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2429 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2430 }
2431
2432 #if CONFIG_ERROR_RESILIENCE
2433 void ff_mpeg_set_erpic(ERPicture *dst, Picture *src)
2434 {
2435 int i;
2436
2437 if (!src)
2438 return;
2439
2440 dst->f = &src->f;
2441 dst->tf = &src->tf;
2442
2443 for (i = 0; i < 2; i++) {
2444 dst->motion_val[i] = src->motion_val[i];
2445 dst->ref_index[i] = src->ref_index[i];
2446 }
2447
2448 dst->mb_type = src->mb_type;
2449 dst->field_picture = src->field_picture;
2450 }
2451
2452 void ff_mpeg_er_frame_start(MpegEncContext *s)
2453 {
2454 ERContext *er = &s->er;
2455
2456 ff_mpeg_set_erpic(&er->cur_pic, s->current_picture_ptr);
2457 ff_mpeg_set_erpic(&er->next_pic, s->next_picture_ptr);
2458 ff_mpeg_set_erpic(&er->last_pic, s->last_picture_ptr);
2459
2460 er->pp_time = s->pp_time;
2461 er->pb_time = s->pb_time;
2462 er->quarter_sample = s->quarter_sample;
2463 er->partitioned_frame = s->partitioned_frame;
2464
2465 ff_er_frame_start(er);
2466 }
2467 #endif /* CONFIG_ERROR_RESILIENCE */