hwaccel: Rename priv_data_size to frame_priv_data_size
libavcodec/mpegvideo.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
35 #include "avcodec.h"
36 #include "dsputil.h"
37 #include "internal.h"
38 #include "mathops.h"
39 #include "mpegutils.h"
40 #include "mpegvideo.h"
41 #include "mjpegenc.h"
42 #include "msmpeg4.h"
43 #include "xvmc_internal.h"
44 #include "thread.h"
45 #include <limits.h>
46
47 static const uint8_t ff_default_chroma_qscale_table[32] = {
48 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
49 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
50 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
51 };
52
53 const uint8_t ff_mpeg1_dc_scale_table[128] = {
54 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
55 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
56 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
57 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 };
64
65 static const uint8_t mpeg2_dc_scale_table1[128] = {
66 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
67 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
68 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
69 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 };
76
77 static const uint8_t mpeg2_dc_scale_table2[128] = {
78 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
79 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
80 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 };
88
89 static const uint8_t mpeg2_dc_scale_table3[128] = {
90 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
91 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
92 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
93 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 };
100
101 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
102 ff_mpeg1_dc_scale_table,
103 mpeg2_dc_scale_table1,
104 mpeg2_dc_scale_table2,
105 mpeg2_dc_scale_table3,
106 };
107
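/**
 * Dequantize an intra block as specified by MPEG-1: the DC coefficient is
 * scaled by the luma/chroma DC scale, each AC level by
 * (level * qscale * intra_matrix) >> 3, and the result is forced odd.
 */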
108 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
109 int16_t *block, int n, int qscale)
110 {
111 int i, level, nCoeffs;
112 const uint16_t *quant_matrix;
113
114 nCoeffs= s->block_last_index[n];
115
116 if (n < 4)
117 block[0] = block[0] * s->y_dc_scale;
118 else
119 block[0] = block[0] * s->c_dc_scale;
120 /* XXX: only mpeg1 */
121 quant_matrix = s->intra_matrix;
122 for(i=1;i<=nCoeffs;i++) {
123 int j= s->intra_scantable.permutated[i];
124 level = block[j];
125 if (level) {
126 if (level < 0) {
127 level = -level;
128 level = (int)(level * qscale * quant_matrix[j]) >> 3;
129 level = (level - 1) | 1;
130 level = -level;
131 } else {
132 level = (int)(level * qscale * quant_matrix[j]) >> 3;
133 level = (level - 1) | 1;
134 }
135 block[j] = level;
136 }
137 }
138 }
139
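/**
 * Dequantize an inter block as specified by MPEG-1:
 * ((2 * level + 1) * qscale * inter_matrix) >> 4, with the result forced odd.
 */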
140 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
141 int16_t *block, int n, int qscale)
142 {
143 int i, level, nCoeffs;
144 const uint16_t *quant_matrix;
145
146 nCoeffs= s->block_last_index[n];
147
148 quant_matrix = s->inter_matrix;
149 for(i=0; i<=nCoeffs; i++) {
150 int j= s->intra_scantable.permutated[i];
151 level = block[j];
152 if (level) {
153 if (level < 0) {
154 level = -level;
155 level = (((level << 1) + 1) * qscale *
156 ((int) (quant_matrix[j]))) >> 4;
157 level = (level - 1) | 1;
158 level = -level;
159 } else {
160 level = (((level << 1) + 1) * qscale *
161 ((int) (quant_matrix[j]))) >> 4;
162 level = (level - 1) | 1;
163 }
164 block[j] = level;
165 }
166 }
167 }
168
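/**
 * Dequantize an intra block as specified by MPEG-2; unlike the MPEG-1 version
 * the result is not forced odd, and all 64 coefficients are processed when
 * alternate_scan is set.
 */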
169 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
170 int16_t *block, int n, int qscale)
171 {
172 int i, level, nCoeffs;
173 const uint16_t *quant_matrix;
174
175 if(s->alternate_scan) nCoeffs= 63;
176 else nCoeffs= s->block_last_index[n];
177
178 if (n < 4)
179 block[0] = block[0] * s->y_dc_scale;
180 else
181 block[0] = block[0] * s->c_dc_scale;
182 quant_matrix = s->intra_matrix;
183 for(i=1;i<=nCoeffs;i++) {
184 int j= s->intra_scantable.permutated[i];
185 level = block[j];
186 if (level) {
187 if (level < 0) {
188 level = -level;
189 level = (int)(level * qscale * quant_matrix[j]) >> 3;
190 level = -level;
191 } else {
192 level = (int)(level * qscale * quant_matrix[j]) >> 3;
193 }
194 block[j] = level;
195 }
196 }
197 }
198
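/**
 * Bit-exact variant of the MPEG-2 intra dequantizer: it also tracks the
 * parity of the coefficient sum and toggles the LSB of block[63]
 * (MPEG-2 mismatch control).
 */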
199 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
200 int16_t *block, int n, int qscale)
201 {
202 int i, level, nCoeffs;
203 const uint16_t *quant_matrix;
204 int sum=-1;
205
206 if(s->alternate_scan) nCoeffs= 63;
207 else nCoeffs= s->block_last_index[n];
208
209 if (n < 4)
210 block[0] = block[0] * s->y_dc_scale;
211 else
212 block[0] = block[0] * s->c_dc_scale;
213 quant_matrix = s->intra_matrix;
214 for(i=1;i<=nCoeffs;i++) {
215 int j= s->intra_scantable.permutated[i];
216 level = block[j];
217 if (level) {
218 if (level < 0) {
219 level = -level;
220 level = (int)(level * qscale * quant_matrix[j]) >> 3;
221 level = -level;
222 } else {
223 level = (int)(level * qscale * quant_matrix[j]) >> 3;
224 }
225 block[j] = level;
226 sum+=level;
227 }
228 }
229 block[63]^=sum&1;
230 }
231
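/**
 * Dequantize an inter block as specified by MPEG-2, including the mismatch
 * control on block[63].
 */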
232 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
233 int16_t *block, int n, int qscale)
234 {
235 int i, level, nCoeffs;
236 const uint16_t *quant_matrix;
237 int sum=-1;
238
239 if(s->alternate_scan) nCoeffs= 63;
240 else nCoeffs= s->block_last_index[n];
241
242 quant_matrix = s->inter_matrix;
243 for(i=0; i<=nCoeffs; i++) {
244 int j= s->intra_scantable.permutated[i];
245 level = block[j];
246 if (level) {
247 if (level < 0) {
248 level = -level;
249 level = (((level << 1) + 1) * qscale *
250 ((int) (quant_matrix[j]))) >> 4;
251 level = -level;
252 } else {
253 level = (((level << 1) + 1) * qscale *
254 ((int) (quant_matrix[j]))) >> 4;
255 }
256 block[j] = level;
257 sum+=level;
258 }
259 }
260 block[63]^=sum&1;
261 }
262
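/**
 * Dequantize an intra block as used by H.263/MPEG-4: level * 2 * qscale,
 * plus or minus qadd depending on the sign; with AIC the DC coefficient is
 * left unscaled and qadd is 0.
 */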
263 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
264 int16_t *block, int n, int qscale)
265 {
266 int i, level, qmul, qadd;
267 int nCoeffs;
268
269 assert(s->block_last_index[n]>=0);
270
271 qmul = qscale << 1;
272
273 if (!s->h263_aic) {
274 if (n < 4)
275 block[0] = block[0] * s->y_dc_scale;
276 else
277 block[0] = block[0] * s->c_dc_scale;
278 qadd = (qscale - 1) | 1;
279 }else{
280 qadd = 0;
281 }
282 if(s->ac_pred)
283 nCoeffs=63;
284 else
285 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
286
287 for(i=1; i<=nCoeffs; i++) {
288 level = block[i];
289 if (level) {
290 if (level < 0) {
291 level = level * qmul - qadd;
292 } else {
293 level = level * qmul + qadd;
294 }
295 block[i] = level;
296 }
297 }
298 }
299
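/**
 * Dequantize an inter block as used by H.263/MPEG-4: level * 2 * qscale,
 * plus or minus qadd depending on the sign.
 */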
300 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
301 int16_t *block, int n, int qscale)
302 {
303 int i, level, qmul, qadd;
304 int nCoeffs;
305
306 assert(s->block_last_index[n]>=0);
307
308 qadd = (qscale - 1) | 1;
309 qmul = qscale << 1;
310
311 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
312
313 for(i=0; i<=nCoeffs; i++) {
314 level = block[i];
315 if (level) {
316 if (level < 0) {
317 level = level * qmul - qadd;
318 } else {
319 level = level * qmul + qadd;
320 }
321 block[i] = level;
322 }
323 }
324 }
325
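/**
 * Error-resilience callback: decode a single macroblock with the given
 * motion vectors and flags; registered as er->decode_mb in init_er().
 */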
326 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
327 int (*mv)[2][4][2],
328 int mb_x, int mb_y, int mb_intra, int mb_skipped)
329 {
330 MpegEncContext *s = opaque;
331
332 s->mv_dir = mv_dir;
333 s->mv_type = mv_type;
334 s->mb_intra = mb_intra;
335 s->mb_skipped = mb_skipped;
336 s->mb_x = mb_x;
337 s->mb_y = mb_y;
338 memcpy(s->mv, mv, sizeof(*mv));
339
340 ff_init_block_index(s);
341 ff_update_block_index(s);
342
343 s->dsp.clear_blocks(s->block[0]);
344
345 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
346 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
347 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
348
349 assert(ref == 0);
350 ff_MPV_decode_mb(s, s->block);
351 }
352
353 /* init common dct for both encoder and decoder */
354 av_cold int ff_dct_common_init(MpegEncContext *s)
355 {
356 ff_dsputil_init(&s->dsp, s->avctx);
357 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
358 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
359
360 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
361 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
362 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
363 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
364 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
365 if (s->flags & CODEC_FLAG_BITEXACT)
366 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
367 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
368
369 if (ARCH_ARM)
370 ff_MPV_common_init_arm(s);
371 if (ARCH_PPC)
372 ff_MPV_common_init_ppc(s);
373 if (ARCH_X86)
374 ff_MPV_common_init_x86(s);
375
376     /* load & permute scantables
377 * note: only wmv uses different ones
378 */
379 if (s->alternate_scan) {
380 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
381 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
382 } else {
383 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
384 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
385 }
386 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
387 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
388
389 return 0;
390 }
391
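/**
 * Allocate the linesize-dependent scratch buffers: the edge emulation buffer
 * and the shared ME/RD/OBMC scratchpad.
 */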
392 static int frame_size_alloc(MpegEncContext *s, int linesize)
393 {
394 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
395
396 // edge emu needs blocksize + filter length - 1
397 // (= 17x17 for halfpel / 21x21 for h264)
398     // VC1 computes luma and chroma simultaneously and needs 19x19 + 9x9
399 // at uvlinesize. It supports only YUV420 so 24x24 is enough
400 // linesize * interlaced * MBsize
401 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
402 fail);
403
404 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
405 fail)
406 s->me.temp = s->me.scratchpad;
407 s->rd_scratchpad = s->me.scratchpad;
408 s->b_scratchpad = s->me.scratchpad;
409 s->obmc_scratchpad = s->me.scratchpad + 16;
410
411 return 0;
412 fail:
413 av_freep(&s->edge_emu_buffer);
414 return AVERROR(ENOMEM);
415 }
416
417 /**
418 * Allocate a frame buffer
419 */
420 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
421 {
422 int edges_needed = av_codec_is_encoder(s->avctx->codec);
423 int r, ret;
424
425 pic->tf.f = pic->f;
426 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
427 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
428 s->codec_id != AV_CODEC_ID_MSS2) {
429 if (edges_needed) {
430 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
431 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
432 }
433
434 r = ff_thread_get_buffer(s->avctx, &pic->tf,
435 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
436 } else {
437 pic->f->width = s->avctx->width;
438 pic->f->height = s->avctx->height;
439 pic->f->format = s->avctx->pix_fmt;
440 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
441 }
442
443 if (r < 0 || !pic->f->buf[0]) {
444 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
445 r, pic->f->data[0]);
446 return -1;
447 }
448
449 if (edges_needed) {
450 int i;
451 for (i = 0; pic->f->data[i]; i++) {
452 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
453 pic->f->linesize[i] +
454 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
455 pic->f->data[i] += offset;
456 }
457 pic->f->width = s->avctx->width;
458 pic->f->height = s->avctx->height;
459 }
460
461 if (s->avctx->hwaccel) {
462 assert(!pic->hwaccel_picture_private);
463 if (s->avctx->hwaccel->frame_priv_data_size) {
464 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
465 if (!pic->hwaccel_priv_buf) {
466 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
467 return -1;
468 }
469 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
470 }
471 }
472
473 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
474 s->uvlinesize != pic->f->linesize[1])) {
475 av_log(s->avctx, AV_LOG_ERROR,
476 "get_buffer() failed (stride changed)\n");
477 ff_mpeg_unref_picture(s, pic);
478 return -1;
479 }
480
481 if (pic->f->linesize[1] != pic->f->linesize[2]) {
482 av_log(s->avctx, AV_LOG_ERROR,
483 "get_buffer() failed (uv stride mismatch)\n");
484 ff_mpeg_unref_picture(s, pic);
485 return -1;
486 }
487
488 if (!s->edge_emu_buffer &&
489 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
490 av_log(s->avctx, AV_LOG_ERROR,
491 "get_buffer() failed to allocate context scratch buffers.\n");
492 ff_mpeg_unref_picture(s, pic);
493 return ret;
494 }
495
496 return 0;
497 }
498
499 void ff_free_picture_tables(Picture *pic)
500 {
501 int i;
502
503 av_buffer_unref(&pic->mb_var_buf);
504 av_buffer_unref(&pic->mc_mb_var_buf);
505 av_buffer_unref(&pic->mb_mean_buf);
506 av_buffer_unref(&pic->mbskip_table_buf);
507 av_buffer_unref(&pic->qscale_table_buf);
508 av_buffer_unref(&pic->mb_type_buf);
509
510 for (i = 0; i < 2; i++) {
511 av_buffer_unref(&pic->motion_val_buf[i]);
512 av_buffer_unref(&pic->ref_index_buf[i]);
513 }
514 }
515
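/**
 * Allocate the per-picture side data buffers: mbskip/qscale/mb_type tables
 * and, when encoding or using an H.263-style format, motion vectors and
 * reference indices.
 */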
516 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
517 {
518 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
519 const int mb_array_size = s->mb_stride * s->mb_height;
520 const int b8_array_size = s->b8_stride * s->mb_height * 2;
521 int i;
522
523
524 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
525 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
526 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
527 sizeof(uint32_t));
528 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
529 return AVERROR(ENOMEM);
530
531 if (s->encoding) {
532 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
533 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
534 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
535 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
536 return AVERROR(ENOMEM);
537 }
538
539 if (s->out_format == FMT_H263 || s->encoding) {
540 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
541 int ref_index_size = 4 * mb_array_size;
542
543 for (i = 0; mv_size && i < 2; i++) {
544 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
545 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
546 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
547 return AVERROR(ENOMEM);
548 }
549 }
550
551 return 0;
552 }
553
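/**
 * Make sure all per-picture table buffers are writable, copying any buffer
 * that is still shared with another reference.
 */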
554 static int make_tables_writable(Picture *pic)
555 {
556 int ret, i;
557 #define MAKE_WRITABLE(table) \
558 do {\
559 if (pic->table &&\
560 (ret = av_buffer_make_writable(&pic->table)) < 0)\
561 return ret;\
562 } while (0)
563
564 MAKE_WRITABLE(mb_var_buf);
565 MAKE_WRITABLE(mc_mb_var_buf);
566 MAKE_WRITABLE(mb_mean_buf);
567 MAKE_WRITABLE(mbskip_table_buf);
568 MAKE_WRITABLE(qscale_table_buf);
569 MAKE_WRITABLE(mb_type_buf);
570
571 for (i = 0; i < 2; i++) {
572 MAKE_WRITABLE(motion_val_buf[i]);
573 MAKE_WRITABLE(ref_index_buf[i]);
574 }
575
576 return 0;
577 }
578
579 /**
580 * Allocate a Picture.
581 * The pixels are allocated/set by calling get_buffer() if shared = 0
582 */
583 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
584 {
585 int i, ret;
586
587 if (shared) {
588 assert(pic->f->data[0]);
589 pic->shared = 1;
590 } else {
591 assert(!pic->f->buf[0]);
592
593 if (alloc_frame_buffer(s, pic) < 0)
594 return -1;
595
596 s->linesize = pic->f->linesize[0];
597 s->uvlinesize = pic->f->linesize[1];
598 }
599
600 if (!pic->qscale_table_buf)
601 ret = alloc_picture_tables(s, pic);
602 else
603 ret = make_tables_writable(pic);
604 if (ret < 0)
605 goto fail;
606
607 if (s->encoding) {
608 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
609 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
610 pic->mb_mean = pic->mb_mean_buf->data;
611 }
612
613 pic->mbskip_table = pic->mbskip_table_buf->data;
614 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
615 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
616
617 if (pic->motion_val_buf[0]) {
618 for (i = 0; i < 2; i++) {
619 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
620 pic->ref_index[i] = pic->ref_index_buf[i]->data;
621 }
622 }
623
624 return 0;
625 fail:
626 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
627 ff_mpeg_unref_picture(s, pic);
628 ff_free_picture_tables(pic);
629 return AVERROR(ENOMEM);
630 }
631
632 /**
633 * Deallocate a picture.
634 */
635 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
636 {
637 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
638
639 pic->tf.f = pic->f;
640 /* WM Image / Screen codecs allocate internal buffers with different
641 * dimensions / colorspaces; ignore user-defined callbacks for these. */
642 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
643 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
644 s->codec_id != AV_CODEC_ID_MSS2)
645 ff_thread_release_buffer(s->avctx, &pic->tf);
646 else if (pic->f)
647 av_frame_unref(pic->f);
648
649 av_buffer_unref(&pic->hwaccel_priv_buf);
650
651 if (pic->needs_realloc)
652 ff_free_picture_tables(pic);
653
654 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
655 }
656
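/**
 * Make dst share src's per-picture table buffers by taking new references
 * and copying the derived data pointers.
 */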
657 static int update_picture_tables(Picture *dst, Picture *src)
658 {
659 int i;
660
661 #define UPDATE_TABLE(table)\
662 do {\
663 if (src->table &&\
664 (!dst->table || dst->table->buffer != src->table->buffer)) {\
665 av_buffer_unref(&dst->table);\
666 dst->table = av_buffer_ref(src->table);\
667 if (!dst->table) {\
668 ff_free_picture_tables(dst);\
669 return AVERROR(ENOMEM);\
670 }\
671 }\
672 } while (0)
673
674 UPDATE_TABLE(mb_var_buf);
675 UPDATE_TABLE(mc_mb_var_buf);
676 UPDATE_TABLE(mb_mean_buf);
677 UPDATE_TABLE(mbskip_table_buf);
678 UPDATE_TABLE(qscale_table_buf);
679 UPDATE_TABLE(mb_type_buf);
680 for (i = 0; i < 2; i++) {
681 UPDATE_TABLE(motion_val_buf[i]);
682 UPDATE_TABLE(ref_index_buf[i]);
683 }
684
685 dst->mb_var = src->mb_var;
686 dst->mc_mb_var = src->mc_mb_var;
687 dst->mb_mean = src->mb_mean;
688 dst->mbskip_table = src->mbskip_table;
689 dst->qscale_table = src->qscale_table;
690 dst->mb_type = src->mb_type;
691 for (i = 0; i < 2; i++) {
692 dst->motion_val[i] = src->motion_val[i];
693 dst->ref_index[i] = src->ref_index[i];
694 }
695
696 return 0;
697 }
698
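/**
 * Turn dst into a new reference to src: the frame buffers, picture tables,
 * hwaccel private data and bookkeeping fields are shared/copied.
 */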
699 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
700 {
701 int ret;
702
703 av_assert0(!dst->f->buf[0]);
704 av_assert0(src->f->buf[0]);
705
706 src->tf.f = src->f;
707 dst->tf.f = dst->f;
708 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
709 if (ret < 0)
710 goto fail;
711
712 ret = update_picture_tables(dst, src);
713 if (ret < 0)
714 goto fail;
715
716 if (src->hwaccel_picture_private) {
717 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
718 if (!dst->hwaccel_priv_buf)
719 goto fail;
720 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
721 }
722
723 dst->field_picture = src->field_picture;
724 dst->mb_var_sum = src->mb_var_sum;
725 dst->mc_mb_var_sum = src->mc_mb_var_sum;
726 dst->b_frame_score = src->b_frame_score;
727 dst->needs_realloc = src->needs_realloc;
728 dst->reference = src->reference;
729 dst->shared = src->shared;
730
731 return 0;
732 fail:
733 ff_mpeg_unref_picture(s, dst);
734 return ret;
735 }
736
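/** Swap the U and V block pointers (needed for the "VCR2" codec tag). */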
737 static void exchange_uv(MpegEncContext *s)
738 {
739 int16_t (*tmp)[64];
740
741 tmp = s->pblocks[4];
742 s->pblocks[4] = s->pblocks[5];
743 s->pblocks[5] = tmp;
744 }
745
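/**
 * Allocate the per-slice-context buffers: ME maps (and DCT error sums with
 * noise reduction) when encoding, the DCT block arrays, and the AC
 * prediction values for H.263-style formats.
 */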
746 static int init_duplicate_context(MpegEncContext *s)
747 {
748 int y_size = s->b8_stride * (2 * s->mb_height + 1);
749 int c_size = s->mb_stride * (s->mb_height + 1);
750 int yc_size = y_size + 2 * c_size;
751 int i;
752
753 s->edge_emu_buffer =
754 s->me.scratchpad =
755 s->me.temp =
756 s->rd_scratchpad =
757 s->b_scratchpad =
758 s->obmc_scratchpad = NULL;
759
760 if (s->encoding) {
761 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
762 ME_MAP_SIZE * sizeof(uint32_t), fail)
763 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
764 ME_MAP_SIZE * sizeof(uint32_t), fail)
765 if (s->avctx->noise_reduction) {
766 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
767 2 * 64 * sizeof(int), fail)
768 }
769 }
770 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
771 s->block = s->blocks[0];
772
773 for (i = 0; i < 12; i++) {
774 s->pblocks[i] = &s->block[i];
775 }
776 if (s->avctx->codec_tag == AV_RL32("VCR2"))
777 exchange_uv(s);
778
779 if (s->out_format == FMT_H263) {
780 /* ac values */
781 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
782 yc_size * sizeof(int16_t) * 16, fail);
783 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
784 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
785 s->ac_val[2] = s->ac_val[1] + c_size;
786 }
787
788 return 0;
789 fail:
790 return -1; // free() through ff_MPV_common_end()
791 }
792
793 static void free_duplicate_context(MpegEncContext *s)
794 {
795 if (s == NULL)
796 return;
797
798 av_freep(&s->edge_emu_buffer);
799 av_freep(&s->me.scratchpad);
800 s->me.temp =
801 s->rd_scratchpad =
802 s->b_scratchpad =
803 s->obmc_scratchpad = NULL;
804
805 av_freep(&s->dct_error_sum);
806 av_freep(&s->me.map);
807 av_freep(&s->me.score_map);
808 av_freep(&s->blocks);
809 av_freep(&s->ac_val_base);
810 s->block = NULL;
811 }
812
813 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
814 {
815 #define COPY(a) bak->a = src->a
816 COPY(edge_emu_buffer);
817 COPY(me.scratchpad);
818 COPY(me.temp);
819 COPY(rd_scratchpad);
820 COPY(b_scratchpad);
821 COPY(obmc_scratchpad);
822 COPY(me.map);
823 COPY(me.score_map);
824 COPY(blocks);
825 COPY(block);
826 COPY(start_mb_y);
827 COPY(end_mb_y);
828 COPY(me.map_generation);
829 COPY(pb);
830 COPY(dct_error_sum);
831 COPY(dct_count[0]);
832 COPY(dct_count[1]);
833 COPY(ac_val_base);
834 COPY(ac_val[0]);
835 COPY(ac_val[1]);
836 COPY(ac_val[2]);
837 #undef COPY
838 }
839
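/**
 * Copy src into dst while preserving dst's own per-slice buffers and
 * bitstream writer state.
 */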
840 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
841 {
842 MpegEncContext bak;
843 int i, ret;
844 // FIXME copy only needed parts
845 // START_TIMER
846 backup_duplicate_context(&bak, dst);
847 memcpy(dst, src, sizeof(MpegEncContext));
848 backup_duplicate_context(dst, &bak);
849 for (i = 0; i < 12; i++) {
850 dst->pblocks[i] = &dst->block[i];
851 }
852 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
853 exchange_uv(dst);
854 if (!dst->edge_emu_buffer &&
855 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
856 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
857 "scratch buffers.\n");
858 return ret;
859 }
860 // STOP_TIMER("update_duplicate_context")
861     // about 10k cycles / 0.01 sec for 1000 frames at 1 GHz with 2 threads
862 return 0;
863 }
864
865 int ff_mpeg_update_thread_context(AVCodecContext *dst,
866 const AVCodecContext *src)
867 {
868 int i, ret;
869 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
870
871 if (dst == src || !s1->context_initialized)
872 return 0;
873
874 // FIXME can parameters change on I-frames?
875 // in that case dst may need a reinit
876 if (!s->context_initialized) {
877 memcpy(s, s1, sizeof(MpegEncContext));
878
879 s->avctx = dst;
880 s->bitstream_buffer = NULL;
881 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
882
883 ff_MPV_common_init(s);
884 }
885
886 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
887 int err;
888 s->context_reinit = 0;
889 s->height = s1->height;
890 s->width = s1->width;
891 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
892 return err;
893 }
894
895 s->avctx->coded_height = s1->avctx->coded_height;
896 s->avctx->coded_width = s1->avctx->coded_width;
897 s->avctx->width = s1->avctx->width;
898 s->avctx->height = s1->avctx->height;
899
900 s->coded_picture_number = s1->coded_picture_number;
901 s->picture_number = s1->picture_number;
902
903 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
904 ff_mpeg_unref_picture(s, &s->picture[i]);
905 if (s1->picture[i].f->buf[0] &&
906 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
907 return ret;
908 }
909
910 #define UPDATE_PICTURE(pic)\
911 do {\
912 ff_mpeg_unref_picture(s, &s->pic);\
913 if (s1->pic.f->buf[0])\
914 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
915 else\
916 ret = update_picture_tables(&s->pic, &s1->pic);\
917 if (ret < 0)\
918 return ret;\
919 } while (0)
920
921 UPDATE_PICTURE(current_picture);
922 UPDATE_PICTURE(last_picture);
923 UPDATE_PICTURE(next_picture);
924
925 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
926 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
927 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
928
929 // Error/bug resilience
930 s->next_p_frame_damaged = s1->next_p_frame_damaged;
931 s->workaround_bugs = s1->workaround_bugs;
932
933 // MPEG4 timing info
934 memcpy(&s->last_time_base, &s1->last_time_base,
935 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
936 (char *) &s1->last_time_base);
937
938 // B-frame info
939 s->max_b_frames = s1->max_b_frames;
940 s->low_delay = s1->low_delay;
941 s->droppable = s1->droppable;
942
943 // DivX handling (doesn't work)
944 s->divx_packed = s1->divx_packed;
945
946 if (s1->bitstream_buffer) {
947 if (s1->bitstream_buffer_size +
948 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
949 av_fast_malloc(&s->bitstream_buffer,
950 &s->allocated_bitstream_buffer_size,
951 s1->allocated_bitstream_buffer_size);
952 s->bitstream_buffer_size = s1->bitstream_buffer_size;
953 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
954 s1->bitstream_buffer_size);
955 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
956 FF_INPUT_BUFFER_PADDING_SIZE);
957 }
958
959     // linesize-dependent scratch buffer allocation
960 if (!s->edge_emu_buffer)
961 if (s1->linesize) {
962 if (frame_size_alloc(s, s1->linesize) < 0) {
963 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
964 "scratch buffers.\n");
965 return AVERROR(ENOMEM);
966 }
967 } else {
968 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
969 "be allocated due to unknown size.\n");
970 return AVERROR_BUG;
971 }
972
973 // MPEG2/interlacing info
974 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
975 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
976
977 if (!s1->first_field) {
978 s->last_pict_type = s1->pict_type;
979 if (s1->current_picture_ptr)
980 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
981 }
982
983 return 0;
984 }
985
986 /**
987 * Set the given MpegEncContext to common defaults
988 * (same for encoding and decoding).
989 * The changed fields will not depend upon the
990 * prior state of the MpegEncContext.
991 */
992 void ff_MPV_common_defaults(MpegEncContext *s)
993 {
994 s->y_dc_scale_table =
995 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
996 s->chroma_qscale_table = ff_default_chroma_qscale_table;
997 s->progressive_frame = 1;
998 s->progressive_sequence = 1;
999 s->picture_structure = PICT_FRAME;
1000
1001 s->coded_picture_number = 0;
1002 s->picture_number = 0;
1003
1004 s->f_code = 1;
1005 s->b_code = 1;
1006
1007 s->slice_context_count = 1;
1008 }
1009
1010 /**
1011 * Set the given MpegEncContext to defaults for decoding.
1012  * The changed fields will not depend upon
1013 * the prior state of the MpegEncContext.
1014 */
1015 void ff_MPV_decode_defaults(MpegEncContext *s)
1016 {
1017 ff_MPV_common_defaults(s);
1018 }
1019
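/** Set up the ERContext so the shared error-resilience code can be used. */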
1020 static int init_er(MpegEncContext *s)
1021 {
1022 ERContext *er = &s->er;
1023 int mb_array_size = s->mb_height * s->mb_stride;
1024 int i;
1025
1026 er->avctx = s->avctx;
1027 er->dsp = &s->dsp;
1028
1029 er->mb_index2xy = s->mb_index2xy;
1030 er->mb_num = s->mb_num;
1031 er->mb_width = s->mb_width;
1032 er->mb_height = s->mb_height;
1033 er->mb_stride = s->mb_stride;
1034 er->b8_stride = s->b8_stride;
1035
1036 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1037 er->error_status_table = av_mallocz(mb_array_size);
1038 if (!er->er_temp_buffer || !er->error_status_table)
1039 goto fail;
1040
1041 er->mbskip_table = s->mbskip_table;
1042 er->mbintra_table = s->mbintra_table;
1043
1044 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1045 er->dc_val[i] = s->dc_val[i];
1046
1047 er->decode_mb = mpeg_er_decode_mb;
1048 er->opaque = s;
1049
1050 return 0;
1051 fail:
1052 av_freep(&er->er_temp_buffer);
1053 av_freep(&er->error_status_table);
1054 return AVERROR(ENOMEM);
1055 }
1056
1057 /**
1058  * Initialize and allocate MpegEncContext fields dependent on the resolution.
1059 */
1060 static int init_context_frame(MpegEncContext *s)
1061 {
1062 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1063
1064 s->mb_width = (s->width + 15) / 16;
1065 s->mb_stride = s->mb_width + 1;
1066 s->b8_stride = s->mb_width * 2 + 1;
1067 mb_array_size = s->mb_height * s->mb_stride;
1068 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1069
1070     /* set default edge pos, will be overridden
1071 * in decode_header if needed */
1072 s->h_edge_pos = s->mb_width * 16;
1073 s->v_edge_pos = s->mb_height * 16;
1074
1075 s->mb_num = s->mb_width * s->mb_height;
1076
1077 s->block_wrap[0] =
1078 s->block_wrap[1] =
1079 s->block_wrap[2] =
1080 s->block_wrap[3] = s->b8_stride;
1081 s->block_wrap[4] =
1082 s->block_wrap[5] = s->mb_stride;
1083
1084 y_size = s->b8_stride * (2 * s->mb_height + 1);
1085 c_size = s->mb_stride * (s->mb_height + 1);
1086 yc_size = y_size + 2 * c_size;
1087
1088 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
1089                      fail); // error resilience code looks cleaner with this
1090 for (y = 0; y < s->mb_height; y++)
1091 for (x = 0; x < s->mb_width; x++)
1092 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1093
1094 s->mb_index2xy[s->mb_height * s->mb_width] =
1095 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1096
1097 if (s->encoding) {
1098 /* Allocate MV tables */
1099 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
1100 mv_table_size * 2 * sizeof(int16_t), fail);
1101 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
1102 mv_table_size * 2 * sizeof(int16_t), fail);
1103 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
1104 mv_table_size * 2 * sizeof(int16_t), fail);
1105 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
1106 mv_table_size * 2 * sizeof(int16_t), fail);
1107 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
1108 mv_table_size * 2 * sizeof(int16_t), fail);
1109 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
1110 mv_table_size * 2 * sizeof(int16_t), fail);
1111 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1112 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1113 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1114 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
1115 s->mb_stride + 1;
1116 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
1117 s->mb_stride + 1;
1118 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1119
1120 /* Allocate MB type table */
1121 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
1122 sizeof(uint16_t), fail); // needed for encoding
1123
1124 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
1125 sizeof(int), fail);
1126
1127 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1128 mb_array_size * sizeof(float), fail);
1129 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1130 mb_array_size * sizeof(float), fail);
1131
1132 }
1133
1134 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1135 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1136 /* interlaced direct mode decoding tables */
1137 for (i = 0; i < 2; i++) {
1138 int j, k;
1139 for (j = 0; j < 2; j++) {
1140 for (k = 0; k < 2; k++) {
1141 FF_ALLOCZ_OR_GOTO(s->avctx,
1142 s->b_field_mv_table_base[i][j][k],
1143 mv_table_size * 2 * sizeof(int16_t),
1144 fail);
1145 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1146 s->mb_stride + 1;
1147 }
1148 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
1149 mb_array_size * 2 * sizeof(uint8_t), fail);
1150 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
1151 mv_table_size * 2 * sizeof(int16_t), fail);
1152 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
1153 + s->mb_stride + 1;
1154 }
1155 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
1156 mb_array_size * 2 * sizeof(uint8_t), fail);
1157 }
1158 }
1159 if (s->out_format == FMT_H263) {
1160 /* cbp values */
1161 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
1162 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1163
1164 /* cbp, ac_pred, pred_dir */
1165 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
1166 mb_array_size * sizeof(uint8_t), fail);
1167 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
1168 mb_array_size * sizeof(uint8_t), fail);
1169 }
1170
1171 if (s->h263_pred || s->h263_plus || !s->encoding) {
1172 /* dc values */
1173 // MN: we need these for error resilience of intra-frames
1174 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
1175 yc_size * sizeof(int16_t), fail);
1176 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1177 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1178 s->dc_val[2] = s->dc_val[1] + c_size;
1179 for (i = 0; i < yc_size; i++)
1180 s->dc_val_base[i] = 1024;
1181 }
1182
1183     /* which mb is an intra block */
1184 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1185 memset(s->mbintra_table, 1, mb_array_size);
1186
1187 /* init macroblock skip table */
1188 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1189 // Note the + 1 is for a quicker mpeg4 slice_end detection
1190
1191 return init_er(s);
1192 fail:
1193 return AVERROR(ENOMEM);
1194 }
1195
1196 /**
1197  * Initialize the common structure for both encoder and decoder.
1198  * This assumes that some variables like width/height are already set.
1199 */
1200 av_cold int ff_MPV_common_init(MpegEncContext *s)
1201 {
1202 int i;
1203 int nb_slices = (HAVE_THREADS &&
1204 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1205 s->avctx->thread_count : 1;
1206
1207 if (s->encoding && s->avctx->slices)
1208 nb_slices = s->avctx->slices;
1209
1210 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1211 s->mb_height = (s->height + 31) / 32 * 2;
1212 else
1213 s->mb_height = (s->height + 15) / 16;
1214
1215 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1216 av_log(s->avctx, AV_LOG_ERROR,
1217 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1218 return -1;
1219 }
1220
1221 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1222 int max_slices;
1223 if (s->mb_height)
1224 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1225 else
1226 max_slices = MAX_THREADS;
1227 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1228 " reducing to %d\n", nb_slices, max_slices);
1229 nb_slices = max_slices;
1230 }
1231
1232 if ((s->width || s->height) &&
1233 av_image_check_size(s->width, s->height, 0, s->avctx))
1234 return -1;
1235
1236 ff_dct_common_init(s);
1237
1238 s->flags = s->avctx->flags;
1239 s->flags2 = s->avctx->flags2;
1240
1241 /* set chroma shifts */
1242 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1243 &s->chroma_x_shift,
1244 &s->chroma_y_shift);
1245
1246 /* convert fourcc to upper case */
1247 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1248
1249 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1250
1251 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1252 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1253 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1254 s->picture[i].f = av_frame_alloc();
1255 if (!s->picture[i].f)
1256 goto fail;
1257 }
1258 memset(&s->next_picture, 0, sizeof(s->next_picture));
1259 memset(&s->last_picture, 0, sizeof(s->last_picture));
1260 memset(&s->current_picture, 0, sizeof(s->current_picture));
1261 memset(&s->new_picture, 0, sizeof(s->new_picture));
1262 s->next_picture.f = av_frame_alloc();
1263 if (!s->next_picture.f)
1264 goto fail;
1265 s->last_picture.f = av_frame_alloc();
1266 if (!s->last_picture.f)
1267 goto fail;
1268 s->current_picture.f = av_frame_alloc();
1269 if (!s->current_picture.f)
1270 goto fail;
1271 s->new_picture.f = av_frame_alloc();
1272 if (!s->new_picture.f)
1273 goto fail;
1274
1275 if (s->width && s->height) {
1276 if (init_context_frame(s))
1277 goto fail;
1278
1279 s->parse_context.state = -1;
1280 }
1281
1282 s->context_initialized = 1;
1283 s->thread_context[0] = s;
1284
1285 if (s->width && s->height) {
1286 if (nb_slices > 1) {
1287 for (i = 1; i < nb_slices; i++) {
1288 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1289 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1290 }
1291
1292 for (i = 0; i < nb_slices; i++) {
1293 if (init_duplicate_context(s->thread_context[i]) < 0)
1294 goto fail;
1295 s->thread_context[i]->start_mb_y =
1296 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1297 s->thread_context[i]->end_mb_y =
1298 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1299 }
1300 } else {
1301 if (init_duplicate_context(s) < 0)
1302 goto fail;
1303 s->start_mb_y = 0;
1304 s->end_mb_y = s->mb_height;
1305 }
1306 s->slice_context_count = nb_slices;
1307 }
1308
1309 return 0;
1310 fail:
1311 ff_MPV_common_end(s);
1312 return -1;
1313 }
1314
1315 /**
1316  * Free and reset MpegEncContext fields depending on the resolution.
1317  * Used during resolution changes to avoid a full reinitialization of the
1318  * codec.
1319 */
1320 static int free_context_frame(MpegEncContext *s)
1321 {
1322 int i, j, k;
1323
1324 av_freep(&s->mb_type);
1325 av_freep(&s->p_mv_table_base);
1326 av_freep(&s->b_forw_mv_table_base);
1327 av_freep(&s->b_back_mv_table_base);
1328 av_freep(&s->b_bidir_forw_mv_table_base);
1329 av_freep(&s->b_bidir_back_mv_table_base);
1330 av_freep(&s->b_direct_mv_table_base);
1331 s->p_mv_table = NULL;
1332 s->b_forw_mv_table = NULL;
1333 s->b_back_mv_table = NULL;
1334 s->b_bidir_forw_mv_table = NULL;
1335 s->b_bidir_back_mv_table = NULL;
1336 s->b_direct_mv_table = NULL;
1337 for (i = 0; i < 2; i++) {
1338 for (j = 0; j < 2; j++) {
1339 for (k = 0; k < 2; k++) {
1340 av_freep(&s->b_field_mv_table_base[i][j][k]);
1341 s->b_field_mv_table[i][j][k] = NULL;
1342 }
1343 av_freep(&s->b_field_select_table[i][j]);
1344 av_freep(&s->p_field_mv_table_base[i][j]);
1345 s->p_field_mv_table[i][j] = NULL;
1346 }
1347 av_freep(&s->p_field_select_table[i]);
1348 }
1349
1350 av_freep(&s->dc_val_base);
1351 av_freep(&s->coded_block_base);
1352 av_freep(&s->mbintra_table);
1353 av_freep(&s->cbp_table);
1354 av_freep(&s->pred_dir_table);
1355
1356 av_freep(&s->mbskip_table);
1357
1358 av_freep(&s->er.error_status_table);
1359 av_freep(&s->er.er_temp_buffer);
1360 av_freep(&s->mb_index2xy);
1361 av_freep(&s->lambda_table);
1362 av_freep(&s->cplx_tab);
1363 av_freep(&s->bits_tab);
1364
1365 s->linesize = s->uvlinesize = 0;
1366
1367 return 0;
1368 }
1369
1370 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1371 {
1372 int i, err = 0;
1373
1374 if (s->slice_context_count > 1) {
1375 for (i = 0; i < s->slice_context_count; i++) {
1376 free_duplicate_context(s->thread_context[i]);
1377 }
1378 for (i = 1; i < s->slice_context_count; i++) {
1379 av_freep(&s->thread_context[i]);
1380 }
1381 } else
1382 free_duplicate_context(s);
1383
1384 if ((err = free_context_frame(s)) < 0)
1385 return err;
1386
1387 if (s->picture)
1388 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1389 s->picture[i].needs_realloc = 1;
1390 }
1391
1392 s->last_picture_ptr =
1393 s->next_picture_ptr =
1394 s->current_picture_ptr = NULL;
1395
1396 // init
1397 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1398 s->mb_height = (s->height + 31) / 32 * 2;
1399 else
1400 s->mb_height = (s->height + 15) / 16;
1401
1402 if ((s->width || s->height) &&
1403 av_image_check_size(s->width, s->height, 0, s->avctx))
1404 return AVERROR_INVALIDDATA;
1405
1406 if ((err = init_context_frame(s)))
1407 goto fail;
1408
1409 s->thread_context[0] = s;
1410
1411 if (s->width && s->height) {
1412 int nb_slices = s->slice_context_count;
1413 if (nb_slices > 1) {
1414 for (i = 1; i < nb_slices; i++) {
1415 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1416 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1417 }
1418
1419 for (i = 0; i < nb_slices; i++) {
1420 if (init_duplicate_context(s->thread_context[i]) < 0)
1421 goto fail;
1422 s->thread_context[i]->start_mb_y =
1423 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1424 s->thread_context[i]->end_mb_y =
1425 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1426 }
1427 } else {
1428 if (init_duplicate_context(s) < 0)
1429 goto fail;
1430 s->start_mb_y = 0;
1431 s->end_mb_y = s->mb_height;
1432 }
1433 s->slice_context_count = nb_slices;
1434 }
1435
1436 return 0;
1437 fail:
1438 ff_MPV_common_end(s);
1439 return err;
1440 }
1441
1442 /* free the common structure for both encoder and decoder */
1443 void ff_MPV_common_end(MpegEncContext *s)
1444 {
1445 int i;
1446
1447 if (s->slice_context_count > 1) {
1448 for (i = 0; i < s->slice_context_count; i++) {
1449 free_duplicate_context(s->thread_context[i]);
1450 }
1451 for (i = 1; i < s->slice_context_count; i++) {
1452 av_freep(&s->thread_context[i]);
1453 }
1454 s->slice_context_count = 1;
1455 } else free_duplicate_context(s);
1456
1457 av_freep(&s->parse_context.buffer);
1458 s->parse_context.buffer_size = 0;
1459
1460 av_freep(&s->bitstream_buffer);
1461 s->allocated_bitstream_buffer_size = 0;
1462
1463 if (s->picture) {
1464 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1465 ff_free_picture_tables(&s->picture[i]);
1466 ff_mpeg_unref_picture(s, &s->picture[i]);
1467 av_frame_free(&s->picture[i].f);
1468 }
1469 }
1470 av_freep(&s->picture);
1471 ff_free_picture_tables(&s->last_picture);
1472 ff_mpeg_unref_picture(s, &s->last_picture);
1473 av_frame_free(&s->last_picture.f);
1474 ff_free_picture_tables(&s->current_picture);
1475 ff_mpeg_unref_picture(s, &s->current_picture);
1476 av_frame_free(&s->current_picture.f);
1477 ff_free_picture_tables(&s->next_picture);
1478 ff_mpeg_unref_picture(s, &s->next_picture);
1479 av_frame_free(&s->next_picture.f);
1480 ff_free_picture_tables(&s->new_picture);
1481 ff_mpeg_unref_picture(s, &s->new_picture);
1482 av_frame_free(&s->new_picture.f);
1483
1484 free_context_frame(s);
1485
1486 s->context_initialized = 0;
1487 s->last_picture_ptr =
1488 s->next_picture_ptr =
1489 s->current_picture_ptr = NULL;
1490 s->linesize = s->uvlinesize = 0;
1491 }
1492
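/**
 * Build the max_level, max_run and index_run lookup tables of an RLTable
 * from its run/level code tables.
 */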
1493 av_cold void ff_init_rl(RLTable *rl,
1494 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1495 {
1496 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1497 uint8_t index_run[MAX_RUN + 1];
1498 int last, run, level, start, end, i;
1499
1500 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1501 if (static_store && rl->max_level[0])
1502 return;
1503
1504 /* compute max_level[], max_run[] and index_run[] */
1505 for (last = 0; last < 2; last++) {
1506 if (last == 0) {
1507 start = 0;
1508 end = rl->last;
1509 } else {
1510 start = rl->last;
1511 end = rl->n;
1512 }
1513
1514 memset(max_level, 0, MAX_RUN + 1);
1515 memset(max_run, 0, MAX_LEVEL + 1);
1516 memset(index_run, rl->n, MAX_RUN + 1);
1517 for (i = start; i < end; i++) {
1518 run = rl->table_run[i];
1519 level = rl->table_level[i];
1520 if (index_run[run] == rl->n)
1521 index_run[run] = i;
1522 if (level > max_level[run])
1523 max_level[run] = level;
1524 if (run > max_run[level])
1525 max_run[level] = run;
1526 }
1527 if (static_store)
1528 rl->max_level[last] = static_store[last];
1529 else
1530 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1531 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1532 if (static_store)
1533 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1534 else
1535 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1536 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1537 if (static_store)
1538 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1539 else
1540 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1541 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1542 }
1543 }
1544
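/**
 * Precompute the per-qscale RL VLC tables so that run, level and length can
 * be fetched with a single table lookup while decoding.
 */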
1545 av_cold void ff_init_vlc_rl(RLTable *rl)
1546 {
1547 int i, q;
1548
1549 for (q = 0; q < 32; q++) {
1550 int qmul = q * 2;
1551 int qadd = (q - 1) | 1;
1552
1553 if (q == 0) {
1554 qmul = 1;
1555 qadd = 0;
1556 }
1557 for (i = 0; i < rl->vlc.table_size; i++) {
1558 int code = rl->vlc.table[i][0];
1559 int len = rl->vlc.table[i][1];
1560 int level, run;
1561
1562 if (len == 0) { // illegal code
1563 run = 66;
1564 level = MAX_LEVEL;
1565 } else if (len < 0) { // more bits needed
1566 run = 0;
1567 level = code;
1568 } else {
1569 if (code == rl->n) { // esc
1570 run = 66;
1571 level = 0;
1572 } else {
1573 run = rl->table_run[code] + 1;
1574 level = rl->table_level[code] * qmul + qadd;
1575 if (code >= rl->last) run += 192;
1576 }
1577 }
1578 rl->rl_vlc[q][i].len = len;
1579 rl->rl_vlc[q][i].level = level;
1580 rl->rl_vlc[q][i].run = run;
1581 }
1582 }
1583 }
1584
1585 static void release_unused_pictures(MpegEncContext *s)
1586 {
1587 int i;
1588
1589     /* release non-reference frames */
1590 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1591 if (!s->picture[i].reference)
1592 ff_mpeg_unref_picture(s, &s->picture[i]);
1593 }
1594 }
1595
1596 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1597 {
1598 if (pic->f->buf[0] == NULL)
1599 return 1;
1600 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1601 return 1;
1602 return 0;
1603 }
1604
1605 static int find_unused_picture(MpegEncContext *s, int shared)
1606 {
1607 int i;
1608
1609 if (shared) {
1610 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1611 if (s->picture[i].f->buf[0] == NULL)
1612 return i;
1613 }
1614 } else {
1615 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1616 if (pic_is_unused(s, &s->picture[i]))
1617 return i;
1618 }
1619 }
1620
1621 return AVERROR_INVALIDDATA;
1622 }
1623
1624 int ff_find_unused_picture(MpegEncContext *s, int shared)
1625 {
1626 int ret = find_unused_picture(s, shared);
1627
1628 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1629 if (s->picture[ret].needs_realloc) {
1630 s->picture[ret].needs_realloc = 0;
1631 ff_free_picture_tables(&s->picture[ret]);
1632 ff_mpeg_unref_picture(s, &s->picture[ret]);
1633 }
1634 }
1635 return ret;
1636 }
1637
1638 /**
1639  * Generic function called after decoding
1640 * the header and before a frame is decoded.
1641 */
1642 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1643 {
1644 int i, ret;
1645 Picture *pic;
1646 s->mb_skipped = 0;
1647
1648 /* mark & release old frames */
1649 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1650 s->last_picture_ptr != s->next_picture_ptr &&
1651 s->last_picture_ptr->f->buf[0]) {
1652 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1653 }
1654
1655 /* release forgotten pictures */
1656 /* if (mpeg124/h263) */
1657 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1658 if (&s->picture[i] != s->last_picture_ptr &&
1659 &s->picture[i] != s->next_picture_ptr &&
1660 s->picture[i].reference && !s->picture[i].needs_realloc) {
1661 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1662 av_log(avctx, AV_LOG_ERROR,
1663 "releasing zombie picture\n");
1664 ff_mpeg_unref_picture(s, &s->picture[i]);
1665 }
1666 }
1667
1668 ff_mpeg_unref_picture(s, &s->current_picture);
1669
1670 release_unused_pictures(s);
1671
1672 if (s->current_picture_ptr &&
1673 s->current_picture_ptr->f->buf[0] == NULL) {
1674         // we already have an unused image
1675 // (maybe it was set before reading the header)
1676 pic = s->current_picture_ptr;
1677 } else {
1678 i = ff_find_unused_picture(s, 0);
1679 if (i < 0) {
1680 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1681 return i;
1682 }
1683 pic = &s->picture[i];
1684 }
1685
1686 pic->reference = 0;
1687 if (!s->droppable) {
1688 if (s->pict_type != AV_PICTURE_TYPE_B)
1689 pic->reference = 3;
1690 }
1691
1692 pic->f->coded_picture_number = s->coded_picture_number++;
1693
1694 if (ff_alloc_picture(s, pic, 0) < 0)
1695 return -1;
1696
1697 s->current_picture_ptr = pic;
1698 // FIXME use only the vars from current_pic
1699 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1700 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1701 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1702 if (s->picture_structure != PICT_FRAME)
1703 s->current_picture_ptr->f->top_field_first =
1704 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1705 }
1706 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1707 !s->progressive_sequence;
1708 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1709
1710 s->current_picture_ptr->f->pict_type = s->pict_type;
1711 // if (s->flags && CODEC_FLAG_QSCALE)
1712 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1713 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1714
1715 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1716 s->current_picture_ptr)) < 0)
1717 return ret;
1718
1719 if (s->pict_type != AV_PICTURE_TYPE_B) {
1720 s->last_picture_ptr = s->next_picture_ptr;
1721 if (!s->droppable)
1722 s->next_picture_ptr = s->current_picture_ptr;
1723 }
1724 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1725 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1726 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1727 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1728 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1729 s->pict_type, s->droppable);
1730
1731 if ((s->last_picture_ptr == NULL ||
1732 s->last_picture_ptr->f->buf[0] == NULL) &&
1733 (s->pict_type != AV_PICTURE_TYPE_I ||
1734 s->picture_structure != PICT_FRAME)) {
1735 int h_chroma_shift, v_chroma_shift;
1736 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1737 &h_chroma_shift, &v_chroma_shift);
1738 if (s->pict_type != AV_PICTURE_TYPE_I)
1739 av_log(avctx, AV_LOG_ERROR,
1740 "warning: first frame is no keyframe\n");
1741 else if (s->picture_structure != PICT_FRAME)
1742 av_log(avctx, AV_LOG_INFO,
1743 "allocate dummy last picture for field based first keyframe\n");
1744
1745 /* Allocate a dummy frame */
1746 i = ff_find_unused_picture(s, 0);
1747 if (i < 0) {
1748 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1749 return i;
1750 }
1751 s->last_picture_ptr = &s->picture[i];
1752
1753 s->last_picture_ptr->reference = 3;
1754 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1755
1756 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1757 s->last_picture_ptr = NULL;
1758 return -1;
1759 }
1760
1761 memset(s->last_picture_ptr->f->data[0], 0,
1762 avctx->height * s->last_picture_ptr->f->linesize[0]);
1763 memset(s->last_picture_ptr->f->data[1], 0x80,
1764 (avctx->height >> v_chroma_shift) *
1765 s->last_picture_ptr->f->linesize[1]);
1766 memset(s->last_picture_ptr->f->data[2], 0x80,
1767 (avctx->height >> v_chroma_shift) *
1768 s->last_picture_ptr->f->linesize[2]);
1769
1770 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1771 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1772 }
1773 if ((s->next_picture_ptr == NULL ||
1774 s->next_picture_ptr->f->buf[0] == NULL) &&
1775 s->pict_type == AV_PICTURE_TYPE_B) {
1776 /* Allocate a dummy frame */
1777 i = ff_find_unused_picture(s, 0);
1778 if (i < 0) {
1779 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1780 return i;
1781 }
1782 s->next_picture_ptr = &s->picture[i];
1783
1784 s->next_picture_ptr->reference = 3;
1785 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1786
1787 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1788 s->next_picture_ptr = NULL;
1789 return -1;
1790 }
1791 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1792 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1793 }
1794
1795 if (s->last_picture_ptr) {
1796 ff_mpeg_unref_picture(s, &s->last_picture);
1797 if (s->last_picture_ptr->f->buf[0] &&
1798 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1799 s->last_picture_ptr)) < 0)
1800 return ret;
1801 }
1802 if (s->next_picture_ptr) {
1803 ff_mpeg_unref_picture(s, &s->next_picture);
1804 if (s->next_picture_ptr->f->buf[0] &&
1805 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1806 s->next_picture_ptr)) < 0)
1807 return ret;
1808 }
1809
1810 if (s->pict_type != AV_PICTURE_TYPE_I &&
1811 !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
1812 av_log(s, AV_LOG_ERROR,
1813 "Non-reference picture received and no reference available\n");
1814 return AVERROR_INVALIDDATA;
1815 }
1816
1817 if (s->picture_structure!= PICT_FRAME) {
1818 int i;
1819 for (i = 0; i < 4; i++) {
1820 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1821 s->current_picture.f->data[i] +=
1822 s->current_picture.f->linesize[i];
1823 }
1824 s->current_picture.f->linesize[i] *= 2;
1825 s->last_picture.f->linesize[i] *= 2;
1826 s->next_picture.f->linesize[i] *= 2;
1827 }
1828 }
1829
1830 s->err_recognition = avctx->err_recognition;
1831
1832     /* set the dequantizer; we can't do it during init as it might change
1833      * for MPEG-4, and we can't do it in the header decode as init is not
1834      * called for MPEG-4 there yet */
1835 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1836 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1837 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1838 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1839 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1840 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1841 } else {
1842 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1843 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1844 }
1845
1846 #if FF_API_XVMC
1847 FF_DISABLE_DEPRECATION_WARNINGS
1848 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1849 return ff_xvmc_field_start(s, avctx);
1850 FF_ENABLE_DEPRECATION_WARNINGS
1851 #endif /* FF_API_XVMC */
1852
1853 return 0;
1854 }
1855
1856 /* called after a frame has been decoded. */
1857 void ff_MPV_frame_end(MpegEncContext *s)
1858 {
1859 #if FF_API_XVMC
1860 FF_DISABLE_DEPRECATION_WARNINGS
1861 /* Complete the XvMC rendering even if decoding did not finish,
1862 * just to make sure that all data is rendered. */
1863 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1864 ff_xvmc_field_end(s);
1865 } else
1866 FF_ENABLE_DEPRECATION_WARNINGS
1867 #endif /* FF_API_XVMC */
1868
1869 emms_c();
1870
1871 if (s->current_picture.reference)
1872 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1873 }
1874
1875 /**
1876 * Print debugging info for the given picture.
1877 */
1878 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1879 {
1880 AVFrame *pict;
1881 if (s->avctx->hwaccel || !p || !p->mb_type)
1882 return;
1883 pict = p->f;
1884
1885 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1886 int x,y;
1887
1888 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1889 switch (pict->pict_type) {
1890 case AV_PICTURE_TYPE_I:
1891 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1892 break;
1893 case AV_PICTURE_TYPE_P:
1894 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1895 break;
1896 case AV_PICTURE_TYPE_B:
1897 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1898 break;
1899 case AV_PICTURE_TYPE_S:
1900 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1901 break;
1902 case AV_PICTURE_TYPE_SI:
1903 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1904 break;
1905 case AV_PICTURE_TYPE_SP:
1906 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1907 break;
1908 }
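/* For every MB print the skip count, the QP and a set of characters
 * describing the MB type, partitioning and interlacing, one text row
 * per MB row. */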
1909 for (y = 0; y < s->mb_height; y++) {
1910 for (x = 0; x < s->mb_width; x++) {
1911 if (s->avctx->debug & FF_DEBUG_SKIP) {
1912 int count = s->mbskip_table[x + y * s->mb_stride];
1913 if (count > 9)
1914 count = 9;
1915 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1916 }
1917 if (s->avctx->debug & FF_DEBUG_QP) {
1918 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1919 p->qscale_table[x + y * s->mb_stride]);
1920 }
1921 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1922 int mb_type = p->mb_type[x + y * s->mb_stride];
1923 // Type & MV direction
1924 if (IS_PCM(mb_type))
1925 av_log(s->avctx, AV_LOG_DEBUG, "P");
1926 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1927 av_log(s->avctx, AV_LOG_DEBUG, "A");
1928 else if (IS_INTRA4x4(mb_type))
1929 av_log(s->avctx, AV_LOG_DEBUG, "i");
1930 else if (IS_INTRA16x16(mb_type))
1931 av_log(s->avctx, AV_LOG_DEBUG, "I");
1932 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1933 av_log(s->avctx, AV_LOG_DEBUG, "d");
1934 else if (IS_DIRECT(mb_type))
1935 av_log(s->avctx, AV_LOG_DEBUG, "D");
1936 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1937 av_log(s->avctx, AV_LOG_DEBUG, "g");
1938 else if (IS_GMC(mb_type))
1939 av_log(s->avctx, AV_LOG_DEBUG, "G");
1940 else if (IS_SKIP(mb_type))
1941 av_log(s->avctx, AV_LOG_DEBUG, "S");
1942 else if (!USES_LIST(mb_type, 1))
1943 av_log(s->avctx, AV_LOG_DEBUG, ">");
1944 else if (!USES_LIST(mb_type, 0))
1945 av_log(s->avctx, AV_LOG_DEBUG, "<");
1946 else {
1947 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1948 av_log(s->avctx, AV_LOG_DEBUG, "X");
1949 }
1950
1951 // segmentation
1952 if (IS_8X8(mb_type))
1953 av_log(s->avctx, AV_LOG_DEBUG, "+");
1954 else if (IS_16X8(mb_type))
1955 av_log(s->avctx, AV_LOG_DEBUG, "-");
1956 else if (IS_8X16(mb_type))
1957 av_log(s->avctx, AV_LOG_DEBUG, "|");
1958 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1959 av_log(s->avctx, AV_LOG_DEBUG, " ");
1960 else
1961 av_log(s->avctx, AV_LOG_DEBUG, "?");
1962
1963
1964 if (IS_INTERLACED(mb_type))
1965 av_log(s->avctx, AV_LOG_DEBUG, "=");
1966 else
1967 av_log(s->avctx, AV_LOG_DEBUG, " ");
1968 }
1969 }
1970 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1971 }
1972 }
1973 }
1974
1975 /**
1976 * find the lowest MB row referenced in the MVs
1977 */
1978 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1979 {
1980 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1981 int my, off, i, mvs;
1982
1983 if (s->picture_structure != PICT_FRAME || s->mcsel)
1984 goto unhandled;
1985
1986 switch (s->mv_type) {
1987 case MV_TYPE_16X16:
1988 mvs = 1;
1989 break;
1990 case MV_TYPE_16X8:
1991 mvs = 2;
1992 break;
1993 case MV_TYPE_8X8:
1994 mvs = 4;
1995 break;
1996 default:
1997 goto unhandled;
1998 }
1999
2000 for (i = 0; i < mvs; i++) {
2001 my = s->mv[dir][i][1]<<qpel_shift;
2002 my_max = FFMAX(my_max, my);
2003 my_min = FFMIN(my_min, my);
2004 }
2005
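/* The vectors are in quarter-pel units here (half-pel values were shifted
 * up above); a 16-pixel MB row spans 64 quarter-pels, so round up. */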
2006 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2007
2008 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2009 unhandled:
2010 return s->mb_height-1;
2011 }
2012
2013 /* put block[] to dest[] */
2014 static inline void put_dct(MpegEncContext *s,
2015 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2016 {
2017 s->dct_unquantize_intra(s, block, i, qscale);
2018 s->dsp.idct_put (dest, line_size, block);
2019 }
2020
2021 /* add block[] to dest[] */
2022 static inline void add_dct(MpegEncContext *s,
2023 int16_t *block, int i, uint8_t *dest, int line_size)
2024 {
2025 if (s->block_last_index[i] >= 0) {
2026 s->dsp.idct_add (dest, line_size, block);
2027 }
2028 }
2029
2030 static inline void add_dequant_dct(MpegEncContext *s,
2031 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2032 {
2033 if (s->block_last_index[i] >= 0) {
2034 s->dct_unquantize_inter(s, block, i, qscale);
2035
2036 s->dsp.idct_add (dest, line_size, block);
2037 }
2038 }
2039
2040 /**
2041 * Clean dc, ac, coded_block for the current non-intra MB.
2042 */
2043 void ff_clean_intra_table_entries(MpegEncContext *s)
2044 {
2045 int wrap = s->b8_stride;
2046 int xy = s->block_index[0];
2047
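/* Reset the DC predictors of the four luma 8x8 blocks to 1024
 * (mid-level 128 times the default DC scale of 8). */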
2048 s->dc_val[0][xy ] =
2049 s->dc_val[0][xy + 1 ] =
2050 s->dc_val[0][xy + wrap] =
2051 s->dc_val[0][xy + 1 + wrap] = 1024;
2052 /* ac pred */
2053 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2054 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2055 if (s->msmpeg4_version>=3) {
2056 s->coded_block[xy ] =
2057 s->coded_block[xy + 1 ] =
2058 s->coded_block[xy + wrap] =
2059 s->coded_block[xy + 1 + wrap] = 0;
2060 }
2061 /* chroma */
2062 wrap = s->mb_stride;
2063 xy = s->mb_x + s->mb_y * wrap;
2064 s->dc_val[1][xy] =
2065 s->dc_val[2][xy] = 1024;
2066 /* ac pred */
2067 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2068 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2069
2070 s->mbintra_table[xy]= 0;
2071 }
2072
2073 /* Generic function called after a macroblock has been parsed by the
2074 decoder or encoded by the encoder.
2075
2076 Important variables used:
2077 s->mb_intra : true if intra macroblock
2078 s->mv_dir : motion vector direction
2079 s->mv_type : motion vector type
2080 s->mv : motion vector
2081 s->interlaced_dct : true if an interlaced DCT is used (MPEG-2)
2082 */
2083 static av_always_inline
2084 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2085 int is_mpeg12)
2086 {
2087 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2088
2089 #if FF_API_XVMC
2090 FF_DISABLE_DEPRECATION_WARNINGS
2091 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2092 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2093 return;
2094 }
2095 FF_ENABLE_DEPRECATION_WARNINGS
2096 #endif /* FF_API_XVMC */
2097
2098 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2099 /* print DCT coefficients */
2100 int i,j;
2101 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2102 for(i=0; i<6; i++){
2103 for(j=0; j<64; j++){
2104 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2105 }
2106 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2107 }
2108 }
2109
2110 s->current_picture.qscale_table[mb_xy] = s->qscale;
2111
2112 /* update DC predictors for P macroblocks */
2113 if (!s->mb_intra) {
2114 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2115 if(s->mbintra_table[mb_xy])
2116 ff_clean_intra_table_entries(s);
2117 } else {
2118 s->last_dc[0] =
2119 s->last_dc[1] =
2120 s->last_dc[2] = 128 << s->intra_dc_precision;
2121 }
2122 }
2123 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2124 s->mbintra_table[mb_xy]=1;
2125
2126 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2127 uint8_t *dest_y, *dest_cb, *dest_cr;
2128 int dct_linesize, dct_offset;
2129 op_pixels_func (*op_pix)[4];
2130 qpel_mc_func (*op_qpix)[16];
2131 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2132 const int uvlinesize = s->current_picture.f->linesize[1];
2133 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2134 const int block_size = 8;
2135
2136 /* avoid the copy if the macroblock was skipped in the last frame too */
2137 /* track this only while decoding, as the buffers may get slightly trashed during encoding */
2138 if(!s->encoding){
2139 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2140
2141 if (s->mb_skipped) {
2142 s->mb_skipped= 0;
2143 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2144 *mbskip_ptr = 1;
2145 } else if(!s->current_picture.reference) {
2146 *mbskip_ptr = 1;
2147 } else{
2148 *mbskip_ptr = 0; /* not skipped */
2149 }
2150 }
2151
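/* With an interlaced DCT the luma blocks are field based: double the stride
 * and place the second pair of blocks one line down (the other field)
 * instead of eight lines down. */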
2152 dct_linesize = linesize << s->interlaced_dct;
2153 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2154
2155 if(readable){
2156 dest_y= s->dest[0];
2157 dest_cb= s->dest[1];
2158 dest_cr= s->dest[2];
2159 }else{
2160 dest_y = s->b_scratchpad;
2161 dest_cb= s->b_scratchpad+16*linesize;
2162 dest_cr= s->b_scratchpad+32*linesize;
2163 }
2164
2165 if (!s->mb_intra) {
2166 /* motion handling */
2167 /* only when decoding, or when more than one mb_type was tried: otherwise motion compensation was already done */
2168 if(!s->encoding){
2169
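/* With frame threading, wait until the reference pictures have decoded
 * at least the lowest MB row that our motion vectors can reference. */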
2170 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2171 if (s->mv_dir & MV_DIR_FORWARD) {
2172 ff_thread_await_progress(&s->last_picture_ptr->tf,
2173 ff_MPV_lowest_referenced_row(s, 0),
2174 0);
2175 }
2176 if (s->mv_dir & MV_DIR_BACKWARD) {
2177 ff_thread_await_progress(&s->next_picture_ptr->tf,
2178 ff_MPV_lowest_referenced_row(s, 1),
2179 0);
2180 }
2181 }
2182
2183 op_qpix= s->me.qpel_put;
2184 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2185 op_pix = s->hdsp.put_pixels_tab;
2186 }else{
2187 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2188 }
2189 if (s->mv_dir & MV_DIR_FORWARD) {
2190 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2191 op_pix = s->hdsp.avg_pixels_tab;
2192 op_qpix= s->me.qpel_avg;
2193 }
2194 if (s->mv_dir & MV_DIR_BACKWARD) {
2195 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2196 }
2197 }
2198
2199 /* skip dequant / idct if we are really late ;) */
2200 if(s->avctx->skip_idct){
2201 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2202 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2203 || s->avctx->skip_idct >= AVDISCARD_ALL)
2204 goto skip_idct;
2205 }
2206
2207 /* add dct residue */
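/* The codecs listed below already dequantize the coefficients while
 * parsing the bitstream, so only the remaining ones need the
 * dequantizing add_dequant_dct() path here. */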
2208 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2209 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2210 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2211 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2212 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2213 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2214
2215 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2216 if (s->chroma_y_shift){
2217 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2218 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2219 }else{
2220 dct_linesize >>= 1;
2221 dct_offset >>=1;
2222 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2223 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2224 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2225 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2226 }
2227 }
2228 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2229 add_dct(s, block[0], 0, dest_y , dct_linesize);
2230 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2231 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2232 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2233
2234 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2235 if(s->chroma_y_shift){//Chroma420
2236 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2237 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2238 }else{
2239 //chroma422
2240 dct_linesize = uvlinesize << s->interlaced_dct;
2241 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2242
2243 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2244 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2245 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2246 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2247 if(!s->chroma_x_shift){//Chroma444
2248 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2249 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2250 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2251 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2252 }
2253 }
2254 } // end !CODEC_FLAG_GRAY
2255 }
2256 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2257 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2258 }
2259 } else {
2260 /* intra block: no motion compensation, only the (dequantized) DCT output */
2261 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2262 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2263 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2264 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2265 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2266
2267 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2268 if(s->chroma_y_shift){
2269 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2270 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2271 }else{
2272 dct_offset >>=1;
2273 dct_linesize >>=1;
2274 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2275 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2276 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2277 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2278 }
2279 }
2280 }else{
2281 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2282 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2283 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2284 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2285
2286 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2287 if(s->chroma_y_shift){
2288 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2289 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2290 }else{
2291
2292 dct_linesize = uvlinesize << s->interlaced_dct;
2293 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2294
2295 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2296 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2297 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2298 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2299 if(!s->chroma_x_shift){//Chroma444
2300 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2301 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2302 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2303 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2304 }
2305 }
2306 } // end !CODEC_FLAG_GRAY
2307 }
2308 }
2309 skip_idct:
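/* If the macroblock was rendered into the scratchpad, copy it to its
 * final destination now. */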
2310 if(!readable){
2311 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2312 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2313 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2314 }
2315 }
2316 }
2317
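/* When not optimizing for size, dispatch to a dedicated MPEG-1/2 variant of
 * the always-inlined worker so the compiler can drop the unused branches. */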
2318 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2319 #if !CONFIG_SMALL
2320 if(s->out_format == FMT_MPEG1) {
2321 MPV_decode_mb_internal(s, block, 1);
2322 } else
2323 #endif
2324 MPV_decode_mb_internal(s, block, 0);
2325 }
2326
2327 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2328 {
2329 ff_draw_horiz_band(s->avctx, s->current_picture.f,
2330 s->last_picture.f, y, h, s->picture_structure,
2331 s->first_field, s->low_delay);
2332 }
2333
2334 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2335 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2336 const int uvlinesize = s->current_picture.f->linesize[1];
2337 const int mb_size= 4;
2338
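/* block_index[0..3] address the four luma 8x8 blocks of the current MB in
 * b8 geometry, [4] and [5] the two chroma blocks; dest[] starts one MB to
 * the left because it is advanced before each macroblock is processed. */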
2339 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2340 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2341 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2342 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2343 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2344 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2345 //block_index is not used by mpeg2, so it is not affected by chroma_format
2346
2347 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
2348 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2349 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2350
2351 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2352 {
2353 if(s->picture_structure==PICT_FRAME){
2354 s->dest[0] += s->mb_y * linesize << mb_size;
2355 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2356 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2357 }else{
2358 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2359 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2360 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2361 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2362 }
2363 }
2364 }
2365
2366 /**
2367 * Permute an 8x8 block.
2368 * @param block the block which will be permuted according to the given permutation vector
2369 * @param permutation the permutation vector
2370 * @param last the last non-zero coefficient in scantable order, used to speed the permutation up
2371 * @param scantable the scantable that was used; it is only used to speed the permutation up, the block is not
2372 * (inverse) permuted to scantable order!
2373 */
2374 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2375 {
2376 int i;
2377 int16_t temp[64];
2378
2379 if(last<=0) return;
2380 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
2381
2382 for(i=0; i<=last; i++){
2383 const int j= scantable[i];
2384 temp[j]= block[j];
2385 block[j]=0;
2386 }
2387
2388 for(i=0; i<=last; i++){
2389 const int j= scantable[i];
2390 const int perm_j= permutation[j];
2391 block[perm_j]= temp[j];
2392 }
2393 }
2394
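/* Drop all buffered pictures and reset the parser state; called when the
 * decoder is flushed, e.g. when seeking. */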
2395 void ff_mpeg_flush(AVCodecContext *avctx){
2396 int i;
2397 MpegEncContext *s = avctx->priv_data;
2398
2399 if(s==NULL || s->picture==NULL)
2400 return;
2401
2402 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2403 ff_mpeg_unref_picture(s, &s->picture[i]);
2404 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2405
2406 ff_mpeg_unref_picture(s, &s->current_picture);
2407 ff_mpeg_unref_picture(s, &s->last_picture);
2408 ff_mpeg_unref_picture(s, &s->next_picture);
2409
2410 s->mb_x= s->mb_y= 0;
2411
2412 s->parse_context.state= -1;
2413 s->parse_context.frame_start_found= 0;
2414 s->parse_context.overread= 0;
2415 s->parse_context.overread_index= 0;
2416 s->parse_context.index= 0;
2417 s->parse_context.last_index= 0;
2418 s->bitstream_buffer_size=0;
2419 s->pp_time=0;
2420 }
2421
2422 /**
2423 * set qscale and update qscale dependent variables.
2424 */
2425 void ff_set_qscale(MpegEncContext * s, int qscale)
2426 {
2427 if (qscale < 1)
2428 qscale = 1;
2429 else if (qscale > 31)
2430 qscale = 31;
2431
2432 s->qscale = qscale;
2433 s->chroma_qscale= s->chroma_qscale_table[qscale];
2434
2435 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2436 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2437 }
2438
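/* Report row-level progress for frame-threaded decoding; B-frames,
 * partitioned frames and frames with errors are not reported per row. */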
2439 void ff_MPV_report_decode_progress(MpegEncContext *s)
2440 {
2441 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2442 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2443 }
2444
2445 #if CONFIG_ERROR_RESILIENCE
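/* Export the fields needed by the error concealment code from a Picture
 * into an ERPicture. */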
2446 void ff_mpeg_set_erpic(ERPicture *dst, Picture *src)
2447 {
2448 int i;
2449
2450 if (!src)
2451 return;
2452
2453 dst->f = src->f;
2454 dst->tf = &src->tf;
2455
2456 for (i = 0; i < 2; i++) {
2457 dst->motion_val[i] = src->motion_val[i];
2458 dst->ref_index[i] = src->ref_index[i];
2459 }
2460
2461 dst->mb_type = src->mb_type;
2462 dst->field_picture = src->field_picture;
2463 }
2464
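/* Hand the current, last and next pictures together with the relevant
 * timing information over to the error concealment code. */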
2465 void ff_mpeg_er_frame_start(MpegEncContext *s)
2466 {
2467 ERContext *er = &s->er;
2468
2469 ff_mpeg_set_erpic(&er->cur_pic, s->current_picture_ptr);
2470 ff_mpeg_set_erpic(&er->next_pic, s->next_picture_ptr);
2471 ff_mpeg_set_erpic(&er->last_pic, s->last_picture_ptr);
2472
2473 er->pp_time = s->pp_time;
2474 er->pb_time = s->pb_time;
2475 er->quarter_sample = s->quarter_sample;
2476 er->partitioned_frame = s->partitioned_frame;
2477
2478 ff_er_frame_start(er);
2479 }
2480 #endif /* CONFIG_ERROR_RESILIENCE */