mpegvideo: don't copy input_picture_number in update_thread_context()
[libav.git] / libavcodec / mpegvideo.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "avcodec.h"
35 #include "dsputil.h"
36 #include "internal.h"
37 #include "mathops.h"
38 #include "mpegvideo.h"
39 #include "mjpegenc.h"
40 #include "msmpeg4.h"
41 #include "xvmc_internal.h"
42 #include "thread.h"
43 #include <limits.h>
44
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
59
/* Identity luma->chroma qscale mapping, used when the codec does not
 * provide its own chroma quantizer translation table. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
65
/* MPEG-1 intra DC scale: constant 8 for every qscale value.
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table[] below. */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
77
/* Constant DC scale 4; entry 1 of ff_mpeg2_dc_scale_table[]
 * (presumably selected by the stream's intra DC precision — confirm
 * against the MPEG-2 header parsing code). */
static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
89
/* Constant DC scale 2; entry 2 of ff_mpeg2_dc_scale_table[]. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
101
/* Constant DC scale 1 (no scaling); entry 3 of ff_mpeg2_dc_scale_table[]. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
113
/* Dispatch table over the four DC scale variants above (scales 8/4/2/1). */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
120
/* Terminated pixel-format list for codecs that only support YUV 4:2:0. */
const enum AVPixelFormat ff_pixfmt_list_420[] = {
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
125
/* Error-resilience callback (ERContext.decode_mb): reconstruct one
 * macroblock using the motion/type information chosen by the error
 * concealment code. opaque is the owning MpegEncContext. */
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    MpegEncContext *s = opaque;

    /* Load the concealment decision into the context so that
     * ff_MPV_decode_mb() renders this macroblock as instructed. */
    s->mv_dir     = mv_dir;
    s->mv_type    = mv_type;
    s->mb_intra   = mb_intra;
    s->mb_skipped = mb_skipped;
    s->mb_x       = mb_x;
    s->mb_y       = mb_y;
    memcpy(s->mv, mv, sizeof(*mv));

    ff_init_block_index(s);
    ff_update_block_index(s);

    s->dsp.clear_blocks(s->block[0]);

    /* Point dest[] at this macroblock's pixels in the current picture;
     * chroma offsets are scaled by the pixel format's chroma shifts. */
    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    assert(ref == 0); // concealment only ever uses reference index 0 here
    ff_MPV_decode_mb(s, s->block);
}
152
/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    ff_dsputil_init(&s->dsp, s->avctx);
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    /* install the generic C dequantizers; the arch-specific init calls
     * below may replace them with optimized versions */
    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    if (ARCH_ARM)
        ff_MPV_common_init_arm(s);
    if (ARCH_BFIN)
        ff_MPV_common_init_bfin(s);
    if (ARCH_PPC)
        ff_MPV_common_init_ppc(s);
    if (ARCH_X86)
        ff_MPV_common_init_x86(s);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
193
/**
 * Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). me.temp/rd/b/obmc all alias the same
 * me.scratchpad allocation.
 * @return 0 on success, AVERROR(ENOMEM) on failure (edge_emu_buffer is
 *         freed again so the context stays consistent).
 */
int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
{
    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for  halfpel / 21x21 for  h264)
    // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
                      fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
                      fail)
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}
218
/**
 * Allocate a frame buffer for the given Picture and sanity-check the
 * strides the buffer provider returned.
 * @return 0 on success, a negative value on failure (the picture is
 *         unreferenced again on stride mismatches / scratch alloc failure)
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r, ret;

    pic->tf.f = &pic->f;
    /* WM image/screen codecs allocate buffers with internal dimensions /
     * colorspaces, so bypass the user get_buffer() callback for them. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        r = ff_thread_get_buffer(s->avctx, &pic->tf,
                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
    else {
        pic->f.width  = s->avctx->width;
        pic->f.height = s->avctx->height;
        pic->f.format = s->avctx->pix_fmt;
        r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
    }

    if (r < 0 || !pic->f.buf[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
               r, pic->f.data[0]);
        return -1;
    }

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_priv_buf) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }

    /* all pictures of a sequence must share the strides the context was
     * configured with; a change would invalidate precomputed offsets */
    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (!s->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        ff_mpeg_unref_picture(s, pic);
        return ret;
    }

    return 0;
}
282
283 static void free_picture_tables(Picture *pic)
284 {
285 int i;
286
287 av_buffer_unref(&pic->mb_var_buf);
288 av_buffer_unref(&pic->mc_mb_var_buf);
289 av_buffer_unref(&pic->mb_mean_buf);
290 av_buffer_unref(&pic->mbskip_table_buf);
291 av_buffer_unref(&pic->qscale_table_buf);
292 av_buffer_unref(&pic->mb_type_buf);
293
294 for (i = 0; i < 2; i++) {
295 av_buffer_unref(&pic->motion_val_buf[i]);
296 av_buffer_unref(&pic->ref_index_buf[i]);
297 }
298 }
299
/**
 * Allocate the per-picture side-data buffers: skip/qscale/mb_type always,
 * encoder statistics only when encoding, motion vectors / reference
 * indices for H.263-family output or when encoding.
 * @return 0 on success, AVERROR(ENOMEM) on failure; partially allocated
 *         buffers are left for the caller to clean up.
 */
static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
{
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    int i;


    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
                                             sizeof(uint32_t));
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);

    if (s->encoding) {
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
            return AVERROR(ENOMEM);
    }

    if (s->out_format == FMT_H263 || s->encoding) {
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        /* one motion_val/ref_index pair per prediction direction */
        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
        }
    }

    return 0;
}
337
/**
 * Make every allocated per-picture table buffer writable, un-sharing
 * any buffer still referenced by another Picture.
 * @return 0 on success, a negative AVERROR code on allocation failure.
 */
static int make_tables_writable(Picture *pic)
{
    int ret, i;
#define MAKE_WRITABLE(table) \
do {\
    if (pic->table &&\
       (ret = av_buffer_make_writable(&pic->table)) < 0)\
    return ret;\
} while (0)

    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);

    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
    }

    return 0;
}
362
/**
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    int i, ret;

    if (shared) {
        assert(pic->f.data[0]); // caller must have supplied pixel buffers
        pic->shared = 1;
    } else {
        assert(!pic->f.buf[0]);

        if (alloc_frame_buffer(s, pic) < 0)
            return -1;

        /* adopt the strides of the first allocated frame; later frames
         * are checked against these in alloc_frame_buffer() */
        s->linesize   = pic->f.linesize[0];
        s->uvlinesize = pic->f.linesize[1];
    }

    /* create the side-data tables, or un-share existing ones */
    if (!pic->qscale_table_buf)
        ret = alloc_picture_tables(s, pic);
    else
        ret = make_tables_writable(pic);
    if (ret < 0)
        goto fail;

    if (s->encoding) {
        pic->mb_var    = (uint16_t*)pic->mb_var_buf->data;
        pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
        pic->mb_mean   = pic->mb_mean_buf->data;
    }

    pic->mbskip_table = pic->mbskip_table_buf->data;
    /* offset past the leading padding (2 strides + 1) so index 0 is the
     * first real macroblock */
    pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;

    if (pic->motion_val_buf[0]) {
        for (i = 0; i < 2; i++) {
            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
        }
    }

    return 0;
fail:
    av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
    ff_mpeg_unref_picture(s, pic);
    free_picture_tables(pic);
    return AVERROR(ENOMEM);
}
415
/**
 * Deallocate a picture: drop the frame reference, the hwaccel private
 * buffer and (when flagged for realloc) the side-data tables, then wipe
 * every Picture field located after mb_mean in the struct layout.
 */
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
{
    /* everything from the field following mb_mean onwards gets zeroed;
     * the buffer pointers before it are released explicitly below */
    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);

    pic->tf.f = &pic->f;
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->tf);
    else
        av_frame_unref(&pic->f);

    av_buffer_unref(&pic->hwaccel_priv_buf);

    if (pic->needs_realloc)
        free_picture_tables(pic);

    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}
440
/**
 * Make dst's side-data table references match src's: re-reference a
 * buffer only when the underlying storage differs, then mirror the
 * derived data pointers.
 * @return 0 on success; AVERROR(ENOMEM) on a failed ref, in which case
 *         all of dst's tables are freed.
 */
static int update_picture_tables(Picture *dst, Picture *src)
{
     int i;

#define UPDATE_TABLE(table)\
do {\
    if (src->table &&\
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
        av_buffer_unref(&dst->table);\
        dst->table = av_buffer_ref(src->table);\
        if (!dst->table) {\
            free_picture_tables(dst);\
            return AVERROR(ENOMEM);\
        }\
    }\
} while (0)

    UPDATE_TABLE(mb_var_buf);
    UPDATE_TABLE(mc_mb_var_buf);
    UPDATE_TABLE(mb_mean_buf);
    UPDATE_TABLE(mbskip_table_buf);
    UPDATE_TABLE(qscale_table_buf);
    UPDATE_TABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        UPDATE_TABLE(motion_val_buf[i]);
        UPDATE_TABLE(ref_index_buf[i]);
    }

    /* the plain pointers are views into the buffers above and can be
     * copied directly */
    dst->mb_var        = src->mb_var;
    dst->mc_mb_var     = src->mc_mb_var;
    dst->mb_mean       = src->mb_mean;
    dst->mbskip_table  = src->mbskip_table;
    dst->qscale_table  = src->qscale_table;
    dst->mb_type       = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    return 0;
}
482
483 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
484 {
485 int ret;
486
487 av_assert0(!dst->f.buf[0]);
488 av_assert0(src->f.buf[0]);
489
490 src->tf.f = &src->f;
491 dst->tf.f = &dst->f;
492 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
493 if (ret < 0)
494 goto fail;
495
496 ret = update_picture_tables(dst, src);
497 if (ret < 0)
498 goto fail;
499
500 if (src->hwaccel_picture_private) {
501 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
502 if (!dst->hwaccel_priv_buf)
503 goto fail;
504 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
505 }
506
507 dst->field_picture = src->field_picture;
508 dst->mb_var_sum = src->mb_var_sum;
509 dst->mc_mb_var_sum = src->mc_mb_var_sum;
510 dst->b_frame_score = src->b_frame_score;
511 dst->needs_realloc = src->needs_realloc;
512 dst->reference = src->reference;
513 dst->shared = src->shared;
514
515 return 0;
516 fail:
517 ff_mpeg_unref_picture(s, dst);
518 return ret;
519 }
520
521 static void exchange_uv(MpegEncContext *s)
522 {
523 int16_t (*tmp)[64];
524
525 tmp = s->pblocks[4];
526 s->pblocks[4] = s->pblocks[5];
527 s->pblocks[5] = tmp;
528 }
529
/**
 * Allocate the per-duplicate-context state: DCT block storage, encoder
 * motion-estimation maps and H.263 AC prediction values. The
 * linesize-dependent scratch buffers are deliberately left NULL; they
 * are allocated later by ff_mpv_frame_size_alloc().
 * @return 0 on success, -1 on allocation failure (buffers are freed
 *         through ff_MPV_common_end())
 */
static int init_duplicate_context(MpegEncContext *s)
{
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }
    if (s->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(s); // VCR2 stores chroma blocks swapped

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_MPV_common_end()
}
576
577 static void free_duplicate_context(MpegEncContext *s)
578 {
579 if (s == NULL)
580 return;
581
582 av_freep(&s->edge_emu_buffer);
583 av_freep(&s->me.scratchpad);
584 s->me.temp =
585 s->rd_scratchpad =
586 s->b_scratchpad =
587 s->obmc_scratchpad = NULL;
588
589 av_freep(&s->dct_error_sum);
590 av_freep(&s->me.map);
591 av_freep(&s->me.score_map);
592 av_freep(&s->blocks);
593 av_freep(&s->ac_val_base);
594 s->block = NULL;
595 }
596
/* Save the per-thread pointers and state of src into bak so they survive
 * the wholesale memcpy() done by ff_update_duplicate_context(). The field
 * list here must cover every member that is private to a duplicate
 * context. */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
623
/**
 * Synchronize a duplicate (slice-thread) context with the main one by
 * copying the whole struct, then restoring dst's private pointers that
 * were saved beforehand.
 * @return 0 on success, a negative AVERROR code on scratch alloc failure
 */
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks were copied from src and still point into src->block;
     * re-aim them at dst's own block storage */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (dst->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(dst);
    if (!dst->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
    return 0;
}
648
649 int ff_mpeg_update_thread_context(AVCodecContext *dst,
650 const AVCodecContext *src)
651 {
652 int i, ret;
653 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
654
655 if (dst == src || !s1->context_initialized)
656 return 0;
657
658 // FIXME can parameters change on I-frames?
659 // in that case dst may need a reinit
660 if (!s->context_initialized) {
661 memcpy(s, s1, sizeof(MpegEncContext));
662
663 s->avctx = dst;
664 s->bitstream_buffer = NULL;
665 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
666
667 ff_MPV_common_init(s);
668 }
669
670 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
671 int err;
672 s->context_reinit = 0;
673 s->height = s1->height;
674 s->width = s1->width;
675 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
676 return err;
677 }
678
679 s->avctx->coded_height = s1->avctx->coded_height;
680 s->avctx->coded_width = s1->avctx->coded_width;
681 s->avctx->width = s1->avctx->width;
682 s->avctx->height = s1->avctx->height;
683
684 s->coded_picture_number = s1->coded_picture_number;
685 s->picture_number = s1->picture_number;
686
687 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
688 ff_mpeg_unref_picture(s, &s->picture[i]);
689 if (s1->picture[i].f.buf[0] &&
690 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
691 return ret;
692 }
693
694 #define UPDATE_PICTURE(pic)\
695 do {\
696 ff_mpeg_unref_picture(s, &s->pic);\
697 if (s1->pic.f.buf[0])\
698 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
699 else\
700 ret = update_picture_tables(&s->pic, &s1->pic);\
701 if (ret < 0)\
702 return ret;\
703 } while (0)
704
705 UPDATE_PICTURE(current_picture);
706 UPDATE_PICTURE(last_picture);
707 UPDATE_PICTURE(next_picture);
708
709 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
710 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
711 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
712
713 // Error/bug resilience
714 s->next_p_frame_damaged = s1->next_p_frame_damaged;
715 s->workaround_bugs = s1->workaround_bugs;
716
717 // MPEG4 timing info
718 memcpy(&s->last_time_base, &s1->last_time_base,
719 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
720 (char *) &s1->last_time_base);
721
722 // B-frame info
723 s->max_b_frames = s1->max_b_frames;
724 s->low_delay = s1->low_delay;
725 s->droppable = s1->droppable;
726
727 // DivX handling (doesn't work)
728 s->divx_packed = s1->divx_packed;
729
730 if (s1->bitstream_buffer) {
731 if (s1->bitstream_buffer_size +
732 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
733 av_fast_malloc(&s->bitstream_buffer,
734 &s->allocated_bitstream_buffer_size,
735 s1->allocated_bitstream_buffer_size);
736 s->bitstream_buffer_size = s1->bitstream_buffer_size;
737 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
738 s1->bitstream_buffer_size);
739 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
740 FF_INPUT_BUFFER_PADDING_SIZE);
741 }
742
743 // linesize dependend scratch buffer allocation
744 if (!s->edge_emu_buffer)
745 if (s1->linesize) {
746 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
747 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
748 "scratch buffers.\n");
749 return AVERROR(ENOMEM);
750 }
751 } else {
752 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
753 "be allocated due to unknown size.\n");
754 return AVERROR_BUG;
755 }
756
757 // MPEG2/interlacing info
758 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
759 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
760
761 if (!s1->first_field) {
762 s->last_pict_type = s1->pict_type;
763 if (s1->current_picture_ptr)
764 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
765
766 if (s1->pict_type != AV_PICTURE_TYPE_B) {
767 s->last_non_b_pict_type = s1->pict_type;
768 }
769 }
770
771 return 0;
772 }
773
774 /**
775 * Set the given MpegEncContext to common defaults
776 * (same for encoding and decoding).
777 * The changed fields will not depend upon the
778 * prior state of the MpegEncContext.
779 */
780 void ff_MPV_common_defaults(MpegEncContext *s)
781 {
782 s->y_dc_scale_table =
783 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
784 s->chroma_qscale_table = ff_default_chroma_qscale_table;
785 s->progressive_frame = 1;
786 s->progressive_sequence = 1;
787 s->picture_structure = PICT_FRAME;
788
789 s->coded_picture_number = 0;
790 s->picture_number = 0;
791 s->input_picture_number = 0;
792
793 s->picture_in_gop_number = 0;
794
795 s->f_code = 1;
796 s->b_code = 1;
797
798 s->slice_context_count = 1;
799 }
800
/**
 * Set the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    /* decoding currently needs no defaults beyond the common ones */
    ff_MPV_common_defaults(s);
}
810
/**
 * Wire up the error-resilience context: share the geometry and tables of
 * the MpegEncContext and allocate the ER status buffers.
 * @return 0 on success, AVERROR(ENOMEM) on failure (own buffers freed)
 */
static int init_er(MpegEncContext *s)
{
    ERContext *er = &s->er;
    int mb_array_size = s->mb_height * s->mb_stride;
    int i;

    er->avctx       = s->avctx;
    er->dsp         = &s->dsp;

    /* geometry mirrors the owning context */
    er->mb_index2xy = s->mb_index2xy;
    er->mb_num      = s->mb_num;
    er->mb_width    = s->mb_width;
    er->mb_height   = s->mb_height;
    er->mb_stride   = s->mb_stride;
    er->b8_stride   = s->b8_stride;

    er->er_temp_buffer     = av_malloc(s->mb_height * s->mb_stride);
    er->error_status_table = av_mallocz(mb_array_size);
    if (!er->er_temp_buffer || !er->error_status_table)
        goto fail;

    /* the skip/intra tables are shared, not owned by the ER context */
    er->mbskip_table  = s->mbskip_table;
    er->mbintra_table = s->mbintra_table;

    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
        er->dc_val[i] = s->dc_val[i];

    er->decode_mb = mpeg_er_decode_mb;
    er->opaque    = s;

    return 0;
fail:
    av_freep(&er->er_temp_buffer);
    av_freep(&er->error_status_table);
    return AVERROR(ENOMEM);
}
847
/**
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    /* macroblock geometry; the +1/+2 in the strides leave guard
     * rows/columns for code addressing neighbours of edge macroblocks */
    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overriden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    /* sizes of the luma (y) and combined chroma (c) prediction planes */
    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2   * c_size;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                      fail); // error ressilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] =
        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        /* the working pointers skip the leading guard row/column */
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
                                   s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
                                   s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
                          sizeof(uint16_t), fail); // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
                          sizeof(int), fail);

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);

    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
                                  mb_array_size * 2 * sizeof(uint8_t), fail);
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
                                  mv_table_size * 2 * sizeof(int16_t), fail);
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
                                            + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
                              mb_array_size * 2 * sizeof(uint8_t), fail);
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
                          mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
                          mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
                          yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024; // neutral DC prediction value

    }

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for  a quicker mpeg4 slice_end detection

    return init_er(s);
fail:
    return AVERROR(ENOMEM);
}
987
988 /**
989 * init common structure for both encoder and decoder.
990 * this assumes that some variables like width/height are already set
991 */
992 av_cold int ff_MPV_common_init(MpegEncContext *s)
993 {
994 int i;
995 int nb_slices = (HAVE_THREADS &&
996 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
997 s->avctx->thread_count : 1;
998
999 if (s->encoding && s->avctx->slices)
1000 nb_slices = s->avctx->slices;
1001
1002 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1003 s->mb_height = (s->height + 31) / 32 * 2;
1004 else
1005 s->mb_height = (s->height + 15) / 16;
1006
1007 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1008 av_log(s->avctx, AV_LOG_ERROR,
1009 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1010 return -1;
1011 }
1012
1013 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1014 int max_slices;
1015 if (s->mb_height)
1016 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1017 else
1018 max_slices = MAX_THREADS;
1019 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1020 " reducing to %d\n", nb_slices, max_slices);
1021 nb_slices = max_slices;
1022 }
1023
1024 if ((s->width || s->height) &&
1025 av_image_check_size(s->width, s->height, 0, s->avctx))
1026 return -1;
1027
1028 ff_dct_common_init(s);
1029
1030 s->flags = s->avctx->flags;
1031 s->flags2 = s->avctx->flags2;
1032
1033 /* set chroma shifts */
1034 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1035 &s->chroma_x_shift,
1036 &s->chroma_y_shift);
1037
1038 /* convert fourcc to upper case */
1039 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1040
1041 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1042
1043 if (s->width && s->height) {
1044 s->avctx->coded_frame = &s->current_picture.f;
1045
1046 if (s->encoding) {
1047 if (s->msmpeg4_version) {
1048 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1049 2 * 2 * (MAX_LEVEL + 1) *
1050 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1051 }
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1053
1054 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
1055 64 * 32 * sizeof(int), fail);
1056 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
1057 64 * 32 * sizeof(int), fail);
1058 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
1059 64 * 32 * 2 * sizeof(uint16_t), fail);
1060 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
1061 64 * 32 * 2 * sizeof(uint16_t), fail);
1062 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
1063 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1064 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
1065 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1066
1067 if (s->avctx->noise_reduction) {
1068 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
1069 2 * 64 * sizeof(uint16_t), fail);
1070 }
1071 }
1072 }
1073
1074 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1075 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1076 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1077 avcodec_get_frame_defaults(&s->picture[i].f);
1078 }
1079 memset(&s->next_picture, 0, sizeof(s->next_picture));
1080 memset(&s->last_picture, 0, sizeof(s->last_picture));
1081 memset(&s->current_picture, 0, sizeof(s->current_picture));
1082 avcodec_get_frame_defaults(&s->next_picture.f);
1083 avcodec_get_frame_defaults(&s->last_picture.f);
1084 avcodec_get_frame_defaults(&s->current_picture.f);
1085
1086 if (s->width && s->height) {
1087 if (init_context_frame(s))
1088 goto fail;
1089
1090 s->parse_context.state = -1;
1091 }
1092
1093 s->context_initialized = 1;
1094 s->thread_context[0] = s;
1095
1096 if (s->width && s->height) {
1097 if (nb_slices > 1) {
1098 for (i = 1; i < nb_slices; i++) {
1099 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1100 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1101 }
1102
1103 for (i = 0; i < nb_slices; i++) {
1104 if (init_duplicate_context(s->thread_context[i]) < 0)
1105 goto fail;
1106 s->thread_context[i]->start_mb_y =
1107 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1108 s->thread_context[i]->end_mb_y =
1109 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1110 }
1111 } else {
1112 if (init_duplicate_context(s) < 0)
1113 goto fail;
1114 s->start_mb_y = 0;
1115 s->end_mb_y = s->mb_height;
1116 }
1117 s->slice_context_count = nb_slices;
1118 }
1119
1120 return 0;
1121 fail:
1122 ff_MPV_common_end(s);
1123 return -1;
1124 }
1125
1126 /**
1127 * Frees and resets MpegEncContext fields depending on the resolution.
1128 * Is used during resolution changes to avoid a full reinitialization of the
1129 * codec.
1130 */
1131 static int free_context_frame(MpegEncContext *s)
1132 {
1133 int i, j, k;
1134
1135 av_freep(&s->mb_type);
1136 av_freep(&s->p_mv_table_base);
1137 av_freep(&s->b_forw_mv_table_base);
1138 av_freep(&s->b_back_mv_table_base);
1139 av_freep(&s->b_bidir_forw_mv_table_base);
1140 av_freep(&s->b_bidir_back_mv_table_base);
1141 av_freep(&s->b_direct_mv_table_base);
1142 s->p_mv_table = NULL;
1143 s->b_forw_mv_table = NULL;
1144 s->b_back_mv_table = NULL;
1145 s->b_bidir_forw_mv_table = NULL;
1146 s->b_bidir_back_mv_table = NULL;
1147 s->b_direct_mv_table = NULL;
1148 for (i = 0; i < 2; i++) {
1149 for (j = 0; j < 2; j++) {
1150 for (k = 0; k < 2; k++) {
1151 av_freep(&s->b_field_mv_table_base[i][j][k]);
1152 s->b_field_mv_table[i][j][k] = NULL;
1153 }
1154 av_freep(&s->b_field_select_table[i][j]);
1155 av_freep(&s->p_field_mv_table_base[i][j]);
1156 s->p_field_mv_table[i][j] = NULL;
1157 }
1158 av_freep(&s->p_field_select_table[i]);
1159 }
1160
1161 av_freep(&s->dc_val_base);
1162 av_freep(&s->coded_block_base);
1163 av_freep(&s->mbintra_table);
1164 av_freep(&s->cbp_table);
1165 av_freep(&s->pred_dir_table);
1166
1167 av_freep(&s->mbskip_table);
1168
1169 av_freep(&s->er.error_status_table);
1170 av_freep(&s->er.er_temp_buffer);
1171 av_freep(&s->mb_index2xy);
1172 av_freep(&s->lambda_table);
1173 av_freep(&s->cplx_tab);
1174 av_freep(&s->bits_tab);
1175
1176 s->linesize = s->uvlinesize = 0;
1177
1178 return 0;
1179 }
1180
1181 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1182 {
1183 int i, err = 0;
1184
1185 if (s->slice_context_count > 1) {
1186 for (i = 0; i < s->slice_context_count; i++) {
1187 free_duplicate_context(s->thread_context[i]);
1188 }
1189 for (i = 1; i < s->slice_context_count; i++) {
1190 av_freep(&s->thread_context[i]);
1191 }
1192 } else
1193 free_duplicate_context(s);
1194
1195 if ((err = free_context_frame(s)) < 0)
1196 return err;
1197
1198 if (s->picture)
1199 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1200 s->picture[i].needs_realloc = 1;
1201 }
1202
1203 s->last_picture_ptr =
1204 s->next_picture_ptr =
1205 s->current_picture_ptr = NULL;
1206
1207 // init
1208 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1209 s->mb_height = (s->height + 31) / 32 * 2;
1210 else
1211 s->mb_height = (s->height + 15) / 16;
1212
1213 if ((s->width || s->height) &&
1214 av_image_check_size(s->width, s->height, 0, s->avctx))
1215 return AVERROR_INVALIDDATA;
1216
1217 if ((err = init_context_frame(s)))
1218 goto fail;
1219
1220 s->thread_context[0] = s;
1221
1222 if (s->width && s->height) {
1223 int nb_slices = s->slice_context_count;
1224 if (nb_slices > 1) {
1225 for (i = 1; i < nb_slices; i++) {
1226 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1227 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1228 }
1229
1230 for (i = 0; i < nb_slices; i++) {
1231 if (init_duplicate_context(s->thread_context[i]) < 0)
1232 goto fail;
1233 s->thread_context[i]->start_mb_y =
1234 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1235 s->thread_context[i]->end_mb_y =
1236 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1237 }
1238 } else {
1239 if (init_duplicate_context(s) < 0)
1240 goto fail;
1241 s->start_mb_y = 0;
1242 s->end_mb_y = s->mb_height;
1243 }
1244 s->slice_context_count = nb_slices;
1245 }
1246
1247 return 0;
1248 fail:
1249 ff_MPV_common_end(s);
1250 return err;
1251 }
1252
/* init common structure for both encoder and decoder */
/* Full teardown counterpart of ff_MPV_common_init(): frees slice thread
 * contexts, parser/bitstream buffers, encoder tables, all pictures and the
 * resolution-dependent tables, then resets the init state. Safe to call on
 * a partially initialized context (used from the init fail paths). */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i;

    /* Free per-slice duplicate contexts; thread_context[0] is `s` itself
     * and must not be freed, hence the second loop starts at 1. */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    /* Encoder-only allocations (NULL and thus no-ops for decoders). */
    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* Release every picture slot before freeing the picture array. */
    if (s->picture) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            free_picture_tables(&s->picture[i]);
            ff_mpeg_unref_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    /* The embedded Picture copies hold their own references/tables. */
    free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s, &s->last_picture);
    free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s, &s->current_picture);
    free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s, &s->next_picture);
    free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s, &s->new_picture);

    free_context_frame(s);

    s->context_initialized      = 0;
    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;
    s->linesize = s->uvlinesize = 0;
}
1309
/* Build the max_level[], max_run[] and index_run[] lookup tables of an
 * RLTable from its table_run[]/table_level[] arrays. Entries [0] cover the
 * "not last coefficient" part of the table, entries [1] the "last" part.
 * When `static_store` is non-NULL the three tables for each half are packed
 * into one static buffer (at offsets 0, MAX_RUN+1 and MAX_RUN+MAX_LEVEL+2);
 * otherwise they are heap-allocated.
 * NOTE(review): the av_malloc() results are not checked; as the function
 * returns void an OOM here leads to a NULL write in memcpy — in practice
 * callers pass a static_store. TODO confirm all callers do. */
av_cold void ff_init_rl(RLTable *rl,
                        uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        /* First pass scans codes before rl->last ("not last coefficient"),
         * second pass scans the remainder ("last coefficient"). */
        if (last == 0) {
            start = 0;
            end   = rl->last;
        } else {
            start = rl->last;
            end   = rl->n;
        }

        /* index_run is preset to rl->n, which acts as "no code for this
         * run yet" so only the first (shortest) code per run is recorded. */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run,   0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run   = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
1361
1362 av_cold void ff_init_vlc_rl(RLTable *rl)
1363 {
1364 int i, q;
1365
1366 for (q = 0; q < 32; q++) {
1367 int qmul = q * 2;
1368 int qadd = (q - 1) | 1;
1369
1370 if (q == 0) {
1371 qmul = 1;
1372 qadd = 0;
1373 }
1374 for (i = 0; i < rl->vlc.table_size; i++) {
1375 int code = rl->vlc.table[i][0];
1376 int len = rl->vlc.table[i][1];
1377 int level, run;
1378
1379 if (len == 0) { // illegal code
1380 run = 66;
1381 level = MAX_LEVEL;
1382 } else if (len < 0) { // more bits needed
1383 run = 0;
1384 level = code;
1385 } else {
1386 if (code == rl->n) { // esc
1387 run = 66;
1388 level = 0;
1389 } else {
1390 run = rl->table_run[code] + 1;
1391 level = rl->table_level[code] * qmul + qadd;
1392 if (code >= rl->last) run += 192;
1393 }
1394 }
1395 rl->rl_vlc[q][i].len = len;
1396 rl->rl_vlc[q][i].level = level;
1397 rl->rl_vlc[q][i].run = run;
1398 }
1399 }
1400 }
1401
1402 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1403 {
1404 int i;
1405
1406 /* release non reference frames */
1407 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1408 if (!s->picture[i].reference &&
1409 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1410 ff_mpeg_unref_picture(s, &s->picture[i]);
1411 }
1412 }
1413 }
1414
1415 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1416 {
1417 if (pic->f.buf[0] == NULL)
1418 return 1;
1419 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1420 return 1;
1421 return 0;
1422 }
1423
1424 static int find_unused_picture(MpegEncContext *s, int shared)
1425 {
1426 int i;
1427
1428 if (shared) {
1429 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1430 if (s->picture[i].f.buf[0] == NULL)
1431 return i;
1432 }
1433 } else {
1434 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1435 if (pic_is_unused(s, &s->picture[i]))
1436 return i;
1437 }
1438 }
1439
1440 return AVERROR_INVALIDDATA;
1441 }
1442
1443 int ff_find_unused_picture(MpegEncContext *s, int shared)
1444 {
1445 int ret = find_unused_picture(s, shared);
1446
1447 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1448 if (s->picture[ret].needs_realloc) {
1449 s->picture[ret].needs_realloc = 0;
1450 free_picture_tables(&s->picture[ret]);
1451 ff_mpeg_unref_picture(s, &s->picture[ret]);
1452 avcodec_get_frame_defaults(&s->picture[ret].f);
1453 }
1454 }
1455 return ret;
1456 }
1457
1458 static void update_noise_reduction(MpegEncContext *s)
1459 {
1460 int intra, i;
1461
1462 for (intra = 0; intra < 2; intra++) {
1463 if (s->dct_count[intra] > (1 << 16)) {
1464 for (i = 0; i < 64; i++) {
1465 s->dct_error_sum[intra][i] >>= 1;
1466 }
1467 s->dct_count[intra] >>= 1;
1468 }
1469
1470 for (i = 0; i < 64; i++) {
1471 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1472 s->dct_count[intra] +
1473 s->dct_error_sum[intra][i] / 2) /
1474 (s->dct_error_sum[intra][i] + 1);
1475 }
1476 }
1477 }
1478
/**
 * generic function for encode/decode called after coding/decoding
 * the header and before a frame is coded/decoded.
 *
 * Rotates the last/next/current picture references according to the frame
 * type, allocates the current picture (and dummy references where the
 * stream starts without one), and selects the dequantizers.
 *
 * @return 0 on success, a negative value on failure
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i, ret;
    Picture *pic;
    s->mb_skipped = 0;

    /* mark & release old frames */
    /* A non-B frame will replace last_picture, so the previous one can be
     * dropped (unless it also serves as next_picture). */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f.buf[0]) {
        ff_mpeg_unref_picture(s, s->last_picture_ptr);
    }

    /* release forgotten pictures */
    /* if (mpeg124/h263) */
    if (!s->encoding) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            /* Reference pictures that are neither last nor next should not
             * exist; with frame threading they can appear legitimately. */
            if (&s->picture[i] != s->last_picture_ptr &&
                &s->picture[i] != s->next_picture_ptr &&
                s->picture[i].reference && !s->picture[i].needs_realloc) {
                if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                    av_log(avctx, AV_LOG_ERROR,
                           "releasing zombie picture\n");
                ff_mpeg_unref_picture(s, &s->picture[i]);
            }
        }
    }

    ff_mpeg_unref_picture(s, &s->current_picture);

    if (!s->encoding) {
        ff_release_unused_pictures(s, 1);

        if (s->current_picture_ptr &&
            s->current_picture_ptr->f.buf[0] == NULL) {
            // we already have a unused image
            // (maybe it was set before reading the header)
            pic = s->current_picture_ptr;
        } else {
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            pic = &s->picture[i];
        }

        /* reference == 3 marks a full-frame reference; B frames and
         * droppable frames are never referenced. */
        pic->reference = 0;
        if (!s->droppable) {
            if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->reference = 3;
        }

        pic->f.coded_picture_number = s->coded_picture_number++;

        if (ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr = pic;
        // FIXME use only the vars from current_pic
        s->current_picture_ptr->f.top_field_first = s->top_field_first;
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* For field pictures, top_field_first follows from which field
             * arrives first rather than from the bitstream flag. */
            if (s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->f.top_field_first =
                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                     !s->progressive_sequence;
        s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->f.pict_type = s->pict_type;
    // if (s->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* Rotate references: the previous "next" becomes "last"; a non-droppable
     * non-B frame becomes the new "next". */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
            s->pict_type, s->droppable);

    /* No usable last picture but one is needed (stream did not start on an
     * I frame, or field-coded keyframe): synthesize a gray dummy frame. */
    if ((s->last_picture_ptr == NULL ||
         s->last_picture_ptr->f.buf[0] == NULL) &&
        (s->pict_type != AV_PICTURE_TYPE_I ||
         s->picture_structure != PICT_FRAME)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");
        else if (s->picture_structure != PICT_FRAME)
            av_log(avctx, AV_LOG_INFO,
                   "allocate dummy last picture for field based first keyframe\n");

        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->last_picture_ptr = &s->picture[i];
        if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }

        /* Fill with black luma / neutral (0x80) chroma. */
        memset(s->last_picture_ptr->f.data[0], 0,
               avctx->height * s->last_picture_ptr->f.linesize[0]);
        memset(s->last_picture_ptr->f.data[1], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f.linesize[1]);
        memset(s->last_picture_ptr->f.data[2], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f.linesize[2]);

        /* Mark both fields fully decoded for frame threading. */
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    }
    /* Same for a missing next picture when a B frame needs one. */
    if ((s->next_picture_ptr == NULL ||
         s->next_picture_ptr->f.buf[0] == NULL) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->next_picture_ptr = &s->picture[i];
        if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
    }

    /* Refresh the embedded last/next Picture copies from the pointers. */
    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->last_picture);
        if (s->last_picture_ptr->f.buf[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->next_picture);
        if (s->next_picture_ptr->f.buf[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    if (s->pict_type != AV_PICTURE_TYPE_I &&
        !(s->last_picture_ptr && s->last_picture_ptr->f.buf[0])) {
        av_log(s, AV_LOG_ERROR,
               "Non-reference picture received and no reference available\n");
        return AVERROR_INVALIDDATA;
    }

    /* Field pictures: address every second line by doubling the linesize
     * (and offsetting by one line for the bottom field). */
    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            }
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i]    *= 2;
            s->next_picture.f.linesize[i]    *= 2;
        }
    }

    s->err_recognition = avctx->err_recognition;

    /* set dequantizer, we can't do it during init as
     * it might change for mpeg4 and we can't do it in the header
     * decode as init is not called for mpeg4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);
    }

#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */

    return 0;
}
1697
/* generic function for encode/decode called after a
 * frame has been coded/decoded. */
/* Draws replicated edges for unrestricted motion vectors, updates
 * last-frame bookkeeping, releases encoder scratch pictures and reports
 * decode completion to waiting frame threads. */
void ff_MPV_frame_end(MpegEncContext *s)
{
    int i;

#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
    /* redraw edges for the frame if decoding didn't complete */
    // just to make sure that all data is rendered.
    /* NOTE: when FF_API_XVMC is disabled the `else` below disappears with
     * this #if block, and the following if-statement runs unconditionally
     * — the dangling else is intentional. */
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */
        /* Pad the picture borders so unrestricted MVs can read outside the
         * frame; only needed for reference frames without edge emulation. */
        if ((s->er.error_count || s->encoding) &&
            !s->avctx->hwaccel &&
            s->unrestricted_mv &&
            s->current_picture.reference &&
            !s->intra_only &&
            !(s->flags & CODEC_FLAG_EMU_EDGE)) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
            int hshift = desc->log2_chroma_w;
            int vshift = desc->log2_chroma_h;
            s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                              s->h_edge_pos, s->v_edge_pos,
                              EDGE_WIDTH, EDGE_WIDTH,
                              EDGE_TOP | EDGE_BOTTOM);
            s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                              s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                              EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                              EDGE_TOP | EDGE_BOTTOM);
            s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                              s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                              EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                              EDGE_TOP | EDGE_BOTTOM);
        }

    emms_c();

    /* Remember frame type and lambda for rate control / header decisions. */
    s->last_pict_type                 = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
            break;
        }
    }
    assert(i < MAX_PICTURE_COUNT);
#endif

    if (s->encoding) {
        /* release non-reference frames */
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            if (!s->picture[i].reference)
                ff_mpeg_unref_picture(s, &s->picture[i]);
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture,    0, sizeof(Picture));
    memset(&s->next_picture,    0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame = &s->current_picture_ptr->f;

    /* Unblock frame threads waiting on this picture. */
    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}
1772
/**
 * Print debugging info for the given picture.
 *
 * Depending on avctx->debug flags, prints per-macroblock skip counts
 * (FF_DEBUG_SKIP), quantizers (FF_DEBUG_QP) and a three-character type code
 * (FF_DEBUG_MB_TYPE) as an ASCII map, one row per MB row. The exact output
 * format is relied upon by users reading the debug log — do not change it.
 */
void ff_print_debug_info(MpegEncContext *s, Picture *p)
{
    AVFrame *pict;
    if (s->avctx->hwaccel || !p || !p->mb_type)
        return;
    pict = &p->f;

    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
        case AV_PICTURE_TYPE_I:
            av_log(s->avctx,AV_LOG_DEBUG,"I\n");
            break;
        case AV_PICTURE_TYPE_P:
            av_log(s->avctx,AV_LOG_DEBUG,"P\n");
            break;
        case AV_PICTURE_TYPE_B:
            av_log(s->avctx,AV_LOG_DEBUG,"B\n");
            break;
        case AV_PICTURE_TYPE_S:
            av_log(s->avctx,AV_LOG_DEBUG,"S\n");
            break;
        case AV_PICTURE_TYPE_SI:
            av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
            break;
        case AV_PICTURE_TYPE_SP:
            av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
            break;
        }
        for (y = 0; y < s->mb_height; y++) {
            for (x = 0; x < s->mb_width; x++) {
                if (s->avctx->debug & FF_DEBUG_SKIP) {
                    /* consecutive-skip count, clamped to one digit */
                    int count = s->mbskip_table[x + y * s->mb_stride];
                    if (count > 9)
                        count = 9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if (s->avctx->debug & FF_DEBUG_QP) {
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
                           p->qscale_table[x + y * s->mb_stride]);
                }
                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = p->mb_type[x + y * s->mb_stride];
                    // Type & MV direction
                    if (IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else {
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    // segmentation
                    if (IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");


                    if (IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }
}
1872
1873 /**
1874 * find the lowest MB row referenced in the MVs
1875 */
1876 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1877 {
1878 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1879 int my, off, i, mvs;
1880
1881 if (s->picture_structure != PICT_FRAME || s->mcsel)
1882 goto unhandled;
1883
1884 switch (s->mv_type) {
1885 case MV_TYPE_16X16:
1886 mvs = 1;
1887 break;
1888 case MV_TYPE_16X8:
1889 mvs = 2;
1890 break;
1891 case MV_TYPE_8X8:
1892 mvs = 4;
1893 break;
1894 default:
1895 goto unhandled;
1896 }
1897
1898 for (i = 0; i < mvs; i++) {
1899 my = s->mv[dir][i][1]<<qpel_shift;
1900 my_max = FFMAX(my_max, my);
1901 my_min = FFMIN(my_min, my);
1902 }
1903
1904 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1905
1906 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1907 unhandled:
1908 return s->mb_height-1;
1909 }
1910
1911 /* put block[] to dest[] */
1912 static inline void put_dct(MpegEncContext *s,
1913 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1914 {
1915 s->dct_unquantize_intra(s, block, i, qscale);
1916 s->dsp.idct_put (dest, line_size, block);
1917 }
1918
1919 /* add block[] to dest[] */
1920 static inline void add_dct(MpegEncContext *s,
1921 int16_t *block, int i, uint8_t *dest, int line_size)
1922 {
1923 if (s->block_last_index[i] >= 0) {
1924 s->dsp.idct_add (dest, line_size, block);
1925 }
1926 }
1927
1928 static inline void add_dequant_dct(MpegEncContext *s,
1929 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1930 {
1931 if (s->block_last_index[i] >= 0) {
1932 s->dct_unquantize_inter(s, block, i, qscale);
1933
1934 s->dsp.idct_add (dest, line_size, block);
1935 }
1936 }
1937
1938 /**
1939 * Clean dc, ac, coded_block for the current non-intra MB.
1940 */
1941 void ff_clean_intra_table_entries(MpegEncContext *s)
1942 {
1943 int wrap = s->b8_stride;
1944 int xy = s->block_index[0];
1945
1946 s->dc_val[0][xy ] =
1947 s->dc_val[0][xy + 1 ] =
1948 s->dc_val[0][xy + wrap] =
1949 s->dc_val[0][xy + 1 + wrap] = 1024;
1950 /* ac pred */
1951 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1952 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1953 if (s->msmpeg4_version>=3) {
1954 s->coded_block[xy ] =
1955 s->coded_block[xy + 1 ] =
1956 s->coded_block[xy + wrap] =
1957 s->coded_block[xy + 1 + wrap] = 0;
1958 }
1959 /* chroma */
1960 wrap = s->mb_stride;
1961 xy = s->mb_x + s->mb_y * wrap;
1962 s->dc_val[1][xy] =
1963 s->dc_val[2][xy] = 1024;
1964 /* ac pred */
1965 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1966 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1967
1968 s->mbintra_table[xy]= 0;
1969 }
1970
1971 /* generic function called after a macroblock has been parsed by the
1972 decoder or after it has been encoded by the encoder.
1973
1974 Important variables used:
1975 s->mb_intra : true if intra macroblock
1976 s->mv_dir : motion vector direction
1977 s->mv_type : motion vector type
1978 s->mv : motion vector
1979 s->interlaced_dct : true if interlaced dct used (mpeg2)
1980 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                            int is_mpeg12)
{
    /* index of the current macroblock in the per-MB tables */
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* print DCT coefficients */
       int i,j;
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            /* reset DC predictors to the mid-range value */
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
        const int block_size = 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        /* for interlaced DCT, consecutive block rows are one field apart */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* reconstruct into a scratch buffer; copied out after skip_idct */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){

                /* with frame threading, wait until the reference rows we need
                 * have been decoded by the other thread */
                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->tf,
                                                 ff_MPV_lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->tf,
                                                 ff_MPV_lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                op_qpix= s->me.qpel_put;
                if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                    op_pix = s->hdsp.put_pixels_tab;
                }else{
                    op_pix = s->hdsp.put_no_rnd_pixels_tab;
                }
                if (s->mv_dir & MV_DIR_FORWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                    /* backward prediction is averaged on top of the forward one */
                    op_pix = s->hdsp.avg_pixels_tab;
                    op_qpix= s->me.qpel_avg;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* 4:2:2 — chroma uses half the luma linesize/offset */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                /* blocks are already dequantized; only add the IDCT output */
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* blocks are already dequantized; IDCT straight into the picture */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        if(!readable){
            /* copy the scratch reconstruction to the real destination */
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
2215
2216 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2217 #if !CONFIG_SMALL
2218 if(s->out_format == FMT_MPEG1) {
2219 MPV_decode_mb_internal(s, block, 1);
2220 } else
2221 #endif
2222 MPV_decode_mb_internal(s, block, 0);
2223 }
2224
/**
 * Draw edges on a completed band of the current picture and pass it to the
 * user's draw_horiz_band callback, if any.
 * @param h is the normal height, this will be reduced automatically if needed for the last row
 */
void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
                        Picture *last, int y, int h, int picture_structure,
                        int first_field, int draw_edges, int low_delay,
                        int v_edge_pos, int h_edge_pos)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    int hshift = desc->log2_chroma_w;
    int vshift = desc->log2_chroma_h;
    const int field_pic = picture_structure != PICT_FRAME;
    if(field_pic){
        /* a field covers every second line; scale to frame coordinates */
        h <<= 1;
        y <<= 1;
    }

    /* extend the borders of reference pictures so motion compensation may
     * read outside the visible area */
    if (!avctx->hwaccel &&
        draw_edges &&
        cur->reference &&
        !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
        int *linesize = cur->f.linesize;
        int sides = 0, edge_h;
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= v_edge_pos)
            sides |= EDGE_BOTTOM;

        /* clip the band height at the bottom of the picture */
        edge_h= FFMIN(h, v_edge_pos - y);

        dsp->draw_edges(cur->f.data[0] + y * linesize[0],
                        linesize[0], h_edge_pos, edge_h,
                        EDGE_WIDTH, EDGE_WIDTH, sides);
        dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
                        linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
        dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
                        linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
    }

    h = FFMIN(h, avctx->height - y);

    if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (avctx->draw_horiz_band) {
        AVFrame *src;
        int offset[AV_NUM_DATA_POINTERS];
        int i;

        /* hand out the current picture for B-frames, low-delay or coded-order
         * output, otherwise the previous (reference) picture */
        if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
           (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
            src = &cur->f;
        else if (last)
            src = &last->f;
        else
            return;

        if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
            picture_structure == PICT_FRAME &&
            avctx->codec_id != AV_CODEC_ID_SVQ3) {
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }else{
            offset[0]= y * src->linesize[0];
            offset[1]=
            offset[2]= (y >> vshift) * src->linesize[1];
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }

        emms_c();

        avctx->draw_horiz_band(avctx, src, offset,
                               y, picture_structure, h);
    }
}
2301
2302 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2303 {
2304 int draw_edges = s->unrestricted_mv && !s->intra_only;
2305 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2306 &s->last_picture, y, h, s->picture_structure,
2307 s->first_field, draw_edges, s->low_delay,
2308 s->v_edge_pos, s->h_edge_pos);
2309 }
2310
/* Initialize s->block_index[] and s->dest[] for the macroblock at
 * (s->mb_x, s->mb_y). */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    const int mb_size= 4;  // log2 of the 16-pixel macroblock size

    /* luma 8x8 block indices: top-left, top-right, bottom-left, bottom-right;
     * note the -2/-1 bias: indices point one macroblock to the left */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* chroma blocks live in a separate region after the luma indices */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y *   linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: mb_y counts rows of both fields, hence >> 1 */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
2342
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t saved[64];
    int idx;

    if(last<=0) return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* lift the present coefficients out of the block, clearing them */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* write each one back at its permuted position */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = saved[pos];
    }
}
2371
2372 void ff_mpeg_flush(AVCodecContext *avctx){
2373 int i;
2374 MpegEncContext *s = avctx->priv_data;
2375
2376 if(s==NULL || s->picture==NULL)
2377 return;
2378
2379 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2380 ff_mpeg_unref_picture(s, &s->picture[i]);
2381 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2382
2383 ff_mpeg_unref_picture(s, &s->current_picture);
2384 ff_mpeg_unref_picture(s, &s->last_picture);
2385 ff_mpeg_unref_picture(s, &s->next_picture);
2386
2387 s->mb_x= s->mb_y= 0;
2388
2389 s->parse_context.state= -1;
2390 s->parse_context.frame_start_found= 0;
2391 s->parse_context.overread= 0;
2392 s->parse_context.overread_index= 0;
2393 s->parse_context.index= 0;
2394 s->parse_context.last_index= 0;
2395 s->bitstream_buffer_size=0;
2396 s->pp_time=0;
2397 }
2398
2399 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2400 int16_t *block, int n, int qscale)
2401 {
2402 int i, level, nCoeffs;
2403 const uint16_t *quant_matrix;
2404
2405 nCoeffs= s->block_last_index[n];
2406
2407 if (n < 4)
2408 block[0] = block[0] * s->y_dc_scale;
2409 else
2410 block[0] = block[0] * s->c_dc_scale;
2411 /* XXX: only mpeg1 */
2412 quant_matrix = s->intra_matrix;
2413 for(i=1;i<=nCoeffs;i++) {
2414 int j= s->intra_scantable.permutated[i];
2415 level = block[j];
2416 if (level) {
2417 if (level < 0) {
2418 level = -level;
2419 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2420 level = (level - 1) | 1;
2421 level = -level;
2422 } else {
2423 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2424 level = (level - 1) | 1;
2425 }
2426 block[j] = level;
2427 }
2428 }
2429 }
2430
2431 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2432 int16_t *block, int n, int qscale)
2433 {
2434 int i, level, nCoeffs;
2435 const uint16_t *quant_matrix;
2436
2437 nCoeffs= s->block_last_index[n];
2438
2439 quant_matrix = s->inter_matrix;
2440 for(i=0; i<=nCoeffs; i++) {
2441 int j= s->intra_scantable.permutated[i];
2442 level = block[j];
2443 if (level) {
2444 if (level < 0) {
2445 level = -level;
2446 level = (((level << 1) + 1) * qscale *
2447 ((int) (quant_matrix[j]))) >> 4;
2448 level = (level - 1) | 1;
2449 level = -level;
2450 } else {
2451 level = (((level << 1) + 1) * qscale *
2452 ((int) (quant_matrix[j]))) >> 4;
2453 level = (level - 1) | 1;
2454 }
2455 block[j] = level;
2456 }
2457 }
2458 }
2459
2460 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2461 int16_t *block, int n, int qscale)
2462 {
2463 int i, level, nCoeffs;
2464 const uint16_t *quant_matrix;
2465
2466 if(s->alternate_scan) nCoeffs= 63;
2467 else nCoeffs= s->block_last_index[n];
2468
2469 if (n < 4)
2470 block[0] = block[0] * s->y_dc_scale;
2471 else
2472 block[0] = block[0] * s->c_dc_scale;
2473 quant_matrix = s->intra_matrix;
2474 for(i=1;i<=nCoeffs;i++) {
2475 int j= s->intra_scantable.permutated[i];
2476 level = block[j];
2477 if (level) {
2478 if (level < 0) {
2479 level = -level;
2480 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2481 level = -level;
2482 } else {
2483 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2484 }
2485 block[j] = level;
2486 }
2487 }
2488 }
2489
2490 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2491 int16_t *block, int n, int qscale)
2492 {
2493 int i, level, nCoeffs;
2494 const uint16_t *quant_matrix;
2495 int sum=-1;
2496
2497 if(s->alternate_scan) nCoeffs= 63;
2498 else nCoeffs= s->block_last_index[n];
2499
2500 if (n < 4)
2501 block[0] = block[0] * s->y_dc_scale;
2502 else
2503 block[0] = block[0] * s->c_dc_scale;
2504 quant_matrix = s->intra_matrix;
2505 for(i=1;i<=nCoeffs;i++) {
2506 int j= s->intra_scantable.permutated[i];
2507 level = block[j];
2508 if (level) {
2509 if (level < 0) {
2510 level = -level;
2511 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2512 level = -level;
2513 } else {
2514 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2515 }
2516 block[j] = level;
2517 sum+=level;
2518 }
2519 }
2520 block[63]^=sum&1;
2521 }
2522
2523 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2524 int16_t *block, int n, int qscale)
2525 {
2526 int i, level, nCoeffs;
2527 const uint16_t *quant_matrix;
2528 int sum=-1;
2529
2530 if(s->alternate_scan) nCoeffs= 63;
2531 else nCoeffs= s->block_last_index[n];
2532
2533 quant_matrix = s->inter_matrix;
2534 for(i=0; i<=nCoeffs; i++) {
2535 int j= s->intra_scantable.permutated[i];
2536 level = block[j];
2537 if (level) {
2538 if (level < 0) {
2539 level = -level;
2540 level = (((level << 1) + 1) * qscale *
2541 ((int) (quant_matrix[j]))) >> 4;
2542 level = -level;
2543 } else {
2544 level = (((level << 1) + 1) * qscale *
2545 ((int) (quant_matrix[j]))) >> 4;
2546 }
2547 block[j] = level;
2548 sum+=level;
2549 }
2550 }
2551 block[63]^=sum&1;
2552 }
2553
2554 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2555 int16_t *block, int n, int qscale)
2556 {
2557 int i, level, qmul, qadd;
2558 int nCoeffs;
2559
2560 assert(s->block_last_index[n]>=0);
2561
2562 qmul = qscale << 1;
2563
2564 if (!s->h263_aic) {
2565 if (n < 4)
2566 block[0] = block[0] * s->y_dc_scale;
2567 else
2568 block[0] = block[0] * s->c_dc_scale;
2569 qadd = (qscale - 1) | 1;
2570 }else{
2571 qadd = 0;
2572 }
2573 if(s->ac_pred)
2574 nCoeffs=63;
2575 else
2576 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2577
2578 for(i=1; i<=nCoeffs; i++) {
2579 level = block[i];
2580 if (level) {
2581 if (level < 0) {
2582 level = level * qmul - qadd;
2583 } else {
2584 level = level * qmul + qadd;
2585 }
2586 block[i] = level;
2587 }
2588 }
2589 }
2590
2591 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2592 int16_t *block, int n, int qscale)
2593 {
2594 int i, level, qmul, qadd;
2595 int nCoeffs;
2596
2597 assert(s->block_last_index[n]>=0);
2598
2599 qadd = (qscale - 1) | 1;
2600 qmul = qscale << 1;
2601
2602 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2603
2604 for(i=0; i<=nCoeffs; i++) {
2605 level = block[i];
2606 if (level) {
2607 if (level < 0) {
2608 level = level * qmul - qadd;
2609 } else {
2610 level = level * qmul + qadd;
2611 }
2612 block[i] = level;
2613 }
2614 }
2615 }
2616
2617 /**
2618 * set qscale and update qscale dependent variables.
2619 */
2620 void ff_set_qscale(MpegEncContext * s, int qscale)
2621 {
2622 if (qscale < 1)
2623 qscale = 1;
2624 else if (qscale > 31)
2625 qscale = 31;
2626
2627 s->qscale = qscale;
2628 s->chroma_qscale= s->chroma_qscale_table[qscale];
2629
2630 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2631 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2632 }
2633
2634 void ff_MPV_report_decode_progress(MpegEncContext *s)
2635 {
2636 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2637 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2638 }
2639
2640 #if CONFIG_ERROR_RESILIENCE
2641 void ff_mpeg_er_frame_start(MpegEncContext *s)
2642 {
2643 ERContext *er = &s->er;
2644
2645 er->cur_pic = s->current_picture_ptr;
2646 er->last_pic = s->last_picture_ptr;
2647 er->next_pic = s->next_picture_ptr;
2648
2649 er->pp_time = s->pp_time;
2650 er->pb_time = s->pb_time;
2651 er->quarter_sample = s->quarter_sample;
2652 er->partitioned_frame = s->partitioned_frame;
2653
2654 ff_er_frame_start(er);
2655 }
2656 #endif /* CONFIG_ERROR_RESILIENCE */