c9a425abfadb211d55d22fda52522c1f5d36c460
[libav.git] / libavcodec / mpegvideo.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "avcodec.h"
34 #include "dsputil.h"
35 #include "internal.h"
36 #include "mathops.h"
37 #include "mpegvideo.h"
38 #include "mjpegenc.h"
39 #include "msmpeg4.h"
40 #include "xvmc_internal.h"
41 #include "thread.h"
42 #include <limits.h>
43
44 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
45 int16_t *block, int n, int qscale);
46 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
47 int16_t *block, int n, int qscale);
48 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
49 int16_t *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
51 int16_t *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
53 int16_t *block, int n, int qscale);
54 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
55 int16_t *block, int n, int qscale);
56 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
57 int16_t *block, int n, int qscale);
58
/* Identity mapping from luma qscale to chroma qscale; codecs with a
 * non-linear chroma quantizer install their own table instead. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
64
/* MPEG-1 DC coefficient scale: constant 8 regardless of qscale
 * (indexed by qscale, 0..127). */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
76
/* MPEG-2 DC coefficient scale tables, one per intra_dc_precision value
 * (see the ff_mpeg2_dc_scale_table index array below). Each is a constant
 * table indexed by qscale (0..127). */
static const uint8_t mpeg2_dc_scale_table1[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};

static const uint8_t mpeg2_dc_scale_table2[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};

static const uint8_t mpeg2_dc_scale_table3[128] = {
// 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};

/* DC scale table selector; presumably indexed by intra_dc_precision
 * (0..3) — confirm against the MPEG-2 decoder's header parsing. */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
119
/* Terminated pixel-format list for codecs that only support YUV 4:2:0. */
const enum AVPixelFormat ff_pixfmt_list_420[] = {
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
124
125 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
126 int (*mv)[2][4][2],
127 int mb_x, int mb_y, int mb_intra, int mb_skipped)
128 {
129 MpegEncContext *s = opaque;
130
131 s->mv_dir = mv_dir;
132 s->mv_type = mv_type;
133 s->mb_intra = mb_intra;
134 s->mb_skipped = mb_skipped;
135 s->mb_x = mb_x;
136 s->mb_y = mb_y;
137 memcpy(s->mv, mv, sizeof(*mv));
138
139 ff_init_block_index(s);
140 ff_update_block_index(s);
141
142 s->dsp.clear_blocks(s->block[0]);
143
144 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
145 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
146 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147
148 assert(ref == 0);
149 ff_MPV_decode_mb(s, s->block);
150 }
151
/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    /* Generic DSP / half-pel / video DSP helper contexts. */
    ff_dsputil_init(&s->dsp, s->avctx);
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    /* Install the C dequantizers; the arch-specific init below may
     * replace them with optimized versions. */
    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#if ARCH_X86
    ff_MPV_common_init_x86(s);
#elif ARCH_ALPHA
    ff_MPV_common_init_axp(s);
#elif ARCH_ARM
    ff_MPV_common_init_arm(s);
#elif ARCH_BFIN
    ff_MPV_common_init_bfin(s);
#elif ARCH_PPC
    ff_MPV_common_init_ppc(s);
#endif

    /* load & permutate scantables
     * note: only wmv uses different ones
     * (must run after the arch init above, which may change the IDCT
     * permutation). */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
195
/**
 * Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation / rate-distortion scratchpads).
 * On failure everything allocated here is freed and AVERROR(ENOMEM)
 * is returned; on success the scratchpad aliases are set up.
 */
int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
{
    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for  halfpel / 21x21 for  h264)
    // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
                      fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
                      fail)
    /* The remaining scratchpads are aliases into me.scratchpad; they are
     * never used simultaneously, so sharing the allocation is safe. */
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}
220
221 /**
222 * Allocate a frame buffer
223 */
224 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
225 {
226 int r, ret;
227
228 pic->tf.f = &pic->f;
229 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
230 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
231 s->codec_id != AV_CODEC_ID_MSS2)
232 r = ff_thread_get_buffer(s->avctx, &pic->tf,
233 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
234 else {
235 pic->f.width = s->avctx->width;
236 pic->f.height = s->avctx->height;
237 pic->f.format = s->avctx->pix_fmt;
238 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
239 }
240
241 if (r < 0 || !pic->f.data[0]) {
242 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
243 r, pic->f.data[0]);
244 return -1;
245 }
246
247 if (s->avctx->hwaccel) {
248 assert(!pic->hwaccel_picture_private);
249 if (s->avctx->hwaccel->priv_data_size) {
250 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
251 if (!pic->hwaccel_priv_buf) {
252 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
253 return -1;
254 }
255 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
256 }
257 }
258
259 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
260 s->uvlinesize != pic->f.linesize[1])) {
261 av_log(s->avctx, AV_LOG_ERROR,
262 "get_buffer() failed (stride changed)\n");
263 ff_mpeg_unref_picture(s, pic);
264 return -1;
265 }
266
267 if (pic->f.linesize[1] != pic->f.linesize[2]) {
268 av_log(s->avctx, AV_LOG_ERROR,
269 "get_buffer() failed (uv stride mismatch)\n");
270 ff_mpeg_unref_picture(s, pic);
271 return -1;
272 }
273
274 if (!s->edge_emu_buffer &&
275 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
276 av_log(s->avctx, AV_LOG_ERROR,
277 "get_buffer() failed to allocate context scratch buffers.\n");
278 ff_mpeg_unref_picture(s, pic);
279 return ret;
280 }
281
282 return 0;
283 }
284
285 static void free_picture_tables(Picture *pic)
286 {
287 int i;
288
289 av_buffer_unref(&pic->mb_var_buf);
290 av_buffer_unref(&pic->mc_mb_var_buf);
291 av_buffer_unref(&pic->mb_mean_buf);
292 av_buffer_unref(&pic->mbskip_table_buf);
293 av_buffer_unref(&pic->qscale_table_buf);
294 av_buffer_unref(&pic->mb_type_buf);
295
296 for (i = 0; i < 2; i++) {
297 av_buffer_unref(&pic->motion_val_buf[i]);
298 av_buffer_unref(&pic->ref_index_buf[i]);
299 }
300 }
301
/**
 * Allocate the per-picture side-data tables (skip/qscale/mb_type, plus
 * encoder statistics and motion vectors where needed).
 * Partially allocated buffers are left for the caller to release via
 * free_picture_tables() on AVERROR(ENOMEM).
 */
static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
{
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    int i;


    /* Tables needed by both encoder and decoder. */
    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
                                             sizeof(uint32_t));
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);

    /* Per-MB statistics only used for rate control while encoding. */
    if (s->encoding) {
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
            return AVERROR(ENOMEM);
    }

    /* Motion vector / reference index tables, needed for H.263-family
     * prediction, encoding, and MV visualization/debugging. */
    if (s->out_format == FMT_H263 || s->encoding ||
        (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
        }
    }

    return 0;
}
340
/**
 * Ensure every side-data buffer of the picture is writable (copying
 * shared buffers where needed), so the tables can be updated in place.
 * @return 0 on success, a negative AVERROR on allocation failure.
 */
static int make_tables_writable(Picture *pic)
{
    int ret, i;
/* Make one buffer writable if it exists; propagates the error code. */
#define MAKE_WRITABLE(table) \
do {\
    if (pic->table &&\
        (ret = av_buffer_make_writable(&pic->table)) < 0)\
        return ret;\
} while (0)

    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);

    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
    }

    return 0;
}
365
/**
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0;
 * with shared = 1 the caller has already provided the frame data.
 * Also (re)creates the side-data tables and sets up the convenience
 * pointers into them.
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    int i, ret;

    if (shared) {
        assert(pic->f.data[0]);
        pic->shared = 1;
    } else {
        assert(!pic->f.data[0]);

        if (alloc_frame_buffer(s, pic) < 0)
            return -1;

        /* Remember the strides chosen by the allocator; later frames
         * must match them (checked in alloc_frame_buffer()). */
        s->linesize   = pic->f.linesize[0];
        s->uvlinesize = pic->f.linesize[1];
    }

    if (!pic->qscale_table_buf)
        ret = alloc_picture_tables(s, pic);
    else
        ret = make_tables_writable(pic);
    if (ret < 0)
        goto fail;

    if (s->encoding) {
        pic->mb_var    = (uint16_t*)pic->mb_var_buf->data;
        pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
        pic->mb_mean   = pic->mb_mean_buf->data;
    }

    pic->mbskip_table = pic->mbskip_table_buf->data;
    /* Offset past the guard rows so index 0 maps to the first real MB. */
    pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;

    if (pic->motion_val_buf[0]) {
        for (i = 0; i < 2; i++) {
            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
        }
    }

    return 0;
fail:
    av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
    ff_mpeg_unref_picture(s, pic);
    free_picture_tables(pic);
    return AVERROR(ENOMEM);
}
418
/**
 * Deallocate a picture.
 * Releases the frame buffer and hwaccel private data, then zeroes all
 * fields of the Picture except the buffer references declared before
 * mb_mean (which stay valid for reuse unless needs_realloc was set).
 */
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
{
    /* Everything from just past mb_mean to the end of the struct is
     * per-use state and gets cleared below. */
    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);

    pic->tf.f = &pic->f;
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->tf);
    else
        av_frame_unref(&pic->f);

    av_buffer_unref(&pic->hwaccel_priv_buf);

    if (pic->needs_realloc)
        free_picture_tables(pic);

    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}
443
/**
 * Make dst's side-data buffers reference the same underlying data as
 * src's, and copy the derived table pointers.
 * On reference failure dst's tables are freed and AVERROR(ENOMEM) is
 * returned.
 */
static int update_picture_tables(Picture *dst, Picture *src)
{
    int i;

/* Re-reference one buffer only if dst doesn't already share it. */
#define UPDATE_TABLE(table)\
do {\
    if (src->table &&\
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
        av_buffer_unref(&dst->table);\
        dst->table = av_buffer_ref(src->table);\
        if (!dst->table) {\
            free_picture_tables(dst);\
            return AVERROR(ENOMEM);\
        }\
    }\
} while (0)

    UPDATE_TABLE(mb_var_buf);
    UPDATE_TABLE(mc_mb_var_buf);
    UPDATE_TABLE(mb_mean_buf);
    UPDATE_TABLE(mbskip_table_buf);
    UPDATE_TABLE(qscale_table_buf);
    UPDATE_TABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        UPDATE_TABLE(motion_val_buf[i]);
        UPDATE_TABLE(ref_index_buf[i]);
    }

    /* The raw pointers below point into the (now shared) buffers. */
    dst->mb_var        = src->mb_var;
    dst->mc_mb_var     = src->mc_mb_var;
    dst->mb_mean       = src->mb_mean;
    dst->mbskip_table  = src->mbskip_table;
    dst->qscale_table  = src->qscale_table;
    dst->mb_type       = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    return 0;
}
485
486 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
487 {
488 int ret;
489
490 av_assert0(!dst->f.buf[0]);
491 av_assert0(src->f.buf[0]);
492
493 src->tf.f = &src->f;
494 dst->tf.f = &dst->f;
495 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
496 if (ret < 0)
497 goto fail;
498
499 ret = update_picture_tables(dst, src);
500 if (ret < 0)
501 goto fail;
502
503 if (src->hwaccel_picture_private) {
504 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
505 if (!dst->hwaccel_priv_buf)
506 goto fail;
507 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
508 }
509
510 dst->field_picture = src->field_picture;
511 dst->mb_var_sum = src->mb_var_sum;
512 dst->mc_mb_var_sum = src->mc_mb_var_sum;
513 dst->b_frame_score = src->b_frame_score;
514 dst->needs_realloc = src->needs_realloc;
515 dst->reference = src->reference;
516 dst->shared = src->shared;
517
518 return 0;
519 fail:
520 ff_mpeg_unref_picture(s, dst);
521 return ret;
522 }
523
/**
 * Allocate the per-slice-thread state of an MpegEncContext: DCT blocks,
 * motion-estimation maps (encoder only) and H.263 AC prediction values.
 * The linesize-dependent scratch buffers are deliberately left NULL
 * here; they are allocated later by ff_mpv_frame_size_alloc().
 * @return 0 on success, -1 on allocation failure (caller frees via
 *         ff_MPV_common_end()).
 */
static int init_duplicate_context(MpegEncContext *s)
{
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    /* 12 blocks of 64 coefficients, double-buffered. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        /* Offset by one row+column of guard entries; [1]/[2] are chroma. */
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_MPV_common_end()
}
568
569 static void free_duplicate_context(MpegEncContext *s)
570 {
571 if (s == NULL)
572 return;
573
574 av_freep(&s->edge_emu_buffer);
575 av_freep(&s->me.scratchpad);
576 s->me.temp =
577 s->rd_scratchpad =
578 s->b_scratchpad =
579 s->obmc_scratchpad = NULL;
580
581 av_freep(&s->dct_error_sum);
582 av_freep(&s->me.map);
583 av_freep(&s->me.score_map);
584 av_freep(&s->blocks);
585 av_freep(&s->ac_val_base);
586 s->block = NULL;
587 }
588
/* Save (or restore) the per-thread fields of src into bak; used by
 * ff_update_duplicate_context() to survive the wholesale memcpy of the
 * context. Only fields owned by the slice thread are copied. */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
615
/**
 * Copy src's state into the slice-thread context dst while preserving
 * dst's own per-thread buffers, then fix up the pointers that must
 * reference dst's storage.
 */
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own block array, not src's. */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (!dst->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
    return 0;
}
638
639 int ff_mpeg_update_thread_context(AVCodecContext *dst,
640 const AVCodecContext *src)
641 {
642 int i, ret;
643 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
644
645 if (dst == src || !s1->context_initialized)
646 return 0;
647
648 // FIXME can parameters change on I-frames?
649 // in that case dst may need a reinit
650 if (!s->context_initialized) {
651 memcpy(s, s1, sizeof(MpegEncContext));
652
653 s->avctx = dst;
654 s->bitstream_buffer = NULL;
655 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
656
657 ff_MPV_common_init(s);
658 }
659
660 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
661 int err;
662 s->context_reinit = 0;
663 s->height = s1->height;
664 s->width = s1->width;
665 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
666 return err;
667 }
668
669 s->avctx->coded_height = s1->avctx->coded_height;
670 s->avctx->coded_width = s1->avctx->coded_width;
671 s->avctx->width = s1->avctx->width;
672 s->avctx->height = s1->avctx->height;
673
674 s->coded_picture_number = s1->coded_picture_number;
675 s->picture_number = s1->picture_number;
676 s->input_picture_number = s1->input_picture_number;
677
678 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
679 ff_mpeg_unref_picture(s, &s->picture[i]);
680 if (s1->picture[i].f.data[0] &&
681 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
682 return ret;
683 }
684
685 #define UPDATE_PICTURE(pic)\
686 do {\
687 ff_mpeg_unref_picture(s, &s->pic);\
688 if (s1->pic.f.data[0])\
689 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
690 else\
691 ret = update_picture_tables(&s->pic, &s1->pic);\
692 if (ret < 0)\
693 return ret;\
694 } while (0)
695
696 UPDATE_PICTURE(current_picture);
697 UPDATE_PICTURE(last_picture);
698 UPDATE_PICTURE(next_picture);
699
700 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
701 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
702 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
703
704 // Error/bug resilience
705 s->next_p_frame_damaged = s1->next_p_frame_damaged;
706 s->workaround_bugs = s1->workaround_bugs;
707
708 // MPEG4 timing info
709 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
710 (char *) &s1->shape - (char *) &s1->time_increment_bits);
711
712 // B-frame info
713 s->max_b_frames = s1->max_b_frames;
714 s->low_delay = s1->low_delay;
715 s->droppable = s1->droppable;
716
717 // DivX handling (doesn't work)
718 s->divx_packed = s1->divx_packed;
719
720 if (s1->bitstream_buffer) {
721 if (s1->bitstream_buffer_size +
722 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
723 av_fast_malloc(&s->bitstream_buffer,
724 &s->allocated_bitstream_buffer_size,
725 s1->allocated_bitstream_buffer_size);
726 s->bitstream_buffer_size = s1->bitstream_buffer_size;
727 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
728 s1->bitstream_buffer_size);
729 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
730 FF_INPUT_BUFFER_PADDING_SIZE);
731 }
732
733 // linesize dependend scratch buffer allocation
734 if (!s->edge_emu_buffer)
735 if (s1->linesize) {
736 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
737 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
738 "scratch buffers.\n");
739 return AVERROR(ENOMEM);
740 }
741 } else {
742 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
743 "be allocated due to unknown size.\n");
744 return AVERROR_BUG;
745 }
746
747 // MPEG2/interlacing info
748 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
749 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
750
751 if (!s1->first_field) {
752 s->last_pict_type = s1->pict_type;
753 if (s1->current_picture_ptr)
754 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
755
756 if (s1->pict_type != AV_PICTURE_TYPE_B) {
757 s->last_non_b_pict_type = s1->pict_type;
758 }
759 }
760
761 return 0;
762 }
763
764 /**
765 * Set the given MpegEncContext to common defaults
766 * (same for encoding and decoding).
767 * The changed fields will not depend upon the
768 * prior state of the MpegEncContext.
769 */
770 void ff_MPV_common_defaults(MpegEncContext *s)
771 {
772 s->y_dc_scale_table =
773 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
774 s->chroma_qscale_table = ff_default_chroma_qscale_table;
775 s->progressive_frame = 1;
776 s->progressive_sequence = 1;
777 s->picture_structure = PICT_FRAME;
778
779 s->coded_picture_number = 0;
780 s->picture_number = 0;
781 s->input_picture_number = 0;
782
783 s->picture_in_gop_number = 0;
784
785 s->f_code = 1;
786 s->b_code = 1;
787
788 s->slice_context_count = 1;
789 }
790
/**
 * Set the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon
 * the prior state of the MpegEncContext.
 * Currently identical to the common defaults; kept as a separate entry
 * point so decoder-only defaults can be added without touching encoders.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    ff_MPV_common_defaults(s);
}
800
/**
 * Initialize the error-resilience context from the MpegEncContext:
 * mirror the geometry, share the skip/intra tables, and allocate the
 * ER-private status buffers.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
static int init_er(MpegEncContext *s)
{
    ERContext *er = &s->er;
    int mb_array_size = s->mb_height * s->mb_stride;
    int i;

    er->avctx       = s->avctx;
    er->dsp         = &s->dsp;

    /* Geometry shared with the owning context. */
    er->mb_index2xy = s->mb_index2xy;
    er->mb_num      = s->mb_num;
    er->mb_width    = s->mb_width;
    er->mb_height   = s->mb_height;
    er->mb_stride   = s->mb_stride;
    er->b8_stride   = s->b8_stride;

    er->er_temp_buffer     = av_malloc(s->mb_height * s->mb_stride);
    er->error_status_table = av_mallocz(mb_array_size);
    if (!er->er_temp_buffer || !er->error_status_table)
        goto fail;

    /* Borrowed (not owned) tables from the MpegEncContext. */
    er->mbskip_table  = s->mbskip_table;
    er->mbintra_table = s->mbintra_table;

    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
        er->dc_val[i] = s->dc_val[i];

    er->decode_mb = mpeg_er_decode_mb;
    er->opaque    = s;

    return 0;
fail:
    av_freep(&er->er_temp_buffer);
    av_freep(&er->error_status_table);
    return AVERROR(ENOMEM);
}
837
/**
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
 * Computes the macroblock geometry (strides, counts, edge positions) and
 * allocates all tables whose size depends on it. On failure, partially
 * allocated tables are left for the caller's cleanup path.
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    /* Strides include one extra column as guard/border. */
    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overriden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2   * c_size;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                      fail); // error ressilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] =
        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables; the *_table pointers are offset past the
         * first guard row/column of the *_table_base allocations. */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
                          mv_table_size * 2 * sizeof(int16_t), fail);
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
                                   s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
                                   s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
                          sizeof(uint16_t), fail); // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
                          sizeof(int), fail);

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);

    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
                                  mb_array_size * 2 * sizeof(uint8_t), fail);
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
                                  mv_table_size * 2 * sizeof(int16_t), fail);
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
                                            + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
                              mb_array_size * 2 * sizeof(uint8_t), fail);
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
                          mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
                          mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for  error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
                          yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 = 128 << 3 is the neutral DC predictor value. */
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for  a quicker mpeg4 slice_end detection

    return init_er(s);
fail:
    return AVERROR(ENOMEM);
}
977
978 /**
979 * init common structure for both encoder and decoder.
980 * this assumes that some variables like width/height are already set
981 */
982 av_cold int ff_MPV_common_init(MpegEncContext *s)
983 {
984 int i;
985 int nb_slices = (HAVE_THREADS &&
986 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
987 s->avctx->thread_count : 1;
988
989 if (s->encoding && s->avctx->slices)
990 nb_slices = s->avctx->slices;
991
992 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
993 s->mb_height = (s->height + 31) / 32 * 2;
994 else
995 s->mb_height = (s->height + 15) / 16;
996
997 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
998 av_log(s->avctx, AV_LOG_ERROR,
999 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1000 return -1;
1001 }
1002
1003 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1004 int max_slices;
1005 if (s->mb_height)
1006 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1007 else
1008 max_slices = MAX_THREADS;
1009 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1010 " reducing to %d\n", nb_slices, max_slices);
1011 nb_slices = max_slices;
1012 }
1013
1014 if ((s->width || s->height) &&
1015 av_image_check_size(s->width, s->height, 0, s->avctx))
1016 return -1;
1017
1018 ff_dct_common_init(s);
1019
1020 s->flags = s->avctx->flags;
1021 s->flags2 = s->avctx->flags2;
1022
1023 if (s->width && s->height) {
1024 /* set chroma shifts */
1025 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1026 &s->chroma_x_shift,
1027 &s->chroma_y_shift);
1028
1029 /* convert fourcc to upper case */
1030 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1031
1032 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1033
1034 s->avctx->coded_frame = &s->current_picture.f;
1035
1036 if (s->encoding) {
1037 if (s->msmpeg4_version) {
1038 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1039 2 * 2 * (MAX_LEVEL + 1) *
1040 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1041 }
1042 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1043
1044 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
1045 64 * 32 * sizeof(int), fail);
1046 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
1047 64 * 32 * sizeof(int), fail);
1048 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
1049 64 * 32 * 2 * sizeof(uint16_t), fail);
1050 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
1051 64 * 32 * 2 * sizeof(uint16_t), fail);
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
1053 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1054 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
1055 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1056
1057 if (s->avctx->noise_reduction) {
1058 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
1059 2 * 64 * sizeof(uint16_t), fail);
1060 }
1061 }
1062 }
1063
1064 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1065 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1066 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1067 avcodec_get_frame_defaults(&s->picture[i].f);
1068 }
1069 memset(&s->next_picture, 0, sizeof(s->next_picture));
1070 memset(&s->last_picture, 0, sizeof(s->last_picture));
1071 memset(&s->current_picture, 0, sizeof(s->current_picture));
1072 avcodec_get_frame_defaults(&s->next_picture.f);
1073 avcodec_get_frame_defaults(&s->last_picture.f);
1074 avcodec_get_frame_defaults(&s->current_picture.f);
1075
1076 if (s->width && s->height) {
1077 if (init_context_frame(s))
1078 goto fail;
1079
1080 s->parse_context.state = -1;
1081 }
1082
1083 s->context_initialized = 1;
1084 s->thread_context[0] = s;
1085
1086 if (s->width && s->height) {
1087 if (nb_slices > 1) {
1088 for (i = 1; i < nb_slices; i++) {
1089 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1090 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1091 }
1092
1093 for (i = 0; i < nb_slices; i++) {
1094 if (init_duplicate_context(s->thread_context[i]) < 0)
1095 goto fail;
1096 s->thread_context[i]->start_mb_y =
1097 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1098 s->thread_context[i]->end_mb_y =
1099 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1100 }
1101 } else {
1102 if (init_duplicate_context(s) < 0)
1103 goto fail;
1104 s->start_mb_y = 0;
1105 s->end_mb_y = s->mb_height;
1106 }
1107 s->slice_context_count = nb_slices;
1108 }
1109
1110 return 0;
1111 fail:
1112 ff_MPV_common_end(s);
1113 return -1;
1114 }
1115
1116 /**
1117 * Frees and resets MpegEncContext fields depending on the resolution.
1118 * Is used during resolution changes to avoid a full reinitialization of the
1119 * codec.
1120 */
1121 static int free_context_frame(MpegEncContext *s)
1122 {
1123 int i, j, k;
1124
1125 av_freep(&s->mb_type);
1126 av_freep(&s->p_mv_table_base);
1127 av_freep(&s->b_forw_mv_table_base);
1128 av_freep(&s->b_back_mv_table_base);
1129 av_freep(&s->b_bidir_forw_mv_table_base);
1130 av_freep(&s->b_bidir_back_mv_table_base);
1131 av_freep(&s->b_direct_mv_table_base);
1132 s->p_mv_table = NULL;
1133 s->b_forw_mv_table = NULL;
1134 s->b_back_mv_table = NULL;
1135 s->b_bidir_forw_mv_table = NULL;
1136 s->b_bidir_back_mv_table = NULL;
1137 s->b_direct_mv_table = NULL;
1138 for (i = 0; i < 2; i++) {
1139 for (j = 0; j < 2; j++) {
1140 for (k = 0; k < 2; k++) {
1141 av_freep(&s->b_field_mv_table_base[i][j][k]);
1142 s->b_field_mv_table[i][j][k] = NULL;
1143 }
1144 av_freep(&s->b_field_select_table[i][j]);
1145 av_freep(&s->p_field_mv_table_base[i][j]);
1146 s->p_field_mv_table[i][j] = NULL;
1147 }
1148 av_freep(&s->p_field_select_table[i]);
1149 }
1150
1151 av_freep(&s->dc_val_base);
1152 av_freep(&s->coded_block_base);
1153 av_freep(&s->mbintra_table);
1154 av_freep(&s->cbp_table);
1155 av_freep(&s->pred_dir_table);
1156
1157 av_freep(&s->mbskip_table);
1158
1159 av_freep(&s->er.error_status_table);
1160 av_freep(&s->er.er_temp_buffer);
1161 av_freep(&s->mb_index2xy);
1162 av_freep(&s->lambda_table);
1163 av_freep(&s->cplx_tab);
1164 av_freep(&s->bits_tab);
1165
1166 s->linesize = s->uvlinesize = 0;
1167
1168 return 0;
1169 }
1170
1171 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1172 {
1173 int i, err = 0;
1174
1175 if (s->slice_context_count > 1) {
1176 for (i = 0; i < s->slice_context_count; i++) {
1177 free_duplicate_context(s->thread_context[i]);
1178 }
1179 for (i = 1; i < s->slice_context_count; i++) {
1180 av_freep(&s->thread_context[i]);
1181 }
1182 } else
1183 free_duplicate_context(s);
1184
1185 if ((err = free_context_frame(s)) < 0)
1186 return err;
1187
1188 if (s->picture)
1189 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1190 s->picture[i].needs_realloc = 1;
1191 }
1192
1193 s->last_picture_ptr =
1194 s->next_picture_ptr =
1195 s->current_picture_ptr = NULL;
1196
1197 // init
1198 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1199 s->mb_height = (s->height + 31) / 32 * 2;
1200 else
1201 s->mb_height = (s->height + 15) / 16;
1202
1203 if ((s->width || s->height) &&
1204 av_image_check_size(s->width, s->height, 0, s->avctx))
1205 return AVERROR_INVALIDDATA;
1206
1207 if ((err = init_context_frame(s)))
1208 goto fail;
1209
1210 s->thread_context[0] = s;
1211
1212 if (s->width && s->height) {
1213 int nb_slices = s->slice_context_count;
1214 if (nb_slices > 1) {
1215 for (i = 1; i < nb_slices; i++) {
1216 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1217 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1218 }
1219
1220 for (i = 0; i < nb_slices; i++) {
1221 if (init_duplicate_context(s->thread_context[i]) < 0)
1222 goto fail;
1223 s->thread_context[i]->start_mb_y =
1224 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1225 s->thread_context[i]->end_mb_y =
1226 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1227 }
1228 } else {
1229 if (init_duplicate_context(s) < 0)
1230 goto fail;
1231 s->start_mb_y = 0;
1232 s->end_mb_y = s->mb_height;
1233 }
1234 s->slice_context_count = nb_slices;
1235 }
1236
1237 return 0;
1238 fail:
1239 ff_MPV_common_end(s);
1240 return err;
1241 }
1242
1243 /* init common structure for both encoder and decoder */
1244 void ff_MPV_common_end(MpegEncContext *s)
1245 {
1246 int i;
1247
1248 if (s->slice_context_count > 1) {
1249 for (i = 0; i < s->slice_context_count; i++) {
1250 free_duplicate_context(s->thread_context[i]);
1251 }
1252 for (i = 1; i < s->slice_context_count; i++) {
1253 av_freep(&s->thread_context[i]);
1254 }
1255 s->slice_context_count = 1;
1256 } else free_duplicate_context(s);
1257
1258 av_freep(&s->parse_context.buffer);
1259 s->parse_context.buffer_size = 0;
1260
1261 av_freep(&s->bitstream_buffer);
1262 s->allocated_bitstream_buffer_size = 0;
1263
1264 av_freep(&s->avctx->stats_out);
1265 av_freep(&s->ac_stats);
1266
1267 av_freep(&s->q_intra_matrix);
1268 av_freep(&s->q_inter_matrix);
1269 av_freep(&s->q_intra_matrix16);
1270 av_freep(&s->q_inter_matrix16);
1271 av_freep(&s->input_picture);
1272 av_freep(&s->reordered_input_picture);
1273 av_freep(&s->dct_offset);
1274
1275 if (s->picture) {
1276 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1277 free_picture_tables(&s->picture[i]);
1278 ff_mpeg_unref_picture(s, &s->picture[i]);
1279 }
1280 }
1281 av_freep(&s->picture);
1282 free_picture_tables(&s->last_picture);
1283 ff_mpeg_unref_picture(s, &s->last_picture);
1284 free_picture_tables(&s->current_picture);
1285 ff_mpeg_unref_picture(s, &s->current_picture);
1286 free_picture_tables(&s->next_picture);
1287 ff_mpeg_unref_picture(s, &s->next_picture);
1288 free_picture_tables(&s->new_picture);
1289 ff_mpeg_unref_picture(s, &s->new_picture);
1290
1291 free_context_frame(s);
1292
1293 s->context_initialized = 0;
1294 s->last_picture_ptr =
1295 s->next_picture_ptr =
1296 s->current_picture_ptr = NULL;
1297 s->linesize = s->uvlinesize = 0;
1298 }
1299
/**
 * Initialize the derived lookup tables of an RLTable: rl->max_level[],
 * rl->max_run[] and rl->index_run[] for the "not last" (0) and "last" (1)
 * halves of the run/level table.
 *
 * @param static_store  if non-NULL, the three tables for each half are
 *                      laid out consecutively inside this static buffer
 *                      instead of being heap-allocated; an already
 *                      initialized static table is detected and skipped.
 */
av_cold void ff_init_rl(RLTable *rl,
                        uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        /* entries [0, rl->last) are "not last coefficient", the rest are
         * "last coefficient" codes */
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        /* rl->n marks "unused" slots in index_run */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* the three tables are packed back-to-back in static_store[last];
         * NOTE(review): the heap path does not check av_malloc() for NULL
         * before the memcpy below — cannot be reported through this void
         * interface, flagging for a future API change */
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
1351
1352 av_cold void ff_init_vlc_rl(RLTable *rl)
1353 {
1354 int i, q;
1355
1356 for (q = 0; q < 32; q++) {
1357 int qmul = q * 2;
1358 int qadd = (q - 1) | 1;
1359
1360 if (q == 0) {
1361 qmul = 1;
1362 qadd = 0;
1363 }
1364 for (i = 0; i < rl->vlc.table_size; i++) {
1365 int code = rl->vlc.table[i][0];
1366 int len = rl->vlc.table[i][1];
1367 int level, run;
1368
1369 if (len == 0) { // illegal code
1370 run = 66;
1371 level = MAX_LEVEL;
1372 } else if (len < 0) { // more bits needed
1373 run = 0;
1374 level = code;
1375 } else {
1376 if (code == rl->n) { // esc
1377 run = 66;
1378 level = 0;
1379 } else {
1380 run = rl->table_run[code] + 1;
1381 level = rl->table_level[code] * qmul + qadd;
1382 if (code >= rl->last) run += 192;
1383 }
1384 }
1385 rl->rl_vlc[q][i].len = len;
1386 rl->rl_vlc[q][i].level = level;
1387 rl->rl_vlc[q][i].run = run;
1388 }
1389 }
1390 }
1391
1392 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1393 {
1394 int i;
1395
1396 /* release non reference frames */
1397 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1398 if (!s->picture[i].reference &&
1399 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1400 ff_mpeg_unref_picture(s, &s->picture[i]);
1401 }
1402 }
1403 }
1404
1405 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1406 {
1407 if (pic->f.data[0] == NULL)
1408 return 1;
1409 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1410 return 1;
1411 return 0;
1412 }
1413
1414 static int find_unused_picture(MpegEncContext *s, int shared)
1415 {
1416 int i;
1417
1418 if (shared) {
1419 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1420 if (s->picture[i].f.data[0] == NULL)
1421 return i;
1422 }
1423 } else {
1424 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1425 if (pic_is_unused(s, &s->picture[i]))
1426 return i;
1427 }
1428 }
1429
1430 return AVERROR_INVALIDDATA;
1431 }
1432
1433 int ff_find_unused_picture(MpegEncContext *s, int shared)
1434 {
1435 int ret = find_unused_picture(s, shared);
1436
1437 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1438 if (s->picture[ret].needs_realloc) {
1439 s->picture[ret].needs_realloc = 0;
1440 free_picture_tables(&s->picture[ret]);
1441 ff_mpeg_unref_picture(s, &s->picture[ret]);
1442 avcodec_get_frame_defaults(&s->picture[ret].f);
1443 }
1444 }
1445 return ret;
1446 }
1447
1448 static void update_noise_reduction(MpegEncContext *s)
1449 {
1450 int intra, i;
1451
1452 for (intra = 0; intra < 2; intra++) {
1453 if (s->dct_count[intra] > (1 << 16)) {
1454 for (i = 0; i < 64; i++) {
1455 s->dct_error_sum[intra][i] >>= 1;
1456 }
1457 s->dct_count[intra] >>= 1;
1458 }
1459
1460 for (i = 0; i < 64; i++) {
1461 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1462 s->dct_count[intra] +
1463 s->dct_error_sum[intra][i] / 2) /
1464 (s->dct_error_sum[intra][i] + 1);
1465 }
1466 }
1467 }
1468
/**
 * generic function for encode/decode called after coding/decoding
 * the header and before a frame is coded/decoded.
 *
 * Selects/allocates the current picture, rotates last/next reference
 * pointers, allocates grey dummy reference frames when references are
 * missing, and installs the dequantizers for the detected codec.
 *
 * @return 0 on success, a negative value on failure.
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i, ret;
    Picture *pic;
    s->mb_skipped = 0;

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f.data[0]) {
        ff_mpeg_unref_picture(s, s->last_picture_ptr);
    }

    /* release forgotten pictures */
    /* if (mpeg124/h263) */
    if (!s->encoding) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            if (&s->picture[i] != s->last_picture_ptr &&
                &s->picture[i] != s->next_picture_ptr &&
                s->picture[i].reference && !s->picture[i].needs_realloc) {
                /* with frame threading another thread may legitimately
                 * still hold references, so only warn otherwise */
                if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                    av_log(avctx, AV_LOG_ERROR,
                           "releasing zombie picture\n");
                ff_mpeg_unref_picture(s, &s->picture[i]);
            }
        }
    }

    ff_mpeg_unref_picture(s, &s->current_picture);

    if (!s->encoding) {
        ff_release_unused_pictures(s, 1);

        if (s->current_picture_ptr &&
            s->current_picture_ptr->f.data[0] == NULL) {
            // we already have a unused image
            // (maybe it was set before reading the header)
            pic = s->current_picture_ptr;
        } else {
            i = ff_find_unused_picture(s, 0);
            if (i < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
                return i;
            }
            pic = &s->picture[i];
        }

        /* reference == 3 marks a full-frame reference (both fields) */
        pic->reference = 0;
        if (!s->droppable) {
            if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->reference = 3;
        }

        pic->f.coded_picture_number = s->coded_picture_number++;

        if (ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr = pic;
        // FIXME use only the vars from current_pic
        s->current_picture_ptr->f.top_field_first = s->top_field_first;
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* for field pictures, field order follows which field of the
             * pair arrived first */
            if (s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->f.top_field_first =
                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                     !s->progressive_sequence;
        s->current_picture_ptr->field_picture      = s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->f.pict_type = s->pict_type;
    // if (s->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* rotate the reference pointers for non-B frames */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
            s->pict_type, s->droppable);

    /* a non-I frame (or a field-based keyframe) with no usable last
     * picture: synthesize a grey dummy reference so prediction has
     * something to read from */
    if ((s->last_picture_ptr == NULL ||
         s->last_picture_ptr->f.data[0] == NULL) &&
        (s->pict_type != AV_PICTURE_TYPE_I ||
         s->picture_structure != PICT_FRAME)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");
        else if (s->picture_structure != PICT_FRAME)
            av_log(avctx, AV_LOG_INFO,
                   "allocate dummy last picture for field based first keyframe\n");

        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->last_picture_ptr = &s->picture[i];
        if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }

        /* luma 0, chroma 0x80 => mid grey */
        memset(s->last_picture_ptr->f.data[0], 0,
               avctx->height * s->last_picture_ptr->f.linesize[0]);
        memset(s->last_picture_ptr->f.data[1], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f.linesize[1]);
        memset(s->last_picture_ptr->f.data[2], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f.linesize[2]);

        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    }
    /* a B frame with no usable next picture: same dummy-frame trick */
    if ((s->next_picture_ptr == NULL ||
         s->next_picture_ptr->f.data[0] == NULL) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->next_picture_ptr = &s->picture[i];
        if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
    }

    /* refresh the working copies of the reference pictures */
    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->last_picture);
        if (s->last_picture_ptr->f.data[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->next_picture);
        if (s->next_picture_ptr->f.data[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                 s->last_picture_ptr->f.data[0]));

    /* field pictures: address the selected field by offsetting data[]
     * and doubling the line stride */
    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            }
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i]    *= 2;
            s->next_picture.f.linesize[i]    *= 2;
        }
    }

    s->err_recognition = avctx->err_recognition;

    /* set dequantizer, we can't do it during init as
     * it might change for mpeg4 and we can't do it in the header
     * decode as init is not called for mpeg4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);
    }

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
1679
/* generic function for encode/decode called after a
 * frame has been coded/decoded.
 * Draws edge padding for error resilience, updates last-frame state and
 * releases encoder-side non-reference frames. */
void ff_MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* redraw edges for the frame if decoding didn't complete */
    // just to make sure that all data is rendered.
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else if ((s->er.error_count || s->encoding) &&
               !s->avctx->hwaccel &&
               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
               s->unrestricted_mv &&
               s->current_picture.reference &&
               !s->intra_only &&
               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        /* pad luma and both chroma planes (chroma edges scaled by the
         * subsampling shifts) */
        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                          s->h_edge_pos, s->v_edge_pos,
                          EDGE_WIDTH, EDGE_WIDTH,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
    }

    /* leave the FPU/MMX state clean for code that follows */
    emms_c();

    s->last_pict_type                 = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
            break;
        }
    }
    assert(i < MAX_PICTURE_COUNT);
#endif

    if (s->encoding) {
        /* release non-reference frames */
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            if (!s->picture[i].reference)
                ff_mpeg_unref_picture(s, &s->picture[i]);
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame = &s->current_picture_ptr->f;

    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}
1749
/**
 * Print debugging info for the given picture.
 *
 * Depending on avctx->debug flags, dumps one character cell per
 * macroblock: skip count (FF_DEBUG_SKIP), qscale (FF_DEBUG_QP), and/or a
 * symbolic macroblock type (FF_DEBUG_MB_TYPE). No-op for hwaccel
 * pictures or pictures without an mb_type table.
 */
void ff_print_debug_info(MpegEncContext *s, Picture *p)
{
    AVFrame *pict;
    if (s->avctx->hwaccel || !p || !p->mb_type)
        return;
    pict = &p->f;

    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
        case AV_PICTURE_TYPE_I:
            av_log(s->avctx,AV_LOG_DEBUG,"I\n");
            break;
        case AV_PICTURE_TYPE_P:
            av_log(s->avctx,AV_LOG_DEBUG,"P\n");
            break;
        case AV_PICTURE_TYPE_B:
            av_log(s->avctx,AV_LOG_DEBUG,"B\n");
            break;
        case AV_PICTURE_TYPE_S:
            av_log(s->avctx,AV_LOG_DEBUG,"S\n");
            break;
        case AV_PICTURE_TYPE_SI:
            av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
            break;
        case AV_PICTURE_TYPE_SP:
            av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
            break;
        }
        /* one line of output per macroblock row */
        for (y = 0; y < s->mb_height; y++) {
            for (x = 0; x < s->mb_width; x++) {
                if (s->avctx->debug & FF_DEBUG_SKIP) {
                    /* skip counter, clamped to a single digit */
                    int count = s->mbskip_table[x + y * s->mb_stride];
                    if (count > 9)
                        count = 9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if (s->avctx->debug & FF_DEBUG_QP) {
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
                           p->qscale_table[x + y * s->mb_stride]);
                }
                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = p->mb_type[x + y * s->mb_stride];
                    // Type & MV direction
                    if (IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else {
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    // segmentation
                    if (IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");


                    if (IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }
}
1849
1850 /**
1851 * find the lowest MB row referenced in the MVs
1852 */
1853 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1854 {
1855 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1856 int my, off, i, mvs;
1857
1858 if (s->picture_structure != PICT_FRAME || s->mcsel)
1859 goto unhandled;
1860
1861 switch (s->mv_type) {
1862 case MV_TYPE_16X16:
1863 mvs = 1;
1864 break;
1865 case MV_TYPE_16X8:
1866 mvs = 2;
1867 break;
1868 case MV_TYPE_8X8:
1869 mvs = 4;
1870 break;
1871 default:
1872 goto unhandled;
1873 }
1874
1875 for (i = 0; i < mvs; i++) {
1876 my = s->mv[dir][i][1]<<qpel_shift;
1877 my_max = FFMAX(my_max, my);
1878 my_min = FFMIN(my_min, my);
1879 }
1880
1881 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1882
1883 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1884 unhandled:
1885 return s->mb_height-1;
1886 }
1887
1888 /* put block[] to dest[] */
1889 static inline void put_dct(MpegEncContext *s,
1890 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1891 {
1892 s->dct_unquantize_intra(s, block, i, qscale);
1893 s->dsp.idct_put (dest, line_size, block);
1894 }
1895
1896 /* add block[] to dest[] */
1897 static inline void add_dct(MpegEncContext *s,
1898 int16_t *block, int i, uint8_t *dest, int line_size)
1899 {
1900 if (s->block_last_index[i] >= 0) {
1901 s->dsp.idct_add (dest, line_size, block);
1902 }
1903 }
1904
1905 static inline void add_dequant_dct(MpegEncContext *s,
1906 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1907 {
1908 if (s->block_last_index[i] >= 0) {
1909 s->dct_unquantize_inter(s, block, i, qscale);
1910
1911 s->dsp.idct_add (dest, line_size, block);
1912 }
1913 }
1914
/**
 * Clean dc, ac, coded_block for the current non-intra MB.
 *
 * Resets the DC predictors to 1024 (the neutral value also used at
 * init), zeroes the AC prediction buffers, and clears the coded_block
 * and mbintra flags for the current macroblock.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
{
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    /* the four luma 8x8 blocks of this MB (two per b8 row) */
    s->dc_val[0][xy           ] =
    s->dc_val[0][xy + 1       ] =
    s->dc_val[0][xy     + wrap] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version>=3) {
        s->coded_block[xy           ] =
        s->coded_block[xy + 1       ] =
        s->coded_block[xy     + wrap] =
        s->coded_block[xy + 1 + wrap] = 0;
    }
    /* chroma */
    /* chroma tables are at MB granularity, so switch to mb_stride */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[1][xy] =
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    s->mbintra_table[xy]= 0;
}
1947
1948 /* generic function called after a macroblock has been parsed by the
1949 decoder or after it has been encoded by the encoder.
1950
1951 Important variables used:
1952 s->mb_intra : true if intra macroblock
1953 s->mv_dir : motion vector direction
1954 s->mv_type : motion vector type
1955 s->mv : motion vector
1956 s->interlaced_dct : true if interlaced dct used (mpeg2)
1957 */
1958 static av_always_inline
1959 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1960 int is_mpeg12)
1961 {
1962 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1963 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1964 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1965 return;
1966 }
1967
1968 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1969 /* print DCT coefficients */
1970 int i,j;
1971 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1972 for(i=0; i<6; i++){
1973 for(j=0; j<64; j++){
1974 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
1975 }
1976 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1977 }
1978 }
1979
1980 s->current_picture.qscale_table[mb_xy] = s->qscale;
1981
1982 /* update DC predictors for P macroblocks */
1983 if (!s->mb_intra) {
1984 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1985 if(s->mbintra_table[mb_xy])
1986 ff_clean_intra_table_entries(s);
1987 } else {
1988 s->last_dc[0] =
1989 s->last_dc[1] =
1990 s->last_dc[2] = 128 << s->intra_dc_precision;
1991 }
1992 }
1993 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1994 s->mbintra_table[mb_xy]=1;
1995
1996 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1997 uint8_t *dest_y, *dest_cb, *dest_cr;
1998 int dct_linesize, dct_offset;
1999 op_pixels_func (*op_pix)[4];
2000 qpel_mc_func (*op_qpix)[16];
2001 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2002 const int uvlinesize = s->current_picture.f.linesize[1];
2003 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2004 const int block_size = 8;
2005
2006 /* avoid copy if macroblock skipped in last frame too */
2007 /* skip only during decoding as we might trash the buffers during encoding a bit */
2008 if(!s->encoding){
2009 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2010
2011 if (s->mb_skipped) {
2012 s->mb_skipped= 0;
2013 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2014 *mbskip_ptr = 1;
2015 } else if(!s->current_picture.reference) {
2016 *mbskip_ptr = 1;
2017 } else{
2018 *mbskip_ptr = 0; /* not skipped */
2019 }
2020 }
2021
2022 dct_linesize = linesize << s->interlaced_dct;
2023 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2024
2025 if(readable){
2026 dest_y= s->dest[0];
2027 dest_cb= s->dest[1];
2028 dest_cr= s->dest[2];
2029 }else{
2030 dest_y = s->b_scratchpad;
2031 dest_cb= s->b_scratchpad+16*linesize;
2032 dest_cr= s->b_scratchpad+32*linesize;
2033 }
2034
2035 if (!s->mb_intra) {
2036 /* motion handling */
2037 /* decoding or more than one mb_type (MC was already done otherwise) */
2038 if(!s->encoding){
2039
2040 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2041 if (s->mv_dir & MV_DIR_FORWARD) {
2042 ff_thread_await_progress(&s->last_picture_ptr->tf,
2043 ff_MPV_lowest_referenced_row(s, 0),
2044 0);
2045 }
2046 if (s->mv_dir & MV_DIR_BACKWARD) {
2047 ff_thread_await_progress(&s->next_picture_ptr->tf,
2048 ff_MPV_lowest_referenced_row(s, 1),
2049 0);
2050 }
2051 }
2052
2053 op_qpix= s->me.qpel_put;
2054 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2055 op_pix = s->hdsp.put_pixels_tab;
2056 }else{
2057 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2058 }
2059 if (s->mv_dir & MV_DIR_FORWARD) {
2060 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2061 op_pix = s->hdsp.avg_pixels_tab;
2062 op_qpix= s->me.qpel_avg;
2063 }
2064 if (s->mv_dir & MV_DIR_BACKWARD) {
2065 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2066 }
2067 }
2068
2069 /* skip dequant / idct if we are really late ;) */
2070 if(s->avctx->skip_idct){
2071 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2072 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2073 || s->avctx->skip_idct >= AVDISCARD_ALL)
2074 goto skip_idct;
2075 }
2076
2077 /* add dct residue */
2078 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2079 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2080 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2081 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2082 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2083 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2084
2085 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2086 if (s->chroma_y_shift){
2087 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2088 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2089 }else{
2090 dct_linesize >>= 1;
2091 dct_offset >>=1;
2092 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2093 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2094 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2095 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2096 }
2097 }
2098 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2099 add_dct(s, block[0], 0, dest_y , dct_linesize);
2100 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2101 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2102 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2103
2104 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2105 if(s->chroma_y_shift){//Chroma420
2106 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2107 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2108 }else{
2109 //chroma422
2110 dct_linesize = uvlinesize << s->interlaced_dct;
2111 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2112
2113 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2114 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2115 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2116 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2117 if(!s->chroma_x_shift){//Chroma444
2118 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2119 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2120 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2121 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2122 }
2123 }
2124 }//fi gray
2125 }
2126 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2127 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2128 }
2129 } else {
2130 /* dct only in intra block */
2131 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2132 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2133 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2134 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2135 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2136
2137 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2138 if(s->chroma_y_shift){
2139 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2140 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2141 }else{
2142 dct_offset >>=1;
2143 dct_linesize >>=1;
2144 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2145 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2146 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2147 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2148 }
2149 }
2150 }else{
2151 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2152 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2153 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2154 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2155
2156 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2157 if(s->chroma_y_shift){
2158 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2159 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2160 }else{
2161
2162 dct_linesize = uvlinesize << s->interlaced_dct;
2163 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2164
2165 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2166 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2167 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2168 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2169 if(!s->chroma_x_shift){//Chroma444
2170 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2171 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2172 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2173 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2174 }
2175 }
2176 }//gray
2177 }
2178 }
2179 skip_idct:
2180 if(!readable){
2181 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2182 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2183 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2184 }
2185 }
2186 }
2187
2188 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2189 #if !CONFIG_SMALL
2190 if(s->out_format == FMT_MPEG1) {
2191 MPV_decode_mb_internal(s, block, 1);
2192 } else
2193 #endif
2194 MPV_decode_mb_internal(s, block, 0);
2195 }
2196
2197 /**
2198 * @param h is the normal height, this will be reduced automatically if needed for the last row
2199 */
2200 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2201 Picture *last, int y, int h, int picture_structure,
2202 int first_field, int draw_edges, int low_delay,
2203 int v_edge_pos, int h_edge_pos)
2204 {
2205 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2206 int hshift = desc->log2_chroma_w;
2207 int vshift = desc->log2_chroma_h;
2208 const int field_pic = picture_structure != PICT_FRAME;
2209 if(field_pic){
2210 h <<= 1;
2211 y <<= 1;
2212 }
2213
2214 if (!avctx->hwaccel &&
2215 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2216 draw_edges &&
2217 cur->reference &&
2218 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2219 int *linesize = cur->f.linesize;
2220 int sides = 0, edge_h;
2221 if (y==0) sides |= EDGE_TOP;
2222 if (y + h >= v_edge_pos)
2223 sides |= EDGE_BOTTOM;
2224
2225 edge_h= FFMIN(h, v_edge_pos - y);
2226
2227 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2228 linesize[0], h_edge_pos, edge_h,
2229 EDGE_WIDTH, EDGE_WIDTH, sides);
2230 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2231 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2232 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2233 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2234 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2235 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2236 }
2237
2238 h = FFMIN(h, avctx->height - y);
2239
2240 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2241
2242 if (avctx->draw_horiz_band) {
2243 AVFrame *src;
2244 int offset[AV_NUM_DATA_POINTERS];
2245 int i;
2246
2247 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2248 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2249 src = &cur->f;
2250 else if (last)
2251 src = &last->f;
2252 else
2253 return;
2254
2255 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2256 picture_structure == PICT_FRAME &&
2257 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2258 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2259 offset[i] = 0;
2260 }else{
2261 offset[0]= y * src->linesize[0];
2262 offset[1]=
2263 offset[2]= (y >> vshift) * src->linesize[1];
2264 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2265 offset[i] = 0;
2266 }
2267
2268 emms_c();
2269
2270 avctx->draw_horiz_band(avctx, src, offset,
2271 y, picture_structure, h);
2272 }
2273 }
2274
2275 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2276 {
2277 int draw_edges = s->unrestricted_mv && !s->intra_only;
2278 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2279 &s->last_picture, y, h, s->picture_structure,
2280 s->first_field, draw_edges, s->low_delay,
2281 s->v_edge_pos, s->h_edge_pos);
2282 }
2283
2284 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2285 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2286 const int uvlinesize = s->current_picture.f.linesize[1];
2287 const int mb_size= 4;
2288
2289 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2290 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2291 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2292 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2293 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2294 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2295 //block_index is not used by mpeg2, so it is not affected by chroma_format
2296
2297 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2298 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2299 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2300
2301 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2302 {
2303 if(s->picture_structure==PICT_FRAME){
2304 s->dest[0] += s->mb_y * linesize << mb_size;
2305 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2306 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2307 }else{
2308 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2309 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2310 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2311 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2312 }
2313 }
2314 }
2315
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t stash[64];
    int i;

    if (last <= 0)
        return;

    /* first pass: pull the used coefficients aside and clear their original
     * slots, so overlapping source/target positions cannot clobber data */
    for (i = 0; i <= last; i++) {
        const int pos = scantable[i];
        stash[pos] = block[pos];
        block[pos] = 0;
    }

    /* second pass: scatter them back through the permutation table */
    for (i = 0; i <= last; i++) {
        const int pos = scantable[i];
        block[permutation[pos]] = stash[pos];
    }
}
2344
2345 void ff_mpeg_flush(AVCodecContext *avctx){
2346 int i;
2347 MpegEncContext *s = avctx->priv_data;
2348
2349 if(s==NULL || s->picture==NULL)
2350 return;
2351
2352 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2353 ff_mpeg_unref_picture(s, &s->picture[i]);
2354 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2355
2356 ff_mpeg_unref_picture(s, &s->current_picture);
2357 ff_mpeg_unref_picture(s, &s->last_picture);
2358 ff_mpeg_unref_picture(s, &s->next_picture);
2359
2360 s->mb_x= s->mb_y= 0;
2361
2362 s->parse_context.state= -1;
2363 s->parse_context.frame_start_found= 0;
2364 s->parse_context.overread= 0;
2365 s->parse_context.overread_index= 0;
2366 s->parse_context.index= 0;
2367 s->parse_context.last_index= 0;
2368 s->bitstream_buffer_size=0;
2369 s->pp_time=0;
2370 }
2371
2372 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2373 int16_t *block, int n, int qscale)
2374 {
2375 int i, level, nCoeffs;
2376 const uint16_t *quant_matrix;
2377
2378 nCoeffs= s->block_last_index[n];
2379
2380 if (n < 4)
2381 block[0] = block[0] * s->y_dc_scale;
2382 else
2383 block[0] = block[0] * s->c_dc_scale;
2384 /* XXX: only mpeg1 */
2385 quant_matrix = s->intra_matrix;
2386 for(i=1;i<=nCoeffs;i++) {
2387 int j= s->intra_scantable.permutated[i];
2388 level = block[j];
2389 if (level) {
2390 if (level < 0) {
2391 level = -level;
2392 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2393 level = (level - 1) | 1;
2394 level = -level;
2395 } else {
2396 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2397 level = (level - 1) | 1;
2398 }
2399 block[j] = level;
2400 }
2401 }
2402 }
2403
2404 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2405 int16_t *block, int n, int qscale)
2406 {
2407 int i, level, nCoeffs;
2408 const uint16_t *quant_matrix;
2409
2410 nCoeffs= s->block_last_index[n];
2411
2412 quant_matrix = s->inter_matrix;
2413 for(i=0; i<=nCoeffs; i++) {
2414 int j= s->intra_scantable.permutated[i];
2415 level = block[j];
2416 if (level) {
2417 if (level < 0) {
2418 level = -level;
2419 level = (((level << 1) + 1) * qscale *
2420 ((int) (quant_matrix[j]))) >> 4;
2421 level = (level - 1) | 1;
2422 level = -level;
2423 } else {
2424 level = (((level << 1) + 1) * qscale *
2425 ((int) (quant_matrix[j]))) >> 4;
2426 level = (level - 1) | 1;
2427 }
2428 block[j] = level;
2429 }
2430 }
2431 }
2432
2433 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2434 int16_t *block, int n, int qscale)
2435 {
2436 int i, level, nCoeffs;
2437 const uint16_t *quant_matrix;
2438
2439 if(s->alternate_scan) nCoeffs= 63;
2440 else nCoeffs= s->block_last_index[n];
2441
2442 if (n < 4)
2443 block[0] = block[0] * s->y_dc_scale;
2444 else
2445 block[0] = block[0] * s->c_dc_scale;
2446 quant_matrix = s->intra_matrix;
2447 for(i=1;i<=nCoeffs;i++) {
2448 int j= s->intra_scantable.permutated[i];
2449 level = block[j];
2450 if (level) {
2451 if (level < 0) {
2452 level = -level;
2453 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2454 level = -level;
2455 } else {
2456 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2457 }
2458 block[j] = level;
2459 }
2460 }
2461 }
2462
2463 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2464 int16_t *block, int n, int qscale)
2465 {
2466 int i, level, nCoeffs;
2467 const uint16_t *quant_matrix;
2468 int sum=-1;
2469
2470 if(s->alternate_scan) nCoeffs= 63;
2471 else nCoeffs= s->block_last_index[n];
2472
2473 if (n < 4)
2474 block[0] = block[0] * s->y_dc_scale;
2475 else
2476 block[0] = block[0] * s->c_dc_scale;
2477 quant_matrix = s->intra_matrix;
2478 for(i=1;i<=nCoeffs;i++) {
2479 int j= s->intra_scantable.permutated[i];
2480 level = block[j];
2481 if (level) {
2482 if (level < 0) {
2483 level = -level;
2484 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2485 level = -level;
2486 } else {
2487 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2488 }
2489 block[j] = level;
2490 sum+=level;
2491 }
2492 }
2493 block[63]^=sum&1;
2494 }
2495
2496 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2497 int16_t *block, int n, int qscale)
2498 {
2499 int i, level, nCoeffs;
2500 const uint16_t *quant_matrix;
2501 int sum=-1;
2502
2503 if(s->alternate_scan) nCoeffs= 63;
2504 else nCoeffs= s->block_last_index[n];
2505
2506 quant_matrix = s->inter_matrix;
2507 for(i=0; i<=nCoeffs; i++) {
2508 int j= s->intra_scantable.permutated[i];
2509 level = block[j];
2510 if (level) {
2511 if (level < 0) {
2512 level = -level;
2513 level = (((level << 1) + 1) * qscale *
2514 ((int) (quant_matrix[j]))) >> 4;
2515 level = -level;
2516 } else {
2517 level = (((level << 1) + 1) * qscale *
2518 ((int) (quant_matrix[j]))) >> 4;
2519 }
2520 block[j] = level;
2521 sum+=level;
2522 }
2523 }
2524 block[63]^=sum&1;
2525 }
2526
2527 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2528 int16_t *block, int n, int qscale)
2529 {
2530 int i, level, qmul, qadd;
2531 int nCoeffs;
2532
2533 assert(s->block_last_index[n]>=0);
2534
2535 qmul = qscale << 1;
2536
2537 if (!s->h263_aic) {
2538 if (n < 4)
2539 block[0] = block[0] * s->y_dc_scale;
2540 else
2541 block[0] = block[0] * s->c_dc_scale;
2542 qadd = (qscale - 1) | 1;
2543 }else{
2544 qadd = 0;
2545 }
2546 if(s->ac_pred)
2547 nCoeffs=63;
2548 else
2549 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2550
2551 for(i=1; i<=nCoeffs; i++) {
2552 level = block[i];
2553 if (level) {
2554 if (level < 0) {
2555 level = level * qmul - qadd;
2556 } else {
2557 level = level * qmul + qadd;
2558 }
2559 block[i] = level;
2560 }
2561 }
2562 }
2563
2564 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2565 int16_t *block, int n, int qscale)
2566 {
2567 int i, level, qmul, qadd;
2568 int nCoeffs;
2569
2570 assert(s->block_last_index[n]>=0);
2571
2572 qadd = (qscale - 1) | 1;
2573 qmul = qscale << 1;
2574
2575 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2576
2577 for(i=0; i<=nCoeffs; i++) {
2578 level = block[i];
2579 if (level) {
2580 if (level < 0) {
2581 level = level * qmul - qadd;
2582 } else {
2583 level = level * qmul + qadd;
2584 }
2585 block[i] = level;
2586 }
2587 }
2588 }
2589
2590 /**
2591 * set qscale and update qscale dependent variables.
2592 */
2593 void ff_set_qscale(MpegEncContext * s, int qscale)
2594 {
2595 if (qscale < 1)
2596 qscale = 1;
2597 else if (qscale > 31)
2598 qscale = 31;
2599
2600 s->qscale = qscale;
2601 s->chroma_qscale= s->chroma_qscale_table[qscale];
2602
2603 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2604 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2605 }
2606
2607 void ff_MPV_report_decode_progress(MpegEncContext *s)
2608 {
2609 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2610 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2611 }
2612
2613 #if CONFIG_ERROR_RESILIENCE
2614 void ff_mpeg_er_frame_start(MpegEncContext *s)
2615 {
2616 ERContext *er = &s->er;
2617
2618 er->cur_pic = s->current_picture_ptr;
2619 er->last_pic = s->last_picture_ptr;
2620 er->next_pic = s->next_picture_ptr;
2621
2622 er->pp_time = s->pp_time;
2623 er->pb_time = s->pb_time;
2624 er->quarter_sample = s->quarter_sample;
2625 er->partitioned_frame = s->partitioned_frame;
2626
2627 ff_er_frame_start(er);
2628 }
2629 #endif /* CONFIG_ERROR_RESILIENCE */