1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "avcodec.h"
34 #include "dsputil.h"
35 #include "internal.h"
36 #include "mathops.h"
37 #include "mpegvideo.h"
38 #include "mjpegenc.h"
39 #include "msmpeg4.h"
40 #include "xvmc_internal.h"
41 #include "thread.h"
42 #include <limits.h>
43
44 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
45 int16_t *block, int n, int qscale);
46 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
47 int16_t *block, int n, int qscale);
48 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
49 int16_t *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
51 int16_t *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
53 int16_t *block, int n, int qscale);
54 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
55 int16_t *block, int n, int qscale);
56 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
57 int16_t *block, int n, int qscale);
58
59 static const uint8_t ff_default_chroma_qscale_table[32] = {
60 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
61 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
62 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
63 };
64
65 const uint8_t ff_mpeg1_dc_scale_table[128] = {
66 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
67 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
68 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 };
76
77 static const uint8_t mpeg2_dc_scale_table1[128] = {
78 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
79 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
80 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 };
88
89 static const uint8_t mpeg2_dc_scale_table2[128] = {
90 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
91 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 };
100
101 static const uint8_t mpeg2_dc_scale_table3[128] = {
102 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 };
112
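/* Indexed by the MPEG-2 intra_dc_precision field (0-3); the corresponding
 * DC scale is 8 >> precision, i.e. 8, 4, 2 or 1 (the four tables above). */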
113 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
114 ff_mpeg1_dc_scale_table,
115 mpeg2_dc_scale_table1,
116 mpeg2_dc_scale_table2,
117 mpeg2_dc_scale_table3,
118 };
119
120 const enum AVPixelFormat ff_pixfmt_list_420[] = {
121 AV_PIX_FMT_YUV420P,
122 AV_PIX_FMT_NONE
123 };
124
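/* Macroblock reconstruction callback for the error-concealment code
 * (installed as er->decode_mb in init_er() below): it loads the supplied
 * motion data into the context and reconstructs one macroblock via
 * ff_MPV_decode_mb(). */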
125 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
126 int (*mv)[2][4][2],
127 int mb_x, int mb_y, int mb_intra, int mb_skipped)
128 {
129 MpegEncContext *s = opaque;
130
131 s->mv_dir = mv_dir;
132 s->mv_type = mv_type;
133 s->mb_intra = mb_intra;
134 s->mb_skipped = mb_skipped;
135 s->mb_x = mb_x;
136 s->mb_y = mb_y;
137 memcpy(s->mv, mv, sizeof(*mv));
138
139 ff_init_block_index(s);
140 ff_update_block_index(s);
141
142 s->dsp.clear_blocks(s->block[0]);
143
144 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
145 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
146 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147
148 assert(ref == 0);
149 ff_MPV_decode_mb(s, s->block);
150 }
151
152 /* init common dct for both encoder and decoder */
153 av_cold int ff_dct_common_init(MpegEncContext *s)
154 {
155 ff_dsputil_init(&s->dsp, s->avctx);
156 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
157 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
158
159 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
160 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
161 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
162 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
163 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
164 if (s->flags & CODEC_FLAG_BITEXACT)
165 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
166 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
167
168 #if ARCH_X86
169 ff_MPV_common_init_x86(s);
170 #elif ARCH_ALPHA
171 ff_MPV_common_init_axp(s);
172 #elif ARCH_ARM
173 ff_MPV_common_init_arm(s);
174 #elif ARCH_BFIN
175 ff_MPV_common_init_bfin(s);
176 #elif ARCH_PPC
177 ff_MPV_common_init_ppc(s);
178 #endif
179
180 /* load & permute scantables
181 * note: only wmv uses different ones
182 */
183 if (s->alternate_scan) {
184 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
185 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
186 } else {
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
189 }
190 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
192
193 return 0;
194 }
195
196 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
197 {
198 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
199
200 // edge emu needs blocksize + filter length - 1
201 // (= 17x17 for halfpel / 21x21 for h264)
202 // VC1 computes luma and chroma simultaneously and needs 19x19 + 9x9
203 // at uvlinesize. It supports only YUV420 so 24x24 is enough
204 // linesize * interlaced * MBsize
205 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
206 fail);
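/* Rough worked example for the size above (illustrative numbers, not from
 * the original source): for a 1920-pixel-wide luma plane with linesize 1920,
 * alloc_size = FFALIGN(1920 + 32, 32) = 1952, so the edge_emu buffer is
 * 1952 * 2 * 24 = 93696 bytes - 24 rows of macroblock height times the
 * factor of 2 for the interlaced case. */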
207
208 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
209 fail)
210 s->me.temp = s->me.scratchpad;
211 s->rd_scratchpad = s->me.scratchpad;
212 s->b_scratchpad = s->me.scratchpad;
213 s->obmc_scratchpad = s->me.scratchpad + 16;
214
215 return 0;
216 fail:
217 av_freep(&s->edge_emu_buffer);
218 return AVERROR(ENOMEM);
219 }
220
221 /**
222 * Allocate a frame buffer
223 */
224 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
225 {
226 int r, ret;
227
228 pic->tf.f = &pic->f;
229 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
230 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
231 s->codec_id != AV_CODEC_ID_MSS2)
232 r = ff_thread_get_buffer(s->avctx, &pic->tf,
233 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
234 else {
235 pic->f.width = s->avctx->width;
236 pic->f.height = s->avctx->height;
237 pic->f.format = s->avctx->pix_fmt;
238 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
239 }
240
241 if (r < 0 || !pic->f.data[0]) {
242 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
243 r, pic->f.data[0]);
244 return -1;
245 }
246
247 if (s->avctx->hwaccel) {
248 assert(!pic->hwaccel_picture_private);
249 if (s->avctx->hwaccel->priv_data_size) {
250 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
251 if (!pic->hwaccel_priv_buf) {
252 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
253 return -1;
254 }
255 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
256 }
257 }
258
259 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
260 s->uvlinesize != pic->f.linesize[1])) {
261 av_log(s->avctx, AV_LOG_ERROR,
262 "get_buffer() failed (stride changed)\n");
263 ff_mpeg_unref_picture(s, pic);
264 return -1;
265 }
266
267 if (pic->f.linesize[1] != pic->f.linesize[2]) {
268 av_log(s->avctx, AV_LOG_ERROR,
269 "get_buffer() failed (uv stride mismatch)\n");
270 ff_mpeg_unref_picture(s, pic);
271 return -1;
272 }
273
274 if (!s->edge_emu_buffer &&
275 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
276 av_log(s->avctx, AV_LOG_ERROR,
277 "get_buffer() failed to allocate context scratch buffers.\n");
278 ff_mpeg_unref_picture(s, pic);
279 return ret;
280 }
281
282 return 0;
283 }
284
285 static void free_picture_tables(Picture *pic)
286 {
287 int i;
288
289 av_buffer_unref(&pic->mb_var_buf);
290 av_buffer_unref(&pic->mc_mb_var_buf);
291 av_buffer_unref(&pic->mb_mean_buf);
292 av_buffer_unref(&pic->mbskip_table_buf);
293 av_buffer_unref(&pic->qscale_table_buf);
294 av_buffer_unref(&pic->mb_type_buf);
295
296 for (i = 0; i < 2; i++) {
297 av_buffer_unref(&pic->motion_val_buf[i]);
298 av_buffer_unref(&pic->ref_index_buf[i]);
299 }
300 }
301
302 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
303 {
304 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
305 const int mb_array_size = s->mb_stride * s->mb_height;
306 const int b8_array_size = s->b8_stride * s->mb_height * 2;
307 int i;
308
309
310 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
311 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
312 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
313 sizeof(uint32_t));
314 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
315 return AVERROR(ENOMEM);
316
317 if (s->encoding) {
318 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
319 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
320 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
321 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
322 return AVERROR(ENOMEM);
323 }
324
325 if (s->out_format == FMT_H263 || s->encoding ||
326 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
327 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
328 int ref_index_size = 4 * mb_array_size;
329
330 for (i = 0; mv_size && i < 2; i++) {
331 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
332 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
333 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
334 return AVERROR(ENOMEM);
335 }
336 }
337
338 return 0;
339 }
340
341 static int make_tables_writable(Picture *pic)
342 {
343 int ret, i;
344 #define MAKE_WRITABLE(table) \
345 do {\
346 if (pic->table &&\
347 (ret = av_buffer_make_writable(&pic->table)) < 0)\
348 return ret;\
349 } while (0)
350
351 MAKE_WRITABLE(mb_var_buf);
352 MAKE_WRITABLE(mc_mb_var_buf);
353 MAKE_WRITABLE(mb_mean_buf);
354 MAKE_WRITABLE(mbskip_table_buf);
355 MAKE_WRITABLE(qscale_table_buf);
356 MAKE_WRITABLE(mb_type_buf);
357
358 for (i = 0; i < 2; i++) {
359 MAKE_WRITABLE(motion_val_buf[i]);
360 MAKE_WRITABLE(ref_index_buf[i]);
361 }
362
363 return 0;
364 }
365
366 /**
367 * Allocate a Picture.
368 * The pixels are allocated/set by calling get_buffer() if shared = 0
369 */
370 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
371 {
372 int i, ret;
373
374 if (shared) {
375 assert(pic->f.data[0]);
376 pic->shared = 1;
377 } else {
378 assert(!pic->f.data[0]);
379
380 if (alloc_frame_buffer(s, pic) < 0)
381 return -1;
382
383 s->linesize = pic->f.linesize[0];
384 s->uvlinesize = pic->f.linesize[1];
385 }
386
387 if (!pic->qscale_table_buf)
388 ret = alloc_picture_tables(s, pic);
389 else
390 ret = make_tables_writable(pic);
391 if (ret < 0)
392 goto fail;
393
394 if (s->encoding) {
395 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
396 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
397 pic->mb_mean = pic->mb_mean_buf->data;
398 }
399
400 pic->mbskip_table = pic->mbskip_table_buf->data;
401 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
402 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
403
404 if (pic->motion_val_buf[0]) {
405 for (i = 0; i < 2; i++) {
406 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
407 pic->ref_index[i] = pic->ref_index_buf[i]->data;
408 }
409 }
410
411 return 0;
412 fail:
413 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
414 ff_mpeg_unref_picture(s, pic);
415 free_picture_tables(pic);
416 return AVERROR(ENOMEM);
417 }
418
419 /**
420 * Deallocate a picture.
421 */
422 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
423 {
424 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
425
426 pic->tf.f = &pic->f;
427 /* WM Image / Screen codecs allocate internal buffers with different
428 * dimensions / colorspaces; ignore user-defined callbacks for these. */
429 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
430 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
431 s->codec_id != AV_CODEC_ID_MSS2)
432 ff_thread_release_buffer(s->avctx, &pic->tf);
433 else
434 av_frame_unref(&pic->f);
435
436 av_buffer_unref(&pic->hwaccel_priv_buf);
437
438 if (pic->needs_realloc)
439 free_picture_tables(pic);
440
441 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
442 }
443
444 static int update_picture_tables(Picture *dst, Picture *src)
445 {
446 int i;
447
448 #define UPDATE_TABLE(table)\
449 do {\
450 if (src->table &&\
451 (!dst->table || dst->table->buffer != src->table->buffer)) {\
452 av_buffer_unref(&dst->table);\
453 dst->table = av_buffer_ref(src->table);\
454 if (!dst->table) {\
455 free_picture_tables(dst);\
456 return AVERROR(ENOMEM);\
457 }\
458 }\
459 } while (0)
460
461 UPDATE_TABLE(mb_var_buf);
462 UPDATE_TABLE(mc_mb_var_buf);
463 UPDATE_TABLE(mb_mean_buf);
464 UPDATE_TABLE(mbskip_table_buf);
465 UPDATE_TABLE(qscale_table_buf);
466 UPDATE_TABLE(mb_type_buf);
467 for (i = 0; i < 2; i++) {
468 UPDATE_TABLE(motion_val_buf[i]);
469 UPDATE_TABLE(ref_index_buf[i]);
470 }
471
472 dst->mb_var = src->mb_var;
473 dst->mc_mb_var = src->mc_mb_var;
474 dst->mb_mean = src->mb_mean;
475 dst->mbskip_table = src->mbskip_table;
476 dst->qscale_table = src->qscale_table;
477 dst->mb_type = src->mb_type;
478 for (i = 0; i < 2; i++) {
479 dst->motion_val[i] = src->motion_val[i];
480 dst->ref_index[i] = src->ref_index[i];
481 }
482
483 return 0;
484 }
485
486 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
487 {
488 int ret;
489
490 av_assert0(!dst->f.buf[0]);
491 av_assert0(src->f.buf[0]);
492
493 src->tf.f = &src->f;
494 dst->tf.f = &dst->f;
495 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
496 if (ret < 0)
497 goto fail;
498
499 ret = update_picture_tables(dst, src);
500 if (ret < 0)
501 goto fail;
502
503 if (src->hwaccel_picture_private) {
504 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
505 if (!dst->hwaccel_priv_buf)
506 goto fail;
507 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
508 }
509
510 dst->field_picture = src->field_picture;
511 dst->mb_var_sum = src->mb_var_sum;
512 dst->mc_mb_var_sum = src->mc_mb_var_sum;
513 dst->b_frame_score = src->b_frame_score;
514 dst->needs_realloc = src->needs_realloc;
515 dst->reference = src->reference;
516 dst->shared = src->shared;
517
518 return 0;
519 fail:
520 ff_mpeg_unref_picture(s, dst);
521 return ret;
522 }
523
524 static int init_duplicate_context(MpegEncContext *s)
525 {
526 int y_size = s->b8_stride * (2 * s->mb_height + 1);
527 int c_size = s->mb_stride * (s->mb_height + 1);
528 int yc_size = y_size + 2 * c_size;
529 int i;
530
531 s->edge_emu_buffer =
532 s->me.scratchpad =
533 s->me.temp =
534 s->rd_scratchpad =
535 s->b_scratchpad =
536 s->obmc_scratchpad = NULL;
537
538 if (s->encoding) {
539 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
540 ME_MAP_SIZE * sizeof(uint32_t), fail)
541 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
542 ME_MAP_SIZE * sizeof(uint32_t), fail)
543 if (s->avctx->noise_reduction) {
544 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
545 2 * 64 * sizeof(int), fail)
546 }
547 }
548 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
549 s->block = s->blocks[0];
550
551 for (i = 0; i < 12; i++) {
552 s->pblocks[i] = &s->block[i];
553 }
554
555 if (s->out_format == FMT_H263) {
556 /* ac values */
557 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
558 yc_size * sizeof(int16_t) * 16, fail);
559 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
560 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
561 s->ac_val[2] = s->ac_val[1] + c_size;
562 }
563
564 return 0;
565 fail:
566 return -1; // freed via ff_MPV_common_end()
567 }
568
569 static void free_duplicate_context(MpegEncContext *s)
570 {
571 if (s == NULL)
572 return;
573
574 av_freep(&s->edge_emu_buffer);
575 av_freep(&s->me.scratchpad);
576 s->me.temp =
577 s->rd_scratchpad =
578 s->b_scratchpad =
579 s->obmc_scratchpad = NULL;
580
581 av_freep(&s->dct_error_sum);
582 av_freep(&s->me.map);
583 av_freep(&s->me.score_map);
584 av_freep(&s->blocks);
585 av_freep(&s->ac_val_base);
586 s->block = NULL;
587 }
588
589 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
590 {
591 #define COPY(a) bak->a = src->a
592 COPY(edge_emu_buffer);
593 COPY(me.scratchpad);
594 COPY(me.temp);
595 COPY(rd_scratchpad);
596 COPY(b_scratchpad);
597 COPY(obmc_scratchpad);
598 COPY(me.map);
599 COPY(me.score_map);
600 COPY(blocks);
601 COPY(block);
602 COPY(start_mb_y);
603 COPY(end_mb_y);
604 COPY(me.map_generation);
605 COPY(pb);
606 COPY(dct_error_sum);
607 COPY(dct_count[0]);
608 COPY(dct_count[1]);
609 COPY(ac_val_base);
610 COPY(ac_val[0]);
611 COPY(ac_val[1]);
612 COPY(ac_val[2]);
613 #undef COPY
614 }
615
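/* Refresh a per-slice thread context from the main context: the whole
 * structure is copied over, then the destination's own scratch and buffer
 * pointers (saved via backup_duplicate_context()) are restored so each
 * slice context keeps its private buffers. */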
616 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
617 {
618 MpegEncContext bak;
619 int i, ret;
620 // FIXME copy only needed parts
621 // START_TIMER
622 backup_duplicate_context(&bak, dst);
623 memcpy(dst, src, sizeof(MpegEncContext));
624 backup_duplicate_context(dst, &bak);
625 for (i = 0; i < 12; i++) {
626 dst->pblocks[i] = &dst->block[i];
627 }
628 if (!dst->edge_emu_buffer &&
629 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
630 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
631 "scratch buffers.\n");
632 return ret;
633 }
634 // STOP_TIMER("update_duplicate_context")
635 // about 10k cycles / 0.01 sec for 1000 frames on a 1 GHz CPU with 2 threads
636 return 0;
637 }
638
639 int ff_mpeg_update_thread_context(AVCodecContext *dst,
640 const AVCodecContext *src)
641 {
642 int i, ret;
643 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
644
645 if (dst == src || !s1->context_initialized)
646 return 0;
647
648 // FIXME can parameters change on I-frames?
649 // in that case dst may need a reinit
650 if (!s->context_initialized) {
651 memcpy(s, s1, sizeof(MpegEncContext));
652
653 s->avctx = dst;
654 s->bitstream_buffer = NULL;
655 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
656
657 ff_MPV_common_init(s);
658 }
659
660 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
661 int err;
662 s->context_reinit = 0;
663 s->height = s1->height;
664 s->width = s1->width;
665 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
666 return err;
667 }
668
669 s->avctx->coded_height = s1->avctx->coded_height;
670 s->avctx->coded_width = s1->avctx->coded_width;
671 s->avctx->width = s1->avctx->width;
672 s->avctx->height = s1->avctx->height;
673
674 s->coded_picture_number = s1->coded_picture_number;
675 s->picture_number = s1->picture_number;
676 s->input_picture_number = s1->input_picture_number;
677
678 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
679 ff_mpeg_unref_picture(s, &s->picture[i]);
680 if (s1->picture[i].f.data[0] &&
681 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
682 return ret;
683 }
684
685 #define UPDATE_PICTURE(pic)\
686 do {\
687 ff_mpeg_unref_picture(s, &s->pic);\
688 if (s1->pic.f.data[0])\
689 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
690 else\
691 ret = update_picture_tables(&s->pic, &s1->pic);\
692 if (ret < 0)\
693 return ret;\
694 } while (0)
695
696 UPDATE_PICTURE(current_picture);
697 UPDATE_PICTURE(last_picture);
698 UPDATE_PICTURE(next_picture);
699
700 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
701 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
702 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
703
704 // Error/bug resilience
705 s->next_p_frame_damaged = s1->next_p_frame_damaged;
706 s->workaround_bugs = s1->workaround_bugs;
707
708 // MPEG4 timing info
709 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
710 (char *) &s1->shape - (char *) &s1->time_increment_bits);
711
712 // B-frame info
713 s->max_b_frames = s1->max_b_frames;
714 s->low_delay = s1->low_delay;
715 s->droppable = s1->droppable;
716
717 // DivX handling (doesn't work)
718 s->divx_packed = s1->divx_packed;
719
720 if (s1->bitstream_buffer) {
721 if (s1->bitstream_buffer_size +
722 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
723 av_fast_malloc(&s->bitstream_buffer,
724 &s->allocated_bitstream_buffer_size,
725 s1->allocated_bitstream_buffer_size);
726 s->bitstream_buffer_size = s1->bitstream_buffer_size;
727 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
728 s1->bitstream_buffer_size);
729 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
730 FF_INPUT_BUFFER_PADDING_SIZE);
731 }
732
733 // linesize-dependent scratch buffer allocation
734 if (!s->edge_emu_buffer)
735 if (s1->linesize) {
736 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
737 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
738 "scratch buffers.\n");
739 return AVERROR(ENOMEM);
740 }
741 } else {
742 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
743 "be allocated due to unknown size.\n");
744 return AVERROR_BUG;
745 }
746
747 // MPEG2/interlacing info
748 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
749 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
750
751 if (!s1->first_field) {
752 s->last_pict_type = s1->pict_type;
753 if (s1->current_picture_ptr)
754 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
755
756 if (s1->pict_type != AV_PICTURE_TYPE_B) {
757 s->last_non_b_pict_type = s1->pict_type;
758 }
759 }
760
761 return 0;
762 }
763
764 /**
765 * Set the given MpegEncContext to common defaults
766 * (same for encoding and decoding).
767 * The changed fields will not depend upon the
768 * prior state of the MpegEncContext.
769 */
770 void ff_MPV_common_defaults(MpegEncContext *s)
771 {
772 s->y_dc_scale_table =
773 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
774 s->chroma_qscale_table = ff_default_chroma_qscale_table;
775 s->progressive_frame = 1;
776 s->progressive_sequence = 1;
777 s->picture_structure = PICT_FRAME;
778
779 s->coded_picture_number = 0;
780 s->picture_number = 0;
781 s->input_picture_number = 0;
782
783 s->picture_in_gop_number = 0;
784
785 s->f_code = 1;
786 s->b_code = 1;
787
788 s->slice_context_count = 1;
789 }
790
791 /**
792 * Set the given MpegEncContext to defaults for decoding.
793 * The changed fields will not depend upon
794 * the prior state of the MpegEncContext.
795 */
796 void ff_MPV_decode_defaults(MpegEncContext *s)
797 {
798 ff_MPV_common_defaults(s);
799 }
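/* Typical call order for the defaults/init functions here, sketched for
 * illustration (a minimal sketch; the function name and exact field set are
 * hypothetical, assuming an MpegEncContext embedded as the decoder's
 * private context): */
#if 0
static av_cold int example_decode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    ff_MPV_decode_defaults(s);          /* reset to decoding defaults       */
    s->avctx    = avctx;
    s->codec_id = avctx->codec->id;
    s->width    = avctx->coded_width;   /* may still be 0 until a header    */
    s->height   = avctx->coded_height;  /* has been parsed                  */

    return ff_MPV_common_init(s);       /* allocate resolution-dependent    */
}                                       /* state once dimensions are known  */
#endif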
800
801 static int init_er(MpegEncContext *s)
802 {
803 ERContext *er = &s->er;
804 int mb_array_size = s->mb_height * s->mb_stride;
805 int i;
806
807 er->avctx = s->avctx;
808 er->dsp = &s->dsp;
809
810 er->mb_index2xy = s->mb_index2xy;
811 er->mb_num = s->mb_num;
812 er->mb_width = s->mb_width;
813 er->mb_height = s->mb_height;
814 er->mb_stride = s->mb_stride;
815 er->b8_stride = s->b8_stride;
816
817 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
818 er->error_status_table = av_mallocz(mb_array_size);
819 if (!er->er_temp_buffer || !er->error_status_table)
820 goto fail;
821
822 er->mbskip_table = s->mbskip_table;
823 er->mbintra_table = s->mbintra_table;
824
825 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
826 er->dc_val[i] = s->dc_val[i];
827
828 er->decode_mb = mpeg_er_decode_mb;
829 er->opaque = s;
830
831 return 0;
832 fail:
833 av_freep(&er->er_temp_buffer);
834 av_freep(&er->error_status_table);
835 return AVERROR(ENOMEM);
836 }
837
838 /**
839 * Initialize and allocates MpegEncContext fields dependent on the resolution.
840 */
841 static int init_context_frame(MpegEncContext *s)
842 {
843 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
844
845 s->mb_width = (s->width + 15) / 16;
846 s->mb_stride = s->mb_width + 1;
847 s->b8_stride = s->mb_width * 2 + 1;
848 s->b4_stride = s->mb_width * 4 + 1;
849 mb_array_size = s->mb_height * s->mb_stride;
850 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
851
852 /* set default edge pos, will be overridden
853 * in decode_header if needed */
854 s->h_edge_pos = s->mb_width * 16;
855 s->v_edge_pos = s->mb_height * 16;
856
857 s->mb_num = s->mb_width * s->mb_height;
858
859 s->block_wrap[0] =
860 s->block_wrap[1] =
861 s->block_wrap[2] =
862 s->block_wrap[3] = s->b8_stride;
863 s->block_wrap[4] =
864 s->block_wrap[5] = s->mb_stride;
865
866 y_size = s->b8_stride * (2 * s->mb_height + 1);
867 c_size = s->mb_stride * (s->mb_height + 1);
868 yc_size = y_size + 2 * c_size;
869
870 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
871 fail); // error resilience code looks cleaner with this
872 for (y = 0; y < s->mb_height; y++)
873 for (x = 0; x < s->mb_width; x++)
874 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
875
876 s->mb_index2xy[s->mb_height * s->mb_width] =
877 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
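/* Small worked example of the mapping above (illustrative numbers): with
 * mb_width = 2 and mb_stride = 3, indices 0..3 map to xy 0, 1, 3, 4 - the
 * skipped column is the one-MB padding that mb_stride adds for the error
 * resilience code. */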
878
879 if (s->encoding) {
880 /* Allocate MV tables */
881 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
882 mv_table_size * 2 * sizeof(int16_t), fail);
883 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
884 mv_table_size * 2 * sizeof(int16_t), fail);
885 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
886 mv_table_size * 2 * sizeof(int16_t), fail);
887 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
888 mv_table_size * 2 * sizeof(int16_t), fail);
889 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
890 mv_table_size * 2 * sizeof(int16_t), fail);
891 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
892 mv_table_size * 2 * sizeof(int16_t), fail);
893 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
894 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
895 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
896 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
897 s->mb_stride + 1;
898 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
899 s->mb_stride + 1;
900 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
901
902 /* Allocate MB type table */
903 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
904 sizeof(uint16_t), fail); // needed for encoding
905
906 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
907 sizeof(int), fail);
908
909 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
910 mb_array_size * sizeof(float), fail);
911 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
912 mb_array_size * sizeof(float), fail);
913
914 }
915
916 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
917 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
918 /* interlaced direct mode decoding tables */
919 for (i = 0; i < 2; i++) {
920 int j, k;
921 for (j = 0; j < 2; j++) {
922 for (k = 0; k < 2; k++) {
923 FF_ALLOCZ_OR_GOTO(s->avctx,
924 s->b_field_mv_table_base[i][j][k],
925 mv_table_size * 2 * sizeof(int16_t),
926 fail);
927 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
928 s->mb_stride + 1;
929 }
930 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
931 mb_array_size * 2 * sizeof(uint8_t), fail);
932 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
933 mv_table_size * 2 * sizeof(int16_t), fail);
934 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
935 + s->mb_stride + 1;
936 }
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
938 mb_array_size * 2 * sizeof(uint8_t), fail);
939 }
940 }
941 if (s->out_format == FMT_H263) {
942 /* cbp values */
943 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
944 s->coded_block = s->coded_block_base + s->b8_stride + 1;
945
946 /* cbp, ac_pred, pred_dir */
947 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
948 mb_array_size * sizeof(uint8_t), fail);
949 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
950 mb_array_size * sizeof(uint8_t), fail);
951 }
952
953 if (s->h263_pred || s->h263_plus || !s->encoding) {
954 /* dc values */
955 // MN: we need these for error resilience of intra-frames
956 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
957 yc_size * sizeof(int16_t), fail);
958 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
959 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
960 s->dc_val[2] = s->dc_val[1] + c_size;
961 for (i = 0; i < yc_size; i++)
962 s->dc_val_base[i] = 1024;
963 }
964
965 /* which mb is an intra block */
966 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
967 memset(s->mbintra_table, 1, mb_array_size);
968
969 /* init macroblock skip table */
970 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
971 // Note the + 1 is for a quicker mpeg4 slice_end detection
972
973 return init_er(s);
974 fail:
975 return AVERROR(ENOMEM);
976 }
977
978 /**
979 * init common structure for both encoder and decoder.
980 * this assumes that some variables like width/height are already set
981 */
982 av_cold int ff_MPV_common_init(MpegEncContext *s)
983 {
984 int i;
985 int nb_slices = (HAVE_THREADS &&
986 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
987 s->avctx->thread_count : 1;
988
989 if (s->encoding && s->avctx->slices)
990 nb_slices = s->avctx->slices;
991
992 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
993 s->mb_height = (s->height + 31) / 32 * 2;
994 else
995 s->mb_height = (s->height + 15) / 16;
996
997 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
998 av_log(s->avctx, AV_LOG_ERROR,
999 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1000 return -1;
1001 }
1002
1003 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1004 int max_slices;
1005 if (s->mb_height)
1006 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1007 else
1008 max_slices = MAX_THREADS;
1009 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1010 " reducing to %d\n", nb_slices, max_slices);
1011 nb_slices = max_slices;
1012 }
1013
1014 if ((s->width || s->height) &&
1015 av_image_check_size(s->width, s->height, 0, s->avctx))
1016 return -1;
1017
1018 ff_dct_common_init(s);
1019
1020 s->flags = s->avctx->flags;
1021 s->flags2 = s->avctx->flags2;
1022
1023 if (s->width && s->height) {
1024 /* set chroma shifts */
1025 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1026 &s->chroma_x_shift,
1027 &s->chroma_y_shift);
1028
1029 /* convert fourcc to upper case */
1030 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1031
1032 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1033
1034 s->avctx->coded_frame = &s->current_picture.f;
1035
1036 if (s->encoding) {
1037 if (s->msmpeg4_version) {
1038 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1039 2 * 2 * (MAX_LEVEL + 1) *
1040 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1041 }
1042 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1043
1044 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
1045 64 * 32 * sizeof(int), fail);
1046 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
1047 64 * 32 * sizeof(int), fail);
1048 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
1049 64 * 32 * 2 * sizeof(uint16_t), fail);
1050 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
1051 64 * 32 * 2 * sizeof(uint16_t), fail);
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
1053 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1054 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
1055 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1056
1057 if (s->avctx->noise_reduction) {
1058 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
1059 2 * 64 * sizeof(uint16_t), fail);
1060 }
1061 }
1062 }
1063
1064 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1065 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1066 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1067 avcodec_get_frame_defaults(&s->picture[i].f);
1068 }
1069 memset(&s->next_picture, 0, sizeof(s->next_picture));
1070 memset(&s->last_picture, 0, sizeof(s->last_picture));
1071 memset(&s->current_picture, 0, sizeof(s->current_picture));
1072 avcodec_get_frame_defaults(&s->next_picture.f);
1073 avcodec_get_frame_defaults(&s->last_picture.f);
1074 avcodec_get_frame_defaults(&s->current_picture.f);
1075
1076 if (s->width && s->height) {
1077 if (init_context_frame(s))
1078 goto fail;
1079
1080 s->parse_context.state = -1;
1081 }
1082
1083 s->context_initialized = 1;
1084 s->thread_context[0] = s;
1085
1086 if (s->width && s->height) {
1087 if (nb_slices > 1) {
1088 for (i = 1; i < nb_slices; i++) {
1089 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1090 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1091 }
1092
1093 for (i = 0; i < nb_slices; i++) {
1094 if (init_duplicate_context(s->thread_context[i]) < 0)
1095 goto fail;
1096 s->thread_context[i]->start_mb_y =
1097 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1098 s->thread_context[i]->end_mb_y =
1099 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1100 }
1101 } else {
1102 if (init_duplicate_context(s) < 0)
1103 goto fail;
1104 s->start_mb_y = 0;
1105 s->end_mb_y = s->mb_height;
1106 }
1107 s->slice_context_count = nb_slices;
1108 }
1109
1110 return 0;
1111 fail:
1112 ff_MPV_common_end(s);
1113 return -1;
1114 }
1115
1116 /**
1117 * Free and reset MpegEncContext fields that depend on the resolution.
1118 * Used during resolution changes to avoid a full reinitialization of the
1119 * codec.
1120 */
1121 static int free_context_frame(MpegEncContext *s)
1122 {
1123 int i, j, k;
1124
1125 av_freep(&s->mb_type);
1126 av_freep(&s->p_mv_table_base);
1127 av_freep(&s->b_forw_mv_table_base);
1128 av_freep(&s->b_back_mv_table_base);
1129 av_freep(&s->b_bidir_forw_mv_table_base);
1130 av_freep(&s->b_bidir_back_mv_table_base);
1131 av_freep(&s->b_direct_mv_table_base);
1132 s->p_mv_table = NULL;
1133 s->b_forw_mv_table = NULL;
1134 s->b_back_mv_table = NULL;
1135 s->b_bidir_forw_mv_table = NULL;
1136 s->b_bidir_back_mv_table = NULL;
1137 s->b_direct_mv_table = NULL;
1138 for (i = 0; i < 2; i++) {
1139 for (j = 0; j < 2; j++) {
1140 for (k = 0; k < 2; k++) {
1141 av_freep(&s->b_field_mv_table_base[i][j][k]);
1142 s->b_field_mv_table[i][j][k] = NULL;
1143 }
1144 av_freep(&s->b_field_select_table[i][j]);
1145 av_freep(&s->p_field_mv_table_base[i][j]);
1146 s->p_field_mv_table[i][j] = NULL;
1147 }
1148 av_freep(&s->p_field_select_table[i]);
1149 }
1150
1151 av_freep(&s->dc_val_base);
1152 av_freep(&s->coded_block_base);
1153 av_freep(&s->mbintra_table);
1154 av_freep(&s->cbp_table);
1155 av_freep(&s->pred_dir_table);
1156
1157 av_freep(&s->mbskip_table);
1158
1159 av_freep(&s->er.error_status_table);
1160 av_freep(&s->er.er_temp_buffer);
1161 av_freep(&s->mb_index2xy);
1162 av_freep(&s->lambda_table);
1163 av_freep(&s->cplx_tab);
1164 av_freep(&s->bits_tab);
1165
1166 s->linesize = s->uvlinesize = 0;
1167
1168 return 0;
1169 }
1170
1171 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1172 {
1173 int i, err = 0;
1174
1175 if (s->slice_context_count > 1) {
1176 for (i = 0; i < s->slice_context_count; i++) {
1177 free_duplicate_context(s->thread_context[i]);
1178 }
1179 for (i = 1; i < s->slice_context_count; i++) {
1180 av_freep(&s->thread_context[i]);
1181 }
1182 } else
1183 free_duplicate_context(s);
1184
1185 if ((err = free_context_frame(s)) < 0)
1186 return err;
1187
1188 if (s->picture)
1189 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1190 s->picture[i].needs_realloc = 1;
1191 }
1192
1193 s->last_picture_ptr =
1194 s->next_picture_ptr =
1195 s->current_picture_ptr = NULL;
1196
1197 // init
1198 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1199 s->mb_height = (s->height + 31) / 32 * 2;
1200 else
1201 s->mb_height = (s->height + 15) / 16;
1202
1203 if ((s->width || s->height) &&
1204 av_image_check_size(s->width, s->height, 0, s->avctx))
1205 return AVERROR_INVALIDDATA;
1206
1207 if ((err = init_context_frame(s)))
1208 goto fail;
1209
1210 s->thread_context[0] = s;
1211
1212 if (s->width && s->height) {
1213 int nb_slices = s->slice_context_count;
1214 if (nb_slices > 1) {
1215 for (i = 1; i < nb_slices; i++) {
1216 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1217 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1218 }
1219
1220 for (i = 0; i < nb_slices; i++) {
1221 if (init_duplicate_context(s->thread_context[i]) < 0)
1222 goto fail;
1223 s->thread_context[i]->start_mb_y =
1224 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1225 s->thread_context[i]->end_mb_y =
1226 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1227 }
1228 } else {
1229 if (init_duplicate_context(s) < 0)
1230 goto fail;
1231 s->start_mb_y = 0;
1232 s->end_mb_y = s->mb_height;
1233 }
1234 s->slice_context_count = nb_slices;
1235 }
1236
1237 return 0;
1238 fail:
1239 ff_MPV_common_end(s);
1240 return err;
1241 }
1242
1243 /* free the common structures allocated by ff_MPV_common_init() */
1244 void ff_MPV_common_end(MpegEncContext *s)
1245 {
1246 int i;
1247
1248 if (s->slice_context_count > 1) {
1249 for (i = 0; i < s->slice_context_count; i++) {
1250 free_duplicate_context(s->thread_context[i]);
1251 }
1252 for (i = 1; i < s->slice_context_count; i++) {
1253 av_freep(&s->thread_context[i]);
1254 }
1255 s->slice_context_count = 1;
1256 } else free_duplicate_context(s);
1257
1258 av_freep(&s->parse_context.buffer);
1259 s->parse_context.buffer_size = 0;
1260
1261 av_freep(&s->bitstream_buffer);
1262 s->allocated_bitstream_buffer_size = 0;
1263
1264 av_freep(&s->avctx->stats_out);
1265 av_freep(&s->ac_stats);
1266
1267 av_freep(&s->q_intra_matrix);
1268 av_freep(&s->q_inter_matrix);
1269 av_freep(&s->q_intra_matrix16);
1270 av_freep(&s->q_inter_matrix16);
1271 av_freep(&s->input_picture);
1272 av_freep(&s->reordered_input_picture);
1273 av_freep(&s->dct_offset);
1274
1275 if (s->picture) {
1276 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1277 free_picture_tables(&s->picture[i]);
1278 ff_mpeg_unref_picture(s, &s->picture[i]);
1279 }
1280 }
1281 av_freep(&s->picture);
1282 free_picture_tables(&s->last_picture);
1283 ff_mpeg_unref_picture(s, &s->last_picture);
1284 free_picture_tables(&s->current_picture);
1285 ff_mpeg_unref_picture(s, &s->current_picture);
1286 free_picture_tables(&s->next_picture);
1287 ff_mpeg_unref_picture(s, &s->next_picture);
1288 free_picture_tables(&s->new_picture);
1289 ff_mpeg_unref_picture(s, &s->new_picture);
1290
1291 free_context_frame(s);
1292
1293 s->context_initialized = 0;
1294 s->last_picture_ptr =
1295 s->next_picture_ptr =
1296 s->current_picture_ptr = NULL;
1297 s->linesize = s->uvlinesize = 0;
1298 }
1299
1300 av_cold void ff_init_rl(RLTable *rl,
1301 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1302 {
1303 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1304 uint8_t index_run[MAX_RUN + 1];
1305 int last, run, level, start, end, i;
1306
1307 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1308 if (static_store && rl->max_level[0])
1309 return;
1310
1311 /* compute max_level[], max_run[] and index_run[] */
1312 for (last = 0; last < 2; last++) {
1313 if (last == 0) {
1314 start = 0;
1315 end = rl->last;
1316 } else {
1317 start = rl->last;
1318 end = rl->n;
1319 }
1320
1321 memset(max_level, 0, MAX_RUN + 1);
1322 memset(max_run, 0, MAX_LEVEL + 1);
1323 memset(index_run, rl->n, MAX_RUN + 1);
1324 for (i = start; i < end; i++) {
1325 run = rl->table_run[i];
1326 level = rl->table_level[i];
1327 if (index_run[run] == rl->n)
1328 index_run[run] = i;
1329 if (level > max_level[run])
1330 max_level[run] = level;
1331 if (run > max_run[level])
1332 max_run[level] = run;
1333 }
1334 if (static_store)
1335 rl->max_level[last] = static_store[last];
1336 else
1337 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1338 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1339 if (static_store)
1340 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1341 else
1342 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1343 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1344 if (static_store)
1345 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1346 else
1347 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1348 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1349 }
1350 }
1351
1352 av_cold void ff_init_vlc_rl(RLTable *rl)
1353 {
1354 int i, q;
1355
1356 for (q = 0; q < 32; q++) {
1357 int qmul = q * 2;
1358 int qadd = (q - 1) | 1;
1359
1360 if (q == 0) {
1361 qmul = 1;
1362 qadd = 0;
1363 }
1364 for (i = 0; i < rl->vlc.table_size; i++) {
1365 int code = rl->vlc.table[i][0];
1366 int len = rl->vlc.table[i][1];
1367 int level, run;
1368
1369 if (len == 0) { // illegal code
1370 run = 66;
1371 level = MAX_LEVEL;
1372 } else if (len < 0) { // more bits needed
1373 run = 0;
1374 level = code;
1375 } else {
1376 if (code == rl->n) { // esc
1377 run = 66;
1378 level = 0;
1379 } else {
1380 run = rl->table_run[code] + 1;
1381 level = rl->table_level[code] * qmul + qadd;
1382 if (code >= rl->last) run += 192;
1383 }
1384 }
1385 rl->rl_vlc[q][i].len = len;
1386 rl->rl_vlc[q][i].level = level;
1387 rl->rl_vlc[q][i].run = run;
1388 }
1389 }
1390 }
1391
1392 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1393 {
1394 int i;
1395
1396 /* release non-reference frames */
1397 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1398 if (!s->picture[i].reference &&
1399 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1400 ff_mpeg_unref_picture(s, &s->picture[i]);
1401 }
1402 }
1403 }
1404
1405 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1406 {
1407 if (pic->f.data[0] == NULL)
1408 return 1;
1409 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1410 return 1;
1411 return 0;
1412 }
1413
1414 static int find_unused_picture(MpegEncContext *s, int shared)
1415 {
1416 int i;
1417
1418 if (shared) {
1419 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1420 if (s->picture[i].f.data[0] == NULL)
1421 return i;
1422 }
1423 } else {
1424 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1425 if (pic_is_unused(s, &s->picture[i]))
1426 return i;
1427 }
1428 }
1429
1430 return AVERROR_INVALIDDATA;
1431 }
1432
1433 int ff_find_unused_picture(MpegEncContext *s, int shared)
1434 {
1435 int ret = find_unused_picture(s, shared);
1436
1437 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1438 if (s->picture[ret].needs_realloc) {
1439 s->picture[ret].needs_realloc = 0;
1440 free_picture_tables(&s->picture[ret]);
1441 ff_mpeg_unref_picture(s, &s->picture[ret]);
1442 avcodec_get_frame_defaults(&s->picture[ret].f);
1443 }
1444 }
1445 return ret;
1446 }
1447
1448 static void update_noise_reduction(MpegEncContext *s)
1449 {
1450 int intra, i;
1451
1452 for (intra = 0; intra < 2; intra++) {
1453 if (s->dct_count[intra] > (1 << 16)) {
1454 for (i = 0; i < 64; i++) {
1455 s->dct_error_sum[intra][i] >>= 1;
1456 }
1457 s->dct_count[intra] >>= 1;
1458 }
1459
1460 for (i = 0; i < 64; i++) {
1461 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1462 s->dct_count[intra] +
1463 s->dct_error_sum[intra][i] / 2) /
1464 (s->dct_error_sum[intra][i] + 1);
1465 }
1466 }
1467 }
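/* Worked example for the offset formula above (illustrative numbers): with
 * avctx->noise_reduction = 8, dct_count = 100 and dct_error_sum[i] = 399,
 * the offset is (8 * 100 + 199) / 400 = 2, i.e. roughly noise_reduction
 * divided by the average error accumulated per block for that coefficient. */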
1468
1469 /**
1470 * generic function for encode/decode called after coding/decoding
1471 * the header and before a frame is coded/decoded.
1472 */
1473 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1474 {
1475 int i, ret;
1476 Picture *pic;
1477 s->mb_skipped = 0;
1478
1479 /* mark & release old frames */
1480 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1481 s->last_picture_ptr != s->next_picture_ptr &&
1482 s->last_picture_ptr->f.data[0]) {
1483 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1484 }
1485
1486 /* release forgotten pictures */
1487 /* if (mpeg124/h263) */
1488 if (!s->encoding) {
1489 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1490 if (&s->picture[i] != s->last_picture_ptr &&
1491 &s->picture[i] != s->next_picture_ptr &&
1492 s->picture[i].reference && !s->picture[i].needs_realloc) {
1493 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1494 av_log(avctx, AV_LOG_ERROR,
1495 "releasing zombie picture\n");
1496 ff_mpeg_unref_picture(s, &s->picture[i]);
1497 }
1498 }
1499 }
1500
1501 ff_mpeg_unref_picture(s, &s->current_picture);
1502
1503 if (!s->encoding) {
1504 ff_release_unused_pictures(s, 1);
1505
1506 if (s->current_picture_ptr &&
1507 s->current_picture_ptr->f.data[0] == NULL) {
1508 // we already have an unused image
1509 // (maybe it was set before reading the header)
1510 pic = s->current_picture_ptr;
1511 } else {
1512 i = ff_find_unused_picture(s, 0);
1513 if (i < 0) {
1514 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1515 return i;
1516 }
1517 pic = &s->picture[i];
1518 }
1519
1520 pic->reference = 0;
1521 if (!s->droppable) {
1522 if (s->pict_type != AV_PICTURE_TYPE_B)
1523 pic->reference = 3;
1524 }
1525
1526 pic->f.coded_picture_number = s->coded_picture_number++;
1527
1528 if (ff_alloc_picture(s, pic, 0) < 0)
1529 return -1;
1530
1531 s->current_picture_ptr = pic;
1532 // FIXME use only the vars from current_pic
1533 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1534 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1535 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1536 if (s->picture_structure != PICT_FRAME)
1537 s->current_picture_ptr->f.top_field_first =
1538 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1539 }
1540 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1541 !s->progressive_sequence;
1542 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1543 }
1544
1545 s->current_picture_ptr->f.pict_type = s->pict_type;
1546 // if (s->flags && CODEC_FLAG_QSCALE)
1547 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1548 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1549
1550 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1551 s->current_picture_ptr)) < 0)
1552 return ret;
1553
1554 if (s->pict_type != AV_PICTURE_TYPE_B) {
1555 s->last_picture_ptr = s->next_picture_ptr;
1556 if (!s->droppable)
1557 s->next_picture_ptr = s->current_picture_ptr;
1558 }
1559 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1560 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1561 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1562 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1563 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1564 s->pict_type, s->droppable);
1565
1566 if ((s->last_picture_ptr == NULL ||
1567 s->last_picture_ptr->f.data[0] == NULL) &&
1568 (s->pict_type != AV_PICTURE_TYPE_I ||
1569 s->picture_structure != PICT_FRAME)) {
1570 int h_chroma_shift, v_chroma_shift;
1571 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1572 &h_chroma_shift, &v_chroma_shift);
1573 if (s->pict_type != AV_PICTURE_TYPE_I)
1574 av_log(avctx, AV_LOG_ERROR,
1575 "warning: first frame is no keyframe\n");
1576 else if (s->picture_structure != PICT_FRAME)
1577 av_log(avctx, AV_LOG_INFO,
1578 "allocate dummy last picture for field based first keyframe\n");
1579
1580 /* Allocate a dummy frame */
1581 i = ff_find_unused_picture(s, 0);
1582 if (i < 0) {
1583 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1584 return i;
1585 }
1586 s->last_picture_ptr = &s->picture[i];
1587 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1588 s->last_picture_ptr = NULL;
1589 return -1;
1590 }
1591
1592 memset(s->last_picture_ptr->f.data[0], 0,
1593 avctx->height * s->last_picture_ptr->f.linesize[0]);
1594 memset(s->last_picture_ptr->f.data[1], 0x80,
1595 (avctx->height >> v_chroma_shift) *
1596 s->last_picture_ptr->f.linesize[1]);
1597 memset(s->last_picture_ptr->f.data[2], 0x80,
1598 (avctx->height >> v_chroma_shift) *
1599 s->last_picture_ptr->f.linesize[2]);
1600
1601 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1602 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1603 }
1604 if ((s->next_picture_ptr == NULL ||
1605 s->next_picture_ptr->f.data[0] == NULL) &&
1606 s->pict_type == AV_PICTURE_TYPE_B) {
1607 /* Allocate a dummy frame */
1608 i = ff_find_unused_picture(s, 0);
1609 if (i < 0) {
1610 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1611 return i;
1612 }
1613 s->next_picture_ptr = &s->picture[i];
1614 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1615 s->next_picture_ptr = NULL;
1616 return -1;
1617 }
1618 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1619 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1620 }
1621
1622 if (s->last_picture_ptr) {
1623 ff_mpeg_unref_picture(s, &s->last_picture);
1624 if (s->last_picture_ptr->f.data[0] &&
1625 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1626 s->last_picture_ptr)) < 0)
1627 return ret;
1628 }
1629 if (s->next_picture_ptr) {
1630 ff_mpeg_unref_picture(s, &s->next_picture);
1631 if (s->next_picture_ptr->f.data[0] &&
1632 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1633 s->next_picture_ptr)) < 0)
1634 return ret;
1635 }
1636
1637 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1638 s->last_picture_ptr->f.data[0]));
1639
1640 if (s->picture_structure!= PICT_FRAME) {
1641 int i;
1642 for (i = 0; i < 4; i++) {
1643 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1644 s->current_picture.f.data[i] +=
1645 s->current_picture.f.linesize[i];
1646 }
1647 s->current_picture.f.linesize[i] *= 2;
1648 s->last_picture.f.linesize[i] *= 2;
1649 s->next_picture.f.linesize[i] *= 2;
1650 }
1651 }
1652
1653 s->err_recognition = avctx->err_recognition;
1654
1655 /* Set the dequantizer. We cannot do it during init, as it may change
1656 * for MPEG-4, and we cannot do it in the header decode, as init is not
1657 * called for MPEG-4 at that point yet. */
1658 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1659 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1660 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1661 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1662 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1663 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1664 } else {
1665 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1666 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1667 }
1668
1669 if (s->dct_error_sum) {
1670 assert(s->avctx->noise_reduction && s->encoding);
1671 update_noise_reduction(s);
1672 }
1673
1674 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1675 return ff_xvmc_field_start(s, avctx);
1676
1677 return 0;
1678 }
1679
1680 /* generic function for encode/decode called after a
1681 * frame has been coded/decoded. */
1682 void ff_MPV_frame_end(MpegEncContext *s)
1683 {
1684 int i;
1685 /* redraw edges for the frame if decoding didn't complete */
1686 // just to make sure that all data is rendered.
1687 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1688 ff_xvmc_field_end(s);
1689 } else if ((s->er.error_count || s->encoding) &&
1690 !s->avctx->hwaccel &&
1691 s->unrestricted_mv &&
1692 s->current_picture.reference &&
1693 !s->intra_only &&
1694 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1695 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1696 int hshift = desc->log2_chroma_w;
1697 int vshift = desc->log2_chroma_h;
1698 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1699 s->h_edge_pos, s->v_edge_pos,
1700 EDGE_WIDTH, EDGE_WIDTH,
1701 EDGE_TOP | EDGE_BOTTOM);
1702 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1703 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1704 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1705 EDGE_TOP | EDGE_BOTTOM);
1706 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1707 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1708 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1709 EDGE_TOP | EDGE_BOTTOM);
1710 }
1711
1712 emms_c();
1713
1714 s->last_pict_type = s->pict_type;
1715 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1716 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1717 s->last_non_b_pict_type = s->pict_type;
1718 }
1719 #if 0
1720 /* copy back current_picture variables */
1721 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1722 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1723 s->picture[i] = s->current_picture;
1724 break;
1725 }
1726 }
1727 assert(i < MAX_PICTURE_COUNT);
1728 #endif
1729
1730 if (s->encoding) {
1731 /* release non-reference frames */
1732 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1733 if (!s->picture[i].reference)
1734 ff_mpeg_unref_picture(s, &s->picture[i]);
1735 }
1736 }
1737 // clear copies, to avoid confusion
1738 #if 0
1739 memset(&s->last_picture, 0, sizeof(Picture));
1740 memset(&s->next_picture, 0, sizeof(Picture));
1741 memset(&s->current_picture, 0, sizeof(Picture));
1742 #endif
1743 s->avctx->coded_frame = &s->current_picture_ptr->f;
1744
1745 if (s->current_picture.reference)
1746 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1747 }
1748
1749 /**
1750 * Print debugging info for the given picture.
1751 */
1752 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1753 {
1754 AVFrame *pict;
1755 if (s->avctx->hwaccel || !p || !p->mb_type)
1756 return;
1757 pict = &p->f;
1758
1759 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1760 int x,y;
1761
1762 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1763 switch (pict->pict_type) {
1764 case AV_PICTURE_TYPE_I:
1765 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1766 break;
1767 case AV_PICTURE_TYPE_P:
1768 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1769 break;
1770 case AV_PICTURE_TYPE_B:
1771 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1772 break;
1773 case AV_PICTURE_TYPE_S:
1774 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1775 break;
1776 case AV_PICTURE_TYPE_SI:
1777 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1778 break;
1779 case AV_PICTURE_TYPE_SP:
1780 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1781 break;
1782 }
1783 for (y = 0; y < s->mb_height; y++) {
1784 for (x = 0; x < s->mb_width; x++) {
1785 if (s->avctx->debug & FF_DEBUG_SKIP) {
1786 int count = s->mbskip_table[x + y * s->mb_stride];
1787 if (count > 9)
1788 count = 9;
1789 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1790 }
1791 if (s->avctx->debug & FF_DEBUG_QP) {
1792 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1793 p->qscale_table[x + y * s->mb_stride]);
1794 }
1795 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1796 int mb_type = p->mb_type[x + y * s->mb_stride];
1797 // Type & MV direction
1798 if (IS_PCM(mb_type))
1799 av_log(s->avctx, AV_LOG_DEBUG, "P");
1800 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1801 av_log(s->avctx, AV_LOG_DEBUG, "A");
1802 else if (IS_INTRA4x4(mb_type))
1803 av_log(s->avctx, AV_LOG_DEBUG, "i");
1804 else if (IS_INTRA16x16(mb_type))
1805 av_log(s->avctx, AV_LOG_DEBUG, "I");
1806 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1807 av_log(s->avctx, AV_LOG_DEBUG, "d");
1808 else if (IS_DIRECT(mb_type))
1809 av_log(s->avctx, AV_LOG_DEBUG, "D");
1810 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1811 av_log(s->avctx, AV_LOG_DEBUG, "g");
1812 else if (IS_GMC(mb_type))
1813 av_log(s->avctx, AV_LOG_DEBUG, "G");
1814 else if (IS_SKIP(mb_type))
1815 av_log(s->avctx, AV_LOG_DEBUG, "S");
1816 else if (!USES_LIST(mb_type, 1))
1817 av_log(s->avctx, AV_LOG_DEBUG, ">");
1818 else if (!USES_LIST(mb_type, 0))
1819 av_log(s->avctx, AV_LOG_DEBUG, "<");
1820 else {
1821 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1822 av_log(s->avctx, AV_LOG_DEBUG, "X");
1823 }
1824
1825 // segmentation
1826 if (IS_8X8(mb_type))
1827 av_log(s->avctx, AV_LOG_DEBUG, "+");
1828 else if (IS_16X8(mb_type))
1829 av_log(s->avctx, AV_LOG_DEBUG, "-");
1830 else if (IS_8X16(mb_type))
1831 av_log(s->avctx, AV_LOG_DEBUG, "|");
1832 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1833 av_log(s->avctx, AV_LOG_DEBUG, " ");
1834 else
1835 av_log(s->avctx, AV_LOG_DEBUG, "?");
1836
1837
1838 if (IS_INTERLACED(mb_type))
1839 av_log(s->avctx, AV_LOG_DEBUG, "=");
1840 else
1841 av_log(s->avctx, AV_LOG_DEBUG, " ");
1842 }
1843 }
1844 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1845 }
1846 }
1847 }
1848
1849 /**
1850 * Find the lowest MB row referenced in the MVs.
1851 */
1852 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1853 {
1854 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1855 int my, off, i, mvs;
1856
1857 if (s->picture_structure != PICT_FRAME || s->mcsel)
1858 goto unhandled;
1859
1860 switch (s->mv_type) {
1861 case MV_TYPE_16X16:
1862 mvs = 1;
1863 break;
1864 case MV_TYPE_16X8:
1865 mvs = 2;
1866 break;
1867 case MV_TYPE_8X8:
1868 mvs = 4;
1869 break;
1870 default:
1871 goto unhandled;
1872 }
1873
1874 for (i = 0; i < mvs; i++) {
1875 my = s->mv[dir][i][1]<<qpel_shift;
1876 my_max = FFMAX(my_max, my);
1877 my_min = FFMIN(my_min, my);
1878 }
1879
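/* the MVs were scaled to quarter-pel units above; 16 px = 64 qpel units,
   so +63 and >>6 round the largest vertical displacement up to whole MB rows */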
1880 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1881
1882 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1883 unhandled:
1884 return s->mb_height-1;
1885 }
1886
1887 /* put block[] to dest[] */
1888 static inline void put_dct(MpegEncContext *s,
1889 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1890 {
1891 s->dct_unquantize_intra(s, block, i, qscale);
1892 s->dsp.idct_put (dest, line_size, block);
1893 }
1894
1895 /* add block[] to dest[] */
1896 static inline void add_dct(MpegEncContext *s,
1897 int16_t *block, int i, uint8_t *dest, int line_size)
1898 {
1899 if (s->block_last_index[i] >= 0) {
1900 s->dsp.idct_add (dest, line_size, block);
1901 }
1902 }
1903
1904 static inline void add_dequant_dct(MpegEncContext *s,
1905 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1906 {
1907 if (s->block_last_index[i] >= 0) {
1908 s->dct_unquantize_inter(s, block, i, qscale);
1909
1910 s->dsp.idct_add (dest, line_size, block);
1911 }
1912 }
1913
1914 /**
1915 * Clean dc, ac, coded_block for the current non-intra MB.
1916 */
1917 void ff_clean_intra_table_entries(MpegEncContext *s)
1918 {
1919 int wrap = s->b8_stride;
1920 int xy = s->block_index[0];
1921
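/* reset the four luma DC predictors of this MB to their initial value (1024) */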
1922 s->dc_val[0][xy ] =
1923 s->dc_val[0][xy + 1 ] =
1924 s->dc_val[0][xy + wrap] =
1925 s->dc_val[0][xy + 1 + wrap] = 1024;
1926 /* ac pred */
1927 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1928 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1929 if (s->msmpeg4_version>=3) {
1930 s->coded_block[xy ] =
1931 s->coded_block[xy + 1 ] =
1932 s->coded_block[xy + wrap] =
1933 s->coded_block[xy + 1 + wrap] = 0;
1934 }
1935 /* chroma */
1936 wrap = s->mb_stride;
1937 xy = s->mb_x + s->mb_y * wrap;
1938 s->dc_val[1][xy] =
1939 s->dc_val[2][xy] = 1024;
1940 /* ac pred */
1941 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1942 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1943
1944 s->mbintra_table[xy]= 0;
1945 }
1946
1947 /* Generic function called after a macroblock has been parsed by the
1948 decoder or encoded by the encoder.
1949
1950 Important variables used:
1951 s->mb_intra : true if intra macroblock
1952 s->mv_dir : motion vector direction
1953 s->mv_type : motion vector type
1954 s->mv : motion vector
1955 s->interlaced_dct : true if interlaced DCT is used (MPEG-2)
1956 */
1957 static av_always_inline
1958 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1959 int is_mpeg12)
1960 {
1961 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1962 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1963 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1964 return;
1965 }
1966
1967 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1968 /* print DCT coefficients */
1969 int i,j;
1970 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1971 for(i=0; i<6; i++){
1972 for(j=0; j<64; j++){
1973 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
1974 }
1975 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1976 }
1977 }
1978
1979 s->current_picture.qscale_table[mb_xy] = s->qscale;
1980
1981 /* update DC predictors for P macroblocks */
1982 if (!s->mb_intra) {
1983 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1984 if(s->mbintra_table[mb_xy])
1985 ff_clean_intra_table_entries(s);
1986 } else {
1987 s->last_dc[0] =
1988 s->last_dc[1] =
1989 s->last_dc[2] = 128 << s->intra_dc_precision;
1990 }
1991 }
1992 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1993 s->mbintra_table[mb_xy]=1;
1994
1995 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1996 uint8_t *dest_y, *dest_cb, *dest_cr;
1997 int dct_linesize, dct_offset;
1998 op_pixels_func (*op_pix)[4];
1999 qpel_mc_func (*op_qpix)[16];
2000 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2001 const int uvlinesize = s->current_picture.f.linesize[1];
2002 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2003 const int block_size = 8;
2004
2005 /* avoid the copy if the macroblock was skipped in the last frame too */
2006 /* only track skipped MBs while decoding; the encoder may trash these buffers */
2007 if(!s->encoding){
2008 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2009
2010 if (s->mb_skipped) {
2011 s->mb_skipped= 0;
2012 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2013 *mbskip_ptr = 1;
2014 } else if(!s->current_picture.reference) {
2015 *mbskip_ptr = 1;
2016 } else{
2017 *mbskip_ptr = 0; /* not skipped */
2018 }
2019 }
2020
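/* field (interlaced) DCT: block rows come from every other line, so double
   the stride and start the lower block pair one line down instead of eight */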
2021 dct_linesize = linesize << s->interlaced_dct;
2022 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2023
2024 if(readable){
2025 dest_y= s->dest[0];
2026 dest_cb= s->dest[1];
2027 dest_cr= s->dest[2];
2028 }else{
2029 dest_y = s->b_scratchpad;
2030 dest_cb= s->b_scratchpad+16*linesize;
2031 dest_cr= s->b_scratchpad+32*linesize;
2032 }
2033
2034 if (!s->mb_intra) {
2035 /* motion handling */
2036 /* only while decoding, or when more than one mb_type is used (MC was already done otherwise) */
2037 if(!s->encoding){
2038
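/* with frame threading, wait until the reference frames are decoded at
   least up to the lowest MB row this MB's MVs can reference */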
2039 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2040 if (s->mv_dir & MV_DIR_FORWARD) {
2041 ff_thread_await_progress(&s->last_picture_ptr->tf,
2042 ff_MPV_lowest_referenced_row(s, 0),
2043 0);
2044 }
2045 if (s->mv_dir & MV_DIR_BACKWARD) {
2046 ff_thread_await_progress(&s->next_picture_ptr->tf,
2047 ff_MPV_lowest_referenced_row(s, 1),
2048 0);
2049 }
2050 }
2051
2052 op_qpix= s->me.qpel_put;
2053 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2054 op_pix = s->hdsp.put_pixels_tab;
2055 }else{
2056 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2057 }
2058 if (s->mv_dir & MV_DIR_FORWARD) {
2059 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2060 op_pix = s->hdsp.avg_pixels_tab;
2061 op_qpix= s->me.qpel_avg;
2062 }
2063 if (s->mv_dir & MV_DIR_BACKWARD) {
2064 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2065 }
2066 }
2067
2068 /* skip dequant / idct if we are really late ;) */
2069 if(s->avctx->skip_idct){
2070 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2071 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2072 || s->avctx->skip_idct >= AVDISCARD_ALL)
2073 goto skip_idct;
2074 }
2075
2076 /* add dct residue */
2077 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2078 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2079 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2080 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2081 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2082 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2083
2084 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2085 if (s->chroma_y_shift){
2086 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2087 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2088 }else{
2089 dct_linesize >>= 1;
2090 dct_offset >>=1;
2091 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2092 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2093 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2094 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2095 }
2096 }
2097 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2098 add_dct(s, block[0], 0, dest_y , dct_linesize);
2099 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2100 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2101 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2102
2103 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2104 if(s->chroma_y_shift){//Chroma420
2105 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2106 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2107 }else{
2108 //chroma422
2109 dct_linesize = uvlinesize << s->interlaced_dct;
2110 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2111
2112 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2113 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2114 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2115 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2116 if(!s->chroma_x_shift){//Chroma444
2117 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2118 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2119 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2120 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2121 }
2122 }
2123 }//fi gray
2124 }
2125 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2126 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2127 }
2128 } else {
2129 /* dct only in intra block */
2130 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2131 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2132 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2133 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2134 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2135
2136 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2137 if(s->chroma_y_shift){
2138 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2139 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2140 }else{
2141 dct_offset >>=1;
2142 dct_linesize >>=1;
2143 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2144 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2145 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2146 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2147 }
2148 }
2149 }else{
2150 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2151 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2152 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2153 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2154
2155 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2156 if(s->chroma_y_shift){
2157 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2158 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2159 }else{
2160
2161 dct_linesize = uvlinesize << s->interlaced_dct;
2162 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2163
2164 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2165 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2166 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2167 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2168 if(!s->chroma_x_shift){//Chroma444
2169 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2170 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2171 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2172 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2173 }
2174 }
2175 }//gray
2176 }
2177 }
2178 skip_idct:
2179 if(!readable){
2180 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2181 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2182 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2183 }
2184 }
2185 }
2186
2187 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2188 #if !CONFIG_SMALL
2189 if(s->out_format == FMT_MPEG1) {
2190 MPV_decode_mb_internal(s, block, 1);
2191 } else
2192 #endif
2193 MPV_decode_mb_internal(s, block, 0);
2194 }
2195
2196 /**
2197 * @param h the normal height; it will be reduced automatically if needed for the last row
2198 */
2199 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2200 Picture *last, int y, int h, int picture_structure,
2201 int first_field, int draw_edges, int low_delay,
2202 int v_edge_pos, int h_edge_pos)
2203 {
2204 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2205 int hshift = desc->log2_chroma_w;
2206 int vshift = desc->log2_chroma_h;
2207 const int field_pic = picture_structure != PICT_FRAME;
2208 if(field_pic){
2209 h <<= 1;
2210 y <<= 1;
2211 }
2212
2213 if (!avctx->hwaccel &&
2214 draw_edges &&
2215 cur->reference &&
2216 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2217 int *linesize = cur->f.linesize;
2218 int sides = 0, edge_h;
2219 if (y==0) sides |= EDGE_TOP;
2220 if (y + h >= v_edge_pos)
2221 sides |= EDGE_BOTTOM;
2222
2223 edge_h= FFMIN(h, v_edge_pos - y);
2224
2225 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2226 linesize[0], h_edge_pos, edge_h,
2227 EDGE_WIDTH, EDGE_WIDTH, sides);
2228 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2229 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2230 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2231 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2232 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2233 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2234 }
2235
2236 h = FFMIN(h, avctx->height - y);
2237
2238 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2239
2240 if (avctx->draw_horiz_band) {
2241 AVFrame *src;
2242 int offset[AV_NUM_DATA_POINTERS];
2243 int i;
2244
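/* B-frames, low-delay streams and coded-order slices are drawn from the
   current picture; otherwise the previously decoded picture is the one
   being displayed */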
2245 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2246 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2247 src = &cur->f;
2248 else if (last)
2249 src = &last->f;
2250 else
2251 return;
2252
2253 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2254 picture_structure == PICT_FRAME &&
2255 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2256 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2257 offset[i] = 0;
2258 }else{
2259 offset[0]= y * src->linesize[0];
2260 offset[1]=
2261 offset[2]= (y >> vshift) * src->linesize[1];
2262 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2263 offset[i] = 0;
2264 }
2265
2266 emms_c();
2267
2268 avctx->draw_horiz_band(avctx, src, offset,
2269 y, picture_structure, h);
2270 }
2271 }
2272
2273 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2274 {
2275 int draw_edges = s->unrestricted_mv && !s->intra_only;
2276 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2277 &s->last_picture, y, h, s->picture_structure,
2278 s->first_field, draw_edges, s->low_delay,
2279 s->v_edge_pos, s->h_edge_pos);
2280 }
2281
2282 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2283 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2284 const int uvlinesize = s->current_picture.f.linesize[1];
2285 const int mb_size= 4;
2286
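/* indices 0-3 are the four luma 8x8 blocks of this MB in the b8 grid,
   4 and 5 the Cb and Cr blocks stored after the luma area */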
2287 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2288 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2289 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2290 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2291 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2292 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2293 //block_index is not used by mpeg2, so it is not affected by chroma_format
2294
2295 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2296 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2297 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2298
2299 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2300 {
2301 if(s->picture_structure==PICT_FRAME){
2302 s->dest[0] += s->mb_y * linesize << mb_size;
2303 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2304 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2305 }else{
2306 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2307 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2308 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2309 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2310 }
2311 }
2312 }
2313
2314 /**
2315 * Permute an 8x8 block.
2316 * @param block the block which will be permuted according to the given permutation vector
2317 * @param permutation the permutation vector
2318 * @param last the last non-zero coefficient in scantable order, used to speed the permutation up
2319 * @param scantable the scantable used; it only serves to speed the permutation up, the block is not
2320 * (inverse) permuted to scantable order!
2321 */
2322 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2323 {
2324 int i;
2325 int16_t temp[64];
2326
2327 if(last<=0) return;
2328 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
2329
2330 for(i=0; i<=last; i++){
2331 const int j= scantable[i];
2332 temp[j]= block[j];
2333 block[j]=0;
2334 }
2335
2336 for(i=0; i<=last; i++){
2337 const int j= scantable[i];
2338 const int perm_j= permutation[j];
2339 block[perm_j]= temp[j];
2340 }
2341 }
2342
2343 void ff_mpeg_flush(AVCodecContext *avctx){
2344 int i;
2345 MpegEncContext *s = avctx->priv_data;
2346
2347 if(s==NULL || s->picture==NULL)
2348 return;
2349
2350 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2351 ff_mpeg_unref_picture(s, &s->picture[i]);
2352 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2353
2354 ff_mpeg_unref_picture(s, &s->current_picture);
2355 ff_mpeg_unref_picture(s, &s->last_picture);
2356 ff_mpeg_unref_picture(s, &s->next_picture);
2357
2358 s->mb_x= s->mb_y= 0;
2359
2360 s->parse_context.state= -1;
2361 s->parse_context.frame_start_found= 0;
2362 s->parse_context.overread= 0;
2363 s->parse_context.overread_index= 0;
2364 s->parse_context.index= 0;
2365 s->parse_context.last_index= 0;
2366 s->bitstream_buffer_size=0;
2367 s->pp_time=0;
2368 }
2369
2370 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2371 int16_t *block, int n, int qscale)
2372 {
2373 int i, level, nCoeffs;
2374 const uint16_t *quant_matrix;
2375
2376 nCoeffs= s->block_last_index[n];
2377
2378 if (n < 4)
2379 block[0] = block[0] * s->y_dc_scale;
2380 else
2381 block[0] = block[0] * s->c_dc_scale;
2382 /* XXX: only mpeg1 */
2383 quant_matrix = s->intra_matrix;
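/* MPEG-1 intra dequantization: (|level| * qscale * matrix) / 8, with each
   non-zero result forced odd ((level - 1) | 1) as IDCT mismatch control */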
2384 for(i=1;i<=nCoeffs;i++) {
2385 int j= s->intra_scantable.permutated[i];
2386 level = block[j];
2387 if (level) {
2388 if (level < 0) {
2389 level = -level;
2390 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2391 level = (level - 1) | 1;
2392 level = -level;
2393 } else {
2394 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2395 level = (level - 1) | 1;
2396 }
2397 block[j] = level;
2398 }
2399 }
2400 }
2401
2402 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2403 int16_t *block, int n, int qscale)
2404 {
2405 int i, level, nCoeffs;
2406 const uint16_t *quant_matrix;
2407
2408 nCoeffs= s->block_last_index[n];
2409
2410 quant_matrix = s->inter_matrix;
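/* MPEG-1 inter dequantization: ((2 * |level| + 1) * qscale * matrix) / 16,
   again forced odd for mismatch control */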
2411 for(i=0; i<=nCoeffs; i++) {
2412 int j= s->intra_scantable.permutated[i];
2413 level = block[j];
2414 if (level) {
2415 if (level < 0) {
2416 level = -level;
2417 level = (((level << 1) + 1) * qscale *
2418 ((int) (quant_matrix[j]))) >> 4;
2419 level = (level - 1) | 1;
2420 level = -level;
2421 } else {
2422 level = (((level << 1) + 1) * qscale *
2423 ((int) (quant_matrix[j]))) >> 4;
2424 level = (level - 1) | 1;
2425 }
2426 block[j] = level;
2427 }
2428 }
2429 }
2430
2431 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2432 int16_t *block, int n, int qscale)
2433 {
2434 int i, level, nCoeffs;
2435 const uint16_t *quant_matrix;
2436
2437 if(s->alternate_scan) nCoeffs= 63;
2438 else nCoeffs= s->block_last_index[n];
2439
2440 if (n < 4)
2441 block[0] = block[0] * s->y_dc_scale;
2442 else
2443 block[0] = block[0] * s->c_dc_scale;
2444 quant_matrix = s->intra_matrix;
2445 for(i=1;i<=nCoeffs;i++) {
2446 int j= s->intra_scantable.permutated[i];
2447 level = block[j];
2448 if (level) {
2449 if (level < 0) {
2450 level = -level;
2451 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2452 level = -level;
2453 } else {
2454 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2455 }
2456 block[j] = level;
2457 }
2458 }
2459 }
2460
2461 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2462 int16_t *block, int n, int qscale)
2463 {
2464 int i, level, nCoeffs;
2465 const uint16_t *quant_matrix;
2466 int sum=-1;
2467
2468 if(s->alternate_scan) nCoeffs= 63;
2469 else nCoeffs= s->block_last_index[n];
2470
2471 if (n < 4)
2472 block[0] = block[0] * s->y_dc_scale;
2473 else
2474 block[0] = block[0] * s->c_dc_scale;
2475 quant_matrix = s->intra_matrix;
2476 for(i=1;i<=nCoeffs;i++) {
2477 int j= s->intra_scantable.permutated[i];
2478 level = block[j];
2479 if (level) {
2480 if (level < 0) {
2481 level = -level;
2482 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2483 level = -level;
2484 } else {
2485 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2486 }
2487 block[j] = level;
2488 sum+=level;
2489 }
2490 }
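/* MPEG-2 mismatch control: sum is initialized to -1, so sum & 1 is set when
   the accumulated coefficient sum is even; toggle the last coefficient's LSB then */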
2491 block[63]^=sum&1;
2492 }
2493
2494 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2495 int16_t *block, int n, int qscale)
2496 {
2497 int i, level, nCoeffs;
2498 const uint16_t *quant_matrix;
2499 int sum=-1;
2500
2501 if(s->alternate_scan) nCoeffs= 63;
2502 else nCoeffs= s->block_last_index[n];
2503
2504 quant_matrix = s->inter_matrix;
2505 for(i=0; i<=nCoeffs; i++) {
2506 int j= s->intra_scantable.permutated[i];
2507 level = block[j];
2508 if (level) {
2509 if (level < 0) {
2510 level = -level;
2511 level = (((level << 1) + 1) * qscale *
2512 ((int) (quant_matrix[j]))) >> 4;
2513 level = -level;
2514 } else {
2515 level = (((level << 1) + 1) * qscale *
2516 ((int) (quant_matrix[j]))) >> 4;
2517 }
2518 block[j] = level;
2519 sum+=level;
2520 }
2521 }
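/* same MPEG-2 mismatch control as in the intra bitexact case above */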
2522 block[63]^=sum&1;
2523 }
2524
2525 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2526 int16_t *block, int n, int qscale)
2527 {
2528 int i, level, qmul, qadd;
2529 int nCoeffs;
2530
2531 assert(s->block_last_index[n]>=0);
2532
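/* H.263 dequantization: |rec| = 2 * qscale * |level| + qadd, where
   qadd = (qscale - 1) | 1, i.e. qscale for odd and qscale - 1 for even
   qscale (qadd is 0 in AIC mode) */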
2533 qmul = qscale << 1;
2534
2535 if (!s->h263_aic) {
2536 if (n < 4)
2537 block[0] = block[0] * s->y_dc_scale;
2538 else
2539 block[0] = block[0] * s->c_dc_scale;
2540 qadd = (qscale - 1) | 1;
2541 }else{
2542 qadd = 0;
2543 }
2544 if(s->ac_pred)
2545 nCoeffs=63;
2546 else
2547 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2548
2549 for(i=1; i<=nCoeffs; i++) {
2550 level = block[i];
2551 if (level) {
2552 if (level < 0) {
2553 level = level * qmul - qadd;
2554 } else {
2555 level = level * qmul + qadd;
2556 }
2557 block[i] = level;
2558 }
2559 }
2560 }
2561
2562 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2563 int16_t *block, int n, int qscale)
2564 {
2565 int i, level, qmul, qadd;
2566 int nCoeffs;
2567
2568 assert(s->block_last_index[n]>=0);
2569
2570 qadd = (qscale - 1) | 1;
2571 qmul = qscale << 1;
2572
2573 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2574
2575 for(i=0; i<=nCoeffs; i++) {
2576 level = block[i];
2577 if (level) {
2578 if (level < 0) {
2579 level = level * qmul - qadd;
2580 } else {
2581 level = level * qmul + qadd;
2582 }
2583 block[i] = level;
2584 }
2585 }
2586 }
2587
2588 /**
2589 * Set qscale and update the qscale-dependent variables.
2590 */
2591 void ff_set_qscale(MpegEncContext * s, int qscale)
2592 {
2593 if (qscale < 1)
2594 qscale = 1;
2595 else if (qscale > 31)
2596 qscale = 31;
2597
2598 s->qscale = qscale;
2599 s->chroma_qscale= s->chroma_qscale_table[qscale];
2600
2601 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2602 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2603 }
2604
2605 void ff_MPV_report_decode_progress(MpegEncContext *s)
2606 {
2607 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2608 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2609 }
2610
2611 #if CONFIG_ERROR_RESILIENCE
2612 void ff_mpeg_er_frame_start(MpegEncContext *s)
2613 {
2614 ERContext *er = &s->er;
2615
2616 er->cur_pic = s->current_picture_ptr;
2617 er->last_pic = s->last_picture_ptr;
2618 er->next_pic = s->next_picture_ptr;
2619
2620 er->pp_time = s->pp_time;
2621 er->pb_time = s->pb_time;
2622 er->quarter_sample = s->quarter_sample;
2623 er->partitioned_frame = s->partitioned_frame;
2624
2625 ff_er_frame_start(er);
2626 }
2627 #endif /* CONFIG_ERROR_RESILIENCE */