/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */

#include "libavutil/intmath.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "dsputil.h"
#include "internal.h"
#include "mpegvideo.h"
#include "mpegvideo_common.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "faandct.h"
#include "xvmc_internal.h"
#include "thread.h"
#include <limits.h>

//#undef NDEBUG
//#include <assert.h>

static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);


/* enable all paranoid tests for rounding, overflows, etc... */
//#define PARANOID

//#define DEBUG

static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};

const uint8_t ff_mpeg1_dc_scale_table[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};

static const uint8_t mpeg2_dc_scale_table1[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};

static const uint8_t mpeg2_dc_scale_table2[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};

static const uint8_t mpeg2_dc_scale_table3[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};

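/* Note: ff_mpeg2_dc_scale_table below is indexed by the MPEG-2
 * intra_dc_precision field (0-3, i.e. 8- to 11-bit intra DC); a higher
 * precision selects a smaller DC quantizer scale (8, 4, 2, 1). */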
const uint8_t * const ff_mpeg2_dc_scale_table[4]={
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};

const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};

const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_DXVA2_VLD,
    PIX_FMT_VAAPI_VLD,
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};

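/* Scan for an MPEG start code prefix (0x000001xx). *state carries the last
 * bytes seen, so a start code that straddles two input buffers is still found
 * on a later call; the return value points just past the start code, or to
 * end if no complete start code was found. */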
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    int i;

    assert(p<=end);
    if(p>=end)
        return end;

    for(i=0; i<3; i++){
        uint32_t tmp= *state << 8;
        *state= tmp + *(p++);
        if(tmp == 0x100 || p==end)
            return p;
    }

    while(p<end){
        if     (p[-1] > 1      ) p+= 3;
        else if(p[-2]          ) p+= 2;
        else if(p[-3]|(p[-1]-1)) p++;
        else{
            p++;
            break;
        }
    }

    p= FFMIN(p, end)-4;
    *state= AV_RB32(p);

    return p+4;
}

/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if(s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#if HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    /* load & permute scantables
       note: only wmv uses different ones
    */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}

void ff_copy_picture(Picture *dst, Picture *src){
    *dst = *src;
    dst->type= FF_BUFFER_TYPE_COPY;
}

/**
 * Release a frame buffer
 */
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
    ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
    av_freep(&pic->hwaccel_picture_private);
}

/**
 * Allocate a frame buffer
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);

    if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
        av_freep(&pic->hwaccel_picture_private);
        return -1;
    }

    if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    if (pic->linesize[1] != pic->linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}

/**
 * allocates a Picture
 * The pixels are allocated/set by calling get_buffer() if shared=0
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
    const int mb_array_size= s->mb_stride*s->mb_height;
    const int b8_array_size= s->b8_stride*s->mb_height*2;
    const int b4_array_size= s->b4_stride*s->mb_height*4;
    int i;
    int r= -1;

    if(shared){
        assert(pic->data[0]);
        assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
        pic->type= FF_BUFFER_TYPE_SHARED;
    }else{
        assert(!pic->data[0]);

        if (alloc_frame_buffer(s, pic) < 0)
            return -1;

        s->linesize  = pic->linesize[0];
        s->uvlinesize= pic->linesize[1];
    }

    if(pic->qscale_table==NULL){
        if (s->encoding) {
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var   , mb_array_size * sizeof(int16_t)  , fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t)  , fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean  , mb_array_size * sizeof(int8_t )  , fail)
        }

        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t)  , fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
        pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
        if(s->out_format == FMT_H264){
            for(i=0; i<2; i++){
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
            }
            pic->motion_subsample_log2= 2;
        }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
            for(i=0; i<2; i++){
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
            }
            pic->motion_subsample_log2= 3;
        }
        if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
        }
        pic->qstride= s->mb_stride;
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
    }

    /* It might be nicer if the application would keep track of these
     * but it would require an API change. */
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
    s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
        pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
    pic->owner2 = NULL;

    return 0;
fail: //for the FF_ALLOCZ_OR_GOTO macro
    if(r>=0)
        free_frame_buffer(s, pic);
    return -1;
}

/**
 * deallocates a picture
 */
static void free_picture(MpegEncContext *s, Picture *pic){
    int i;

    if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
        free_frame_buffer(s, pic);
    }

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->mbskip_table);
    av_freep(&pic->qscale_table);
    av_freep(&pic->mb_type_base);
    av_freep(&pic->dct_coeff);
    av_freep(&pic->pan_scan);
    pic->mb_type= NULL;
    for(i=0; i<2; i++){
        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->ref_index[i]);
    }

    if(pic->type == FF_BUFFER_TYPE_SHARED){
        for(i=0; i<4; i++){
            pic->base[i]=
            pic->data[i]= NULL;
        }
        pic->type= 0;
    }
}

static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;

    //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
    s->me.temp=         s->me.scratchpad;
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
        if(s->avctx->noise_reduction){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; //free() through MPV_common_end()
}

static void free_duplicate_context(MpegEncContext *s){
    if(s==NULL) return;

    av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
    av_freep(&s->me.scratchpad);
    s->me.temp=
    s->rd_scratchpad=
    s->b_scratchpad=
    s->obmc_scratchpad= NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
    s->block= NULL;
}

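/* Note: the next two helpers implement the per-slice-thread context update:
 * the per-thread pointers and counters listed in COPY() are saved, the whole
 * MpegEncContext is overwritten with the source thread's context, and the
 * saved fields are then restored on top of the copy. */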
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#define COPY(a) bak->a= src->a
    COPY(allocated_edge_emu_buffer);
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}

void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
    MpegEncContext bak;
    int i;
    //FIXME copy only needed parts
//START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for(i=0;i<12;i++){
        dst->pblocks[i] = &dst->block[i];
    }
//STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
}

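/* Frame-threading update callback: copy the state a new decoding thread needs
 * (picture pointers, timing fields, buffered bitstream data, ...) from the
 * context of the thread that handled the previous frame. */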
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if(dst == src || !s1->context_initialized) return 0;

    //FIXME can parameters change on I-frames? in that case dst may need a reinit
    if(!s->context_initialized){
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->picture_range_start  += MAX_PICTURE_COUNT;
        s->picture_range_end    += MAX_PICTURE_COUNT;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        MPV_common_init(s);
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
    memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);

    s->last_picture_ptr     = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr  = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr     = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);

    //Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;

    //MPEG4 timing info
    memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);

    //B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->dropable     = s1->dropable;

    //DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if(s1->bitstream_buffer){
        if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
        memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    }

    //MPEG2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);

    if(!s1->first_field){
        s->last_pict_type= s1->pict_type;
        if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;

        if(s1->pict_type!=FF_B_TYPE){
            s->last_non_b_pict_type= s1->pict_type;
        }
    }

    return 0;
}

/**
 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
void MPV_common_defaults(MpegEncContext *s){
    s->y_dc_scale_table=
    s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table= ff_default_chroma_qscale_table;
    s->progressive_frame= 1;
    s->progressive_sequence= 1;
    s->picture_structure= PICT_FRAME;

    s->coded_picture_number = 0;
    s->picture_number = 0;
    s->input_picture_number = 0;

    s->picture_in_gop_number = 0;

    s->f_code = 1;
    s->b_code = 1;

    s->picture_range_start = 0;
    s->picture_range_end = MAX_PICTURE_COUNT;
}

/**
 * sets the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}

/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 */
av_cold int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;

    if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if(s->avctx->pix_fmt == PIX_FMT_NONE){
        av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
       (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
        av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    dsputil_init(&s->dsp, s->avctx);
    ff_dct_common_init(s);

    s->flags= s->avctx->flags;
    s->flags2= s->avctx->flags2;

    if (s->width && s->height) {
        s->mb_width  = (s->width  + 15) / 16;
        s->mb_stride = s->mb_width + 1;
        s->b8_stride = s->mb_width*2 + 1;
        s->b4_stride = s->mb_width*4 + 1;
        mb_array_size= s->mb_height * s->mb_stride;
        mv_table_size= (s->mb_height+2) * s->mb_stride + 1;

        /* set chroma shifts */
        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                      &(s->chroma_y_shift) );

        /* set default edge pos, will be overridden in decode_header if needed */
        s->h_edge_pos= s->mb_width*16;
        s->v_edge_pos= s->mb_height*16;

        s->mb_num = s->mb_width * s->mb_height;

        s->block_wrap[0]=
        s->block_wrap[1]=
        s->block_wrap[2]=
        s->block_wrap[3]= s->b8_stride;
        s->block_wrap[4]=
        s->block_wrap[5]= s->mb_stride;

        y_size = s->b8_stride * (2 * s->mb_height + 1);
        c_size = s->mb_stride * (s->mb_height + 1);
        yc_size = y_size + 2 * c_size;

        /* convert fourcc to upper case */
        s->codec_tag = ff_toupper4(s->avctx->codec_tag);

        s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);

        s->avctx->coded_frame= (AVFrame*)&s->current_picture;

        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error resilience code looks cleaner with this
        for(y=0; y<s->mb_height; y++){
            for(x=0; x<s->mb_width; x++){
                s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
            }
        }
        s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?

        if (s->encoding) {
            /* Allocate MV tables */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
            s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
            s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
            s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
            s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
            s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
            s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

            if(s->msmpeg4_version){
                FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

            /* Allocate MB type table */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) //needed for encoding

            FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)

            if(s->avctx->noise_reduction){
                FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
            }
        }
    }

    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
    for(i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
    }

    if (s->width && s->height) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)

        if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
            /* interlaced direct mode decoding tables */
            for(i=0; i<2; i++){
                int j, k;
                for(j=0; j<2; j++){
                    for(k=0; k<2; k++){
                        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
                        s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
                    }
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                    s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
            }
        }
        if (s->out_format == FMT_H263) {
            /* cbp values */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
            s->coded_block= s->coded_block_base + s->b8_stride + 1;

            /* cbp, ac_pred, pred_dir */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
            FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
        }

        if (s->h263_pred || s->h263_plus || !s->encoding) {
            /* dc values */
            //MN: we need these for error resilience of intra-frames
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
            s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
            s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
            s->dc_val[2] = s->dc_val[1] + c_size;
            for(i=0;i<yc_size;i++)
                s->dc_val_base[i] = 1024;
        }

        /* which mb is an intra block */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
        memset(s->mbintra_table, 1, mb_array_size);

        /* init macroblock skip table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
        //Note the +1 is for a quicker mpeg4 slice_end detection
        FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);

        s->parse_context.state= -1;
        if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
            s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
            s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
            s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
        }
    }

    s->context_initialized = 1;
    s->thread_context[0]= s;

    if (s->width && s->height) {
        if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
            threads = s->avctx->thread_count;

            for(i=1; i<threads; i++){
                s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
            }

            for(i=0; i<threads; i++){
                if(init_duplicate_context(s->thread_context[i], s) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
                s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
            }
        } else {
            if(init_duplicate_context(s, s) < 0) goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
    }

    return 0;
 fail:
    MPV_common_end(s);
    return -1;
}

/* free everything allocated by MPV_common_init(), for both encoder and decoder */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
        for(i=0; i<s->avctx->thread_count; i++){
            free_duplicate_context(s->thread_context[i]);
        }
        for(i=1; i<s->avctx->thread_count; i++){
            av_freep(&s->thread_context[i]);
        }
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    if(s->picture && !s->avctx->is_copy){
        for(i=0; i<s->picture_count; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);
}

void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
    uint8_t index_run[MAX_RUN+1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if(static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for(last=0;last<2;last++) {
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for(i=start;i<end;i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        if(static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if(static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if(static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}

void init_vlc_rl(RLTable *rl)
{
    int i, q;

    for(q=0; q<32; q++){
        int qmul= q*2;
        int qadd= (q-1)|1;

        if(q==0){
            qmul=1;
            qadd=0;
        }
        for(i=0; i<rl->vlc.table_size; i++){
            int code= rl->vlc.table[i][0];
            int len = rl->vlc.table[i][1];
            int level, run;

            if(len==0){ // illegal code
                run= 66;
                level= MAX_LEVEL;
            }else if(len<0){ //more bits needed
                run= 0;
                level= code;
            }else{
                if(code==rl->n){ //esc
                    run= 66;
                    level= 0;
                }else{
                    run=   rl->table_run  [code] + 1;
                    level= rl->table_level[code] * qmul + qadd;
                    if(code >= rl->last) run+=192;
                }
            }
            rl->rl_vlc[q][i].len= len;
            rl->rl_vlc[q][i].level= level;
            rl->rl_vlc[q][i].run= run;
        }
    }
}

void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
{
    int i;

    /* release non reference frames */
    for(i=0; i<s->picture_count; i++){
        if(s->picture[i].data[0] && !s->picture[i].reference
           && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
           && (remove_current || &s->picture[i] != s->current_picture_ptr)
           /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
            free_frame_buffer(s, &s->picture[i]);
        }
    }
}

int ff_find_unused_picture(MpegEncContext *s, int shared){
    int i;

    if(shared){
        for(i=s->picture_range_start; i<s->picture_range_end; i++){
            if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
        }
    }else{
        for(i=s->picture_range_start; i<s->picture_range_end; i++){
            if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
        }
        for(i=s->picture_range_start; i<s->picture_range_end; i++){
            if(s->picture[i].data[0]==NULL) return i;
        }
    }

    av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
    /* We could return -1, but the codec would crash trying to draw into a
     * non-existing frame anyway. This is safer than waiting for a random crash.
     * Also the return of this is never useful, an encoder must only allocate
     * as much as allowed in the specification. This has no relationship to how
     * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
     * enough for such valid streams).
     * Plus, a decoder has to check stream validity and remove frames if too
     * many reference frames are around. Waiting for "OOM" is not correct at
     * all. Similarly, missing reference frames have to be replaced by
     * interpolated/MC frames, anything else is a bug in the codec ...
     */
    abort();
    return -1;
}

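/* Note: the per-coefficient dct_offset[] thresholds computed below grow with
 * the configured noise_reduction strength and shrink as the accumulated error
 * energy in dct_error_sum grows; both accumulators are halved whenever
 * dct_count exceeds 1<<16 so that recent blocks dominate. */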
static void update_noise_reduction(MpegEncContext *s){
    int intra, i;

    for(intra=0; intra<2; intra++){
        if(s->dct_count[intra] > (1<<16)){
            for(i=0; i<64; i++){
                s->dct_error_sum[intra][i] >>=1;
            }
            s->dct_count[intra] >>= 1;
        }

        for(i=0; i<64; i++){
            s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
        }
    }
}

/**
 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark&release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
        if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
            free_frame_buffer(s, s->last_picture_ptr);

            /* release forgotten pictures */
            /* if(mpeg124/h263) */
            if(!s->encoding){
                for(i=0; i<s->picture_count; i++){
                    if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                        av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                        free_frame_buffer(s, &s->picture[i]);
                    }
                }
            }
        }
    }

    if(!s->encoding){
        ff_release_unused_pictures(s, 1);

        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
            pic= s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
        else{
            i= ff_find_unused_picture(s, 0);
            pic= &s->picture[i];
        }

        pic->reference= 0;
        if (!s->dropable){
            if (s->codec_id == CODEC_ID_H264)
                pic->reference = s->picture_structure;
            else if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->reference = 3;
        }

        pic->coded_picture_number= s->coded_picture_number++;

        if(ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr= pic;
        //FIXME use only the vars from current_pic
        s->current_picture_ptr->top_field_first= s->top_field_first;
        if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
            if(s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
        s->current_picture_ptr->field_picture= s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->pict_type= s->pict_type;
//    if(s->flags && CODEC_FLAG_QSCALE)
//        s->current_picture_ptr->quality= s->new_picture_ptr->quality;
    s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr= s->next_picture_ptr;
        if(!s->dropable)
            s->next_picture_ptr= s->current_picture_ptr;
    }
/*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
        s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
        s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
        s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
        s->pict_type, s->dropable);*/

    if(s->codec_id != CODEC_ID_H264){
        if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) &&
           (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");

            /* Allocate a dummy frame */
            i= ff_find_unused_picture(s, 0);
            s->last_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
                return -1;
            ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
            ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
        }
        if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
            /* Allocate a dummy frame */
            i= ff_find_unused_picture(s, 0);
            s->next_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
                return -1;
            ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
            ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
        }
    }

    if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

    if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
        int i;
        for(i=0; i<4; i++){
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                s->current_picture.data[i] += s->current_picture.linesize[i];
            }
            s->current_picture.linesize[i] *= 2;
            s->last_picture.linesize[i]    *=2;
            s->next_picture.linesize[i]    *=2;
        }
    }

    s->error_recognition= avctx->error_recognition;

    /* set dequantizer, we can't do it during init as it might change for mpeg4
       and we can't do it in the header decode as init is not called for mpeg4 there yet */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}

/* generic function for encode/decode called after a frame has been coded/decoded */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* redraw edges for the frame if decoding didn't complete */
    //just to make sure that all data is rendered.
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_field_end(s);
    }else if((s->error_count || s->encoding)
       && !s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        s->dsp.draw_edges(s->current_picture.data[0], s->linesize  ,
                          s->h_edge_pos        , s->v_edge_pos,
                          EDGE_WIDTH           , EDGE_WIDTH        , EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize,
                          s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                          EDGE_WIDTH>>hshift   , EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize,
                          s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                          EDGE_WIDTH>>hshift   , EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    s->last_pict_type    = s->pict_type;
    s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
    if(s->pict_type!=AV_PICTURE_TYPE_B){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release non-reference frames */
        for(i=0; i<s->picture_count; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;

    if (s->codec_id != CODEC_ID_H264 && s->current_picture.reference) {
        ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
    }
}

/**
 * draws a line from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int x, y, fr, f;

    sx= av_clip(sx, 0, w-1);
    sy= av_clip(sy, 0, h-1);
    ex= av_clip(ex, 0, w-1);
    ey= av_clip(ey, 0, h-1);

    buf[sy*stride + sx]+= color;

    if(FFABS(ex - sx) > FFABS(ey - sy)){
        if(sx > ex){
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ex-= sx;
        f= ((ey-sy)<<16)/ex;
        for(x= 0; x <= ex; x++){
            y = (x*f)>>16;
            fr= (x*f)&0xFFFF;
            buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
            buf[(y+1)*stride + x]+= (color*         fr )>>16;
        }
    }else{
        if(sy > ey){
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ey-= sy;
        if(ey) f= ((ex-sx)<<16)/ey;
        else   f= 0;
        for(y= 0; y <= ey; y++){
            x = (y*f)>>16;
            fr= (y*f)&0xFFFF;
            buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
            buf[y*stride + x+1]+= (color*         fr )>>16;
        }
    }
}

/**
 * draws an arrow from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int dx,dy;

    sx= av_clip(sx, -100, w+100);
    sy= av_clip(sy, -100, h+100);
    ex= av_clip(ex, -100, w+100);
    ey= av_clip(ey, -100, h+100);

    dx= ex - sx;
    dy= ey - sy;

    if(dx*dx + dy*dy > 3*3){
        int rx=  dx + dy;
        int ry= -dx + dy;
        int length= ff_sqrt((rx*rx + ry*ry)<<8);

        //FIXME subpixel accuracy
        rx= ROUNDED_DIV(rx*3<<4, length);
        ry= ROUNDED_DIV(ry*3<<4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}

/**
 * prints debugging info for the given picture.
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){

    if(s->avctx->hwaccel || !pict || !pict->mb_type) return;

    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
        case AV_PICTURE_TYPE_I:  av_log(s->avctx,AV_LOG_DEBUG,"I\n");  break;
        case AV_PICTURE_TYPE_P:  av_log(s->avctx,AV_LOG_DEBUG,"P\n");  break;
        case AV_PICTURE_TYPE_B:  av_log(s->avctx,AV_LOG_DEBUG,"B\n");  break;
        case AV_PICTURE_TYPE_S:  av_log(s->avctx,AV_LOG_DEBUG,"S\n");  break;
        case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
        case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
        }
        for(y=0; y<s->mb_height; y++){
            for(x=0; x<s->mb_width; x++){
                if(s->avctx->debug&FF_DEBUG_SKIP){
                    int count= s->mbskip_table[x + y*s->mb_stride];
                    if(count>9) count=9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if(s->avctx->debug&FF_DEBUG_QP){
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
                }
                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
                    int mb_type= pict->mb_type[x + y*s->mb_stride];
                    //Type & MV direction
                    if(IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if(IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if(IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if(IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if(IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if(IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if(!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if(!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    //segmentation
                    if(IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if(IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if(IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");


                    if(IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
//                av_log(s->avctx, AV_LOG_DEBUG, " ");
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
        const int shift= 1 + s->quarter_sample;
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width = s->avctx->width;
        const int height= s->avctx->height;
        const int mv_sample_log2= 4 - pict->motion_subsample_log2;
        const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
        s->low_delay=0; //needed to see the vectors without trashing the buffers

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
        for(i=0; i<3; i++){
            memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
            pict->data[i]= s->visualization_buffer[i];
        }
        pict->type= FF_BUFFER_TYPE_COPY;
        ptr= pict->data[0];
        block_height = 16>>v_chroma_shift;

        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            int mb_x;
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                const int mb_index= mb_x + mb_y*s->mb_stride;
                if((s->avctx->debug_mv) && pict->motion_val){
                    int type;
                    for(type=0; type<3; type++){
                        int direction = 0;
                        switch (type) {
                        case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
                                    continue;
                                direction = 0;
                                break;
                        case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
                                    continue;
                                direction = 0;
                                break;
                        case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
                                    continue;
                                direction = 1;
                                break;
                        }
                        if(!USES_LIST(pict->mb_type[mb_index], direction))
                            continue;

                        if(IS_8X8(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<4; i++){
                                int sx= mb_x*16 + 4 + 8*(i&1);
                                int sy= mb_y*16 + 4 + 8*(i>>1);
                                int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                                int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                                int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                            }
                        }else if(IS_16X8(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 8;
                                int sy=mb_y*16 + 4 + 8*i;
                                int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2;

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else if(IS_8X16(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 4 + 8*i;
                                int sy=mb_y*16 + 8;
                                int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
1485 my*=2;
115329f1 1486
4f8a8319 1487 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
0c9bbaec
WH
1488 }
1489 }else{
1490 int sx= mb_x*16 + 8;
1491 int sy= mb_y*16 + 8;
650cec0c 1492 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
0c9bbaec
WH
1493 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1494 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
4f8a8319 1495 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
9bc8b386 1496 }
115329f1 1497 }
864119b6
MN
1498 }
1499 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1500 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1501 int y;
014d2f05
BC
1502 for(y=0; y<block_height; y++){
1503 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1504 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
864119b6
MN
1505 }
1506 }
1507 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1508 int mb_type= pict->mb_type[mb_index];
1509 uint64_t u,v;
1510 int y;
1511#define COLOR(theta, r)\
1512u= (int)(128 + r*cos(theta*3.141592/180));\
1513v= (int)(128 + r*sin(theta*3.141592/180));
1514
115329f1 1515
864119b6
MN
1516 u=v=128;
1517 if(IS_PCM(mb_type)){
1518 COLOR(120,48)
1519 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1520 COLOR(30,48)
1521 }else if(IS_INTRA4x4(mb_type)){
1522 COLOR(90,48)
1523 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1524// COLOR(120,48)
1525 }else if(IS_DIRECT(mb_type)){
1526 COLOR(150,48)
1527 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1528 COLOR(170,48)
1529 }else if(IS_GMC(mb_type)){
1530 COLOR(190,48)
1531 }else if(IS_SKIP(mb_type)){
1532// COLOR(180,48)
1533 }else if(!USES_LIST(mb_type, 1)){
1534 COLOR(240,48)
1535 }else if(!USES_LIST(mb_type, 0)){
1536 COLOR(0,48)
1537 }else{
1538 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1539 COLOR(300,48)
1540 }
1541
1542 u*= 0x0101010101010101ULL;
1543 v*= 0x0101010101010101ULL;
014d2f05
BC
1544 for(y=0; y<block_height; y++){
1545 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1546 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
864119b6
MN
1547 }
1548
1549 //segmentation
1550 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1551 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1552 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1553 }
1554 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1555 for(y=0; y<16; y++)
1556 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1557 }
e21f3983
LM
1558 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1559 int dm= 1 << (mv_sample_log2-2);
1560 for(i=0; i<4; i++){
1561 int sx= mb_x*16 + 8*(i&1);
1562 int sy= mb_y*16 + 8*(i>>1);
1563 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1564 //FIXME bidir
1565 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1566 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1567 for(y=0; y<8; y++)
1568 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1569 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1570 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1571 }
1572 }
115329f1 1573
864119b6
MN
1574 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1575 // hmm
1576 }
db6e7795
MN
1577 }
1578 s->mbskip_table[mb_index]=0;
1579 }
1580 }
1581 }
7bc9090a
MN
1582}
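/* Illustrative sample output (assuming FF_DEBUG_SKIP, FF_DEBUG_QP and
 * FF_DEBUG_MB_TYPE are all set): every macroblock contributes a skip count,
 * a 2-digit QP and three type/partition/interlace characters, so one row of
 * a P-frame might print roughly as
 *     0 5I   0 5>-  1 5S
 * with 'I' an intra16x16 MB, '>' a forward predicted MB and 'S' a skipped MB.
 */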
1583
115329f1 1584static inline int hpel_motion_lowres(MpegEncContext *s,
ac8b03c0
MN
1585 uint8_t *dest, uint8_t *src,
1586 int field_based, int field_select,
1587 int src_x, int src_y,
1588 int width, int height, int stride,
1589 int h_edge_pos, int v_edge_pos,
1590 int w, int h, h264_chroma_mc_func *pix_op,
1591 int motion_x, int motion_y)
1592{
1593 const int lowres= s->avctx->lowres;
e1bb0364 1594 const int op_index= FFMIN(lowres, 2);
ac8b03c0
MN
1595 const int s_mask= (2<<lowres)-1;
1596 int emu=0;
1597 int sx, sy;
1598
1599 if(s->quarter_sample){
1600 motion_x/=2;
1601 motion_y/=2;
1602 }
1603
1604 sx= motion_x & s_mask;
1605 sy= motion_y & s_mask;
1606 src_x += motion_x >> (lowres+1);
1607 src_y += motion_y >> (lowres+1);
115329f1 1608
ac8b03c0
MN
1609 src += src_y * stride + src_x;
1610
1611 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1612 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
2e279598 1613 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
ac8b03c0
MN
1614 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1615 src= s->edge_emu_buffer;
1616 emu=1;
1617 }
1618
e1bb0364
AN
1619 sx= (sx << 2) >> lowres;
1620 sy= (sy << 2) >> lowres;
ac8b03c0
MN
1621 if(field_select)
1622 src += s->linesize;
e1bb0364 1623 pix_op[op_index](dest, src, stride, h, sx, sy);
ac8b03c0
MN
1624 return emu;
1625}
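/* Worked example (assuming lowres==1, quarter_sample==0): s_mask = (2<<1)-1 = 3,
 * so a half-pel motion_x of 7 (3.5 full-resolution pixels = 1.75 lowres pixels)
 * gives
 *     sx     = 7 & 3         = 3   (fraction, in quarters of a lowres pixel)
 *     src_x += 7 >> 2        = 1   (integer lowres pixels)
 *     sx     = (3 << 2) >> 1 = 6   (rescaled to the 1/8 steps taken by the
 *                                   h264_chroma_mc interpolators)
 * i.e. the expected 6/8 = 0.75 lowres-pixel offset.
 */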
1626
de6d9b64 1627/* apply one mpeg motion vector to the three components */
3ada94ba 1628static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
0c1a9eda 1629 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
5ba326b5 1630 int field_based, int bottom_field, int field_select,
3ada94ba 1631 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
078cdecf 1632 int motion_x, int motion_y, int h, int mb_y)
de6d9b64 1633{
95d356c5 1634 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
3ada94ba
BF
1635 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1636 const int lowres= s->avctx->lowres;
e1bb0364 1637 const int op_index= FFMIN(lowres, 2);
3ada94ba
BF
1638 const int block_s= 8>>lowres;
1639 const int s_mask= (2<<lowres)-1;
1640 const int h_edge_pos = s->h_edge_pos >> lowres;
1641 const int v_edge_pos = s->v_edge_pos >> lowres;
95d356c5 1642 linesize = s->current_picture.linesize[0] << field_based;
b536d0aa 1643 uvlinesize = s->current_picture.linesize[1] << field_based;
93a21abd 1644
ca74c0a1 1645 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
3ada94ba
BF
1646 motion_x/=2;
1647 motion_y/=2;
1648 }
1649
1650 if(field_based){
1651 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1652 }
1653
1654 sx= motion_x & s_mask;
1655 sy= motion_y & s_mask;
1656 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
078cdecf 1657 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
115329f1 1658
178fcca8 1659 if (s->out_format == FMT_H263) {
71845595
MN
1660 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1661 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
178fcca8
MN
1662 uvsrc_x = src_x>>1;
1663 uvsrc_y = src_y>>1;
1664 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1665 mx = motion_x / 4;
1666 my = motion_y / 4;
1667 uvsx = (2*mx) & s_mask;
1668 uvsy = (2*my) & s_mask;
1669 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
078cdecf 1670 uvsrc_y = mb_y*block_s + (my >> lowres);
178fcca8
MN
1671 } else {
1672 mx = motion_x / 2;
1673 my = motion_y / 2;
1674 uvsx = mx & s_mask;
1675 uvsy = my & s_mask;
1676 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
078cdecf 1677 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
178fcca8
MN
1678 }
1679
1680 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1681 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1682 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
1683
1684 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1685 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
2e279598 1686 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
178fcca8
MN
1687 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1688 ptr_y = s->edge_emu_buffer;
49fb20cb 1689 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
178fcca8 1690 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
2e279598 1691 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
178fcca8 1692 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
2e279598 1693 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
178fcca8
MN
1694 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1695 ptr_cb= uvbuf;
1696 ptr_cr= uvbuf+16;
1697 }
1698 }
1699
da9c9637
MN
1700 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
1701 dest_y += s->linesize;
1702 dest_cb+= s->uvlinesize;
1703 dest_cr+= s->uvlinesize;
1704 }
1705
1706 if(field_select){
1707 ptr_y += s->linesize;
1708 ptr_cb+= s->uvlinesize;
1709 ptr_cr+= s->uvlinesize;
1710 }
1711
e1bb0364
AN
1712 sx= (sx << 2) >> lowres;
1713 sy= (sy << 2) >> lowres;
178fcca8 1714 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
115329f1 1715
49fb20cb 1716 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
e1bb0364
AN
1717 uvsx= (uvsx << 2) >> lowres;
1718 uvsy= (uvsy << 2) >> lowres;
1719 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1720 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
178fcca8 1721 }
5f6c92d4 1722 //FIXME h261 lowres loop filter
178fcca8
MN
1723}
1724
ac8b03c0
MN
1725static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1726 uint8_t *dest_cb, uint8_t *dest_cr,
1727 uint8_t **ref_picture,
1728 h264_chroma_mc_func *pix_op,
1729 int mx, int my){
1730 const int lowres= s->avctx->lowres;
e1bb0364 1731 const int op_index= FFMIN(lowres, 2);
ac8b03c0
MN
1732 const int block_s= 8>>lowres;
1733 const int s_mask= (2<<lowres)-1;
1734 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1735 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1736 int emu=0, src_x, src_y, offset, sx, sy;
1737 uint8_t *ptr;
115329f1 1738
ac8b03c0
MN
1739 if(s->quarter_sample){
1740 mx/=2;
1741 my/=2;
1742 }
1743
1744 /* In case of 8X8, we construct a single chroma motion vector
1745 with a special rounding */
1746 mx= ff_h263_round_chroma(mx);
1747 my= ff_h263_round_chroma(my);
115329f1 1748
ac8b03c0
MN
1749 sx= mx & s_mask;
1750 sy= my & s_mask;
1751 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1752 src_y = s->mb_y*block_s + (my >> (lowres+1));
115329f1 1753
ac8b03c0
MN
1754 offset = src_y * s->uvlinesize + src_x;
1755 ptr = ref_picture[1] + offset;
1756 if(s->flags&CODEC_FLAG_EMU_EDGE){
1757 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1758 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
2e279598 1759 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
ac8b03c0
MN
1760 ptr= s->edge_emu_buffer;
1761 emu=1;
1762 }
115329f1 1763 }
e1bb0364
AN
1764 sx= (sx << 2) >> lowres;
1765 sy= (sy << 2) >> lowres;
1766 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
115329f1 1767
ac8b03c0
MN
1768 ptr = ref_picture[2] + offset;
1769 if(emu){
2e279598 1770 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
ac8b03c0
MN
1771 ptr= s->edge_emu_buffer;
1772 }
e1bb0364 1773 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
ac8b03c0
MN
1774}
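/* Note on the 8x8 case (sketch of the call contract): MPV_motion_lowres()
 * below sums the four luma vectors into mx/my before calling this function,
 * and ff_h263_round_chroma() reduces that sum to a single chroma vector
 * following the H.263/MPEG-4 chroma rounding rule.
 */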
1775
f7190f73 1776/**
bb628dae 1777 * motion compensation of a single macroblock
eb14c713
MN
1778 * @param s context
1779 * @param dest_y luma destination pointer
1780 * @param dest_cb chroma cb/u destination pointer
1781 * @param dest_cr chroma cr/v destination pointer
1782 * @param dir direction (0->forward, 1->backward)
1783 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
9a58234f 1784 * @param pix_op halfpel motion compensation function (average or put, normally)
eb14c713
MN
1785 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1786 */
3ada94ba 1787static inline void MPV_motion_lowres(MpegEncContext *s,
0c1a9eda 1788 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
115329f1 1789 int dir, uint8_t **ref_picture,
3ada94ba 1790 h264_chroma_mc_func *pix_op)
de6d9b64 1791{
3ada94ba 1792 int mx, my;
de6d9b64 1793 int mb_x, mb_y, i;
3ada94ba
BF
1794 const int lowres= s->avctx->lowres;
1795 const int block_s= 8>>lowres;
de6d9b64
FB
1796
1797 mb_x = s->mb_x;
1798 mb_y = s->mb_y;
1799
1800 switch(s->mv_type) {
1801 case MV_TYPE_16X16:
3ada94ba
BF
1802 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1803 0, 0, 0,
1804 ref_picture, pix_op,
078cdecf 1805 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
de6d9b64
FB
1806 break;
1807 case MV_TYPE_8X8:
1e7bfebe
MN
1808 mx = 0;
1809 my = 0;
1e7bfebe 1810 for(i=0;i<4;i++) {
3ada94ba 1811 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
5ba326b5 1812 ref_picture[0], 0, 0,
3ada94ba 1813 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
f7190f73 1814 s->width, s->height, s->linesize,
3ada94ba
BF
1815 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1816 block_s, block_s, pix_op,
f7190f73 1817 s->mv[dir][i][0], s->mv[dir][i][1]);
1e7bfebe
MN
1818
1819 mx += s->mv[dir][i][0];
1820 my += s->mv[dir][i][1];
225f9c44 1821 }
1e7bfebe 1822
49fb20cb 1823 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
3ada94ba 1824 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
de6d9b64
FB
1825 break;
1826 case MV_TYPE_FIELD:
1827 if (s->picture_structure == PICT_FRAME) {
3ada94ba
BF
1828 /* top field */
1829 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1830 1, 0, s->field_select[dir][0],
1831 ref_picture, pix_op,
078cdecf 1832 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
3ada94ba
BF
1833 /* bottom field */
1834 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1835 1, 1, s->field_select[dir][1],
1836 ref_picture, pix_op,
078cdecf 1837 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
de6d9b64 1838 } else {
975a1447 1839 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
5ba326b5 1840 ref_picture= s->current_picture_ptr->data;
115329f1 1841 }
de6d9b64 1842
3ada94ba 1843 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
5ba326b5
MN
1844 0, 0, s->field_select[dir][0],
1845 ref_picture, pix_op,
078cdecf 1846 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
de6d9b64
FB
1847 }
1848 break;
c8a4ebbf
MN
1849 case MV_TYPE_16X8:
1850 for(i=0; i<2; i++){
1851 uint8_t ** ref2picture;
d55e93e4 1852
975a1447 1853 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
d55e93e4 1854 ref2picture= ref_picture;
d55e93e4 1855 }else{
5ba326b5 1856 ref2picture= s->current_picture_ptr->data;
115329f1 1857 }
d55e93e4 1858
3ada94ba 1859 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
c8a4ebbf 1860 0, 0, s->field_select[dir][i],
5ba326b5 1861 ref2picture, pix_op,
078cdecf 1862 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
115329f1 1863
3ada94ba
BF
1864 dest_y += 2*block_s*s->linesize;
1865 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1866 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
115329f1 1867 }
d55e93e4 1868 break;
1dff7d56 1869 case MV_TYPE_DMV:
1dff7d56 1870 if(s->picture_structure == PICT_FRAME){
c8a4ebbf
MN
1871 for(i=0; i<2; i++){
1872 int j;
1873 for(j=0; j<2; j++){
3ada94ba 1874 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
c8a4ebbf
MN
1875 1, j, j^i,
1876 ref_picture, pix_op,
078cdecf 1877 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
c8a4ebbf 1878 }
3ada94ba 1879 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
c8a4ebbf 1880 }
1dff7d56 1881 }else{
c8a4ebbf 1882 for(i=0; i<2; i++){
3ada94ba 1883 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
c8a4ebbf
MN
1884 0, 0, s->picture_structure != i+1,
1885 ref_picture, pix_op,
078cdecf 1886 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1dff7d56 1887
c8a4ebbf 1888 // after put we make avg of the same block
3ada94ba 1889 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1dff7d56 1890
c8a4ebbf
MN
1891 //opposite parity is always in the same frame if this is second field
1892 if(!s->first_field){
115329f1 1893 ref_picture = s->current_picture_ptr->data;
c8a4ebbf 1894 }
5ba326b5 1895 }
1dff7d56 1896 }
1dff7d56 1897 break;
f7190f73 1898 default: assert(0);
de6d9b64
FB
1899 }
1900}
1901
6a9c8594
AS
1902/**
1903 * find the lowest MB row referenced in the MVs
1904 */
1905int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1906{
1907 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1908 int my, off, i, mvs;
1909
1910 if (s->picture_structure != PICT_FRAME) goto unhandled;
1911
1912 switch (s->mv_type) {
1913 case MV_TYPE_16X16:
1914 mvs = 1;
1915 break;
1916 case MV_TYPE_16X8:
1917 mvs = 2;
1918 break;
1919 case MV_TYPE_8X8:
1920 mvs = 4;
1921 break;
1922 default:
1923 goto unhandled;
1924 }
1925
1926 for (i = 0; i < mvs; i++) {
1927 my = s->mv[dir][i][1]<<qpel_shift;
1928 my_max = FFMAX(my_max, my);
1929 my_min = FFMIN(my_min, my);
1930 }
1931
1932 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1933
1934 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1935unhandled:
1936 return s->mb_height-1;
1937}
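/* Worked example: the <<qpel_shift above normalizes MVs to quarter-pel, and
 * 64 quarter-pel units span one 16-pixel MB row, hence the (... + 63) >> 6
 * round-up. For a half-pel codec with a vertical MV of -40 (20 pixels up):
 *     my  = -40 << 1 = -80
 *     off = (80 + 63) >> 6 = 2
 * so frame threading only has to wait for the reference picture to reach two
 * MB rows below the current one.
 */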
1938
3ada94ba
BF
1939/* put block[] to dest[] */
1940static inline void put_dct(MpegEncContext *s,
1941 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
178fcca8 1942{
3ada94ba
BF
1943 s->dct_unquantize_intra(s, block, i, qscale);
1944 s->dsp.idct_put (dest, line_size, block);
1945}
da9c9637 1946
3ada94ba
BF
1947/* add block[] to dest[] */
1948static inline void add_dct(MpegEncContext *s,
1949 DCTELEM *block, int i, uint8_t *dest, int line_size)
1950{
1951 if (s->block_last_index[i] >= 0) {
1952 s->dsp.idct_add (dest, line_size, block);
1953 }
1954}
2417652e 1955
115329f1 1956static inline void add_dequant_dct(MpegEncContext *s,
332f9ac4 1957 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
0f440e02 1958{
de6d9b64 1959 if (s->block_last_index[i] >= 0) {
d50635cd 1960 s->dct_unquantize_inter(s, block, i, qscale);
9dbcbd92 1961
b0368839 1962 s->dsp.idct_add (dest, line_size, block);
de6d9b64
FB
1963 }
1964}
1965
7f2fe444
MN
1966/**
1967 * cleans dc, ac, coded_block for the current non-intra MB
1968 */
1969void ff_clean_intra_table_entries(MpegEncContext *s)
1970{
137c8468 1971 int wrap = s->b8_stride;
7f2fe444 1972 int xy = s->block_index[0];
115329f1
DB
1973
1974 s->dc_val[0][xy ] =
1975 s->dc_val[0][xy + 1 ] =
7f2fe444
MN
1976 s->dc_val[0][xy + wrap] =
1977 s->dc_val[0][xy + 1 + wrap] = 1024;
1978 /* ac pred */
0c1a9eda
ZK
1979 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1980 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
7f2fe444
MN
1981 if (s->msmpeg4_version>=3) {
1982 s->coded_block[xy ] =
1983 s->coded_block[xy + 1 ] =
1984 s->coded_block[xy + wrap] =
1985 s->coded_block[xy + 1 + wrap] = 0;
1986 }
1987 /* chroma */
137c8468
MN
1988 wrap = s->mb_stride;
1989 xy = s->mb_x + s->mb_y * wrap;
7f2fe444
MN
1990 s->dc_val[1][xy] =
1991 s->dc_val[2][xy] = 1024;
1992 /* ac pred */
0c1a9eda
ZK
1993 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1994 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
115329f1 1995
137c8468 1996 s->mbintra_table[xy]= 0;
7f2fe444
MN
1997}
1998
de6d9b64
FB
1999/* generic function called after a macroblock has been parsed by the
2000 decoder or after it has been encoded by the encoder.
2001
2002 Important variables used:
2003 s->mb_intra : true if intra macroblock
2004 s->mv_dir : motion vector direction
2005 s->mv_type : motion vector type
2006 s->mv : motion vector
2007 s->interlaced_dct : true if interlaced dct used (mpeg2)
2008 */
54816a3e
KC
2009static av_always_inline
2010void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
bd7c626a 2011 int lowres_flag, int is_mpeg12)
de6d9b64 2012{
7bc9090a 2013 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
83344066 2014 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
78f9a878 2015 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2e7b4c84
IK
2016 return;
2017 }
de6d9b64 2018
8289c6fa
WH
2019 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2020 /* save DCT coefficients */
2021 int i,j;
2022 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
c4fb3b03
MN
2023 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2024 for(i=0; i<6; i++){
2025 for(j=0; j<64; j++){
8289c6fa 2026 *dct++ = block[i][s->dsp.idct_permutation[j]];
c4fb3b03
MN
2027 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2028 }
2029 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2030 }
8289c6fa
WH
2031 }
2032
1e491e29 2033 s->current_picture.qscale_table[mb_xy]= s->qscale;
79e7b305 2034
de6d9b64
FB
2035 /* update DC predictors for P macroblocks */
2036 if (!s->mb_intra) {
bd7c626a 2037 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
0f440e02 2038 if(s->mbintra_table[mb_xy])
7f2fe444 2039 ff_clean_intra_table_entries(s);
de6d9b64 2040 } else {
7f2fe444
MN
2041 s->last_dc[0] =
2042 s->last_dc[1] =
de6d9b64
FB
2043 s->last_dc[2] = 128 << s->intra_dc_precision;
2044 }
2045 }
bd7c626a 2046 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
0f440e02 2047 s->mbintra_table[mb_xy]=1;
bff6ecaa 2048
975a1447 2049 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
0c1a9eda 2050 uint8_t *dest_y, *dest_cb, *dest_cr;
0f440e02 2051 int dct_linesize, dct_offset;
b3184779
MN
2052 op_pixels_func (*op_pix)[4];
2053 qpel_mc_func (*op_qpix)[16];
dfb706da 2054 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
b536d0aa 2055 const int uvlinesize= s->current_picture.linesize[1];
975a1447 2056 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
178fcca8 2057 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
3bb4e23a 2058
1e491e29 2059 /* avoid copy if macroblock skipped in last frame too */
1e491e29
MN
2060 /* skip only during decoding as we might trash the buffers during encoding a bit */
2061 if(!s->encoding){
0c1a9eda 2062 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1e491e29 2063 const int age= s->current_picture.age;
0fd90455 2064
1e491e29
MN
2065 assert(age);
2066
160d679c
MM
2067 if (s->mb_skipped) {
2068 s->mb_skipped= 0;
975a1447 2069 assert(s->pict_type!=AV_PICTURE_TYPE_I);
115329f1 2070
160d679c 2071 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
0fd90455
MN
2072 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2073
1e491e29 2074 /* if previous was skipped too, then nothing to do ! */
f943e138
MN
2075 if (*mbskip_ptr >= age && s->current_picture.reference){
2076 return;
1e491e29 2077 }
f943e138
MN
2078 } else if(!s->current_picture.reference){
2079 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2080 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2081 } else{
3bb4e23a
FB
2082 *mbskip_ptr = 0; /* not skipped */
2083 }
3994623d 2084 }
115329f1 2085
ffdff4d7 2086 dct_linesize = linesize << s->interlaced_dct;
178fcca8 2087 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
115329f1 2088
b68ab260
MN
2089 if(readable){
2090 dest_y= s->dest[0];
2091 dest_cb= s->dest[1];
2092 dest_cr= s->dest[2];
2093 }else{
9c3d33d6 2094 dest_y = s->b_scratchpad;
ae35f5e1 2095 dest_cb= s->b_scratchpad+16*linesize;
ffdff4d7 2096 dest_cr= s->b_scratchpad+32*linesize;
b68ab260 2097 }
178fcca8 2098
de6d9b64
FB
2099 if (!s->mb_intra) {
2100 /* motion handling */
dfb706da 2101 /* decoding or more than one mb_type (MC was already done otherwise) */
7d1c3fc1 2102 if(!s->encoding){
6a9c8594
AS
2103
2104 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2105 if (s->mv_dir & MV_DIR_FORWARD) {
2106 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2107 }
2108 if (s->mv_dir & MV_DIR_BACKWARD) {
2109 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
2110 }
2111 }
2112
178fcca8
MN
2113 if(lowres_flag){
2114 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
de6d9b64 2115
178fcca8
MN
2116 if (s->mv_dir & MV_DIR_FORWARD) {
2117 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
2118 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2119 }
2120 if (s->mv_dir & MV_DIR_BACKWARD) {
2121 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
2122 }
2123 }else{
2833fc46 2124 op_qpix= s->me.qpel_put;
975a1447 2125 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
178fcca8 2126 op_pix = s->dsp.put_pixels_tab;
178fcca8
MN
2127 }else{
2128 op_pix = s->dsp.put_no_rnd_pixels_tab;
178fcca8
MN
2129 }
2130 if (s->mv_dir & MV_DIR_FORWARD) {
2131 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2132 op_pix = s->dsp.avg_pixels_tab;
2833fc46 2133 op_qpix= s->me.qpel_avg;
178fcca8
MN
2134 }
2135 if (s->mv_dir & MV_DIR_BACKWARD) {
2136 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2137 }
9dbcbd92 2138 }
de6d9b64
FB
2139 }
2140
0f440e02 2141 /* skip dequant / idct if we are really late ;) */
8c3eba7c 2142 if(s->avctx->skip_idct){
975a1447
SS
2143 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2144 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
8c3eba7c
MN
2145 || s->avctx->skip_idct >= AVDISCARD_ALL)
2146 goto skip_idct;
2147 }
0f440e02 2148
de6d9b64 2149 /* add dct residue */
8c51620f 2150 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
a0201736 2151 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
178fcca8
MN
2152 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2153 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2154 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2155 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
0f440e02 2156
49fb20cb 2157 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2d974017
BC
2158 if (s->chroma_y_shift){
2159 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2160 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2161 }else{
2162 dct_linesize >>= 1;
2163 dct_offset >>=1;
2164 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2165 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2166 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2167 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2168 }
b50eef3a 2169 }
bd7c626a 2170 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
178fcca8
MN
2171 add_dct(s, block[0], 0, dest_y , dct_linesize);
2172 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2173 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2174 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
de6d9b64 2175
49fb20cb 2176 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
ffdff4d7
IK
2177 if(s->chroma_y_shift){//Chroma420
2178 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2179 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2180 }else{
2181 //chroma422
2182 dct_linesize = uvlinesize << s->interlaced_dct;
2183 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2184
2185 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2186 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2187 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2188 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2189 if(!s->chroma_x_shift){//Chroma444
2190 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2191 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2192 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2193 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2194 }
2195 }
2196 }//fi gray
2197 }
d702a2e6 2198 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
1457ab52 2199 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
0f440e02 2200 }
de6d9b64
FB
2201 } else {
2202 /* dct only in intra block */
029911d1 2203 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
178fcca8
MN
2204 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2205 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2206 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2207 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
a0201736 2208
49fb20cb 2209 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2d974017
BC
2210 if(s->chroma_y_shift){
2211 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2212 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2213 }else{
2214 dct_offset >>=1;
2215 dct_linesize >>=1;
2216 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2217 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2218 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3ada94ba 2219 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
477ab036
MN
2220 }
2221 }
2222 }else{
3ada94ba
BF
2223 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2224 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2225 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2226 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
fbb89806 2227
49fb20cb 2228 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3ada94ba
BF
2229 if(s->chroma_y_shift){
2230 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2231 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2232 }else{
fbb89806 2233
3ada94ba
BF
2234 dct_linesize = uvlinesize << s->interlaced_dct;
2235 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
9981dfc6 2236
3ada94ba
BF
2237 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2238 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2239 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2240 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2241 if(!s->chroma_x_shift){//Chroma444
2242 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2243 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2244 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2245 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2246 }
2247 }
2248 }//gray
9981dfc6 2249 }
477ab036 2250 }
3ada94ba
BF
2251skip_idct:
2252 if(!readable){
2253 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2254 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2255 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
34f60ee6 2256 }
477ab036 2257 }
477ab036
MN
2258}
2259
3ada94ba 2260void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
b250f9c6 2261#if !CONFIG_SMALL
bd7c626a
KC
2262 if(s->out_format == FMT_MPEG1) {
2263 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2264 else MPV_decode_mb_internal(s, block, 0, 1);
2265 } else
2266#endif
2267 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2268 else MPV_decode_mb_internal(s, block, 0, 0);
77ea0d4b
MN
2269}
2270
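/* Dispatch note: MPV_decode_mb_internal() is av_always_inline and only ever
 * receives compile-time constant lowres_flag/is_mpeg12 values, so each call
 * above expands into a specialized variant with its dead branches removed;
 * CONFIG_SMALL builds keep only the generic is_mpeg12==0 pair to save code
 * size.
 */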
3ada94ba
BF
2271/**
2272 *
2273 * @param h the normal height; it will be reduced automatically if needed for the last row
2274 */
2275void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
6a9c8594
AS
2276 const int field_pic= s->picture_structure != PICT_FRAME;
2277 if(field_pic){
2278 h <<= 1;
2279 y <<= 1;
2280 }
2281
2282 if (!s->avctx->hwaccel
2283 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2284 && s->unrestricted_mv
2285 && s->current_picture.reference
2286 && !s->intra_only
2287 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2288 int sides = 0, edge_h;
c9c49387
JGG
2289 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2290 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
6a9c8594
AS
2291 if (y==0) sides |= EDGE_TOP;
2292 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2293
2294 edge_h= FFMIN(h, s->v_edge_pos - y);
2295
c9c49387
JGG
2296 s->dsp.draw_edges(s->current_picture_ptr->data[0] + y *s->linesize , s->linesize,
2297 s->h_edge_pos , edge_h , EDGE_WIDTH , EDGE_WIDTH , sides);
2298 s->dsp.draw_edges(s->current_picture_ptr->data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2299 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2300 s->dsp.draw_edges(s->current_picture_ptr->data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2301 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
6a9c8594
AS
2302 }
2303
2304 h= FFMIN(h, s->avctx->height - y);
2305
2306 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2307
3ada94ba
BF
2308 if (s->avctx->draw_horiz_band) {
2309 AVFrame *src;
2310 int offset[4];
77ea0d4b 2311
975a1447 2312 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
3ada94ba
BF
2313 src= (AVFrame*)s->current_picture_ptr;
2314 else if(s->last_picture_ptr)
2315 src= (AVFrame*)s->last_picture_ptr;
2316 else
2317 return;
115329f1 2318
975a1447 2319 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
3ada94ba
BF
2320 offset[0]=
2321 offset[1]=
2322 offset[2]=
2323 offset[3]= 0;
77ea0d4b 2324 }else{
cea96420 2325 offset[0]= y * s->linesize;
3ada94ba
BF
2326 offset[1]=
2327 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2328 offset[3]= 0;
77ea0d4b 2329 }
115329f1 2330
3ada94ba 2331 emms_c();
77ea0d4b 2332
3ada94ba
BF
2333 s->avctx->draw_horiz_band(s->avctx, src, offset,
2334 y, s->picture_structure, h);
2335 }
2336}
115329f1 2337
3ada94ba
BF
2338void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2339 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2340 const int uvlinesize= s->current_picture.linesize[1];
2341 const int mb_size= 4 - s->avctx->lowres;
77ea0d4b 2342
3ada94ba
BF
2343 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2344 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2345 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2346 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2347 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2348 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2349 //block_index is not used by mpeg2, so it is not affected by chroma_format
115329f1 2350
3ada94ba
BF
2351 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2352 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2353 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
115329f1 2354
975a1447 2355 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3ada94ba 2356 {
078cdecf 2357 if(s->picture_structure==PICT_FRAME){
3ada94ba
BF
2358 s->dest[0] += s->mb_y * linesize << mb_size;
2359 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2360 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
078cdecf
MN
2361 }else{
2362 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2363 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2364 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2365 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2366 }
77ea0d4b 2367 }
77ea0d4b 2368}
115329f1 2369
3ada94ba
BF
2370void ff_mpeg_flush(AVCodecContext *avctx){
2371 int i;
2372 MpegEncContext *s = avctx->priv_data;
77ea0d4b 2373
3ada94ba
BF
2374 if(s==NULL || s->picture==NULL)
2375 return;
77ea0d4b 2376
6a9c8594 2377 for(i=0; i<s->picture_count; i++){
3ada94ba
BF
2378 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2379 || s->picture[i].type == FF_BUFFER_TYPE_USER))
34e46c44 2380 free_frame_buffer(s, &s->picture[i]);
de6d9b64 2381 }
3ada94ba 2382 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
115329f1 2383
3ada94ba 2384 s->mb_x= s->mb_y= 0;
9d9a6239 2385 s->closed_gop= 0;
7801d21d 2386
3ada94ba
BF
2387 s->parse_context.state= -1;
2388 s->parse_context.frame_start_found= 0;
2389 s->parse_context.overread= 0;
2390 s->parse_context.overread_index= 0;
2391 s->parse_context.index= 0;
2392 s->parse_context.last_index= 0;
2393 s->bitstream_buffer_size=0;
2394 s->pp_time=0;
de6d9b64
FB
2395}
2396
115329f1 2397static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
21af69f7 2398 DCTELEM *block, int n, int qscale)
de6d9b64 2399{
badaf88e 2400 int i, level, nCoeffs;
0c1a9eda 2401 const uint16_t *quant_matrix;
de6d9b64 2402
2ad1516a 2403 nCoeffs= s->block_last_index[n];
115329f1
DB
2404
2405 if (n < 4)
d50635cd
MN
2406 block[0] = block[0] * s->y_dc_scale;
2407 else
2408 block[0] = block[0] * s->c_dc_scale;
2409 /* XXX: only mpeg1 */
2410 quant_matrix = s->intra_matrix;
2411 for(i=1;i<=nCoeffs;i++) {
2412 int j= s->intra_scantable.permutated[i];
2413 level = block[j];
2414 if (level) {
2415 if (level < 0) {
2416 level = -level;
2417 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2418 level = (level - 1) | 1;
2419 level = -level;
2420 } else {
2421 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2422 level = (level - 1) | 1;
de6d9b64 2423 }
d50635cd 2424 block[j] = level;
de6d9b64 2425 }
d50635cd
MN
2426 }
2427}
2428
115329f1 2429static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
d50635cd
MN
2430 DCTELEM *block, int n, int qscale)
2431{
2432 int i, level, nCoeffs;
2433 const uint16_t *quant_matrix;
2434
2435 nCoeffs= s->block_last_index[n];
115329f1 2436
d50635cd
MN
2437 quant_matrix = s->inter_matrix;
2438 for(i=0; i<=nCoeffs; i++) {
2439 int j= s->intra_scantable.permutated[i];
2440 level = block[j];
2441 if (level) {
2442 if (level < 0) {
2443 level = -level;
2444 level = (((level << 1) + 1) * qscale *
2445 ((int) (quant_matrix[j]))) >> 4;
2446 level = (level - 1) | 1;
2447 level = -level;
2448 } else {
2449 level = (((level << 1) + 1) * qscale *
2450 ((int) (quant_matrix[j]))) >> 4;
2451 level = (level - 1) | 1;
de6d9b64 2452 }
d50635cd 2453 block[j] = level;
de6d9b64
FB
2454 }
2455 }
2456}
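/* Worked example for the two MPEG-1 dequantizers above (assuming qscale=4,
 * quant_matrix[j]=16):
 *     intra, level= 2:  (2*4*16) >> 3       = 16, then (16-1)|1 =  15
 *     inter, level= 2:  ((2*2+1)*4*16) >> 4 = 20, then (20-1)|1 =  19
 *     inter, level=-2:  same magnitude, negated at the end      = -19
 * The final "(level - 1) | 1" forces every reconstructed coefficient to be
 * odd, MPEG-1's oddification rule for keeping encoder/decoder IDCT mismatch
 * bounded.
 */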
21af69f7 2457
115329f1 2458static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
9dbf1ddd
MN
2459 DCTELEM *block, int n, int qscale)
2460{
2461 int i, level, nCoeffs;
0c1a9eda 2462 const uint16_t *quant_matrix;
9dbf1ddd 2463
2ad1516a
MN
2464 if(s->alternate_scan) nCoeffs= 63;
2465 else nCoeffs= s->block_last_index[n];
115329f1
DB
2466
2467 if (n < 4)
d50635cd
MN
2468 block[0] = block[0] * s->y_dc_scale;
2469 else
2470 block[0] = block[0] * s->c_dc_scale;
2471 quant_matrix = s->intra_matrix;
2472 for(i=1;i<=nCoeffs;i++) {
2473 int j= s->intra_scantable.permutated[i];
2474 level = block[j];
2475 if (level) {
2476 if (level < 0) {
2477 level = -level;
2478 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2479 level = -level;
2480 } else {
2481 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2482 }
2483 block[j] = level;
2484 }
2485 }
2486}
2487
e27b6e62
MN
2488static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2489 DCTELEM *block, int n, int qscale)
2490{
2491 int i, level, nCoeffs;
2492 const uint16_t *quant_matrix;
2493 int sum=-1;
2494
2495 if(s->alternate_scan) nCoeffs= 63;
2496 else nCoeffs= s->block_last_index[n];
2497
2498 if (n < 4)
2499 block[0] = block[0] * s->y_dc_scale;
2500 else
2501 block[0] = block[0] * s->c_dc_scale;
2502 quant_matrix = s->intra_matrix;
2503 for(i=1;i<=nCoeffs;i++) {
2504 int j= s->intra_scantable.permutated[i];
2505 level = block[j];
2506 if (level) {
2507 if (level < 0) {
2508 level = -level;
2509 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2510 level = -level;
2511 } else {
2512 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2513 }
2514 block[j] = level;
2515 sum+=level;
2516 }
2517 }
2518 block[63]^=sum&1;
2519}
2520
115329f1 2521static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
d50635cd
MN
2522 DCTELEM *block, int n, int qscale)
2523{
2524 int i, level, nCoeffs;
2525 const uint16_t *quant_matrix;
2526 int sum=-1;
2527
2528 if(s->alternate_scan) nCoeffs= 63;
2529 else nCoeffs= s->block_last_index[n];
115329f1 2530
d50635cd
MN
2531 quant_matrix = s->inter_matrix;
2532 for(i=0; i<=nCoeffs; i++) {
2533 int j= s->intra_scantable.permutated[i];
2534 level = block[j];
2535 if (level) {
2536 if (level < 0) {
2537 level = -level;
2538 level = (((level << 1) + 1) * qscale *
2539 ((int) (quant_matrix[j]))) >> 4;
2540 level = -level;
2541 } else {
2542 level = (((level << 1) + 1) * qscale *
2543 ((int) (quant_matrix[j]))) >> 4;
2544 }
2545 block[j] = level;
2546 sum+=level;
2547 }
2548 }
2549 block[63]^=sum&1;
2550}
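/* Mismatch-control note for the bitexact intra and the inter MPEG-2
 * dequantizers above: instead of oddification, sum starts at -1 so "sum & 1"
 * is 1 exactly when the summed reconstructed coefficients come out even;
 * toggling the LSB of block[63] then makes the block's total odd, as
 * ISO/IEC 13818-2 requires.
 */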
2551
115329f1 2552static void dct_unquantize_h263_intra_c(MpegEncContext *s,
d50635cd
MN
2553 DCTELEM *block, int n, int qscale)
2554{
2555 int i, level, qmul, qadd;
2556 int nCoeffs;
115329f1 2557
d50635cd 2558 assert(s->block_last_index[n]>=0);
115329f1 2559
d50635cd 2560 qmul = qscale << 1;
115329f1 2561
d50635cd 2562 if (!s->h263_aic) {
115329f1 2563 if (n < 4)
9dbf1ddd
MN
2564 block[0] = block[0] * s->y_dc_scale;
2565 else
2566 block[0] = block[0] * s->c_dc_scale;
d50635cd
MN
2567 qadd = (qscale - 1) | 1;
2568 }else{
2569 qadd = 0;
2570 }
2571 if(s->ac_pred)
2572 nCoeffs=63;
2573 else
2574 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2575
2576 for(i=1; i<=nCoeffs; i++) {
2577 level = block[i];
2578 if (level) {
2579 if (level < 0) {
2580 level = level * qmul - qadd;
2581 } else {
2582 level = level * qmul + qadd;
9dbf1ddd 2583 }
d50635cd 2584 block[i] = level;
9dbf1ddd 2585 }
9dbf1ddd
MN
2586 }
2587}
2588
115329f1 2589static void dct_unquantize_h263_inter_c(MpegEncContext *s,
21af69f7
FB
2590 DCTELEM *block, int n, int qscale)
2591{
2592 int i, level, qmul, qadd;
badaf88e 2593 int nCoeffs;
115329f1 2594
2ad1516a 2595 assert(s->block_last_index[n]>=0);
115329f1 2596
2ad1516a
MN
2597 qadd = (qscale - 1) | 1;
2598 qmul = qscale << 1;
115329f1 2599
d50635cd 2600 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
21af69f7 2601
d50635cd 2602 for(i=0; i<=nCoeffs; i++) {
21af69f7
FB
2603 level = block[i];
2604 if (level) {
2605 if (level < 0) {
2606 level = level * qmul - qadd;
2607 } else {
2608 level = level * qmul + qadd;
2609 }
21af69f7
FB
2610 block[i] = level;
2611 }
2612 }
2613}
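/* Worked example for the H.263-style dequantizers above (assuming qscale=10):
 *     qmul = 10 << 1 = 20,  qadd = (10-1)|1 = 9
 *     level= 3  ->  3*20 + 9 =  69
 *     level=-3  -> -3*20 - 9 = -69
 * With h263_aic set, the intra path above uses qadd=0 instead.
 */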
de6d9b64 2614
b776e3d1
AJ
2615/**
2616 * set qscale and update qscale dependent variables.
2617 */
2618void ff_set_qscale(MpegEncContext * s, int qscale)
2619{
2620 if (qscale < 1)
2621 qscale = 1;
2622 else if (qscale > 31)
2623 qscale = 31;
2624
2625 s->qscale = qscale;
2626 s->chroma_qscale= s->chroma_qscale_table[qscale];
2627
2628 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2629 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2630}
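/* Usage sketch: e.g. ff_set_qscale(s, 40) clamps the quantizer to 31 and
 * refreshes s->chroma_qscale, s->y_dc_scale and s->c_dc_scale from their
 * per-codec tables in one place.
 */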
6a9c8594
AS
2631
2632void MPV_report_decode_progress(MpegEncContext *s)
2633{
2634 if (s->pict_type != FF_B_TYPE && !s->partitioned_frame && !s->error_occurred)
2635 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
2636}