1 /*
2 * Copyright (c) 2003 The FFmpeg Project
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /*
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple QuickTime files. QuickTime files
24 * have stsd atoms to describe media trak properties. An stsd atom for a
25 * video trak contains one or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
31 *
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
37 *
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41 */
42
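/*
 * Purely illustrative sketch (not part of the decoder, and normally handled by
 * libavformat's MOV demuxer): an application that parses the QuickTime file
 * itself could pass the atom along these lines, where "image_desc" and
 * "image_desc_size" are hypothetical names for the ImageDescription payload
 * beginning with the 'SVQ3' fourcc:
 *
 *     avctx->extradata = av_mallocz(image_desc_size + FF_INPUT_BUFFER_PADDING_SIZE);
 *     if (!avctx->extradata)
 *         return AVERROR(ENOMEM);
 *     memcpy(avctx->extradata, image_desc, image_desc_size);
 *     avctx->extradata_size = image_desc_size;
 */
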
43 #if CONFIG_ZLIB
44 #include <zlib.h>
45 #endif
46
47 #include "svq1.h"
48
49 /**
50 * @file libavcodec/svq3.c
51 * svq3 decoder.
52 */
53
54 #define FULLPEL_MODE 1
55 #define HALFPEL_MODE 2
56 #define THIRDPEL_MODE 3
57 #define PREDICT_MODE 4
58
59 /* dual scan (from some older h264 draft)
60  o-->o-->o   o
61          |  /|
62  o   o   o / o
63  | / |   |/  |
64  o   o   o   o
65    /
66  o-->o-->o-->o
67 */
68 static const uint8_t svq3_scan[16] = {
69 0+0*4, 1+0*4, 2+0*4, 2+1*4,
70 2+2*4, 3+0*4, 3+1*4, 3+2*4,
71 0+1*4, 0+2*4, 1+1*4, 1+2*4,
72 0+3*4, 1+3*4, 2+3*4, 3+3*4,
73 };
74
75 static const uint8_t svq3_pred_0[25][2] = {
76 { 0, 0 },
77 { 1, 0 }, { 0, 1 },
78 { 0, 2 }, { 1, 1 }, { 2, 0 },
79 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
80 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
81 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
82 { 2, 4 }, { 3, 3 }, { 4, 2 },
83 { 4, 3 }, { 3, 4 },
84 { 4, 4 }
85 };
86
87 static const int8_t svq3_pred_1[6][6][5] = {
88 { { 2,-1,-1,-1,-1 }, { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 },
89 { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 }, { 1, 2,-1,-1,-1 } },
90 { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
91 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
92 { { 2, 0,-1,-1,-1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
93 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
94 { { 2, 0,-1,-1,-1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
95 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
96 { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
97 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
98 { { 0, 2,-1,-1,-1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
99 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
100 };
101
102 static const struct { uint8_t run; uint8_t level; } svq3_dct_tables[2][16] = {
103 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
104 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
105 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
106 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
107 };
108
109 static const uint32_t svq3_dequant_coeff[32] = {
110 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
111 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
112 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
113 61694, 68745, 77615, 89113,100253,109366,126635,141533
114 };
115
116
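/* dequantize and inverse-transform the 4x4 array of luma DC coefficients in
 * place, using the same 13/7/17 butterflies as svq3_add_idct_c() below;
 * (x*qmul + 0x80000) >> 20 performs the fixed-point dequant rounding */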
117 static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp)
118 {
119 const int qmul = svq3_dequant_coeff[qp];
120 #define stride 16
121 int i;
122 int temp[16];
123 static const int x_offset[4] = {0, 1*stride, 4* stride, 5*stride};
124 static const int y_offset[4] = {0, 2*stride, 8* stride, 10*stride};
125
126 for (i = 0; i < 4; i++){
127 const int offset = y_offset[i];
128 const int z0 = 13*(block[offset+stride*0] + block[offset+stride*4]);
129 const int z1 = 13*(block[offset+stride*0] - block[offset+stride*4]);
130 const int z2 = 7* block[offset+stride*1] - 17*block[offset+stride*5];
131 const int z3 = 17* block[offset+stride*1] + 7*block[offset+stride*5];
132
133 temp[4*i+0] = z0+z3;
134 temp[4*i+1] = z1+z2;
135 temp[4*i+2] = z1-z2;
136 temp[4*i+3] = z0-z3;
137 }
138
139 for (i = 0; i < 4; i++){
140 const int offset = x_offset[i];
141 const int z0 = 13*(temp[4*0+i] + temp[4*2+i]);
142 const int z1 = 13*(temp[4*0+i] - temp[4*2+i]);
143 const int z2 = 7* temp[4*1+i] - 17*temp[4*3+i];
144 const int z3 = 17* temp[4*1+i] + 7*temp[4*3+i];
145
146 block[stride*0 +offset] = ((z0 + z3)*qmul + 0x80000) >> 20;
147 block[stride*2 +offset] = ((z1 + z2)*qmul + 0x80000) >> 20;
148 block[stride*8 +offset] = ((z1 - z2)*qmul + 0x80000) >> 20;
149 block[stride*10+offset] = ((z0 - z3)*qmul + 0x80000) >> 20;
150 }
151 }
152 #undef stride
153
154 static void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp,
155 int dc)
156 {
157 const int qmul = svq3_dequant_coeff[qp];
158 int i;
159 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
160
161 if (dc) {
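/* fold the DC term into the rounding constant rr applied in the second pass below */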
162 dc = 13*13*((dc == 1) ? 1538*block[0] : ((qmul*(block[0] >> 3)) / 2));
163 block[0] = 0;
164 }
165
166 for (i = 0; i < 4; i++) {
167 const int z0 = 13*(block[0 + 4*i] + block[2 + 4*i]);
168 const int z1 = 13*(block[0 + 4*i] - block[2 + 4*i]);
169 const int z2 = 7* block[1 + 4*i] - 17*block[3 + 4*i];
170 const int z3 = 17* block[1 + 4*i] + 7*block[3 + 4*i];
171
172 block[0 + 4*i] = z0 + z3;
173 block[1 + 4*i] = z1 + z2;
174 block[2 + 4*i] = z1 - z2;
175 block[3 + 4*i] = z0 - z3;
176 }
177
178 for (i = 0; i < 4; i++) {
179 const int z0 = 13*(block[i + 4*0] + block[i + 4*2]);
180 const int z1 = 13*(block[i + 4*0] - block[i + 4*2]);
181 const int z2 = 7* block[i + 4*1] - 17*block[i + 4*3];
182 const int z3 = 17* block[i + 4*1] + 7*block[i + 4*3];
183 const int rr = (dc + 0x80000);
184
185 dst[i + stride*0] = cm[ dst[i + stride*0] + (((z0 + z3)*qmul + rr) >> 20) ];
186 dst[i + stride*1] = cm[ dst[i + stride*1] + (((z1 + z2)*qmul + rr) >> 20) ];
187 dst[i + stride*2] = cm[ dst[i + stride*2] + (((z1 - z2)*qmul + rr) >> 20) ];
188 dst[i + stride*3] = cm[ dst[i + stride*3] + (((z0 - z3)*qmul + rr) >> 20) ];
189 }
190 }
191
192 static inline int svq3_decode_block(GetBitContext *gb, DCTELEM *block,
193 int index, const int type)
194 {
195 static const uint8_t *const scan_patterns[4] =
196 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
197
198 int run, level, sign, vlc, limit;
199 const int intra = (3 * type) >> 2;
200 const uint8_t *const scan = scan_patterns[type];
201
202 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
203 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
204
205 if (vlc == INVALID_VLC)
206 return -1;
207
208 sign = (vlc & 0x1) - 1;
209 vlc = (vlc + 1) >> 1;
210
211 if (type == 3) {
212 if (vlc < 3) {
213 run = 0;
214 level = vlc;
215 } else if (vlc < 4) {
216 run = 1;
217 level = 1;
218 } else {
219 run = (vlc & 0x3);
220 level = ((vlc + 9) >> 2) - run;
221 }
222 } else {
223 if (vlc < 16) {
224 run = svq3_dct_tables[intra][vlc].run;
225 level = svq3_dct_tables[intra][vlc].level;
226 } else if (intra) {
227 run = (vlc & 0x7);
228 level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
229 } else {
230 run = (vlc & 0xF);
231 level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
232 }
233 }
234
235 if ((index += run) >= limit)
236 return -1;
237
238 block[scan[index]] = (level ^ sign) - sign;
239 }
240
241 if (type != 2) {
242 break;
243 }
244 }
245
246 return 0;
247 }
248
249 static inline void svq3_mc_dir_part(MpegEncContext *s,
250 int x, int y, int width, int height,
251 int mx, int my, int dxy,
252 int thirdpel, int dir, int avg)
253 {
254 const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture;
255 uint8_t *src, *dest;
256 int i, emu = 0;
257 int blocksize = 2 - (width>>3); //16->0, 8->1, 4->2
258
259 mx += x;
260 my += y;
261
262 if (mx < 0 || mx >= (s->h_edge_pos - width - 1) ||
263 my < 0 || my >= (s->v_edge_pos - height - 1)) {
264
265 if ((s->flags & CODEC_FLAG_EMU_EDGE)) {
266 emu = 1;
267 }
268
269 mx = av_clip (mx, -16, (s->h_edge_pos - width + 15));
270 my = av_clip (my, -16, (s->v_edge_pos - height + 15));
271 }
272
273 /* form component predictions */
274 dest = s->current_picture.data[0] + x + y*s->linesize;
275 src = pic->data[0] + mx + my*s->linesize;
276
277 if (emu) {
278 ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1),
279 mx, my, s->h_edge_pos, s->v_edge_pos);
280 src = s->edge_emu_buffer;
281 }
282 if (thirdpel)
283 (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize, width, height);
284 else
285 (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize, height);
286
287 if (!(s->flags & CODEC_FLAG_GRAY)) {
288 mx = (mx + (mx < (int) x)) >> 1;
289 my = (my + (my < (int) y)) >> 1;
290 width = (width >> 1);
291 height = (height >> 1);
292 blocksize++;
293
294 for (i = 1; i < 3; i++) {
295 dest = s->current_picture.data[i] + (x >> 1) + (y >> 1)*s->uvlinesize;
296 src = pic->data[i] + mx + my*s->uvlinesize;
297
298 if (emu) {
299 ff_emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1),
300 mx, my, (s->h_edge_pos >> 1), (s->v_edge_pos >> 1));
301 src = s->edge_emu_buffer;
302 }
303 if (thirdpel)
304 (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->uvlinesize, width, height);
305 else
306 (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->uvlinesize, height);
307 }
308 }
309 }
310
311 static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir,
312 int avg)
313 {
314 int i, j, k, mx, my, dx, dy, x, y;
315 MpegEncContext *const s = (MpegEncContext *) h;
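/* partition sizes 0..6 select 16x16, 8x16, 16x8, 8x8, 4x8, 8x4 and 4x4 blocks */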
316 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
317 const int part_height = 16 >> ((unsigned) (size + 1) / 3);
318 const int extra_width = (mode == PREDICT_MODE) ? -16*6 : 0;
319 const int h_edge_pos = 6*(s->h_edge_pos - part_width ) - extra_width;
320 const int v_edge_pos = 6*(s->v_edge_pos - part_height) - extra_width;
321
322 for (i = 0; i < 16; i += part_height) {
323 for (j = 0; j < 16; j += part_width) {
324 const int b_xy = (4*s->mb_x + (j >> 2)) + (4*s->mb_y + (i >> 2))*h->b_stride;
325 int dxy;
326 x = 16*s->mb_x + j;
327 y = 16*s->mb_y + i;
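/* map the (j,i) luma offsets to the H.264-style 4x4 block index expected by scan8[] */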
328 k = ((j >> 2) & 1) + ((i >> 1) & 2) + ((j >> 1) & 4) + (i & 8);
329
330 if (mode != PREDICT_MODE) {
331 pred_motion(h, k, (part_width >> 2), dir, 1, &mx, &my);
332 } else {
333 mx = s->next_picture.motion_val[0][b_xy][0]<<1;
334 my = s->next_picture.motion_val[0][b_xy][1]<<1;
335
336 if (dir == 0) {
337 mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
338 my = ((my * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
339 } else {
340 mx = ((mx * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
341 my = ((my * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
342 }
343 }
344
345 /* clip motion vector prediction to frame border */
346 mx = av_clip(mx, extra_width - 6*x, h_edge_pos - 6*x);
347 my = av_clip(my, extra_width - 6*y, v_edge_pos - 6*y);
348
349 /* get (optional) motion vector differential */
350 if (mode == PREDICT_MODE) {
351 dx = dy = 0;
352 } else {
353 dy = svq3_get_se_golomb(&s->gb);
354 dx = svq3_get_se_golomb(&s->gb);
355
356 if (dx == INVALID_VLC || dy == INVALID_VLC) {
357 av_log(h->s.avctx, AV_LOG_ERROR, "invalid MV vlc\n");
358 return -1;
359 }
360 }
361
362 /* compute motion vector */
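/* predictors and cached MVs are kept in 1/6-pel units; the 0x3000/0x6000 bias
 * (a multiple of the divisor, compensated by the -0x1000 afterwards) keeps the
 * dividend non-negative so the truncating division rounds like a floor */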
363 if (mode == THIRDPEL_MODE) {
364 int fx, fy;
365 mx = ((mx + 1)>>1) + dx;
366 my = ((my + 1)>>1) + dy;
367 fx = ((unsigned)(mx + 0x3000))/3 - 0x1000;
368 fy = ((unsigned)(my + 0x3000))/3 - 0x1000;
369 dxy = (mx - 3*fx) + 4*(my - 3*fy);
370
371 svq3_mc_dir_part(s, x, y, part_width, part_height, fx, fy, dxy, 1, dir, avg);
372 mx += mx;
373 my += my;
374 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
375 mx = ((unsigned)(mx + 1 + 0x3000))/3 + dx - 0x1000;
376 my = ((unsigned)(my + 1 + 0x3000))/3 + dy - 0x1000;
377 dxy = (mx&1) + 2*(my&1);
378
379 svq3_mc_dir_part(s, x, y, part_width, part_height, mx>>1, my>>1, dxy, 0, dir, avg);
380 mx *= 3;
381 my *= 3;
382 } else {
383 mx = ((unsigned)(mx + 3 + 0x6000))/6 + dx - 0x1000;
384 my = ((unsigned)(my + 3 + 0x6000))/6 + dy - 0x1000;
385
386 svq3_mc_dir_part(s, x, y, part_width, part_height, mx, my, 0, 0, dir, avg);
387 mx *= 6;
388 my *= 6;
389 }
390
391 /* update mv_cache */
392 if (mode != PREDICT_MODE) {
393 int32_t mv = pack16to32(mx,my);
394
395 if (part_height == 8 && i < 8) {
396 *(int32_t *) h->mv_cache[dir][scan8[k] + 1*8] = mv;
397
398 if (part_width == 8 && j < 8) {
399 *(int32_t *) h->mv_cache[dir][scan8[k] + 1 + 1*8] = mv;
400 }
401 }
402 if (part_width == 8 && j < 8) {
403 *(int32_t *) h->mv_cache[dir][scan8[k] + 1] = mv;
404 }
405 if (part_width == 4 || part_height == 4) {
406 *(int32_t *) h->mv_cache[dir][scan8[k]] = mv;
407 }
408 }
409
410 /* write back motion vectors */
411 fill_rectangle(s->current_picture.motion_val[dir][b_xy], part_width>>2, part_height>>2, h->b_stride, pack16to32(mx,my), 4);
412 }
413 }
414
415 return 0;
416 }
417
418 static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
419 {
420 int i, j, k, m, dir, mode;
421 int cbp = 0;
422 uint32_t vlc;
423 int8_t *top, *left;
424 MpegEncContext *const s = (MpegEncContext *) h;
425 const int mb_xy = h->mb_xy;
426 const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
427
428 h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
429 h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
430 h->topright_samples_available = 0xFFFF;
431
432 if (mb_type == 0) { /* SKIP */
433 if (s->pict_type == FF_P_TYPE || s->next_picture.mb_type[mb_xy] == -1) {
434 svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);
435
436 if (s->pict_type == FF_B_TYPE) {
437 svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
438 }
439
440 mb_type = MB_TYPE_SKIP;
441 } else {
442 mb_type = FFMIN(s->next_picture.mb_type[mb_xy], 6);
443 if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
444 return -1;
445 if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
446 return -1;
447
448 mb_type = MB_TYPE_16x16;
449 }
450 } else if (mb_type < 8) { /* INTER */
451 if (h->thirdpel_flag && h->halfpel_flag == !get_bits1 (&s->gb)) {
452 mode = THIRDPEL_MODE;
453 } else if (h->halfpel_flag && h->thirdpel_flag == !get_bits1 (&s->gb)) {
454 mode = HALFPEL_MODE;
455 } else {
456 mode = FULLPEL_MODE;
457 }
458
459 /* fill caches */
460 /* note ref_cache should contain here:
461 ????????
462 ???11111
463 N??11111
464 N??11111
465 N??11111
466 */
467
468 for (m = 0; m < 2; m++) {
469 if (s->mb_x > 0 && h->intra4x4_pred_mode[mb_xy - 1][0] != -1) {
470 for (i = 0; i < 4; i++) {
471 *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride];
472 }
473 } else {
474 for (i = 0; i < 4; i++) {
475 *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = 0;
476 }
477 }
478 if (s->mb_y > 0) {
479 memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
480 memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[mb_xy - s->mb_stride][4] == -1) ? PART_NOT_AVAILABLE : 1, 4);
481
482 if (s->mb_x < (s->mb_width - 1)) {
483 *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4];
484 h->ref_cache[m][scan8[0] + 4 - 1*8] =
485 (h->intra4x4_pred_mode[mb_xy - s->mb_stride + 1][0] == -1 ||
486 h->intra4x4_pred_mode[mb_xy - s->mb_stride ][4] == -1) ? PART_NOT_AVAILABLE : 1;
487 }else
488 h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
489 if (s->mb_x > 0) {
490 *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1];
491 h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] == -1) ? PART_NOT_AVAILABLE : 1;
492 }else
493 h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
494 }else
495 memset(&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);
496
497 if (s->pict_type != FF_B_TYPE)
498 break;
499 }
500
501 /* decode motion vector(s) and form prediction(s) */
502 if (s->pict_type == FF_P_TYPE) {
503 if (svq3_mc_dir(h, (mb_type - 1), mode, 0, 0) < 0)
504 return -1;
505 } else { /* FF_B_TYPE */
506 if (mb_type != 2) {
507 if (svq3_mc_dir(h, 0, mode, 0, 0) < 0)
508 return -1;
509 } else {
510 for (i = 0; i < 4; i++) {
511 memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
512 }
513 }
514 if (mb_type != 1) {
515 if (svq3_mc_dir(h, 0, mode, 1, (mb_type == 3)) < 0)
516 return -1;
517 } else {
518 for (i = 0; i < 4; i++) {
519 memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
520 }
521 }
522 }
523
524 mb_type = MB_TYPE_16x16;
525 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
526 memset(h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t));
527
528 if (mb_type == 8) {
529 if (s->mb_x > 0) {
530 for (i = 0; i < 4; i++) {
531 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[mb_xy - 1][i];
532 }
533 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) {
534 h->left_samples_available = 0x5F5F;
535 }
536 }
537 if (s->mb_y > 0) {
538 h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][4];
539 h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][5];
540 h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][6];
541 h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][3];
542
543 if (h->intra4x4_pred_mode_cache[4+8*0] == -1) {
544 h->top_samples_available = 0x33FF;
545 }
546 }
547
548 /* decode prediction codes for luma blocks */
549 for (i = 0; i < 16; i+=2) {
550 vlc = svq3_get_ue_golomb(&s->gb);
551
552 if (vlc >= 25){
553 av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
554 return -1;
555 }
556
557 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
558 top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
559
560 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
561 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
562
563 if (left[1] == -1 || left[2] == -1){
564 av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n");
565 return -1;
566 }
567 }
568 } else { /* mb_type == 33, DC_128_PRED block type */
569 for (i = 0; i < 4; i++) {
570 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4);
571 }
572 }
573
574 write_back_intra_pred_mode(h);
575
576 if (mb_type == 8) {
577 check_intra4x4_pred_mode(h);
578
579 h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
580 h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
581 } else {
582 for (i = 0; i < 4; i++) {
583 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4);
584 }
585
586 h->top_samples_available = 0x33FF;
587 h->left_samples_available = 0x5F5F;
588 }
589
590 mb_type = MB_TYPE_INTRA4x4;
591 } else { /* INTRA16x16 */
592 dir = i_mb_type_info[mb_type - 8].pred_mode;
593 dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;
594
595 if ((h->intra16x16_pred_mode = check_intra_pred_mode(h, dir)) == -1){
596 av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
597 return -1;
598 }
599
600 cbp = i_mb_type_info[mb_type - 8].cbp;
601 mb_type = MB_TYPE_INTRA16x16;
602 }
603
604 if (!IS_INTER(mb_type) && s->pict_type != FF_I_TYPE) {
605 for (i = 0; i < 4; i++) {
606 memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
607 }
608 if (s->pict_type == FF_B_TYPE) {
609 for (i = 0; i < 4; i++) {
610 memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
611 }
612 }
613 }
614 if (!IS_INTRA4x4(mb_type)) {
615 memset(h->intra4x4_pred_mode[mb_xy], DC_PRED, 8);
616 }
617 if (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE) {
618 memset(h->non_zero_count_cache + 8, 0, 4*9*sizeof(uint8_t));
619 s->dsp.clear_blocks(h->mb);
620 }
621
622 if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE)) {
623 if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48){
624 av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
625 return -1;
626 }
627
628 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
629 }
630 if (IS_INTRA16x16(mb_type) || (s->pict_type != FF_I_TYPE && s->adaptive_quant && cbp)) {
631 s->qscale += svq3_get_se_golomb(&s->gb);
632
633 if (s->qscale > 31){
634 av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
635 return -1;
636 }
637 }
638 if (IS_INTRA16x16(mb_type)) {
639 if (svq3_decode_block(&s->gb, h->mb, 0, 0)){
640 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n");
641 return -1;
642 }
643 }
644
645 if (cbp) {
646 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
647 const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
648
649 for (i = 0; i < 4; i++) {
650 if ((cbp & (1 << i))) {
651 for (j = 0; j < 4; j++) {
652 k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j);
653 h->non_zero_count_cache[ scan8[k] ] = 1;
654
655 if (svq3_decode_block(&s->gb, &h->mb[16*k], index, type)){
656 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding block\n");
657 return -1;
658 }
659 }
660 }
661 }
662
663 if ((cbp & 0x30)) {
664 for (i = 0; i < 2; ++i) {
665 if (svq3_decode_block(&s->gb, &h->mb[16*(16 + 4*i)], 0, 3)){
666 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n");
667 return -1;
668 }
669 }
670
671 if ((cbp & 0x20)) {
672 for (i = 0; i < 8; i++) {
673 h->non_zero_count_cache[ scan8[16+i] ] = 1;
674
675 if (svq3_decode_block(&s->gb, &h->mb[16*(16 + i)], 1, 1)){
676 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n");
677 return -1;
678 }
679 }
680 }
681 }
682 }
683
684 h->cbp= cbp;
685 s->current_picture.mb_type[mb_xy] = mb_type;
686
687 if (IS_INTRA(mb_type)) {
688 h->chroma_pred_mode = check_intra_pred_mode(h, DC_PRED8x8);
689 }
690
691 return 0;
692 }
693
694 static int svq3_decode_slice_header(H264Context *h)
695 {
696 MpegEncContext *const s = (MpegEncContext *) h;
697 const int mb_xy = h->mb_xy;
698 int i, header;
699
700 header = get_bits(&s->gb, 8);
701
702 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
703 /* TODO: what? */
704 av_log(h->s.avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
705 return -1;
706 } else {
707 int length = (header >> 5) & 3;
708
709 h->next_slice_index = get_bits_count(&s->gb) + 8*show_bits(&s->gb, 8*length) + 8*length;
710
711 if (h->next_slice_index > s->gb.size_in_bits) {
712 av_log(h->s.avctx, AV_LOG_ERROR, "slice after bitstream end\n");
713 return -1;
714 }
715
716 s->gb.size_in_bits = h->next_slice_index - 8*(length - 1);
717 skip_bits(&s->gb, 8);
718
719 if (h->svq3_watermark_key) {
720 uint32_t header = AV_RL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1]);
721 AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1], header ^ h->svq3_watermark_key);
722 }
723 if (length > 0) {
724 memcpy((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3],
725 &s->gb.buffer[s->gb.size_in_bits >> 3], (length - 1));
726 }
727 skip_bits_long(&s->gb, 0);
728 }
729
730 if ((i = svq3_get_ue_golomb(&s->gb)) == INVALID_VLC || i >= 3){
731 av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", i);
732 return -1;
733 }
734
735 h->slice_type = golomb_to_pict_type[i];
736
737 if ((header & 0x9F) == 2) {
738 i = (s->mb_num < 64) ? 6 : (1 + av_log2 (s->mb_num - 1));
739 s->mb_skip_run = get_bits(&s->gb, i) - (s->mb_x + (s->mb_y * s->mb_width));
740 } else {
741 skip_bits1(&s->gb);
742 s->mb_skip_run = 0;
743 }
744
745 h->slice_num = get_bits(&s->gb, 8);
746 s->qscale = get_bits(&s->gb, 5);
747 s->adaptive_quant = get_bits1(&s->gb);
748
749 /* unknown fields */
750 skip_bits1(&s->gb);
751
752 if (h->unknown_svq3_flag) {
753 skip_bits1(&s->gb);
754 }
755
756 skip_bits1(&s->gb);
757 skip_bits(&s->gb, 2);
758
759 while (get_bits1(&s->gb)) {
760 skip_bits(&s->gb, 8);
761 }
762
763 /* reset intra predictors and invalidate motion vector references */
764 if (s->mb_x > 0) {
765 memset(h->intra4x4_pred_mode[mb_xy - 1], -1, 4*sizeof(int8_t));
766 memset(h->intra4x4_pred_mode[mb_xy - s->mb_x], -1, 8*sizeof(int8_t)*s->mb_x);
767 }
768 if (s->mb_y > 0) {
769 memset(h->intra4x4_pred_mode[mb_xy - s->mb_stride], -1, 8*sizeof(int8_t)*(s->mb_width - s->mb_x));
770
771 if (s->mb_x > 0) {
772 h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] = -1;
773 }
774 }
775
776 return 0;
777 }
778
779 static av_cold int svq3_decode_init(AVCodecContext *avctx)
780 {
781 MpegEncContext *const s = avctx->priv_data;
782 H264Context *const h = avctx->priv_data;
783 int m;
784 unsigned char *extradata;
785 unsigned int size;
786
787 if (decode_init(avctx) < 0)
788 return -1;
789
790 s->flags = avctx->flags;
791 s->flags2 = avctx->flags2;
792 s->unrestricted_mv = 1;
793 h->is_complex=1;
794
795 if (!s->context_initialized) {
796 s->width = avctx->width;
797 s->height = avctx->height;
798 h->halfpel_flag = 1;
799 h->thirdpel_flag = 1;
800 h->unknown_svq3_flag = 0;
801 h->chroma_qp[0] = h->chroma_qp[1] = 4;
802
803 if (MPV_common_init(s) < 0)
804 return -1;
805
806 h->b_stride = 4*s->mb_width;
807
808 alloc_tables(h);
809
810 /* prowl for the "SEQH" marker in the extradata */
811 extradata = (unsigned char *)avctx->extradata;
812 for (m = 0; m < avctx->extradata_size; m++) {
813 if (!memcmp(extradata, "SEQH", 4))
814 break;
815 extradata++;
816 }
817
818 /* if a match was found, parse the extra data */
819 if (extradata && !memcmp(extradata, "SEQH", 4)) {
820
821 GetBitContext gb;
822 int frame_size_code;
823
824 size = AV_RB32(&extradata[4]);
825 init_get_bits(&gb, extradata + 8, size*8);
826
827 /* 'frame size code' and optional 'width, height' */
828 frame_size_code = get_bits(&gb, 3);
829 switch (frame_size_code) {
830 case 0: avctx->width = 160; avctx->height = 120; break;
831 case 1: avctx->width = 128; avctx->height = 96; break;
832 case 2: avctx->width = 176; avctx->height = 144; break;
833 case 3: avctx->width = 352; avctx->height = 288; break;
834 case 4: avctx->width = 704; avctx->height = 576; break;
835 case 5: avctx->width = 240; avctx->height = 180; break;
836 case 6: avctx->width = 320; avctx->height = 240; break;
837 case 7:
838 avctx->width = get_bits(&gb, 12);
839 avctx->height = get_bits(&gb, 12);
840 break;
841 }
842
843 h->halfpel_flag = get_bits1(&gb);
844 h->thirdpel_flag = get_bits1(&gb);
845
846 /* unknown fields */
847 skip_bits1(&gb);
848 skip_bits1(&gb);
849 skip_bits1(&gb);
850 skip_bits1(&gb);
851
852 s->low_delay = get_bits1(&gb);
853
854 /* unknown field */
855 skip_bits1(&gb);
856
857 while (get_bits1(&gb)) {
858 skip_bits(&gb, 8);
859 }
860
861 h->unknown_svq3_flag = get_bits1(&gb);
862 avctx->has_b_frames = !s->low_delay;
863 if (h->unknown_svq3_flag) {
864 #if CONFIG_ZLIB
865 unsigned watermark_width = svq3_get_ue_golomb(&gb);
866 unsigned watermark_height = svq3_get_ue_golomb(&gb);
867 int u1 = svq3_get_ue_golomb(&gb);
868 int u2 = get_bits(&gb, 8);
869 int u3 = get_bits(&gb, 2);
870 int u4 = svq3_get_ue_golomb(&gb);
871 uLongf buf_len = watermark_width*watermark_height*4;
872 int offset = (get_bits_count(&gb)+7)>>3;
873 uint8_t *buf;
874
875 if (!watermark_height || (uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
876 return -1;
877
878 buf = av_malloc(buf_len);
879 av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n", watermark_width, watermark_height);
880 av_log(avctx, AV_LOG_DEBUG, "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n", u1, u2, u3, u4, offset);
881 if (uncompress(buf, &buf_len, extradata + 8 + offset, size - offset) != Z_OK) {
882 av_log(avctx, AV_LOG_ERROR, "could not uncompress watermark logo\n");
883 av_free(buf);
884 return -1;
885 }
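/* the 16-bit checksum of the decompressed logo, replicated into both halves of
 * a 32-bit word, is XORed into each slice in svq3_decode_slice_header() to
 * undo the watermark scrambling */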
886 h->svq3_watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
887 h->svq3_watermark_key = h->svq3_watermark_key << 16 | h->svq3_watermark_key;
888 av_log(avctx, AV_LOG_DEBUG, "watermark key %#x\n", h->svq3_watermark_key);
889 av_free(buf);
890 #else
891 av_log(avctx, AV_LOG_ERROR, "this SVQ3 file contains a watermark which needs zlib support compiled in\n");
892 return -1;
893 #endif
894 }
895 }
896 }
897
898 return 0;
899 }
900
901 static int svq3_decode_frame(AVCodecContext *avctx,
902 void *data, int *data_size,
903 AVPacket *avpkt)
904 {
905 const uint8_t *buf = avpkt->data;
906 int buf_size = avpkt->size;
907 MpegEncContext *const s = avctx->priv_data;
908 H264Context *const h = avctx->priv_data;
909 int m, mb_type;
910
911 /* special case for last picture */
912 if (buf_size == 0) {
913 if (s->next_picture_ptr && !s->low_delay) {
914 *(AVFrame *) data = *(AVFrame *) &s->next_picture;
915 s->next_picture_ptr = NULL;
916 *data_size = sizeof(AVFrame);
917 }
918 return 0;
919 }
920
921 init_get_bits (&s->gb, buf, 8*buf_size);
922
923 s->mb_x = s->mb_y = h->mb_xy = 0;
924
925 if (svq3_decode_slice_header(h))
926 return -1;
927
928 s->pict_type = h->slice_type;
929 s->picture_number = h->slice_num;
930
931 if (avctx->debug&FF_DEBUG_PICT_INFO){
932 av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
933 av_get_pict_type_char(s->pict_type), h->halfpel_flag, h->thirdpel_flag,
934 s->adaptive_quant, s->qscale, h->slice_num);
935 }
936
937 /* for hurry_up == 5 */
938 s->current_picture.pict_type = s->pict_type;
939 s->current_picture.key_frame = (s->pict_type == FF_I_TYPE);
940
941 /* Skip B-frames if we do not have reference frames. */
942 if (s->last_picture_ptr == NULL && s->pict_type == FF_B_TYPE)
943 return 0;
944 /* Skip B-frames if we are in a hurry. */
945 if (avctx->hurry_up && s->pict_type == FF_B_TYPE)
946 return 0;
947 /* Skip everything if we are in a hurry >= 5. */
948 if (avctx->hurry_up >= 5)
949 return 0;
950 if ( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
951 ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
952 || avctx->skip_frame >= AVDISCARD_ALL)
953 return 0;
954
955 if (s->next_p_frame_damaged) {
956 if (s->pict_type == FF_B_TYPE)
957 return 0;
958 else
959 s->next_p_frame_damaged = 0;
960 }
961
962 if (frame_start(h) < 0)
963 return -1;
964
965 if (s->pict_type == FF_B_TYPE) {
966 h->frame_num_offset = (h->slice_num - h->prev_frame_num);
967
968 if (h->frame_num_offset < 0) {
969 h->frame_num_offset += 256;
970 }
971 if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) {
972 av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
973 return -1;
974 }
975 } else {
976 h->prev_frame_num = h->frame_num;
977 h->frame_num = h->slice_num;
978 h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num);
979
980 if (h->prev_frame_num_offset < 0) {
981 h->prev_frame_num_offset += 256;
982 }
983 }
984
985 for (m = 0; m < 2; m++){
986 int i;
987 for (i = 0; i < 4; i++){
988 int j;
989 for (j = -1; j < 4; j++)
990 h->ref_cache[m][scan8[0] + 8*i + j]= 1;
991 if (i < 3)
992 h->ref_cache[m][scan8[0] + 8*i + j]= PART_NOT_AVAILABLE;
993 }
994 }
995
996 for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
997 for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
998 h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;
999
1000 if ( (get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits &&
1001 ((get_bits_count(&s->gb) & 7) == 0 || show_bits(&s->gb, (-get_bits_count(&s->gb) & 7)) == 0)) {
1002
1003 skip_bits(&s->gb, h->next_slice_index - get_bits_count(&s->gb));
1004 s->gb.size_in_bits = 8*buf_size;
1005
1006 if (svq3_decode_slice_header(h))
1007 return -1;
1008
1009 /* TODO: support s->mb_skip_run */
1010 }
1011
1012 mb_type = svq3_get_ue_golomb(&s->gb);
1013
1014 if (s->pict_type == FF_I_TYPE) {
1015 mb_type += 8;
1016 } else if (s->pict_type == FF_B_TYPE && mb_type >= 4) {
1017 mb_type += 4;
1018 }
1019 if (mb_type > 33 || svq3_decode_mb(h, mb_type)) {
1020 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1021 return -1;
1022 }
1023
1024 if (mb_type != 0) {
1025 hl_decode_mb (h);
1026 }
1027
1028 if (s->pict_type != FF_B_TYPE && !s->low_delay) {
1029 s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
1030 (s->pict_type == FF_P_TYPE && mb_type < 8) ? (mb_type - 1) : -1;
1031 }
1032 }
1033
1034 ff_draw_horiz_band(s, 16*s->mb_y, 16);
1035 }
1036
1037 MPV_frame_end(s);
1038
1039 if (s->pict_type == FF_B_TYPE || s->low_delay) {
1040 *(AVFrame *) data = *(AVFrame *) &s->current_picture;
1041 } else {
1042 *(AVFrame *) data = *(AVFrame *) &s->last_picture;
1043 }
1044
1045 /* Do not output the last pic after seeking. */
1046 if (s->last_picture_ptr || s->low_delay) {
1047 *data_size = sizeof(AVFrame);
1048 }
1049
1050 return buf_size;
1051 }
1052
1053
1054 AVCodec svq3_decoder = {
1055 "svq3",
1056 CODEC_TYPE_VIDEO,
1057 CODEC_ID_SVQ3,
1058 sizeof(H264Context),
1059 svq3_decode_init,
1060 NULL,
1061 decode_end,
1062 svq3_decode_frame,
1063 CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY,
1064 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3"),
1065 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_NONE},
1066 };