1 /*
2 * Copyright (c) 2003 The FFmpeg Project
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /*
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple QuickTime files. QuickTime files
24 * have stsd atoms to describe media trak properties. An stsd atom for a
25 * video trak contains one or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
31 *
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
37 *
38 * You will know you have passed these parameters correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41 */
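/*
 * Illustration only -- a sketch of assumed calling-app code, not part of
 * this decoder: the extradata setup described above might look roughly like
 *
 *     avctx->extradata      = av_mallocz(imagedesc_size +
 *                                        FF_INPUT_BUFFER_PADDING_SIZE);
 *     memcpy(avctx->extradata, imagedesc_ptr, imagedesc_size);
 *     avctx->extradata_size = imagedesc_size;
 *
 * where imagedesc_ptr/imagedesc_size are hypothetical names for the
 * ImageDescription contents starting at the 'SVQ3' fourcc (i.e. the atom
 * with its leading 4-byte length stripped) and the size of that buffer.
 */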
42 #include "internal.h"
43 #include "dsputil.h"
44 #include "avcodec.h"
45 #include "mpegvideo.h"
46 #include "h264.h"
47
48 #include "h264data.h" //FIXME FIXME FIXME
49
50 #include "h264_mvpred.h"
51 #include "golomb.h"
52 #include "rectangle.h"
53 #include "vdpau_internal.h"
54
55 #if CONFIG_ZLIB
56 #include <zlib.h>
57 #endif
58
59 #include "svq1.h"
60
61 /**
62 * @file
63 * SVQ3 decoder.
64 */
65
66 #define FULLPEL_MODE 1
67 #define HALFPEL_MODE 2
68 #define THIRDPEL_MODE 3
69 #define PREDICT_MODE 4
70
71 /* dual scan (from some older h264 draft)
72  o-->o-->o   o
73          |  /|
74  o   o   o / o
75  | / |   |/  |
76  o   o   o   o
77    /
78  o-->o-->o-->o
79 */
80 static const uint8_t svq3_scan[16] = {
81 0+0*4, 1+0*4, 2+0*4, 2+1*4,
82 2+2*4, 3+0*4, 3+1*4, 3+2*4,
83 0+1*4, 0+2*4, 1+1*4, 1+2*4,
84 0+3*4, 1+3*4, 2+3*4, 3+3*4,
85 };
86
87 static const uint8_t svq3_pred_0[25][2] = {
88 { 0, 0 },
89 { 1, 0 }, { 0, 1 },
90 { 0, 2 }, { 1, 1 }, { 2, 0 },
91 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
92 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
93 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
94 { 2, 4 }, { 3, 3 }, { 4, 2 },
95 { 4, 3 }, { 3, 4 },
96 { 4, 4 }
97 };
98
99 static const int8_t svq3_pred_1[6][6][5] = {
100 { { 2,-1,-1,-1,-1 }, { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 },
101 { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 }, { 1, 2,-1,-1,-1 } },
102 { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
103 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
104 { { 2, 0,-1,-1,-1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
105 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
106 { { 2, 0,-1,-1,-1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
107 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
108 { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
109 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
110 { { 0, 2,-1,-1,-1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
111 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
112 };
113
114 static const struct { uint8_t run; uint8_t level; } svq3_dct_tables[2][16] = {
115 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
116 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
117 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
118 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
119 };
120
121 static const uint32_t svq3_dequant_coeff[32] = {
122 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
123 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
124 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
125 61694, 68745, 77615, 89113,100253,109366,126635,141533
126 };
127
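/*
 * Dequantize and inverse-transform the 16 luma DC coefficients using
 * SVQ3's 13/17/7 integer approximation of the DCT, scattering the
 * results into the DC slot (first coefficient) of each of the 16
 * 4x4 luma blocks in the output coefficient array.
 */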
128 void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *output, DCTELEM *input, int qp){
129 const int qmul = svq3_dequant_coeff[qp];
130 #define stride 16
131 int i;
132 int temp[16];
133 static const uint8_t x_offset[4]={0, 1*stride, 4*stride, 5*stride};
134
135 for(i=0; i<4; i++){
136 const int z0 = 13*(input[4*i+0] + input[4*i+2]);
137 const int z1 = 13*(input[4*i+0] - input[4*i+2]);
138 const int z2 = 7* input[4*i+1] - 17*input[4*i+3];
139 const int z3 = 17* input[4*i+1] + 7*input[4*i+3];
140
141 temp[4*i+0] = z0+z3;
142 temp[4*i+1] = z1+z2;
143 temp[4*i+2] = z1-z2;
144 temp[4*i+3] = z0-z3;
145 }
146
147 for(i=0; i<4; i++){
148 const int offset= x_offset[i];
149 const int z0= 13*(temp[4*0+i] + temp[4*2+i]);
150 const int z1= 13*(temp[4*0+i] - temp[4*2+i]);
151 const int z2= 7* temp[4*1+i] - 17*temp[4*3+i];
152 const int z3= 17* temp[4*1+i] + 7*temp[4*3+i];
153
154 output[stride* 0+offset] = ((z0 + z3)*qmul + 0x80000) >> 20;
155 output[stride* 2+offset] = ((z1 + z2)*qmul + 0x80000) >> 20;
156 output[stride* 8+offset] = ((z1 - z2)*qmul + 0x80000) >> 20;
157 output[stride*10+offset] = ((z0 - z3)*qmul + 0x80000) >> 20;
158 }
159 }
160 #undef stride
161
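/*
 * Dequantize and inverse-transform one 4x4 block with SVQ3's integer
 * transform and add the result to dst. When dc is non-zero, the DC
 * coefficient in block[0] was coded separately; it is dequantized on
 * its own and added uniformly to all 16 samples instead of going
 * through the transform.
 */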
162 void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp,
163 int dc)
164 {
165 const int qmul = svq3_dequant_coeff[qp];
166 int i;
167 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
168
169 if (dc) {
170 dc = 13*13*((dc == 1) ? 1538*block[0] : ((qmul*(block[0] >> 3)) / 2));
171 block[0] = 0;
172 }
173
174 for (i = 0; i < 4; i++) {
175 const int z0 = 13*(block[0 + 4*i] + block[2 + 4*i]);
176 const int z1 = 13*(block[0 + 4*i] - block[2 + 4*i]);
177 const int z2 = 7* block[1 + 4*i] - 17*block[3 + 4*i];
178 const int z3 = 17* block[1 + 4*i] + 7*block[3 + 4*i];
179
180 block[0 + 4*i] = z0 + z3;
181 block[1 + 4*i] = z1 + z2;
182 block[2 + 4*i] = z1 - z2;
183 block[3 + 4*i] = z0 - z3;
184 }
185
186 for (i = 0; i < 4; i++) {
187 const int z0 = 13*(block[i + 4*0] + block[i + 4*2]);
188 const int z1 = 13*(block[i + 4*0] - block[i + 4*2]);
189 const int z2 = 7* block[i + 4*1] - 17*block[i + 4*3];
190 const int z3 = 17* block[i + 4*1] + 7*block[i + 4*3];
191 const int rr = (dc + 0x80000);
192
193 dst[i + stride*0] = cm[ dst[i + stride*0] + (((z0 + z3)*qmul + rr) >> 20) ];
194 dst[i + stride*1] = cm[ dst[i + stride*1] + (((z1 + z2)*qmul + rr) >> 20) ];
195 dst[i + stride*2] = cm[ dst[i + stride*2] + (((z1 - z2)*qmul + rr) >> 20) ];
196 dst[i + stride*3] = cm[ dst[i + stride*3] + (((z0 - z3)*qmul + rr) >> 20) ];
197 }
198 }
199
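/*
 * Decode run/level coefficient pairs for one block from the bitstream.
 * type selects the scan pattern (luma DC zigzag, 4x4 zigzag, SVQ3 scan
 * or chroma DC) and the run/level coding rules.
 * Returns 0 on success, -1 on invalid data.
 */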
200 static inline int svq3_decode_block(GetBitContext *gb, DCTELEM *block,
201 int index, const int type)
202 {
203 static const uint8_t *const scan_patterns[4] =
204 { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
205
206 int run, level, sign, vlc, limit;
207 const int intra = (3 * type) >> 2;
208 const uint8_t *const scan = scan_patterns[type];
209
210 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
211 for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
212
213 if (vlc == INVALID_VLC)
214 return -1;
215
216 sign = (vlc & 0x1) - 1;
217 vlc = (vlc + 1) >> 1;
218
219 if (type == 3) {
220 if (vlc < 3) {
221 run = 0;
222 level = vlc;
223 } else if (vlc < 4) {
224 run = 1;
225 level = 1;
226 } else {
227 run = (vlc & 0x3);
228 level = ((vlc + 9) >> 2) - run;
229 }
230 } else {
231 if (vlc < 16) {
232 run = svq3_dct_tables[intra][vlc].run;
233 level = svq3_dct_tables[intra][vlc].level;
234 } else if (intra) {
235 run = (vlc & 0x7);
236 level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
237 } else {
238 run = (vlc & 0xF);
239 level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
240 }
241 }
242
243 if ((index += run) >= limit)
244 return -1;
245
246 block[scan[index]] = (level ^ sign) - sign;
247 }
248
249 if (type != 2) {
250 break;
251 }
252 }
253
254 return 0;
255 }
256
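/*
 * Motion-compensate one partition of size width x height at (x, y),
 * reading from the last or next reference picture depending on dir,
 * with half-pel or third-pel interpolation, optionally averaging into
 * the destination (avg) and using edge emulation when the reference
 * block lies outside the padded picture area.
 */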
257 static inline void svq3_mc_dir_part(MpegEncContext *s,
258 int x, int y, int width, int height,
259 int mx, int my, int dxy,
260 int thirdpel, int dir, int avg)
261 {
262 const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture;
263 uint8_t *src, *dest;
264 int i, emu = 0;
265 int blocksize = 2 - (width>>3); //16->0, 8->1, 4->2
266
267 mx += x;
268 my += y;
269
270 if (mx < 0 || mx >= (s->h_edge_pos - width - 1) ||
271 my < 0 || my >= (s->v_edge_pos - height - 1)) {
272
273 if ((s->flags & CODEC_FLAG_EMU_EDGE)) {
274 emu = 1;
275 }
276
277 mx = av_clip (mx, -16, (s->h_edge_pos - width + 15));
278 my = av_clip (my, -16, (s->v_edge_pos - height + 15));
279 }
280
281 /* form component predictions */
282 dest = s->current_picture.data[0] + x + y*s->linesize;
283 src = pic->data[0] + mx + my*s->linesize;
284
285 if (emu) {
286 ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1),
287 mx, my, s->h_edge_pos, s->v_edge_pos);
288 src = s->edge_emu_buffer;
289 }
290 if (thirdpel)
291 (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize, width, height);
292 else
293 (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize, height);
294
295 if (!(s->flags & CODEC_FLAG_GRAY)) {
296 mx = (mx + (mx < (int) x)) >> 1;
297 my = (my + (my < (int) y)) >> 1;
298 width = (width >> 1);
299 height = (height >> 1);
300 blocksize++;
301
302 for (i = 1; i < 3; i++) {
303 dest = s->current_picture.data[i] + (x >> 1) + (y >> 1)*s->uvlinesize;
304 src = pic->data[i] + mx + my*s->uvlinesize;
305
306 if (emu) {
307 ff_emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1),
308 mx, my, (s->h_edge_pos >> 1), (s->v_edge_pos >> 1));
309 src = s->edge_emu_buffer;
310 }
311 if (thirdpel)
312 (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->uvlinesize, width, height);
313 else
314 (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->uvlinesize, height);
315 }
316 }
317 }
318
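/*
 * For each partition of the current macroblock in one prediction
 * direction, predict (and, except in PREDICT_MODE, decode) the motion
 * vector, run motion compensation via svq3_mc_dir_part() and write the
 * vectors back to mv_cache and motion_val.
 */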
319 static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir,
320 int avg)
321 {
322 int i, j, k, mx, my, dx, dy, x, y;
323 MpegEncContext *const s = (MpegEncContext *) h;
324 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
325 const int part_height = 16 >> ((unsigned) (size + 1) / 3);
326 const int extra_width = (mode == PREDICT_MODE) ? -16*6 : 0;
327 const int h_edge_pos = 6*(s->h_edge_pos - part_width ) - extra_width;
328 const int v_edge_pos = 6*(s->v_edge_pos - part_height) - extra_width;
329
330 for (i = 0; i < 16; i += part_height) {
331 for (j = 0; j < 16; j += part_width) {
332 const int b_xy = (4*s->mb_x + (j >> 2)) + (4*s->mb_y + (i >> 2))*h->b_stride;
333 int dxy;
334 x = 16*s->mb_x + j;
335 y = 16*s->mb_y + i;
336 k = ((j >> 2) & 1) + ((i >> 1) & 2) + ((j >> 1) & 4) + (i & 8);
337
338 if (mode != PREDICT_MODE) {
339 pred_motion(h, k, (part_width >> 2), dir, 1, &mx, &my);
340 } else {
341 mx = s->next_picture.motion_val[0][b_xy][0]<<1;
342 my = s->next_picture.motion_val[0][b_xy][1]<<1;
343
344 if (dir == 0) {
345 mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
346 my = ((my * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
347 } else {
348 mx = ((mx * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
349 my = ((my * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
350 }
351 }
352
353 /* clip motion vector prediction to frame border */
354 mx = av_clip(mx, extra_width - 6*x, h_edge_pos - 6*x);
355 my = av_clip(my, extra_width - 6*y, v_edge_pos - 6*y);
356
357 /* get (optional) motion vector differential */
358 if (mode == PREDICT_MODE) {
359 dx = dy = 0;
360 } else {
361 dy = svq3_get_se_golomb(&s->gb);
362 dx = svq3_get_se_golomb(&s->gb);
363
364 if (dx == INVALID_VLC || dy == INVALID_VLC) {
365 av_log(h->s.avctx, AV_LOG_ERROR, "invalid MV vlc\n");
366 return -1;
367 }
368 }
369
370 /* compute motion vector */
371 if (mode == THIRDPEL_MODE) {
372 int fx, fy;
373 mx = ((mx + 1)>>1) + dx;
374 my = ((my + 1)>>1) + dy;
375 fx = ((unsigned)(mx + 0x3000))/3 - 0x1000;
376 fy = ((unsigned)(my + 0x3000))/3 - 0x1000;
377 dxy = (mx - 3*fx) + 4*(my - 3*fy);
378
379 svq3_mc_dir_part(s, x, y, part_width, part_height, fx, fy, dxy, 1, dir, avg);
380 mx += mx;
381 my += my;
382 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
383 mx = ((unsigned)(mx + 1 + 0x3000))/3 + dx - 0x1000;
384 my = ((unsigned)(my + 1 + 0x3000))/3 + dy - 0x1000;
385 dxy = (mx&1) + 2*(my&1);
386
387 svq3_mc_dir_part(s, x, y, part_width, part_height, mx>>1, my>>1, dxy, 0, dir, avg);
388 mx *= 3;
389 my *= 3;
390 } else {
391 mx = ((unsigned)(mx + 3 + 0x6000))/6 + dx - 0x1000;
392 my = ((unsigned)(my + 3 + 0x6000))/6 + dy - 0x1000;
393
394 svq3_mc_dir_part(s, x, y, part_width, part_height, mx, my, 0, 0, dir, avg);
395 mx *= 6;
396 my *= 6;
397 }
398
399 /* update mv_cache */
400 if (mode != PREDICT_MODE) {
401 int32_t mv = pack16to32(mx,my);
402
403 if (part_height == 8 && i < 8) {
404 *(int32_t *) h->mv_cache[dir][scan8[k] + 1*8] = mv;
405
406 if (part_width == 8 && j < 8) {
407 *(int32_t *) h->mv_cache[dir][scan8[k] + 1 + 1*8] = mv;
408 }
409 }
410 if (part_width == 8 && j < 8) {
411 *(int32_t *) h->mv_cache[dir][scan8[k] + 1] = mv;
412 }
413 if (part_width == 4 || part_height == 4) {
414 *(int32_t *) h->mv_cache[dir][scan8[k]] = mv;
415 }
416 }
417
418 /* write back motion vectors */
419 fill_rectangle(s->current_picture.motion_val[dir][b_xy], part_width>>2, part_height>>2, h->b_stride, pack16to32(mx,my), 4);
420 }
421 }
422
423 return 0;
424 }
425
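/* Decode a single macroblock of the given SVQ3 macroblock type. */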
426 static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
427 {
428 int i, j, k, m, dir, mode;
429 int cbp = 0;
430 uint32_t vlc;
431 int8_t *top, *left;
432 MpegEncContext *const s = (MpegEncContext *) h;
433 const int mb_xy = h->mb_xy;
434 const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
435
436 h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
437 h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
438 h->topright_samples_available = 0xFFFF;
439
440 if (mb_type == 0) { /* SKIP */
441 if (s->pict_type == FF_P_TYPE || s->next_picture.mb_type[mb_xy] == -1) {
442 svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);
443
444 if (s->pict_type == FF_B_TYPE) {
445 svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
446 }
447
448 mb_type = MB_TYPE_SKIP;
449 } else {
450 mb_type = FFMIN(s->next_picture.mb_type[mb_xy], 6);
451 if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
452 return -1;
453 if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
454 return -1;
455
456 mb_type = MB_TYPE_16x16;
457 }
458 } else if (mb_type < 8) { /* INTER */
459 if (h->thirdpel_flag && h->halfpel_flag == !get_bits1 (&s->gb)) {
460 mode = THIRDPEL_MODE;
461 } else if (h->halfpel_flag && h->thirdpel_flag == !get_bits1 (&s->gb)) {
462 mode = HALFPEL_MODE;
463 } else {
464 mode = FULLPEL_MODE;
465 }
466
467 /* fill caches */
468 /* note ref_cache should contain here:
469 ????????
470 ???11111
471 N??11111
472 N??11111
473 N??11111
474 */
475
476 for (m = 0; m < 2; m++) {
477 if (s->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6] != -1) {
478 for (i = 0; i < 4; i++) {
479 *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride];
480 }
481 } else {
482 for (i = 0; i < 4; i++) {
483 *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = 0;
484 }
485 }
486 if (s->mb_y > 0) {
487 memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
488 memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
489
490 if (s->mb_x < (s->mb_width - 1)) {
491 *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4];
492 h->ref_cache[m][scan8[0] + 4 - 1*8] =
493 (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride + 1]+6] == -1 ||
494 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride ] ] == -1) ? PART_NOT_AVAILABLE : 1;
495 }else
496 h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
497 if (s->mb_x > 0) {
498 *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1];
499 h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] == -1) ? PART_NOT_AVAILABLE : 1;
500 }else
501 h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
502 }else
503 memset(&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);
504
505 if (s->pict_type != FF_B_TYPE)
506 break;
507 }
508
509 /* decode motion vector(s) and form prediction(s) */
510 if (s->pict_type == FF_P_TYPE) {
511 if (svq3_mc_dir(h, (mb_type - 1), mode, 0, 0) < 0)
512 return -1;
513 } else { /* FF_B_TYPE */
514 if (mb_type != 2) {
515 if (svq3_mc_dir(h, 0, mode, 0, 0) < 0)
516 return -1;
517 } else {
518 for (i = 0; i < 4; i++) {
519 memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
520 }
521 }
522 if (mb_type != 1) {
523 if (svq3_mc_dir(h, 0, mode, 1, (mb_type == 3)) < 0)
524 return -1;
525 } else {
526 for (i = 0; i < 4; i++) {
527 memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
528 }
529 }
530 }
531
532 mb_type = MB_TYPE_16x16;
533 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
534 memset(h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t));
535
536 if (mb_type == 8) {
537 if (s->mb_x > 0) {
538 for (i = 0; i < 4; i++) {
539 h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6-i];
540 }
541 if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) {
542 h->left_samples_available = 0x5F5F;
543 }
544 }
545 if (s->mb_y > 0) {
546 h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+0];
547 h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+1];
548 h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+2];
549 h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+3];
550
551 if (h->intra4x4_pred_mode_cache[4+8*0] == -1) {
552 h->top_samples_available = 0x33FF;
553 }
554 }
555
556 /* decode prediction codes for luma blocks */
557 for (i = 0; i < 16; i+=2) {
558 vlc = svq3_get_ue_golomb(&s->gb);
559
560 if (vlc >= 25){
561 av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
562 return -1;
563 }
564
565 left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
566 top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
567
568 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
569 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
570
571 if (left[1] == -1 || left[2] == -1){
572 av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n");
573 return -1;
574 }
575 }
576 } else { /* mb_type == 33, DC_128_PRED block type */
577 for (i = 0; i < 4; i++) {
578 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4);
579 }
580 }
581
582 ff_h264_write_back_intra_pred_mode(h);
583
584 if (mb_type == 8) {
585 ff_h264_check_intra4x4_pred_mode(h);
586
587 h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
588 h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
589 } else {
590 for (i = 0; i < 4; i++) {
591 memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4);
592 }
593
594 h->top_samples_available = 0x33FF;
595 h->left_samples_available = 0x5F5F;
596 }
597
598 mb_type = MB_TYPE_INTRA4x4;
599 } else { /* INTRA16x16 */
600 dir = i_mb_type_info[mb_type - 8].pred_mode;
601 dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;
602
603 if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir)) == -1){
604 av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
605 return -1;
606 }
607
608 cbp = i_mb_type_info[mb_type - 8].cbp;
609 mb_type = MB_TYPE_INTRA16x16;
610 }
611
612 if (!IS_INTER(mb_type) && s->pict_type != FF_I_TYPE) {
613 for (i = 0; i < 4; i++) {
614 memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
615 }
616 if (s->pict_type == FF_B_TYPE) {
617 for (i = 0; i < 4; i++) {
618 memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
619 }
620 }
621 }
622 if (!IS_INTRA4x4(mb_type)) {
623 memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy], DC_PRED, 8);
624 }
625 if (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE) {
626 memset(h->non_zero_count_cache + 8, 0, 4*9*sizeof(uint8_t));
627 s->dsp.clear_blocks(h->mb);
628 }
629
630 if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE)) {
631 if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48){
632 av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
633 return -1;
634 }
635
636 cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
637 }
638 if (IS_INTRA16x16(mb_type) || (s->pict_type != FF_I_TYPE && s->adaptive_quant && cbp)) {
639 s->qscale += svq3_get_se_golomb(&s->gb);
640
641 if (s->qscale > 31){
642 av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
643 return -1;
644 }
645 }
646 if (IS_INTRA16x16(mb_type)) {
647 AV_ZERO128(h->mb_luma_dc+0);
648 AV_ZERO128(h->mb_luma_dc+8);
649 if (svq3_decode_block(&s->gb, h->mb_luma_dc, 0, 1)){
650 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n");
651 return -1;
652 }
653 }
654
655 if (cbp) {
656 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
657 const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
658
659 for (i = 0; i < 4; i++) {
660 if ((cbp & (1 << i))) {
661 for (j = 0; j < 4; j++) {
662 k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j);
663 h->non_zero_count_cache[ scan8[k] ] = 1;
664
665 if (svq3_decode_block(&s->gb, &h->mb[16*k], index, type)){
666 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding block\n");
667 return -1;
668 }
669 }
670 }
671 }
672
673 if ((cbp & 0x30)) {
674 for (i = 0; i < 2; ++i) {
675 if (svq3_decode_block(&s->gb, &h->mb[16*(16 + 4*i)], 0, 3)){
676 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n");
677 return -1;
678 }
679 }
680
681 if ((cbp & 0x20)) {
682 for (i = 0; i < 8; i++) {
683 h->non_zero_count_cache[ scan8[16+i] ] = 1;
684
685 if (svq3_decode_block(&s->gb, &h->mb[16*(16 + i)], 1, 1)){
686 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n");
687 return -1;
688 }
689 }
690 }
691 }
692 }
693
694 h->cbp= cbp;
695 s->current_picture.mb_type[mb_xy] = mb_type;
696
697 if (IS_INTRA(mb_type)) {
698 h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8);
699 }
700
701 return 0;
702 }
703
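/*
 * Parse an SVQ3 slice header: locate the next slice, undo the watermark
 * scrambling when a key is present, and read the slice type, qscale and
 * the remaining per-slice fields.
 */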
704 static int svq3_decode_slice_header(H264Context *h)
705 {
706 MpegEncContext *const s = (MpegEncContext *) h;
707 const int mb_xy = h->mb_xy;
708 int i, header;
709
710 header = get_bits(&s->gb, 8);
711
712 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
713 /* TODO: what? */
714 av_log(h->s.avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
715 return -1;
716 } else {
717 int length = (header >> 5) & 3;
718
719 h->next_slice_index = get_bits_count(&s->gb) + 8*show_bits(&s->gb, 8*length) + 8*length;
720
721 if (h->next_slice_index > s->gb.size_in_bits) {
722 av_log(h->s.avctx, AV_LOG_ERROR, "slice after bitstream end\n");
723 return -1;
724 }
725
726 s->gb.size_in_bits = h->next_slice_index - 8*(length - 1);
727 skip_bits(&s->gb, 8);
728
729 if (h->svq3_watermark_key) {
730 uint32_t header = AV_RL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1]);
731 AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1], header ^ h->svq3_watermark_key);
732 }
733 if (length > 0) {
734 memcpy((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3],
735 &s->gb.buffer[s->gb.size_in_bits >> 3], (length - 1));
736 }
737 skip_bits_long(&s->gb, 0);
738 }
739
740 if ((i = svq3_get_ue_golomb(&s->gb)) == INVALID_VLC || i >= 3){
741 av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", i);
742 return -1;
743 }
744
745 h->slice_type = golomb_to_pict_type[i];
746
747 if ((header & 0x9F) == 2) {
748 i = (s->mb_num < 64) ? 6 : (1 + av_log2 (s->mb_num - 1));
749 s->mb_skip_run = get_bits(&s->gb, i) - (s->mb_x + (s->mb_y * s->mb_width));
750 } else {
751 skip_bits1(&s->gb);
752 s->mb_skip_run = 0;
753 }
754
755 h->slice_num = get_bits(&s->gb, 8);
756 s->qscale = get_bits(&s->gb, 5);
757 s->adaptive_quant = get_bits1(&s->gb);
758
759 /* unknown fields */
760 skip_bits1(&s->gb);
761
762 if (h->unknown_svq3_flag) {
763 skip_bits1(&s->gb);
764 }
765
766 skip_bits1(&s->gb);
767 skip_bits(&s->gb, 2);
768
769 while (get_bits1(&s->gb)) {
770 skip_bits(&s->gb, 8);
771 }
772
773 /* reset intra predictors and invalidate motion vector references */
774 if (s->mb_x > 0) {
775 memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy - 1 ]+3, -1, 4*sizeof(int8_t));
776 memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy - s->mb_x] , -1, 8*sizeof(int8_t)*s->mb_x);
777 }
778 if (s->mb_y > 0) {
779 memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy - s->mb_stride], -1, 8*sizeof(int8_t)*(s->mb_width - s->mb_x));
780
781 if (s->mb_x > 0) {
782 h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] = -1;
783 }
784 }
785
786 return 0;
787 }
788
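/*
 * Initialize the decoder: set up the shared H.264/MpegEncContext state
 * and parse the SEQH header found in the stsd extradata (frame size,
 * half-pel/third-pel flags, low-delay flag and the optional
 * zlib-compressed watermark).
 */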
789 static av_cold int svq3_decode_init(AVCodecContext *avctx)
790 {
791 MpegEncContext *const s = avctx->priv_data;
792 H264Context *const h = avctx->priv_data;
793 int m;
794 unsigned char *extradata;
795 unsigned int size;
796
797 if (ff_h264_decode_init(avctx) < 0)
798 return -1;
799
800 s->flags = avctx->flags;
801 s->flags2 = avctx->flags2;
802 s->unrestricted_mv = 1;
803 h->is_complex=1;
804 avctx->pix_fmt = avctx->codec->pix_fmts[0];
805
806 if (!s->context_initialized) {
807 s->width = avctx->width;
808 s->height = avctx->height;
809 h->halfpel_flag = 1;
810 h->thirdpel_flag = 1;
811 h->unknown_svq3_flag = 0;
812 h->chroma_qp[0] = h->chroma_qp[1] = 4;
813
814 if (MPV_common_init(s) < 0)
815 return -1;
816
817 h->b_stride = 4*s->mb_width;
818
819 ff_h264_alloc_tables(h);
820
821 /* prowl for the "SEQH" marker in the extradata */
822 extradata = (unsigned char *)avctx->extradata;
823 for (m = 0; m < avctx->extradata_size; m++) {
824 if (!memcmp(extradata, "SEQH", 4))
825 break;
826 extradata++;
827 }
828
829 /* if a match was found, parse the extra data */
830 if (extradata && !memcmp(extradata, "SEQH", 4)) {
831
832 GetBitContext gb;
833 int frame_size_code;
834
835 size = AV_RB32(&extradata[4]);
836 init_get_bits(&gb, extradata + 8, size*8);
837
838 /* 'frame size code' and optional 'width, height' */
839 frame_size_code = get_bits(&gb, 3);
840 switch (frame_size_code) {
841 case 0: avctx->width = 160; avctx->height = 120; break;
842 case 1: avctx->width = 128; avctx->height = 96; break;
843 case 2: avctx->width = 176; avctx->height = 144; break;
844 case 3: avctx->width = 352; avctx->height = 288; break;
845 case 4: avctx->width = 704; avctx->height = 576; break;
846 case 5: avctx->width = 240; avctx->height = 180; break;
847 case 6: avctx->width = 320; avctx->height = 240; break;
848 case 7:
849 avctx->width = get_bits(&gb, 12);
850 avctx->height = get_bits(&gb, 12);
851 break;
852 }
853
854 h->halfpel_flag = get_bits1(&gb);
855 h->thirdpel_flag = get_bits1(&gb);
856
857 /* unknown fields */
858 skip_bits1(&gb);
859 skip_bits1(&gb);
860 skip_bits1(&gb);
861 skip_bits1(&gb);
862
863 s->low_delay = get_bits1(&gb);
864
865 /* unknown field */
866 skip_bits1(&gb);
867
868 while (get_bits1(&gb)) {
869 skip_bits(&gb, 8);
870 }
871
872 h->unknown_svq3_flag = get_bits1(&gb);
873 avctx->has_b_frames = !s->low_delay;
874 if (h->unknown_svq3_flag) {
875 #if CONFIG_ZLIB
876 unsigned watermark_width = svq3_get_ue_golomb(&gb);
877 unsigned watermark_height = svq3_get_ue_golomb(&gb);
878 int u1 = svq3_get_ue_golomb(&gb);
879 int u2 = get_bits(&gb, 8);
880 int u3 = get_bits(&gb, 2);
881 int u4 = svq3_get_ue_golomb(&gb);
882 unsigned long buf_len = watermark_width*watermark_height*4;
883 int offset = (get_bits_count(&gb)+7)>>3;
884 uint8_t *buf;
885
886 if ((uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
887 return -1;
888
889 buf = av_malloc(buf_len);
890 av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n", watermark_width, watermark_height);
891 av_log(avctx, AV_LOG_DEBUG, "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n", u1, u2, u3, u4, offset);
892 if (uncompress(buf, &buf_len, extradata + 8 + offset, size - offset) != Z_OK) {
893 av_log(avctx, AV_LOG_ERROR, "could not uncompress watermark logo\n");
894 av_free(buf);
895 return -1;
896 }
897 h->svq3_watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
898 h->svq3_watermark_key = h->svq3_watermark_key << 16 | h->svq3_watermark_key;
899 av_log(avctx, AV_LOG_DEBUG, "watermark key %#x\n", h->svq3_watermark_key);
900 av_free(buf);
901 #else
902 av_log(avctx, AV_LOG_ERROR, "this svq3 file contains watermark which need zlib support compiled in\n");
903 return -1;
904 #endif
905 }
906 }
907 }
908
909 return 0;
910 }
911
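/* Decode one SVQ3 frame from an AVPacket and output the (possibly delayed) picture. */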
912 static int svq3_decode_frame(AVCodecContext *avctx,
913 void *data, int *data_size,
914 AVPacket *avpkt)
915 {
916 const uint8_t *buf = avpkt->data;
917 int buf_size = avpkt->size;
918 MpegEncContext *const s = avctx->priv_data;
919 H264Context *const h = avctx->priv_data;
920 int m, mb_type;
921
922 /* special case for last picture */
923 if (buf_size == 0) {
924 if (s->next_picture_ptr && !s->low_delay) {
925 *(AVFrame *) data = *(AVFrame *) &s->next_picture;
926 s->next_picture_ptr = NULL;
927 *data_size = sizeof(AVFrame);
928 }
929 return 0;
930 }
931
932 init_get_bits (&s->gb, buf, 8*buf_size);
933
934 s->mb_x = s->mb_y = h->mb_xy = 0;
935
936 if (svq3_decode_slice_header(h))
937 return -1;
938
939 s->pict_type = h->slice_type;
940 s->picture_number = h->slice_num;
941
942 if (avctx->debug&FF_DEBUG_PICT_INFO){
943 av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
944 av_get_pict_type_char(s->pict_type), h->halfpel_flag, h->thirdpel_flag,
945 s->adaptive_quant, s->qscale, h->slice_num);
946 }
947
948 /* for hurry_up == 5 */
949 s->current_picture.pict_type = s->pict_type;
950 s->current_picture.key_frame = (s->pict_type == FF_I_TYPE);
951
952 /* Skip B-frames if we do not have reference frames. */
953 if (s->last_picture_ptr == NULL && s->pict_type == FF_B_TYPE)
954 return 0;
955 /* Skip B-frames if we are in a hurry. */
956 if (avctx->hurry_up && s->pict_type == FF_B_TYPE)
957 return 0;
958 /* Skip everything if we are in a hurry >= 5. */
959 if (avctx->hurry_up >= 5)
960 return 0;
961 if ( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
962 ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
963 || avctx->skip_frame >= AVDISCARD_ALL)
964 return 0;
965
966 if (s->next_p_frame_damaged) {
967 if (s->pict_type == FF_B_TYPE)
968 return 0;
969 else
970 s->next_p_frame_damaged = 0;
971 }
972
973 if (ff_h264_frame_start(h) < 0)
974 return -1;
975
976 if (s->pict_type == FF_B_TYPE) {
977 h->frame_num_offset = (h->slice_num - h->prev_frame_num);
978
979 if (h->frame_num_offset < 0) {
980 h->frame_num_offset += 256;
981 }
982 if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) {
983 av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
984 return -1;
985 }
986 } else {
987 h->prev_frame_num = h->frame_num;
988 h->frame_num = h->slice_num;
989 h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num);
990
991 if (h->prev_frame_num_offset < 0) {
992 h->prev_frame_num_offset += 256;
993 }
994 }
995
996 for (m = 0; m < 2; m++){
997 int i;
998 for (i = 0; i < 4; i++){
999 int j;
1000 for (j = -1; j < 4; j++)
1001 h->ref_cache[m][scan8[0] + 8*i + j]= 1;
1002 if (i < 3)
1003 h->ref_cache[m][scan8[0] + 8*i + j]= PART_NOT_AVAILABLE;
1004 }
1005 }
1006
1007 for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1008 for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1009 h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;
1010
1011 if ( (get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits &&
1012 ((get_bits_count(&s->gb) & 7) == 0 || show_bits(&s->gb, (-get_bits_count(&s->gb) & 7)) == 0)) {
1013
1014 skip_bits(&s->gb, h->next_slice_index - get_bits_count(&s->gb));
1015 s->gb.size_in_bits = 8*buf_size;
1016
1017 if (svq3_decode_slice_header(h))
1018 return -1;
1019
1020 /* TODO: support s->mb_skip_run */
1021 }
1022
1023 mb_type = svq3_get_ue_golomb(&s->gb);
1024
1025 if (s->pict_type == FF_I_TYPE) {
1026 mb_type += 8;
1027 } else if (s->pict_type == FF_B_TYPE && mb_type >= 4) {
1028 mb_type += 4;
1029 }
1030 if (mb_type > 33 || svq3_decode_mb(h, mb_type)) {
1031 av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1032 return -1;
1033 }
1034
1035 if (mb_type != 0) {
1036 ff_h264_hl_decode_mb (h);
1037 }
1038
1039 if (s->pict_type != FF_B_TYPE && !s->low_delay) {
1040 s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
1041 (s->pict_type == FF_P_TYPE && mb_type < 8) ? (mb_type - 1) : -1;
1042 }
1043 }
1044
1045 ff_draw_horiz_band(s, 16*s->mb_y, 16);
1046 }
1047
1048 MPV_frame_end(s);
1049
1050 if (s->pict_type == FF_B_TYPE || s->low_delay) {
1051 *(AVFrame *) data = *(AVFrame *) &s->current_picture;
1052 } else {
1053 *(AVFrame *) data = *(AVFrame *) &s->last_picture;
1054 }
1055
1056 /* Do not output the last pic after seeking. */
1057 if (s->last_picture_ptr || s->low_delay) {
1058 *data_size = sizeof(AVFrame);
1059 }
1060
1061 return buf_size;
1062 }
1063
1064
1065 AVCodec ff_svq3_decoder = {
1066 "svq3",
1067 AVMEDIA_TYPE_VIDEO,
1068 CODEC_ID_SVQ3,
1069 sizeof(H264Context),
1070 svq3_decode_init,
1071 NULL,
1072 ff_h264_decode_end,
1073 svq3_decode_frame,
1074 CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY,
1075 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1076 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_NONE},
1077 };