/*
 * Copyright (c) 2003 The FFmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * How to use this decoder:
 * SVQ3 data is transported within Apple QuickTime files. QuickTime files
 * have stsd atoms to describe media trak properties. An stsd atom for a
 * video trak contains one or more ImageDescription atoms. These atoms begin
 * with the 4-byte length of the atom followed by the codec fourcc. Some
 * decoders need information in this atom to operate correctly. Such
 * is the case with SVQ3. In order to get the best use out of this decoder,
 * the calling app must make the SVQ3 ImageDescription atom available
 * via the AVCodecContext's extradata[_size] fields:
 *
 * AVCodecContext.extradata = pointer to ImageDescription, first characters
 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
 * AVCodecContext.extradata_size = size of ImageDescription atom memory
 * buffer (which will be the same as the ImageDescription atom size field
 * from the QT file, minus 4 bytes since the length is missing)
 *
 * You will know you have these parameters passed correctly when the decoder
 * correctly decodes this file:
 *  http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
 */
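/*
 * Illustrative sketch only (not part of the original decoder): a hypothetical
 * calling application might hand the atom to this decoder roughly as below,
 * assuming image_desc points at the atom payload starting with the 'SVQ3'
 * fourcc and image_desc_size is the atom size minus the 4-byte length field:
 *
 *     avctx->extradata = av_mallocz(image_desc_size + FF_INPUT_BUFFER_PADDING_SIZE);
 *     if (!avctx->extradata)
 *         return AVERROR(ENOMEM);
 *     memcpy(avctx->extradata, image_desc, image_desc_size);
 *     avctx->extradata_size = image_desc_size;
 */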
#include "internal.h"
#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h264.h"

#include "h264data.h" //FIXME FIXME FIXME

#include "h264_mvpred.h"
#include "golomb.h"
#include "rectangle.h"
#include "vdpau_internal.h"

#if CONFIG_ZLIB
#include <zlib.h>
#endif

#include "svq1.h"

/**
 * @file libavcodec/svq3.c
 * svq3 decoder.
 */

#define FULLPEL_MODE  1
#define HALFPEL_MODE  2
#define THIRDPEL_MODE 3
#define PREDICT_MODE  4

/* dual scan (from some older h264 draft)
 o-->o-->o   o
         |  /|
 o   o   o / o
 | / |   |/  |
 o   o   o   o
   /
 o-->o-->o-->o
*/
static const uint8_t svq3_scan[16] = {
    0+0*4, 1+0*4, 2+0*4, 2+1*4,
    2+2*4, 3+0*4, 3+1*4, 3+2*4,
    0+1*4, 0+2*4, 1+1*4, 1+2*4,
    0+3*4, 1+3*4, 2+3*4, 3+3*4,
};

static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};

static const int8_t svq3_pred_1[6][6][5] = {
    { { 2,-1,-1,-1,-1 }, { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 },
      { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 }, { 1, 2,-1,-1,-1 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0,-1,-1,-1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0,-1,-1,-1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};

static const struct { uint8_t run; uint8_t level; } svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};

static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,  6154,  6914,  7761,  8718,
     9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
    24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
    61694, 68745, 77615, 89113,100253,109366,126635,141533
};


void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp)
{
    const int qmul = svq3_dequant_coeff[qp];
#define stride 16
    int i;
    int temp[16];
    static const int x_offset[4] = {0, 1*stride, 4* stride, 5*stride};
    static const int y_offset[4] = {0, 2*stride, 8* stride, 10*stride};

    for (i = 0; i < 4; i++){
        const int offset = y_offset[i];
        const int z0 = 13*(block[offset+stride*0] + block[offset+stride*4]);
        const int z1 = 13*(block[offset+stride*0] - block[offset+stride*4]);
        const int z2 = 7* block[offset+stride*1] - 17*block[offset+stride*5];
        const int z3 = 17* block[offset+stride*1] + 7*block[offset+stride*5];

        temp[4*i+0] = z0+z3;
        temp[4*i+1] = z1+z2;
        temp[4*i+2] = z1-z2;
        temp[4*i+3] = z0-z3;
    }

    for (i = 0; i < 4; i++){
        const int offset = x_offset[i];
        const int z0 = 13*(temp[4*0+i] + temp[4*2+i]);
        const int z1 = 13*(temp[4*0+i] - temp[4*2+i]);
        const int z2 = 7* temp[4*1+i] - 17*temp[4*3+i];
        const int z3 = 17* temp[4*1+i] + 7*temp[4*3+i];

        block[stride*0 +offset] = ((z0 + z3)*qmul + 0x80000) >> 20;
        block[stride*2 +offset] = ((z1 + z2)*qmul + 0x80000) >> 20;
        block[stride*8 +offset] = ((z1 - z2)*qmul + 0x80000) >> 20;
        block[stride*10+offset] = ((z0 - z3)*qmul + 0x80000) >> 20;
    }
}
#undef stride

void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp,
                     int dc)
{
    const int qmul = svq3_dequant_coeff[qp];
    int i;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    if (dc) {
        dc = 13*13*((dc == 1) ? 1538*block[0] : ((qmul*(block[0] >> 3)) / 2));
        block[0] = 0;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13*(block[0 + 4*i] + block[2 + 4*i]);
        const int z1 = 13*(block[0 + 4*i] - block[2 + 4*i]);
        const int z2 = 7* block[1 + 4*i] - 17*block[3 + 4*i];
        const int z3 = 17* block[1 + 4*i] + 7*block[3 + 4*i];

        block[0 + 4*i] = z0 + z3;
        block[1 + 4*i] = z1 + z2;
        block[2 + 4*i] = z1 - z2;
        block[3 + 4*i] = z0 - z3;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13*(block[i + 4*0] + block[i + 4*2]);
        const int z1 = 13*(block[i + 4*0] - block[i + 4*2]);
        const int z2 = 7* block[i + 4*1] - 17*block[i + 4*3];
        const int z3 = 17* block[i + 4*1] + 7*block[i + 4*3];
        const int rr = (dc + 0x80000);

        dst[i + stride*0] = cm[ dst[i + stride*0] + (((z0 + z3)*qmul + rr) >> 20) ];
        dst[i + stride*1] = cm[ dst[i + stride*1] + (((z1 + z2)*qmul + rr) >> 20) ];
        dst[i + stride*2] = cm[ dst[i + stride*2] + (((z1 - z2)*qmul + rr) >> 20) ];
        dst[i + stride*3] = cm[ dst[i + stride*3] + (((z0 - z3)*qmul + rr) >> 20) ];
    }
}

static inline int svq3_decode_block(GetBitContext *gb, DCTELEM *block,
                                    int index, const int type)
{
    static const uint8_t *const scan_patterns[4] =
    { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };

    int run, level, sign, vlc, limit;
    const int intra = (3 * type) >> 2;
    const uint8_t *const scan = scan_patterns[type];

    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {

            if (vlc == INVALID_VLC)
                return -1;

            sign = (vlc & 0x1) - 1;
            vlc = (vlc + 1) >> 1;

            if (type == 3) {
                if (vlc < 3) {
                    run = 0;
                    level = vlc;
                } else if (vlc < 4) {
                    run = 1;
                    level = 1;
                } else {
                    run = (vlc & 0x3);
                    level = ((vlc + 9) >> 2) - run;
                }
            } else {
                if (vlc < 16) {
                    run = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;
                } else if (intra) {
                    run = (vlc & 0x7);
                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                } else {
                    run = (vlc & 0xF);
                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
                }
            }

            if ((index += run) >= limit)
                return -1;

            block[scan[index]] = (level ^ sign) - sign;
        }

        if (type != 2) {
            break;
        }
    }

    return 0;
}

static inline void svq3_mc_dir_part(MpegEncContext *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize = 2 - (width>>3); //16->0, 8->1, 4->2

    mx += x;
    my += y;

    if (mx < 0 || mx >= (s->h_edge_pos - width - 1) ||
        my < 0 || my >= (s->v_edge_pos - height - 1)) {

        if ((s->flags & CODEC_FLAG_EMU_EDGE)) {
            emu = 1;
        }

        mx = av_clip (mx, -16, (s->h_edge_pos - width + 15));
        my = av_clip (my, -16, (s->v_edge_pos - height + 15));
    }

    /* form component predictions */
    dest = s->current_picture.data[0] + x + y*s->linesize;
    src = pic->data[0] + mx + my*s->linesize;

    if (emu) {
        ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1),
                            mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize, width, height);
    else
        (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize, height);

    if (!(s->flags & CODEC_FLAG_GRAY)) {
        mx = (mx + (mx < (int) x)) >> 1;
        my = (my + (my < (int) y)) >> 1;
        width = (width >> 1);
        height = (height >> 1);
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = s->current_picture.data[i] + (x >> 1) + (y >> 1)*s->uvlinesize;
            src = pic->data[i] + mx + my*s->uvlinesize;

            if (emu) {
                ff_emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1),
                                    mx, my, (s->h_edge_pos >> 1), (s->v_edge_pos >> 1));
                src = s->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->uvlinesize, width, height);
            else
                (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->uvlinesize, height);
        }
    }
}

static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir,
                              int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    MpegEncContext *const s = (MpegEncContext *) h;
    const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned) (size + 1) / 3);
    const int extra_width = (mode == PREDICT_MODE) ? -16*6 : 0;
    const int h_edge_pos = 6*(s->h_edge_pos - part_width ) - extra_width;
    const int v_edge_pos = 6*(s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height) {
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4*s->mb_x + (j >> 2)) + (4*s->mb_y + (i >> 2))*h->b_stride;
            int dxy;
            x = 16*s->mb_x + j;
            y = 16*s->mb_y + i;
            k = ((j >> 2) & 1) + ((i >> 1) & 2) + ((j >> 1) & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                pred_motion(h, k, (part_width >> 2), dir, 1, &mx, &my);
            } else {
                mx = s->next_picture.motion_val[0][b_xy][0]<<1;
                my = s->next_picture.motion_val[0][b_xy][1]<<1;

                if (dir == 0) {
                    mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
                    my = ((my * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
                } else {
                    mx = ((mx * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
                    my = ((my * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6*x, h_edge_pos - 6*x);
            my = av_clip(my, extra_width - 6*y, v_edge_pos - 6*y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = svq3_get_se_golomb(&s->gb);
                dx = svq3_get_se_golomb(&s->gb);

                if (dx == INVALID_VLC || dy == INVALID_VLC) {
                    av_log(h->s.avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx = ((mx + 1)>>1) + dx;
                my = ((my + 1)>>1) + dy;
                fx = ((unsigned)(mx + 0x3000))/3 - 0x1000;
                fy = ((unsigned)(my + 0x3000))/3 - 0x1000;
                dxy = (mx - 3*fx) + 4*(my - 3*fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height, fx, fy, dxy, 1, dir, avg);
                mx += mx;
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx = ((unsigned)(mx + 1 + 0x3000))/3 + dx - 0x1000;
                my = ((unsigned)(my + 1 + 0x3000))/3 + dy - 0x1000;
                dxy = (mx&1) + 2*(my&1);

                svq3_mc_dir_part(s, x, y, part_width, part_height, mx>>1, my>>1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = ((unsigned)(mx + 3 + 0x6000))/6 + dx - 0x1000;
                my = ((unsigned)(my + 3 + 0x6000))/6 + dy - 0x1000;

                svq3_mc_dir_part(s, x, y, part_width, part_height, mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx,my);

                if (part_height == 8 && i < 8) {
                    *(int32_t *) h->mv_cache[dir][scan8[k] + 1*8] = mv;

                    if (part_width == 8 && j < 8) {
                        *(int32_t *) h->mv_cache[dir][scan8[k] + 1 + 1*8] = mv;
                    }
                }
                if (part_width == 8 && j < 8) {
                    *(int32_t *) h->mv_cache[dir][scan8[k] + 1] = mv;
                }
                if (part_width == 4 || part_height == 4) {
                    *(int32_t *) h->mv_cache[dir][scan8[k]] = mv;
                }
            }

            /* write back motion vectors */
            fill_rectangle(s->current_picture.motion_val[dir][b_xy], part_width>>2, part_height>>2, h->b_stride, pack16to32(mx,my), 4);
        }
    }

    return 0;
}

static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
{
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    MpegEncContext *const s = (MpegEncContext *) h;
    const int mb_xy = h->mb_xy;
    const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;

    h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
    h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    h->topright_samples_available = 0xFFFF;

    if (mb_type == 0) {           /* SKIP */
        if (s->pict_type == FF_P_TYPE || s->next_picture.mb_type[mb_xy] == -1) {
            svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);

            if (s->pict_type == FF_B_TYPE) {
                svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
            }

            mb_type = MB_TYPE_SKIP;
        } else {
            mb_type = FFMIN(s->next_picture.mb_type[mb_xy], 6);
            if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) {     /* INTER */
        if (h->thirdpel_flag && h->halfpel_flag == !get_bits1 (&s->gb)) {
            mode = THIRDPEL_MODE;
        } else if (h->halfpel_flag && h->thirdpel_flag == !get_bits1 (&s->gb)) {
            mode = HALFPEL_MODE;
        } else {
            mode = FULLPEL_MODE;
        }

        /* fill caches */
        /* note ref_cache should contain here:
            ????????
            ???11111
            N??11111
            N??11111
            N??11111
        */

        for (m = 0; m < 2; m++) {
            if (s->mb_x > 0 && h->intra4x4_pred_mode[mb_xy - 1][0] != -1) {
                for (i = 0; i < 4; i++) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride];
                }
            } else {
                for (i = 0; i < 4; i++) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = 0;
                }
            }
            if (s->mb_y > 0) {
                memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
                memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[mb_xy - s->mb_stride][4] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (s->mb_x < (s->mb_width - 1)) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4];
                    h->ref_cache[m][scan8[0] + 4 - 1*8] =
                        (h->intra4x4_pred_mode[mb_xy - s->mb_stride + 1][0] == -1 ||
                         h->intra4x4_pred_mode[mb_xy - s->mb_stride ][4] == -1) ? PART_NOT_AVAILABLE : 1;
                }else
                    h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
                if (s->mb_x > 0) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1];
                    h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] == -1) ? PART_NOT_AVAILABLE : 1;
                }else
                    h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
            }else
                memset(&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);

            if (s->pict_type != FF_B_TYPE)
                break;
        }

        /* decode motion vector(s) and form prediction(s) */
        if (s->pict_type == FF_P_TYPE) {
            if (svq3_mc_dir(h, (mb_type - 1), mode, 0, 0) < 0)
                return -1;
        } else {        /* FF_B_TYPE */
            if (mb_type != 2) {
                if (svq3_mc_dir(h, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++) {
                    memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
                }
            }
            if (mb_type != 1) {
                if (svq3_mc_dir(h, 0, mode, 1, (mb_type == 3)) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++) {
                    memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
                }
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
        memset(h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t));

        if (mb_type == 8) {
            if (s->mb_x > 0) {
                for (i = 0; i < 4; i++) {
                    h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[mb_xy - 1][i];
                }
                if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) {
                    h->left_samples_available = 0x5F5F;
                }
            }
            if (s->mb_y > 0) {
                h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][4];
                h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][5];
                h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][6];
                h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][3];

                if (h->intra4x4_pred_mode_cache[4+8*0] == -1) {
                    h->top_samples_available = 0x33FF;
                }
            }

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i+=2) {
                vlc = svq3_get_ue_golomb(&s->gb);

                if (vlc >= 25){
                    av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
                    return -1;
                }

                left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
                top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];

                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1){
                    av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else {    /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++) {
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4);
            }
        }

        ff_h264_write_back_intra_pred_mode(h);

        if (mb_type == 8) {
            ff_h264_check_intra4x4_pred_mode(h);

            h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
            h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++) {
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4);
            }

            h->top_samples_available = 0x33FF;
            h->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else {                      /* INTRA16x16 */
        dir = i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;

        if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir)) == -1){
            av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
            return -1;
        }

        cbp = i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }

    if (!IS_INTER(mb_type) && s->pict_type != FF_I_TYPE) {
        for (i = 0; i < 4; i++) {
            memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
        }
        if (s->pict_type == FF_B_TYPE) {
            for (i = 0; i < 4; i++) {
                memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
            }
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset(h->intra4x4_pred_mode[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE) {
        memset(h->non_zero_count_cache + 8, 0, 4*9*sizeof(uint8_t));
        s->dsp.clear_blocks(h->mb);
    }

    if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE)) {
        if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48){
            av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
    }
    if (IS_INTRA16x16(mb_type) || (s->pict_type != FF_I_TYPE && s->adaptive_quant && cbp)) {
        s->qscale += svq3_get_se_golomb(&s->gb);

        if (s->qscale > 31){
            av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
            return -1;
        }
    }
    if (IS_INTRA16x16(mb_type)) {
        if (svq3_decode_block(&s->gb, h->mb, 0, 0)){
            av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        for (i = 0; i < 4; i++) {
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j);
                    h->non_zero_count_cache[ scan8[k] ] = 1;

                    if (svq3_decode_block(&s->gb, &h->mb[16*k], index, type)){
                        av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding block\n");
                        return -1;
                    }
                }
            }
        }

        if ((cbp & 0x30)) {
            for (i = 0; i < 2; ++i) {
                if (svq3_decode_block(&s->gb, &h->mb[16*(16 + 4*i)], 0, 3)){
                    av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n");
                    return -1;
                }
            }

            if ((cbp & 0x20)) {
                for (i = 0; i < 8; i++) {
                    h->non_zero_count_cache[ scan8[16+i] ] = 1;

                    if (svq3_decode_block(&s->gb, &h->mb[16*(16 + i)], 1, 1)){
                        av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n");
                        return -1;
                    }
                }
            }
        }
    }

    h->cbp= cbp;
    s->current_picture.mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type)) {
        h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8);
    }

    return 0;
}

static int svq3_decode_slice_header(H264Context *h)
{
    MpegEncContext *const s = (MpegEncContext *) h;
    const int mb_xy = h->mb_xy;
    int i, header;

    header = get_bits(&s->gb, 8);

    if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
        /* TODO: what? */
        av_log(h->s.avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
        return -1;
    } else {
        int length = (header >> 5) & 3;

        h->next_slice_index = get_bits_count(&s->gb) + 8*show_bits(&s->gb, 8*length) + 8*length;

        if (h->next_slice_index > s->gb.size_in_bits) {
            av_log(h->s.avctx, AV_LOG_ERROR, "slice after bitstream end\n");
            return -1;
        }

        s->gb.size_in_bits = h->next_slice_index - 8*(length - 1);
        skip_bits(&s->gb, 8);

        if (h->svq3_watermark_key) {
            uint32_t header = AV_RL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1]);
            AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1], header ^ h->svq3_watermark_key);
        }
        if (length > 0) {
            memcpy((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3],
                   &s->gb.buffer[s->gb.size_in_bits >> 3], (length - 1));
        }
        skip_bits_long(&s->gb, 0);
    }

    if ((i = svq3_get_ue_golomb(&s->gb)) == INVALID_VLC || i >= 3){
        av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", i);
        return -1;
    }

    h->slice_type = golomb_to_pict_type[i];

    if ((header & 0x9F) == 2) {
        i = (s->mb_num < 64) ? 6 : (1 + av_log2 (s->mb_num - 1));
        s->mb_skip_run = get_bits(&s->gb, i) - (s->mb_x + (s->mb_y * s->mb_width));
    } else {
        skip_bits1(&s->gb);
        s->mb_skip_run = 0;
    }

    h->slice_num = get_bits(&s->gb, 8);
    s->qscale = get_bits(&s->gb, 5);
    s->adaptive_quant = get_bits1(&s->gb);

    /* unknown fields */
    skip_bits1(&s->gb);

    if (h->unknown_svq3_flag) {
        skip_bits1(&s->gb);
    }

    skip_bits1(&s->gb);
    skip_bits(&s->gb, 2);

    while (get_bits1(&s->gb)) {
        skip_bits(&s->gb, 8);
    }

    /* reset intra predictors and invalidate motion vector references */
    if (s->mb_x > 0) {
        memset(h->intra4x4_pred_mode[mb_xy - 1], -1, 4*sizeof(int8_t));
        memset(h->intra4x4_pred_mode[mb_xy - s->mb_x], -1, 8*sizeof(int8_t)*s->mb_x);
    }
    if (s->mb_y > 0) {
        memset(h->intra4x4_pred_mode[mb_xy - s->mb_stride], -1, 8*sizeof(int8_t)*(s->mb_width - s->mb_x));

        if (s->mb_x > 0) {
            h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] = -1;
        }
    }

    return 0;
}

static av_cold int svq3_decode_init(AVCodecContext *avctx)
{
    MpegEncContext *const s = avctx->priv_data;
    H264Context *const h = avctx->priv_data;
    int m;
    unsigned char *extradata;
    unsigned int size;

    if(avctx->thread_count > 1){
        av_log(avctx, AV_LOG_ERROR, "SVQ3 does not support multithreaded decoding, patch welcome! (check latest SVN too)\n");
        return -1;
    }

    if (ff_h264_decode_init(avctx) < 0)
        return -1;

    s->flags = avctx->flags;
    s->flags2 = avctx->flags2;
    s->unrestricted_mv = 1;
    h->is_complex=1;
    avctx->pix_fmt = avctx->codec->pix_fmts[0];

    if (!s->context_initialized) {
        s->width = avctx->width;
        s->height = avctx->height;
        h->halfpel_flag = 1;
        h->thirdpel_flag = 1;
        h->unknown_svq3_flag = 0;
        h->chroma_qp[0] = h->chroma_qp[1] = 4;

        if (MPV_common_init(s) < 0)
            return -1;

        h->b_stride = 4*s->mb_width;

        ff_h264_alloc_tables(h);

        /* prowl for the "SEQH" marker in the extradata */
        extradata = (unsigned char *)avctx->extradata;
        for (m = 0; m < avctx->extradata_size; m++) {
            if (!memcmp(extradata, "SEQH", 4))
                break;
            extradata++;
        }

        /* if a match was found, parse the extra data */
        if (extradata && !memcmp(extradata, "SEQH", 4)) {

            GetBitContext gb;
            int frame_size_code;

            size = AV_RB32(&extradata[4]);
            init_get_bits(&gb, extradata + 8, size*8);

            /* 'frame size code' and optional 'width, height' */
            frame_size_code = get_bits(&gb, 3);
            switch (frame_size_code) {
            case 0: avctx->width = 160; avctx->height = 120; break;
            case 1: avctx->width = 128; avctx->height = 96; break;
            case 2: avctx->width = 176; avctx->height = 144; break;
            case 3: avctx->width = 352; avctx->height = 288; break;
            case 4: avctx->width = 704; avctx->height = 576; break;
            case 5: avctx->width = 240; avctx->height = 180; break;
            case 6: avctx->width = 320; avctx->height = 240; break;
            case 7:
                avctx->width = get_bits(&gb, 12);
                avctx->height = get_bits(&gb, 12);
                break;
            }

            h->halfpel_flag = get_bits1(&gb);
            h->thirdpel_flag = get_bits1(&gb);

            /* unknown fields */
            skip_bits1(&gb);
            skip_bits1(&gb);
            skip_bits1(&gb);
            skip_bits1(&gb);

            s->low_delay = get_bits1(&gb);

            /* unknown field */
            skip_bits1(&gb);

            while (get_bits1(&gb)) {
                skip_bits(&gb, 8);
            }

            h->unknown_svq3_flag = get_bits1(&gb);
            avctx->has_b_frames = !s->low_delay;
            if (h->unknown_svq3_flag) {
#if CONFIG_ZLIB
                unsigned watermark_width = svq3_get_ue_golomb(&gb);
                unsigned watermark_height = svq3_get_ue_golomb(&gb);
                int u1 = svq3_get_ue_golomb(&gb);
                int u2 = get_bits(&gb, 8);
                int u3 = get_bits(&gb, 2);
                int u4 = svq3_get_ue_golomb(&gb);
                unsigned buf_len = watermark_width*watermark_height*4;
                int offset = (get_bits_count(&gb)+7)>>3;
                uint8_t *buf;

                if ((uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
                    return -1;

                buf = av_malloc(buf_len);
                av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n", watermark_width, watermark_height);
                av_log(avctx, AV_LOG_DEBUG, "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n", u1, u2, u3, u4, offset);
                if (uncompress(buf, (uLong*)&buf_len, extradata + 8 + offset, size - offset) != Z_OK) {
                    av_log(avctx, AV_LOG_ERROR, "could not uncompress watermark logo\n");
                    av_free(buf);
                    return -1;
                }
                h->svq3_watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
                h->svq3_watermark_key = h->svq3_watermark_key << 16 | h->svq3_watermark_key;
                av_log(avctx, AV_LOG_DEBUG, "watermark key %#x\n", h->svq3_watermark_key);
                av_free(buf);
#else
                av_log(avctx, AV_LOG_ERROR, "this SVQ3 file contains a watermark which needs zlib support compiled in\n");
                return -1;
#endif
            }
        }
    }

    return 0;
}

static int svq3_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MpegEncContext *const s = avctx->priv_data;
    H264Context *const h = avctx->priv_data;
    int m, mb_type;

    /* special case for last picture */
    if (buf_size == 0) {
        if (s->next_picture_ptr && !s->low_delay) {
            *(AVFrame *) data = *(AVFrame *) &s->next_picture;
            s->next_picture_ptr = NULL;
            *data_size = sizeof(AVFrame);
        }
        return 0;
    }

    init_get_bits (&s->gb, buf, 8*buf_size);

    s->mb_x = s->mb_y = h->mb_xy = 0;

    if (svq3_decode_slice_header(h))
        return -1;

    s->pict_type = h->slice_type;
    s->picture_number = h->slice_num;

    if (avctx->debug&FF_DEBUG_PICT_INFO){
        av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
               av_get_pict_type_char(s->pict_type), h->halfpel_flag, h->thirdpel_flag,
               s->adaptive_quant, s->qscale, h->slice_num);
    }

    /* for hurry_up == 5 */
    s->current_picture.pict_type = s->pict_type;
    s->current_picture.key_frame = (s->pict_type == FF_I_TYPE);

    /* Skip B-frames if we do not have reference frames. */
    if (s->last_picture_ptr == NULL && s->pict_type == FF_B_TYPE)
        return 0;
    /* Skip B-frames if we are in a hurry. */
    if (avctx->hurry_up && s->pict_type == FF_B_TYPE)
        return 0;
    /* Skip everything if we are in a hurry >= 5. */
    if (avctx->hurry_up >= 5)
        return 0;
    if ( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
       ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
       || avctx->skip_frame >= AVDISCARD_ALL)
        return 0;

    if (s->next_p_frame_damaged) {
        if (s->pict_type == FF_B_TYPE)
            return 0;
        else
            s->next_p_frame_damaged = 0;
    }

    if (ff_h264_frame_start(h) < 0)
        return -1;

    if (s->pict_type == FF_B_TYPE) {
        h->frame_num_offset = (h->slice_num - h->prev_frame_num);

        if (h->frame_num_offset < 0) {
            h->frame_num_offset += 256;
        }
        if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) {
            av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
            return -1;
        }
    } else {
        h->prev_frame_num = h->frame_num;
        h->frame_num = h->slice_num;
        h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num);

        if (h->prev_frame_num_offset < 0) {
            h->prev_frame_num_offset += 256;
        }
    }

    for (m = 0; m < 2; m++){
        int i;
        for (i = 0; i < 4; i++){
            int j;
            for (j = -1; j < 4; j++)
                h->ref_cache[m][scan8[0] + 8*i + j]= 1;
            if (i < 3)
                h->ref_cache[m][scan8[0] + 8*i + j]= PART_NOT_AVAILABLE;
        }
    }

    for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
            h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;

            if ( (get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits &&
                ((get_bits_count(&s->gb) & 7) == 0 || show_bits(&s->gb, (-get_bits_count(&s->gb) & 7)) == 0)) {

                skip_bits(&s->gb, h->next_slice_index - get_bits_count(&s->gb));
                s->gb.size_in_bits = 8*buf_size;

                if (svq3_decode_slice_header(h))
                    return -1;

                /* TODO: support s->mb_skip_run */
            }

            mb_type = svq3_get_ue_golomb(&s->gb);

            if (s->pict_type == FF_I_TYPE) {
                mb_type += 8;
            } else if (s->pict_type == FF_B_TYPE && mb_type >= 4) {
                mb_type += 4;
            }
            if (mb_type > 33 || svq3_decode_mb(h, mb_type)) {
                av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            if (mb_type != 0) {
                ff_h264_hl_decode_mb (h);
            }

            if (s->pict_type != FF_B_TYPE && !s->low_delay) {
                s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
                    (s->pict_type == FF_P_TYPE && mb_type < 8) ? (mb_type - 1) : -1;
            }
        }

        ff_draw_horiz_band(s, 16*s->mb_y, 16);
    }

    MPV_frame_end(s);

    if (s->pict_type == FF_B_TYPE || s->low_delay) {
        *(AVFrame *) data = *(AVFrame *) &s->current_picture;
    } else {
        *(AVFrame *) data = *(AVFrame *) &s->last_picture;
    }

    /* Do not output the last pic after seeking. */
    if (s->last_picture_ptr || s->low_delay) {
        *data_size = sizeof(AVFrame);
    }

    return buf_size;
}


AVCodec svq3_decoder = {
    "svq3",
    CODEC_TYPE_VIDEO,
    CODEC_ID_SVQ3,
    sizeof(H264Context),
    svq3_decode_init,
    NULL,
    ff_h264_decode_end,
    svq3_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_NONE},
};