/* origin: commit e8a9ffa86d7cf534e0df7e81f88d4b1e75d0cbc6 — [libav.git] / libavcodec / huffyuv.c */
1 /*
2 * huffyuv codec for libavcodec
3 *
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
21 * the algorithm used
22 */
23
24 /**
25 * @file huffyuv.c
26 * huffyuv codec for libavcodec.
27 */
28
29 #include "common.h"
30 #include "bitstream.h"
31 #include "avcodec.h"
32 #include "dsputil.h"
33
34 #define VLC_BITS 11
35
36 #ifdef WORDS_BIGENDIAN
37 #define B 3
38 #define G 2
39 #define R 1
40 #else
41 #define B 0
42 #define G 1
43 #define R 2
44 #endif
45
/* Spatial prediction modes used by huffyuv bitstreams. */
typedef enum Predictor{
    LEFT= 0,  // predict each sample from its left neighbour
    PLANE,    // left-predict the difference to the line above (see decode_frame)
    MEDIAN,   // median of left, top and left+top-topleft (see add_median_prediction)
} Predictor;
51
/* Persistent state shared by the huffyuv encoder and decoder. */
typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;    // LEFT / PLANE / MEDIAN
    GetBitContext gb;       // bitstream reader (decoder side)
    PutBitContext pb;       // bitstream writer (encoder side)
    int interlaced;         // predict across 2-line strides when set
    int decorrelate;        // RGB: store G, B-G, R-G instead of raw components
    int bitstream_bpp;      // bits per pixel as coded in the stream (12/16/24/32)
    int version;            // 0: no extradata, 1: old style, 2: new-style header
    int yuy2; //use yuy2 instead of 422P
    int bgr32; //use bgr32 instead of bgr24
    int width, height;
    int flags;              // copy of avctx->flags
    int context;            // 1: adaptive per-frame huffman tables
    int picture_number;
    int last_slice_end;     // first row not yet handed to draw_horiz_band
    uint8_t __align8 temp[3][2560];   // per-plane scratch line buffers
    uint64_t stats[3][256]; // symbol occurrence counts per plane
    uint8_t len[3][256];    // huffman code lengths per plane
    uint32_t bits[3][256];  // huffman codes per plane
    VLC vlc[3];             // decode tables per plane
    AVFrame picture;
    uint8_t __align8 bitstream_buffer[1024*1024*3]; //FIXME dynamic alloc or some other solution
    DSPContext dsp;
}HYuvContext;
77
/* Hardcoded tables for "classic" (version-1) huffyuv files, which carry no
 * huffman tables in their extradata.  The *_shift_* tables are run-length
 * coded code-length tables (consumed by read_len_table); the *_add_* tables
 * are the corresponding code values loaded into s->bits[]. */
static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};
127
/* Undo left prediction on one line: dst[i] = running sum of src residuals.
 * Returns the final accumulator so it can seed the next call. */
static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
    int x;

    for(x=0; x<w; x++){
        acc += src[x];
        dst[x] = acc;   // stores truncate to 8 bits; acc itself is not masked
    }

    return acc;
}
146
/* Undo median prediction on one line: each output is the median of the left
 * neighbour, the sample above, and the gradient left+top-topleft, plus the
 * coded residual.  left/left_top are carried across calls. */
static inline void add_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *diff, int w, int *left, int *left_top){
    int x;
    uint8_t cur= *left;
    uint8_t topleft= *left_top;

    for(x=0; x<w; x++){
        cur= mid_pred(cur, src1[x], (cur + src1[x] - topleft)&0xFF) + diff[x];
        topleft= src1[x];
        dst[x]= cur;
    }

    *left= cur;
    *left_top= topleft;
}
163
164 static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
165 int i;
166 int r,g,b;
167 r= *red;
168 g= *green;
169 b= *blue;
170
171 for(i=0; i<w; i++){
172 b+= src[4*i+B];
173 g+= src[4*i+G];
174 r+= src[4*i+R];
175
176 dst[4*i+B]= b;
177 dst[4*i+G]= g;
178 dst[4*i+R]= r;
179 }
180
181 *red= r;
182 *green= g;
183 *blue= b;
184 }
185
186 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
187 int i;
188 if(w<32){
189 for(i=0; i<w; i++){
190 const int temp= src[i];
191 dst[i]= temp - left;
192 left= temp;
193 }
194 return left;
195 }else{
196 for(i=0; i<16; i++){
197 const int temp= src[i];
198 dst[i]= temp - left;
199 left= temp;
200 }
201 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
202 return src[w-1];
203 }
204 }
205
206 static void read_len_table(uint8_t *dst, GetBitContext *gb){
207 int i, val, repeat;
208
209 for(i=0; i<256;){
210 repeat= get_bits(gb, 3);
211 val = get_bits(gb, 5);
212 if(repeat==0)
213 repeat= get_bits(gb, 8);
214 //printf("%d %d\n", val, repeat);
215 while (repeat--)
216 dst[i++] = val;
217 }
218 }
219
220 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
221 int len, index;
222 uint32_t bits=0;
223
224 for(len=32; len>0; len--){
225 for(index=0; index<256; index++){
226 if(len_table[index]==len)
227 dst[index]= bits++;
228 }
229 if(bits & 1){
230 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
231 return -1;
232 }
233 bits >>= 1;
234 }
235 return 0;
236 }
237
/* Compute huffman code lengths from symbol statistics.
 * Builds the tree by repeatedly merging the two smallest weights; the bias
 * 'offset' is doubled and the whole build retried until no symbol needs a
 * code longer than 31 bits. */
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    uint64_t counts[2*size];
    int up[2*size];
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++)
            counts[i]= stats[i] + offset - 1;

        /* tree construction: nodes [size..2*size) are internal */
        for(next=size; next<size*2; next++){
            uint64_t best, second;
            int best_i, second_i;

            best= second= INT64_MAX;
            best_i= second_i= -1;

            for(i=0; i<next; i++){
                if(counts[i] >= second)
                    continue;
                if(counts[i] < best){
                    second  = best;
                    second_i= best_i;
                    best    = counts[i];
                    best_i  = i;
                }else{
                    second  = counts[i];
                    second_i= i;
                }
            }

            if(second==INT64_MAX)
                break;   // fewer than two live nodes: tree complete

            counts[next]= best + second;
            counts[best_i]=
            counts[second_i]= INT64_MAX;   // consumed
            up[best_i]=
            up[second_i]= next;
            up[next]= -1;
        }

        /* code length of a symbol == depth of its leaf */
        for(i=0; i<size; i++){
            int len, index;

            for(len=0, index=i; up[index] != -1; len++)
                index= up[index];

            if(len >= 32)
                break;   // too deep: retry with a larger offset

            dst[i]= len;
        }
        if(i==size)
            break;
    }
}
293
294 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
295 GetBitContext gb;
296 int i;
297
298 init_get_bits(&gb, src, length*8);
299
300 for(i=0; i<3; i++){
301 read_len_table(s->len[i], &gb);
302
303 if(generate_bits_table(s->bits[i], s->len[i])<0){
304 return -1;
305 }
306 #if 0
307 for(j=0; j<256; j++){
308 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
309 }
310 #endif
311 free_vlc(&s->vlc[i]);
312 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
313 }
314
315 return (get_bits_count(&gb)+7)/8;
316 }
317
318 static int read_old_huffman_tables(HYuvContext *s){
319 #if 1
320 GetBitContext gb;
321 int i;
322
323 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
324 read_len_table(s->len[0], &gb);
325 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
326 read_len_table(s->len[1], &gb);
327
328 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
329 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
330
331 if(s->bitstream_bpp >= 24){
332 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
333 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
334 }
335 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
336 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
337
338 for(i=0; i<3; i++){
339 free_vlc(&s->vlc[i]);
340 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
341 }
342
343 return 0;
344 #else
345 fprintf(stderr, "v1 huffyuv is not supported \n");
346 return -1;
347 #endif
348 }
349
/**
 * Decoder init: determine the huffyuv version from the extradata, read the
 * predictor / bpp / interlacing / context settings, load the huffman tables
 * and pick the output pixel format.
 * Returns 0 on success, -1 on invalid tables.
 */
static int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int width, height;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

    width= s->width= avctx->width;
    height= s->height= avctx->height;
    avctx->coded_frame= &s->picture;
    // heuristic default, may be overridden by version-2 extradata below
    s->interlaced= height > 288;

    s->bgr32=1;   // always decode RGB to RGBA32 (the BGR24 path is unimplemented)
    assert(width && height);
    if(avctx->extradata_size){
        if((avctx->bits_per_sample&7) && avctx->bits_per_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        /* extradata: [0]=method (bit 6: decorrelate, bits 0..5: predictor),
         * [1]=bpp, [2]=flags (bits 4..5: interlace override, bit 6: context),
         * [4..]=huffman tables */
        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        // 1 forces interlaced, 2 forces progressive, else keep the heuristic
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
            return -1;
    }else{
        /* old style: the predictor is encoded in the low bits of bpp */
        switch(avctx->bits_per_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    /* map stream bpp to the output pixel format */
    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGBA32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

    return 0;
}
450
451 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
452 int i;
453 int index= 0;
454
455 for(i=0; i<256;){
456 int val= len[i];
457 int repeat=0;
458
459 for(; i<256 && len[i]==val && repeat<255; i++)
460 repeat++;
461
462 assert(val < 32 && val >0 && repeat<256 && repeat>0);
463 if(repeat>7){
464 buf[index++]= val;
465 buf[index++]= repeat;
466 }else{
467 buf[index++]= val | (repeat<<5);
468 }
469 }
470
471 return index;
472 }
473
/**
 * Encoder init: validate the requested format/flags, build the initial
 * huffman tables (from 2-pass stats if provided, otherwise from a generic
 * Laplacian-like model) and write the 4-byte header plus the run-length
 * coded tables into the extradata.
 * Returns 0 on success, -1 on unsupported configuration or bad stats.
 */
static int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j, width, height;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    width= s->width= avctx->width;
    height= s->height= avctx->height;

    assert(width && height);

    avctx->extradata= av_mallocz(1024*30);
    avctx->stats_out= av_mallocz(1024*30);
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;   // never true here (YUV only)
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        // adaptive tables rewrite the stats each frame, clashing with 2-pass
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        /* restrictions of the original huffyuv bitstream (vs. ffvhuff) */
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }else if(avctx->strict_std_compliance>=0){
        av_log(avctx, AV_LOG_ERROR, "This codec is under development; files encoded with it may not be decodeable with future versions!!! Set vstrict=-1 to use it anyway.\n");
        return -1;
    }

    /* 4-byte header mirrored by decode_init: method, bpp, flags, reserved */
    ((uint8_t*)avctx->extradata)[0]= s->predictor;
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        /* 2nd pass: accumulate the symbol counts dumped during pass 1 */
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;   // malformed stats file
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        /* no stats: assume residuals cluster around 0 (mod 256) */
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
        store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        /* seed the adaptive statistics with the same model, scaled to the
         * plane size (chroma planes get 1/4 the luma weight) */
        for(i=0; i<3; i++){
            int pels = width*height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

    s->picture_number=0;

    return 0;
}
601
602 static void decode_422_bitstream(HYuvContext *s, int count){
603 int i;
604
605 count/=2;
606
607 for(i=0; i<count; i++){
608 s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
609 s->temp[1][ i ]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
610 s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
611 s->temp[2][ i ]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
612 }
613 }
614
615 static void decode_gray_bitstream(HYuvContext *s, int count){
616 int i;
617
618 count/=2;
619
620 for(i=0; i<count; i++){
621 s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
622 s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
623 }
624 }
625
/**
 * Entropy-code 'count' pixels of 4:2:2 residuals from s->temp.
 * Symbols are emitted in Y0 U Y1 V order (mirroring decode_422_bitstream).
 * Three modes: pass-1 gathers statistics only, context mode updates
 * statistics while coding, plain mode just codes.
 */
static void encode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        /* first pass of 2-pass encoding: count symbols, emit nothing */
        for(i=0; i<count; i++){
            s->stats[0][ s->temp[0][2*i  ] ]++;
            s->stats[1][ s->temp[1][ i ] ]++;
            s->stats[0][ s->temp[0][2*i+1] ]++;
            s->stats[2][ s->temp[2][ i ] ]++;
        }
    }else if(s->context){
        /* adaptive tables: update counts for the next frame while coding */
        for(i=0; i<count; i++){
            s->stats[0][ s->temp[0][2*i  ] ]++;
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i  ] ], s->bits[0][ s->temp[0][2*i  ] ]);
            s->stats[1][ s->temp[1][ i ] ]++;
            put_bits(&s->pb, s->len[1][ s->temp[1][ i ] ], s->bits[1][ s->temp[1][ i ] ]);
            s->stats[0][ s->temp[0][2*i+1] ]++;
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
            s->stats[2][ s->temp[2][ i ] ]++;
            put_bits(&s->pb, s->len[2][ s->temp[2][ i ] ], s->bits[2][ s->temp[2][ i ] ]);
        }
    }else{
        for(i=0; i<count; i++){
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i  ] ], s->bits[0][ s->temp[0][2*i  ] ]);
            put_bits(&s->pb, s->len[1][ s->temp[1][ i ] ], s->bits[1][ s->temp[1][ i ] ]);
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
            put_bits(&s->pb, s->len[2][ s->temp[2][ i ] ], s->bits[2][ s->temp[2][ i ] ]);
        }
    }
}
657
658 static void encode_gray_bitstream(HYuvContext *s, int count){
659 int i;
660
661 count/=2;
662 if(s->flags&CODEC_FLAG_PASS1){
663 for(i=0; i<count; i++){
664 s->stats[0][ s->temp[0][2*i ] ]++;
665 s->stats[0][ s->temp[0][2*i+1] ]++;
666 }
667 }else if(s->context){
668 for(i=0; i<count; i++){
669 s->stats[0][ s->temp[0][2*i ] ]++;
670 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
671 s->stats[0][ s->temp[0][2*i+1] ]++;
672 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
673 }
674 }else{
675 for(i=0; i<count; i++){
676 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
677 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
678 }
679 }
680 }
681
/**
 * Decode 'count' packed RGB pixels into s->temp[0] (4 bytes per pixel).
 * In decorrelate mode B and R are stored as differences to G and are
 * reconstructed here.  For 32 bpp streams a fourth symbol is read per pixel
 * but discarded (the alpha slot — decoded with the vlc[2] table, per the
 * original's "?!" comment; its value is never stored).
 */
static void decode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->decorrelate){
        if(s->bitstream_bpp==24){
            for(i=0; i<count; i++){
                s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
                s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            }
        }else{
            for(i=0; i<count; i++){
                s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
                s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
                get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?! discarded alpha symbol
            }
        }
    }else{
        if(s->bitstream_bpp==24){
            for(i=0; i<count; i++){
                s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
                s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
            }
        }else{
            for(i=0; i<count; i++){
                s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
                s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
                get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?! discarded alpha symbol
            }
        }
    }
}
717
718 static void draw_slice(HYuvContext *s, int y){
719 int h, cy;
720 int offset[4];
721
722 if(s->avctx->draw_horiz_band==NULL)
723 return;
724
725 h= y - s->last_slice_end;
726 y -= h;
727
728 if(s->bitstream_bpp==12){
729 cy= y>>1;
730 }else{
731 cy= y;
732 }
733
734 offset[0] = s->picture.linesize[0]*y;
735 offset[1] = s->picture.linesize[1]*cy;
736 offset[2] = s->picture.linesize[2]*cy;
737 offset[3] = 0;
738 emms_c();
739
740 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
741
742 s->last_slice_end= y + h;
743 }
744
745 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size){
746 HYuvContext *s = avctx->priv_data;
747 const int width= s->width;
748 const int width2= s->width>>1;
749 const int height= s->height;
750 int fake_ystride, fake_ustride, fake_vstride;
751 AVFrame * const p= &s->picture;
752 int table_size= 0;
753
754 AVFrame *picture = data;
755
756 /* no supplementary picture */
757 if (buf_size == 0)
758 return 0;
759
760 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (uint32_t*)buf, buf_size/4);
761
762 if(p->data[0])
763 avctx->release_buffer(avctx, p);
764
765 p->reference= 0;
766 if(avctx->get_buffer(avctx, p) < 0){
767 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
768 return -1;
769 }
770
771 if(s->context){
772 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
773 if(table_size < 0)
774 return -1;
775 }
776
777 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
778
779 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
780 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
781 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
782
783 s->last_slice_end= 0;
784
785 if(s->bitstream_bpp<24){
786 int y, cy;
787 int lefty, leftu, leftv;
788 int lefttopy, lefttopu, lefttopv;
789
790 if(s->yuy2){
791 p->data[0][3]= get_bits(&s->gb, 8);
792 p->data[0][2]= get_bits(&s->gb, 8);
793 p->data[0][1]= get_bits(&s->gb, 8);
794 p->data[0][0]= get_bits(&s->gb, 8);
795
796 av_log(avctx, AV_LOG_ERROR, "YUY2 output isnt implemenetd yet\n");
797 return -1;
798 }else{
799
800 leftv= p->data[2][0]= get_bits(&s->gb, 8);
801 lefty= p->data[0][1]= get_bits(&s->gb, 8);
802 leftu= p->data[1][0]= get_bits(&s->gb, 8);
803 p->data[0][0]= get_bits(&s->gb, 8);
804
805 switch(s->predictor){
806 case LEFT:
807 case PLANE:
808 decode_422_bitstream(s, width-2);
809 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
810 if(!(s->flags&CODEC_FLAG_GRAY)){
811 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
812 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
813 }
814
815 for(cy=y=1; y<s->height; y++,cy++){
816 uint8_t *ydst, *udst, *vdst;
817
818 if(s->bitstream_bpp==12){
819 decode_gray_bitstream(s, width);
820
821 ydst= p->data[0] + p->linesize[0]*y;
822
823 lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
824 if(s->predictor == PLANE){
825 if(y>s->interlaced)
826 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
827 }
828 y++;
829 if(y>=s->height) break;
830 }
831
832 draw_slice(s, y);
833
834 ydst= p->data[0] + p->linesize[0]*y;
835 udst= p->data[1] + p->linesize[1]*cy;
836 vdst= p->data[2] + p->linesize[2]*cy;
837
838 decode_422_bitstream(s, width);
839 lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
840 if(!(s->flags&CODEC_FLAG_GRAY)){
841 leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
842 leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
843 }
844 if(s->predictor == PLANE){
845 if(cy>s->interlaced){
846 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
847 if(!(s->flags&CODEC_FLAG_GRAY)){
848 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
849 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
850 }
851 }
852 }
853 }
854 draw_slice(s, height);
855
856 break;
857 case MEDIAN:
858 /* first line except first 2 pixels is left predicted */
859 decode_422_bitstream(s, width-2);
860 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
861 if(!(s->flags&CODEC_FLAG_GRAY)){
862 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
863 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
864 }
865
866 cy=y=1;
867
868 /* second line is left predicted for interlaced case */
869 if(s->interlaced){
870 decode_422_bitstream(s, width);
871 lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
872 if(!(s->flags&CODEC_FLAG_GRAY)){
873 leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
874 leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
875 }
876 y++; cy++;
877 }
878
879 /* next 4 pixels are left predicted too */
880 decode_422_bitstream(s, 4);
881 lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
882 if(!(s->flags&CODEC_FLAG_GRAY)){
883 leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
884 leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
885 }
886
887 /* next line except the first 4 pixels is median predicted */
888 lefttopy= p->data[0][3];
889 decode_422_bitstream(s, width-4);
890 add_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
891 if(!(s->flags&CODEC_FLAG_GRAY)){
892 lefttopu= p->data[1][1];
893 lefttopv= p->data[2][1];
894 add_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
895 add_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
896 }
897 y++; cy++;
898
899 for(; y<height; y++,cy++){
900 uint8_t *ydst, *udst, *vdst;
901
902 if(s->bitstream_bpp==12){
903 while(2*cy > y){
904 decode_gray_bitstream(s, width);
905 ydst= p->data[0] + p->linesize[0]*y;
906 add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
907 y++;
908 }
909 if(y>=height) break;
910 }
911 draw_slice(s, y);
912
913 decode_422_bitstream(s, width);
914
915 ydst= p->data[0] + p->linesize[0]*y;
916 udst= p->data[1] + p->linesize[1]*cy;
917 vdst= p->data[2] + p->linesize[2]*cy;
918
919 add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
920 if(!(s->flags&CODEC_FLAG_GRAY)){
921 add_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
922 add_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
923 }
924 }
925
926 draw_slice(s, height);
927 break;
928 }
929 }
930 }else{
931 int y;
932 int leftr, leftg, leftb;
933 const int last_line= (height-1)*p->linesize[0];
934
935 if(s->bitstream_bpp==32){
936 skip_bits(&s->gb, 8);
937 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
938 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
939 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
940 }else{
941 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
942 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
943 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
944 skip_bits(&s->gb, 8);
945 }
946
947 if(s->bgr32){
948 switch(s->predictor){
949 case LEFT:
950 case PLANE:
951 decode_bgr_bitstream(s, width-1);
952 add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
953
954 for(y=s->height-2; y>=0; y--){ //yes its stored upside down
955 decode_bgr_bitstream(s, width);
956
957 add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
958 if(s->predictor == PLANE){
959 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
960 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
961 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
962 }
963 }
964 }
965 draw_slice(s, height); // just 1 large slice as this isnt possible in reverse order
966 break;
967 default:
968 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
969 }
970 }else{
971
972 av_log(avctx, AV_LOG_ERROR, "BGR24 output isnt implemenetd yet\n");
973 return -1;
974 }
975 }
976 emms_c();
977
978 *picture= *p;
979 *data_size = sizeof(AVFrame);
980
981 return (get_bits_count(&s->gb)+31)/32*4;
982 }
983
984 static int decode_end(AVCodecContext *avctx)
985 {
986 HYuvContext *s = avctx->priv_data;
987 int i;
988
989 for(i=0; i<3; i++){
990 free_vlc(&s->vlc[i]);
991 }
992
993 return 0;
994 }
995
996 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
997 HYuvContext *s = avctx->priv_data;
998 AVFrame *pict = data;
999 const int width= s->width;
1000 const int width2= s->width>>1;
1001 const int height= s->height;
1002 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1003 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1004 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1005 AVFrame * const p= &s->picture;
1006 int i, j, size=0;
1007
1008 *p = *pict;
1009 p->pict_type= FF_I_TYPE;
1010 p->key_frame= 1;
1011
1012 if(s->context){
1013 for(i=0; i<3; i++){
1014 generate_len_table(s->len[i], s->stats[i], 256);
1015 if(generate_bits_table(s->bits[i], s->len[i])<0)
1016 return -1;
1017 size+= store_table(s, s->len[i], &buf[size]);
1018 }
1019
1020 for(i=0; i<3; i++)
1021 for(j=0; j<256; j++)
1022 s->stats[i][j] >>= 1;
1023 }
1024
1025 init_put_bits(&s->pb, buf+size, buf_size-size);
1026
1027 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1028 int lefty, leftu, leftv, y, cy;
1029
1030 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1031 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1032 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1033 put_bits(&s->pb, 8, p->data[0][0]);
1034
1035 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
1036 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
1037 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);
1038
1039 encode_422_bitstream(s, width-2);
1040
1041 if(s->predictor==MEDIAN){
1042 int lefttopy, lefttopu, lefttopv;
1043 cy=y=1;
1044 if(s->interlaced){
1045 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1046 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1047 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1048
1049 encode_422_bitstream(s, width);
1050 y++; cy++;
1051 }
1052
1053 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1054 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1055 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1056
1057 encode_422_bitstream(s, 4);
1058
1059 lefttopy= p->data[0][3];
1060 lefttopu= p->data[1][1];
1061 lefttopv= p->data[2][1];
1062 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1063 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1064 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1065 encode_422_bitstream(s, width-4);
1066 y++; cy++;
1067
1068 for(; y<height; y++,cy++){
1069 uint8_t *ydst, *udst, *vdst;
1070
1071 if(s->bitstream_bpp==12){
1072 while(2*cy > y){
1073 ydst= p->data[0] + p->linesize[0]*y;
1074 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1075 encode_gray_bitstream(s, width);
1076 y++;
1077 }
1078 if(y>=height) break;
1079 }
1080 ydst= p->data[0] + p->linesize[0]*y;
1081 udst= p->data[1] + p->linesize[1]*cy;
1082 vdst= p->data[2] + p->linesize[2]*cy;
1083
1084 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1085 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1086 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1087
1088 encode_422_bitstream(s, width);
1089 }
1090 }else{
1091 for(cy=y=1; y<height; y++,cy++){
1092 uint8_t *ydst, *udst, *vdst;
1093
1094 /* encode a luma only line & y++ */
1095 if(s->bitstream_bpp==12){
1096 ydst= p->data[0] + p->linesize[0]*y;
1097
1098 if(s->predictor == PLANE && s->interlaced < y){
1099 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1100
1101 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1102 }else{
1103 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1104 }
1105 encode_gray_bitstream(s, width);
1106 y++;
1107 if(y>=height) break;
1108 }
1109
1110 ydst= p->data[0] + p->linesize[0]*y;
1111 udst= p->data[1] + p->linesize[1]*cy;
1112 vdst= p->data[2] + p->linesize[2]*cy;
1113
1114 if(s->predictor == PLANE && s->interlaced < cy){
1115 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1116 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1117 s->dsp.diff_bytes(s->temp[2] + 1250, vdst, vdst - fake_vstride, width2);
1118
1119 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1120 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1121 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + 1250, width2, leftv);
1122 }else{
1123 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1124 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1125 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1126 }
1127
1128 encode_422_bitstream(s, width);
1129 }
1130 }
1131 }else{
1132 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1133 }
1134 emms_c();
1135
1136 size+= (put_bits_count(&s->pb)+31)/8;
1137 size/= 4;
1138
1139 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1140 int j;
1141 char *p= avctx->stats_out;
1142 for(i=0; i<3; i++){
1143 for(j=0; j<256; j++){
1144 sprintf(p, "%llu ", s->stats[i][j]);
1145 p+= strlen(p);
1146 s->stats[i][j]= 0;
1147 }
1148 sprintf(p, "\n");
1149 p++;
1150 }
1151 }else{
1152 flush_put_bits(&s->pb);
1153 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1154 avctx->stats_out[0] = '\0';
1155 }
1156
1157 s->picture_number++;
1158
1159 return size*4;
1160 }
1161
1162 static int encode_end(AVCodecContext *avctx)
1163 {
1164 // HYuvContext *s = avctx->priv_data;
1165
1166 av_freep(&avctx->extradata);
1167 av_freep(&avctx->stats_out);
1168
1169 return 0;
1170 }
1171
/* Option table for the native huffyuv encoder.
 * prediction_method: integer in [0,2] selecting the Predictor enum
 * (0 = LEFT, 1 = PLANE, 2 = MEDIAN); default 0. */
static const AVOption huffyuv_options[] =
{
    AVOPTION_CODEC_INT("prediction_method", "prediction_method", prediction_method, 0, 2, 0),
    AVOPTION_END()
};
1177
/* Option table for the ffvhuff (FFmpeg huffyuv variant) encoder.
 * prediction_method: integer in [0,2] selecting the Predictor enum
 * (0 = LEFT, 1 = PLANE, 2 = MEDIAN); default 0.
 * context_model: integer in [0,2]; enables adaptive context modeling
 * (stored in HYuvContext.context); default 0. */
static const AVOption ffvhuff_options[] =
{
    AVOPTION_CODEC_INT("prediction_method", "prediction_method", prediction_method, 0, 2, 0),
    AVOPTION_CODEC_INT("context_model", "context_model", context_model, 0, 2, 0),
    AVOPTION_END()
};
1184
1185
/* Decoder registration for the original huffyuv bitstream.
 * Positional initialization — field order must match AVCodec in
 * avcodec.h (name, type, id, priv_data_size, init, encode, close,
 * decode, capabilities, next); verify against that header. */
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,                   /* encode callback — decoder only */
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL                    /* NOTE(review): likely the 'next' link — confirm */
};
1198
/* Decoder registration for the FFmpeg huffyuv variant (ffvhuff);
 * shares all callbacks and context with the huffyuv decoder, only
 * the name and codec ID differ. Positional initialization — field
 * order must match AVCodec in avcodec.h; verify against that header. */
AVCodec ffvhuff_decoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,
    NULL,                   /* encode callback — decoder only */
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL                    /* NOTE(review): likely the 'next' link — confirm */
};
1211
1212 #ifdef CONFIG_ENCODERS
1213
/* Encoder registration for the original huffyuv bitstream.
 * Positional fields (name, type, id, priv_data_size, init, encode,
 * close) mixed with a designated .options — order must match AVCodec
 * in avcodec.h; verify against that header. */
AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .options = huffyuv_options,  /* prediction_method only */
};
1224
/* Encoder registration for the FFmpeg huffyuv variant (ffvhuff);
 * same callbacks as the huffyuv encoder but a different codec ID and
 * an extra context_model option. Positional fields mixed with a
 * designated .options — order must match AVCodec in avcodec.h;
 * verify against that header. */
AVCodec ffvhuff_encoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .options = ffvhuff_options,  /* prediction_method + context_model */
};
1235
1236 #endif //CONFIG_ENCODERS