1 /*
2 * huffyuv codec for libavcodec
3 *
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
21 * the algorithm used
22 */
23
24 /**
25 * @file huffyuv.c
26 * huffyuv codec for libavcodec.
27 */
28
29 #include "common.h"
30 #include "bitstream.h"
31 #include "avcodec.h"
32 #include "dsputil.h"
33
34 #define VLC_BITS 11
35
36 #ifdef WORDS_BIGENDIAN
37 #define B 3
38 #define G 2
39 #define R 1
40 #else
41 #define B 0
42 #define G 1
43 #define R 2
44 #endif
45
46 typedef enum Predictor{
47 LEFT= 0,
48 PLANE,
49 MEDIAN,
50 } Predictor;
51
52 typedef struct HYuvContext{
53 AVCodecContext *avctx;
54 Predictor predictor;
55 GetBitContext gb;
56 PutBitContext pb;
57 int interlaced;
58 int decorrelate;
59 int bitstream_bpp;
60 int version;
61 int yuy2; //use yuy2 instead of 422P
62 int bgr32; //use bgr32 instead of bgr24
63 int width, height;
64 int flags;
65 int context;
66 int picture_number;
67 int last_slice_end;
68 uint8_t *temp[3];
69 uint64_t stats[3][256];
70 uint8_t len[3][256];
71 uint32_t bits[3][256];
72 VLC vlc[3];
73 AVFrame picture;
74 uint8_t *bitstream_buffer;
75 int bitstream_buffer_size;
76 DSPContext dsp;
77 }HYuvContext;
78
79 static const unsigned char classic_shift_luma[] = {
80 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
81 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
82 69,68, 0
83 };
84
85 static const unsigned char classic_shift_chroma[] = {
86 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
87 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
88 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
89 };
90
91 static const unsigned char classic_add_luma[256] = {
92 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
93 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
94 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
95 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
96 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
97 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
98 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
99 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
100 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
101 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
102 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
103 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
104 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
105 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
106 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
107 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
108 };
109
110 static const unsigned char classic_add_chroma[256] = {
111 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
112 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
113 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
114 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
115 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
116 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
117 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
118 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
119 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
120 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
121 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
122 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
123 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
124 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
125 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
126 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
127 };
128
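/**
 * Reconstructs left-predicted samples: each output byte is the previous
 * output plus the coded difference (only the low 8 bits of the accumulator
 * are significant). The first loop is unrolled to process two samples per
 * iteration; the running accumulator is returned so prediction can continue
 * on the next call.
 */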
129 static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
130 int i;
131
132 for(i=0; i<w-1; i++){
133 acc+= src[i];
134 dst[i]= acc;
135 i++;
136 acc+= src[i];
137 dst[i]= acc;
138 }
139
140 for(; i<w; i++){
141 acc+= src[i];
142 dst[i]= acc;
143 }
144
145 return acc;
146 }
147
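/**
 * Median prediction (MEDIAN mode): each sample is predicted as the median of
 * the left neighbour, the sample above (src1[i]) and their gradient
 * (left + above - above_left), then the decoded residual diff[i] is added.
 * *left and *left_top carry the prediction state across calls.
 */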
148 static inline void add_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *diff, int w, int *left, int *left_top){
149 int i;
150 uint8_t l, lt;
151
152 l= *left;
153 lt= *left_top;
154
155 for(i=0; i<w; i++){
156 l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
157 lt= src1[i];
158 dst[i]= l;
159 }
160
161 *left= l;
162 *left_top= lt;
163 }
164
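/**
 * Left prediction for packed 32-bit BGRA pixels: the B, G and R channels each
 * keep their own running sum (modulo 256); the alpha byte is left untouched.
 * The running values are passed back through *red, *green and *blue.
 */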
165 static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
166 int i;
167 int r,g,b;
168 r= *red;
169 g= *green;
170 b= *blue;
171
172 for(i=0; i<w; i++){
173 b+= src[4*i+B];
174 g+= src[4*i+G];
175 r+= src[4*i+R];
176
177 dst[4*i+B]= b;
178 dst[4*i+G]= g;
179 dst[4*i+R]= r;
180 }
181
182 *red= r;
183 *green= g;
184 *blue= b;
185 }
186
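/**
 * Encoder-side left prediction: writes the byte-wise differences between each
 * sample and its left neighbour (seeded with 'left') into dst and returns the
 * value to carry into the next call. For widths of at least 32 only the first
 * 16 differences are computed here; the rest are delegated to
 * dsp.diff_bytes(), which may be SIMD-optimized.
 */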
187 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
188 int i;
189 if(w<32){
190 for(i=0; i<w; i++){
191 const int temp= src[i];
192 dst[i]= temp - left;
193 left= temp;
194 }
195 return left;
196 }else{
197 for(i=0; i<16; i++){
198 const int temp= src[i];
199 dst[i]= temp - left;
200 left= temp;
201 }
202 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
203 return src[w-1];
204 }
205 }
206
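/**
 * Reads a run-length coded table of 256 Huffman code lengths: each record is
 * a 3-bit repeat count followed by a 5-bit length, and a repeat count of 0
 * means an explicit 8-bit count follows. For example, the byte 0x65
 * (binary 011 00101) emits three consecutive lengths of 5. The runs are
 * trusted to land exactly on 256 entries.
 */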
207 static void read_len_table(uint8_t *dst, GetBitContext *gb){
208 int i, val, repeat;
209
210 for(i=0; i<256;){
211 repeat= get_bits(gb, 3);
212 val = get_bits(gb, 5);
213 if(repeat==0)
214 repeat= get_bits(gb, 8);
215 //printf("%d %d\n", val, repeat);
216 while (repeat--)
217 dst[i++] = val;
218 }
219 }
220
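/**
 * Assigns canonical Huffman codes from the code lengths: symbols are visited
 * from the longest length down to the shortest, each receiving the next code
 * value in sequence. After finishing one length the running value must be
 * even -- otherwise the lengths do not form a valid prefix code and -1 is
 * returned -- and is halved before moving to the next shorter length.
 */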
221 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
222 int len, index;
223 uint32_t bits=0;
224
225 for(len=32; len>0; len--){
226 for(index=0; index<256; index++){
227 if(len_table[index]==len)
228 dst[index]= bits++;
229 }
230 if(bits & 1){
231 av_log(NULL, AV_LOG_ERROR, "Error generating Huffman table\n");
232 return -1;
233 }
234 bits >>= 1;
235 }
236 return 0;
237 }
238
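/**
 * Derives Huffman code lengths from the 64-bit symbol frequencies: the two
 * smallest counts are merged repeatedly (counts[]/up[] hold the node weights
 * and parent links) and each leaf's depth in the resulting tree becomes its
 * code length. If any length would reach 32 bits, the pass is restarted with
 * a larger bias ('offset', doubled on every retry) added to all counts, which
 * flattens the distribution until every length fits.
 */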
239 static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
240 uint64_t counts[2*size];
241 int up[2*size];
242 int offset, i, next;
243
244 for(offset=1; ; offset<<=1){
245 for(i=0; i<size; i++){
246 counts[i]= stats[i] + offset - 1;
247 }
248
249 for(next=size; next<size*2; next++){
250 uint64_t min1, min2;
251 int min1_i, min2_i;
252
253 min1=min2= INT64_MAX;
254 min1_i= min2_i=-1;
255
256 for(i=0; i<next; i++){
257 if(min2 > counts[i]){
258 if(min1 > counts[i]){
259 min2= min1;
260 min2_i= min1_i;
261 min1= counts[i];
262 min1_i= i;
263 }else{
264 min2= counts[i];
265 min2_i= i;
266 }
267 }
268 }
269
270 if(min2==INT64_MAX) break;
271
272 counts[next]= min1 + min2;
273 counts[min1_i]=
274 counts[min2_i]= INT64_MAX;
275 up[min1_i]=
276 up[min2_i]= next;
277 up[next]= -1;
278 }
279
280 for(i=0; i<size; i++){
281 int len;
282 int index=i;
283
284 for(len=0; up[index] != -1; len++)
285 index= up[index];
286
287 if(len >= 32) break;
288
289 dst[i]= len;
290 }
291 if(i==size) break;
292 }
293 }
294
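/**
 * Parses the three run-length coded length tables (from the extradata or a
 * per-frame header), derives the canonical codes and rebuilds the VLC decode
 * tables. Returns the number of bytes consumed, or -1 on an invalid table.
 */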
295 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
296 GetBitContext gb;
297 int i;
298
299 init_get_bits(&gb, src, length*8);
300
301 for(i=0; i<3; i++){
302 read_len_table(s->len[i], &gb);
303
304 if(generate_bits_table(s->bits[i], s->len[i])<0){
305 return -1;
306 }
307 #if 0
308 for(j=0; j<256; j++){
309 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
310 }
311 #endif
312 free_vlc(&s->vlc[i]);
313 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
314 }
315
316 return (get_bits_count(&gb)+7)/8;
317 }
318
319 static int read_old_huffman_tables(HYuvContext *s){
320 #if 1
321 GetBitContext gb;
322 int i;
323
324 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
325 read_len_table(s->len[0], &gb);
326 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
327 read_len_table(s->len[1], &gb);
328
329 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
330 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
331
332 if(s->bitstream_bpp >= 24){
333 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
334 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
335 }
336 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
337 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
338
339 for(i=0; i<3; i++){
340 free_vlc(&s->vlc[i]);
341 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
342 }
343
344 return 0;
345 #else
346 fprintf(stderr, "v1 huffyuv is not supported \n");
347 return -1;
348 #endif
349 }
350
351 static int common_init(AVCodecContext *avctx){
352 HYuvContext *s = avctx->priv_data;
353 int i;
354
355 s->avctx= avctx;
356 s->flags= avctx->flags;
357
358 dsputil_init(&s->dsp, avctx);
359
360 s->width= avctx->width;
361 s->height= avctx->height;
362 assert(s->width>0 && s->height>0);
363
364 for(i=0; i<3; i++){
365 s->temp[i]= av_malloc(avctx->width + 16);
366 }
367 return 0;
368 }
369
370 static int decode_init(AVCodecContext *avctx)
371 {
372 HYuvContext *s = avctx->priv_data;
373
374 common_init(avctx);
375 memset(s->vlc, 0, 3*sizeof(VLC));
376
377 avctx->coded_frame= &s->picture;
378 s->interlaced= s->height > 288;
379
380 s->bgr32=1;
381 //if(avctx->extradata)
382 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
383 if(avctx->extradata_size){
384 if((avctx->bits_per_sample&7) && avctx->bits_per_sample != 12)
385 s->version=1; // do such files exist at all?
386 else
387 s->version=2;
388 }else
389 s->version=0;
390
391 if(s->version==2){
392 int method, interlace;
393
394 method= ((uint8_t*)avctx->extradata)[0];
395 s->decorrelate= method&64 ? 1 : 0;
396 s->predictor= method&63;
397 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
398 if(s->bitstream_bpp==0)
399 s->bitstream_bpp= avctx->bits_per_sample&~7;
400 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
401 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
402 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
403
404 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
405 return -1;
406 }else{
407 switch(avctx->bits_per_sample&7){
408 case 1:
409 s->predictor= LEFT;
410 s->decorrelate= 0;
411 break;
412 case 2:
413 s->predictor= LEFT;
414 s->decorrelate= 1;
415 break;
416 case 3:
417 s->predictor= PLANE;
418 s->decorrelate= avctx->bits_per_sample >= 24;
419 break;
420 case 4:
421 s->predictor= MEDIAN;
422 s->decorrelate= 0;
423 break;
424 default:
425 s->predictor= LEFT; //OLD
426 s->decorrelate= 0;
427 break;
428 }
429 s->bitstream_bpp= avctx->bits_per_sample & ~7;
430 s->context= 0;
431
432 if(read_old_huffman_tables(s) < 0)
433 return -1;
434 }
435
436 switch(s->bitstream_bpp){
437 case 12:
438 avctx->pix_fmt = PIX_FMT_YUV420P;
439 break;
440 case 16:
441 if(s->yuy2){
442 avctx->pix_fmt = PIX_FMT_YUV422;
443 }else{
444 avctx->pix_fmt = PIX_FMT_YUV422P;
445 }
446 break;
447 case 24:
448 case 32:
449 if(s->bgr32){
450 avctx->pix_fmt = PIX_FMT_RGBA32;
451 }else{
452 avctx->pix_fmt = PIX_FMT_BGR24;
453 }
454 break;
455 default:
456 assert(0);
457 }
458
459 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);
460
461 return 0;
462 }
463
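/**
 * Inverse of read_len_table(): run-length encodes one 256-entry length table.
 * Runs of up to 7 equal lengths pack the count and the value into a single
 * byte; longer runs store the value with a zero count followed by an explicit
 * 8-bit repeat count. Returns the number of bytes written.
 */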
464 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
465 int i;
466 int index= 0;
467
468 for(i=0; i<256;){
469 int val= len[i];
470 int repeat=0;
471
472 for(; i<256 && len[i]==val && repeat<255; i++)
473 repeat++;
474
475 assert(val < 32 && val >0 && repeat<256 && repeat>0);
476 if(repeat>7){
477 buf[index++]= val;
478 buf[index++]= repeat;
479 }else{
480 buf[index++]= val | (repeat<<5);
481 }
482 }
483
484 return index;
485 }
486
487 static int encode_init(AVCodecContext *avctx)
488 {
489 HYuvContext *s = avctx->priv_data;
490 int i, j;
491
492 common_init(avctx);
493
494 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
495 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
496 s->version=2;
497
498 avctx->coded_frame= &s->picture;
499
500 switch(avctx->pix_fmt){
501 case PIX_FMT_YUV420P:
502 s->bitstream_bpp= 12;
503 break;
504 case PIX_FMT_YUV422P:
505 s->bitstream_bpp= 16;
506 break;
507 default:
508 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
509 return -1;
510 }
511 avctx->bits_per_sample= s->bitstream_bpp;
512 s->decorrelate= s->bitstream_bpp >= 24;
513 s->predictor= avctx->prediction_method;
514 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
515 if(avctx->context_model==1){
516 s->context= avctx->context_model;
517 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
518 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
519 return -1;
520 }
521 }else s->context= 0;
522
523 if(avctx->codec->id==CODEC_ID_HUFFYUV){
524 if(avctx->pix_fmt==PIX_FMT_YUV420P){
525 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
526 return -1;
527 }
528 if(avctx->context_model){
529 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
530 return -1;
531 }
532 if(s->interlaced != ( s->height > 288 ))
533 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
534 }else if(avctx->strict_std_compliance>=0){
535 av_log(avctx, AV_LOG_ERROR, "This codec is under development; files encoded with it may not be decodable with future versions! Set vstrict=-1 to use it anyway.\n");
536 return -1;
537 }
538
539 ((uint8_t*)avctx->extradata)[0]= s->predictor;
540 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
541 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
542 if(s->context)
543 ((uint8_t*)avctx->extradata)[2]|= 0x40;
544 ((uint8_t*)avctx->extradata)[3]= 0;
545 s->avctx->extradata_size= 4;
546
547 if(avctx->stats_in){
548 char *p= avctx->stats_in;
549
550 for(i=0; i<3; i++)
551 for(j=0; j<256; j++)
552 s->stats[i][j]= 1;
553
554 for(;;){
555 for(i=0; i<3; i++){
556 char *next;
557
558 for(j=0; j<256; j++){
559 s->stats[i][j]+= strtol(p, &next, 0);
560 if(next==p) return -1;
561 p=next;
562 }
563 }
564 if(p[0]==0 || p[1]==0 || p[2]==0) break;
565 }
566 }else{
567 for(i=0; i<3; i++)
568 for(j=0; j<256; j++){
569 int d= FFMIN(j, 256-j);
570
571 s->stats[i][j]= 100000000/(d+1);
572 }
573 }
574
575 for(i=0; i<3; i++){
576 generate_len_table(s->len[i], s->stats[i], 256);
577
578 if(generate_bits_table(s->bits[i], s->len[i])<0){
579 return -1;
580 }
581
582 s->avctx->extradata_size+=
583 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
584 }
585
586 if(s->context){
587 for(i=0; i<3; i++){
588 int pels = s->width*s->height / (i?40:10);
589 for(j=0; j<256; j++){
590 int d= FFMIN(j, 256-j);
591 s->stats[i][j]= pels/(d+1);
592 }
593 }
594 }else{
595 for(i=0; i<3; i++)
596 for(j=0; j<256; j++)
597 s->stats[i][j]= 0;
598 }
599
600 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);
601
602 s->picture_number=0;
603
604 return 0;
605 }
606
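/**
 * Decodes 'count' luma samples and count/2 samples of each chroma plane into
 * s->temp[]; the symbols are interleaved in the bitstream as Y U Y V.
 * decode_gray_bitstream() below is the luma-only variant used for the extra
 * luma rows of 4:2:0 material.
 */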
607 static void decode_422_bitstream(HYuvContext *s, int count){
608 int i;
609
610 count/=2;
611
612 for(i=0; i<count; i++){
613 s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
614 s->temp[1][ i ]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
615 s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
616 s->temp[2][ i ]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
617 }
618 }
619
620 static void decode_gray_bitstream(HYuvContext *s, int count){
621 int i;
622
623 count/=2;
624
625 for(i=0; i<count; i++){
626 s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
627 s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
628 }
629 }
630
631 static int encode_422_bitstream(HYuvContext *s, int count){
632 int i;
633
634 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
635 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
636 return -1;
637 }
638
639 count/=2;
640 if(s->flags&CODEC_FLAG_PASS1){
641 for(i=0; i<count; i++){
642 s->stats[0][ s->temp[0][2*i ] ]++;
643 s->stats[1][ s->temp[1][ i ] ]++;
644 s->stats[0][ s->temp[0][2*i+1] ]++;
645 s->stats[2][ s->temp[2][ i ] ]++;
646 }
647 }else if(s->context){
648 for(i=0; i<count; i++){
649 s->stats[0][ s->temp[0][2*i ] ]++;
650 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
651 s->stats[1][ s->temp[1][ i ] ]++;
652 put_bits(&s->pb, s->len[1][ s->temp[1][ i ] ], s->bits[1][ s->temp[1][ i ] ]);
653 s->stats[0][ s->temp[0][2*i+1] ]++;
654 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
655 s->stats[2][ s->temp[2][ i ] ]++;
656 put_bits(&s->pb, s->len[2][ s->temp[2][ i ] ], s->bits[2][ s->temp[2][ i ] ]);
657 }
658 }else{
659 for(i=0; i<count; i++){
660 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
661 put_bits(&s->pb, s->len[1][ s->temp[1][ i ] ], s->bits[1][ s->temp[1][ i ] ]);
662 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
663 put_bits(&s->pb, s->len[2][ s->temp[2][ i ] ], s->bits[2][ s->temp[2][ i ] ]);
664 }
665 }
666 return 0;
667 }
668
669 static int encode_gray_bitstream(HYuvContext *s, int count){
670 int i;
671
672 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
673 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
674 return -1;
675 }
676
677 count/=2;
678 if(s->flags&CODEC_FLAG_PASS1){
679 for(i=0; i<count; i++){
680 s->stats[0][ s->temp[0][2*i ] ]++;
681 s->stats[0][ s->temp[0][2*i+1] ]++;
682 }
683 }else if(s->context){
684 for(i=0; i<count; i++){
685 s->stats[0][ s->temp[0][2*i ] ]++;
686 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
687 s->stats[0][ s->temp[0][2*i+1] ]++;
688 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
689 }
690 }else{
691 for(i=0; i<count; i++){
692 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
693 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
694 }
695 }
696 return 0;
697 }
698
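/**
 * Decodes 'count' packed RGB pixels into s->temp[0]. With decorrelation the
 * bitstream carries G first and codes B and R as differences relative to G;
 * otherwise the channels are coded directly in B, G, R order. For 32 bpp
 * input a fourth symbol per pixel is read and discarded.
 */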
699 static void decode_bgr_bitstream(HYuvContext *s, int count){
700 int i;
701
702 if(s->decorrelate){
703 if(s->bitstream_bpp==24){
704 for(i=0; i<count; i++){
705 s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
706 s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
707 s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
708 }
709 }else{
710 for(i=0; i<count; i++){
711 s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
712 s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
713 s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
714 get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); // 4th symbol of the 32 bpp pixel is decoded and discarded
715 }
716 }
717 }else{
718 if(s->bitstream_bpp==24){
719 for(i=0; i<count; i++){
720 s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
721 s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
722 s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
723 }
724 }else{
725 for(i=0; i<count; i++){
726 s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
727 s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
728 s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
729 get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); // 4th symbol of the 32 bpp pixel is decoded and discarded
730 }
731 }
732 }
733 }
734
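/**
 * Reports the rows finished since the last call to the application through
 * draw_horiz_band(), mapping the luma row index to the corresponding chroma
 * row for 4:2:0 material and updating s->last_slice_end.
 */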
735 static void draw_slice(HYuvContext *s, int y){
736 int h, cy;
737 int offset[4];
738
739 if(s->avctx->draw_horiz_band==NULL)
740 return;
741
742 h= y - s->last_slice_end;
743 y -= h;
744
745 if(s->bitstream_bpp==12){
746 cy= y>>1;
747 }else{
748 cy= y;
749 }
750
751 offset[0] = s->picture.linesize[0]*y;
752 offset[1] = s->picture.linesize[1]*cy;
753 offset[2] = s->picture.linesize[2]*cy;
754 offset[3] = 0;
755 emms_c();
756
757 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
758
759 s->last_slice_end= y + h;
760 }
761
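/**
 * Decodes one frame. Every 32-bit word of the input is byte-swapped into
 * s->bitstream_buffer before parsing; if the context flag is set, per-frame
 * Huffman tables precede the image data. The first four coded bytes hold the
 * first pixel values and seed the left predictors, after which the planes are
 * reconstructed according to s->predictor. Returns the number of input bytes
 * consumed, rounded up to a whole 32-bit word.
 */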
762 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size){
763 HYuvContext *s = avctx->priv_data;
764 const int width= s->width;
765 const int width2= s->width>>1;
766 const int height= s->height;
767 int fake_ystride, fake_ustride, fake_vstride;
768 AVFrame * const p= &s->picture;
769 int table_size= 0;
770
771 AVFrame *picture = data;
772
773 /* no supplementary picture */
774 if (buf_size == 0)
775 return 0;
776
777 s->bitstream_buffer= av_fast_realloc(s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
778
779 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (uint32_t*)buf, buf_size/4);
780
781 if(p->data[0])
782 avctx->release_buffer(avctx, p);
783
784 p->reference= 0;
785 if(avctx->get_buffer(avctx, p) < 0){
786 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
787 return -1;
788 }
789
790 if(s->context){
791 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
792 if(table_size < 0)
793 return -1;
794 }
795
796 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
797
798 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
799 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
800 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
801
802 s->last_slice_end= 0;
803
804 if(s->bitstream_bpp<24){
805 int y, cy;
806 int lefty, leftu, leftv;
807 int lefttopy, lefttopu, lefttopv;
808
809 if(s->yuy2){
810 p->data[0][3]= get_bits(&s->gb, 8);
811 p->data[0][2]= get_bits(&s->gb, 8);
812 p->data[0][1]= get_bits(&s->gb, 8);
813 p->data[0][0]= get_bits(&s->gb, 8);
814
815 av_log(avctx, AV_LOG_ERROR, "YUY2 output isn't implemented yet\n");
816 return -1;
817 }else{
818
819 leftv= p->data[2][0]= get_bits(&s->gb, 8);
820 lefty= p->data[0][1]= get_bits(&s->gb, 8);
821 leftu= p->data[1][0]= get_bits(&s->gb, 8);
822 p->data[0][0]= get_bits(&s->gb, 8);
823
824 switch(s->predictor){
825 case LEFT:
826 case PLANE:
827 decode_422_bitstream(s, width-2);
828 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
829 if(!(s->flags&CODEC_FLAG_GRAY)){
830 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
831 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
832 }
833
834 for(cy=y=1; y<s->height; y++,cy++){
835 uint8_t *ydst, *udst, *vdst;
836
837 if(s->bitstream_bpp==12){
838 decode_gray_bitstream(s, width);
839
840 ydst= p->data[0] + p->linesize[0]*y;
841
842 lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
843 if(s->predictor == PLANE){
844 if(y>s->interlaced)
845 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
846 }
847 y++;
848 if(y>=s->height) break;
849 }
850
851 draw_slice(s, y);
852
853 ydst= p->data[0] + p->linesize[0]*y;
854 udst= p->data[1] + p->linesize[1]*cy;
855 vdst= p->data[2] + p->linesize[2]*cy;
856
857 decode_422_bitstream(s, width);
858 lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
859 if(!(s->flags&CODEC_FLAG_GRAY)){
860 leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
861 leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
862 }
863 if(s->predictor == PLANE){
864 if(cy>s->interlaced){
865 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
866 if(!(s->flags&CODEC_FLAG_GRAY)){
867 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
868 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
869 }
870 }
871 }
872 }
873 draw_slice(s, height);
874
875 break;
876 case MEDIAN:
877 /* first line except first 2 pixels is left predicted */
878 decode_422_bitstream(s, width-2);
879 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
880 if(!(s->flags&CODEC_FLAG_GRAY)){
881 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
882 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
883 }
884
885 cy=y=1;
886
887 /* second line is left predicted for interlaced case */
888 if(s->interlaced){
889 decode_422_bitstream(s, width);
890 lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
891 if(!(s->flags&CODEC_FLAG_GRAY)){
892 leftu= add_left_prediction(p->data[1] + p->linesize[1], s->temp[1], width2, leftu);
893 leftv= add_left_prediction(p->data[2] + p->linesize[2], s->temp[2], width2, leftv);
894 }
895 y++; cy++;
896 }
897
898 /* next 4 pixels are left predicted too */
899 decode_422_bitstream(s, 4);
900 lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
901 if(!(s->flags&CODEC_FLAG_GRAY)){
902 leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
903 leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
904 }
905
906 /* next line except the first 4 pixels is median predicted */
907 lefttopy= p->data[0][3];
908 decode_422_bitstream(s, width-4);
909 add_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
910 if(!(s->flags&CODEC_FLAG_GRAY)){
911 lefttopu= p->data[1][1];
912 lefttopv= p->data[2][1];
913 add_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
914 add_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
915 }
916 y++; cy++;
917
918 for(; y<height; y++,cy++){
919 uint8_t *ydst, *udst, *vdst;
920
921 if(s->bitstream_bpp==12){
922 while(2*cy > y){
923 decode_gray_bitstream(s, width);
924 ydst= p->data[0] + p->linesize[0]*y;
925 add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
926 y++;
927 }
928 if(y>=height) break;
929 }
930 draw_slice(s, y);
931
932 decode_422_bitstream(s, width);
933
934 ydst= p->data[0] + p->linesize[0]*y;
935 udst= p->data[1] + p->linesize[1]*cy;
936 vdst= p->data[2] + p->linesize[2]*cy;
937
938 add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
939 if(!(s->flags&CODEC_FLAG_GRAY)){
940 add_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
941 add_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
942 }
943 }
944
945 draw_slice(s, height);
946 break;
947 }
948 }
949 }else{
950 int y;
951 int leftr, leftg, leftb;
952 const int last_line= (height-1)*p->linesize[0];
953
954 if(s->bitstream_bpp==32){
955 skip_bits(&s->gb, 8);
956 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
957 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
958 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
959 }else{
960 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
961 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
962 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
963 skip_bits(&s->gb, 8);
964 }
965
966 if(s->bgr32){
967 switch(s->predictor){
968 case LEFT:
969 case PLANE:
970 decode_bgr_bitstream(s, width-1);
971 add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
972
973 for(y=s->height-2; y>=0; y--){ //yes, it's stored upside down
974 decode_bgr_bitstream(s, width);
975
976 add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
977 if(s->predictor == PLANE){
978 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
979 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
980 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
981 }
982 }
983 }
984 draw_slice(s, height); // just one large slice, as this isn't possible in reverse order
985 break;
986 default:
987 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
988 }
989 }else{
990
991 av_log(avctx, AV_LOG_ERROR, "BGR24 output isn't implemented yet\n");
992 return -1;
993 }
994 }
995 emms_c();
996
997 *picture= *p;
998 *data_size = sizeof(AVFrame);
999
1000 return (get_bits_count(&s->gb)+31)/32*4;
1001 }
1002
1003 static int common_end(HYuvContext *s){
1004 int i;
1005
1006 for(i=0; i<3; i++){
1007 av_freep(&s->temp[i]);
1008 }
1009 return 0;
1010 }
1011
1012 static int decode_end(AVCodecContext *avctx)
1013 {
1014 HYuvContext *s = avctx->priv_data;
1015 int i;
1016
1017 common_end(s);
1018 av_freep(&s->bitstream_buffer);
1019
1020 for(i=0; i<3; i++){
1021 free_vlc(&s->vlc[i]);
1022 }
1023
1024 return 0;
1025 }
1026
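/**
 * Encodes one frame. With the context model enabled, fresh code tables are
 * generated from the running symbol statistics and stored in front of the
 * entropy-coded data. Each plane is predicted according to s->predictor
 * (left, plane or median) and entropy coded, and the finished bitstream is
 * byte-swapped into 32-bit words. During pass 1 the coders only gather
 * statistics, which are periodically exported through avctx->stats_out.
 */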
1027 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1028 HYuvContext *s = avctx->priv_data;
1029 AVFrame *pict = data;
1030 const int width= s->width;
1031 const int width2= s->width>>1;
1032 const int height= s->height;
1033 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1034 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1035 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1036 AVFrame * const p= &s->picture;
1037 int i, j, size=0;
1038
1039 *p = *pict;
1040 p->pict_type= FF_I_TYPE;
1041 p->key_frame= 1;
1042
1043 if(s->context){
1044 for(i=0; i<3; i++){
1045 generate_len_table(s->len[i], s->stats[i], 256);
1046 if(generate_bits_table(s->bits[i], s->len[i])<0)
1047 return -1;
1048 size+= store_table(s, s->len[i], &buf[size]);
1049 }
1050
1051 for(i=0; i<3; i++)
1052 for(j=0; j<256; j++)
1053 s->stats[i][j] >>= 1;
1054 }
1055
1056 init_put_bits(&s->pb, buf+size, buf_size-size);
1057
1058 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1059 int lefty, leftu, leftv, y, cy;
1060
1061 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1062 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1063 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1064 put_bits(&s->pb, 8, p->data[0][0]);
1065
1066 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
1067 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
1068 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);
1069
1070 encode_422_bitstream(s, width-2);
1071
1072 if(s->predictor==MEDIAN){
1073 int lefttopy, lefttopu, lefttopv;
1074 cy=y=1;
1075 if(s->interlaced){
1076 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1077 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1078 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1079
1080 encode_422_bitstream(s, width);
1081 y++; cy++;
1082 }
1083
1084 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1085 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1086 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1087
1088 encode_422_bitstream(s, 4);
1089
1090 lefttopy= p->data[0][3];
1091 lefttopu= p->data[1][1];
1092 lefttopv= p->data[2][1];
1093 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1094 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1095 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1096 encode_422_bitstream(s, width-4);
1097 y++; cy++;
1098
1099 for(; y<height; y++,cy++){
1100 uint8_t *ydst, *udst, *vdst;
1101
1102 if(s->bitstream_bpp==12){
1103 while(2*cy > y){
1104 ydst= p->data[0] + p->linesize[0]*y;
1105 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1106 encode_gray_bitstream(s, width);
1107 y++;
1108 }
1109 if(y>=height) break;
1110 }
1111 ydst= p->data[0] + p->linesize[0]*y;
1112 udst= p->data[1] + p->linesize[1]*cy;
1113 vdst= p->data[2] + p->linesize[2]*cy;
1114
1115 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1116 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1117 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1118
1119 encode_422_bitstream(s, width);
1120 }
1121 }else{
1122 for(cy=y=1; y<height; y++,cy++){
1123 uint8_t *ydst, *udst, *vdst;
1124
1125 /* encode a luma only line & y++ */
1126 if(s->bitstream_bpp==12){
1127 ydst= p->data[0] + p->linesize[0]*y;
1128
1129 if(s->predictor == PLANE && s->interlaced < y){
1130 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1131
1132 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1133 }else{
1134 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1135 }
1136 encode_gray_bitstream(s, width);
1137 y++;
1138 if(y>=height) break;
1139 }
1140
1141 ydst= p->data[0] + p->linesize[0]*y;
1142 udst= p->data[1] + p->linesize[1]*cy;
1143 vdst= p->data[2] + p->linesize[2]*cy;
1144
1145 if(s->predictor == PLANE && s->interlaced < cy){
1146 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1147 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1148 s->dsp.diff_bytes(s->temp[2] + 1250, vdst, vdst - fake_vstride, width2);
1149
1150 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1151 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1152 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + 1250, width2, leftv);
1153 }else{
1154 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1155 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1156 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1157 }
1158
1159 encode_422_bitstream(s, width);
1160 }
1161 }
1162 }else{
1163 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1164 }
1165 emms_c();
1166
1167 size+= (put_bits_count(&s->pb)+31)/8;
1168 size/= 4;
1169
1170 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1171 int j;
1172 char *p= avctx->stats_out;
1173 for(i=0; i<3; i++){
1174 for(j=0; j<256; j++){
1175 sprintf(p, "%llu ", s->stats[i][j]);
1176 p+= strlen(p);
1177 s->stats[i][j]= 0;
1178 }
1179 sprintf(p, "\n");
1180 p++;
1181 }
1182 }else{
1183 flush_put_bits(&s->pb);
1184 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1185 avctx->stats_out[0] = '\0';
1186 }
1187
1188 s->picture_number++;
1189
1190 return size*4;
1191 }
1192
1193 static int encode_end(AVCodecContext *avctx)
1194 {
1195 HYuvContext *s = avctx->priv_data;
1196
1197 common_end(s);
1198
1199 av_freep(&avctx->extradata);
1200 av_freep(&avctx->stats_out);
1201
1202 return 0;
1203 }
1204
1205 static const AVOption huffyuv_options[] =
1206 {
1207 AVOPTION_CODEC_INT("prediction_method", "prediction_method", prediction_method, 0, 2, 0),
1208 AVOPTION_END()
1209 };
1210
1211 static const AVOption ffvhuff_options[] =
1212 {
1213 AVOPTION_CODEC_INT("prediction_method", "prediction_method", prediction_method, 0, 2, 0),
1214 AVOPTION_CODEC_INT("context_model", "context_model", context_model, 0, 2, 0),
1215 AVOPTION_END()
1216 };
1217
1218
1219 AVCodec huffyuv_decoder = {
1220 "huffyuv",
1221 CODEC_TYPE_VIDEO,
1222 CODEC_ID_HUFFYUV,
1223 sizeof(HYuvContext),
1224 decode_init,
1225 NULL,
1226 decode_end,
1227 decode_frame,
1228 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1229 NULL
1230 };
1231
1232 AVCodec ffvhuff_decoder = {
1233 "ffvhuff",
1234 CODEC_TYPE_VIDEO,
1235 CODEC_ID_FFVHUFF,
1236 sizeof(HYuvContext),
1237 decode_init,
1238 NULL,
1239 decode_end,
1240 decode_frame,
1241 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1242 NULL
1243 };
1244
1245 #ifdef CONFIG_ENCODERS
1246
1247 AVCodec huffyuv_encoder = {
1248 "huffyuv",
1249 CODEC_TYPE_VIDEO,
1250 CODEC_ID_HUFFYUV,
1251 sizeof(HYuvContext),
1252 encode_init,
1253 encode_frame,
1254 encode_end,
1255 .options = huffyuv_options,
1256 };
1257
1258 AVCodec ffvhuff_encoder = {
1259 "ffvhuff",
1260 CODEC_TYPE_VIDEO,
1261 CODEC_ID_FFVHUFF,
1262 sizeof(HYuvContext),
1263 encode_init,
1264 encode_frame,
1265 encode_end,
1266 .options = ffvhuff_options,
1267 };
1268
1269 #endif //CONFIG_ENCODERS