1 /*
2 * huffyuv codec for libavcodec
3 *
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7 * the algorithm used
8 *
9 * This file is part of FFmpeg.
10 *
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
15 *
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26 /**
27 * @file libavcodec/huffyuv.c
28 * huffyuv codec for libavcodec.
29 */
30
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "put_bits.h"
34 #include "dsputil.h"
35
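/* Size in bits of the first-level VLC lookup tables. 11 bits is large enough
 * that most two- and three-symbol code combinations fit into a single
 * get_vlc2() read (see generate_joint_tables() below). */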
36 #define VLC_BITS 11
37
38 #if HAVE_BIGENDIAN
39 #define B 3
40 #define G 2
41 #define R 1
42 #define A 0
43 #else
44 #define B 0
45 #define G 1
46 #define R 2
47 #define A 3
48 #endif
49
50 typedef enum Predictor{
51 LEFT= 0,
52 PLANE,
53 MEDIAN,
54 } Predictor;
55
56 typedef struct HYuvContext{
57 AVCodecContext *avctx;
58 Predictor predictor;
59 GetBitContext gb;
60 PutBitContext pb;
61 int interlaced;
62 int decorrelate;
63 int bitstream_bpp;
64 int version;
65 int yuy2; //use yuy2 instead of 422P
66 int bgr32; //use bgr32 instead of bgr24
67 int width, height;
68 int flags;
69 int context;
70 int picture_number;
71 int last_slice_end;
72 uint8_t *temp[3];
73 uint64_t stats[3][256];
74 uint8_t len[3][256];
75 uint32_t bits[3][256];
76 uint32_t pix_bgr_map[1<<VLC_BITS];
77 VLC vlc[6]; //Y,U,V,YY,YU,YV
78 AVFrame picture;
79 uint8_t *bitstream_buffer;
80 unsigned int bitstream_buffer_size;
81 DSPContext dsp;
82 }HYuvContext;
83
84 static const unsigned char classic_shift_luma[] = {
85 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
86 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
87 69,68, 0
88 };
89
90 static const unsigned char classic_shift_chroma[] = {
91 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
92 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
93 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
94 };
95
96 static const unsigned char classic_add_luma[256] = {
97 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
98 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
99 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
100 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
101 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
102 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
103 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
104 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
105 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
106 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
107 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
108 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
109 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
110 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
111 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
112 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
113 };
114
115 static const unsigned char classic_add_chroma[256] = {
116 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
117 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
118 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
119 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
120 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
121 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
122 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
123 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
124 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
125 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
126 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
127 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
128 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
129 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
130 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
131 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
132 };
133
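/* Subtract each byte from its left neighbour, writing the differences to dst
 * and returning the last source byte so the caller can carry the prediction
 * into the next call. The first 16 bytes are handled in plain C, presumably so
 * that the (possibly SIMD) diff_bytes() call starts at a convenient offset. */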
134 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
135 int i;
136 if(w<32){
137 for(i=0; i<w; i++){
138 const int temp= src[i];
139 dst[i]= temp - left;
140 left= temp;
141 }
142 return left;
143 }else{
144 for(i=0; i<16; i++){
145 const int temp= src[i];
146 dst[i]= temp - left;
147 left= temp;
148 }
149 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
150 return src[w-1];
151 }
152 }
153
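/* Same idea as sub_left_prediction(), but applied per channel to packed
 * 32-bit BGRA pixels; the previous pixel's channels are carried in
 * *red, *green and *blue. */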
154 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
155 int i;
156 int r,g,b;
157 r= *red;
158 g= *green;
159 b= *blue;
160 for(i=0; i<FFMIN(w,4); i++){
161 const int rt= src[i*4+R];
162 const int gt= src[i*4+G];
163 const int bt= src[i*4+B];
164 dst[i*4+R]= rt - r;
165 dst[i*4+G]= gt - g;
166 dst[i*4+B]= bt - b;
167 r = rt;
168 g = gt;
169 b = bt;
170 }
171 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
172 *red= src[(w-1)*4+R];
173 *green= src[(w-1)*4+G];
174 *blue= src[(w-1)*4+B];
175 }
176
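/* Read a run-length coded table of 256 Huffman code lengths. Each run is
 * stored as a 3-bit repeat count followed by a 5-bit length; a repeat count
 * of 0 means an explicit 8-bit repeat count follows. For example, the pair
 * (repeat=4, val=7) expands to four consecutive symbols of length 7. */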
177 static int read_len_table(uint8_t *dst, GetBitContext *gb){
178 int i, val, repeat;
179
180 for(i=0; i<256;){
181 repeat= get_bits(gb, 3);
182 val = get_bits(gb, 5);
183 if(repeat==0)
184 repeat= get_bits(gb, 8);
185 //printf("%d %d\n", val, repeat);
186 if(i+repeat > 256) {
187 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
188 return -1;
189 }
190 while (repeat--)
191 dst[i++] = val;
192 }
193 return 0;
194 }
195
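/* Assign canonical Huffman codes to the given code lengths, walking from the
 * longest length (32) down to 1. The parity check catches length tables that
 * do not describe a complete prefix code. */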
196 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
197 int len, index;
198 uint32_t bits=0;
199
200 for(len=32; len>0; len--){
201 for(index=0; index<256; index++){
202 if(len_table[index]==len)
203 dst[index]= bits++;
204 }
205 if(bits & 1){
206 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
207 return -1;
208 }
209 bits >>= 1;
210 }
211 return 0;
212 }
213
214 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
215 typedef struct {
216 uint64_t val;
217 int name;
218 } HeapElem;
219
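/* Standard sift-down for a min-heap ordered by .val. */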
220 static void heap_sift(HeapElem *h, int root, int size)
221 {
222 while(root*2+1 < size) {
223 int child = root*2+1;
224 if(child < size-1 && h[child].val > h[child+1].val)
225 child++;
226 if(h[root].val > h[child].val) {
227 FFSWAP(HeapElem, h[root], h[child]);
228 root = child;
229 } else
230 break;
231 }
232 }
233
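/* Build Huffman code lengths from symbol frequencies using a min-heap.
 * If any resulting length reaches 32 bits, the frequencies are flattened by
 * adding a successively larger offset and the tree is rebuilt, trading a
 * little optimality for bounded code lengths. */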
234 static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
235 HeapElem h[size];
236 int up[2*size];
237 int len[2*size];
238 int offset, i, next;
239
240 for(offset=1; ; offset<<=1){
241 for(i=0; i<size; i++){
242 h[i].name = i;
243 h[i].val = (stats[i] << 8) + offset;
244 }
245 for(i=size/2-1; i>=0; i--)
246 heap_sift(h, i, size);
247
248 for(next=size; next<size*2-1; next++){
 249             // merge the two smallest entries and put the result back in the heap
250 uint64_t min1v = h[0].val;
251 up[h[0].name] = next;
252 h[0].val = INT64_MAX;
253 heap_sift(h, 0, size);
254 up[h[0].name] = next;
255 h[0].name = next;
256 h[0].val += min1v;
257 heap_sift(h, 0, size);
258 }
259
260 len[2*size-2] = 0;
261 for(i=2*size-3; i>=size; i--)
262 len[i] = len[up[i]] + 1;
263 for(i=0; i<size; i++) {
264 dst[i] = len[up[i]] + 1;
265 if(dst[i] >= 32) break;
266 }
267 if(i==size) break;
268 }
269 }
270 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
271
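/* Build the joint VLC tables used by the fast decode path. For YUV streams,
 * vlc[3..5] map one lookup of up to VLC_BITS to a (Y,Y), (Y,U) or (Y,V)
 * symbol pair, with 0xffff reserved to mean "combination too long, decode
 * the two symbols separately". For RGB streams, vlc[3] maps one lookup to a
 * whole (G,B,R) pixel via pix_bgr_map, covering only small difference values
 * since those dominate. */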
272 static void generate_joint_tables(HYuvContext *s){
273 uint16_t symbols[1<<VLC_BITS];
274 uint16_t bits[1<<VLC_BITS];
275 uint8_t len[1<<VLC_BITS];
276 if(s->bitstream_bpp < 24){
277 int p, i, y, u;
278 for(p=0; p<3; p++){
279 for(i=y=0; y<256; y++){
280 int len0 = s->len[0][y];
281 int limit = VLC_BITS - len0;
282 if(limit <= 0)
283 continue;
284 for(u=0; u<256; u++){
285 int len1 = s->len[p][u];
286 if(len1 > limit)
287 continue;
288 len[i] = len0 + len1;
289 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
290 symbols[i] = (y<<8) + u;
291 if(symbols[i] != 0xffff) // reserved to mean "invalid"
292 i++;
293 }
294 }
295 free_vlc(&s->vlc[3+p]);
296 init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
297 }
298 }else{
299 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
300 int i, b, g, r, code;
301 int p0 = s->decorrelate;
302 int p1 = !s->decorrelate;
 303                 // restrict the range to +/-16 because that's pretty much guaranteed to
304 // cover all the combinations that fit in 11 bits total, and it doesn't
305 // matter if we miss a few rare codes.
306 for(i=0, g=-16; g<16; g++){
307 int len0 = s->len[p0][g&255];
308 int limit0 = VLC_BITS - len0;
309 if(limit0 < 2)
310 continue;
311 for(b=-16; b<16; b++){
312 int len1 = s->len[p1][b&255];
313 int limit1 = limit0 - len1;
314 if(limit1 < 1)
315 continue;
316 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
317 for(r=-16; r<16; r++){
318 int len2 = s->len[2][r&255];
319 if(len2 > limit1)
320 continue;
321 len[i] = len0 + len1 + len2;
322 bits[i] = (code << len2) + s->bits[2][r&255];
323 if(s->decorrelate){
324 map[i][G] = g;
325 map[i][B] = g+b;
326 map[i][R] = g+r;
327 }else{
328 map[i][B] = g;
329 map[i][G] = b;
330 map[i][R] = r;
331 }
332 i++;
333 }
334 }
335 }
336 free_vlc(&s->vlc[3]);
337 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
338 }
339 }
340
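/* Parse the three RLE-coded length tables from src, derive the canonical
 * codes and per-plane VLCs from them, rebuild the joint tables and return
 * the number of bytes consumed. */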
341 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
342 GetBitContext gb;
343 int i;
344
345 init_get_bits(&gb, src, length*8);
346
347 for(i=0; i<3; i++){
348 if(read_len_table(s->len[i], &gb)<0)
349 return -1;
350 if(generate_bits_table(s->bits[i], s->len[i])<0){
351 return -1;
352 }
353 #if 0
354 for(j=0; j<256; j++){
355 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
356 }
357 #endif
358 free_vlc(&s->vlc[i]);
359 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
360 }
361
362 generate_joint_tables(s);
363
364 return (get_bits_count(&gb)+7)/8;
365 }
366
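/* Set up the fixed tables used by "classic" huffyuv files that carry no
 * table data of their own: the code lengths come from the hardcoded
 * classic_shift_* arrays and the code values from classic_add_*, with all
 * three planes sharing the luma table for RGB streams. */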
367 static int read_old_huffman_tables(HYuvContext *s){
368 #if 1
369 GetBitContext gb;
370 int i;
371
372 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
373 if(read_len_table(s->len[0], &gb)<0)
374 return -1;
375 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
376 if(read_len_table(s->len[1], &gb)<0)
377 return -1;
378
379 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
380 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
381
382 if(s->bitstream_bpp >= 24){
383 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
384 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
385 }
386 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
387 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
388
389 for(i=0; i<3; i++){
390 free_vlc(&s->vlc[i]);
391 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
392 }
393
394 generate_joint_tables(s);
395
396 return 0;
397 #else
398 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
399 return -1;
400 #endif
401 }
402
403 static av_cold void alloc_temp(HYuvContext *s){
404 int i;
405
406 if(s->bitstream_bpp<24){
407 for(i=0; i<3; i++){
408 s->temp[i]= av_malloc(s->width + 16);
409 }
410 }else{
411 s->temp[0]= av_mallocz(4*s->width + 16);
412 }
413 }
414
415 static av_cold int common_init(AVCodecContext *avctx){
416 HYuvContext *s = avctx->priv_data;
417
418 s->avctx= avctx;
419 s->flags= avctx->flags;
420
421 dsputil_init(&s->dsp, avctx);
422
423 s->width= avctx->width;
424 s->height= avctx->height;
425 assert(s->width>0 && s->height>0);
426
427 return 0;
428 }
429
430 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
431 static av_cold int decode_init(AVCodecContext *avctx)
432 {
433 HYuvContext *s = avctx->priv_data;
434
435 common_init(avctx);
436 memset(s->vlc, 0, 3*sizeof(VLC));
437
438 avctx->coded_frame= &s->picture;
439 s->interlaced= s->height > 288;
440
441 s->bgr32=1;
442 //if(avctx->extradata)
443 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
444 if(avctx->extradata_size){
445 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
446 s->version=1; // do such files exist at all?
447 else
448 s->version=2;
449 }else
450 s->version=0;
451
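    /* Layout of the version-2 extradata header (matching what encode_init()
     * writes): byte 0 = predictor in the low 6 bits, decorrelate flag in bit 6;
     * byte 1 = bits per pixel of the bitstream; byte 2 = interlacing override
     * in bits 4-5 (1 = interlaced, 2 = progressive) and per-frame-context flag
     * in bit 6; byte 3 = reserved; the RLE-coded length tables follow.
     * As a hypothetical example, 0x02 0x10 0x20 0x00 would describe a
     * MEDIAN-predicted, 16 bpp, progressive stream without per-frame tables. */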
452 if(s->version==2){
453 int method, interlace;
454
455 method= ((uint8_t*)avctx->extradata)[0];
456 s->decorrelate= method&64 ? 1 : 0;
457 s->predictor= method&63;
458 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
459 if(s->bitstream_bpp==0)
460 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
461 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
462 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
463 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
464
465 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
466 return -1;
467 }else{
468 switch(avctx->bits_per_coded_sample&7){
469 case 1:
470 s->predictor= LEFT;
471 s->decorrelate= 0;
472 break;
473 case 2:
474 s->predictor= LEFT;
475 s->decorrelate= 1;
476 break;
477 case 3:
478 s->predictor= PLANE;
479 s->decorrelate= avctx->bits_per_coded_sample >= 24;
480 break;
481 case 4:
482 s->predictor= MEDIAN;
483 s->decorrelate= 0;
484 break;
485 default:
486 s->predictor= LEFT; //OLD
487 s->decorrelate= 0;
488 break;
489 }
490 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
491 s->context= 0;
492
493 if(read_old_huffman_tables(s) < 0)
494 return -1;
495 }
496
497 switch(s->bitstream_bpp){
498 case 12:
499 avctx->pix_fmt = PIX_FMT_YUV420P;
500 break;
501 case 16:
502 if(s->yuy2){
503 avctx->pix_fmt = PIX_FMT_YUYV422;
504 }else{
505 avctx->pix_fmt = PIX_FMT_YUV422P;
506 }
507 break;
508 case 24:
509 case 32:
510 if(s->bgr32){
511 avctx->pix_fmt = PIX_FMT_RGB32;
512 }else{
513 avctx->pix_fmt = PIX_FMT_BGR24;
514 }
515 break;
516 default:
517 assert(0);
518 }
519
520 alloc_temp(s);
521
522 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
523
524 return 0;
525 }
526 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
527
528 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
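/* Write a length table in the RLE format understood by read_len_table():
 * runs of up to 7 equal lengths become one byte (repeat<<5 | val), longer
 * runs become a val byte (upper three bits zero) followed by an 8-bit repeat
 * count. Returns the number of bytes written. */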
529 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
530 int i;
531 int index= 0;
532
533 for(i=0; i<256;){
534 int val= len[i];
535 int repeat=0;
536
537 for(; i<256 && len[i]==val && repeat<255; i++)
538 repeat++;
539
540 assert(val < 32 && val >0 && repeat<256 && repeat>0);
541 if(repeat>7){
542 buf[index++]= val;
543 buf[index++]= repeat;
544 }else{
545 buf[index++]= val | (repeat<<5);
546 }
547 }
548
549 return index;
550 }
551
552 static av_cold int encode_init(AVCodecContext *avctx)
553 {
554 HYuvContext *s = avctx->priv_data;
555 int i, j;
556
557 common_init(avctx);
558
559 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
560 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
561 s->version=2;
562
563 avctx->coded_frame= &s->picture;
564
565 switch(avctx->pix_fmt){
566 case PIX_FMT_YUV420P:
567 s->bitstream_bpp= 12;
568 break;
569 case PIX_FMT_YUV422P:
570 s->bitstream_bpp= 16;
571 break;
572 case PIX_FMT_RGB32:
573 s->bitstream_bpp= 24;
574 break;
575 default:
576 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
577 return -1;
578 }
579 avctx->bits_per_coded_sample= s->bitstream_bpp;
580 s->decorrelate= s->bitstream_bpp >= 24;
581 s->predictor= avctx->prediction_method;
582 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
583 if(avctx->context_model==1){
584 s->context= avctx->context_model;
585 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
586 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
587 return -1;
588 }
589 }else s->context= 0;
590
591 if(avctx->codec->id==CODEC_ID_HUFFYUV){
592 if(avctx->pix_fmt==PIX_FMT_YUV420P){
593 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
594 return -1;
595 }
596 if(avctx->context_model){
597 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
598 return -1;
599 }
600 if(s->interlaced != ( s->height > 288 ))
601 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
602 }
603
604 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
605 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
606 return -1;
607 }
608
609 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
610 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
611 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
612 if(s->context)
613 ((uint8_t*)avctx->extradata)[2]|= 0x40;
614 ((uint8_t*)avctx->extradata)[3]= 0;
615 s->avctx->extradata_size= 4;
616
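    /* Two-pass handling: stats_in, when present, holds three blocks of 256
     * symbol counts (one per plane) in the text format written out at the end
     * of encode_frame(); they are accumulated on top of an initial count of 1
     * per symbol so that no symbol ends up with zero frequency. */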
617 if(avctx->stats_in){
618 char *p= avctx->stats_in;
619
620 for(i=0; i<3; i++)
621 for(j=0; j<256; j++)
622 s->stats[i][j]= 1;
623
624 for(;;){
625 for(i=0; i<3; i++){
626 char *next;
627
628 for(j=0; j<256; j++){
629 s->stats[i][j]+= strtol(p, &next, 0);
630 if(next==p) return -1;
631 p=next;
632 }
633 }
634 if(p[0]==0 || p[1]==0 || p[2]==0) break;
635 }
636 }else{
637 for(i=0; i<3; i++)
638 for(j=0; j<256; j++){
639 int d= FFMIN(j, 256-j);
640
641 s->stats[i][j]= 100000000/(d+1);
642 }
643 }
644
645 for(i=0; i<3; i++){
646 generate_len_table(s->len[i], s->stats[i], 256);
647
648 if(generate_bits_table(s->bits[i], s->len[i])<0){
649 return -1;
650 }
651
652 s->avctx->extradata_size+=
653 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
654 }
655
656 if(s->context){
657 for(i=0; i<3; i++){
658 int pels = s->width*s->height / (i?40:10);
659 for(j=0; j<256; j++){
660 int d= FFMIN(j, 256-j);
661 s->stats[i][j]= pels/(d+1);
662 }
663 }
664 }else{
665 for(i=0; i<3; i++)
666 for(j=0; j<256; j++)
667 s->stats[i][j]= 0;
668 }
669
670 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
671
672 alloc_temp(s);
673
674 s->picture_number=0;
675
676 return 0;
677 }
678 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
679
680 /* TODO instead of restarting the read when the code isn't in the first level
681 * of the joint table, jump into the 2nd level of the individual table. */
682 #define READ_2PIX(dst0, dst1, plane1){\
683 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
684 if(code != 0xffff){\
685 dst0 = code>>8;\
686 dst1 = code;\
687 }else{\
688 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
689 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
690 }\
691 }
692
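/* Decode one line of 4:2:2 samples into temp[]: count luma values plus
 * count/2 values for each chroma plane, two samples per joint-VLC read.
 * The first branch is the safe path for nearly exhausted bitstreams; the
 * second skips the per-pixel bits-left check when enough input remains. */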
693 static void decode_422_bitstream(HYuvContext *s, int count){
694 int i;
695
696 count/=2;
697
698 if(count >= (get_bits_left(&s->gb))/(31*4)){
699 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
700 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
701 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
702 }
703 }else{
704 for(i=0; i<count; i++){
705 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
706 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
707 }
708 }
709 }
710
711 static void decode_gray_bitstream(HYuvContext *s, int count){
712 int i;
713
714 count/=2;
715
716 if(count >= (get_bits_left(&s->gb))/(31*2)){
717 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
718 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
719 }
720 }else{
721 for(i=0; i<count; i++){
722 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
723 }
724 }
725 }
726
727 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
728 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
729 int i;
730 const uint8_t *y = s->temp[0] + offset;
731 const uint8_t *u = s->temp[1] + offset/2;
732 const uint8_t *v = s->temp[2] + offset/2;
733
734 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
735 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
736 return -1;
737 }
738
739 #define LOAD4\
740 int y0 = y[2*i];\
741 int y1 = y[2*i+1];\
742 int u0 = u[i];\
743 int v0 = v[i];
744
745 count/=2;
746 if(s->flags&CODEC_FLAG_PASS1){
747 for(i=0; i<count; i++){
748 LOAD4;
749 s->stats[0][y0]++;
750 s->stats[1][u0]++;
751 s->stats[0][y1]++;
752 s->stats[2][v0]++;
753 }
754 }
755 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
756 return 0;
757 if(s->context){
758 for(i=0; i<count; i++){
759 LOAD4;
760 s->stats[0][y0]++;
761 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
762 s->stats[1][u0]++;
763 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
764 s->stats[0][y1]++;
765 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
766 s->stats[2][v0]++;
767 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
768 }
769 }else{
770 for(i=0; i<count; i++){
771 LOAD4;
772 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
773 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
774 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
775 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
776 }
777 }
778 return 0;
779 }
780
781 static int encode_gray_bitstream(HYuvContext *s, int count){
782 int i;
783
784 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
785 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
786 return -1;
787 }
788
789 #define LOAD2\
790 int y0 = s->temp[0][2*i];\
791 int y1 = s->temp[0][2*i+1];
792 #define STAT2\
793 s->stats[0][y0]++;\
794 s->stats[0][y1]++;
795 #define WRITE2\
796 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
797 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
798
799 count/=2;
800 if(s->flags&CODEC_FLAG_PASS1){
801 for(i=0; i<count; i++){
802 LOAD2;
803 STAT2;
804 }
805 }
806 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
807 return 0;
808
809 if(s->context){
810 for(i=0; i<count; i++){
811 LOAD2;
812 STAT2;
813 WRITE2;
814 }
815 }else{
816 for(i=0; i<count; i++){
817 LOAD2;
818 WRITE2;
819 }
820 }
821 return 0;
822 }
823 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
824
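/* Decode count packed BGRA pixels. Each pixel is first tried against the
 * joint (G,B,R) table; on a miss (-1) the three channels are decoded with
 * their individual VLCs, undoing the green decorrelation if needed. Note
 * that alpha, when present, is read with the R-plane VLC (vlc[2]). */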
825 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
826 int i;
827 for(i=0; i<count; i++){
828 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
829 if(code != -1){
830 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
831 }else if(decorrelate){
832 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
833 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
834 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
835 }else{
836 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
837 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
838 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
839 }
840 if(alpha)
841 s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
842 }
843 }
844
845 static void decode_bgr_bitstream(HYuvContext *s, int count){
846 if(s->decorrelate){
847 if(s->bitstream_bpp==24)
848 decode_bgr_1(s, count, 1, 0);
849 else
850 decode_bgr_1(s, count, 1, 1);
851 }else{
852 if(s->bitstream_bpp==24)
853 decode_bgr_1(s, count, 0, 0);
854 else
855 decode_bgr_1(s, count, 0, 1);
856 }
857 }
858
859 static int encode_bgr_bitstream(HYuvContext *s, int count){
860 int i;
861
862 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
863 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
864 return -1;
865 }
866
867 #define LOAD3\
868 int g= s->temp[0][4*i+G];\
869 int b= (s->temp[0][4*i+B] - g) & 0xff;\
870 int r= (s->temp[0][4*i+R] - g) & 0xff;
871 #define STAT3\
872 s->stats[0][b]++;\
873 s->stats[1][g]++;\
874 s->stats[2][r]++;
875 #define WRITE3\
876 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
877 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
878 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
879
880 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
881 for(i=0; i<count; i++){
882 LOAD3;
883 STAT3;
884 }
885 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
886 for(i=0; i<count; i++){
887 LOAD3;
888 STAT3;
889 WRITE3;
890 }
891 }else{
892 for(i=0; i<count; i++){
893 LOAD3;
894 WRITE3;
895 }
896 }
897 return 0;
898 }
899
900 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
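/* Hand finished rows to the application via draw_horiz_band(): rows from
 * last_slice_end up to (but not including) y are drawn, with chroma offsets
 * derived from y/2 for 4:2:0 content. */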
901 static void draw_slice(HYuvContext *s, int y){
902 int h, cy;
903 int offset[4];
904
905 if(s->avctx->draw_horiz_band==NULL)
906 return;
907
908 h= y - s->last_slice_end;
909 y -= h;
910
911 if(s->bitstream_bpp==12){
912 cy= y>>1;
913 }else{
914 cy= y;
915 }
916
917 offset[0] = s->picture.linesize[0]*y;
918 offset[1] = s->picture.linesize[1]*cy;
919 offset[2] = s->picture.linesize[2]*cy;
920 offset[3] = 0;
921 emms_c();
922
923 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
924
925 s->last_slice_end= y + h;
926 }
927
928 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
929 const uint8_t *buf = avpkt->data;
930 int buf_size = avpkt->size;
931 HYuvContext *s = avctx->priv_data;
932 const int width= s->width;
933 const int width2= s->width>>1;
934 const int height= s->height;
935 int fake_ystride, fake_ustride, fake_vstride;
936 AVFrame * const p= &s->picture;
937 int table_size= 0;
938
939 AVFrame *picture = data;
940
941 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
942 if (!s->bitstream_buffer)
943 return AVERROR(ENOMEM);
944
945 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
946 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
947
948 if(p->data[0])
949 avctx->release_buffer(avctx, p);
950
951 p->reference= 0;
952 if(avctx->get_buffer(avctx, p) < 0){
953 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
954 return -1;
955 }
956
957 if(s->context){
958 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
959 if(table_size < 0)
960 return -1;
961 }
962
963 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
964 return -1;
965
966 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
967
968 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
969 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
970 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
971
972 s->last_slice_end= 0;
973
974 if(s->bitstream_bpp<24){
975 int y, cy;
976 int lefty, leftu, leftv;
977 int lefttopy, lefttopu, lefttopv;
978
979 if(s->yuy2){
980 p->data[0][3]= get_bits(&s->gb, 8);
981 p->data[0][2]= get_bits(&s->gb, 8);
982 p->data[0][1]= get_bits(&s->gb, 8);
983 p->data[0][0]= get_bits(&s->gb, 8);
984
985 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
986 return -1;
987 }else{
988
989 leftv= p->data[2][0]= get_bits(&s->gb, 8);
990 lefty= p->data[0][1]= get_bits(&s->gb, 8);
991 leftu= p->data[1][0]= get_bits(&s->gb, 8);
992 p->data[0][0]= get_bits(&s->gb, 8);
993
994 switch(s->predictor){
995 case LEFT:
996 case PLANE:
997 decode_422_bitstream(s, width-2);
998 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
999 if(!(s->flags&CODEC_FLAG_GRAY)){
1000 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1001 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1002 }
1003
1004 for(cy=y=1; y<s->height; y++,cy++){
1005 uint8_t *ydst, *udst, *vdst;
1006
1007 if(s->bitstream_bpp==12){
1008 decode_gray_bitstream(s, width);
1009
1010 ydst= p->data[0] + p->linesize[0]*y;
1011
1012 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1013 if(s->predictor == PLANE){
1014 if(y>s->interlaced)
1015 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1016 }
1017 y++;
1018 if(y>=s->height) break;
1019 }
1020
1021 draw_slice(s, y);
1022
1023 ydst= p->data[0] + p->linesize[0]*y;
1024 udst= p->data[1] + p->linesize[1]*cy;
1025 vdst= p->data[2] + p->linesize[2]*cy;
1026
1027 decode_422_bitstream(s, width);
1028 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1029 if(!(s->flags&CODEC_FLAG_GRAY)){
1030 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1031 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1032 }
1033 if(s->predictor == PLANE){
1034 if(cy>s->interlaced){
1035 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1036 if(!(s->flags&CODEC_FLAG_GRAY)){
1037 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1038 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1039 }
1040 }
1041 }
1042 }
1043 draw_slice(s, height);
1044
1045 break;
1046 case MEDIAN:
1047 /* first line except first 2 pixels is left predicted */
1048 decode_422_bitstream(s, width-2);
1049 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1050 if(!(s->flags&CODEC_FLAG_GRAY)){
1051 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1052 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1053 }
1054
1055 cy=y=1;
1056
1057 /* second line is left predicted for interlaced case */
1058 if(s->interlaced){
1059 decode_422_bitstream(s, width);
1060 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1061 if(!(s->flags&CODEC_FLAG_GRAY)){
 1062                     leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[1], s->temp[1], width2, leftu);
 1063                     leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[2], s->temp[2], width2, leftv);
1064 }
1065 y++; cy++;
1066 }
1067
1068 /* next 4 pixels are left predicted too */
1069 decode_422_bitstream(s, 4);
1070 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1071 if(!(s->flags&CODEC_FLAG_GRAY)){
1072 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1073 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1074 }
1075
1076 /* next line except the first 4 pixels is median predicted */
1077 lefttopy= p->data[0][3];
1078 decode_422_bitstream(s, width-4);
1079 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1080 if(!(s->flags&CODEC_FLAG_GRAY)){
1081 lefttopu= p->data[1][1];
1082 lefttopv= p->data[2][1];
1083 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1084 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
1085 }
1086 y++; cy++;
1087
1088 for(; y<height; y++,cy++){
1089 uint8_t *ydst, *udst, *vdst;
1090
1091 if(s->bitstream_bpp==12){
1092 while(2*cy > y){
1093 decode_gray_bitstream(s, width);
1094 ydst= p->data[0] + p->linesize[0]*y;
1095 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1096 y++;
1097 }
1098 if(y>=height) break;
1099 }
1100 draw_slice(s, y);
1101
1102 decode_422_bitstream(s, width);
1103
1104 ydst= p->data[0] + p->linesize[0]*y;
1105 udst= p->data[1] + p->linesize[1]*cy;
1106 vdst= p->data[2] + p->linesize[2]*cy;
1107
1108 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1109 if(!(s->flags&CODEC_FLAG_GRAY)){
1110 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1111 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1112 }
1113 }
1114
1115 draw_slice(s, height);
1116 break;
1117 }
1118 }
1119 }else{
1120 int y;
1121 int leftr, leftg, leftb, lefta;
1122 const int last_line= (height-1)*p->linesize[0];
1123
1124 if(s->bitstream_bpp==32){
1125 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1126 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1127 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1128 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1129 }else{
1130 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1131 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1132 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1133 lefta= p->data[0][last_line+A]= 255;
1134 skip_bits(&s->gb, 8);
1135 }
1136
1137 if(s->bgr32){
1138 switch(s->predictor){
1139 case LEFT:
1140 case PLANE:
1141 decode_bgr_bitstream(s, width-1);
1142 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1143
1144 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1145 decode_bgr_bitstream(s, width);
1146
1147 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1148 if(s->predictor == PLANE){
1149 if(s->bitstream_bpp!=32) lefta=0;
1150 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1151 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1152 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1153 }
1154 }
1155 }
1156 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1157 break;
1158 default:
1159 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1160 }
1161 }else{
1162
1163 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1164 return -1;
1165 }
1166 }
1167 emms_c();
1168
1169 *picture= *p;
1170 *data_size = sizeof(AVFrame);
1171
1172 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1173 }
1174 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1175
1176 static int common_end(HYuvContext *s){
1177 int i;
1178
1179 for(i=0; i<3; i++){
1180 av_freep(&s->temp[i]);
1181 }
1182 return 0;
1183 }
1184
1185 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1186 static av_cold int decode_end(AVCodecContext *avctx)
1187 {
1188 HYuvContext *s = avctx->priv_data;
1189 int i;
1190
1191 if (s->picture.data[0])
1192 avctx->release_buffer(avctx, &s->picture);
1193
1194 common_end(s);
1195 av_freep(&s->bitstream_buffer);
1196
1197 for(i=0; i<6; i++){
1198 free_vlc(&s->vlc[i]);
1199 }
1200
1201 return 0;
1202 }
1203 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1204
1205 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
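/* Encode one frame. Output layout: optional per-frame length tables (only
 * with context=1), the raw first pixel values, then the VLC data for the
 * remaining samples, padded to a multiple of 32 bits and stored as
 * byte-swapped 32-bit words, mirroring the bswap_buf() done on input in
 * decode_frame(). */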
1206 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1207 HYuvContext *s = avctx->priv_data;
1208 AVFrame *pict = data;
1209 const int width= s->width;
1210 const int width2= s->width>>1;
1211 const int height= s->height;
1212 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1213 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1214 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1215 AVFrame * const p= &s->picture;
1216 int i, j, size=0;
1217
1218 *p = *pict;
1219 p->pict_type= FF_I_TYPE;
1220 p->key_frame= 1;
1221
1222 if(s->context){
1223 for(i=0; i<3; i++){
1224 generate_len_table(s->len[i], s->stats[i], 256);
1225 if(generate_bits_table(s->bits[i], s->len[i])<0)
1226 return -1;
1227 size+= store_table(s, s->len[i], &buf[size]);
1228 }
1229
1230 for(i=0; i<3; i++)
1231 for(j=0; j<256; j++)
1232 s->stats[i][j] >>= 1;
1233 }
1234
1235 init_put_bits(&s->pb, buf+size, buf_size-size);
1236
1237 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1238 int lefty, leftu, leftv, y, cy;
1239
1240 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1241 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1242 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1243 put_bits(&s->pb, 8, p->data[0][0]);
1244
1245 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1246 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1247 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1248
1249 encode_422_bitstream(s, 2, width-2);
1250
1251 if(s->predictor==MEDIAN){
1252 int lefttopy, lefttopu, lefttopv;
1253 cy=y=1;
1254 if(s->interlaced){
1255 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1256 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1257 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1258
1259 encode_422_bitstream(s, 0, width);
1260 y++; cy++;
1261 }
1262
1263 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1264 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1265 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1266
1267 encode_422_bitstream(s, 0, 4);
1268
1269 lefttopy= p->data[0][3];
1270 lefttopu= p->data[1][1];
1271 lefttopv= p->data[2][1];
1272 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1273 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1274 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1275 encode_422_bitstream(s, 0, width-4);
1276 y++; cy++;
1277
1278 for(; y<height; y++,cy++){
1279 uint8_t *ydst, *udst, *vdst;
1280
1281 if(s->bitstream_bpp==12){
1282 while(2*cy > y){
1283 ydst= p->data[0] + p->linesize[0]*y;
1284 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1285 encode_gray_bitstream(s, width);
1286 y++;
1287 }
1288 if(y>=height) break;
1289 }
1290 ydst= p->data[0] + p->linesize[0]*y;
1291 udst= p->data[1] + p->linesize[1]*cy;
1292 vdst= p->data[2] + p->linesize[2]*cy;
1293
1294 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1295 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1296 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1297
1298 encode_422_bitstream(s, 0, width);
1299 }
1300 }else{
1301 for(cy=y=1; y<height; y++,cy++){
1302 uint8_t *ydst, *udst, *vdst;
1303
1304 /* encode a luma only line & y++ */
1305 if(s->bitstream_bpp==12){
1306 ydst= p->data[0] + p->linesize[0]*y;
1307
1308 if(s->predictor == PLANE && s->interlaced < y){
1309 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1310
1311 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1312 }else{
1313 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1314 }
1315 encode_gray_bitstream(s, width);
1316 y++;
1317 if(y>=height) break;
1318 }
1319
1320 ydst= p->data[0] + p->linesize[0]*y;
1321 udst= p->data[1] + p->linesize[1]*cy;
1322 vdst= p->data[2] + p->linesize[2]*cy;
1323
1324 if(s->predictor == PLANE && s->interlaced < cy){
1325 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1326 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1327 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1328
1329 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1330 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1331 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1332 }else{
1333 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1334 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1335 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1336 }
1337
1338 encode_422_bitstream(s, 0, width);
1339 }
1340 }
1341 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1342 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1343 const int stride = -p->linesize[0];
1344 const int fake_stride = -fake_ystride;
1345 int y;
1346 int leftr, leftg, leftb;
1347
1348 put_bits(&s->pb, 8, leftr= data[R]);
1349 put_bits(&s->pb, 8, leftg= data[G]);
1350 put_bits(&s->pb, 8, leftb= data[B]);
1351 put_bits(&s->pb, 8, 0);
1352
1353 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1354 encode_bgr_bitstream(s, width-1);
1355
1356 for(y=1; y<s->height; y++){
1357 uint8_t *dst = data + y*stride;
1358 if(s->predictor == PLANE && s->interlaced < y){
1359 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1360 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1361 }else{
1362 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1363 }
1364 encode_bgr_bitstream(s, width);
1365 }
1366 }else{
1367 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1368 }
1369 emms_c();
1370
1371 size+= (put_bits_count(&s->pb)+31)/8;
1372 put_bits(&s->pb, 16, 0);
1373 put_bits(&s->pb, 15, 0);
1374 size/= 4;
1375
1376 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1377 int j;
1378 char *p= avctx->stats_out;
1379 char *end= p + 1024*30;
1380 for(i=0; i<3; i++){
1381 for(j=0; j<256; j++){
1382 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1383 p+= strlen(p);
1384 s->stats[i][j]= 0;
1385 }
1386 snprintf(p, end-p, "\n");
1387 p++;
1388 }
1389 } else
1390 avctx->stats_out[0] = '\0';
1391 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1392 flush_put_bits(&s->pb);
1393 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1394 }
1395
1396 s->picture_number++;
1397
1398 return size*4;
1399 }
1400
1401 static av_cold int encode_end(AVCodecContext *avctx)
1402 {
1403 HYuvContext *s = avctx->priv_data;
1404
1405 common_end(s);
1406
1407 av_freep(&avctx->extradata);
1408 av_freep(&avctx->stats_out);
1409
1410 return 0;
1411 }
1412 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1413
1414 #if CONFIG_HUFFYUV_DECODER
1415 AVCodec huffyuv_decoder = {
1416 "huffyuv",
1417 CODEC_TYPE_VIDEO,
1418 CODEC_ID_HUFFYUV,
1419 sizeof(HYuvContext),
1420 decode_init,
1421 NULL,
1422 decode_end,
1423 decode_frame,
1424 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1425 NULL,
1426 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1427 };
1428 #endif
1429
1430 #if CONFIG_FFVHUFF_DECODER
1431 AVCodec ffvhuff_decoder = {
1432 "ffvhuff",
1433 CODEC_TYPE_VIDEO,
1434 CODEC_ID_FFVHUFF,
1435 sizeof(HYuvContext),
1436 decode_init,
1437 NULL,
1438 decode_end,
1439 decode_frame,
1440 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1441 NULL,
1442 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1443 };
1444 #endif
1445
1446 #if CONFIG_HUFFYUV_ENCODER
1447 AVCodec huffyuv_encoder = {
1448 "huffyuv",
1449 CODEC_TYPE_VIDEO,
1450 CODEC_ID_HUFFYUV,
1451 sizeof(HYuvContext),
1452 encode_init,
1453 encode_frame,
1454 encode_end,
1455 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1456 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1457 };
1458 #endif
1459
1460 #if CONFIG_FFVHUFF_ENCODER
1461 AVCodec ffvhuff_encoder = {
1462 "ffvhuff",
1463 CODEC_TYPE_VIDEO,
1464 CODEC_ID_FFVHUFF,
1465 sizeof(HYuvContext),
1466 encode_init,
1467 encode_frame,
1468 encode_end,
1469 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1470 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1471 };
1472 #endif