278948d195c91f08a9cb7199732d97b43b451839
[libav.git] / libavcodec / huffyuv.c
1 /*
2 * huffyuv codec for libavcodec
3 *
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7 * the algorithm used
8 *
9 * This file is part of Libav.
10 *
11 * Libav is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
15 *
16 * Libav is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with Libav; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26 /**
27 * @file
28 * huffyuv codec for libavcodec.
29 */
30
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "put_bits.h"
34 #include "dsputil.h"
35 #include "thread.h"
36
37 #define VLC_BITS 11
38
39 #if HAVE_BIGENDIAN
40 #define B 3
41 #define G 2
42 #define R 1
43 #define A 0
44 #else
45 #define B 0
46 #define G 1
47 #define R 2
48 #define A 3
49 #endif
50
typedef enum Predictor{
    LEFT= 0,    ///< predict from the pixel to the left
    PLANE,      ///< left prediction plus the per-line delta of the row above
    MEDIAN,     ///< median of left, top and left+top-topleft
} Predictor;
56
typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;
    GetBitContext gb;                   ///< bit reader for the current frame
    PutBitContext pb;                   ///< bit writer for the current frame
    int interlaced;
    int decorrelate;                    ///< RGB: code G, B-G, R-G instead of raw B, G, R
    int bitstream_bpp;                  ///< bits per pixel as coded in the stream (12/16/24/32)
    int version;                        ///< 0: no extradata, 2: tables in extradata (1: presumed legacy — see decode_init)
    int yuy2;                           //use yuy2 instead of 422P
    int bgr32;                          //use bgr32 instead of bgr24
    int width, height;
    int flags;
    int context;                        ///< nonzero: per-frame adaptive Huffman tables
    int picture_number;
    int last_slice_end;                 ///< first row not yet reported via draw_horiz_band
    uint8_t *temp[3];                   ///< per-line scratch buffers (see alloc_temp())
    uint64_t stats[3][256];             ///< symbol frequencies per plane (encoder / context mode)
    uint8_t len[3][256];                ///< Huffman code length per symbol and plane
    uint32_t bits[3][256];              ///< Huffman code bits per symbol and plane
    uint32_t pix_bgr_map[1<<VLC_BITS];  ///< precomputed pixels for joint RGB codes
    VLC vlc[6];                         //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;          ///< byte-swapped copy of the input packet
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;
84
/* Hardcoded tables for "classic" (v1) huffyuv files, which carry no
 * Huffman tables of their own.  The shift tables are run-length coded
 * code lengths (decoded by read_len_table()); the add tables are the
 * corresponding code bits, indexed by symbol. */
#define classic_shift_luma_table_size 42
static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

#define classic_shift_chroma_table_size 59
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};
136
137 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
138 int i;
139 if(w<32){
140 for(i=0; i<w; i++){
141 const int temp= src[i];
142 dst[i]= temp - left;
143 left= temp;
144 }
145 return left;
146 }else{
147 for(i=0; i<16; i++){
148 const int temp= src[i];
149 dst[i]= temp - left;
150 left= temp;
151 }
152 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
153 return src[w-1];
154 }
155 }
156
static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    // Left-predict a packed BGR32 row: the first 4 pixels scalar, the rest
    // via the DSP byte-difference routine.  The last pixel's components are
    // returned through *red/*green/*blue for the next row.
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;
    for(i=0; i<FFMIN(w,4); i++){
        const int rt= src[i*4+R];
        const int gt= src[i*4+G];
        const int bt= src[i*4+B];
        dst[i*4+R]= rt - r;
        dst[i*4+G]= gt - g;
        dst[i*4+B]= bt - b;
        r = rt;
        g = gt;
        b = bt;
    }
    // NOTE(review): for w < 4 the length argument below goes negative;
    // callers appear to pass frame widths (>= 4), but confirm.
    s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
    *red=   src[(w-1)*4+R];
    *green= src[(w-1)*4+G];
    *blue=  src[(w-1)*4+B];
}
179
180 static int read_len_table(uint8_t *dst, GetBitContext *gb){
181 int i, val, repeat;
182
183 for(i=0; i<256;){
184 repeat= get_bits(gb, 3);
185 val = get_bits(gb, 5);
186 if(repeat==0)
187 repeat= get_bits(gb, 8);
188 //printf("%d %d\n", val, repeat);
189 if(i+repeat > 256 || get_bits_left(gb) < 0) {
190 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
191 return -1;
192 }
193 while (repeat--)
194 dst[i++] = val;
195 }
196 return 0;
197 }
198
199 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
200 int len, index;
201 uint32_t bits=0;
202
203 for(len=32; len>0; len--){
204 for(index=0; index<256; index++){
205 if(len_table[index]==len)
206 dst[index]= bits++;
207 }
208 if(bits & 1){
209 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
210 return -1;
211 }
212 bits >>= 1;
213 }
214 return 0;
215 }
216
217 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
typedef struct {
    uint64_t val;   ///< weighted symbol frequency (heap key)
    int name;       ///< symbol index or internal-node id
} HeapElem;

/**
 * Restore the min-heap property for the subtree rooted at @p root in a
 * binary heap of @p size elements stored in @p h.
 *
 * Uses an explicit three-assignment swap instead of the FFSWAP() macro:
 * same behavior, but no multiple evaluation of the element expressions and
 * no dependency on an external macro.
 */
static void heap_sift(HeapElem *h, int root, int size)
{
    while (root * 2 + 1 < size) {
        int child = root * 2 + 1;
        // Pick the smaller of the two children, if a right child exists.
        if (child < size - 1 && h[child].val > h[child + 1].val)
            child++;
        if (h[root].val > h[child].val) {
            HeapElem tmp = h[root];
            h[root]  = h[child];
            h[child] = tmp;
            root = child;
        } else
            break;
    }
}
236
static void generate_len_table(uint8_t *dst, const uint64_t *stats){
    // Build Huffman code lengths (< 32 bits) from symbol frequencies using a
    // min-heap merge.  If any resulting length reaches 32, the build is
    // retried with a doubled 'offset' added to every weight — presumably this
    // flattens the frequency distribution until the tree fits; confirm.
    HeapElem h[256];
    int up[2*256];      // parent node index for every leaf and internal node
    int len[2*256];     // depth of every internal node (root = 0)
    int offset, i, next;
    int size = 256;

    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            h[i].name = i;
            // <<8 keeps counts distinct from the offset term added below
            h[i].val = (stats[i] << 8) + offset;
        }
        // Standard bottom-up heapify.
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        // Walk parents from the root down to compute node depths, then
        // leaf depths (= code lengths).
        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;
        }
        if(i==size) break;  // every length fits in 31 bits -> done
    }
}
274 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
275
static void generate_joint_tables(HYuvContext *s){
    // Build "joint" VLC tables that decode two (YUV) or three (RGB) symbols
    // with a single table lookup, as a fast path over the per-plane tables.
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        int p, i, y, u;
        // vlc[3+p] decodes a (luma, plane-p) pair whenever the combined
        // code fits in VLC_BITS.
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            ff_free_vlc(&s->vlc[3+p]);
            ff_init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        // RGB: a single joint table over (G, B, R) deltas; the decoded
        // pixel values are precomputed into pix_bgr_map.
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        // deltas are relative to green
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        ff_free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}
344
345 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
346 GetBitContext gb;
347 int i;
348
349 init_get_bits(&gb, src, length*8);
350
351 for(i=0; i<3; i++){
352 if(read_len_table(s->len[i], &gb)<0)
353 return -1;
354 if(generate_bits_table(s->bits[i], s->len[i])<0){
355 return -1;
356 }
357 ff_free_vlc(&s->vlc[i]);
358 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
359 }
360
361 generate_joint_tables(s);
362
363 return (get_bits_count(&gb)+7)/8;
364 }
365
366 static int read_old_huffman_tables(HYuvContext *s){
367 #if 1
368 GetBitContext gb;
369 int i;
370
371 init_get_bits(&gb, classic_shift_luma, classic_shift_luma_table_size*8);
372 if(read_len_table(s->len[0], &gb)<0)
373 return -1;
374 init_get_bits(&gb, classic_shift_chroma, classic_shift_chroma_table_size*8);
375 if(read_len_table(s->len[1], &gb)<0)
376 return -1;
377
378 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
379 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
380
381 if(s->bitstream_bpp >= 24){
382 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
383 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
384 }
385 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
386 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
387
388 for(i=0; i<3; i++){
389 ff_free_vlc(&s->vlc[i]);
390 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
391 }
392
393 generate_joint_tables(s);
394
395 return 0;
396 #else
397 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
398 return -1;
399 #endif
400 }
401
static av_cold void alloc_temp(HYuvContext *s){
    // Allocate per-line scratch buffers: three planar lines for YUV modes,
    // one packed 4-byte-per-pixel line for RGB.
    // NOTE(review): allocation failures are not checked here; a NULL temp[]
    // would crash later -- consider propagating an error to callers.
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        s->temp[0]= av_mallocz(4*s->width + 16);
    }
}
413
414 static av_cold int common_init(AVCodecContext *avctx){
415 HYuvContext *s = avctx->priv_data;
416
417 s->avctx= avctx;
418 s->flags= avctx->flags;
419
420 ff_dsputil_init(&s->dsp, avctx);
421
422 s->width= avctx->width;
423 s->height= avctx->height;
424 assert(s->width>0 && s->height>0);
425
426 return 0;
427 }
428
429 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_init(AVCodecContext *avctx)
{
    // Decoder init: detect the stream version, parse the extradata header
    // (v2) or fall back to hardcoded behavior (v0/v1), build the Huffman
    // tables and pick the output pixel format.
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;     // heuristic: taller than one PAL field

    s->bgr32=1;
//if(avctx->extradata)
//  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        if (avctx->extradata_size < 4)
            return -1;

        // Extradata layout: byte 0 = method (bit 6: decorrelate, bits 0-5:
        // predictor), byte 1 = bits per pixel, byte 2 = flags (bits 4-5:
        // interlace override, bit 6: adaptive context), byte 3 = reserved,
        // then the run-length coded Huffman length tables.
        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
            return -1;
    }else{
        // Old files encode the mode in the low 3 bits of the bpp value.
        switch(avctx->bits_per_coded_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        return AVERROR_INVALIDDATA;
    }

    alloc_temp(s);

//    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    return 0;
}
528
529 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
530 {
531 HYuvContext *s = avctx->priv_data;
532 int i;
533
534 avctx->coded_frame= &s->picture;
535 alloc_temp(s);
536
537 for (i = 0; i < 6; i++)
538 s->vlc[i].table = NULL;
539
540 if(s->version==2){
541 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
542 return -1;
543 }else{
544 if(read_old_huffman_tables(s) < 0)
545 return -1;
546 }
547
548 return 0;
549 }
550 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
551
552 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
553 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
554 int i;
555 int index= 0;
556
557 for(i=0; i<256;){
558 int val= len[i];
559 int repeat=0;
560
561 for(; i<256 && len[i]==val && repeat<255; i++)
562 repeat++;
563
564 assert(val < 32 && val >0 && repeat<256 && repeat>0);
565 if(repeat>7){
566 buf[index++]= val;
567 buf[index++]= repeat;
568 }else{
569 buf[index++]= val | (repeat<<5);
570 }
571 }
572
573 return index;
574 }
575
static av_cold int encode_init(AVCodecContext *avctx)
{
    // Encoder init: pick the coded bpp from the pixel format, validate the
    // mode combination, write the 4-byte extradata header plus the Huffman
    // tables, and seed the symbol statistics.
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp= 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        // Adaptive context rewrites the stats each frame, which conflicts
        // with 2-pass statistics gathering.
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    // The original huffyuv codec is more restrictive than ffvhuff.
    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    // 4-byte header: method byte, bpp, flag byte, reserved (see decode_init).
    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        // 2nd pass: accumulate the frequencies dumped by the first pass.
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        // No stats: seed with a distribution favoring small deltas.
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    // Build the tables from the seed stats and append them to the extradata.
    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i]);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
        store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        // Adaptive mode: re-seed scaled to the frame size (chroma planes
        // get a smaller share).
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
702 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
703
/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
// Read two symbols with one joint-table lookup; the sentinel 0xffff marks a
// pair not covered by the joint table, which falls back to two single reads.
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}

static void decode_422_bitstream(HYuvContext *s, int count){
    // Decode 'count' 4:2:2 samples as (Y,U) and (Y,V) pairs into temp[].
    int i;

    count/=2;

    // Worst case is 31 bits per code, 4 codes per iteration; when the input
    // might run out, take the slow path that rechecks the reader each time.
    if(count >= (get_bits_left(&s->gb))/(31*4)){
        for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }
}
734
735 static void decode_gray_bitstream(HYuvContext *s, int count){
736 int i;
737
738 count/=2;
739
740 if(count >= (get_bits_left(&s->gb))/(31*2)){
741 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
742 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
743 }
744 }else{
745 for(i=0; i<count; i++){
746 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
747 }
748 }
749 }
750
751 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
752 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
753 int i;
754 const uint8_t *y = s->temp[0] + offset;
755 const uint8_t *u = s->temp[1] + offset/2;
756 const uint8_t *v = s->temp[2] + offset/2;
757
758 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
759 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
760 return -1;
761 }
762
763 #define LOAD4\
764 int y0 = y[2*i];\
765 int y1 = y[2*i+1];\
766 int u0 = u[i];\
767 int v0 = v[i];
768
769 count/=2;
770 if(s->flags&CODEC_FLAG_PASS1){
771 for(i=0; i<count; i++){
772 LOAD4;
773 s->stats[0][y0]++;
774 s->stats[1][u0]++;
775 s->stats[0][y1]++;
776 s->stats[2][v0]++;
777 }
778 }
779 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
780 return 0;
781 if(s->context){
782 for(i=0; i<count; i++){
783 LOAD4;
784 s->stats[0][y0]++;
785 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
786 s->stats[1][u0]++;
787 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
788 s->stats[0][y1]++;
789 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
790 s->stats[2][v0]++;
791 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
792 }
793 }else{
794 for(i=0; i<count; i++){
795 LOAD4;
796 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
797 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
798 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
799 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
800 }
801 }
802 return 0;
803 }
804
static int encode_gray_bitstream(HYuvContext *s, int count){
    // Encode 'count' luma-only samples from temp[0], two per iteration.
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

// LOAD2 fetches a luma pair, STAT2 updates the histogram, WRITE2 emits codes.
#define LOAD2\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        // First pass: statistics only.
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        // Adaptive mode: update stats while writing codes.
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
847 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
848
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    // Decode 'count' BGR(A) pixels into temp[0].  'decorrelate' and 'alpha'
    // are literal constants at every call site (see decode_bgr_bitstream),
    // so the branches fold away in the always-inlined copies.
    int i;
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            // Joint G/B/R hit: the whole pixel was precomputed in pix_bgr_map.
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            // B and R are coded as deltas relative to G.
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            // NOTE(review): alpha is decoded with the plane-2 (R) table --
            // presumably matching the reference bitstream; confirm.
            s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
    }
}
868
869 static void decode_bgr_bitstream(HYuvContext *s, int count){
870 if(s->decorrelate){
871 if(s->bitstream_bpp==24)
872 decode_bgr_1(s, count, 1, 0);
873 else
874 decode_bgr_1(s, count, 1, 1);
875 }else{
876 if(s->bitstream_bpp==24)
877 decode_bgr_1(s, count, 0, 0);
878 else
879 decode_bgr_1(s, count, 0, 1);
880 }
881 }
882
// NOTE(review): unlike the other encode_* helpers this function is not
// wrapped in the CONFIG_*_ENCODER guard -- presumably harmless (it is
// unreferenced in decoder-only builds) but inconsistent; confirm.
static int encode_bgr_bitstream(HYuvContext *s, int count){
    // Encode 'count' BGR32 pixels from temp[0] with green decorrelation.
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

// LOAD3 fetches one pixel and decorrelates B and R against G (mod 256).
#define LOAD3\
            int g= s->temp[0][4*i+G];\
            int b= (s->temp[0][4*i+B] - g) & 0xff;\
            int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
            s->stats[0][b]++;\
            s->stats[1][g]++;\
            s->stats[2][r]++;
#define WRITE3\
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        // First pass without output: statistics only.
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}
923
924 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static void draw_slice(HYuvContext *s, int y){
    // Report the newly decoded rows [last_slice_end, y) to the user through
    // draw_horiz_band, if one is installed.
    int h, cy, i;
    int offset[AV_NUM_DATA_POINTERS];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;     // y is now the first row of the slice

    if(s->bitstream_bpp==12){
        cy= y>>1;   // 4:2:0: chroma planes have half the rows
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
        offset[i] = 0;
    emms_c();   // leave the FPU/MMX state clean before the user callback

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}
952
953 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
954 const uint8_t *buf = avpkt->data;
955 int buf_size = avpkt->size;
956 HYuvContext *s = avctx->priv_data;
957 const int width= s->width;
958 const int width2= s->width>>1;
959 const int height= s->height;
960 int fake_ystride, fake_ustride, fake_vstride;
961 AVFrame * const p= &s->picture;
962 int table_size= 0;
963
964 AVFrame *picture = data;
965
966 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
967 if (!s->bitstream_buffer)
968 return AVERROR(ENOMEM);
969
970 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
971 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
972
973 if(p->data[0])
974 ff_thread_release_buffer(avctx, p);
975
976 p->reference= 0;
977 if(ff_thread_get_buffer(avctx, p) < 0){
978 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
979 return -1;
980 }
981
982 if(s->context){
983 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
984 if(table_size < 0)
985 return -1;
986 }
987
988 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
989 return -1;
990
991 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
992
993 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
994 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
995 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
996
997 s->last_slice_end= 0;
998
999 if(s->bitstream_bpp<24){
1000 int y, cy;
1001 int lefty, leftu, leftv;
1002 int lefttopy, lefttopu, lefttopv;
1003
1004 if(s->yuy2){
1005 p->data[0][3]= get_bits(&s->gb, 8);
1006 p->data[0][2]= get_bits(&s->gb, 8);
1007 p->data[0][1]= get_bits(&s->gb, 8);
1008 p->data[0][0]= get_bits(&s->gb, 8);
1009
1010 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
1011 return -1;
1012 }else{
1013
1014 leftv= p->data[2][0]= get_bits(&s->gb, 8);
1015 lefty= p->data[0][1]= get_bits(&s->gb, 8);
1016 leftu= p->data[1][0]= get_bits(&s->gb, 8);
1017 p->data[0][0]= get_bits(&s->gb, 8);
1018
1019 switch(s->predictor){
1020 case LEFT:
1021 case PLANE:
1022 decode_422_bitstream(s, width-2);
1023 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1024 if(!(s->flags&CODEC_FLAG_GRAY)){
1025 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1026 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1027 }
1028
1029 for(cy=y=1; y<s->height; y++,cy++){
1030 uint8_t *ydst, *udst, *vdst;
1031
1032 if(s->bitstream_bpp==12){
1033 decode_gray_bitstream(s, width);
1034
1035 ydst= p->data[0] + p->linesize[0]*y;
1036
1037 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1038 if(s->predictor == PLANE){
1039 if(y>s->interlaced)
1040 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1041 }
1042 y++;
1043 if(y>=s->height) break;
1044 }
1045
1046 draw_slice(s, y);
1047
1048 ydst= p->data[0] + p->linesize[0]*y;
1049 udst= p->data[1] + p->linesize[1]*cy;
1050 vdst= p->data[2] + p->linesize[2]*cy;
1051
1052 decode_422_bitstream(s, width);
1053 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1054 if(!(s->flags&CODEC_FLAG_GRAY)){
1055 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1056 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1057 }
1058 if(s->predictor == PLANE){
1059 if(cy>s->interlaced){
1060 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1061 if(!(s->flags&CODEC_FLAG_GRAY)){
1062 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1063 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1064 }
1065 }
1066 }
1067 }
1068 draw_slice(s, height);
1069
1070 break;
1071 case MEDIAN:
1072 /* first line except first 2 pixels is left predicted */
1073 decode_422_bitstream(s, width-2);
1074 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1075 if(!(s->flags&CODEC_FLAG_GRAY)){
1076 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1077 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1078 }
1079
1080 cy=y=1;
1081
1082 /* second line is left predicted for interlaced case */
1083 if(s->interlaced){
1084 decode_422_bitstream(s, width);
1085 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1086 if(!(s->flags&CODEC_FLAG_GRAY)){
1087 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1088 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1089 }
1090 y++; cy++;
1091 }
1092
1093 /* next 4 pixels are left predicted too */
1094 decode_422_bitstream(s, 4);
1095 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1096 if(!(s->flags&CODEC_FLAG_GRAY)){
1097 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1098 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1099 }
1100
1101 /* next line except the first 4 pixels is median predicted */
1102 lefttopy= p->data[0][3];
1103 decode_422_bitstream(s, width-4);
1104 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1105 if(!(s->flags&CODEC_FLAG_GRAY)){
1106 lefttopu= p->data[1][1];
1107 lefttopv= p->data[2][1];
1108 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1109 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
1110 }
1111 y++; cy++;
1112
1113 for(; y<height; y++,cy++){
1114 uint8_t *ydst, *udst, *vdst;
1115
1116 if(s->bitstream_bpp==12){
1117 while(2*cy > y){
1118 decode_gray_bitstream(s, width);
1119 ydst= p->data[0] + p->linesize[0]*y;
1120 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1121 y++;
1122 }
1123 if(y>=height) break;
1124 }
1125 draw_slice(s, y);
1126
1127 decode_422_bitstream(s, width);
1128
1129 ydst= p->data[0] + p->linesize[0]*y;
1130 udst= p->data[1] + p->linesize[1]*cy;
1131 vdst= p->data[2] + p->linesize[2]*cy;
1132
1133 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1134 if(!(s->flags&CODEC_FLAG_GRAY)){
1135 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1136 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1137 }
1138 }
1139
1140 draw_slice(s, height);
1141 break;
1142 }
1143 }
1144 }else{
1145 int y;
1146 int leftr, leftg, leftb, lefta;
1147 const int last_line= (height-1)*p->linesize[0];
1148
1149 if(s->bitstream_bpp==32){
1150 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1151 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1152 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1153 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1154 }else{
1155 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1156 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1157 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1158 lefta= p->data[0][last_line+A]= 255;
1159 skip_bits(&s->gb, 8);
1160 }
1161
1162 if(s->bgr32){
1163 switch(s->predictor){
1164 case LEFT:
1165 case PLANE:
1166 decode_bgr_bitstream(s, width-1);
1167 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1168
1169 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1170 decode_bgr_bitstream(s, width);
1171
1172 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1173 if(s->predictor == PLANE){
1174 if(s->bitstream_bpp!=32) lefta=0;
1175 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1176 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1177 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1178 }
1179 }
1180 }
1181 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1182 break;
1183 default:
1184 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1185 }
1186 }else{
1187
1188 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1189 return -1;
1190 }
1191 }
1192 emms_c();
1193
1194 *picture= *p;
1195 *data_size = sizeof(AVFrame);
1196
1197 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1198 }
1199 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1200
1201 static int common_end(HYuvContext *s){
1202 int i;
1203
1204 for(i=0; i<3; i++){
1205 av_freep(&s->temp[i]);
1206 }
1207 return 0;
1208 }
1209
1210 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1211 static av_cold int decode_end(AVCodecContext *avctx)
1212 {
1213 HYuvContext *s = avctx->priv_data;
1214 int i;
1215
1216 if (s->picture.data[0])
1217 avctx->release_buffer(avctx, &s->picture);
1218
1219 common_end(s);
1220 av_freep(&s->bitstream_buffer);
1221
1222 for(i=0; i<6; i++){
1223 ff_free_vlc(&s->vlc[i]);
1224 }
1225
1226 return 0;
1227 }
1228 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1229
1230 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * Encode one frame into a huffyuv bitstream.
 *
 * Supports YUV422P/YUV420P (left, plane or median predictor) and RGB32
 * (left/plane predictor on packed BGRA lines, stored bottom-up).
 * With s->context set, the adaptive Huffman tables are regenerated from
 * the accumulated stats and stored at the head of the packet.
 *
 * @param avctx      codec context (pix_fmt selects the code path)
 * @param pkt        output packet; allocated here if pkt->data is NULL
 * @param pict       source frame (not modified)
 * @param got_packet set to 1 on success
 * @return 0 on success, negative error code on allocation/table failure
 */
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;       // chroma width (4:2:2 / 4:2:0)
    const int height= s->height;
    /* when interlaced, prediction references the line 2 rows up (same field) */
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size = 0, ret;

    if (!pkt->data &&
        (ret = av_new_packet(pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating output packet.\n");
        return ret;
    }

    *p = *pict;
    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;

    if(s->context){
        /* adaptive-table mode: rebuild code lengths/bits from the running
         * stats and prepend the serialized tables to the packet */
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i]);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size += store_table(s, s->len[i], &pkt->data[size]);
        }

        /* decay the stats so newer frames dominate the adaptation */
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        /* the first 2 luma + 1 U + 1 V samples are stored raw and seed
         * the left predictors (order matches the decoder) */
        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        /* first line minus the raw pixels is left predicted */
        encode_422_bitstream(s, 2, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            /* interlaced: second line (other field) is also left predicted */
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            /* first 4 luma / 2 chroma samples of the next line: still left
             * predicted, because median needs a full top-left neighborhood */
            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            /* rest of that line is median predicted */
            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    /* 4:2:0 - emit extra luma-only lines until the luma row
                     * catches up with twice the chroma row */
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        }else{
            /* LEFT or PLANE predictor */
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        /* PLANE: subtract the line above first, then left-predict */
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    /* V's vertical diff is stashed in the upper half of temp[2] */
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        /* RGB is stored bottom-up: start at the last line, negative stride */
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        /* first pixel is stored raw and seeds the left predictors;
         * alpha byte is written as 0 */
        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    /* pad with 31 zero bits and round the payload up to whole 32-bit words
     * (size becomes a word count here, converted back to bytes below) */
    size+= (put_bits_count(&s->pb)+31)/8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size/= 4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        /* dump the symbol stats as text for two-pass table optimization;
         * only every 32nd frame to bound the stats_out churn */
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        /* bitstream words are stored byte-swapped */
        s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
    }

    s->picture_number++;

    pkt->size = size*4;
    pkt->flags |= AV_PKT_FLAG_KEY;   // intra-only codec: every packet is a keyframe
    *got_packet = 1;

    return 0;
}
1436
1437 static av_cold int encode_end(AVCodecContext *avctx)
1438 {
1439 HYuvContext *s = avctx->priv_data;
1440
1441 common_end(s);
1442
1443 av_freep(&avctx->extradata);
1444 av_freep(&avctx->stats_out);
1445
1446 return 0;
1447 }
1448 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1449
1450 #if CONFIG_HUFFYUV_DECODER
/* Decoder registration for the original Huffyuv bitstream. */
AVCodec ff_huffyuv_decoder = {
    .name           = "huffyuv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
1463 #endif
1464
1465 #if CONFIG_FFVHUFF_DECODER
/* Decoder registration for the FFmpeg variant (ffvhuff) of Huffyuv. */
AVCodec ff_ffvhuff_decoder = {
    .name           = "ffvhuff",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
1478 #endif
1479
1480 #if CONFIG_HUFFYUV_ENCODER
/* Encoder registration for the original Huffyuv bitstream
 * (accepts YUV422P and RGB32 input). */
AVCodec ff_huffyuv_encoder = {
    .name           = "huffyuv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
1492 #endif
1493
1494 #if CONFIG_FFVHUFF_ENCODER
/* Encoder registration for the FFmpeg variant (ffvhuff); additionally
 * accepts YUV420P input compared with the original huffyuv encoder. */
AVCodec ff_ffvhuff_encoder = {
    .name           = "ffvhuff",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
1506 #endif