lavc: use designated initialisers for all codecs.
[libav.git] / libavcodec / huffyuv.c
1 /*
2 * huffyuv codec for libavcodec
3 *
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7 * the algorithm used
8 *
9 * This file is part of Libav.
10 *
11 * Libav is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
15 *
16 * Libav is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with Libav; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26 /**
27 * @file
28 * huffyuv codec for libavcodec.
29 */
30
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "put_bits.h"
34 #include "dsputil.h"
35 #include "thread.h"
36
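// number of bits resolved by one first-level VLC lookup; joint codes longer
// than this cannot go into the tables built by generate_joint_tables()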
37 #define VLC_BITS 11
38
39 #if HAVE_BIGENDIAN
40 #define B 3
41 #define G 2
42 #define R 1
43 #define A 0
44 #else
45 #define B 0
46 #define G 1
47 #define R 2
48 #define A 3
49 #endif
50
51 typedef enum Predictor{
52 LEFT= 0,
53 PLANE,
54 MEDIAN,
55 } Predictor;
56
57 typedef struct HYuvContext{
58 AVCodecContext *avctx;
59 Predictor predictor;
60 GetBitContext gb;
61 PutBitContext pb;
62 int interlaced;
63 int decorrelate;
64 int bitstream_bpp;
65 int version;
66 int yuy2; //use yuy2 instead of 422P
67 int bgr32; //use bgr32 instead of bgr24
68 int width, height;
69 int flags;
70 int context;
71 int picture_number;
72 int last_slice_end;
73 uint8_t *temp[3];
74 uint64_t stats[3][256];
75 uint8_t len[3][256];
76 uint32_t bits[3][256];
77 uint32_t pix_bgr_map[1<<VLC_BITS];
78 VLC vlc[6]; //Y,U,V,YY,YU,YV
79 AVFrame picture;
80 uint8_t *bitstream_buffer;
81 unsigned int bitstream_buffer_size;
82 DSPContext dsp;
83 }HYuvContext;
84
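/* Built-in tables for "classic" (v1) huffyuv streams, which carry no Huffman
 * tables in their extradata: classic_shift_* hold RLE-coded code lengths in
 * the same format that read_len_table() parses, and classic_add_* hold the
 * matching code values; both are installed by read_old_huffman_tables(). */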
85 static const unsigned char classic_shift_luma[] = {
86 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
87 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
88 69,68, 0
89 };
90
91 static const unsigned char classic_shift_chroma[] = {
92 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
93 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
94 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
95 };
96
97 static const unsigned char classic_add_luma[256] = {
98 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
99 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
100 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
101 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
102 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
103 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
104 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
105 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
106 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
107 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
108 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
109 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
110 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
111 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
112 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
113 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
114 };
115
116 static const unsigned char classic_add_chroma[256] = {
117 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
118 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
119 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
120 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
121 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
122 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
123 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
124 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
125 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
126 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
127 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
128 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
129 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
130 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
131 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
132 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
133 };
134
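/* Store the left-neighbour differences of src[] in dst[] and return the last
 * source value so the caller can carry the left predictor into the next call.
 * For rows of 32+ pixels the first 16 samples are done in scalar C and the
 * rest handed to dsp.diff_bytes(), presumably to keep the dsp routine on
 * nicely aligned data. */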
135 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
136 int i;
137 if(w<32){
138 for(i=0; i<w; i++){
139 const int temp= src[i];
140 dst[i]= temp - left;
141 left= temp;
142 }
143 return left;
144 }else{
145 for(i=0; i<16; i++){
146 const int temp= src[i];
147 dst[i]= temp - left;
148 left= temp;
149 }
150 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
151 return src[w-1];
152 }
153 }
154
155 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
156 int i;
157 int r,g,b;
158 r= *red;
159 g= *green;
160 b= *blue;
161 for(i=0; i<FFMIN(w,4); i++){
162 const int rt= src[i*4+R];
163 const int gt= src[i*4+G];
164 const int bt= src[i*4+B];
165 dst[i*4+R]= rt - r;
166 dst[i*4+G]= gt - g;
167 dst[i*4+B]= bt - b;
168 r = rt;
169 g = gt;
170 b = bt;
171 }
172 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
173 *red= src[(w-1)*4+R];
174 *green= src[(w-1)*4+G];
175 *blue= src[(w-1)*4+B];
176 }
177
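/* Parse one run-length-coded table of 256 code lengths: each record is a
 * 3-bit repeat count followed by a 5-bit length value, and a repeat count of
 * 0 means an explicit 8-bit repeat count follows. */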
178 static int read_len_table(uint8_t *dst, GetBitContext *gb){
179 int i, val, repeat;
180
181 for(i=0; i<256;){
182 repeat= get_bits(gb, 3);
183 val = get_bits(gb, 5);
184 if(repeat==0)
185 repeat= get_bits(gb, 8);
186 //printf("%d %d\n", val, repeat);
187 if(i+repeat > 256) {
188 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
189 return -1;
190 }
191 while (repeat--)
192 dst[i++] = val;
193 }
194 return 0;
195 }
196
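/* Assign canonical Huffman codes from the code lengths, longest codes first;
 * if the number of codes pending at any length is odd, the lengths cannot
 * form a valid prefix code and we bail out. */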
197 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
198 int len, index;
199 uint32_t bits=0;
200
201 for(len=32; len>0; len--){
202 for(index=0; index<256; index++){
203 if(len_table[index]==len)
204 dst[index]= bits++;
205 }
206 if(bits & 1){
207 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
208 return -1;
209 }
210 bits >>= 1;
211 }
212 return 0;
213 }
214
215 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
216 typedef struct {
217 uint64_t val;
218 int name;
219 } HeapElem;
220
221 static void heap_sift(HeapElem *h, int root, int size)
222 {
223 while(root*2+1 < size) {
224 int child = root*2+1;
225 if(child < size-1 && h[child].val > h[child+1].val)
226 child++;
227 if(h[root].val > h[child].val) {
228 FFSWAP(HeapElem, h[root], h[child]);
229 root = child;
230 } else
231 break;
232 }
233 }
234
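/* Build length-limited Huffman code lengths from the symbol statistics with a
 * min-heap: repeatedly merge the two least frequent nodes and read the node
 * depths back afterwards. The small 'offset' added to every count keeps
 * weights nonzero; if any resulting length reaches 32, the offset is doubled
 * to flatten the distribution and the whole process is retried. */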
235 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
236 HeapElem h[256];
237 int up[2*256];
238 int len[2*256];
239 int offset, i, next;
240 int size = 256;
241
242 for(offset=1; ; offset<<=1){
243 for(i=0; i<size; i++){
244 h[i].name = i;
245 h[i].val = (stats[i] << 8) + offset;
246 }
247 for(i=size/2-1; i>=0; i--)
248 heap_sift(h, i, size);
249
250 for(next=size; next<size*2-1; next++){
251 // merge the two smallest entries and put the merged node back in the heap
252 uint64_t min1v = h[0].val;
253 up[h[0].name] = next;
254 h[0].val = INT64_MAX;
255 heap_sift(h, 0, size);
256 up[h[0].name] = next;
257 h[0].name = next;
258 h[0].val += min1v;
259 heap_sift(h, 0, size);
260 }
261
262 len[2*size-2] = 0;
263 for(i=2*size-3; i>=size; i--)
264 len[i] = len[up[i]] + 1;
265 for(i=0; i<size; i++) {
266 dst[i] = len[up[i]] + 1;
267 if(dst[i] >= 32) break;
268 }
269 if(i==size) break;
270 }
271 }
272 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
273
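/* Precompute joint VLC tables so that pairs of symbols (Y/Y, Y/U, Y/V) or
 * whole BGR triplets whose combined code length fits in VLC_BITS can be
 * decoded with a single lookup; longer combinations fall back to the
 * per-plane tables (see READ_2PIX and decode_bgr_1()). */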
274 static void generate_joint_tables(HYuvContext *s){
275 uint16_t symbols[1<<VLC_BITS];
276 uint16_t bits[1<<VLC_BITS];
277 uint8_t len[1<<VLC_BITS];
278 if(s->bitstream_bpp < 24){
279 int p, i, y, u;
280 for(p=0; p<3; p++){
281 for(i=y=0; y<256; y++){
282 int len0 = s->len[0][y];
283 int limit = VLC_BITS - len0;
284 if(limit <= 0)
285 continue;
286 for(u=0; u<256; u++){
287 int len1 = s->len[p][u];
288 if(len1 > limit)
289 continue;
290 len[i] = len0 + len1;
291 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
292 symbols[i] = (y<<8) + u;
293 if(symbols[i] != 0xffff) // reserved to mean "invalid"
294 i++;
295 }
296 }
297 free_vlc(&s->vlc[3+p]);
298 init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
299 }
300 }else{
301 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
302 int i, b, g, r, code;
303 int p0 = s->decorrelate;
304 int p1 = !s->decorrelate;
305 // restrict the range to +/-16 because that's pretty much guaranteed to
306 // cover all the combinations that fit in 11 bits total, and it doesn't
307 // matter if we miss a few rare codes.
308 for(i=0, g=-16; g<16; g++){
309 int len0 = s->len[p0][g&255];
310 int limit0 = VLC_BITS - len0;
311 if(limit0 < 2)
312 continue;
313 for(b=-16; b<16; b++){
314 int len1 = s->len[p1][b&255];
315 int limit1 = limit0 - len1;
316 if(limit1 < 1)
317 continue;
318 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
319 for(r=-16; r<16; r++){
320 int len2 = s->len[2][r&255];
321 if(len2 > limit1)
322 continue;
323 len[i] = len0 + len1 + len2;
324 bits[i] = (code << len2) + s->bits[2][r&255];
325 if(s->decorrelate){
326 map[i][G] = g;
327 map[i][B] = g+b;
328 map[i][R] = g+r;
329 }else{
330 map[i][B] = g;
331 map[i][G] = b;
332 map[i][R] = r;
333 }
334 i++;
335 }
336 }
337 }
338 free_vlc(&s->vlc[3]);
339 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
340 }
341 }
342
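/* Read the three per-plane code-length tables (from extradata, or from the
 * start of every frame when context modelling is on), derive the code values
 * and rebuild all VLC tables; returns the number of bytes consumed. */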
343 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
344 GetBitContext gb;
345 int i;
346
347 init_get_bits(&gb, src, length*8);
348
349 for(i=0; i<3; i++){
350 if(read_len_table(s->len[i], &gb)<0)
351 return -1;
352 if(generate_bits_table(s->bits[i], s->len[i])<0){
353 return -1;
354 }
355 free_vlc(&s->vlc[i]);
356 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
357 }
358
359 generate_joint_tables(s);
360
361 return (get_bits_count(&gb)+7)/8;
362 }
363
364 static int read_old_huffman_tables(HYuvContext *s){
365 #if 1
366 GetBitContext gb;
367 int i;
368
369 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
370 if(read_len_table(s->len[0], &gb)<0)
371 return -1;
372 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
373 if(read_len_table(s->len[1], &gb)<0)
374 return -1;
375
376 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
377 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
378
379 if(s->bitstream_bpp >= 24){
380 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
381 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
382 }
383 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
384 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
385
386 for(i=0; i<3; i++){
387 free_vlc(&s->vlc[i]);
388 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
389 }
390
391 generate_joint_tables(s);
392
393 return 0;
394 #else
395 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported\n");
396 return -1;
397 #endif
398 }
399
400 static av_cold void alloc_temp(HYuvContext *s){
401 int i;
402
403 if(s->bitstream_bpp<24){
404 for(i=0; i<3; i++){
405 s->temp[i]= av_malloc(s->width + 16);
406 }
407 }else{
408 s->temp[0]= av_mallocz(4*s->width + 16);
409 }
410 }
411
412 static av_cold int common_init(AVCodecContext *avctx){
413 HYuvContext *s = avctx->priv_data;
414
415 s->avctx= avctx;
416 s->flags= avctx->flags;
417
418 dsputil_init(&s->dsp, avctx);
419
420 s->width= avctx->width;
421 s->height= avctx->height;
422 assert(s->width>0 && s->height>0);
423
424 return 0;
425 }
426
427 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
428 static av_cold int decode_init(AVCodecContext *avctx)
429 {
430 HYuvContext *s = avctx->priv_data;
431
432 common_init(avctx);
433 memset(s->vlc, 0, 3*sizeof(VLC));
434
435 avctx->coded_frame= &s->picture;
436 s->interlaced= s->height > 288;
437
438 s->bgr32=1;
439 //if(avctx->extradata)
440 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
441 if(avctx->extradata_size){
442 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
443 s->version=1; // do such files exist at all?
444 else
445 s->version=2;
446 }else
447 s->version=0;
448
449 if(s->version==2){
450 int method, interlace;
451
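/* Version-2 extradata layout (mirrored by encode_init()):
 *   byte 0: predictor in bits 0-5, decorrelate flag in bit 6
 *   byte 1: bitstream bits per pixel (0: derive from bits_per_coded_sample)
 *   byte 2: bits 4-5 interlacing override, bit 6 per-frame context flag
 *   byte 3: reserved, 0
 * followed by the three RLE-coded Huffman length tables. */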
452 if (avctx->extradata_size < 4)
453 return -1;
454
455 method= ((uint8_t*)avctx->extradata)[0];
456 s->decorrelate= method&64 ? 1 : 0;
457 s->predictor= method&63;
458 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
459 if(s->bitstream_bpp==0)
460 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
461 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
462 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
463 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
464
465 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
466 return -1;
467 }else{
468 switch(avctx->bits_per_coded_sample&7){
469 case 1:
470 s->predictor= LEFT;
471 s->decorrelate= 0;
472 break;
473 case 2:
474 s->predictor= LEFT;
475 s->decorrelate= 1;
476 break;
477 case 3:
478 s->predictor= PLANE;
479 s->decorrelate= avctx->bits_per_coded_sample >= 24;
480 break;
481 case 4:
482 s->predictor= MEDIAN;
483 s->decorrelate= 0;
484 break;
485 default:
486 s->predictor= LEFT; //OLD
487 s->decorrelate= 0;
488 break;
489 }
490 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
491 s->context= 0;
492
493 if(read_old_huffman_tables(s) < 0)
494 return -1;
495 }
496
497 switch(s->bitstream_bpp){
498 case 12:
499 avctx->pix_fmt = PIX_FMT_YUV420P;
500 break;
501 case 16:
502 if(s->yuy2){
503 avctx->pix_fmt = PIX_FMT_YUYV422;
504 }else{
505 avctx->pix_fmt = PIX_FMT_YUV422P;
506 }
507 break;
508 case 24:
509 case 32:
510 if(s->bgr32){
511 avctx->pix_fmt = PIX_FMT_RGB32;
512 }else{
513 avctx->pix_fmt = PIX_FMT_BGR24;
514 }
515 break;
516 default:
517 assert(0);
518 }
519
520 alloc_temp(s);
521
522 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
523
524 return 0;
525 }
526
527 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
528 {
529 HYuvContext *s = avctx->priv_data;
530 int i;
531
532 avctx->coded_frame= &s->picture;
533 alloc_temp(s);
534
535 for (i = 0; i < 6; i++)
536 s->vlc[i].table = NULL;
537
538 if(s->version==2){
539 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
540 return -1;
541 }else{
542 if(read_old_huffman_tables(s) < 0)
543 return -1;
544 }
545
546 return 0;
547 }
548 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
549
550 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
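/* Inverse of read_len_table(): run-length-code a 256-entry length table into
 * buf. Runs of up to 7 equal lengths become a single byte (val | repeat<<5),
 * longer runs a (val, 8-bit repeat) byte pair; returns the number of bytes
 * written. */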
551 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
552 int i;
553 int index= 0;
554
555 for(i=0; i<256;){
556 int val= len[i];
557 int repeat=0;
558
559 for(; i<256 && len[i]==val && repeat<255; i++)
560 repeat++;
561
562 assert(val < 32 && val >0 && repeat<256 && repeat>0);
563 if(repeat>7){
564 buf[index++]= val;
565 buf[index++]= repeat;
566 }else{
567 buf[index++]= val | (repeat<<5);
568 }
569 }
570
571 return index;
572 }
573
574 static av_cold int encode_init(AVCodecContext *avctx)
575 {
576 HYuvContext *s = avctx->priv_data;
577 int i, j;
578
579 common_init(avctx);
580
581 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
582 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
583 s->version=2;
584
585 avctx->coded_frame= &s->picture;
586
587 switch(avctx->pix_fmt){
588 case PIX_FMT_YUV420P:
589 s->bitstream_bpp= 12;
590 break;
591 case PIX_FMT_YUV422P:
592 s->bitstream_bpp= 16;
593 break;
594 case PIX_FMT_RGB32:
595 s->bitstream_bpp= 24;
596 break;
597 default:
598 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
599 return -1;
600 }
601 avctx->bits_per_coded_sample= s->bitstream_bpp;
602 s->decorrelate= s->bitstream_bpp >= 24;
603 s->predictor= avctx->prediction_method;
604 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
605 if(avctx->context_model==1){
606 s->context= avctx->context_model;
607 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
608 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
609 return -1;
610 }
611 }else s->context= 0;
612
613 if(avctx->codec->id==CODEC_ID_HUFFYUV){
614 if(avctx->pix_fmt==PIX_FMT_YUV420P){
615 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
616 return -1;
617 }
618 if(avctx->context_model){
619 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
620 return -1;
621 }
622 if(s->interlaced != ( s->height > 288 ))
623 av_log(avctx, AV_LOG_INFO, "using the interlacing flag from huffyuv 2.2.0 or newer\n");
624 }
625
626 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
627 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
628 return -1;
629 }
630
631 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
632 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
633 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
634 if(s->context)
635 ((uint8_t*)avctx->extradata)[2]|= 0x40;
636 ((uint8_t*)avctx->extradata)[3]= 0;
637 s->avctx->extradata_size= 4;
638
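/* Two-pass statistics travel as plain text: for each of the three planes,
 * 256 decimal counters separated by spaces and terminated by a newline,
 * exactly as written at the end of encode_frame(). */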
639 if(avctx->stats_in){
640 char *p= avctx->stats_in;
641
642 for(i=0; i<3; i++)
643 for(j=0; j<256; j++)
644 s->stats[i][j]= 1;
645
646 for(;;){
647 for(i=0; i<3; i++){
648 char *next;
649
650 for(j=0; j<256; j++){
651 s->stats[i][j]+= strtol(p, &next, 0);
652 if(next==p) return -1;
653 p=next;
654 }
655 }
656 if(p[0]==0 || p[1]==0 || p[2]==0) break;
657 }
658 }else{
659 for(i=0; i<3; i++)
660 for(j=0; j<256; j++){
661 int d= FFMIN(j, 256-j);
662
663 s->stats[i][j]= 100000000/(d+1);
664 }
665 }
666
667 for(i=0; i<3; i++){
668 generate_len_table(s->len[i], s->stats[i]);
669
670 if(generate_bits_table(s->bits[i], s->len[i])<0){
671 return -1;
672 }
673
674 s->avctx->extradata_size+=
675 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
676 }
677
678 if(s->context){
679 for(i=0; i<3; i++){
680 int pels = s->width*s->height / (i?40:10);
681 for(j=0; j<256; j++){
682 int d= FFMIN(j, 256-j);
683 s->stats[i][j]= pels/(d+1);
684 }
685 }
686 }else{
687 for(i=0; i<3; i++)
688 for(j=0; j<256; j++)
689 s->stats[i][j]= 0;
690 }
691
692 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
693
694 alloc_temp(s);
695
696 s->picture_number=0;
697
698 return 0;
699 }
700 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
701
702 /* TODO instead of restarting the read when the code isn't in the first level
703 * of the joint table, jump into the 2nd level of the individual table. */
704 #define READ_2PIX(dst0, dst1, plane1){\
705 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
706 if(code != 0xffff){\
707 dst0 = code>>8;\
708 dst1 = code;\
709 }else{\
710 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
711 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
712 }\
713 }
714
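/* Code lengths never exceed 31 bits, so one iteration of the loops below
 * consumes at most 31*4 bits (two luma + two chroma symbols) in the 4:2:2
 * case and 31*2 bits in the gray case; the stricter, bounds-checked loop is
 * only taken when the remaining input might not cover 'count' iterations. */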
715 static void decode_422_bitstream(HYuvContext *s, int count){
716 int i;
717
718 count/=2;
719
720 if(count >= (get_bits_left(&s->gb))/(31*4)){
721 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
722 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
723 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
724 }
725 }else{
726 for(i=0; i<count; i++){
727 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
728 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
729 }
730 }
731 }
732
733 static void decode_gray_bitstream(HYuvContext *s, int count){
734 int i;
735
736 count/=2;
737
738 if(count >= (get_bits_left(&s->gb))/(31*2)){
739 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
740 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
741 }
742 }else{
743 for(i=0; i<count; i++){
744 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
745 }
746 }
747 }
748
749 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
750 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
751 int i;
752 const uint8_t *y = s->temp[0] + offset;
753 const uint8_t *u = s->temp[1] + offset/2;
754 const uint8_t *v = s->temp[2] + offset/2;
755
756 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
757 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
758 return -1;
759 }
760
761 #define LOAD4\
762 int y0 = y[2*i];\
763 int y1 = y[2*i+1];\
764 int u0 = u[i];\
765 int v0 = v[i];
766
767 count/=2;
768 if(s->flags&CODEC_FLAG_PASS1){
769 for(i=0; i<count; i++){
770 LOAD4;
771 s->stats[0][y0]++;
772 s->stats[1][u0]++;
773 s->stats[0][y1]++;
774 s->stats[2][v0]++;
775 }
776 }
777 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
778 return 0;
779 if(s->context){
780 for(i=0; i<count; i++){
781 LOAD4;
782 s->stats[0][y0]++;
783 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
784 s->stats[1][u0]++;
785 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
786 s->stats[0][y1]++;
787 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
788 s->stats[2][v0]++;
789 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
790 }
791 }else{
792 for(i=0; i<count; i++){
793 LOAD4;
794 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
795 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
796 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
797 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
798 }
799 }
800 return 0;
801 }
802
803 static int encode_gray_bitstream(HYuvContext *s, int count){
804 int i;
805
806 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
807 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
808 return -1;
809 }
810
811 #define LOAD2\
812 int y0 = s->temp[0][2*i];\
813 int y1 = s->temp[0][2*i+1];
814 #define STAT2\
815 s->stats[0][y0]++;\
816 s->stats[0][y1]++;
817 #define WRITE2\
818 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
819 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
820
821 count/=2;
822 if(s->flags&CODEC_FLAG_PASS1){
823 for(i=0; i<count; i++){
824 LOAD2;
825 STAT2;
826 }
827 }
828 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
829 return 0;
830
831 if(s->context){
832 for(i=0; i<count; i++){
833 LOAD2;
834 STAT2;
835 WRITE2;
836 }
837 }else{
838 for(i=0; i<count; i++){
839 LOAD2;
840 WRITE2;
841 }
842 }
843 return 0;
844 }
845 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
846
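/* Decode 'count' packed BGR(A) pixels into s->temp[0]: try the joint
 * three-component VLC first, then fall back to per-component codes. With
 * decorrelate set, B and R are coded as differences from G; the alpha byte,
 * when present, is read with the same table as the third component. */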
847 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
848 int i;
849 for(i=0; i<count; i++){
850 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
851 if(code != -1){
852 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
853 }else if(decorrelate){
854 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
855 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
856 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
857 }else{
858 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
859 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
860 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
861 }
862 if(alpha)
863 s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
864 }
865 }
866
867 static void decode_bgr_bitstream(HYuvContext *s, int count){
868 if(s->decorrelate){
869 if(s->bitstream_bpp==24)
870 decode_bgr_1(s, count, 1, 0);
871 else
872 decode_bgr_1(s, count, 1, 1);
873 }else{
874 if(s->bitstream_bpp==24)
875 decode_bgr_1(s, count, 0, 0);
876 else
877 decode_bgr_1(s, count, 0, 1);
878 }
879 }
880
881 static int encode_bgr_bitstream(HYuvContext *s, int count){
882 int i;
883
884 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
885 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
886 return -1;
887 }
888
889 #define LOAD3\
890 int g= s->temp[0][4*i+G];\
891 int b= (s->temp[0][4*i+B] - g) & 0xff;\
892 int r= (s->temp[0][4*i+R] - g) & 0xff;
893 #define STAT3\
894 s->stats[0][b]++;\
895 s->stats[1][g]++;\
896 s->stats[2][r]++;
897 #define WRITE3\
898 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
899 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
900 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
901
902 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
903 for(i=0; i<count; i++){
904 LOAD3;
905 STAT3;
906 }
907 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
908 for(i=0; i<count; i++){
909 LOAD3;
910 STAT3;
911 WRITE3;
912 }
913 }else{
914 for(i=0; i<count; i++){
915 LOAD3;
916 WRITE3;
917 }
918 }
919 return 0;
920 }
921
922 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
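/* Report the rows finished since the previous call through the user's
 * draw_horiz_band() callback; for 12 bpp (4:2:0) output the chroma offsets
 * are derived from half the luma row. */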
923 static void draw_slice(HYuvContext *s, int y){
924 int h, cy;
925 int offset[4];
926
927 if(s->avctx->draw_horiz_band==NULL)
928 return;
929
930 h= y - s->last_slice_end;
931 y -= h;
932
933 if(s->bitstream_bpp==12){
934 cy= y>>1;
935 }else{
936 cy= y;
937 }
938
939 offset[0] = s->picture.linesize[0]*y;
940 offset[1] = s->picture.linesize[1]*cy;
941 offset[2] = s->picture.linesize[2]*cy;
942 offset[3] = 0;
943 emms_c();
944
945 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
946
947 s->last_slice_end= y + h;
948 }
949
950 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
951 const uint8_t *buf = avpkt->data;
952 int buf_size = avpkt->size;
953 HYuvContext *s = avctx->priv_data;
954 const int width= s->width;
955 const int width2= s->width>>1;
956 const int height= s->height;
957 int fake_ystride, fake_ustride, fake_vstride;
958 AVFrame * const p= &s->picture;
959 int table_size= 0;
960
961 AVFrame *picture = data;
962
963 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
964 if (!s->bitstream_buffer)
965 return AVERROR(ENOMEM);
966
967 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
968 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
969
970 if(p->data[0])
971 ff_thread_release_buffer(avctx, p);
972
973 p->reference= 0;
974 if(ff_thread_get_buffer(avctx, p) < 0){
975 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
976 return -1;
977 }
978
979 if(s->context){
980 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
981 if(table_size < 0)
982 return -1;
983 }
984
985 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
986 return -1;
987
988 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
989
990 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
991 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
992 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
993
994 s->last_slice_end= 0;
995
996 if(s->bitstream_bpp<24){
997 int y, cy;
998 int lefty, leftu, leftv;
999 int lefttopy, lefttopu, lefttopv;
1000
1001 if(s->yuy2){
1002 p->data[0][3]= get_bits(&s->gb, 8);
1003 p->data[0][2]= get_bits(&s->gb, 8);
1004 p->data[0][1]= get_bits(&s->gb, 8);
1005 p->data[0][0]= get_bits(&s->gb, 8);
1006
1007 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
1008 return -1;
1009 }else{
1010
1011 leftv= p->data[2][0]= get_bits(&s->gb, 8);
1012 lefty= p->data[0][1]= get_bits(&s->gb, 8);
1013 leftu= p->data[1][0]= get_bits(&s->gb, 8);
1014 p->data[0][0]= get_bits(&s->gb, 8);
1015
1016 switch(s->predictor){
1017 case LEFT:
1018 case PLANE:
1019 decode_422_bitstream(s, width-2);
1020 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1021 if(!(s->flags&CODEC_FLAG_GRAY)){
1022 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1023 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1024 }
1025
1026 for(cy=y=1; y<s->height; y++,cy++){
1027 uint8_t *ydst, *udst, *vdst;
1028
1029 if(s->bitstream_bpp==12){
1030 decode_gray_bitstream(s, width);
1031
1032 ydst= p->data[0] + p->linesize[0]*y;
1033
1034 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1035 if(s->predictor == PLANE){
1036 if(y>s->interlaced)
1037 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1038 }
1039 y++;
1040 if(y>=s->height) break;
1041 }
1042
1043 draw_slice(s, y);
1044
1045 ydst= p->data[0] + p->linesize[0]*y;
1046 udst= p->data[1] + p->linesize[1]*cy;
1047 vdst= p->data[2] + p->linesize[2]*cy;
1048
1049 decode_422_bitstream(s, width);
1050 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1051 if(!(s->flags&CODEC_FLAG_GRAY)){
1052 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1053 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1054 }
1055 if(s->predictor == PLANE){
1056 if(cy>s->interlaced){
1057 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1058 if(!(s->flags&CODEC_FLAG_GRAY)){
1059 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1060 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1061 }
1062 }
1063 }
1064 }
1065 draw_slice(s, height);
1066
1067 break;
1068 case MEDIAN:
1069 /* first line except first 2 pixels is left predicted */
1070 decode_422_bitstream(s, width-2);
1071 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1072 if(!(s->flags&CODEC_FLAG_GRAY)){
1073 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1074 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1075 }
1076
1077 cy=y=1;
1078
1079 /* second line is left predicted for interlaced case */
1080 if(s->interlaced){
1081 decode_422_bitstream(s, width);
1082 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1083 if(!(s->flags&CODEC_FLAG_GRAY)){
1084 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1085 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1086 }
1087 y++; cy++;
1088 }
1089
1090 /* next 4 pixels are left predicted too */
1091 decode_422_bitstream(s, 4);
1092 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1093 if(!(s->flags&CODEC_FLAG_GRAY)){
1094 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1095 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1096 }
1097
1098 /* next line except the first 4 pixels is median predicted */
1099 lefttopy= p->data[0][3];
1100 decode_422_bitstream(s, width-4);
1101 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1102 if(!(s->flags&CODEC_FLAG_GRAY)){
1103 lefttopu= p->data[1][1];
1104 lefttopv= p->data[2][1];
1105 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1106 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
1107 }
1108 y++; cy++;
1109
1110 for(; y<height; y++,cy++){
1111 uint8_t *ydst, *udst, *vdst;
1112
1113 if(s->bitstream_bpp==12){
1114 while(2*cy > y){
1115 decode_gray_bitstream(s, width);
1116 ydst= p->data[0] + p->linesize[0]*y;
1117 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1118 y++;
1119 }
1120 if(y>=height) break;
1121 }
1122 draw_slice(s, y);
1123
1124 decode_422_bitstream(s, width);
1125
1126 ydst= p->data[0] + p->linesize[0]*y;
1127 udst= p->data[1] + p->linesize[1]*cy;
1128 vdst= p->data[2] + p->linesize[2]*cy;
1129
1130 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1131 if(!(s->flags&CODEC_FLAG_GRAY)){
1132 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1133 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1134 }
1135 }
1136
1137 draw_slice(s, height);
1138 break;
1139 }
1140 }
1141 }else{
1142 int y;
1143 int leftr, leftg, leftb, lefta;
1144 const int last_line= (height-1)*p->linesize[0];
1145
1146 if(s->bitstream_bpp==32){
1147 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1148 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1149 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1150 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1151 }else{
1152 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1153 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1154 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1155 lefta= p->data[0][last_line+A]= 255;
1156 skip_bits(&s->gb, 8);
1157 }
1158
1159 if(s->bgr32){
1160 switch(s->predictor){
1161 case LEFT:
1162 case PLANE:
1163 decode_bgr_bitstream(s, width-1);
1164 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1165
1166 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1167 decode_bgr_bitstream(s, width);
1168
1169 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1170 if(s->predictor == PLANE){
1171 if(s->bitstream_bpp!=32) lefta=0;
1172 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1173 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1174 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1175 }
1176 }
1177 }
1178 draw_slice(s, height); // just one large slice, since the frame is decoded bottom-up and slices cannot be reported in reverse order
1179 break;
1180 default:
1181 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1182 }
1183 }else{
1184
1185 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1186 return -1;
1187 }
1188 }
1189 emms_c();
1190
1191 *picture= *p;
1192 *data_size = sizeof(AVFrame);
1193
1194 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1195 }
1196 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1197
1198 static int common_end(HYuvContext *s){
1199 int i;
1200
1201 for(i=0; i<3; i++){
1202 av_freep(&s->temp[i]);
1203 }
1204 return 0;
1205 }
1206
1207 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1208 static av_cold int decode_end(AVCodecContext *avctx)
1209 {
1210 HYuvContext *s = avctx->priv_data;
1211 int i;
1212
1213 if (s->picture.data[0])
1214 avctx->release_buffer(avctx, &s->picture);
1215
1216 common_end(s);
1217 av_freep(&s->bitstream_buffer);
1218
1219 for(i=0; i<6; i++){
1220 free_vlc(&s->vlc[i]);
1221 }
1222
1223 return 0;
1224 }
1225 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1226
1227 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
1228 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1229 HYuvContext *s = avctx->priv_data;
1230 AVFrame *pict = data;
1231 const int width= s->width;
1232 const int width2= s->width>>1;
1233 const int height= s->height;
1234 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1235 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1236 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1237 AVFrame * const p= &s->picture;
1238 int i, j, size=0;
1239
1240 *p = *pict;
1241 p->pict_type= AV_PICTURE_TYPE_I;
1242 p->key_frame= 1;
1243
1244 if(s->context){
1245 for(i=0; i<3; i++){
1246 generate_len_table(s->len[i], s->stats[i]);
1247 if(generate_bits_table(s->bits[i], s->len[i])<0)
1248 return -1;
1249 size+= store_table(s, s->len[i], &buf[size]);
1250 }
1251
1252 for(i=0; i<3; i++)
1253 for(j=0; j<256; j++)
1254 s->stats[i][j] >>= 1;
1255 }
1256
1257 init_put_bits(&s->pb, buf+size, buf_size-size);
1258
1259 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1260 int lefty, leftu, leftv, y, cy;
1261
1262 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1263 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1264 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1265 put_bits(&s->pb, 8, p->data[0][0]);
1266
1267 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1268 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1269 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1270
1271 encode_422_bitstream(s, 2, width-2);
1272
1273 if(s->predictor==MEDIAN){
1274 int lefttopy, lefttopu, lefttopv;
1275 cy=y=1;
1276 if(s->interlaced){
1277 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1278 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1279 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1280
1281 encode_422_bitstream(s, 0, width);
1282 y++; cy++;
1283 }
1284
1285 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1286 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1287 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1288
1289 encode_422_bitstream(s, 0, 4);
1290
1291 lefttopy= p->data[0][3];
1292 lefttopu= p->data[1][1];
1293 lefttopv= p->data[2][1];
1294 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1295 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1296 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1297 encode_422_bitstream(s, 0, width-4);
1298 y++; cy++;
1299
1300 for(; y<height; y++,cy++){
1301 uint8_t *ydst, *udst, *vdst;
1302
1303 if(s->bitstream_bpp==12){
1304 while(2*cy > y){
1305 ydst= p->data[0] + p->linesize[0]*y;
1306 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1307 encode_gray_bitstream(s, width);
1308 y++;
1309 }
1310 if(y>=height) break;
1311 }
1312 ydst= p->data[0] + p->linesize[0]*y;
1313 udst= p->data[1] + p->linesize[1]*cy;
1314 vdst= p->data[2] + p->linesize[2]*cy;
1315
1316 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1317 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1318 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1319
1320 encode_422_bitstream(s, 0, width);
1321 }
1322 }else{
1323 for(cy=y=1; y<height; y++,cy++){
1324 uint8_t *ydst, *udst, *vdst;
1325
1326 /* encode a luma only line & y++ */
1327 if(s->bitstream_bpp==12){
1328 ydst= p->data[0] + p->linesize[0]*y;
1329
1330 if(s->predictor == PLANE && s->interlaced < y){
1331 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1332
1333 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1334 }else{
1335 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1336 }
1337 encode_gray_bitstream(s, width);
1338 y++;
1339 if(y>=height) break;
1340 }
1341
1342 ydst= p->data[0] + p->linesize[0]*y;
1343 udst= p->data[1] + p->linesize[1]*cy;
1344 vdst= p->data[2] + p->linesize[2]*cy;
1345
1346 if(s->predictor == PLANE && s->interlaced < cy){
1347 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1348 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1349 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1350
1351 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1352 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1353 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1354 }else{
1355 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1356 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1357 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1358 }
1359
1360 encode_422_bitstream(s, 0, width);
1361 }
1362 }
1363 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1364 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1365 const int stride = -p->linesize[0];
1366 const int fake_stride = -fake_ystride;
1367 int y;
1368 int leftr, leftg, leftb;
1369
1370 put_bits(&s->pb, 8, leftr= data[R]);
1371 put_bits(&s->pb, 8, leftg= data[G]);
1372 put_bits(&s->pb, 8, leftb= data[B]);
1373 put_bits(&s->pb, 8, 0);
1374
1375 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1376 encode_bgr_bitstream(s, width-1);
1377
1378 for(y=1; y<s->height; y++){
1379 uint8_t *dst = data + y*stride;
1380 if(s->predictor == PLANE && s->interlaced < y){
1381 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1382 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1383 }else{
1384 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1385 }
1386 encode_bgr_bitstream(s, width);
1387 }
1388 }else{
1389 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1390 }
1391 emms_c();
1392
1393 size+= (put_bits_count(&s->pb)+31)/8;
1394 put_bits(&s->pb, 16, 0);
1395 put_bits(&s->pb, 15, 0);
1396 size/= 4;
1397
1398 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1399 int j;
1400 char *p= avctx->stats_out;
1401 char *end= p + 1024*30;
1402 for(i=0; i<3; i++){
1403 for(j=0; j<256; j++){
1404 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1405 p+= strlen(p);
1406 s->stats[i][j]= 0;
1407 }
1408 snprintf(p, end-p, "\n");
1409 p++;
1410 }
1411 } else
1412 avctx->stats_out[0] = '\0';
1413 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1414 flush_put_bits(&s->pb);
1415 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1416 }
1417
1418 s->picture_number++;
1419
1420 return size*4;
1421 }
1422
1423 static av_cold int encode_end(AVCodecContext *avctx)
1424 {
1425 HYuvContext *s = avctx->priv_data;
1426
1427 common_end(s);
1428
1429 av_freep(&avctx->extradata);
1430 av_freep(&avctx->stats_out);
1431
1432 return 0;
1433 }
1434 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1435
1436 #if CONFIG_HUFFYUV_DECODER
1437 AVCodec ff_huffyuv_decoder = {
1438 .name = "huffyuv",
1439 .type = AVMEDIA_TYPE_VIDEO,
1440 .id = CODEC_ID_HUFFYUV,
1441 .priv_data_size = sizeof(HYuvContext),
1442 .init = decode_init,
1443 .close = decode_end,
1444 .decode = decode_frame,
1445 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1446 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1447 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1448 };
1449 #endif
1450
1451 #if CONFIG_FFVHUFF_DECODER
1452 AVCodec ff_ffvhuff_decoder = {
1453 .name = "ffvhuff",
1454 .type = AVMEDIA_TYPE_VIDEO,
1455 .id = CODEC_ID_FFVHUFF,
1456 .priv_data_size = sizeof(HYuvContext),
1457 .init = decode_init,
1458 .close = decode_end,
1459 .decode = decode_frame,
1460 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1461 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1462 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1463 };
1464 #endif
1465
1466 #if CONFIG_HUFFYUV_ENCODER
1467 AVCodec ff_huffyuv_encoder = {
1468 .name = "huffyuv",
1469 .type = AVMEDIA_TYPE_VIDEO,
1470 .id = CODEC_ID_HUFFYUV,
1471 .priv_data_size = sizeof(HYuvContext),
1472 .init = encode_init,
1473 .encode = encode_frame,
1474 .close = encode_end,
1475 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1476 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1477 };
1478 #endif
1479
1480 #if CONFIG_FFVHUFF_ENCODER
1481 AVCodec ff_ffvhuff_encoder = {
1482 .name = "ffvhuff",
1483 .type = AVMEDIA_TYPE_VIDEO,
1484 .id = CODEC_ID_FFVHUFF,
1485 .priv_data_size = sizeof(HYuvContext),
1486 .init = encode_init,
1487 .encode = encode_frame,
1488 .close = encode_end,
1489 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1490 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1491 };
1492 #endif