lavc: fix decode_frame() third parameter semantics for video decoders
[libav.git] / libavcodec / huffyuv.c
1 /*
2 * huffyuv codec for libavcodec
3 *
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7 * the algorithm used
8 *
9 * This file is part of Libav.
10 *
11 * Libav is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
15 *
16 * Libav is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with Libav; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26 /**
27 * @file
28 * huffyuv codec for libavcodec.
29 */
30
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "put_bits.h"
34 #include "dsputil.h"
35 #include "thread.h"
36 #include "huffman.h"
37
/* Number of bits resolved per get_vlc2() table step. */
#define VLC_BITS 11

/* Byte offsets of the colour components inside a packed 32-bit RGB32
 * pixel; they depend on host endianness. */
#if HAVE_BIGENDIAN
#define B 3
#define G 2
#define R 1
#define A 0
#else
#define B 0
#define G 1
#define R 2
#define A 3
#endif

/* Spatial prediction methods signalled in the huffyuv bitstream. */
typedef enum Predictor {
    LEFT= 0,
    PLANE,
    MEDIAN,
} Predictor;
57
/* Codec state shared by the huffyuv encoder and decoder. */
typedef struct HYuvContext {
    AVCodecContext *avctx;
    Predictor predictor;   ///< spatial prediction method
    GetBitContext gb;      ///< bitstream reader (decoder side)
    PutBitContext pb;      ///< bitstream writer (encoder side)
    int interlaced;
    int decorrelate;       ///< nonzero: B and R are coded as differences against G
    int bitstream_bpp;     ///< bits per pixel as coded in the stream (12/16/24/32)
    int version;
    int yuy2;              //use yuy2 instead of 422P
    int bgr32;             //use bgr32 instead of bgr24
    int width, height;
    int flags;
    int context;           ///< nonzero: per-frame adaptive huffman tables
    int picture_number;
    int last_slice_end;    ///< first row not yet reported via draw_horiz_band
    uint8_t *temp[3];      ///< per-line scratch buffers, see alloc_temp()
    uint64_t stats[3][256];    ///< per-plane symbol frequencies for table building
    uint8_t len[3][256];       ///< per-plane huffman code lengths
    uint32_t bits[3][256];     ///< per-plane huffman code bits
    uint32_t pix_bgr_map[1<<VLC_BITS];  ///< joint-code -> packed pixel lookup (RGB)
    VLC vlc[6]; //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;          ///< byte-swapped copy of the input packet
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
} HYuvContext;
85
/* Run-length coded huffman code-length tables of the original
 * (pre-version-2) huffyuv codec; parsed by read_old_huffman_tables()
 * through read_len_table(). */
#define classic_shift_luma_table_size 42
static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
    34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
    16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
    69,68, 0
};

#define classic_shift_chroma_table_size 59
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
    66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
    56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
    214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};
99
/* Huffman code-bit tables of the original huffyuv codec, indexed by
 * symbol; installed as s->bits[] by read_old_huffman_tables() alongside
 * the classic_shift_* length tables. */
static const unsigned char classic_add_luma[256] = {
      3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
     73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
     68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
     35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
     37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
     35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
     27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
     15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
     12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
     12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
     18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
     28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
     28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
     62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
     54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
     46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

static const unsigned char classic_add_chroma[256] = {
      3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
      7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
     11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
     43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
    143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
     80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
     17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
    112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
      0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
    135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
     52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
     19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
      7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
     83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
     14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
      6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};
137
138 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
139 uint8_t *src, int w, int left)
140 {
141 int i;
142 if (w < 32) {
143 for (i = 0; i < w; i++) {
144 const int temp = src[i];
145 dst[i] = temp - left;
146 left = temp;
147 }
148 return left;
149 } else {
150 for (i = 0; i < 16; i++) {
151 const int temp = src[i];
152 dst[i] = temp - left;
153 left = temp;
154 }
155 s->dsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
156 return src[w-1];
157 }
158 }
159
160 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
161 uint8_t *src, int w,
162 int *red, int *green, int *blue)
163 {
164 int i;
165 int r,g,b;
166 r = *red;
167 g = *green;
168 b = *blue;
169
170 for (i = 0; i < FFMIN(w, 4); i++) {
171 const int rt = src[i * 4 + R];
172 const int gt = src[i * 4 + G];
173 const int bt = src[i * 4 + B];
174 dst[i * 4 + R] = rt - r;
175 dst[i * 4 + G] = gt - g;
176 dst[i * 4 + B] = bt - b;
177 r = rt;
178 g = gt;
179 b = bt;
180 }
181
182 s->dsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);
183
184 *red = src[(w - 1) * 4 + R];
185 *green = src[(w - 1) * 4 + G];
186 *blue = src[(w - 1) * 4 + B];
187 }
188
189 static int read_len_table(uint8_t *dst, GetBitContext *gb)
190 {
191 int i, val, repeat;
192
193 for (i = 0; i < 256;) {
194 repeat = get_bits(gb, 3);
195 val = get_bits(gb, 5);
196 if (repeat == 0)
197 repeat = get_bits(gb, 8);
198 if (i + repeat > 256 || get_bits_left(gb) < 0) {
199 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
200 return -1;
201 }
202 while (repeat--)
203 dst[i++] = val;
204 }
205 return 0;
206 }
207
208 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table)
209 {
210 int len, index;
211 uint32_t bits = 0;
212
213 for (len = 32; len > 0; len--) {
214 for (index = 0; index < 256; index++) {
215 if (len_table[index] == len)
216 dst[index] = bits++;
217 }
218 if (bits & 1) {
219 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
220 return -1;
221 }
222 bits >>= 1;
223 }
224 return 0;
225 }
226
/**
 * Build the joint VLC tables: vlc[3..5] decode two symbols (or a whole
 * RGB pixel) with a single get_vlc2() lookup whenever the combined code
 * fits into VLC_BITS bits.  Pairs that do not fit fall back to the
 * per-plane tables at decode time (see READ_2PIX and decode_bgr_1()).
 */
static void generate_joint_tables(HYuvContext *s)
{
    uint16_t symbols[1 << VLC_BITS];
    uint16_t bits[1 << VLC_BITS];
    uint8_t len[1 << VLC_BITS];
    if (s->bitstream_bpp < 24) {
        /* YUV: one joint table per pairing (YY, YU, YV) */
        int p, i, y, u;
        for (p = 0; p < 3; p++) {
            for (i = y = 0; y < 256; y++) {
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for (u = 0; u < 256; u++) {
                    int len1 = s->len[p][u];
                    if (len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y << 8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            ff_free_vlc(&s->vlc[3 + p]);
            ff_init_vlc_sparse(&s->vlc[3 + p], VLC_BITS, i, len, 1, 1,
                               bits, 2, 2, symbols, 2, 2, 0);
        }
    } else {
        /* RGB: one joint table whose codes index pre-composed pixels in
         * s->pix_bgr_map */
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for (i = 0, g = -16; g < 16; g++) {
            int len0 = s->len[p0][g & 255];
            int limit0 = VLC_BITS - len0;
            if (limit0 < 2)
                continue;
            for (b = -16; b < 16; b++) {
                int len1 = s->len[p1][b & 255];
                int limit1 = limit0 - len1;
                if (limit1 < 1)
                    continue;
                code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
                for (r = -16; r < 16; r++) {
                    int len2 = s->len[2][r & 255];
                    if (len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r & 255];
                    if (s->decorrelate) {
                        /* stored differences are relative to green */
                        map[i][G] = g;
                        map[i][B] = g + b;
                        map[i][R] = g + r;
                    } else {
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        ff_free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}
297
298 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
299 {
300 GetBitContext gb;
301 int i;
302
303 init_get_bits(&gb, src, length * 8);
304
305 for (i = 0; i < 3; i++) {
306 if (read_len_table(s->len[i], &gb) < 0)
307 return -1;
308 if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
309 return -1;
310 }
311 ff_free_vlc(&s->vlc[i]);
312 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
313 s->bits[i], 4, 4, 0);
314 }
315
316 generate_joint_tables(s);
317
318 return (get_bits_count(&gb) + 7) / 8;
319 }
320
321 static int read_old_huffman_tables(HYuvContext *s)
322 {
323 GetBitContext gb;
324 int i;
325
326 init_get_bits(&gb, classic_shift_luma,
327 classic_shift_luma_table_size * 8);
328 if (read_len_table(s->len[0], &gb) < 0)
329 return -1;
330
331 init_get_bits(&gb, classic_shift_chroma,
332 classic_shift_chroma_table_size * 8);
333 if (read_len_table(s->len[1], &gb) < 0)
334 return -1;
335
336 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
337 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
338
339 if (s->bitstream_bpp >= 24) {
340 memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
341 memcpy(s->len[1] , s->len [0], 256 * sizeof(uint8_t));
342 }
343 memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
344 memcpy(s->len[2] , s->len [1], 256 * sizeof(uint8_t));
345
346 for (i = 0; i < 3; i++) {
347 ff_free_vlc(&s->vlc[i]);
348 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
349 s->bits[i], 4, 4, 0);
350 }
351
352 generate_joint_tables(s);
353
354 return 0;
355 }
356
357 static av_cold void alloc_temp(HYuvContext *s)
358 {
359 int i;
360
361 if (s->bitstream_bpp<24) {
362 for (i=0; i<3; i++) {
363 s->temp[i]= av_malloc(s->width + 16);
364 }
365 } else {
366 s->temp[0]= av_mallocz(4*s->width + 16);
367 }
368 }
369
370 static av_cold int common_init(AVCodecContext *avctx)
371 {
372 HYuvContext *s = avctx->priv_data;
373
374 s->avctx = avctx;
375 s->flags = avctx->flags;
376
377 ff_dsputil_init(&s->dsp, avctx);
378
379 s->width = avctx->width;
380 s->height = avctx->height;
381 assert(s->width>0 && s->height>0);
382
383 return 0;
384 }
385
386 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
387 static av_cold int decode_init(AVCodecContext *avctx)
388 {
389 HYuvContext *s = avctx->priv_data;
390
391 common_init(avctx);
392 memset(s->vlc, 0, 3 * sizeof(VLC));
393
394 avctx->coded_frame = &s->picture;
395 s->interlaced = s->height > 288;
396
397 s->bgr32 = 1;
398
399 if (avctx->extradata_size) {
400 if ((avctx->bits_per_coded_sample & 7) &&
401 avctx->bits_per_coded_sample != 12)
402 s->version = 1; // do such files exist at all?
403 else
404 s->version = 2;
405 } else
406 s->version = 0;
407
408 if (s->version == 2) {
409 int method, interlace;
410
411 if (avctx->extradata_size < 4)
412 return -1;
413
414 method = ((uint8_t*)avctx->extradata)[0];
415 s->decorrelate = method & 64 ? 1 : 0;
416 s->predictor = method & 63;
417 s->bitstream_bpp = ((uint8_t*)avctx->extradata)[1];
418 if (s->bitstream_bpp == 0)
419 s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
420 interlace = (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
421 s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
422 s->context = ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
423
424 if ( read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
425 avctx->extradata_size - 4) < 0)
426 return -1;
427 }else{
428 switch (avctx->bits_per_coded_sample & 7) {
429 case 1:
430 s->predictor = LEFT;
431 s->decorrelate = 0;
432 break;
433 case 2:
434 s->predictor = LEFT;
435 s->decorrelate = 1;
436 break;
437 case 3:
438 s->predictor = PLANE;
439 s->decorrelate = avctx->bits_per_coded_sample >= 24;
440 break;
441 case 4:
442 s->predictor = MEDIAN;
443 s->decorrelate = 0;
444 break;
445 default:
446 s->predictor = LEFT; //OLD
447 s->decorrelate = 0;
448 break;
449 }
450 s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
451 s->context = 0;
452
453 if (read_old_huffman_tables(s) < 0)
454 return -1;
455 }
456
457 switch (s->bitstream_bpp) {
458 case 12:
459 avctx->pix_fmt = AV_PIX_FMT_YUV420P;
460 break;
461 case 16:
462 if (s->yuy2) {
463 avctx->pix_fmt = AV_PIX_FMT_YUYV422;
464 } else {
465 avctx->pix_fmt = AV_PIX_FMT_YUV422P;
466 }
467 break;
468 case 24:
469 case 32:
470 if (s->bgr32) {
471 avctx->pix_fmt = AV_PIX_FMT_RGB32;
472 } else {
473 avctx->pix_fmt = AV_PIX_FMT_BGR24;
474 }
475 break;
476 default:
477 return AVERROR_INVALIDDATA;
478 }
479
480 alloc_temp(s);
481
482 return 0;
483 }
484
485 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
486 {
487 HYuvContext *s = avctx->priv_data;
488 int i;
489
490 avctx->coded_frame= &s->picture;
491 alloc_temp(s);
492
493 for (i = 0; i < 6; i++)
494 s->vlc[i].table = NULL;
495
496 if (s->version == 2) {
497 if (read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
498 avctx->extradata_size) < 0)
499 return -1;
500 } else {
501 if (read_old_huffman_tables(s) < 0)
502 return -1;
503 }
504
505 return 0;
506 }
507 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
508
509 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
510 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
511 {
512 int i;
513 int index = 0;
514
515 for (i = 0; i < 256;) {
516 int val = len[i];
517 int repeat = 0;
518
519 for (; i < 256 && len[i] == val && repeat < 255; i++)
520 repeat++;
521
522 assert(val < 32 && val >0 && repeat<256 && repeat>0);
523 if ( repeat > 7) {
524 buf[index++] = val;
525 buf[index++] = repeat;
526 } else {
527 buf[index++] = val | (repeat << 5);
528 }
529 }
530
531 return index;
532 }
533
534 static av_cold int encode_init(AVCodecContext *avctx)
535 {
536 HYuvContext *s = avctx->priv_data;
537 int i, j;
538
539 common_init(avctx);
540
541 avctx->extradata = av_mallocz(1024*30); // 256*3+4 == 772
542 avctx->stats_out = av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
543 s->version = 2;
544
545 avctx->coded_frame = &s->picture;
546
547 switch (avctx->pix_fmt) {
548 case AV_PIX_FMT_YUV420P:
549 s->bitstream_bpp = 12;
550 break;
551 case AV_PIX_FMT_YUV422P:
552 s->bitstream_bpp = 16;
553 break;
554 case AV_PIX_FMT_RGB32:
555 s->bitstream_bpp = 24;
556 break;
557 default:
558 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
559 return -1;
560 }
561 avctx->bits_per_coded_sample = s->bitstream_bpp;
562 s->decorrelate = s->bitstream_bpp >= 24;
563 s->predictor = avctx->prediction_method;
564 s->interlaced = avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
565 if (avctx->context_model == 1) {
566 s->context = avctx->context_model;
567 if (s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) {
568 av_log(avctx, AV_LOG_ERROR,
569 "context=1 is not compatible with "
570 "2 pass huffyuv encoding\n");
571 return -1;
572 }
573 }else s->context= 0;
574
575 if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
576 if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
577 av_log(avctx, AV_LOG_ERROR,
578 "Error: YV12 is not supported by huffyuv; use "
579 "vcodec=ffvhuff or format=422p\n");
580 return -1;
581 }
582 if (avctx->context_model) {
583 av_log(avctx, AV_LOG_ERROR,
584 "Error: per-frame huffman tables are not supported "
585 "by huffyuv; use vcodec=ffvhuff\n");
586 return -1;
587 }
588 if (s->interlaced != ( s->height > 288 ))
589 av_log(avctx, AV_LOG_INFO,
590 "using huffyuv 2.2.0 or newer interlacing flag\n");
591 }
592
593 if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN) {
594 av_log(avctx, AV_LOG_ERROR,
595 "Error: RGB is incompatible with median predictor\n");
596 return -1;
597 }
598
599 ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
600 ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
601 ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
602 if (s->context)
603 ((uint8_t*)avctx->extradata)[2] |= 0x40;
604 ((uint8_t*)avctx->extradata)[3] = 0;
605 s->avctx->extradata_size = 4;
606
607 if (avctx->stats_in) {
608 char *p = avctx->stats_in;
609
610 for (i = 0; i < 3; i++)
611 for (j = 0; j < 256; j++)
612 s->stats[i][j] = 1;
613
614 for (;;) {
615 for (i = 0; i < 3; i++) {
616 char *next;
617
618 for (j = 0; j < 256; j++) {
619 s->stats[i][j] += strtol(p, &next, 0);
620 if (next == p) return -1;
621 p = next;
622 }
623 }
624 if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
625 }
626 } else {
627 for (i = 0; i < 3; i++)
628 for (j = 0; j < 256; j++) {
629 int d = FFMIN(j, 256 - j);
630
631 s->stats[i][j] = 100000000 / (d + 1);
632 }
633 }
634
635 for (i = 0; i < 3; i++) {
636 ff_huff_gen_len_table(s->len[i], s->stats[i]);
637
638 if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
639 return -1;
640 }
641
642 s->avctx->extradata_size +=
643 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
644 }
645
646 if (s->context) {
647 for (i = 0; i < 3; i++) {
648 int pels = s->width * s->height / (i ? 40 : 10);
649 for (j = 0; j < 256; j++) {
650 int d = FFMIN(j, 256 - j);
651 s->stats[i][j] = pels/(d + 1);
652 }
653 }
654 } else {
655 for (i = 0; i < 3; i++)
656 for (j = 0; j < 256; j++)
657 s->stats[i][j]= 0;
658 }
659
660 alloc_temp(s);
661
662 s->picture_number=0;
663
664 return 0;
665 }
666 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
667
/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols at once via the joint table for plane pair
 * (0, plane1); the reserved code 0xffff means the pair was too long for
 * the joint table and each symbol is read via its per-plane table. */
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}
680
681 static void decode_422_bitstream(HYuvContext *s, int count)
682 {
683 int i;
684
685 count /= 2;
686
687 if (count >= (get_bits_left(&s->gb)) / (31 * 4)) {
688 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
689 READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
690 READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
691 }
692 } else {
693 for (i = 0; i < count; i++) {
694 READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
695 READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
696 }
697 }
698 }
699
700 static void decode_gray_bitstream(HYuvContext *s, int count)
701 {
702 int i;
703
704 count/=2;
705
706 if (count >= (get_bits_left(&s->gb)) / (31 * 2)) {
707 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
708 READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
709 }
710 } else {
711 for(i=0; i<count; i++){
712 READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
713 }
714 }
715 }
716
717 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
718 static int encode_422_bitstream(HYuvContext *s, int offset, int count)
719 {
720 int i;
721 const uint8_t *y = s->temp[0] + offset;
722 const uint8_t *u = s->temp[1] + offset / 2;
723 const uint8_t *v = s->temp[2] + offset / 2;
724
725 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
726 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
727 return -1;
728 }
729
730 #define LOAD4\
731 int y0 = y[2 * i];\
732 int y1 = y[2 * i + 1];\
733 int u0 = u[i];\
734 int v0 = v[i];
735
736 count /= 2;
737
738 if (s->flags & CODEC_FLAG_PASS1) {
739 for(i = 0; i < count; i++) {
740 LOAD4;
741 s->stats[0][y0]++;
742 s->stats[1][u0]++;
743 s->stats[0][y1]++;
744 s->stats[2][v0]++;
745 }
746 }
747 if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
748 return 0;
749 if (s->context) {
750 for (i = 0; i < count; i++) {
751 LOAD4;
752 s->stats[0][y0]++;
753 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
754 s->stats[1][u0]++;
755 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
756 s->stats[0][y1]++;
757 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
758 s->stats[2][v0]++;
759 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
760 }
761 } else {
762 for(i = 0; i < count; i++) {
763 LOAD4;
764 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
765 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
766 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
767 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
768 }
769 }
770 return 0;
771 }
772
773 static int encode_gray_bitstream(HYuvContext *s, int count)
774 {
775 int i;
776
777 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
778 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
779 return -1;
780 }
781
782 #define LOAD2\
783 int y0 = s->temp[0][2 * i];\
784 int y1 = s->temp[0][2 * i + 1];
785 #define STAT2\
786 s->stats[0][y0]++;\
787 s->stats[0][y1]++;
788 #define WRITE2\
789 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
790 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
791
792 count /= 2;
793
794 if (s->flags & CODEC_FLAG_PASS1) {
795 for (i = 0; i < count; i++) {
796 LOAD2;
797 STAT2;
798 }
799 }
800 if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
801 return 0;
802
803 if (s->context) {
804 for (i = 0; i < count; i++) {
805 LOAD2;
806 STAT2;
807 WRITE2;
808 }
809 } else {
810 for (i = 0; i < count; i++) {
811 LOAD2;
812 WRITE2;
813 }
814 }
815 return 0;
816 }
817 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
818
819 static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
820 int decorrelate, int alpha)
821 {
822 int i;
823 for (i = 0; i < count; i++) {
824 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
825 if (code != -1) {
826 *(uint32_t*)&s->temp[0][4 * i] = s->pix_bgr_map[code];
827 } else if(decorrelate) {
828 s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
829 s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) +
830 s->temp[0][4 * i + G];
831 s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) +
832 s->temp[0][4 * i + G];
833 } else {
834 s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
835 s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
836 s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
837 }
838 if (alpha)
839 s->temp[0][4 * i + A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
840 }
841 }
842
843 static void decode_bgr_bitstream(HYuvContext *s, int count)
844 {
845 if (s->decorrelate) {
846 if (s->bitstream_bpp==24)
847 decode_bgr_1(s, count, 1, 0);
848 else
849 decode_bgr_1(s, count, 1, 1);
850 } else {
851 if (s->bitstream_bpp==24)
852 decode_bgr_1(s, count, 0, 0);
853 else
854 decode_bgr_1(s, count, 0, 1);
855 }
856 }
857
858 static int encode_bgr_bitstream(HYuvContext *s, int count)
859 {
860 int i;
861
862 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 3 * 4 * count) {
863 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
864 return -1;
865 }
866
867 #define LOAD3\
868 int g = s->temp[0][4 * i + G];\
869 int b = (s->temp[0][4 * i + B] - g) & 0xff;\
870 int r = (s->temp[0][4 * i + R] - g) & 0xff;
871 #define STAT3\
872 s->stats[0][b]++;\
873 s->stats[1][g]++;\
874 s->stats[2][r]++;
875 #define WRITE3\
876 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
877 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
878 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
879
880 if ((s->flags & CODEC_FLAG_PASS1) &&
881 (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
882 for (i = 0; i < count; i++) {
883 LOAD3;
884 STAT3;
885 }
886 } else if (s->context || (s->flags & CODEC_FLAG_PASS1)) {
887 for (i = 0; i < count; i++) {
888 LOAD3;
889 STAT3;
890 WRITE3;
891 }
892 } else {
893 for (i = 0; i < count; i++) {
894 LOAD3;
895 WRITE3;
896 }
897 }
898 return 0;
899 }
900
901 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
902 static void draw_slice(HYuvContext *s, int y)
903 {
904 int h, cy, i;
905 int offset[AV_NUM_DATA_POINTERS];
906
907 if (s->avctx->draw_horiz_band==NULL)
908 return;
909
910 h = y - s->last_slice_end;
911 y -= h;
912
913 if (s->bitstream_bpp == 12) {
914 cy = y>>1;
915 } else {
916 cy = y;
917 }
918
919 offset[0] = s->picture.linesize[0]*y;
920 offset[1] = s->picture.linesize[1]*cy;
921 offset[2] = s->picture.linesize[2]*cy;
922 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
923 offset[i] = 0;
924 emms_c();
925
926 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
927
928 s->last_slice_end = y + h;
929 }
930
931 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
932 AVPacket *avpkt)
933 {
934 const uint8_t *buf = avpkt->data;
935 int buf_size = avpkt->size;
936 HYuvContext *s = avctx->priv_data;
937 const int width = s->width;
938 const int width2 = s->width>>1;
939 const int height = s->height;
940 int fake_ystride, fake_ustride, fake_vstride;
941 AVFrame * const p = &s->picture;
942 int table_size = 0;
943
944 AVFrame *picture = data;
945
946 av_fast_malloc(&s->bitstream_buffer,
947 &s->bitstream_buffer_size,
948 buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
949 if (!s->bitstream_buffer)
950 return AVERROR(ENOMEM);
951
952 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
953 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer,
954 (const uint32_t*)buf, buf_size / 4);
955
956 if (p->data[0])
957 ff_thread_release_buffer(avctx, p);
958
959 p->reference = 0;
960 if (ff_thread_get_buffer(avctx, p) < 0) {
961 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
962 return -1;
963 }
964
965 if (s->context) {
966 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
967 if (table_size < 0)
968 return -1;
969 }
970
971 if ((unsigned)(buf_size-table_size) >= INT_MAX / 8)
972 return -1;
973
974 init_get_bits(&s->gb, s->bitstream_buffer+table_size,
975 (buf_size-table_size) * 8);
976
977 fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
978 fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
979 fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];
980
981 s->last_slice_end = 0;
982
983 if (s->bitstream_bpp < 24) {
984 int y, cy;
985 int lefty, leftu, leftv;
986 int lefttopy, lefttopu, lefttopv;
987
988 if (s->yuy2) {
989 p->data[0][3] = get_bits(&s->gb, 8);
990 p->data[0][2] = get_bits(&s->gb, 8);
991 p->data[0][1] = get_bits(&s->gb, 8);
992 p->data[0][0] = get_bits(&s->gb, 8);
993
994 av_log(avctx, AV_LOG_ERROR,
995 "YUY2 output is not implemented yet\n");
996 return -1;
997 } else {
998
999 leftv = p->data[2][0] = get_bits(&s->gb, 8);
1000 lefty = p->data[0][1] = get_bits(&s->gb, 8);
1001 leftu = p->data[1][0] = get_bits(&s->gb, 8);
1002 p->data[0][0] = get_bits(&s->gb, 8);
1003
1004 switch (s->predictor) {
1005 case LEFT:
1006 case PLANE:
1007 decode_422_bitstream(s, width-2);
1008 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1009 if (!(s->flags&CODEC_FLAG_GRAY)) {
1010 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1011 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1012 }
1013
1014 for (cy = y = 1; y < s->height; y++, cy++) {
1015 uint8_t *ydst, *udst, *vdst;
1016
1017 if (s->bitstream_bpp == 12) {
1018 decode_gray_bitstream(s, width);
1019
1020 ydst = p->data[0] + p->linesize[0] * y;
1021
1022 lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1023 if (s->predictor == PLANE) {
1024 if (y > s->interlaced)
1025 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1026 }
1027 y++;
1028 if (y >= s->height) break;
1029 }
1030
1031 draw_slice(s, y);
1032
1033 ydst = p->data[0] + p->linesize[0]*y;
1034 udst = p->data[1] + p->linesize[1]*cy;
1035 vdst = p->data[2] + p->linesize[2]*cy;
1036
1037 decode_422_bitstream(s, width);
1038 lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1039 if (!(s->flags & CODEC_FLAG_GRAY)) {
1040 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1041 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1042 }
1043 if (s->predictor == PLANE) {
1044 if (cy > s->interlaced) {
1045 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1046 if (!(s->flags & CODEC_FLAG_GRAY)) {
1047 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1048 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1049 }
1050 }
1051 }
1052 }
1053 draw_slice(s, height);
1054
1055 break;
1056 case MEDIAN:
1057 /* first line except first 2 pixels is left predicted */
1058 decode_422_bitstream(s, width - 2);
1059 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width - 2, lefty);
1060 if (!(s->flags & CODEC_FLAG_GRAY)) {
1061 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1062 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1063 }
1064
1065 cy = y = 1;
1066
1067 /* second line is left predicted for interlaced case */
1068 if (s->interlaced) {
1069 decode_422_bitstream(s, width);
1070 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1071 if (!(s->flags & CODEC_FLAG_GRAY)) {
1072 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1073 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1074 }
1075 y++; cy++;
1076 }
1077
1078 /* next 4 pixels are left predicted too */
1079 decode_422_bitstream(s, 4);
1080 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1081 if (!(s->flags&CODEC_FLAG_GRAY)) {
1082 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1083 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1084 }
1085
1086 /* next line except the first 4 pixels is median predicted */
1087 lefttopy = p->data[0][3];
1088 decode_422_bitstream(s, width - 4);
1089 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1090 if (!(s->flags&CODEC_FLAG_GRAY)) {
1091 lefttopu = p->data[1][1];
1092 lefttopv = p->data[2][1];
1093 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
1094 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
1095 }
1096 y++; cy++;
1097
1098 for (; y<height; y++, cy++) {
1099 uint8_t *ydst, *udst, *vdst;
1100
1101 if (s->bitstream_bpp == 12) {
1102 while (2 * cy > y) {
1103 decode_gray_bitstream(s, width);
1104 ydst = p->data[0] + p->linesize[0] * y;
1105 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1106 y++;
1107 }
1108 if (y >= height) break;
1109 }
1110 draw_slice(s, y);
1111
1112 decode_422_bitstream(s, width);
1113
1114 ydst = p->data[0] + p->linesize[0] * y;
1115 udst = p->data[1] + p->linesize[1] * cy;
1116 vdst = p->data[2] + p->linesize[2] * cy;
1117
1118 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1119 if (!(s->flags & CODEC_FLAG_GRAY)) {
1120 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1121 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1122 }
1123 }
1124
1125 draw_slice(s, height);
1126 break;
1127 }
1128 }
1129 } else {
1130 int y;
1131 int leftr, leftg, leftb, lefta;
1132 const int last_line = (height - 1) * p->linesize[0];
1133
1134 if (s->bitstream_bpp == 32) {
1135 lefta = p->data[0][last_line+A] = get_bits(&s->gb, 8);
1136 leftr = p->data[0][last_line+R] = get_bits(&s->gb, 8);
1137 leftg = p->data[0][last_line+G] = get_bits(&s->gb, 8);
1138 leftb = p->data[0][last_line+B] = get_bits(&s->gb, 8);
1139 } else {
1140 leftr = p->data[0][last_line+R] = get_bits(&s->gb, 8);
1141 leftg = p->data[0][last_line+G] = get_bits(&s->gb, 8);
1142 leftb = p->data[0][last_line+B] = get_bits(&s->gb, 8);
1143 lefta = p->data[0][last_line+A] = 255;
1144 skip_bits(&s->gb, 8);
1145 }
1146
1147 if (s->bgr32) {
1148 switch (s->predictor) {
1149 case LEFT:
1150 case PLANE:
1151 decode_bgr_bitstream(s, width - 1);
1152 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width - 1, &leftr, &leftg, &leftb, &lefta);
1153
1154 for (y = s->height - 2; y >= 0; y--) { //Yes it is stored upside down.
1155 decode_bgr_bitstream(s, width);
1156
1157 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1158 if (s->predictor == PLANE) {
1159 if (s->bitstream_bpp != 32) lefta = 0;
1160 if ((y & s->interlaced) == 0 &&
1161 y < s->height - 1 - s->interlaced) {
1162 s->dsp.add_bytes(p->data[0] + p->linesize[0] * y,
1163 p->data[0] + p->linesize[0] * y +
1164 fake_ystride, fake_ystride);
1165 }
1166 }
1167 }
1168 // just 1 large slice as this is not possible in reverse order
1169 draw_slice(s, height);
1170 break;
1171 default:
1172 av_log(avctx, AV_LOG_ERROR,
1173 "prediction type not supported!\n");
1174 }
1175 }else{
1176 av_log(avctx, AV_LOG_ERROR,
1177 "BGR24 output is not implemented yet\n");
1178 return -1;
1179 }
1180 }
1181 emms_c();
1182
1183 *picture = *p;
1184 *got_frame = 1;
1185
1186 return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
1187 }
1188 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1189
1190 static int common_end(HYuvContext *s)
1191 {
1192 int i;
1193
1194 for(i = 0; i < 3; i++) {
1195 av_freep(&s->temp[i]);
1196 }
1197 return 0;
1198 }
1199
1200 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1201 static av_cold int decode_end(AVCodecContext *avctx)
1202 {
1203 HYuvContext *s = avctx->priv_data;
1204 int i;
1205
1206 if (s->picture.data[0])
1207 avctx->release_buffer(avctx, &s->picture);
1208
1209 common_end(s);
1210 av_freep(&s->bitstream_buffer);
1211
1212 for (i = 0; i < 6; i++) {
1213 ff_free_vlc(&s->vlc[i]);
1214 }
1215
1216 return 0;
1217 }
1218 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1219
1220 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * Encode one video frame (the encode2 callback for huffyuv/ffvhuff).
 *
 * Supported input formats: YUV422P/YUV420P (planar path) and RGB32
 * (packed path, stored bottom-up in the bitstream). Every frame is an
 * intra/key frame.
 *
 * @param avctx      codec context (priv_data is HYuvContext)
 * @param pkt        output packet; allocated here if pkt->data is NULL
 * @param pict       input frame to encode (not modified)
 * @param got_packet set to 1 on success (this function always produces output)
 * @return 0 on success, negative error code on allocation failure,
 *         -1 if a code table could not be generated
 */
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    HYuvContext *s = avctx->priv_data;
    const int width = s->width;
    const int width2 = s->width>>1;
    const int height = s->height;
    /* With interlaced content prediction references the line two rows up
     * (same field); otherwise the line directly above. */
    const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p = &s->picture;
    int i, j, size = 0, ret;

    /* Worst-case output buffer: 3 planes x 4 bytes per pixel plus slack.
     * Huffyuv output can exceed the raw size for pathological input. */
    if (!pkt->data &&
        (ret = av_new_packet(pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating output packet.\n");
        return ret;
    }

    *p = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    /* Adaptive-context mode: regenerate the Huffman tables from the
     * accumulated symbol statistics, store them in the packet header,
     * then halve the stats so old frames decay exponentially. */
    if (s->context) {
        for (i = 0; i < 3; i++) {
            ff_huff_gen_len_table(s->len[i], s->stats[i]);
            if (generate_bits_table(s->bits[i], s->len[i]) < 0)
                return -1;
            size += store_table(s, s->len[i], &pkt->data[size]);
        }

        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] >>= 1;
    }

    /* Bitstream proper starts after the (optional) stored tables. */
    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        int lefty, leftu, leftv, y, cy;

        /* First 2 luma + 1 chroma pair of samples are stored raw; they
         * seed the left predictors. Order matches the decoder's reads. */
        put_bits(&s->pb, 8, leftv = p->data[2][0]);
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        /* Rest of the first line is left-predicted (skip the 2 raw pixels). */
        encode_422_bitstream(s, 2, width-2);

        if (s->predictor==MEDIAN) {
            int lefttopy, lefttopu, lefttopv;
            cy = y = 1;
            /* Interlaced: the second line (other field) is still
             * left-predicted before median prediction kicks in. */
            if (s->interlaced) {
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            /* First 4 luma / 2 chroma pixels of the next line: still left
             * predicted (no top-left neighbour exists yet). */
            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            /* Remainder of that line switches to median prediction. */
            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride + 4, width - 4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width - 4);
            y++; cy++;

            for (; y < height; y++,cy++) {
                uint8_t *ydst, *udst, *vdst;

                /* 4:2:0 (12 bpp): luma has twice the line count of chroma,
                 * so emit luma-only lines until y catches up with 2*cy. */
                if (s->bitstream_bpp == 12) {
                    while (2 * cy > y) {
                        ydst = p->data[0] + p->linesize[0] * y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if (y >= height) break;
                }
                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        } else {
            /* LEFT / PLANE predictors. */
            for (cy = y = 1; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if (s->bitstream_bpp == 12) {
                    ydst = p->data[0] + p->linesize[0] * y;

                    /* PLANE: subtract the line above first, then apply left
                     * prediction on the residual. Skipped on the first
                     * field line(s) where no same-field neighbour exists. */
                    if (s->predictor == PLANE && s->interlaced < y) {
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    } else {
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if (y >= height) break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                if (s->predictor == PLANE && s->interlaced < cy) {
                    /* U and V residuals share temp[2] (V stored at offset
                     * width2); consumed before being overwritten below. */
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                } else {
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        /* BGR32 path: the format stores the image bottom-up, so start at
         * the last line and walk upwards with a negative stride. */
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        /* First pixel stored raw; seeds the per-channel left predictors.
         * Alpha slot is written as 0. */
        put_bits(&s->pb, 8, leftr = data[R]);
        put_bits(&s->pb, 8, leftg = data[G]);
        put_bits(&s->pb, 8, leftb = data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width - 1);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y*stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            } else {
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    /* Round the written bit count up to bytes, pad with 31 zero bits so
     * the final word is complete, and convert the byte total (tables +
     * payload) to 32-bit words. */
    size += (put_bits_count(&s->pb) + 31) / 8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size /= 4;

    /* Two-pass rate control: dump symbol statistics as text every 32nd
     * frame. NOTE(review): snprintf return values are unchecked, so a
     * stats buffer smaller than 30 KiB would truncate silently. */
    if ((s->flags&CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
        int j;
        char *p = avctx->stats_out;
        char *end = p + 1024*30;
        for (i = 0; i < 3; i++) {
            for (j = 0; j < 256; j++) {
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p += strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    /* The bitstream is written LSB-first into native words; byte-swap to
     * the on-disk layout unless output is suppressed. */
    if (!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
    }

    s->picture_number++;

    pkt->size = size * 4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
1427
1428 static av_cold int encode_end(AVCodecContext *avctx)
1429 {
1430 HYuvContext *s = avctx->priv_data;
1431
1432 common_end(s);
1433
1434 av_freep(&avctx->extradata);
1435 av_freep(&avctx->stats_out);
1436
1437 return 0;
1438 }
1439 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1440
1441 #if CONFIG_HUFFYUV_DECODER
/* Huffyuv decoder: supports direct rendering, slice output via
 * draw_horiz_band, and frame-level threading. */
AVCodec ff_huffyuv_decoder = {
    .name             = "huffyuv",
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_HUFFYUV,
    .priv_data_size   = sizeof(HYuvContext),
    .init             = decode_init,
    .close            = decode_end,
    .decode           = decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                        CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name        = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
1455 #endif
1456
1457 #if CONFIG_FFVHUFF_DECODER
/* FFmpeg-variant Huffyuv decoder: same decode entry points as huffyuv,
 * distinguished only by codec id. */
AVCodec ff_ffvhuff_decoder = {
    .name             = "ffvhuff",
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_FFVHUFF,
    .priv_data_size   = sizeof(HYuvContext),
    .init             = decode_init,
    .close            = decode_end,
    .decode           = decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                        CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name        = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
1471 #endif
1472
1473 #if CONFIG_HUFFYUV_ENCODER
/* Huffyuv encoder: accepts YUV422P and RGB32 input (YUV420P is only
 * advertised by the ffvhuff variant below). */
AVCodec ff_huffyuv_encoder = {
    .name           = "huffyuv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
1487 #endif
1488
1489 #if CONFIG_FFVHUFF_ENCODER
/* FFmpeg-variant Huffyuv encoder: additionally accepts YUV420P input. */
AVCodec ff_ffvhuff_encoder = {
    .name           = "ffvhuff",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
1503 #endif