/* Scrape residue from the libav.git web viewer — kept for provenance:
 * commit subject: "vlc: Add header #include when the types are used"
 * path:           libavcodec/utvideodec.c */
1 /*
2 * Ut Video decoder
3 * Copyright (c) 2011 Konstantin Shishkov
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /**
23 * @file
24 * Ut Video decoder
25 */
26
27 #include <inttypes.h>
28 #include <stdlib.h>
29
30 #include "libavutil/intreadwrite.h"
31
32 #include "avcodec.h"
33 #include "bitstream.h"
34 #include "bswapdsp.h"
35 #include "bytestream.h"
36 #include "internal.h"
37 #include "thread.h"
38 #include "utvideo.h"
39 #include "vlc.h"
40
41 static int build_huff10(const uint8_t *src, VLC *vlc, int *fsym)
42 {
43 int i;
44 HuffEntry he[1024];
45 int last;
46 uint32_t codes[1024];
47 uint8_t bits[1024];
48 uint16_t syms[1024];
49 uint32_t code;
50
51 *fsym = -1;
52 for (i = 0; i < 1024; i++) {
53 he[i].sym = i;
54 he[i].len = *src++;
55 }
56 qsort(he, 1024, sizeof(*he), ff_ut10_huff_cmp_len);
57
58 if (!he[0].len) {
59 *fsym = he[0].sym;
60 return 0;
61 }
62
63 last = 1023;
64 while (he[last].len == 255 && last)
65 last--;
66
67 if (he[last].len > 32) {
68 return -1;
69 }
70
71 code = 1;
72 for (i = last; i >= 0; i--) {
73 codes[i] = code >> (32 - he[i].len);
74 bits[i] = he[i].len;
75 syms[i] = he[i].sym;
76 code += 0x80000000u >> (he[i].len - 1);
77 }
78
79 return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 11), last + 1,
80 bits, sizeof(*bits), sizeof(*bits),
81 codes, sizeof(*codes), sizeof(*codes),
82 syms, sizeof(*syms), sizeof(*syms), 0);
83 }
84
85 static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
86 {
87 int i;
88 HuffEntry he[256];
89 int last;
90 uint32_t codes[256];
91 uint8_t bits[256];
92 uint8_t syms[256];
93 uint32_t code;
94
95 *fsym = -1;
96 for (i = 0; i < 256; i++) {
97 he[i].sym = i;
98 he[i].len = *src++;
99 }
100 qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
101
102 if (!he[0].len) {
103 *fsym = he[0].sym;
104 return 0;
105 }
106 if (he[0].len > 32)
107 return -1;
108
109 last = 255;
110 while (he[last].len == 255 && last)
111 last--;
112
113 code = 1;
114 for (i = last; i >= 0; i--) {
115 codes[i] = code >> (32 - he[i].len);
116 bits[i] = he[i].len;
117 syms[i] = he[i].sym;
118 code += 0x80000000u >> (he[i].len - 1);
119 }
120
121 return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 9), last + 1,
122 bits, sizeof(*bits), sizeof(*bits),
123 codes, sizeof(*codes), sizeof(*codes),
124 syms, sizeof(*syms), sizeof(*syms), 0);
125 }
126
/**
 * Decode one 10-bit plane, slice by slice.
 *
 * @param c        decoder context (provides slice count, bswap DSP and the
 *                 shared slice_bits scratch buffer)
 * @param plane_no plane index (not used by the body; kept for symmetry with
 *                 decode_plane())
 * @param dst      destination plane, 16-bit samples
 * @param step     distance between horizontally adjacent samples, in samples
 * @param stride   destination line stride, in samples
 * @param src      per-slice offset table followed by the compressed data
 * @param huff     1024-entry Huffman length table for this plane
 * @param use_pred non-zero to apply left prediction (10-bit wrap) on output
 * @return 0 on success, negative AVERROR code on failure
 */
static int decode_plane10(UtvideoContext *c, int plane_no,
                          uint16_t *dst, int step, int stride,
                          int width, int height,
                          const uint8_t *src, const uint8_t *huff,
                          int use_pred)
{
    BitstreamContext bc;
    int i, j, slice, pix, ret;
    int sstart, send;
    VLC vlc;
    int prev, fsym;

    if ((ret = build_huff10(huff, &vlc, &fsym)) < 0) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return ret;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        /* no VLC was built in this case; just flood-fill every slice */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint16_t *dest;

            sstart = send;
            send = (height * (slice + 1) / c->slices);
            dest = dst + sstart * stride;

            prev = 0x200; /* 10-bit prediction bias */
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width * step; i += step) {
                    pix = fsym;
                    if (use_pred) {
                        prev += pix;
                        prev &= 0x3FF; /* wrap to 10 bits */
                        pix = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        return 0;
    }

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint16_t *dest;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send = (height * (slice + 1) / c->slices);
        dest = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end = AV_RL32(src + slice * 4);
        slice_size = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        /* copy the slice into the padded scratch buffer, then byteswap
         * in place: the bitstream reader consumes big-endian 32-bit words */
        memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
               slice_size);
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *) c->slice_bits,
                          (slice_data_end - slice_data_start + 3) >> 2);
        bitstream_init8(&bc, c->slice_bits, slice_size);

        prev = 0x200; /* left predictor reset at each slice start */
        for (j = sstart; j < send; j++) {
            for (i = 0; i < width * step; i += step) {
                if (bitstream_bits_left(&bc) <= 0) {
                    av_log(c->avctx, AV_LOG_ERROR,
                           "Slice decoding ran out of bits\n");
                    goto fail;
                }
                pix = bitstream_read_vlc(&bc, vlc.table, vlc.bits, 3);
                if (pix < 0) {
                    av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
                    goto fail;
                }
                if (use_pred) {
                    prev += pix;
                    prev &= 0x3FF; /* wrap to 10 bits */
                    pix = prev;
                }
                dest[i] = pix;
            }
            dest += stride;
        }
        /* more than one unread 32-bit word suggests a bad slice size */
        if (bitstream_bits_left(&bc) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", bitstream_bits_left(&bc));
    }

    ff_free_vlc(&vlc);

    return 0;
fail:
    ff_free_vlc(&vlc);
    return AVERROR_INVALIDDATA;
}
231
232 static int compute_cmask(int plane_no, int interlaced, int pix_fmt)
233 {
234 const int is_luma = (pix_fmt == AV_PIX_FMT_YUV420P) && !plane_no;
235
236 if (interlaced)
237 return ~(1 + 2 * is_luma);
238
239 return ~is_luma;
240 }
241
/**
 * Decode one 8-bit plane, slice by slice.
 *
 * @param c        decoder context (slice count, bswap DSP, scratch buffer)
 * @param plane_no plane index, used to pick the slice-alignment mask
 * @param dst      destination plane
 * @param step     distance between horizontally adjacent samples, in bytes
 * @param stride   destination line stride, in bytes
 * @param src      Huffman length table (256 bytes), then the per-slice
 *                 offset table, then the compressed slice data
 * @param use_pred non-zero to apply left prediction on decoded residuals
 * @return 0 on success, negative AVERROR code on failure
 */
static int decode_plane(UtvideoContext *c, int plane_no,
                        uint8_t *dst, int step, ptrdiff_t stride,
                        int width, int height,
                        const uint8_t *src, int use_pred)
{
    int i, j, slice, pix;
    int sstart, send;
    VLC vlc;
    BitstreamContext bc;
    int prev, fsym;
    const int cmask = compute_cmask(plane_no, c->interlaced, c->avctx->pix_fmt);

    if (build_huff(src, &vlc, &fsym)) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return AVERROR_INVALIDDATA;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        /* no VLC was built in this case; just flood-fill every slice */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint8_t *dest;

            sstart = send;
            send = (height * (slice + 1) / c->slices) & cmask;
            dest = dst + sstart * stride;

            prev = 0x80; /* 8-bit prediction bias */
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width * step; i += step) {
                    pix = fsym;
                    if (use_pred) {
                        prev += pix;
                        pix = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        return 0;
    }

    src += 256; /* skip the 256-byte Huffman length table */

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint8_t *dest;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send = (height * (slice + 1) / c->slices) & cmask;
        dest = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end = AV_RL32(src + slice * 4);
        slice_size = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        /* copy the slice into the padded scratch buffer, then byteswap
         * in place: the bitstream reader consumes big-endian 32-bit words */
        memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
               slice_size);
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *) c->slice_bits,
                          (slice_data_end - slice_data_start + 3) >> 2);
        bitstream_init8(&bc, c->slice_bits, slice_size);

        prev = 0x80; /* left predictor reset at each slice start */
        for (j = sstart; j < send; j++) {
            for (i = 0; i < width * step; i += step) {
                if (bitstream_bits_left(&bc) <= 0) {
                    av_log(c->avctx, AV_LOG_ERROR,
                           "Slice decoding ran out of bits\n");
                    goto fail;
                }
                pix = bitstream_read_vlc(&bc, vlc.table, vlc.bits, 4);
                if (pix < 0) {
                    av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
                    goto fail;
                }
                if (use_pred) {
                    prev += pix;
                    pix = prev;
                }
                dest[i] = pix;
            }
            dest += stride;
        }
        /* more than one unread 32-bit word suggests a bad slice size */
        if (bitstream_bits_left(&bc) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", bitstream_bits_left(&bc));
    }

    ff_free_vlc(&vlc);

    return 0;
fail:
    ff_free_vlc(&vlc);
    return AVERROR_INVALIDDATA;
}
346
/**
 * Undo the green-based decorrelation used by 8-bit Ut Video RGB:
 * stored R and B samples are offsets from G, biased by 0x80. G is untouched.
 */
static void restore_rgb_planes(uint8_t *src, int step, ptrdiff_t stride,
                               int width, int height)
{
    int x, y;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width * step; x += step) {
            const uint8_t green = src[x + 1];

            src[x]     = src[x]     + green - 0x80;
            src[x + 2] = src[x + 2] + green - 0x80;
        }
        src += stride;
    }
}
364
365 static void restore_rgb_planes10(AVFrame *frame, int width, int height)
366 {
367 uint16_t *src_r = (uint16_t *)frame->data[2];
368 uint16_t *src_g = (uint16_t *)frame->data[0];
369 uint16_t *src_b = (uint16_t *)frame->data[1];
370 int r, g, b;
371 int i, j;
372
373 for (j = 0; j < height; j++) {
374 for (i = 0; i < width; i++) {
375 r = src_r[i];
376 g = src_g[i];
377 b = src_b[i];
378 src_r[i] = (r + g - 0x200) & 0x3FF;
379 src_b[i] = (b + g - 0x200) & 0x3FF;
380 }
381 src_r += frame->linesize[2] / 2;
382 src_g += frame->linesize[0] / 2;
383 src_b += frame->linesize[1] / 2;
384 }
385 }
386
/**
 * Undo median prediction on a planar 8-bit plane, slice by slice.
 * Per slice: the first line uses left prediction seeded with the 0x80 bias,
 * the second line uses top prediction for its first sample and median for
 * the rest, and all remaining lines use the huffyuvdsp median predictor.
 * rmode requests even-line alignment of slice boundaries (see compute_cmask).
 */
static void restore_median_planar(UtvideoContext *c, uint8_t *src,
                                  ptrdiff_t stride, int width, int height,
                                  int slices, int rmode)
{
    int i, j, slice;
    int A, B, C; /* left, top, top-left neighbours of the current sample */
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~rmode;

    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;

        if (!slice_height)
            continue;
        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
        bsrc += stride;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C = bsrc[-stride];
        bsrc[0] += C;
        A = bsrc[0];
        for (i = 1; i < width; i++) {
            B = bsrc[i - stride];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[i];
        }
        bsrc += stride;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            c->hdspdec.add_hfyu_median_pred(bsrc, bsrc - stride,
                                            bsrc, width, &A, &B);
            bsrc += stride;
        }
    }
}
431
432 /* UtVideo interlaced mode treats every two lines as a single one,
433 * so restoring function should take care of possible padding between
434 * two parts of the same "line".
435 */
436 static void restore_median_planar_il(UtvideoContext *c, uint8_t *src,
437 ptrdiff_t stride, int width, int height,
438 int slices, int rmode)
439 {
440 int i, j, slice;
441 int A, B, C;
442 uint8_t *bsrc;
443 int slice_start, slice_height;
444 const int cmask = ~(rmode ? 3 : 1);
445 const int stride2 = stride << 1;
446
447 for (slice = 0; slice < slices; slice++) {
448 slice_start = ((slice * height) / slices) & cmask;
449 slice_height = ((((slice + 1) * height) / slices) & cmask) -
450 slice_start;
451 slice_height >>= 1;
452 if (!slice_height)
453 continue;
454
455 bsrc = src + slice_start * stride;
456
457 // first line - left neighbour prediction
458 bsrc[0] += 0x80;
459 A = c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
460 c->hdspdec.add_hfyu_left_pred(bsrc + stride, bsrc + stride, width, A);
461 bsrc += stride2;
462 if (slice_height <= 1)
463 continue;
464 // second line - first element has top prediction, the rest uses median
465 C = bsrc[-stride2];
466 bsrc[0] += C;
467 A = bsrc[0];
468 for (i = 1; i < width; i++) {
469 B = bsrc[i - stride2];
470 bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
471 C = B;
472 A = bsrc[i];
473 }
474 c->hdspdec.add_hfyu_median_pred(bsrc + stride, bsrc - stride,
475 bsrc + stride, width, &A, &B);
476 bsrc += stride2;
477 // the rest of lines use continuous median prediction
478 for (j = 2; j < slice_height; j++) {
479 c->hdspdec.add_hfyu_median_pred(bsrc, bsrc - stride2,
480 bsrc, width, &A, &B);
481 c->hdspdec.add_hfyu_median_pred(bsrc + stride, bsrc - stride,
482 bsrc + stride, width, &A, &B);
483 bsrc += stride2;
484 }
485 }
486 }
487
/**
 * Undo median prediction on one channel of a packed (interleaved) 8-bit
 * plane; step is the distance between samples of the same channel.
 * Same line scheme as restore_median_planar(), but with open-coded left
 * prediction and per-sample median since the DSP helpers assume step 1.
 */
static void restore_median_packed(uint8_t *src, int step, ptrdiff_t stride,
                                  int width, int height,
                                  int slices, int rmode)
{
    int i, j, slice;
    int A, B, C; /* left, top, top-left neighbours of the current sample */
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~rmode;

    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = bsrc[0];
        for (i = step; i < width * step; i += step) {
            bsrc[i] += A;
            A = bsrc[i];
        }
        bsrc += stride;
        if (slice_height == 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C = bsrc[-stride];
        bsrc[0] += C;
        A = bsrc[0];
        for (i = step; i < width * step; i += step) {
            B = bsrc[i - stride];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[i];
        }
        bsrc += stride;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            for (i = 0; i < width * step; i += step) {
                B = bsrc[i - stride];
                bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
                C = B;
                A = bsrc[i];
            }
            bsrc += stride;
        }
    }
}
540
/* UtVideo interlaced mode treats every two lines as a single one,
 * so restoring function should take care of possible padding between
 * two parts of the same "line".
 */
/**
 * Interlaced variant of restore_median_packed(): each pair of memory lines
 * forms one logical line, and the predictors of the second memory line
 * continue from (and reach back into) the first.
 */
static void restore_median_packed_il(uint8_t *src, int step, ptrdiff_t stride,
                                     int width, int height,
                                     int slices, int rmode)
{
    int i, j, slice;
    int A, B, C; /* left, top, top-left neighbours of the current sample */
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;

    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1; /* counted in line PAIRS from here on */
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = bsrc[0];
        for (i = step; i < width * step; i += step) {
            bsrc[i] += A;
            A = bsrc[i];
        }
        /* second memory line of the pair continues the left predictor */
        for (i = 0; i < width * step; i += step) {
            bsrc[stride + i] += A;
            A = bsrc[stride + i];
        }
        bsrc += stride2;
        if (slice_height == 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C = bsrc[-stride2];
        bsrc[0] += C;
        A = bsrc[0];
        for (i = step; i < width * step; i += step) {
            B = bsrc[i - stride2];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[i];
        }
        /* second memory line: "top" neighbour is the end of the previous
         * logical line, one memory line above (i - stride) */
        for (i = 0; i < width * step; i += step) {
            B = bsrc[i - stride];
            bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[stride + i];
        }
        bsrc += stride2;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            for (i = 0; i < width * step; i += step) {
                B = bsrc[i - stride2];
                bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
                C = B;
                A = bsrc[i];
            }
            for (i = 0; i < width * step; i += step) {
                B = bsrc[i - stride];
                bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C));
                C = B;
                A = bsrc[i + stride];
            }
            bsrc += stride2;
        }
    }
}
615
/**
 * Undo gradient (left + top - top-left) prediction on a planar 8-bit plane.
 * Per slice: the first line uses left prediction seeded with the 0x80 bias;
 * every following line uses top prediction for its first sample and the
 * gradient predictor for the rest.
 */
static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                    int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C; /* top, top-left and left neighbours in the gradient */
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~rmode;

    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;

        if (!slice_height)
            continue;
        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
        bsrc += stride;
        if (slice_height <= 1)
            continue;
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest uses gradient
            bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
            for (i = 1; i < width; i++) {
                A = bsrc[i - stride];
                B = bsrc[i - (stride + 1)];
                C = bsrc[i - 1];
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            }
            bsrc += stride;
        }
    }
}
653
/**
 * Interlaced variant of restore_gradient_planar(): each pair of memory
 * lines forms one logical line, so "top" neighbours are two memory lines
 * up (stride2) for the first line of a pair, and one memory line up for
 * the second.
 */
static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                       int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C; /* top, top-left and left neighbours in the gradient */
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;

    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1; /* counted in line PAIRS from here on */
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = c->hdspdec.add_hfyu_left_pred(bsrc, bsrc, width, 0);
        c->hdspdec.add_hfyu_left_pred(bsrc + stride, bsrc + stride, width, A);
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest uses gradient
            bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
            for (i = 1; i < width; i++) {
                A = bsrc[i - stride2];
                B = bsrc[i - (stride2 + 1)];
                C = bsrc[i - 1];
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            }
            /* second memory line of the pair: its first sample's top-left
             * neighbour wraps back to near the end of the previous memory
             * line (interlaced "same logical line" layout) — NOTE(review):
             * index -(1 + 2*stride - width) assumes width <= stride. */
            A = bsrc[-stride];
            B = bsrc[-(1 + stride + stride - width)];
            C = bsrc[width - 1];
            bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
            for (i = 1; i < width; i++) {
                A = bsrc[i - stride];
                B = bsrc[i - (1 + stride)];
                C = bsrc[i - 1 + stride];
                bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
            }
            bsrc += stride2;
        }
    }
}
704
/**
 * Undo gradient (left + top - top-left) prediction on one channel of a
 * packed (interleaved) 8-bit plane; step is the distance between samples
 * of the same channel. Per slice: the first line uses left prediction
 * seeded with the 0x80 bias; every following line uses top prediction for
 * its first sample and the gradient predictor for the rest.
 */
static void restore_gradient_packed(uint8_t *src, int step, ptrdiff_t stride,
                                    int width, int height, int slices, int rmode)
{
    const int cmask = ~rmode;
    int sl;

    for (sl = 0; sl < slices; sl++) {
        const int top  = ((sl * height) / slices) & cmask;
        const int rows = ((((sl + 1) * height) / slices) & cmask) - top;
        uint8_t *line;
        int x, y;

        if (!rows)
            continue;
        line = src + top * stride;

        /* first row of the slice: left-neighbour prediction, 0x80 bias */
        line[0] += 0x80;
        {
            int left = line[0];

            for (x = step; x < width * step; x += step) {
                line[x] += left;
                left = line[x];
            }
        }
        line += stride;
        if (rows <= 1)
            continue;

        /* remaining rows: first sample has only a top neighbour, the rest
         * use the full gradient predictor */
        for (y = 1; y < rows; y++) {
            line[0] += line[-stride];
            for (x = step; x < width * step; x += step) {
                const int above = line[x - stride];
                const int diag  = line[x - (stride + step)];
                const int left  = line[x - step];

                line[x] = (above - diag + left + line[x]) & 0xFF;
            }
            line += stride;
        }
    }
}
747
/**
 * Interlaced variant of restore_gradient_packed(): each pair of memory
 * lines forms one logical line, so "top" neighbours are two memory lines
 * up (stride2) for the first line of a pair, and one memory line up for
 * the second.
 */
static void restore_gradient_packed_il(uint8_t *src, int step, ptrdiff_t stride,
                                       int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C; /* top, top-left and left neighbours in the gradient */
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;

    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1; /* counted in line PAIRS from here on */
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = bsrc[0];
        for (i = step; i < width * step; i += step) {
            bsrc[i] += A;
            A = bsrc[i];
        }
        /* second memory line of the pair continues the left predictor */
        for (i = 0; i < width * step; i += step) {
            bsrc[stride + i] += A;
            A = bsrc[stride + i];
        }
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest uses gradient
            C = bsrc[-stride2];
            bsrc[0] += C;
            for (i = step; i < width * step; i += step) {
                A = bsrc[i - stride2];
                B = bsrc[i - (stride2 + step)];
                C = bsrc[i - step];
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            }
            /* second memory line of the pair: its first sample's top-left
             * neighbour wraps back to near the end of the previous memory
             * line (interlaced "same logical line" layout) — NOTE(review):
             * index -(step + 2*stride - width*step) assumes
             * width * step <= stride. */
            A = bsrc[-stride];
            B = bsrc[-(step + stride + stride - width * step)];
            C = bsrc[width * step - step];
            bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
            for (i = step; i < width * step; i += step) {
                A = bsrc[i - stride];
                B = bsrc[i - (step + stride)];
                C = bsrc[i - step + stride];
                bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
            }
            bsrc += stride2;
        }
    }
}
806
/**
 * Decode one Ut Video frame.
 *
 * Layout (non-pro): per plane a 256-byte Huffman length table, a table of
 * c->slices little-endian 32-bit end offsets, then the compressed slices;
 * a 32-bit frame-info word follows the last plane.
 * Layout (pro): the frame-info word comes first (it also carries the slice
 * count), and each plane carries its 1024-byte length table after the data.
 *
 * All slice offsets are validated here so the per-plane decoders can skip
 * bounds checks; max_slice_size sizes the shared bounce buffer.
 *
 * @return buf_size on success (input always fully consumed), negative
 *         AVERROR code on failure
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    UtvideoContext *c = avctx->priv_data;
    int i, j;
    const uint8_t *plane_start[5]; /* c->planes starts + one end sentinel */
    int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
    int ret;
    GetByteContext gb;
    ThreadFrame frame = { .f = data };

    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    /* header parsing below needs no data from previous frames */
    ff_thread_finish_setup(avctx);

    /* parse plane structure to get frame flags and validate slice offsets */
    bytestream2_init(&gb, buf, buf_size);
    if (c->pro) {
        if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
            av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
            return AVERROR_INVALIDDATA;
        }
        c->frame_info = bytestream2_get_le32u(&gb);
        /* pro variant stores the slice count per frame, not in extradata */
        c->slices = ((c->frame_info >> 16) & 0xff) + 1;
        for (i = 0; i < c->planes; i++) {
            plane_start[i] = gb.buffer;
            if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
                av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
                return AVERROR_INVALIDDATA;
            }
            slice_start = 0;
            slice_end = 0;
            for (j = 0; j < c->slices; j++) {
                slice_end = bytestream2_get_le32u(&gb);
                if (slice_end < 0 || slice_end < slice_start ||
                    bytestream2_get_bytes_left(&gb) < slice_end) {
                    av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
                    return AVERROR_INVALIDDATA;
                }
                slice_size = slice_end - slice_start;
                slice_start = slice_end;
                max_slice_size = FFMAX(max_slice_size, slice_size);
            }
            plane_size = slice_end;
            bytestream2_skipu(&gb, plane_size);
            bytestream2_skipu(&gb, 1024); /* per-plane Huffman length table */
        }
        plane_start[c->planes] = gb.buffer;
    } else {
        for (i = 0; i < c->planes; i++) {
            plane_start[i] = gb.buffer;
            if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
                av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
                return AVERROR_INVALIDDATA;
            }
            bytestream2_skipu(&gb, 256); /* per-plane Huffman length table */
            slice_start = 0;
            slice_end = 0;
            for (j = 0; j < c->slices; j++) {
                slice_end = bytestream2_get_le32u(&gb);
                if (slice_end < 0 || slice_end < slice_start ||
                    bytestream2_get_bytes_left(&gb) < slice_end) {
                    av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
                    return AVERROR_INVALIDDATA;
                }
                slice_size = slice_end - slice_start;
                slice_start = slice_end;
                max_slice_size = FFMAX(max_slice_size, slice_size);
            }
            plane_size = slice_end;
            bytestream2_skipu(&gb, plane_size);
        }
        plane_start[c->planes] = gb.buffer;
        if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
            av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
            return AVERROR_INVALIDDATA;
        }
        c->frame_info = bytestream2_get_le32u(&gb);
    }
    av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
           c->frame_info);

    c->frame_pred = (c->frame_info >> 8) & 3;

    /* shared bounce buffer for byteswapped slice data, padded for the
     * bitstream reader */
    av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
                   max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);

    if (!c->slice_bits) {
        av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
        return AVERROR(ENOMEM);
    }

    /* decode each plane, then undo the selected prediction; planar
     * formats use the DSP-accelerated restore paths, packed RGB the
     * step-aware ones */
    switch (c->avctx->pix_fmt) {
    case AV_PIX_FMT_RGB24:
    case AV_PIX_FMT_RGBA:
        for (i = 0; i < c->planes; i++) {
            ret = decode_plane(c, i, frame.f->data[0] + ff_ut_rgb_order[i],
                               c->planes, frame.f->linesize[0], avctx->width,
                               avctx->height, plane_start[i],
                               c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_packed(frame.f->data[0] + ff_ut_rgb_order[i],
                                          c->planes, frame.f->linesize[0], avctx->width,
                                          avctx->height, c->slices, 0);
                } else {
                    restore_median_packed_il(frame.f->data[0] + ff_ut_rgb_order[i],
                                             c->planes, frame.f->linesize[0],
                                             avctx->width, avctx->height, c->slices,
                                             0);
                }
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_packed(frame.f->data[0] + ff_ut_rgb_order[i],
                                            c->planes, frame.f->linesize[0],
                                            avctx->width, avctx->height,
                                            c->slices, 0);
                } else {
                    restore_gradient_packed_il(frame.f->data[0] + ff_ut_rgb_order[i],
                                               c->planes, frame.f->linesize[0],
                                               avctx->width, avctx->height,
                                               c->slices, 0);
                }
            }
        }
        restore_rgb_planes(frame.f->data[0], c->planes, frame.f->linesize[0],
                           avctx->width, avctx->height);
        break;
    case AV_PIX_FMT_GBRAP10:
    case AV_PIX_FMT_GBRP10:
        for (i = 0; i < c->planes; i++) {
            /* pro planes carry their 1024-byte Huffman table just before
             * the next plane's start */
            ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1,
                                 frame.f->linesize[i] / 2, avctx->width,
                                 avctx->height, plane_start[i],
                                 plane_start[i + 1] - 1024,
                                 c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
        }
        restore_rgb_planes10(frame.f, avctx->width, avctx->height);
        break;
    case AV_PIX_FMT_YUV420P:
        for (i = 0; i < 3; i++) {
            /* chroma planes (i > 0) are subsampled in both directions */
            ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
                               avctx->width >> !!i, avctx->height >> !!i,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
                                          avctx->width >> !!i, avctx->height >> !!i,
                                          c->slices, !i);
                } else {
                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                             avctx->width >> !!i,
                                             avctx->height >> !!i,
                                             c->slices, !i);
                }
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
                                            avctx->width >> !!i,
                                            avctx->height >> !!i,
                                            c->slices, !i);
                } else {
                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                               avctx->width >> !!i,
                                               avctx->height >> !!i,
                                               c->slices, !i);
                }
            }
        }
        break;
    case AV_PIX_FMT_YUV422P:
        for (i = 0; i < 3; i++) {
            /* chroma planes (i > 0) are subsampled horizontally only */
            ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
                               avctx->width >> !!i, avctx->height,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
                                          avctx->width >> !!i, avctx->height,
                                          c->slices, 0);
                } else {
                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                             avctx->width >> !!i, avctx->height,
                                             c->slices, 0);
                }
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
                                            avctx->width >> !!i, avctx->height,
                                            c->slices, 0);
                } else {
                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                               avctx->width >> !!i, avctx->height,
                                               c->slices, 0);
                }
            }
        }
        break;
    case AV_PIX_FMT_YUV444P:
        for (i = 0; i < 3; i++) {
            ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
                               avctx->width, avctx->height,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
                                          avctx->width, avctx->height,
                                          c->slices, 0);
                } else {
                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                             avctx->width, avctx->height,
                                             c->slices, 0);
                }
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
                                            avctx->width, avctx->height,
                                            c->slices, 0);
                } else {
                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                               avctx->width, avctx->height,
                                               c->slices, 0);
                }
            }
        }
        break;
    case AV_PIX_FMT_YUV422P10:
        for (i = 0; i < 3; i++) {
            ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1, frame.f->linesize[i] / 2,
                                 avctx->width >> !!i, avctx->height,
                                 plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
        }
        break;
    }

    /* every Ut Video frame is intra */
    frame.f->key_frame = 1;
    frame.f->pict_type = AV_PICTURE_TYPE_I;
    frame.f->interlaced_frame = !!c->interlaced;

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
1068
/**
 * Initialize decoder state from the codec tag and extradata.
 * Extradata >= 16 bytes selects the classic Ut Video layout (flags word
 * with slice count / compression / interlacing); exactly 8 bytes selects
 * the "pro" (10-bit UQ*) layout, which stores those per frame instead.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    UtvideoContext * const c = avctx->priv_data;

    c->avctx = avctx;

    ff_bswapdsp_init(&c->bdsp);
    ff_huffyuvdsp_init(&c->hdspdec);

    if (avctx->extradata_size >= 16) {
        /* classic layout: version, original format, frame-info size, flags */
        av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
               avctx->extradata[3], avctx->extradata[2],
               avctx->extradata[1], avctx->extradata[0]);
        av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
               AV_RB32(avctx->extradata + 4));
        c->frame_info_size = AV_RL32(avctx->extradata + 8);
        c->flags = AV_RL32(avctx->extradata + 12);

        if (c->frame_info_size != 4)
            avpriv_request_sample(avctx, "Frame info not 4 bytes");
        av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
        c->slices = (c->flags >> 24) + 1;
        c->compression = c->flags & 1;
        c->interlaced = c->flags & 0x800;
    } else if (avctx->extradata_size == 8) {
        /* pro layout: version + original format only; slice count comes
         * from each frame's info word */
        av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
               avctx->extradata[3], avctx->extradata[2],
               avctx->extradata[1], avctx->extradata[0]);
        av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
               AV_RB32(avctx->extradata + 4));
        c->interlaced = 0;
        c->pro = 1;
        c->frame_info_size = 4;
    } else {
        av_log(avctx, AV_LOG_ERROR,
               "Insufficient extradata size %d, should be at least 16\n",
               avctx->extradata_size);
        return AVERROR_INVALIDDATA;
    }

    c->slice_bits_size = 0;

    /* map the FOURCC to pixel format / plane count; UL?? tags also fix the
     * colorspace (BT.470BG for ULY*/ULH0-low tags, BT.709 for ULH*) */
    switch (avctx->codec_tag) {
    case MKTAG('U', 'L', 'R', 'G'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_RGB24;
        break;
    case MKTAG('U', 'L', 'R', 'A'):
        c->planes = 4;
        avctx->pix_fmt = AV_PIX_FMT_RGBA;
        break;
    case MKTAG('U', 'L', 'Y', '0'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
        break;
    case MKTAG('U', 'L', 'Y', '2'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
        break;
    case MKTAG('U', 'L', 'Y', '4'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        avctx->colorspace = AVCOL_SPC_BT470BG;
        break;
    case MKTAG('U', 'Q', 'Y', '2'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
        break;
    case MKTAG('U', 'Q', 'R', 'G'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_GBRP10;
        break;
    case MKTAG('U', 'Q', 'R', 'A'):
        c->planes = 4;
        avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
        break;
    case MKTAG('U', 'L', 'H', '0'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        avctx->colorspace = AVCOL_SPC_BT709;
        break;
    case MKTAG('U', 'L', 'H', '2'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        avctx->colorspace = AVCOL_SPC_BT709;
        break;
    case MKTAG('U', 'L', 'H', '4'):
        c->planes = 3;
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        avctx->colorspace = AVCOL_SPC_BT709;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
               avctx->codec_tag);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
1170
1171 static av_cold int decode_end(AVCodecContext *avctx)
1172 {
1173 UtvideoContext * const c = avctx->priv_data;
1174
1175 av_freep(&c->slice_bits);
1176
1177 return 0;
1178 }
1179
/* Decoder registration: frame-threaded (each frame is independent after
 * header parsing, see ff_thread_finish_setup in decode_frame) and DR1
 * capable. Covers the 8-bit UL* and 10-bit UQ* FOURCC variants. */
AVCodec ff_utvideo_decoder = {
    .name           = "utvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("Ut Video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_UTVIDEO,
    .priv_data_size = sizeof(UtvideoContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
};