2 * WebP (.webp) image decoder
3 * Copyright (c) 2013 Aneesh Dogra <aneesh@sugarlabs.org>
4 * Copyright (c) 2013 Justin Ruggles <justin.ruggles@gmail.com>
6 * This file is part of Libav.
8 * Libav is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * Libav is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with Libav; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * @author Aneesh Dogra <aneesh@sugarlabs.org>
28 * Container and Lossy decoding
30 * @author Justin Ruggles <justin.ruggles@gmail.com>
32 * Compressed alpha for lossy
37 * - Exif and XMP metadata
40 #define BITSTREAM_READER_LE
41 #include "libavutil/imgutils.h"
43 #include "bytestream.h"
49 #define VP8X_FLAG_ANIMATION 0x02
50 #define VP8X_FLAG_XMP_METADATA 0x04
51 #define VP8X_FLAG_EXIF_METADATA 0x08
52 #define VP8X_FLAG_ALPHA 0x10
53 #define VP8X_FLAG_ICC 0x20
55 #define MAX_PALETTE_SIZE 256
56 #define MAX_CACHE_BITS 11
57 #define NUM_CODE_LENGTH_CODES 19
58 #define HUFFMAN_CODES_PER_META_CODE 5
59 #define NUM_LITERAL_CODES 256
60 #define NUM_LENGTH_CODES 24
61 #define NUM_DISTANCE_CODES 40
62 #define NUM_SHORT_DISTANCES 120
63 #define MAX_HUFFMAN_CODE_LENGTH 15
65 static const uint16_t alphabet_sizes
[HUFFMAN_CODES_PER_META_CODE
] = {
66 NUM_LITERAL_CODES
+ NUM_LENGTH_CODES
,
67 NUM_LITERAL_CODES
, NUM_LITERAL_CODES
, NUM_LITERAL_CODES
,
71 static const uint8_t code_length_code_order
[NUM_CODE_LENGTH_CODES
] = {
72 17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
75 static const int8_t lz77_distance_offsets
[NUM_SHORT_DISTANCES
][2] = {
76 { 0, 1 }, { 1, 0 }, { 1, 1 }, { -1, 1 }, { 0, 2 }, { 2, 0 }, { 1, 2 }, { -1, 2 },
77 { 2, 1 }, { -2, 1 }, { 2, 2 }, { -2, 2 }, { 0, 3 }, { 3, 0 }, { 1, 3 }, { -1, 3 },
78 { 3, 1 }, { -3, 1 }, { 2, 3 }, { -2, 3 }, { 3, 2 }, { -3, 2 }, { 0, 4 }, { 4, 0 },
79 { 1, 4 }, { -1, 4 }, { 4, 1 }, { -4, 1 }, { 3, 3 }, { -3, 3 }, { 2, 4 }, { -2, 4 },
80 { 4, 2 }, { -4, 2 }, { 0, 5 }, { 3, 4 }, { -3, 4 }, { 4, 3 }, { -4, 3 }, { 5, 0 },
81 { 1, 5 }, { -1, 5 }, { 5, 1 }, { -5, 1 }, { 2, 5 }, { -2, 5 }, { 5, 2 }, { -5, 2 },
82 { 4, 4 }, { -4, 4 }, { 3, 5 }, { -3, 5 }, { 5, 3 }, { -5, 3 }, { 0, 6 }, { 6, 0 },
83 { 1, 6 }, { -1, 6 }, { 6, 1 }, { -6, 1 }, { 2, 6 }, { -2, 6 }, { 6, 2 }, { -6, 2 },
84 { 4, 5 }, { -4, 5 }, { 5, 4 }, { -5, 4 }, { 3, 6 }, { -3, 6 }, { 6, 3 }, { -6, 3 },
85 { 0, 7 }, { 7, 0 }, { 1, 7 }, { -1, 7 }, { 5, 5 }, { -5, 5 }, { 7, 1 }, { -7, 1 },
86 { 4, 6 }, { -4, 6 }, { 6, 4 }, { -6, 4 }, { 2, 7 }, { -2, 7 }, { 7, 2 }, { -7, 2 },
87 { 3, 7 }, { -3, 7 }, { 7, 3 }, { -7, 3 }, { 5, 6 }, { -5, 6 }, { 6, 5 }, { -6, 5 },
88 { 8, 0 }, { 4, 7 }, { -4, 7 }, { 7, 4 }, { -7, 4 }, { 8, 1 }, { 8, 2 }, { 6, 6 },
89 { -6, 6 }, { 8, 3 }, { 5, 7 }, { -5, 7 }, { 7, 5 }, { -7, 5 }, { 8, 4 }, { 6, 7 },
90 { -6, 7 }, { 7, 6 }, { -7, 6 }, { 8, 5 }, { 7, 7 }, { -7, 7 }, { 8, 6 }, { 8, 7 }
93 enum AlphaCompression
{
94 ALPHA_COMPRESSION_NONE
,
95 ALPHA_COMPRESSION_VP8L
,
100 ALPHA_FILTER_HORIZONTAL
,
101 ALPHA_FILTER_VERTICAL
,
102 ALPHA_FILTER_GRADIENT
,
106 PREDICTOR_TRANSFORM
= 0,
109 COLOR_INDEXING_TRANSFORM
= 3,
112 enum PredictionMode
{
118 PRED_MODE_AVG_T_AVG_L_TR
,
123 PRED_MODE_AVG_AVG_L_TL_AVG_T_TR
,
125 PRED_MODE_ADD_SUBTRACT_FULL
,
126 PRED_MODE_ADD_SUBTRACT_HALF
,
137 /* The structure of WebP lossless is an optional series of transformation data,
138 * followed by the primary image. The primary image also optionally contains
139 * an entropy group mapping if there are multiple entropy groups. There is a
140 * basic image type called an "entropy coded image" that is used for all of
141 * these. The type of each entropy coded image is referred to by the
142 * specification as its role. */
144 /* Primary Image: Stores the actual pixels of the image. */
147 /* Entropy Image: Defines which Huffman group to use for different areas of
148 * the primary image. */
151 /* Predictors: Defines which predictor type to use for different areas of
152 * the primary image. */
153 IMAGE_ROLE_PREDICTOR
,
155 /* Color Transform Data: Defines the color transformation for different
156 * areas of the primary image. */
157 IMAGE_ROLE_COLOR_TRANSFORM
,
159 /* Color Index: Stored as an image of height == 1. */
160 IMAGE_ROLE_COLOR_INDEXING
,
165 typedef struct HuffReader
{
166 VLC vlc
; /* Huffman decoder context */
167 int simple
; /* whether to use simple mode */
168 int nb_symbols
; /* number of coded symbols */
169 uint16_t simple_symbols
[2]; /* symbols for simple mode */
172 typedef struct ImageContext
{
173 enum ImageRole role
; /* role of this image */
174 AVFrame
*frame
; /* AVFrame for data */
175 int color_cache_bits
; /* color cache size, log2 */
176 uint32_t *color_cache
; /* color cache data */
177 int nb_huffman_groups
; /* number of huffman groups */
178 HuffReader
*huffman_groups
; /* reader for each huffman group */
179 int size_reduction
; /* relative size compared to primary image, log2 */
180 int is_alpha_primary
;
183 typedef struct WebPContext
{
184 VP8Context v
; /* VP8 Context used for lossy decoding */
185 GetBitContext gb
; /* bitstream reader for main image chunk */
186 AVFrame
*alpha_frame
; /* AVFrame for alpha data decompressed from VP8L */
187 AVCodecContext
*avctx
; /* parent AVCodecContext */
188 int initialized
; /* set once the VP8 context is initialized */
189 int has_alpha
; /* has a separate alpha chunk */
190 enum AlphaCompression alpha_compression
; /* compression type for alpha chunk */
191 enum AlphaFilter alpha_filter
; /* filtering method for alpha chunk */
192 uint8_t *alpha_data
; /* alpha chunk data */
193 int alpha_data_size
; /* alpha chunk data size */
194 int width
; /* image width */
195 int height
; /* image height */
196 int lossless
; /* indicates lossless or lossy */
198 int nb_transforms
; /* number of transforms */
199 enum TransformType transforms
[4]; /* transformations used in the image, in order */
200 int reduced_width
; /* reduced width for index image, if applicable */
201 int nb_huffman_groups
; /* number of huffman groups in the primary image */
202 ImageContext image
[IMAGE_ROLE_NB
]; /* image context for each role */
/* Pointer to the 4-byte ARGB pixel at (x, y) in the image's data plane.
 * All macro arguments are parenthesized so expressions such as (&frame)
 * or conditional arguments expand correctly. */
#define GET_PIXEL(frame, x, y) \
    ((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x))

/* Single component c of the ARGB pixel at (x, y): 0 = A, 1 = R, 2 = G, 3 = B
 * (byte order of the AV_PIX_FMT_ARGB layout used by this decoder). */
#define GET_PIXEL_COMP(frame, x, y, c) \
    (*((frame)->data[0] + (y) * (frame)->linesize[0] + 4 * (x) + (c)))
211 static void image_ctx_free(ImageContext
*img
)
215 av_free(img
->color_cache
);
216 if (img
->role
!= IMAGE_ROLE_ARGB
&& !img
->is_alpha_primary
)
217 av_frame_free(&img
->frame
);
218 if (img
->huffman_groups
) {
219 for (i
= 0; i
< img
->nb_huffman_groups
; i
++) {
220 for (j
= 0; j
< HUFFMAN_CODES_PER_META_CODE
; j
++)
221 ff_free_vlc(&img
->huffman_groups
[i
* HUFFMAN_CODES_PER_META_CODE
+ j
].vlc
);
223 av_free(img
->huffman_groups
);
225 memset(img
, 0, sizeof(*img
));
229 /* Differs from get_vlc2() in the following ways:
230 * - codes are bit-reversed
231 * - assumes 8-bit table to make reversal simpler
232 * - assumes max depth of 2 since the max code length for WebP is 15
234 static av_always_inline
int webp_get_vlc(GetBitContext
*gb
, VLC_TYPE (*table
)[2])
241 UPDATE_CACHE(re
, gb
);
243 index
= SHOW_UBITS(re
, gb
, 8);
244 index
= ff_reverse
[index
];
245 code
= table
[index
][0];
249 LAST_SKIP_BITS(re
, gb
, 8);
250 UPDATE_CACHE(re
, gb
);
254 index
= SHOW_UBITS(re
, gb
, nb_bits
);
255 index
= (ff_reverse
[index
] >> (8 - nb_bits
)) + code
;
256 code
= table
[index
][0];
259 SKIP_BITS(re
, gb
, n
);
261 CLOSE_READER(re
, gb
);
266 static int huff_reader_get_symbol(HuffReader
*r
, GetBitContext
*gb
)
269 if (r
->nb_symbols
== 1)
270 return r
->simple_symbols
[0];
272 return r
->simple_symbols
[get_bits1(gb
)];
274 return webp_get_vlc(gb
, r
->vlc
.table
);
277 static int huff_reader_build_canonical(HuffReader
*r
, int *code_lengths
,
280 int len
= 0, sym
, code
= 0, ret
;
281 int max_code_length
= 0;
284 /* special-case 1 symbol since the vlc reader cannot handle it */
285 for (sym
= 0; sym
< alphabet_size
; sym
++) {
286 if (code_lengths
[sym
] > 0) {
295 r
->simple_symbols
[0] = code
;
300 for (sym
= 0; sym
< alphabet_size
; sym
++)
301 max_code_length
= FFMAX(max_code_length
, code_lengths
[sym
]);
303 if (max_code_length
== 0 || max_code_length
> MAX_HUFFMAN_CODE_LENGTH
)
304 return AVERROR(EINVAL
);
306 codes
= av_malloc(alphabet_size
* sizeof(*codes
));
308 return AVERROR(ENOMEM
);
312 for (len
= 1; len
<= max_code_length
; len
++) {
313 for (sym
= 0; sym
< alphabet_size
; sym
++) {
314 if (code_lengths
[sym
] != len
)
321 if (!r
->nb_symbols
) {
323 return AVERROR_INVALIDDATA
;
326 ret
= init_vlc(&r
->vlc
, 8, alphabet_size
,
327 code_lengths
, sizeof(*code_lengths
), sizeof(*code_lengths
),
328 codes
, sizeof(*codes
), sizeof(*codes
), 0);
339 static void read_huffman_code_simple(WebPContext
*s
, HuffReader
*hc
)
341 hc
->nb_symbols
= get_bits1(&s
->gb
) + 1;
343 if (get_bits1(&s
->gb
))
344 hc
->simple_symbols
[0] = get_bits(&s
->gb
, 8);
346 hc
->simple_symbols
[0] = get_bits1(&s
->gb
);
348 if (hc
->nb_symbols
== 2)
349 hc
->simple_symbols
[1] = get_bits(&s
->gb
, 8);
354 static int read_huffman_code_normal(WebPContext
*s
, HuffReader
*hc
,
357 HuffReader code_len_hc
= { { 0 }, 0, 0, { 0 } };
358 int *code_lengths
= NULL
;
359 int code_length_code_lengths
[NUM_CODE_LENGTH_CODES
] = { 0 };
360 int i
, symbol
, max_symbol
, prev_code_len
, ret
;
361 int num_codes
= 4 + get_bits(&s
->gb
, 4);
363 if (num_codes
> NUM_CODE_LENGTH_CODES
)
364 return AVERROR_INVALIDDATA
;
366 for (i
= 0; i
< num_codes
; i
++)
367 code_length_code_lengths
[code_length_code_order
[i
]] = get_bits(&s
->gb
, 3);
369 ret
= huff_reader_build_canonical(&code_len_hc
, code_length_code_lengths
,
370 NUM_CODE_LENGTH_CODES
);
374 code_lengths
= av_mallocz_array(alphabet_size
, sizeof(*code_lengths
));
376 ret
= AVERROR(ENOMEM
);
380 if (get_bits1(&s
->gb
)) {
381 int bits
= 2 + 2 * get_bits(&s
->gb
, 3);
382 max_symbol
= 2 + get_bits(&s
->gb
, bits
);
383 if (max_symbol
> alphabet_size
) {
384 av_log(s
->avctx
, AV_LOG_ERROR
, "max symbol %d > alphabet size %d\n",
385 max_symbol
, alphabet_size
);
386 ret
= AVERROR_INVALIDDATA
;
390 max_symbol
= alphabet_size
;
395 while (symbol
< alphabet_size
) {
400 code_len
= huff_reader_get_symbol(&code_len_hc
, &s
->gb
);
402 /* Code length code [0..15] indicates literal code lengths. */
403 code_lengths
[symbol
++] = code_len
;
405 prev_code_len
= code_len
;
407 int repeat
= 0, length
= 0;
410 /* Code 16 repeats the previous non-zero value [3..6] times,
411 * i.e., 3 + ReadBits(2) times. If code 16 is used before a
412 * non-zero value has been emitted, a value of 8 is repeated. */
413 repeat
= 3 + get_bits(&s
->gb
, 2);
414 length
= prev_code_len
;
417 /* Code 17 emits a streak of zeros [3..10], i.e.,
418 * 3 + ReadBits(3) times. */
419 repeat
= 3 + get_bits(&s
->gb
, 3);
422 /* Code 18 emits a streak of zeros of length [11..138], i.e.,
423 * 11 + ReadBits(7) times. */
424 repeat
= 11 + get_bits(&s
->gb
, 7);
427 if (symbol
+ repeat
> alphabet_size
) {
428 av_log(s
->avctx
, AV_LOG_ERROR
,
429 "invalid symbol %d + repeat %d > alphabet size %d\n",
430 symbol
, repeat
, alphabet_size
);
431 ret
= AVERROR_INVALIDDATA
;
435 code_lengths
[symbol
++] = length
;
439 ret
= huff_reader_build_canonical(hc
, code_lengths
, alphabet_size
);
442 ff_free_vlc(&code_len_hc
.vlc
);
443 av_free(code_lengths
);
447 static int decode_entropy_coded_image(WebPContext
*s
, enum ImageRole role
,
450 #define PARSE_BLOCK_SIZE(w, h) do { \
451 block_bits = get_bits(&s->gb, 3) + 2; \
452 blocks_w = FFALIGN((w), 1 << block_bits) >> block_bits; \
453 blocks_h = FFALIGN((h), 1 << block_bits) >> block_bits; \
456 static int decode_entropy_image(WebPContext
*s
)
459 int ret
, block_bits
, width
, blocks_w
, blocks_h
, x
, y
, max
;
462 if (s
->reduced_width
> 0)
463 width
= s
->reduced_width
;
465 PARSE_BLOCK_SIZE(width
, s
->height
);
467 ret
= decode_entropy_coded_image(s
, IMAGE_ROLE_ENTROPY
, blocks_w
, blocks_h
);
471 img
= &s
->image
[IMAGE_ROLE_ENTROPY
];
472 img
->size_reduction
= block_bits
;
474 /* the number of huffman groups is determined by the maximum group number
475 * coded in the entropy image */
477 for (y
= 0; y
< img
->frame
->height
; y
++) {
478 for (x
= 0; x
< img
->frame
->width
; x
++) {
479 int p0
= GET_PIXEL_COMP(img
->frame
, x
, y
, 1);
480 int p1
= GET_PIXEL_COMP(img
->frame
, x
, y
, 2);
481 int p
= p0
<< 8 | p1
;
485 s
->nb_huffman_groups
= max
+ 1;
490 static int parse_transform_predictor(WebPContext
*s
)
492 int block_bits
, blocks_w
, blocks_h
, ret
;
494 PARSE_BLOCK_SIZE(s
->width
, s
->height
);
496 ret
= decode_entropy_coded_image(s
, IMAGE_ROLE_PREDICTOR
, blocks_w
,
501 s
->image
[IMAGE_ROLE_PREDICTOR
].size_reduction
= block_bits
;
506 static int parse_transform_color(WebPContext
*s
)
508 int block_bits
, blocks_w
, blocks_h
, ret
;
510 PARSE_BLOCK_SIZE(s
->width
, s
->height
);
512 ret
= decode_entropy_coded_image(s
, IMAGE_ROLE_COLOR_TRANSFORM
, blocks_w
,
517 s
->image
[IMAGE_ROLE_COLOR_TRANSFORM
].size_reduction
= block_bits
;
522 static int parse_transform_color_indexing(WebPContext
*s
)
525 int width_bits
, index_size
, ret
, x
;
528 index_size
= get_bits(&s
->gb
, 8) + 1;
532 else if (index_size
<= 4)
534 else if (index_size
<= 16)
539 ret
= decode_entropy_coded_image(s
, IMAGE_ROLE_COLOR_INDEXING
,
544 img
= &s
->image
[IMAGE_ROLE_COLOR_INDEXING
];
545 img
->size_reduction
= width_bits
;
547 s
->reduced_width
= (s
->width
+ ((1 << width_bits
) - 1)) >> width_bits
;
549 /* color index values are delta-coded */
550 ct
= img
->frame
->data
[0] + 4;
551 for (x
= 4; x
< img
->frame
->width
* 4; x
++, ct
++)
557 static HuffReader
*get_huffman_group(WebPContext
*s
, ImageContext
*img
,
560 ImageContext
*gimg
= &s
->image
[IMAGE_ROLE_ENTROPY
];
563 if (gimg
->size_reduction
> 0) {
564 int group_x
= x
>> gimg
->size_reduction
;
565 int group_y
= y
>> gimg
->size_reduction
;
566 int g0
= GET_PIXEL_COMP(gimg
->frame
, group_x
, group_y
, 1);
567 int g1
= GET_PIXEL_COMP(gimg
->frame
, group_x
, group_y
, 2);
568 group
= g0
<< 8 | g1
;
571 return &img
->huffman_groups
[group
* HUFFMAN_CODES_PER_META_CODE
];
574 static av_always_inline
void color_cache_put(ImageContext
*img
, uint32_t c
)
576 uint32_t cache_idx
= (0x1E35A7BD * c
) >> (32 - img
->color_cache_bits
);
577 img
->color_cache
[cache_idx
] = c
;
580 static int decode_entropy_coded_image(WebPContext
*s
, enum ImageRole role
,
585 int i
, j
, ret
, x
, y
, width
;
587 img
= &s
->image
[role
];
591 img
->frame
= av_frame_alloc();
593 return AVERROR(ENOMEM
);
596 img
->frame
->format
= AV_PIX_FMT_ARGB
;
597 img
->frame
->width
= w
;
598 img
->frame
->height
= h
;
600 if (role
== IMAGE_ROLE_ARGB
&& !img
->is_alpha_primary
) {
601 ThreadFrame pt
= { .f
= img
->frame
};
602 ret
= ff_thread_get_buffer(s
->avctx
, &pt
, 0);
604 ret
= av_frame_get_buffer(img
->frame
, 1);
608 if (get_bits1(&s
->gb
)) {
609 img
->color_cache_bits
= get_bits(&s
->gb
, 4);
610 if (img
->color_cache_bits
< 1 || img
->color_cache_bits
> 11) {
611 av_log(s
->avctx
, AV_LOG_ERROR
, "invalid color cache bits: %d\n",
612 img
->color_cache_bits
);
613 return AVERROR_INVALIDDATA
;
615 img
->color_cache
= av_mallocz_array(1 << img
->color_cache_bits
,
616 sizeof(*img
->color_cache
));
617 if (!img
->color_cache
)
618 return AVERROR(ENOMEM
);
620 img
->color_cache_bits
= 0;
623 img
->nb_huffman_groups
= 1;
624 if (role
== IMAGE_ROLE_ARGB
&& get_bits1(&s
->gb
)) {
625 ret
= decode_entropy_image(s
);
628 img
->nb_huffman_groups
= s
->nb_huffman_groups
;
630 img
->huffman_groups
= av_mallocz_array(img
->nb_huffman_groups
*
631 HUFFMAN_CODES_PER_META_CODE
,
632 sizeof(*img
->huffman_groups
));
633 if (!img
->huffman_groups
)
634 return AVERROR(ENOMEM
);
636 for (i
= 0; i
< img
->nb_huffman_groups
; i
++) {
637 hg
= &img
->huffman_groups
[i
* HUFFMAN_CODES_PER_META_CODE
];
638 for (j
= 0; j
< HUFFMAN_CODES_PER_META_CODE
; j
++) {
639 int alphabet_size
= alphabet_sizes
[j
];
640 if (!j
&& img
->color_cache_bits
> 0)
641 alphabet_size
+= 1 << img
->color_cache_bits
;
643 if (get_bits1(&s
->gb
)) {
644 read_huffman_code_simple(s
, &hg
[j
]);
646 ret
= read_huffman_code_normal(s
, &hg
[j
], alphabet_size
);
653 width
= img
->frame
->width
;
654 if (role
== IMAGE_ROLE_ARGB
&& s
->reduced_width
> 0)
655 width
= s
->reduced_width
;
658 while (y
< img
->frame
->height
) {
661 hg
= get_huffman_group(s
, img
, x
, y
);
662 v
= huff_reader_get_symbol(&hg
[HUFF_IDX_GREEN
], &s
->gb
);
663 if (v
< NUM_LITERAL_CODES
) {
664 /* literal pixel values */
665 uint8_t *p
= GET_PIXEL(img
->frame
, x
, y
);
667 p
[1] = huff_reader_get_symbol(&hg
[HUFF_IDX_RED
], &s
->gb
);
668 p
[3] = huff_reader_get_symbol(&hg
[HUFF_IDX_BLUE
], &s
->gb
);
669 p
[0] = huff_reader_get_symbol(&hg
[HUFF_IDX_ALPHA
], &s
->gb
);
670 if (img
->color_cache_bits
)
671 color_cache_put(img
, AV_RB32(p
));
677 } else if (v
< NUM_LITERAL_CODES
+ NUM_LENGTH_CODES
) {
678 /* LZ77 backwards mapping */
679 int prefix_code
, length
, distance
, ref_x
, ref_y
;
681 /* parse length and distance */
682 prefix_code
= v
- NUM_LITERAL_CODES
;
683 if (prefix_code
< 4) {
684 length
= prefix_code
+ 1;
686 int extra_bits
= (prefix_code
- 2) >> 1;
687 int offset
= 2 + (prefix_code
& 1) << extra_bits
;
688 length
= offset
+ get_bits(&s
->gb
, extra_bits
) + 1;
690 prefix_code
= huff_reader_get_symbol(&hg
[HUFF_IDX_DIST
], &s
->gb
);
691 if (prefix_code
> 39) {
692 av_log(s
->avctx
, AV_LOG_ERROR
,
693 "distance prefix code too large: %d\n", prefix_code
);
694 return AVERROR_INVALIDDATA
;
696 if (prefix_code
< 4) {
697 distance
= prefix_code
+ 1;
699 int extra_bits
= prefix_code
- 2 >> 1;
700 int offset
= 2 + (prefix_code
& 1) << extra_bits
;
701 distance
= offset
+ get_bits(&s
->gb
, extra_bits
) + 1;
704 /* find reference location */
705 if (distance
<= NUM_SHORT_DISTANCES
) {
706 int xi
= lz77_distance_offsets
[distance
- 1][0];
707 int yi
= lz77_distance_offsets
[distance
- 1][1];
708 distance
= FFMAX(1, xi
+ yi
* width
);
710 distance
-= NUM_SHORT_DISTANCES
;
721 while (distance
>= width
) {
726 ref_x
= width
- distance
;
729 ref_x
= FFMAX(0, ref_x
);
730 ref_y
= FFMAX(0, ref_y
);
733 * source and dest regions can overlap and wrap lines, so just
735 for (i
= 0; i
< length
; i
++) {
736 uint8_t *p_ref
= GET_PIXEL(img
->frame
, ref_x
, ref_y
);
737 uint8_t *p
= GET_PIXEL(img
->frame
, x
, y
);
740 if (img
->color_cache_bits
)
741 color_cache_put(img
, AV_RB32(p
));
748 if (ref_x
== width
) {
752 if (y
== img
->frame
->height
|| ref_y
== img
->frame
->height
)
756 /* read from color cache */
757 uint8_t *p
= GET_PIXEL(img
->frame
, x
, y
);
758 int cache_idx
= v
- (NUM_LITERAL_CODES
+ NUM_LENGTH_CODES
);
760 if (!img
->color_cache_bits
) {
761 av_log(s
->avctx
, AV_LOG_ERROR
, "color cache not found\n");
762 return AVERROR_INVALIDDATA
;
764 if (cache_idx
>= 1 << img
->color_cache_bits
) {
765 av_log(s
->avctx
, AV_LOG_ERROR
,
766 "color cache index out-of-bounds\n");
767 return AVERROR_INVALIDDATA
;
769 AV_WB32(p
, img
->color_cache
[cache_idx
]);
781 /* PRED_MODE_BLACK */
782 static void inv_predict_0(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
783 const uint8_t *p_t
, const uint8_t *p_tr
)
785 AV_WB32(p
, 0xFF000000);
789 static void inv_predict_1(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
790 const uint8_t *p_t
, const uint8_t *p_tr
)
796 static void inv_predict_2(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
797 const uint8_t *p_t
, const uint8_t *p_tr
)
803 static void inv_predict_3(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
804 const uint8_t *p_t
, const uint8_t *p_tr
)
810 static void inv_predict_4(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
811 const uint8_t *p_t
, const uint8_t *p_tr
)
816 /* PRED_MODE_AVG_T_AVG_L_TR */
817 static void inv_predict_5(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
818 const uint8_t *p_t
, const uint8_t *p_tr
)
820 p
[0] = p_t
[0] + (p_l
[0] + p_tr
[0] >> 1) >> 1;
821 p
[1] = p_t
[1] + (p_l
[1] + p_tr
[1] >> 1) >> 1;
822 p
[2] = p_t
[2] + (p_l
[2] + p_tr
[2] >> 1) >> 1;
823 p
[3] = p_t
[3] + (p_l
[3] + p_tr
[3] >> 1) >> 1;
826 /* PRED_MODE_AVG_L_TL */
827 static void inv_predict_6(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
828 const uint8_t *p_t
, const uint8_t *p_tr
)
830 p
[0] = p_l
[0] + p_tl
[0] >> 1;
831 p
[1] = p_l
[1] + p_tl
[1] >> 1;
832 p
[2] = p_l
[2] + p_tl
[2] >> 1;
833 p
[3] = p_l
[3] + p_tl
[3] >> 1;
836 /* PRED_MODE_AVG_L_T */
837 static void inv_predict_7(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
838 const uint8_t *p_t
, const uint8_t *p_tr
)
840 p
[0] = p_l
[0] + p_t
[0] >> 1;
841 p
[1] = p_l
[1] + p_t
[1] >> 1;
842 p
[2] = p_l
[2] + p_t
[2] >> 1;
843 p
[3] = p_l
[3] + p_t
[3] >> 1;
846 /* PRED_MODE_AVG_TL_T */
847 static void inv_predict_8(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
848 const uint8_t *p_t
, const uint8_t *p_tr
)
850 p
[0] = p_tl
[0] + p_t
[0] >> 1;
851 p
[1] = p_tl
[1] + p_t
[1] >> 1;
852 p
[2] = p_tl
[2] + p_t
[2] >> 1;
853 p
[3] = p_tl
[3] + p_t
[3] >> 1;
856 /* PRED_MODE_AVG_T_TR */
857 static void inv_predict_9(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
858 const uint8_t *p_t
, const uint8_t *p_tr
)
860 p
[0] = p_t
[0] + p_tr
[0] >> 1;
861 p
[1] = p_t
[1] + p_tr
[1] >> 1;
862 p
[2] = p_t
[2] + p_tr
[2] >> 1;
863 p
[3] = p_t
[3] + p_tr
[3] >> 1;
866 /* PRED_MODE_AVG_AVG_L_TL_AVG_T_TR */
867 static void inv_predict_10(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
868 const uint8_t *p_t
, const uint8_t *p_tr
)
870 p
[0] = (p_l
[0] + p_tl
[0] >> 1) + (p_t
[0] + p_tr
[0] >> 1) >> 1;
871 p
[1] = (p_l
[1] + p_tl
[1] >> 1) + (p_t
[1] + p_tr
[1] >> 1) >> 1;
872 p
[2] = (p_l
[2] + p_tl
[2] >> 1) + (p_t
[2] + p_tr
[2] >> 1) >> 1;
873 p
[3] = (p_l
[3] + p_tl
[3] >> 1) + (p_t
[3] + p_tr
[3] >> 1) >> 1;
876 /* PRED_MODE_SELECT */
877 static void inv_predict_11(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
878 const uint8_t *p_t
, const uint8_t *p_tr
)
880 int diff
= (FFABS(p_l
[0] - p_tl
[0]) - FFABS(p_t
[0] - p_tl
[0])) +
881 (FFABS(p_l
[1] - p_tl
[1]) - FFABS(p_t
[1] - p_tl
[1])) +
882 (FFABS(p_l
[2] - p_tl
[2]) - FFABS(p_t
[2] - p_tl
[2])) +
883 (FFABS(p_l
[3] - p_tl
[3]) - FFABS(p_t
[3] - p_tl
[3]));
890 /* PRED_MODE_ADD_SUBTRACT_FULL */
891 static void inv_predict_12(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
892 const uint8_t *p_t
, const uint8_t *p_tr
)
894 p
[0] = av_clip_uint8(p_l
[0] + p_t
[0] - p_tl
[0]);
895 p
[1] = av_clip_uint8(p_l
[1] + p_t
[1] - p_tl
[1]);
896 p
[2] = av_clip_uint8(p_l
[2] + p_t
[2] - p_tl
[2]);
897 p
[3] = av_clip_uint8(p_l
[3] + p_t
[3] - p_tl
[3]);
900 static av_always_inline
uint8_t clamp_add_subtract_half(int a
, int b
, int c
)
903 return av_clip_uint8(d
+ (d
- c
) / 2);
906 /* PRED_MODE_ADD_SUBTRACT_HALF */
907 static void inv_predict_13(uint8_t *p
, const uint8_t *p_l
, const uint8_t *p_tl
,
908 const uint8_t *p_t
, const uint8_t *p_tr
)
910 p
[0] = clamp_add_subtract_half(p_l
[0], p_t
[0], p_tl
[0]);
911 p
[1] = clamp_add_subtract_half(p_l
[1], p_t
[1], p_tl
[1]);
912 p
[2] = clamp_add_subtract_half(p_l
[2], p_t
[2], p_tl
[2]);
913 p
[3] = clamp_add_subtract_half(p_l
[3], p_t
[3], p_tl
[3]);
916 typedef void (*inv_predict_func
)(uint8_t *p
, const uint8_t *p_l
,
917 const uint8_t *p_tl
, const uint8_t *p_t
,
918 const uint8_t *p_tr
);
920 static const inv_predict_func inverse_predict
[14] = {
921 inv_predict_0
, inv_predict_1
, inv_predict_2
, inv_predict_3
,
922 inv_predict_4
, inv_predict_5
, inv_predict_6
, inv_predict_7
,
923 inv_predict_8
, inv_predict_9
, inv_predict_10
, inv_predict_11
,
924 inv_predict_12
, inv_predict_13
,
927 static void inverse_prediction(AVFrame
*frame
, enum PredictionMode m
, int x
, int y
)
929 uint8_t *dec
, *p_l
, *p_tl
, *p_t
, *p_tr
;
932 dec
= GET_PIXEL(frame
, x
, y
);
933 p_l
= GET_PIXEL(frame
, x
- 1, y
);
934 p_tl
= GET_PIXEL(frame
, x
- 1, y
- 1);
935 p_t
= GET_PIXEL(frame
, x
, y
- 1);
936 if (x
== frame
->width
- 1)
937 p_tr
= GET_PIXEL(frame
, 0, y
);
939 p_tr
= GET_PIXEL(frame
, x
+ 1, y
- 1);
941 inverse_predict
[m
](p
, p_l
, p_tl
, p_t
, p_tr
);
949 static int apply_predictor_transform(WebPContext
*s
)
951 ImageContext
*img
= &s
->image
[IMAGE_ROLE_ARGB
];
952 ImageContext
*pimg
= &s
->image
[IMAGE_ROLE_PREDICTOR
];
955 for (y
= 0; y
< img
->frame
->height
; y
++) {
956 for (x
= 0; x
< img
->frame
->width
; x
++) {
957 int tx
= x
>> pimg
->size_reduction
;
958 int ty
= y
>> pimg
->size_reduction
;
959 enum PredictionMode m
= GET_PIXEL_COMP(pimg
->frame
, tx
, ty
, 2);
970 av_log(s
->avctx
, AV_LOG_ERROR
,
971 "invalid predictor mode: %d\n", m
);
972 return AVERROR_INVALIDDATA
;
974 inverse_prediction(img
->frame
, m
, x
, y
);
980 static av_always_inline
uint8_t color_transform_delta(uint8_t color_pred
,
983 return (int)ff_u8_to_s8(color_pred
) * ff_u8_to_s8(color
) >> 5;
986 static int apply_color_transform(WebPContext
*s
)
988 ImageContext
*img
, *cimg
;
992 img
= &s
->image
[IMAGE_ROLE_ARGB
];
993 cimg
= &s
->image
[IMAGE_ROLE_COLOR_TRANSFORM
];
995 for (y
= 0; y
< img
->frame
->height
; y
++) {
996 for (x
= 0; x
< img
->frame
->width
; x
++) {
997 cx
= x
>> cimg
->size_reduction
;
998 cy
= y
>> cimg
->size_reduction
;
999 cp
= GET_PIXEL(cimg
->frame
, cx
, cy
);
1000 p
= GET_PIXEL(img
->frame
, x
, y
);
1002 p
[1] += color_transform_delta(cp
[3], p
[2]);
1003 p
[3] += color_transform_delta(cp
[2], p
[2]) +
1004 color_transform_delta(cp
[1], p
[1]);
1010 static int apply_subtract_green_transform(WebPContext
*s
)
1013 ImageContext
*img
= &s
->image
[IMAGE_ROLE_ARGB
];
1015 for (y
= 0; y
< img
->frame
->height
; y
++) {
1016 for (x
= 0; x
< img
->frame
->width
; x
++) {
1017 uint8_t *p
= GET_PIXEL(img
->frame
, x
, y
);
1025 static int apply_color_indexing_transform(WebPContext
*s
)
1032 img
= &s
->image
[IMAGE_ROLE_ARGB
];
1033 pal
= &s
->image
[IMAGE_ROLE_COLOR_INDEXING
];
1035 if (pal
->size_reduction
> 0) {
1038 int pixel_bits
= 8 >> pal
->size_reduction
;
1040 line
= av_malloc(img
->frame
->linesize
[0]);
1042 return AVERROR(ENOMEM
);
1044 for (y
= 0; y
< img
->frame
->height
; y
++) {
1045 p
= GET_PIXEL(img
->frame
, 0, y
);
1046 memcpy(line
, p
, img
->frame
->linesize
[0]);
1047 init_get_bits(&gb_g
, line
, img
->frame
->linesize
[0] * 8);
1048 skip_bits(&gb_g
, 16);
1050 for (x
= 0; x
< img
->frame
->width
; x
++) {
1051 p
= GET_PIXEL(img
->frame
, x
, y
);
1052 p
[2] = get_bits(&gb_g
, pixel_bits
);
1054 if (i
== 1 << pal
->size_reduction
) {
1055 skip_bits(&gb_g
, 24);
1063 for (y
= 0; y
< img
->frame
->height
; y
++) {
1064 for (x
= 0; x
< img
->frame
->width
; x
++) {
1065 p
= GET_PIXEL(img
->frame
, x
, y
);
1067 if (i
>= pal
->frame
->width
) {
1068 av_log(s
->avctx
, AV_LOG_ERROR
, "invalid palette index %d\n", i
);
1069 return AVERROR_INVALIDDATA
;
1071 pi
= GET_PIXEL(pal
->frame
, i
, 0);
1079 static int vp8_lossless_decode_frame(AVCodecContext
*avctx
, AVFrame
*p
,
1080 int *got_frame
, uint8_t *data_start
,
1081 unsigned int data_size
, int is_alpha_chunk
)
1083 WebPContext
*s
= avctx
->priv_data
;
1084 int w
, h
, ret
, i
, used
;
1086 if (!is_alpha_chunk
) {
1088 avctx
->pix_fmt
= AV_PIX_FMT_ARGB
;
1091 ret
= init_get_bits(&s
->gb
, data_start
, data_size
* 8);
1095 if (!is_alpha_chunk
) {
1096 if (get_bits(&s
->gb
, 8) != 0x2F) {
1097 av_log(avctx
, AV_LOG_ERROR
, "Invalid WebP Lossless signature\n");
1098 return AVERROR_INVALIDDATA
;
1101 w
= get_bits(&s
->gb
, 14) + 1;
1102 h
= get_bits(&s
->gb
, 14) + 1;
1103 if (s
->width
&& s
->width
!= w
) {
1104 av_log(avctx
, AV_LOG_WARNING
, "Width mismatch. %d != %d\n",
1108 if (s
->height
&& s
->height
!= h
) {
1109 av_log(avctx
, AV_LOG_WARNING
, "Height mismatch. %d != %d\n",
1114 ret
= ff_set_dimensions(avctx
, s
->width
, s
->height
);
1118 s
->has_alpha
= get_bits1(&s
->gb
);
1120 if (get_bits(&s
->gb
, 3) != 0x0) {
1121 av_log(avctx
, AV_LOG_ERROR
, "Invalid WebP Lossless version\n");
1122 return AVERROR_INVALIDDATA
;
1125 if (!s
->width
|| !s
->height
)
1131 /* parse transformations */
1132 s
->nb_transforms
= 0;
1133 s
->reduced_width
= 0;
1135 while (get_bits1(&s
->gb
)) {
1136 enum TransformType transform
= get_bits(&s
->gb
, 2);
1137 s
->transforms
[s
->nb_transforms
++] = transform
;
1138 if (used
& (1 << transform
)) {
1139 av_log(avctx
, AV_LOG_ERROR
, "Transform %d used more than once\n",
1141 ret
= AVERROR_INVALIDDATA
;
1142 goto free_and_return
;
1144 used
|= (1 << transform
);
1145 switch (transform
) {
1146 case PREDICTOR_TRANSFORM
:
1147 ret
= parse_transform_predictor(s
);
1149 case COLOR_TRANSFORM
:
1150 ret
= parse_transform_color(s
);
1152 case COLOR_INDEXING_TRANSFORM
:
1153 ret
= parse_transform_color_indexing(s
);
1157 goto free_and_return
;
1160 /* decode primary image */
1161 s
->image
[IMAGE_ROLE_ARGB
].frame
= p
;
1163 s
->image
[IMAGE_ROLE_ARGB
].is_alpha_primary
= 1;
1164 ret
= decode_entropy_coded_image(s
, IMAGE_ROLE_ARGB
, w
, h
);
1166 goto free_and_return
;
1168 /* apply transformations */
1169 for (i
= s
->nb_transforms
- 1; i
>= 0; i
--) {
1170 switch (s
->transforms
[i
]) {
1171 case PREDICTOR_TRANSFORM
:
1172 ret
= apply_predictor_transform(s
);
1174 case COLOR_TRANSFORM
:
1175 ret
= apply_color_transform(s
);
1177 case SUBTRACT_GREEN
:
1178 ret
= apply_subtract_green_transform(s
);
1180 case COLOR_INDEXING_TRANSFORM
:
1181 ret
= apply_color_indexing_transform(s
);
1185 goto free_and_return
;
1189 p
->pict_type
= AV_PICTURE_TYPE_I
;
1194 for (i
= 0; i
< IMAGE_ROLE_NB
; i
++)
1195 image_ctx_free(&s
->image
[i
]);
1200 static void alpha_inverse_prediction(AVFrame
*frame
, enum AlphaFilter m
)
1205 ls
= frame
->linesize
[3];
1207 /* filter first row using horizontal filter */
1208 dec
= frame
->data
[3] + 1;
1209 for (x
= 1; x
< frame
->width
; x
++, dec
++)
1212 /* filter first column using vertical filter */
1213 dec
= frame
->data
[3] + ls
;
1214 for (y
= 1; y
< frame
->height
; y
++, dec
+= ls
)
1215 *dec
+= *(dec
- ls
);
1217 /* filter the rest using the specified filter */
1219 case ALPHA_FILTER_HORIZONTAL
:
1220 for (y
= 1; y
< frame
->height
; y
++) {
1221 dec
= frame
->data
[3] + y
* ls
+ 1;
1222 for (x
= 1; x
< frame
->width
; x
++, dec
++)
1226 case ALPHA_FILTER_VERTICAL
:
1227 for (y
= 1; y
< frame
->height
; y
++) {
1228 dec
= frame
->data
[3] + y
* ls
+ 1;
1229 for (x
= 1; x
< frame
->width
; x
++, dec
++)
1230 *dec
+= *(dec
- ls
);
1233 case ALPHA_FILTER_GRADIENT
:
1234 for (y
= 1; y
< frame
->height
; y
++) {
1235 dec
= frame
->data
[3] + y
* ls
+ 1;
1236 for (x
= 1; x
< frame
->width
; x
++, dec
++)
1237 dec
[0] += av_clip_uint8(*(dec
- 1) + *(dec
- ls
) - *(dec
- ls
- 1));
1243 static int vp8_lossy_decode_alpha(AVCodecContext
*avctx
, AVFrame
*p
,
1244 uint8_t *data_start
,
1245 unsigned int data_size
)
1247 WebPContext
*s
= avctx
->priv_data
;
1250 if (s
->alpha_compression
== ALPHA_COMPRESSION_NONE
) {
1253 bytestream2_init(&gb
, data_start
, data_size
);
1254 for (y
= 0; y
< s
->height
; y
++)
1255 bytestream2_get_buffer(&gb
, p
->data
[3] + p
->linesize
[3] * y
,
1257 } else if (s
->alpha_compression
== ALPHA_COMPRESSION_VP8L
) {
1259 int alpha_got_frame
= 0;
1261 s
->alpha_frame
= av_frame_alloc();
1262 if (!s
->alpha_frame
)
1263 return AVERROR(ENOMEM
);
1265 ret
= vp8_lossless_decode_frame(avctx
, s
->alpha_frame
, &alpha_got_frame
,
1266 data_start
, data_size
, 1);
1268 av_frame_free(&s
->alpha_frame
);
1271 if (!alpha_got_frame
) {
1272 av_frame_free(&s
->alpha_frame
);
1273 return AVERROR_INVALIDDATA
;
1276 /* copy green component of alpha image to alpha plane of primary image */
1277 for (y
= 0; y
< s
->height
; y
++) {
1278 ap
= GET_PIXEL(s
->alpha_frame
, 0, y
) + 2;
1279 pp
= p
->data
[3] + p
->linesize
[3] * y
;
1280 for (x
= 0; x
< s
->width
; x
++) {
1286 av_frame_free(&s
->alpha_frame
);
1289 /* apply alpha filtering */
1290 if (s
->alpha_filter
)
1291 alpha_inverse_prediction(p
, s
->alpha_filter
);
1296 static int vp8_lossy_decode_frame(AVCodecContext
*avctx
, AVFrame
*p
,
1297 int *got_frame
, uint8_t *data_start
,
1298 unsigned int data_size
)
1300 WebPContext
*s
= avctx
->priv_data
;
1304 if (!s
->initialized
) {
1305 ff_vp8_decode_init(avctx
);
1308 avctx
->pix_fmt
= AV_PIX_FMT_YUVA420P
;
1312 if (data_size
> INT_MAX
) {
1313 av_log(avctx
, AV_LOG_ERROR
, "unsupported chunk size\n");
1314 return AVERROR_PATCHWELCOME
;
1317 av_init_packet(&pkt
);
1318 pkt
.data
= data_start
;
1319 pkt
.size
= data_size
;
1321 ret
= ff_vp8_decode_frame(avctx
, p
, got_frame
, &pkt
);
1323 ret
= vp8_lossy_decode_alpha(avctx
, p
, s
->alpha_data
,
1324 s
->alpha_data_size
);
1331 static int webp_decode_frame(AVCodecContext
*avctx
, void *data
, int *got_frame
,
1334 AVFrame
* const p
= data
;
1335 WebPContext
*s
= avctx
->priv_data
;
1338 uint32_t chunk_type
, chunk_size
;
1346 bytestream2_init(&gb
, avpkt
->data
, avpkt
->size
);
1348 if (bytestream2_get_bytes_left(&gb
) < 12)
1349 return AVERROR_INVALIDDATA
;
1351 if (bytestream2_get_le32(&gb
) != MKTAG('R', 'I', 'F', 'F')) {
1352 av_log(avctx
, AV_LOG_ERROR
, "missing RIFF tag\n");
1353 return AVERROR_INVALIDDATA
;
1356 chunk_size
= bytestream2_get_le32(&gb
);
1357 if (bytestream2_get_bytes_left(&gb
) < chunk_size
)
1358 return AVERROR_INVALIDDATA
;
1360 if (bytestream2_get_le32(&gb
) != MKTAG('W', 'E', 'B', 'P')) {
1361 av_log(avctx
, AV_LOG_ERROR
, "missing WEBP tag\n");
1362 return AVERROR_INVALIDDATA
;
1365 while (bytestream2_get_bytes_left(&gb
) > 8) {
1366 char chunk_str
[5] = { 0 };
1368 chunk_type
= bytestream2_get_le32(&gb
);
1369 chunk_size
= bytestream2_get_le32(&gb
);
1370 if (chunk_size
== UINT32_MAX
)
1371 return AVERROR_INVALIDDATA
;
1372 chunk_size
+= chunk_size
& 1;
1374 if (bytestream2_get_bytes_left(&gb
) < chunk_size
)
1375 return AVERROR_INVALIDDATA
;
1377 switch (chunk_type
) {
1378 case MKTAG('V', 'P', '8', ' '):
1380 ret
= vp8_lossy_decode_frame(avctx
, p
, got_frame
,
1381 avpkt
->data
+ bytestream2_tell(&gb
),
1386 bytestream2_skip(&gb
, chunk_size
);
1388 case MKTAG('V', 'P', '8', 'L'):
1390 ret
= vp8_lossless_decode_frame(avctx
, p
, got_frame
,
1391 avpkt
->data
+ bytestream2_tell(&gb
),
1396 bytestream2_skip(&gb
, chunk_size
);
1398 case MKTAG('V', 'P', '8', 'X'):
1399 vp8x_flags
= bytestream2_get_byte(&gb
);
1400 bytestream2_skip(&gb
, 3);
1401 s
->width
= bytestream2_get_le24(&gb
) + 1;
1402 s
->height
= bytestream2_get_le24(&gb
) + 1;
1403 ret
= av_image_check_size(s
->width
, s
->height
, 0, avctx
);
1407 case MKTAG('A', 'L', 'P', 'H'): {
1408 int alpha_header
, filter_m
, compression
;
1410 if (!(vp8x_flags
& VP8X_FLAG_ALPHA
)) {
1411 av_log(avctx
, AV_LOG_WARNING
,
1412 "ALPHA chunk present, but alpha bit not set in the "
1415 if (chunk_size
== 0) {
1416 av_log(avctx
, AV_LOG_ERROR
, "invalid ALPHA chunk size\n");
1417 return AVERROR_INVALIDDATA
;
1419 alpha_header
= bytestream2_get_byte(&gb
);
1420 s
->alpha_data
= avpkt
->data
+ bytestream2_tell(&gb
);
1421 s
->alpha_data_size
= chunk_size
- 1;
1422 bytestream2_skip(&gb
, s
->alpha_data_size
);
1424 filter_m
= (alpha_header
>> 2) & 0x03;
1425 compression
= alpha_header
& 0x03;
1427 if (compression
> ALPHA_COMPRESSION_VP8L
) {
1428 av_log(avctx
, AV_LOG_VERBOSE
,
1429 "skipping unsupported ALPHA chunk\n");
1432 s
->alpha_compression
= compression
;
1433 s
->alpha_filter
= filter_m
;
1438 case MKTAG('I', 'C', 'C', 'P'):
1439 case MKTAG('A', 'N', 'I', 'M'):
1440 case MKTAG('A', 'N', 'M', 'F'):
1441 case MKTAG('E', 'X', 'I', 'F'):
1442 case MKTAG('X', 'M', 'P', ' '):
1443 AV_WL32(chunk_str
, chunk_type
);
1444 av_log(avctx
, AV_LOG_VERBOSE
, "skipping unsupported chunk: %s\n",
1446 bytestream2_skip(&gb
, chunk_size
);
1449 AV_WL32(chunk_str
, chunk_type
);
1450 av_log(avctx
, AV_LOG_VERBOSE
, "skipping unknown chunk: %s\n",
1452 bytestream2_skip(&gb
, chunk_size
);
1458 av_log(avctx
, AV_LOG_ERROR
, "image data not found\n");
1459 return AVERROR_INVALIDDATA
;
1465 static av_cold
int webp_decode_close(AVCodecContext
*avctx
)
1467 WebPContext
*s
= avctx
->priv_data
;
1470 return ff_vp8_decode_free(avctx
);
1475 AVCodec ff_webp_decoder
= {
1477 .long_name
= NULL_IF_CONFIG_SMALL("WebP image"),
1478 .type
= AVMEDIA_TYPE_VIDEO
,
1479 .id
= AV_CODEC_ID_WEBP
,
1480 .priv_data_size
= sizeof(WebPContext
),
1481 .decode
= webp_decode_frame
,
1482 .close
= webp_decode_close
,
1483 .capabilities
= CODEC_CAP_DR1
| CODEC_CAP_FRAME_THREADS
,