/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "libavutil/avutil.h"
#include "libavutil/bswap.h"
#include "libavutil/cpu.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "config.h"
#include "rgb2rgb.h"
#include "swscale.h"
#include "swscale_internal.h"

#define RGB2YUV_SHIFT 15
#define BY ((int)(0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BV (-(int)(0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BU ((int)(0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GY ((int)(0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GV (-(int)(0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GU (-(int)(0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RY ((int)(0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RV ((int)(0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RU (-(int)(0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
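/* These are the BT.601 RGB->YCbCr weights scaled to limited range
 * (219/255 for luma, 224/255 for chroma) and expressed in Q15 fixed
 * point; BY, for example, works out to roughly 3208. The rounding
 * constants used below fold the level offsets into a single addition:
 * 33 << (RGB2YUV_SHIFT - 1) equals (16 << RGB2YUV_SHIFT) plus half an
 * LSB (the +16 luma offset), and 257 << (RGB2YUV_SHIFT - 1) equals
 * (128 << RGB2YUV_SHIFT) plus half an LSB (the +128 chroma offset);
 * the 16-bit paths use 0x2001 and 0x10001 analogously. */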

#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))

#define r ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE) ? b_r : r_b)
#define b ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE) ? r_b : b_r)
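/* BGR48 stores its first and third 16-bit components in the opposite
 * order to RGB48, so r and b above pick b_r/r_b as appropriate and the
 * same templates serve both layouts. */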

static av_always_inline void rgb48ToY_c_template(uint16_t *dst,
                                                 const uint16_t *src, int width,
                                                 enum AVPixelFormat origin)
{
    int i;
    for (i = 0; i < width; i++) {
        unsigned int r_b = input_pixel(&src[i * 3 + 0]);
        unsigned int g   = input_pixel(&src[i * 3 + 1]);
        unsigned int b_r = input_pixel(&src[i * 3 + 2]);

        dst[i] = (RY * r + GY * g + BY * b + (0x2001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

static av_always_inline void rgb48ToUV_c_template(uint16_t *dstU,
                                                  uint16_t *dstV,
                                                  const uint16_t *src1,
                                                  const uint16_t *src2,
                                                  int width,
                                                  enum AVPixelFormat origin)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r_b = input_pixel(&src1[i * 3 + 0]);
        int g   = input_pixel(&src1[i * 3 + 1]);
        int b_r = input_pixel(&src1[i * 3 + 2]);

        dstU[i] = (RU * r + GU * g + BU * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

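/* Horizontally subsampled variant: each chroma output is computed from
 * the rounded average of two neighbouring input pixels. */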
static av_always_inline void rgb48ToUV_half_c_template(uint16_t *dstU,
                                                       uint16_t *dstV,
                                                       const uint16_t *src1,
                                                       const uint16_t *src2,
                                                       int width,
                                                       enum AVPixelFormat origin)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r_b = (input_pixel(&src1[6 * i + 0]) +
                   input_pixel(&src1[6 * i + 3]) + 1) >> 1;
        int g   = (input_pixel(&src1[6 * i + 1]) +
                   input_pixel(&src1[6 * i + 4]) + 1) >> 1;
        int b_r = (input_pixel(&src1[6 * i + 2]) +
                   input_pixel(&src1[6 * i + 5]) + 1) >> 1;

        dstU[i] = (RU * r + GU * g + BU * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

#undef r
#undef b
#undef input_pixel

#define rgb48funcs(pattern, BE_LE, origin)                              \
static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *_dst,              \
                                            const uint8_t *_src,        \
                                            int width,                  \
                                            uint32_t *unused)           \
{                                                                       \
    const uint16_t *src = (const uint16_t *)_src;                       \
    uint16_t *dst       = (uint16_t *)_dst;                             \
    rgb48ToY_c_template(dst, src, width, origin);                       \
}                                                                       \
                                                                        \
static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *_dstU,            \
                                             uint8_t *_dstV,            \
                                             const uint8_t *_src1,      \
                                             const uint8_t *_src2,      \
                                             int width,                 \
                                             uint32_t *unused)          \
{                                                                       \
    const uint16_t *src1 = (const uint16_t *)_src1,                     \
                   *src2 = (const uint16_t *)_src2;                     \
    uint16_t *dstU = (uint16_t *)_dstU,                                 \
             *dstV = (uint16_t *)_dstV;                                 \
    rgb48ToUV_c_template(dstU, dstV, src1, src2, width, origin);        \
}                                                                       \
                                                                        \
static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *_dstU,       \
                                                  uint8_t *_dstV,       \
                                                  const uint8_t *_src1, \
                                                  const uint8_t *_src2, \
                                                  int width,            \
                                                  uint32_t *unused)     \
{                                                                       \
    const uint16_t *src1 = (const uint16_t *)_src1,                     \
                   *src2 = (const uint16_t *)_src2;                     \
    uint16_t *dstU = (uint16_t *)_dstU,                                 \
             *dstV = (uint16_t *)_dstV;                                 \
    rgb48ToUV_half_c_template(dstU, dstV, src1, src2, width, origin);   \
}

rgb48funcs(rgb, LE, AV_PIX_FMT_RGB48LE)
rgb48funcs(rgb, BE, AV_PIX_FMT_RGB48BE)
rgb48funcs(bgr, LE, AV_PIX_FMT_BGR48LE)
rgb48funcs(bgr, BE, AV_PIX_FMT_BGR48BE)

#define input_pixel(i) ((origin == AV_PIX_FMT_RGBA ||                  \
                         origin == AV_PIX_FMT_BGRA ||                  \
                         origin == AV_PIX_FMT_ARGB ||                  \
                         origin == AV_PIX_FMT_ABGR)                    \
                        ? AV_RN32A(&src[(i) * 4])                      \
                        : (isBE(origin) ? AV_RB16(&src[(i) * 2])       \
                                        : AV_RL16(&src[(i) * 2])))
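/* The rgb16_32 templates below handle every packed 2- and 4-byte RGB/BGR
 * layout: shp pre-shifts the pixel (e.g. to skip a leading alpha byte),
 * maskr/maskg/maskb with shr/shg/shb isolate the components, rsh/gsh/bsh
 * scale the Q15 coefficients so all three products land on a common scale
 * even when a component is left above bit 0 or has fewer than 8 bits, and
 * S is the final right shift back to 8-bit output. */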

static av_always_inline void rgb16_32ToY_c_template(uint8_t *dst,
                                                    const uint8_t *src,
                                                    int width,
                                                    enum AVPixelFormat origin,
                                                    int shr, int shg,
                                                    int shb, int shp,
                                                    int maskr, int maskg,
                                                    int maskb, int rsh,
                                                    int gsh, int bsh, int S)
{
    const int ry = RY << rsh, gy = GY << gsh, by = BY << bsh;
    const unsigned rnd = 33u << (S - 1);
    int i;

    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b  = (px & maskb) >> shb;
        int g  = (px & maskg) >> shg;
        int r  = (px & maskr) >> shr;

        dst[i] = (ry * r + gy * g + by * b + rnd) >> S;
    }
}

static av_always_inline void rgb16_32ToUV_c_template(uint8_t *dstU,
                                                     uint8_t *dstV,
                                                     const uint8_t *src,
                                                     int width,
                                                     enum AVPixelFormat origin,
                                                     int shr, int shg,
                                                     int shb, int shp,
                                                     int maskr, int maskg,
                                                     int maskb, int rsh,
                                                     int gsh, int bsh, int S)
{
    const int ru = RU << rsh, gu = GU << gsh, bu = BU << bsh,
              rv = RV << rsh, gv = GV << gsh, bv = BV << bsh;
    const unsigned rnd = 257u << (S - 1);
    int i;

    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b  = (px & maskb) >> shb;
        int g  = (px & maskg) >> shg;
        int r  = (px & maskr) >> shr;

        dstU[i] = (ru * r + gu * g + bu * b + rnd) >> S;
        dstV[i] = (rv * r + gv * g + bv * b + rnd) >> S;
    }
}

static av_always_inline void rgb16_32ToUV_half_c_template(uint8_t *dstU,
                                                          uint8_t *dstV,
                                                          const uint8_t *src,
                                                          int width,
                                                          enum AVPixelFormat origin,
                                                          int shr, int shg,
                                                          int shb, int shp,
                                                          int maskr, int maskg,
                                                          int maskb, int rsh,
                                                          int gsh, int bsh, int S)
{
    const int ru = RU << rsh, gu = GU << gsh, bu = BU << bsh,
              rv = RV << rsh, gv = GV << gsh, bv = BV << bsh,
              maskgx = ~(maskr | maskb);
    const unsigned rnd = 257u << S;
    int i;

    maskr |= maskr << 1;
    maskb |= maskb << 1;
    maskg |= maskg << 1;
    for (i = 0; i < width; i++) {
        int px0 = input_pixel(2 * i + 0) >> shp;
        int px1 = input_pixel(2 * i + 1) >> shp;
        int b, r, g = (px0 & maskgx) + (px1 & maskgx);
        int rb = px0 + px1 - g;

        b = (rb & maskb) >> shb;
        if (shp ||
            origin == AV_PIX_FMT_BGR565LE || origin == AV_PIX_FMT_BGR565BE ||
            origin == AV_PIX_FMT_RGB565LE || origin == AV_PIX_FMT_RGB565BE) {
            g >>= shg;
        } else {
            g = (g & maskg) >> shg;
        }
        r = (rb & maskr) >> shr;

        dstU[i] = (ru * r + gu * g + bu * b + rnd) >> (S + 1);
        dstV[i] = (rv * r + gv * g + bv * b + rnd) >> (S + 1);
    }
}

#undef input_pixel

#define rgb16_32_wrapper(fmt, name, shr, shg, shb, shp, maskr,          \
                         maskg, maskb, rsh, gsh, bsh, S)                \
static void name ## ToY_c(uint8_t *dst, const uint8_t *src,             \
                          int width, uint32_t *unused)                  \
{                                                                       \
    rgb16_32ToY_c_template(dst, src, width, fmt, shr, shg, shb, shp,    \
                           maskr, maskg, maskb, rsh, gsh, bsh, S);      \
}                                                                       \
                                                                        \
static void name ## ToUV_c(uint8_t *dstU, uint8_t *dstV,                \
                           const uint8_t *src, const uint8_t *dummy,    \
                           int width, uint32_t *unused)                 \
{                                                                       \
    rgb16_32ToUV_c_template(dstU, dstV, src, width, fmt,                \
                            shr, shg, shb, shp,                         \
                            maskr, maskg, maskb, rsh, gsh, bsh, S);     \
}                                                                       \
                                                                        \
static void name ## ToUV_half_c(uint8_t *dstU, uint8_t *dstV,           \
                                const uint8_t *src,                     \
                                const uint8_t *dummy,                   \
                                int width, uint32_t *unused)            \
{                                                                       \
    rgb16_32ToUV_half_c_template(dstU, dstV, src, width, fmt,           \
                                 shr, shg, shb, shp,                    \
                                 maskr, maskg, maskb,                   \
                                 rsh, gsh, bsh, S);                     \
}

rgb16_32_wrapper(AV_PIX_FMT_BGR32, bgr32, 16, 0, 0, 0, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR32_1, bgr321, 16, 0, 0, 8, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB32, rgb32, 0, 0, 16, 0, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB32_1, rgb321, 0, 0, 16, 8, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR565LE, bgr16le, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR555LE, bgr15le, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_BGR444LE, bgr12le, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_RGB565LE, rgb16le, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB555LE, rgb15le, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_RGB444LE, rgb12le, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_BGR565BE, bgr16be, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR555BE, bgr15be, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_BGR444BE, bgr12be, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_RGB565BE, rgb16be, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB555BE, rgb15be, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_RGB444BE, rgb12be, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT + 4)
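/* As a concrete reading of the table above: the AV_PIX_FMT_RGB565LE entry
 * expands to rgb16leToY_c(), rgb16leToUV_c() and rgb16leToUV_half_c(),
 * which take R from bits 11-15, G from bits 5-10 and B from bits 0-4 of
 * each little-endian 16-bit pixel and use S = RGB2YUV_SHIFT + 8. */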

static void abgrToA_c(uint8_t *dst, const uint8_t *src, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[4 * i];
}

static void rgbaToA_c(uint8_t *dst, const uint8_t *src, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[4 * i + 3];
}

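/* The palette in *pal is assumed to hold one 32-bit entry per index with
 * Y in bits 0-7, U in bits 8-15 and V in bits 16-23, which is exactly what
 * the two readers below unpack. */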
static void palToY_c(uint8_t *dst, const uint8_t *src, int width, uint32_t *pal)
{
    int i;
    for (i = 0; i < width; i++) {
        int d = src[i];

        dst[i] = pal[d] & 0xFF;
    }
}

static void palToUV_c(uint8_t *dstU, uint8_t *dstV,
                      const uint8_t *src1, const uint8_t *src2,
                      int width, uint32_t *pal)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int p = pal[src1[i]];

        dstU[i] = p >> 8;
        dstV[i] = p >> 16;
    }
}

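/* 1 bpp input: every source byte carries eight pixels, most significant
 * bit first. MONOWHITE stores white as 0, so its bits are inverted before
 * being expanded to 0/255. */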
static void monowhite2Y_c(uint8_t *dst, const uint8_t *src,
                          int width, uint32_t *unused)
{
    int i, j;
    width = (width + 7) >> 3;
    for (i = 0; i < width; i++) {
        int d = ~src[i];
        for (j = 0; j < 8; j++)
            dst[8 * i + j] = ((d >> (7 - j)) & 1) * 255;
    }
}

static void monoblack2Y_c(uint8_t *dst, const uint8_t *src,
                          int width, uint32_t *unused)
{
    int i, j;
    width = (width + 7) >> 3;
    for (i = 0; i < width; i++) {
        int d = src[i];
        for (j = 0; j < 8; j++)
            dst[8 * i + j] = ((d >> (7 - j)) & 1) * 255;
    }
}

static void yuy2ToY_c(uint8_t *dst, const uint8_t *src, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[2 * i];
}

static void yuy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = src1[4 * i + 1];
        dstV[i] = src1[4 * i + 3];
    }
    assert(src1 == src2);
}

static void yvy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstV[i] = src1[4 * i + 1];
        dstU[i] = src1[4 * i + 3];
    }
    assert(src1 == src2);
}

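/* High-bit-depth planar YUV whose endianness differs from the host is
 * handled by simply byte-swapping each sample into native order. */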
static void bswap16Y_c(uint8_t *_dst, const uint8_t *_src, int width,
                       uint32_t *unused)
{
    int i;
    const uint16_t *src = (const uint16_t *)_src;
    uint16_t *dst       = (uint16_t *)_dst;
    for (i = 0; i < width; i++)
        dst[i] = av_bswap16(src[i]);
}

static void bswap16UV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *_src1,
                        const uint8_t *_src2, int width, uint32_t *unused)
{
    int i;
    const uint16_t *src1 = (const uint16_t *)_src1,
                   *src2 = (const uint16_t *)_src2;
    uint16_t *dstU = (uint16_t *)_dstU, *dstV = (uint16_t *)_dstV;
    for (i = 0; i < width; i++) {
        dstU[i] = av_bswap16(src1[i]);
        dstV[i] = av_bswap16(src2[i]);
    }
}

/* This is almost identical to the previous version, and exists only because
 * yuy2ToY/UV(dst, src + 1, ...) would have 100% unaligned accesses. */
static void uyvyToY_c(uint8_t *dst, const uint8_t *src, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[2 * i + 1];
}

static void uyvyToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = src1[4 * i + 0];
        dstV[i] = src1[4 * i + 2];
    }
    assert(src1 == src2);
}

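/* NV12/NV21 keep chroma in a single interleaved plane; the helper below
 * deinterleaves it, with the U/V destination order swapped between the
 * two wrappers. */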
static av_always_inline void nvXXtoUV_c(uint8_t *dst1, uint8_t *dst2,
                                        const uint8_t *src, int width)
{
    int i;
    for (i = 0; i < width; i++) {
        dst1[i] = src[2 * i + 0];
        dst2[i] = src[2 * i + 1];
    }
}

static void nv12ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstU, dstV, src1, width);
}

static void nv21ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstV, dstU, src1, width);
}

#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))

static void bgr24ToY_c(uint8_t *dst, const uint8_t *src,
                       int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int b = src[i * 3 + 0];
        int g = src[i * 3 + 1];
        int r = src[i * 3 + 2];

        dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
    }
}

static void bgr24ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                        const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int b = src1[3 * i + 0];
        int g = src1[3 * i + 1];
        int r = src1[3 * i + 2];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
    assert(src1 == src2);
}

static void bgr24ToUV_half_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                             const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int b = src1[6 * i + 0] + src1[6 * i + 3];
        int g = src1[6 * i + 1] + src1[6 * i + 4];
        int r = src1[6 * i + 2] + src1[6 * i + 5];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
        dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
    }
    assert(src1 == src2);
}

static void rgb24ToY_c(uint8_t *dst, const uint8_t *src, int width,
                       uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int r = src[i * 3 + 0];
        int g = src[i * 3 + 1];
        int b = src[i * 3 + 2];

        dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
    }
}

static void rgb24ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                        const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r = src1[3 * i + 0];
        int g = src1[3 * i + 1];
        int b = src1[3 * i + 2];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

static void rgb24ToUV_half_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                             const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r = src1[6 * i + 0] + src1[6 * i + 3];
        int g = src1[6 * i + 1] + src1[6 * i + 4];
        int b = src1[6 * i + 2] + src1[6 * i + 5];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
        dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
    }
}

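/* Planar RGB (GBRP) stores G in plane 0, B in plane 1 and R in plane 2,
 * hence the indexing order in the readers below. */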
static void planar_rgb_to_y(uint8_t *dst, const uint8_t *src[4], int width)
{
    int i;
    for (i = 0; i < width; i++) {
        int g = src[0][i];
        int b = src[1][i];
        int r = src[2][i];

        dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
    }
}

static void planar_rgb_to_uv(uint8_t *dstU, uint8_t *dstV, const uint8_t *src[4], int width)
{
    int i;
    for (i = 0; i < width; i++) {
        int g = src[0][i];
        int b = src[1][i];
        int r = src[2][i];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

#define rdpx(src) \
    is_be ? AV_RB16(src) : AV_RL16(src)
static av_always_inline void planar_rgb16_to_y(uint8_t *_dst, const uint8_t *_src[4],
                                               int width, int bpc, int is_be)
{
    int i;
    const uint16_t **src = (const uint16_t **)_src;
    uint16_t *dst        = (uint16_t *)_dst;
    for (i = 0; i < width; i++) {
        int g = rdpx(src[0] + i);
        int b = rdpx(src[1] + i);
        int r = rdpx(src[2] + i);

        dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT);
    }
}

static void planar_rgb9le_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 9, 0);
}

static void planar_rgb9be_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 9, 1);
}

static void planar_rgb10le_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 10, 0);
}

static void planar_rgb10be_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 10, 1);
}

static void planar_rgb16le_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 16, 0);
}

static void planar_rgb16be_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 16, 1);
}

static av_always_inline void planar_rgb16_to_uv(uint8_t *_dstU, uint8_t *_dstV,
                                                const uint8_t *_src[4], int width,
                                                int bpc, int is_be)
{
    int i;
    const uint16_t **src = (const uint16_t **)_src;
    uint16_t *dstU       = (uint16_t *)_dstU;
    uint16_t *dstV       = (uint16_t *)_dstV;
    for (i = 0; i < width; i++) {
        int g = rdpx(src[0] + i);
        int b = rdpx(src[1] + i);
        int r = rdpx(src[2] + i);

        dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT;
    }
}
#undef rdpx

static void planar_rgb9le_to_uv(uint8_t *dstU, uint8_t *dstV,
                                const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 9, 0);
}

static void planar_rgb9be_to_uv(uint8_t *dstU, uint8_t *dstV,
                                const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 9, 1);
}

static void planar_rgb10le_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 10, 0);
}

static void planar_rgb10be_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 10, 1);
}

static void planar_rgb16le_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 16, 0);
}

static void planar_rgb16be_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 16, 1);
}

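/* ff_sws_init_input_funcs() picks the input readers for c->srcFormat:
 * lumToYV12/chrToYV12/alpToYV12 unpack packed or foreign-endian formats
 * into the scaler's planar intermediate lines, readLumPlanar/readChrPlanar
 * handle planar RGB, and the *_half_c chroma variants are used when chroma
 * is horizontally subsampled, averaging two source pixels per chroma
 * sample. The scaler core is then expected to call the selected function
 * once per input line, roughly c->lumToYV12(buf, srcLine, srcW, pal). */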
av_cold void ff_sws_init_input_funcs(SwsContext *c)
{
    enum AVPixelFormat srcFormat = c->srcFormat;

    c->chrToYV12 = NULL;
    switch (srcFormat) {
    case AV_PIX_FMT_YUYV422:
        c->chrToYV12 = yuy2ToUV_c;
        break;
    case AV_PIX_FMT_YVYU422:
        c->chrToYV12 = yvy2ToUV_c;
        break;
    case AV_PIX_FMT_UYVY422:
        c->chrToYV12 = uyvyToUV_c;
        break;
    case AV_PIX_FMT_NV12:
        c->chrToYV12 = nv12ToUV_c;
        break;
    case AV_PIX_FMT_NV21:
        c->chrToYV12 = nv21ToUV_c;
        break;
    case AV_PIX_FMT_RGB8:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_PAL8:
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_RGB4_BYTE:
        c->chrToYV12 = palToUV_c;
        break;
    case AV_PIX_FMT_GBRP9LE:
        c->readChrPlanar = planar_rgb9le_to_uv;
        break;
    case AV_PIX_FMT_GBRP10LE:
        c->readChrPlanar = planar_rgb10le_to_uv;
        break;
    case AV_PIX_FMT_GBRP16LE:
        c->readChrPlanar = planar_rgb16le_to_uv;
        break;
    case AV_PIX_FMT_GBRP9BE:
        c->readChrPlanar = planar_rgb9be_to_uv;
        break;
    case AV_PIX_FMT_GBRP10BE:
        c->readChrPlanar = planar_rgb10be_to_uv;
        break;
    case AV_PIX_FMT_GBRP16BE:
        c->readChrPlanar = planar_rgb16be_to_uv;
        break;
    case AV_PIX_FMT_GBRP:
        c->readChrPlanar = planar_rgb_to_uv;
        break;
#if HAVE_BIGENDIAN
    case AV_PIX_FMT_YUV444P9LE:
    case AV_PIX_FMT_YUV422P9LE:
    case AV_PIX_FMT_YUV420P9LE:
    case AV_PIX_FMT_YUV422P10LE:
    case AV_PIX_FMT_YUV444P10LE:
    case AV_PIX_FMT_YUV420P10LE:
    case AV_PIX_FMT_YUV420P16LE:
    case AV_PIX_FMT_YUV422P16LE:
    case AV_PIX_FMT_YUV444P16LE:
    case AV_PIX_FMT_YUVA444P9LE:
    case AV_PIX_FMT_YUVA422P9LE:
    case AV_PIX_FMT_YUVA420P9LE:
    case AV_PIX_FMT_YUVA422P10LE:
    case AV_PIX_FMT_YUVA444P10LE:
    case AV_PIX_FMT_YUVA420P10LE:
    case AV_PIX_FMT_YUVA420P16LE:
    case AV_PIX_FMT_YUVA422P16LE:
    case AV_PIX_FMT_YUVA444P16LE:
        c->chrToYV12 = bswap16UV_c;
        break;
#else
    case AV_PIX_FMT_YUV444P9BE:
    case AV_PIX_FMT_YUV422P9BE:
    case AV_PIX_FMT_YUV420P9BE:
    case AV_PIX_FMT_YUV444P10BE:
    case AV_PIX_FMT_YUV422P10BE:
    case AV_PIX_FMT_YUV420P10BE:
    case AV_PIX_FMT_YUV420P16BE:
    case AV_PIX_FMT_YUV422P16BE:
    case AV_PIX_FMT_YUV444P16BE:
    case AV_PIX_FMT_YUVA444P9BE:
    case AV_PIX_FMT_YUVA422P9BE:
    case AV_PIX_FMT_YUVA420P9BE:
    case AV_PIX_FMT_YUVA422P10BE:
    case AV_PIX_FMT_YUVA444P10BE:
    case AV_PIX_FMT_YUVA420P10BE:
    case AV_PIX_FMT_YUVA420P16BE:
    case AV_PIX_FMT_YUVA422P16BE:
    case AV_PIX_FMT_YUVA444P16BE:
        c->chrToYV12 = bswap16UV_c;
        break;
#endif
    }
    if (c->chrSrcHSubSample) {
        switch (srcFormat) {
        case AV_PIX_FMT_RGB48BE:
            c->chrToYV12 = rgb48BEToUV_half_c;
            break;
        case AV_PIX_FMT_RGB48LE:
            c->chrToYV12 = rgb48LEToUV_half_c;
            break;
        case AV_PIX_FMT_BGR48BE:
            c->chrToYV12 = bgr48BEToUV_half_c;
            break;
        case AV_PIX_FMT_BGR48LE:
            c->chrToYV12 = bgr48LEToUV_half_c;
            break;
        case AV_PIX_FMT_RGB32:
            c->chrToYV12 = bgr32ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB32_1:
            c->chrToYV12 = bgr321ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR24:
            c->chrToYV12 = bgr24ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR565LE:
            c->chrToYV12 = bgr16leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR565BE:
            c->chrToYV12 = bgr16beToUV_half_c;
            break;
        case AV_PIX_FMT_BGR555LE:
            c->chrToYV12 = bgr15leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR555BE:
            c->chrToYV12 = bgr15beToUV_half_c;
            break;
        case AV_PIX_FMT_BGR444LE:
            c->chrToYV12 = bgr12leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR444BE:
            c->chrToYV12 = bgr12beToUV_half_c;
            break;
        case AV_PIX_FMT_BGR32:
            c->chrToYV12 = rgb32ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR32_1:
            c->chrToYV12 = rgb321ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB24:
            c->chrToYV12 = rgb24ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB565LE:
            c->chrToYV12 = rgb16leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB565BE:
            c->chrToYV12 = rgb16beToUV_half_c;
            break;
        case AV_PIX_FMT_RGB555LE:
            c->chrToYV12 = rgb15leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB555BE:
            c->chrToYV12 = rgb15beToUV_half_c;
            break;
        case AV_PIX_FMT_RGB444LE:
            c->chrToYV12 = rgb12leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB444BE:
            c->chrToYV12 = rgb12beToUV_half_c;
            break;
        }
    } else {
        switch (srcFormat) {
        case AV_PIX_FMT_RGB48BE:
            c->chrToYV12 = rgb48BEToUV_c;
            break;
        case AV_PIX_FMT_RGB48LE:
            c->chrToYV12 = rgb48LEToUV_c;
            break;
        case AV_PIX_FMT_BGR48BE:
            c->chrToYV12 = bgr48BEToUV_c;
            break;
        case AV_PIX_FMT_BGR48LE:
            c->chrToYV12 = bgr48LEToUV_c;
            break;
        case AV_PIX_FMT_RGB32:
            c->chrToYV12 = bgr32ToUV_c;
            break;
        case AV_PIX_FMT_RGB32_1:
            c->chrToYV12 = bgr321ToUV_c;
            break;
        case AV_PIX_FMT_BGR24:
            c->chrToYV12 = bgr24ToUV_c;
            break;
        case AV_PIX_FMT_BGR565LE:
            c->chrToYV12 = bgr16leToUV_c;
            break;
        case AV_PIX_FMT_BGR565BE:
            c->chrToYV12 = bgr16beToUV_c;
            break;
        case AV_PIX_FMT_BGR555LE:
            c->chrToYV12 = bgr15leToUV_c;
            break;
        case AV_PIX_FMT_BGR555BE:
            c->chrToYV12 = bgr15beToUV_c;
            break;
        case AV_PIX_FMT_BGR444LE:
            c->chrToYV12 = bgr12leToUV_c;
            break;
        case AV_PIX_FMT_BGR444BE:
            c->chrToYV12 = bgr12beToUV_c;
            break;
        case AV_PIX_FMT_BGR32:
            c->chrToYV12 = rgb32ToUV_c;
            break;
        case AV_PIX_FMT_BGR32_1:
            c->chrToYV12 = rgb321ToUV_c;
            break;
        case AV_PIX_FMT_RGB24:
            c->chrToYV12 = rgb24ToUV_c;
            break;
        case AV_PIX_FMT_RGB565LE:
            c->chrToYV12 = rgb16leToUV_c;
            break;
        case AV_PIX_FMT_RGB565BE:
            c->chrToYV12 = rgb16beToUV_c;
            break;
        case AV_PIX_FMT_RGB555LE:
            c->chrToYV12 = rgb15leToUV_c;
            break;
        case AV_PIX_FMT_RGB555BE:
            c->chrToYV12 = rgb15beToUV_c;
            break;
        case AV_PIX_FMT_RGB444LE:
            c->chrToYV12 = rgb12leToUV_c;
            break;
        case AV_PIX_FMT_RGB444BE:
            c->chrToYV12 = rgb12beToUV_c;
            break;
        }
    }

    c->lumToYV12 = NULL;
    c->alpToYV12 = NULL;
    switch (srcFormat) {
    case AV_PIX_FMT_GBRP9LE:
        c->readLumPlanar = planar_rgb9le_to_y;
        break;
    case AV_PIX_FMT_GBRP10LE:
        c->readLumPlanar = planar_rgb10le_to_y;
        break;
    case AV_PIX_FMT_GBRP16LE:
        c->readLumPlanar = planar_rgb16le_to_y;
        break;
    case AV_PIX_FMT_GBRP9BE:
        c->readLumPlanar = planar_rgb9be_to_y;
        break;
    case AV_PIX_FMT_GBRP10BE:
        c->readLumPlanar = planar_rgb10be_to_y;
        break;
    case AV_PIX_FMT_GBRP16BE:
        c->readLumPlanar = planar_rgb16be_to_y;
        break;
    case AV_PIX_FMT_GBRP:
        c->readLumPlanar = planar_rgb_to_y;
        break;
#if HAVE_BIGENDIAN
    case AV_PIX_FMT_YUV444P9LE:
    case AV_PIX_FMT_YUV422P9LE:
    case AV_PIX_FMT_YUV420P9LE:
    case AV_PIX_FMT_YUV444P10LE:
    case AV_PIX_FMT_YUV422P10LE:
    case AV_PIX_FMT_YUV420P10LE:
    case AV_PIX_FMT_YUV420P16LE:
    case AV_PIX_FMT_YUV422P16LE:
    case AV_PIX_FMT_YUV444P16LE:
    case AV_PIX_FMT_GRAY16LE:
        c->lumToYV12 = bswap16Y_c;
        break;
    case AV_PIX_FMT_YUVA444P9LE:
    case AV_PIX_FMT_YUVA422P9LE:
    case AV_PIX_FMT_YUVA420P9LE:
    case AV_PIX_FMT_YUVA444P10LE:
    case AV_PIX_FMT_YUVA422P10LE:
    case AV_PIX_FMT_YUVA420P10LE:
    case AV_PIX_FMT_YUVA420P16LE:
    case AV_PIX_FMT_YUVA422P16LE:
    case AV_PIX_FMT_YUVA444P16LE:
        c->lumToYV12 = bswap16Y_c;
        c->alpToYV12 = bswap16Y_c;
        break;
#else
    case AV_PIX_FMT_YUV444P9BE:
    case AV_PIX_FMT_YUV422P9BE:
    case AV_PIX_FMT_YUV420P9BE:
    case AV_PIX_FMT_YUV444P10BE:
    case AV_PIX_FMT_YUV422P10BE:
    case AV_PIX_FMT_YUV420P10BE:
    case AV_PIX_FMT_YUV420P16BE:
    case AV_PIX_FMT_YUV422P16BE:
    case AV_PIX_FMT_YUV444P16BE:
    case AV_PIX_FMT_GRAY16BE:
        c->lumToYV12 = bswap16Y_c;
        break;
    case AV_PIX_FMT_YUVA444P9BE:
    case AV_PIX_FMT_YUVA422P9BE:
    case AV_PIX_FMT_YUVA420P9BE:
    case AV_PIX_FMT_YUVA444P10BE:
    case AV_PIX_FMT_YUVA422P10BE:
    case AV_PIX_FMT_YUVA420P10BE:
    case AV_PIX_FMT_YUVA420P16BE:
    case AV_PIX_FMT_YUVA422P16BE:
    case AV_PIX_FMT_YUVA444P16BE:
        c->lumToYV12 = bswap16Y_c;
        c->alpToYV12 = bswap16Y_c;
        break;
#endif
    case AV_PIX_FMT_YUYV422:
    case AV_PIX_FMT_YVYU422:
    case AV_PIX_FMT_YA8:
        c->lumToYV12 = yuy2ToY_c;
        break;
    case AV_PIX_FMT_UYVY422:
        c->lumToYV12 = uyvyToY_c;
        break;
    case AV_PIX_FMT_BGR24:
        c->lumToYV12 = bgr24ToY_c;
        break;
    case AV_PIX_FMT_BGR565LE:
        c->lumToYV12 = bgr16leToY_c;
        break;
    case AV_PIX_FMT_BGR565BE:
        c->lumToYV12 = bgr16beToY_c;
        break;
    case AV_PIX_FMT_BGR555LE:
        c->lumToYV12 = bgr15leToY_c;
        break;
    case AV_PIX_FMT_BGR555BE:
        c->lumToYV12 = bgr15beToY_c;
        break;
    case AV_PIX_FMT_BGR444LE:
        c->lumToYV12 = bgr12leToY_c;
        break;
    case AV_PIX_FMT_BGR444BE:
        c->lumToYV12 = bgr12beToY_c;
        break;
    case AV_PIX_FMT_RGB24:
        c->lumToYV12 = rgb24ToY_c;
        break;
    case AV_PIX_FMT_RGB565LE:
        c->lumToYV12 = rgb16leToY_c;
        break;
    case AV_PIX_FMT_RGB565BE:
        c->lumToYV12 = rgb16beToY_c;
        break;
    case AV_PIX_FMT_RGB555LE:
        c->lumToYV12 = rgb15leToY_c;
        break;
    case AV_PIX_FMT_RGB555BE:
        c->lumToYV12 = rgb15beToY_c;
        break;
    case AV_PIX_FMT_RGB444LE:
        c->lumToYV12 = rgb12leToY_c;
        break;
    case AV_PIX_FMT_RGB444BE:
        c->lumToYV12 = rgb12beToY_c;
        break;
    case AV_PIX_FMT_RGB8:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_PAL8:
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_RGB4_BYTE:
        c->lumToYV12 = palToY_c;
        break;
    case AV_PIX_FMT_MONOBLACK:
        c->lumToYV12 = monoblack2Y_c;
        break;
    case AV_PIX_FMT_MONOWHITE:
        c->lumToYV12 = monowhite2Y_c;
        break;
    case AV_PIX_FMT_RGB32:
        c->lumToYV12 = bgr32ToY_c;
        break;
    case AV_PIX_FMT_RGB32_1:
        c->lumToYV12 = bgr321ToY_c;
        break;
    case AV_PIX_FMT_BGR32:
        c->lumToYV12 = rgb32ToY_c;
        break;
    case AV_PIX_FMT_BGR32_1:
        c->lumToYV12 = rgb321ToY_c;
        break;
    case AV_PIX_FMT_RGB48BE:
        c->lumToYV12 = rgb48BEToY_c;
        break;
    case AV_PIX_FMT_RGB48LE:
        c->lumToYV12 = rgb48LEToY_c;
        break;
    case AV_PIX_FMT_BGR48BE:
        c->lumToYV12 = bgr48BEToY_c;
        break;
    case AV_PIX_FMT_BGR48LE:
        c->lumToYV12 = bgr48LEToY_c;
        break;
    }
    if (c->alpPixBuf) {
        switch (srcFormat) {
        case AV_PIX_FMT_BGRA:
        case AV_PIX_FMT_RGBA:
            c->alpToYV12 = rgbaToA_c;
            break;
        case AV_PIX_FMT_ABGR:
        case AV_PIX_FMT_ARGB:
            c->alpToYV12 = abgrToA_c;
            break;
        case AV_PIX_FMT_YA8:
            c->alpToYV12 = uyvyToY_c;
            break;
        }
    }
}