/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * The C code (not assembly, MMX, ...) of this file can be used
 * under the LGPL license.
 */

#undef REAL_MOVNTQ
#undef MOVNTQ
#undef PAVGB
#undef PREFETCH
#undef PREFETCHW
#undef EMMS
#undef SFENCE

#ifdef HAVE_3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS "femms"
#else
#define EMMS "emms"
#endif

#ifdef HAVE_3DNOW
#define PREFETCH "prefetch"
#define PREFETCHW "prefetchw"
#elif defined (HAVE_MMX2)
#define PREFETCH "prefetchnta"
#define PREFETCHW "prefetcht0"
#else
#define PREFETCH " # nop"
#define PREFETCHW " # nop"
#endif

#ifdef HAVE_MMX2
#define SFENCE "sfence"
#else
#define SFENCE " # nop"
#endif

#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

#ifdef HAVE_MMX2
#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
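
/* MOVNTQ forwards to REAL_MOVNTQ so that its arguments are macro-expanded
 * before REAL_MOVNTQ stringizes them with #a/#b; invoking REAL_MOVNTQ
 * directly would paste the argument tokens unexpanded. This is the usual
 * two-level stringification idiom. */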

#ifdef HAVE_ALTIVEC
#include "swscale_altivec_template.c"
#endif

#define YSCALEYUV2YV12X(x, offset, dest, width) \
    __asm__ volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    ASMALIGN(4) /* FIXME Unroll? */\
    "1: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
    "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* srcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm3 \n\t"\
    "paddw %%mm5, %%mm4 \n\t"\
    " jnz 1b \n\t"\
    "psraw $3, %%mm3 \n\t"\
    "psraw $3, %%mm4 \n\t"\
    "packuswb %%mm4, %%mm3 \n\t"\
    MOVNTQ(%%mm3, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "cmp %2, %%"REG_a" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "jb 1b \n\t"\
    :: "r" (&c->redDither),\
       "r" (dest), "g" (width)\
    : "%"REG_a, "%"REG_d, "%"REG_S\
    );
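
/* A rough scalar sketch of what YSCALEYUV2YV12X computes (illustrative
 * only; see yuv2yuvXinC() for the exact C reference used on non-MMX
 * paths). pmulhw keeps the high 16 bits of each 16x16 product and
 * psraw $3 drops the remaining fraction before the saturating pack:
 *
 *     for (i = 0; i < width; i++) {
 *         int val = rounder;                         // VROUNDER_OFFSET bias
 *         for (j = 0; j < filterSize; j++)
 *             val += (src[j][i] * filter[j]) >> 16;  // pmulhw
 *         dest[i] = av_clip_uint8(val >> 3);         // psraw $3 + packuswb
 *     }
 */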

#define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
    __asm__ volatile(\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    ASMALIGN(4) \
    "1: \n\t"\
    "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* srcData */\
    "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
    "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm1 \n\t" /* srcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm1, %%mm0 \n\t"\
    "punpckhwd %%mm1, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm3 \n\t"\
    "paddd %%mm0, %%mm4 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* srcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm2 \n\t"\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "paddd %%mm2, %%mm6 \n\t"\
    "paddd %%mm0, %%mm7 \n\t"\
    " jnz 1b \n\t"\
    "psrad $16, %%mm4 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm4 \n\t"\
    "packssdw %%mm7, %%mm6 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm0, %%mm6 \n\t"\
    "psraw $3, %%mm4 \n\t"\
    "psraw $3, %%mm6 \n\t"\
    "packuswb %%mm6, %%mm4 \n\t"\
    MOVNTQ(%%mm4, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "cmp %2, %%"REG_a" \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "jb 1b \n\t"\
    :: "r" (&c->redDither),\
       "r" (dest), "g" (width)\
    : "%"REG_a, "%"REG_d, "%"REG_S\
    );

#define YSCALEYUV2YV121 \
    "mov %2, %%"REG_a" \n\t"\
    ASMALIGN(4) /* FIXME Unroll? */\
    "1: \n\t"\
    "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
    "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
    "psraw $7, %%mm0 \n\t"\
    "psraw $7, %%mm1 \n\t"\
    "packuswb %%mm1, %%mm0 \n\t"\
    MOVNTQ(%%mm0, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "jnc 1b \n\t"

#define YSCALEYUV2YV121_ACCURATE \
    "mov %2, %%"REG_a" \n\t"\
    "pcmpeqw %%mm7, %%mm7 \n\t"\
    "psrlw $15, %%mm7 \n\t"\
    "psllw $6, %%mm7 \n\t"\
    ASMALIGN(4) /* FIXME Unroll? */\
    "1: \n\t"\
    "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
    "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
    "paddsw %%mm7, %%mm0 \n\t"\
    "paddsw %%mm7, %%mm1 \n\t"\
    "psraw $7, %%mm0 \n\t"\
    "psraw $7, %%mm1 \n\t"\
    "packuswb %%mm1, %%mm0 \n\t"\
    MOVNTQ(%%mm0, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "jnc 1b \n\t"
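
/* The pcmpeqw/psrlw/psllw sequence at the top of YSCALEYUV2YV121_ACCURATE
 * builds the rounding constant without a memory load: pcmpeqw sets every
 * word to 0xFFFF, psrlw $15 reduces that to 0x0001, and psllw $6 gives
 * 0x0040, i.e. the +64 that makes the following psraw $7 round to nearest. */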

/*
    :: "m" (-lumFilterSize), "m" (-chrFilterSize),
       "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
       "r" (dest), "m" (dstW),
       "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
    : "%eax", "%ebx", "%ecx", "%edx", "%esi"
*/
#define YSCALEYUV2PACKEDX \
    __asm__ volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    ASMALIGN(4)\
    "nop \n\t"\
    "1: \n\t"\
    "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
    "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm3 \n\t"\
    "paddw %%mm5, %%mm4 \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    " jnz 2b \n\t"\
\
    "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm1 \n\t"\
    "movq %%mm1, %%mm7 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm1 \n\t"\
    "paddw %%mm5, %%mm7 \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    " jnz 2b \n\t"\

#define YSCALEYUV2PACKEDX_END \
    :: "r" (&c->redDither), \
       "m" (dummy), "m" (dummy), "m" (dummy),\
       "r" (dest), "m" (dstW) \
    : "%"REG_a, "%"REG_d, "%"REG_S \
    );

#define YSCALEYUV2PACKEDX_ACCURATE \
    __asm__ volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    ASMALIGN(4)\
    "nop \n\t"\
    "1: \n\t"\
    "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
    "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
    "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm1, %%mm0 \n\t"\
    "punpckhwd %%mm1, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" /* filterCoeff */\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm3 \n\t"\
    "paddd %%mm0, %%mm4 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm2 \n\t"\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "paddd %%mm2, %%mm6 \n\t"\
    "paddd %%mm0, %%mm7 \n\t"\
    " jnz 2b \n\t"\
    "psrad $16, %%mm4 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm4 \n\t"\
    "packssdw %%mm7, %%mm6 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm0, %%mm6 \n\t"\
    "movq %%mm4, "U_TEMP"(%0) \n\t"\
    "movq %%mm6, "V_TEMP"(%0) \n\t"\
\
    "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm4, %%mm0 \n\t"\
    "punpckhwd %%mm4, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm3 \n\t"\
    "paddd %%mm0, %%mm1 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm2 \n\t"\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "paddd %%mm2, %%mm7 \n\t"\
    "paddd %%mm0, %%mm6 \n\t"\
    " jnz 2b \n\t"\
    "psrad $16, %%mm1 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm1 \n\t"\
    "packssdw %%mm6, %%mm7 \n\t"\
    "paddw %%mm0, %%mm1 \n\t"\
    "paddw %%mm0, %%mm7 \n\t"\
    "movq "U_TEMP"(%0), %%mm3 \n\t"\
    "movq "V_TEMP"(%0), %%mm4 \n\t"\

#define YSCALEYUV2RGBX \
    "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
    "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"
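
/* YSCALEYUV2RGBX applies the usual fixed-point YUV->RGB transform to the
 * 16-bit intermediates; with illustrative coefficient names (the real
 * values come from the Y_COEFF/U*_COEFF/V*_COEFF slots of the SwsContext,
 * so the same code serves different colorspaces) it computes roughly:
 *
 *     R = cy*(Y - y_off) + crv*(V - 128)
 *     G = cy*(Y - y_off) + cgu*(U - 128) + cgv*(V - 128)   // cgu, cgv < 0
 *     B = cy*(Y - y_off) + cbu*(U - 128)
 *
 * with pmulhw once more supplying the >>16 of each product. */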

#define REAL_YSCALEYUV2PACKED(index, c) \
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
    "psraw $3, %%mm0 \n\t"\
    "psraw $3, %%mm1 \n\t"\
    "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $7, %%mm3 \n\t" /* uvbuf1[eax] >>7*/\
    "psraw $7, %%mm4 \n\t" /* uvbuf1[eax+2048] >>7*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]*uvalpha1 + uvbuf1[eax]*(1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]*uvalpha1 + uvbuf1[eax+2048]*(1-uvalpha1)*/\
    "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $7, %%mm1 \n\t" /* buf1[eax] >>7*/\
    "psraw $7, %%mm7 \n\t" /* buf1[eax] >>7*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]*yalpha1 + buf1[eax]*(1-yalpha1)*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]*yalpha1 + buf1[eax]*(1-yalpha1)*/\

#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)

#define REAL_YSCALEYUV2RGB(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf1[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf1[eax+2048] >>4*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]*uvalpha1 + uvbuf1[eax]*(1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]*uvalpha1 + uvbuf1[eax+2048]*(1-uvalpha1)*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $4, %%mm1 \n\t" /* buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf1[eax] >>4*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]*yalpha1 + buf1[eax]*(1-yalpha1)*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]*yalpha1 + buf1[eax]*(1-yalpha1)*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"
#define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)

#define REAL_YSCALEYUV2PACKED1(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $7, %%mm3 \n\t" \
    "psraw $7, %%mm4 \n\t" \
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t" \
    "psraw $7, %%mm7 \n\t" \

#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)

#define REAL_YSCALEYUV2RGB1(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] >>4*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"
#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)

#define REAL_YSCALEYUV2PACKED1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $8, %%mm3 \n\t" \
    "psrlw $8, %%mm4 \n\t" \
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t" \
    "psraw $7, %%mm7 \n\t"
#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)

// do vertical chrominance interpolation
#define REAL_YSCALEYUV2RGB1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
    "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"
#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)

#define REAL_WRITEBGR32(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
    MOVNTQ(%%mm0, (dst, index, 4))\
    MOVNTQ(%%mm2, 8(dst, index, 4))\
    MOVNTQ(%%mm1, 16(dst, index, 4))\
    MOVNTQ(%%mm3, 24(dst, index, 4))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)

#define REAL_WRITERGB16(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
\
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
\
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
\
    "psllq $3, %%mm3 \n\t"\
    "psllq $3, %%mm4 \n\t"\
\
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
\
    MOVNTQ(%%mm2, (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)

#define REAL_WRITERGB15(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
    "psrlq $1, %%mm5 \n\t"\
\
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
\
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
\
    "psllq $2, %%mm3 \n\t"\
    "psllq $2, %%mm4 \n\t"\
\
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
\
    MOVNTQ(%%mm2, (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)

#define WRITEBGR24OLD(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
    "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
    "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
    "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 0 */\
    "pand "MANGLE(bm11111000)", %%mm0 \n\t" /* 00RGB000 0.5 */\
    "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
    "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
    "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
    "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
\
    "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
    "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
    "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
    "pand "MANGLE(bm00001111)", %%mm2 \n\t" /* 0000RGBR 1 */\
    "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
    "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
    "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 2 */\
    "pand "MANGLE(bm11111000)", %%mm1 \n\t" /* 00RGB000 2.5 */\
    "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
    "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
    "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
    "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
\
    "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
    "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
    "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
    "pand "MANGLE(bm00000111)", %%mm5 \n\t" /* 00000RGB 3 */\
    "pand "MANGLE(bm11111000)", %%mm3 \n\t" /* 00RGB000 3.5 */\
    "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
    "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
    "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
\
    MOVNTQ(%%mm0, (dst))\
    MOVNTQ(%%mm2, 8(dst))\
    MOVNTQ(%%mm3, 16(dst))\
    "add $24, "#dst" \n\t"\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR24MMX(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
    "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
    "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
\
    "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
    "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
    "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
    "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
\
    "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
    "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
    "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
    "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
\
    "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
    "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
    "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
    MOVNTQ(%%mm0, (dst))\
\
    "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
    "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
    "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
    MOVNTQ(%%mm6, 8(dst))\
\
    "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
    "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
    "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
    MOVNTQ(%%mm5, 16(dst))\
\
    "add $24, "#dst" \n\t"\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR24MMX2(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
    "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
    "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
    "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
    "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
\
    "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
    "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
    "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
\
    "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
    "por %%mm1, %%mm6 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, (dst))\
\
    "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
    "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
    "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
    "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
\
    "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
    "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
    "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
\
    "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 8(dst))\
\
    "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B7 B6 */\
    "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
    "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
\
    "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
    "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
    "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
\
    "por %%mm1, %%mm3 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 16(dst))\
\
    "add $24, "#dst" \n\t"\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#ifdef HAVE_MMX2
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
#else
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
#endif

#define REAL_WRITEYUY2(dst, dstw, index) \
    "packuswb %%mm3, %%mm3 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm4, %%mm3 \n\t"\
    "movq %%mm1, %%mm7 \n\t"\
    "punpcklbw %%mm3, %%mm1 \n\t"\
    "punpckhbw %%mm3, %%mm7 \n\t"\
\
    MOVNTQ(%%mm1, (dst, index, 2))\
    MOVNTQ(%%mm7, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)


static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                                    int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
{
#ifdef HAVE_MMX
    if(!(c->flags & SWS_BITEXACT)){
        if (c->flags & SWS_ACCURATE_RND){
            if (uDest){
                YSCALEYUV2YV12X_ACCURATE(   "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
                YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
            }

            YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
        }else{
            if (uDest){
                YSCALEYUV2YV12X(   "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
                YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
            }

            YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
        }
        return;
    }
#endif
#ifdef HAVE_ALTIVEC
    yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
                          chrFilter, chrSrc, chrFilterSize,
                          dest, uDest, vDest, dstW, chrDstW);
#else //HAVE_ALTIVEC
    yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
                chrFilter, chrSrc, chrFilterSize,
                dest, uDest, vDest, dstW, chrDstW);
#endif //!HAVE_ALTIVEC
}

static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                                     int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                                     uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
{
    yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
                 chrFilter, chrSrc, chrFilterSize,
                 dest, uDest, dstW, chrDstW, dstFormat);
}

static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chrSrc,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
{
    int i;
#ifdef HAVE_MMX
    if(!(c->flags & SWS_BITEXACT)){
        long p= uDest ? 3 : 1;
        uint8_t *src[3]= {lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
        uint8_t *dst[3]= {dest, uDest, vDest};
        long counter[3] = {dstW, chrDstW, chrDstW};

        if (c->flags & SWS_ACCURATE_RND){
            while(p--){
                __asm__ volatile(
                    YSCALEYUV2YV121_ACCURATE
                    :: "r" (src[p]), "r" (dst[p] + counter[p]),
                       "g" (-counter[p])
                    : "%"REG_a
                );
            }
        }else{
            while(p--){
                __asm__ volatile(
                    YSCALEYUV2YV121
                    :: "r" (src[p]), "r" (dst[p] + counter[p]),
                       "g" (-counter[p])
                    : "%"REG_a
                );
            }
        }
        return;
    }
#endif
    for (i=0; i<dstW; i++)
    {
        int val= (lumSrc[i]+64)>>7;

        if (val&256){ // bit 8 set means the value left the 0..255 range
            if (val<0) val=0;
            else       val=255;
        }

        dest[i]= val;
    }

    if (uDest)
        for (i=0; i<chrDstW; i++)
        {
            int u=(chrSrc[i       ]+64)>>7;
            int v=(chrSrc[i + VOFW]+64)>>7;

            if ((u|v)&256){ // clip only when at least one of them overflowed
                if (u<0)        u=0;
                else if (u>255) u=255;
                if (v<0)        v=0;
                else if (v>255) v=255;
            }

            uDest[i]= u;
            vDest[i]= v;
        }
}


/**
 * vertical scale YV12 to RGB
 */
static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                                       int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                                       uint8_t *dest, long dstW, long dstY)
{
#ifdef HAVE_MMX
    long dummy=0;
    if(!(c->flags & SWS_BITEXACT)){
        if (c->flags & SWS_ACCURATE_RND){
            switch(c->dstFormat){
            case PIX_FMT_RGB32:
                YSCALEYUV2PACKEDX_ACCURATE
                YSCALEYUV2RGBX
                WRITEBGR32(%4, %5, %%REGa)

                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_BGR24:
                YSCALEYUV2PACKEDX_ACCURATE
                YSCALEYUV2RGBX
                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
                "add %4, %%"REG_c" \n\t"
                WRITEBGR24(%%REGc, %5, %%REGa)

                :: "r" (&c->redDither),
                   "m" (dummy), "m" (dummy), "m" (dummy),
                   "r" (dest), "m" (dstW)
                : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
                );
                return;
            case PIX_FMT_RGB555:
                YSCALEYUV2PACKEDX_ACCURATE
                YSCALEYUV2RGBX
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
                "paddusb "RED_DITHER"(%0), %%mm5\n\t"
#endif

                WRITERGB15(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_RGB565:
                YSCALEYUV2PACKEDX_ACCURATE
                YSCALEYUV2RGBX
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
                "paddusb "RED_DITHER"(%0), %%mm5\n\t"
#endif

                WRITERGB16(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_YUYV422:
                YSCALEYUV2PACKEDX_ACCURATE
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */

                "psraw $3, %%mm3 \n\t"
                "psraw $3, %%mm4 \n\t"
                "psraw $3, %%mm1 \n\t"
                "psraw $3, %%mm7 \n\t"
                WRITEYUY2(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            }
        }else{
            switch(c->dstFormat)
            {
            case PIX_FMT_RGB32:
                YSCALEYUV2PACKEDX
                YSCALEYUV2RGBX
                WRITEBGR32(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_BGR24:
                YSCALEYUV2PACKEDX
                YSCALEYUV2RGBX
                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
                "add %4, %%"REG_c" \n\t"
                WRITEBGR24(%%REGc, %5, %%REGa)

                :: "r" (&c->redDither),
                   "m" (dummy), "m" (dummy), "m" (dummy),
                   "r" (dest), "m" (dstW)
                : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
                );
                return;
            case PIX_FMT_RGB555:
                YSCALEYUV2PACKEDX
                YSCALEYUV2RGBX
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif

                WRITERGB15(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_RGB565:
                YSCALEYUV2PACKEDX
                YSCALEYUV2RGBX
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif

                WRITERGB16(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_YUYV422:
                YSCALEYUV2PACKEDX
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */

                "psraw $3, %%mm3 \n\t"
                "psraw $3, %%mm4 \n\t"
                "psraw $3, %%mm1 \n\t"
                "psraw $3, %%mm7 \n\t"
                WRITEYUY2(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            }
        }
    }
#endif /* HAVE_MMX */
#ifdef HAVE_ALTIVEC
    /* The following list of supported dstFormat values should
       match what's found in the body of altivec_yuv2packedX() */
    if (c->dstFormat==PIX_FMT_ABGR  || c->dstFormat==PIX_FMT_BGRA  ||
        c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
        c->dstFormat==PIX_FMT_RGBA  || c->dstFormat==PIX_FMT_ARGB)
        altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
                             chrFilter, chrSrc, chrFilterSize,
                             dest, dstW, dstY);
    else
#endif
        yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
                       chrFilter, chrSrc, chrFilterSize,
                       dest, dstW, dstY);
}

/**
 * vertical bilinear scale YV12 to RGB
 */
static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                                       uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
{
    int  yalpha1=4095- yalpha;
    int uvalpha1=4095-uvalpha;
    int i;

#ifdef HAVE_MMX
    if(!(c->flags & SWS_BITEXACT)){
        switch(c->dstFormat)
        {
        //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
        case PIX_FMT_RGB32:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB(%%REGBP, %5)
                WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        case PIX_FMT_BGR24:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB(%%REGBP, %5)
                WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        case PIX_FMT_RGB555:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB(%%REGBP, %5)
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif

                WRITERGB15(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        case PIX_FMT_RGB565:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB(%%REGBP, %5)
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif

                WRITERGB16(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        case PIX_FMT_YUYV422:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2PACKED(%%REGBP, %5)
                WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        default: break;
        }
    }
#endif //HAVE_MMX
    YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C, YSCALE_YUV_2_GRAY16_2_C, YSCALE_YUV_2_MONO2_C)
}

/**
 * YV12 to RGB without scaling or interpolating
 */
static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
                                       uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
{
    const int yalpha1=0;
    int i;

    uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
    const int yalpha= 4096; //FIXME ...

    if (flags&SWS_FULL_CHR_H_INT)
    {
        RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
        return;
    }

#ifdef HAVE_MMX
    if(!(flags & SWS_BITEXACT)){
        if (uvalpha < 2048) // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
        {
            switch(dstFormat)
            {
            case PIX_FMT_RGB32:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1(%%REGBP, %5)
                    WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_BGR24:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1(%%REGBP, %5)
                    WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB555:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1(%%REGBP, %5)
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                    WRITERGB15(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB565:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1(%%REGBP, %5)
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif

                    WRITERGB16(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_YUYV422:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2PACKED1(%%REGBP, %5)
                    WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            }
        }
        else
        {
            switch(dstFormat)
            {
            case PIX_FMT_RGB32:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1b(%%REGBP, %5)
                    WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_BGR24:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1b(%%REGBP, %5)
                    WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB555:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1b(%%REGBP, %5)
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                    WRITERGB15(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB565:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1b(%%REGBP, %5)
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif

                    WRITERGB16(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_YUYV422:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2PACKED1b(%%REGBP, %5)
                    WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            }
        }
    }
#endif /* HAVE_MMX */
    if (uvalpha < 2048)
    {
        YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C, YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
    }else{
        YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C, YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
    }
}

//FIXME yuy2* can read up to 7 samples too many

static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
{
#ifdef HAVE_MMX
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm2 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",2), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
        "pand %%mm2, %%mm0 \n\t"
        "pand %%mm2, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "add $8, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" (-width), "r" (src+width*2), "r" (dst+width)
        : "%"REG_a
    );
#else
    int i;
    for (i=0; i<width; i++)
        dst[i]= src[2*i];
#endif
}
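
/* bm01010101 is the 0x00FF00FF... byte mask: YUYV keeps luma in the even
 * bytes, so the pand plus packuswb above compress 16 interleaved bytes into
 * 8 Y samples per iteration. The loop runs a negative index up towards
 * zero (" js 1b"), which saves an explicit cmp. */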

static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
{
#ifdef HAVE_MMX
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm4 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",4), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "psrlw $8, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        "movd %%mm0, (%3, %%"REG_a") \n\t"
        "movd %%mm1, (%2, %%"REG_a") \n\t"
        "add $4, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
        : "%"REG_a
    );
#else
    int i;
    for (i=0; i<width; i++)
    {
        dstU[i]= src1[4*i + 1];
        dstV[i]= src1[4*i + 3];
    }
#endif
    assert(src1 == src2);
}

/* This is almost identical to the previous function, and exists only because
 * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
{
#ifdef HAVE_MMX
    __asm__ volatile(
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",2), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "psrlw $8, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "add $8, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" (-width), "r" (src+width*2), "r" (dst+width)
        : "%"REG_a
    );
#else
    int i;
    for (i=0; i<width; i++)
        dst[i]= src[2*i+1];
#endif
}

static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
{
#ifdef HAVE_MMX
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm4 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",4), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
        "pand %%mm4, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        "movd %%mm0, (%3, %%"REG_a") \n\t"
        "movd %%mm1, (%2, %%"REG_a") \n\t"
        "add $4, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
        : "%"REG_a
    );
#else
    int i;
    for (i=0; i<width; i++)
    {
        dstU[i]= src1[4*i + 0];
        dstV[i]= src1[4*i + 2];
    }
#endif
    assert(src1 == src2);
}

#define BGR2Y(type, name, shr, shg, shb, maskr, maskg, maskb, RY, GY, BY, S)\
static inline void RENAME(name)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)\
{\
    int i;\
    for (i=0; i<width; i++)\
    {\
        int b= (((type*)src)[i]>>shb)&maskb;\
        int g= (((type*)src)[i]>>shg)&maskg;\
        int r= (((type*)src)[i]>>shr)&maskr;\
\
        dst[i]= (((RY)*r + (GY)*g + (BY)*b + (33<<((S)-1)))>>(S));\
    }\
}

BGR2Y(uint32_t, bgr32ToY,16, 0, 0, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY   , BY<< 8, RGB2YUV_SHIFT+8)
BGR2Y(uint32_t, rgb32ToY, 0, 0,16, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY   , BY<< 8, RGB2YUV_SHIFT+8)
BGR2Y(uint16_t, bgr16ToY, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RY<<11, GY<<5, BY    , RGB2YUV_SHIFT+8)
BGR2Y(uint16_t, bgr15ToY, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RY<<10, GY<<5, BY    , RGB2YUV_SHIFT+7)
BGR2Y(uint16_t, rgb16ToY, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RY    , GY<<5, BY<<11, RGB2YUV_SHIFT+8)
BGR2Y(uint16_t, rgb15ToY, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RY    , GY<<5, BY<<10, RGB2YUV_SHIFT+7)

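/* The rounding term 33<<((S)-1) above equals (16.5)<<S: it folds the +16
 * luma offset together with the +0.5 for round-to-nearest into a single
 * constant. The 257<<((S)-1) used by BGR2UV below is likewise (128.5)<<S,
 * the chroma bias plus rounding. */
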
1607 #define BGR2UV(type, name, shr, shg, shb, maskr, maskg, maskb, RU, GU, BU, RV, GV, BV, S)\
1608 static inline void RENAME(name)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, uint8_t *dummy, long width, uint32_t *unused)\
1609 {\
1610 int i;\
1611 for (i=0; i<width; i++)\
1612 {\
1613 int b= (((type*)src)[i]&maskb)>>shb;\
1614 int g= (((type*)src)[i]&maskg)>>shg;\
1615 int r= (((type*)src)[i]&maskr)>>shr;\
1616 \
1617 dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<((S)-1)))>>(S);\
1618 dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<((S)-1)))>>(S);\
1619 }\
1620 }\
1621 static inline void RENAME(name ## _half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, uint8_t *dummy, long width, uint32_t *unused)\
1622 {\
1623 int i;\
1624 for (i=0; i<width; i++)\
1625 {\
1626 int pix0= ((type*)src)[2*i+0];\
1627 int pix1= ((type*)src)[2*i+1];\
1628 int g= (pix0&maskg)+(pix1&maskg);\
1629 int b= ((pix0+pix1-g)&(maskb|(2*maskb)))>>shb;\
1630 int r= ((pix0+pix1-g)&(maskr|(2*maskr)))>>shr;\
1631 \
1632 g>>=shg;\
1633 \
1634 dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<(S)))>>((S)+1);\
1635 dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<(S)))>>((S)+1);\
1636 }\
1637 }
1638
1639 BGR2UV(uint32_t, bgr32ToUV,16, 0, 0, 0xFF0000, 0xFF00, 0x00FF, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8)
1640 BGR2UV(uint32_t, rgb32ToUV, 0, 0,16, 0x00FF, 0xFF00, 0xFF0000, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8)
1641 BGR2UV(uint16_t, bgr16ToUV, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RU<<11, GU<<5, BU , RV<<11, GV<<5, BV , RGB2YUV_SHIFT+8)
1642 BGR2UV(uint16_t, bgr15ToUV, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RU<<10, GU<<5, BU , RV<<10, GV<<5, BV , RGB2YUV_SHIFT+7)
1643 BGR2UV(uint16_t, rgb16ToUV, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RU , GU<<5, BU<<11, RV , GV<<5, BV<<11, RGB2YUV_SHIFT+8)
1644 BGR2UV(uint16_t, rgb15ToUV, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RU , GU<<5, BU<<10, RV , GV<<5, BV<<10, RGB2YUV_SHIFT+7)
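/* Companion notes for BGR2UV (illustrative, not normative):
 *
 * - Rounding: 257<<((S)-1) == (128<<S) + (1<<(S-1)), i.e. the +128
 *   chroma bias plus 0.5 for round-to-nearest.
 *
 * - The _half variants average two horizontally adjacent pixels for
 *   chroma subsampling.  The green sums are extracted per pixel first,
 *   so pix0+pix1-g is exactly (b0+b1) + ((r0+r1)<<red_shift); each
 *   two-pixel sum needs just one extra bit, which the mask|(2*mask)
 *   masks provide, and the final shift is (S)+1 rather than S because
 *   two pixels were summed. */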
1645
1646 #ifdef HAVE_MMX
1647 static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, uint8_t *src, long width, int srcFormat)
1648 {
1649
1650 if(srcFormat == PIX_FMT_BGR24){
1651 __asm__ volatile(
1652 "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
1653 "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
1654 :
1655 );
1656 }else{
1657 __asm__ volatile(
1658 "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
1659 "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
1660 :
1661 );
1662 }
1663
1664 __asm__ volatile(
1665 "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
1666 "mov %2, %%"REG_a" \n\t"
1667 "pxor %%mm7, %%mm7 \n\t"
1668 "1: \n\t"
1669 PREFETCH" 64(%0) \n\t"
1670 "movd (%0), %%mm0 \n\t"
1671 "movd 2(%0), %%mm1 \n\t"
1672 "movd 6(%0), %%mm2 \n\t"
1673 "movd 8(%0), %%mm3 \n\t"
1674 "add $12, %0 \n\t"
1675 "punpcklbw %%mm7, %%mm0 \n\t"
1676 "punpcklbw %%mm7, %%mm1 \n\t"
1677 "punpcklbw %%mm7, %%mm2 \n\t"
1678 "punpcklbw %%mm7, %%mm3 \n\t"
1679 "pmaddwd %%mm5, %%mm0 \n\t"
1680 "pmaddwd %%mm6, %%mm1 \n\t"
1681 "pmaddwd %%mm5, %%mm2 \n\t"
1682 "pmaddwd %%mm6, %%mm3 \n\t"
1683 "paddd %%mm1, %%mm0 \n\t"
1684 "paddd %%mm3, %%mm2 \n\t"
1685 "paddd %%mm4, %%mm0 \n\t"
1686 "paddd %%mm4, %%mm2 \n\t"
1687 "psrad $15, %%mm0 \n\t"
1688 "psrad $15, %%mm2 \n\t"
1689 "packssdw %%mm2, %%mm0 \n\t"
1690 "packuswb %%mm0, %%mm0 \n\t"
1691 "movd %%mm0, (%1, %%"REG_a") \n\t"
1692 "add $4, %%"REG_a" \n\t"
1693 " js 1b \n\t"
1694 : "+r" (src)
1695 : "r" (dst+width), "g" (-width)
1696 : "%"REG_a
1697 );
1698 }
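/* Sketch of the loop above (assumptions flagged): each iteration
 * consumes 12 input bytes (four 24bpp pixels) and emits 4 Y bytes.
 * The movd loads at offsets 0/2 and 6/8 present each pixel pair twice;
 * the ff_bgr24to{Y1,Y2}Coeff tables are assumed to put zero weights on
 * the overlapping bytes so that each pmaddwd/paddd pair yields one
 * dword per pixel.  psrad $15 matches 15-bit fixed-point coefficients,
 * and mm4 (ff_bgr24toYOffset) is assumed to hold the offset-plus-
 * rounding term, analogous to 33<<((S)-1) in the C versions. */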
1699
1700 static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, long width, int srcFormat)
1701 {
1702 __asm__ volatile(
1703 "movq 24+%4, %%mm6 \n\t"
1704 "mov %3, %%"REG_a" \n\t"
1705 "pxor %%mm7, %%mm7 \n\t"
1706 "1: \n\t"
1707 PREFETCH" 64(%0) \n\t"
1708 "movd (%0), %%mm0 \n\t"
1709 "movd 2(%0), %%mm1 \n\t"
1710 "punpcklbw %%mm7, %%mm0 \n\t"
1711 "punpcklbw %%mm7, %%mm1 \n\t"
1712 "movq %%mm0, %%mm2 \n\t"
1713 "movq %%mm1, %%mm3 \n\t"
1714 "pmaddwd %4, %%mm0 \n\t"
1715 "pmaddwd 8+%4, %%mm1 \n\t"
1716 "pmaddwd 16+%4, %%mm2 \n\t"
1717 "pmaddwd %%mm6, %%mm3 \n\t"
1718 "paddd %%mm1, %%mm0 \n\t"
1719 "paddd %%mm3, %%mm2 \n\t"
1720
1721 "movd 6(%0), %%mm1 \n\t"
1722 "movd 8(%0), %%mm3 \n\t"
1723 "add $12, %0 \n\t"
1724 "punpcklbw %%mm7, %%mm1 \n\t"
1725 "punpcklbw %%mm7, %%mm3 \n\t"
1726 "movq %%mm1, %%mm4 \n\t"
1727 "movq %%mm3, %%mm5 \n\t"
1728 "pmaddwd %4, %%mm1 \n\t"
1729 "pmaddwd 8+%4, %%mm3 \n\t"
1730 "pmaddwd 16+%4, %%mm4 \n\t"
1731 "pmaddwd %%mm6, %%mm5 \n\t"
1732 "paddd %%mm3, %%mm1 \n\t"
1733 "paddd %%mm5, %%mm4 \n\t"
1734
1735 "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t"
1736 "paddd %%mm3, %%mm0 \n\t"
1737 "paddd %%mm3, %%mm2 \n\t"
1738 "paddd %%mm3, %%mm1 \n\t"
1739 "paddd %%mm3, %%mm4 \n\t"
1740 "psrad $15, %%mm0 \n\t"
1741 "psrad $15, %%mm2 \n\t"
1742 "psrad $15, %%mm1 \n\t"
1743 "psrad $15, %%mm4 \n\t"
1744 "packssdw %%mm1, %%mm0 \n\t"
1745 "packssdw %%mm4, %%mm2 \n\t"
1746 "packuswb %%mm0, %%mm0 \n\t"
1747 "packuswb %%mm2, %%mm2 \n\t"
1748 "movd %%mm0, (%1, %%"REG_a") \n\t"
1749 "movd %%mm2, (%2, %%"REG_a") \n\t"
1750 "add $4, %%"REG_a" \n\t"
1751 " js 1b \n\t"
1752 : "+r" (src)
1753 : "r" (dstU+width), "r" (dstV+width), "g" (-width), "m"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24][0])
1754 : "%"REG_a
1755 );
1756 }
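/* The UV variant keeps a single asm body for both byte orders: %4 is
 * the ff_bgr24toUV row selected by (srcFormat == PIX_FMT_RGB24), read
 * as four coefficient quadwords at offsets 0, 8, 16 and 24 (the last
 * cached in mm6 up front).  U and V are accumulated in parallel,
 * biased by ff_bgr24toUVOffset, and written four bytes per plane per
 * iteration. */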
1757 #endif
1758
1759 static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
1760 {
1761 #ifdef HAVE_MMX
1762 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
1763 #else
1764 int i;
1765 for (i=0; i<width; i++)
1766 {
1767 int b= src[i*3+0];
1768 int g= src[i*3+1];
1769 int r= src[i*3+2];
1770
1771 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
1772 }
1773 #endif /* HAVE_MMX */
1774 }
1775
1776 static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
1777 {
1778 #ifdef HAVE_MMX
1779 RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
1780 #else
1781 int i;
1782 for (i=0; i<width; i++)
1783 {
1784 int b= src1[3*i + 0];
1785 int g= src1[3*i + 1];
1786 int r= src1[3*i + 2];
1787
1788 dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
1789 dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
1790 }
1791 #endif /* HAVE_MMX */
1792 assert(src1 == src2);
1793 }
1794
1795 static inline void RENAME(bgr24ToUV_half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
1796 {
1797 int i;
1798 for (i=0; i<width; i++)
1799 {
1800 int b= src1[6*i + 0] + src1[6*i + 3];
1801 int g= src1[6*i + 1] + src1[6*i + 4];
1802 int r= src1[6*i + 2] + src1[6*i + 5];
1803
1804 dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
1805 dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
1806 }
1807 assert(src1 == src2);
1808 }
1809
1810 static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
1811 {
1812 #ifdef HAVE_MMX
1813 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
1814 #else
1815 int i;
1816 for (i=0; i<width; i++)
1817 {
1818 int r= src[i*3+0];
1819 int g= src[i*3+1];
1820 int b= src[i*3+2];
1821
1822 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
1823 }
1824 #endif
1825 }
1826
1827 static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
1828 {
1829 #ifdef HAVE_MMX
1830 assert(src1==src2);
1831 RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
1832 #else
1833 int i;
1834 assert(src1==src2);
1835 for (i=0; i<width; i++)
1836 {
1837 int r= src1[3*i + 0];
1838 int g= src1[3*i + 1];
1839 int b= src1[3*i + 2];
1840
1841 dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
1842 dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
1843 }
1844 #endif
1845 }
1846
1847 static inline void RENAME(rgb24ToUV_half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
1848 {
1849 int i;
1850 assert(src1==src2);
1851 for (i=0; i<width; i++)
1852 {
1853 int r= src1[6*i + 0] + src1[6*i + 3];
1854 int g= src1[6*i + 1] + src1[6*i + 4];
1855 int b= src1[6*i + 2] + src1[6*i + 5];
1856
1857 dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
1858 dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
1859 }
1860 }
1861
1862
1863 static inline void RENAME(palToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *pal)
1864 {
1865 int i;
1866 for (i=0; i<width; i++)
1867 {
1868 int d= src[i];
1869
1870 dst[i]= pal[d] & 0xFF;
1871 }
1872 }
1873
1874 static inline void RENAME(palToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *pal)
1875 {
1876 int i;
1877 assert(src1 == src2);
1878 for (i=0; i<width; i++)
1879 {
1880 int p= pal[src1[i]];
1881
1882 dstU[i]= p>>8;
1883 dstV[i]= p>>16;
1884 }
1885 }
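/* As used here, each pal[] entry packs one converted pixel as
 * (V<<16) | (U<<8) | Y: palToY above reads bits 0-7, palToUV reads
 * bits 8-15 and 16-23.  The table itself is built elsewhere and
 * arrives as c->pal_yuv in the callers below. */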
1886
1887 static inline void RENAME(monowhite2Y)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
1888 {
1889 int i, j;
1890 for (i=0; i<width/8; i++){
1891 int d= ~src[i];
1892 for(j=0; j<8; j++)
1893 dst[8*i+j]= ((d>>(7-j))&1)*255;
1894 }
1895 }
1896
1897 static inline void RENAME(monoblack2Y)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
1898 {
1899 int i, j;
1900 for (i=0; i<width/8; i++){
1901 int d= src[i];
1902 for(j=0; j<8; j++)
1903 dst[8*i+j]= ((d>>(7-j))&1)*255;
1904 }
1905 }
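/* Both 1bpp unpackers expand each input byte MSB-first into eight luma
 * bytes of 0 or 255; monowhite complements the byte first since a 0
 * bit means white in that format.  Note both loops cover width/8 whole
 * bytes, so up to 7 trailing pixels of a width that is not a multiple
 * of 8 are left untouched. */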
1906
1907 // bilinear / bicubic scaling
1908 static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
1909 int16_t *filter, int16_t *filterPos, long filterSize)
1910 {
1911 #ifdef HAVE_MMX
1912 assert(filterSize % 4 == 0 && filterSize>0);
1913 if (filterSize==4) // Always true for upscaling, sometimes for down, too.
1914 {
1915 long counter= -2*dstW;
1916 filter-= counter*2;
1917 filterPos-= counter/2;
1918 dst-= counter/2;
1919 __asm__ volatile(
1920 #if defined(PIC)
1921 "push %%"REG_b" \n\t"
1922 #endif
1923 "pxor %%mm7, %%mm7 \n\t"
1924 "push %%"REG_BP" \n\t" // we use 7 regs here ...
1925 "mov %%"REG_a", %%"REG_BP" \n\t"
1926 ASMALIGN(4)
1927 "1: \n\t"
1928 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
1929 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
1930 "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
1931 "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
1932 "movd (%3, %%"REG_a"), %%mm0 \n\t"
1933 "movd (%3, %%"REG_b"), %%mm2 \n\t"
1934 "punpcklbw %%mm7, %%mm0 \n\t"
1935 "punpcklbw %%mm7, %%mm2 \n\t"
1936 "pmaddwd %%mm1, %%mm0 \n\t"
1937 "pmaddwd %%mm2, %%mm3 \n\t"
1938 "movq %%mm0, %%mm4 \n\t"
1939 "punpckldq %%mm3, %%mm0 \n\t"
1940 "punpckhdq %%mm3, %%mm4 \n\t"
1941 "paddd %%mm4, %%mm0 \n\t"
1942 "psrad $7, %%mm0 \n\t"
1943 "packssdw %%mm0, %%mm0 \n\t"
1944 "movd %%mm0, (%4, %%"REG_BP") \n\t"
1945 "add $4, %%"REG_BP" \n\t"
1946 " jnc 1b \n\t"
1947
1948 "pop %%"REG_BP" \n\t"
1949 #if defined(PIC)
1950 "pop %%"REG_b" \n\t"
1951 #endif
1952 : "+a" (counter)
1953 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
1954 #if !defined(PIC)
1955 : "%"REG_b
1956 #endif
1957 );
1958 }
1959 else if (filterSize==8)
1960 {
1961 long counter= -2*dstW;
1962 filter-= counter*4;
1963 filterPos-= counter/2;
1964 dst-= counter/2;
1965 __asm__ volatile(
1966 #if defined(PIC)
1967 "push %%"REG_b" \n\t"
1968 #endif
1969 "pxor %%mm7, %%mm7 \n\t"
1970 "push %%"REG_BP" \n\t" // we use 7 regs here ...
1971 "mov %%"REG_a", %%"REG_BP" \n\t"
1972 ASMALIGN(4)
1973 "1: \n\t"
1974 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
1975 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
1976 "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
1977 "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
1978 "movd (%3, %%"REG_a"), %%mm0 \n\t"
1979 "movd (%3, %%"REG_b"), %%mm2 \n\t"
1980 "punpcklbw %%mm7, %%mm0 \n\t"
1981 "punpcklbw %%mm7, %%mm2 \n\t"
1982 "pmaddwd %%mm1, %%mm0 \n\t"
1983 "pmaddwd %%mm2, %%mm3 \n\t"
1984
1985 "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
1986 "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
1987 "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
1988 "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
1989 "punpcklbw %%mm7, %%mm4 \n\t"
1990 "punpcklbw %%mm7, %%mm2 \n\t"
1991 "pmaddwd %%mm1, %%mm4 \n\t"
1992 "pmaddwd %%mm2, %%mm5 \n\t"
1993 "paddd %%mm4, %%mm0 \n\t"
1994 "paddd %%mm5, %%mm3 \n\t"
1995 "movq %%mm0, %%mm4 \n\t"
1996 "punpckldq %%mm3, %%mm0 \n\t"
1997 "punpckhdq %%mm3, %%mm4 \n\t"
1998 "paddd %%mm4, %%mm0 \n\t"
1999 "psrad $7, %%mm0 \n\t"
2000 "packssdw %%mm0, %%mm0 \n\t"
2001 "movd %%mm0, (%4, %%"REG_BP") \n\t"
2002 "add $4, %%"REG_BP" \n\t"
2003 " jnc 1b \n\t"
2004
2005 "pop %%"REG_BP" \n\t"
2006 #if defined(PIC)
2007 "pop %%"REG_b" \n\t"
2008 #endif
2009 : "+a" (counter)
2010 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
2011 #if !defined(PIC)
2012 : "%"REG_b
2013 #endif
2014 );
2015 }
2016 else
2017 {
2018 uint8_t *offset = src+filterSize;
2019 long counter= -2*dstW;
2020 //filter-= counter*filterSize/2;
2021 filterPos-= counter/2;
2022 dst-= counter/2;
2023 __asm__ volatile(
2024 "pxor %%mm7, %%mm7 \n\t"
2025 ASMALIGN(4)
2026 "1: \n\t"
2027 "mov %2, %%"REG_c" \n\t"
2028 "movzwl (%%"REG_c", %0), %%eax \n\t"
2029 "movzwl 2(%%"REG_c", %0), %%edx \n\t"
2030 "mov %5, %%"REG_c" \n\t"
2031 "pxor %%mm4, %%mm4 \n\t"
2032 "pxor %%mm5, %%mm5 \n\t"
2033 "2: \n\t"
2034 "movq (%1), %%mm1 \n\t"
2035 "movq (%1, %6), %%mm3 \n\t"
2036 "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
2037 "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
2038 "punpcklbw %%mm7, %%mm0 \n\t"
2039 "punpcklbw %%mm7, %%mm2 \n\t"
2040 "pmaddwd %%mm1, %%mm0 \n\t"
2041 "pmaddwd %%mm2, %%mm3 \n\t"
2042 "paddd %%mm3, %%mm5 \n\t"
2043 "paddd %%mm0, %%mm4 \n\t"
2044 "add $8, %1 \n\t"
2045 "add $4, %%"REG_c" \n\t"
2046 "cmp %4, %%"REG_c" \n\t"
2047 " jb 2b \n\t"
2048 "add %6, %1 \n\t"
2049 "movq %%mm4, %%mm0 \n\t"
2050 "punpckldq %%mm5, %%mm4 \n\t"
2051 "punpckhdq %%mm5, %%mm0 \n\t"
2052 "paddd %%mm0, %%mm4 \n\t"
2053 "psrad $7, %%mm4 \n\t"
2054 "packssdw %%mm4, %%mm4 \n\t"
2055 "mov %3, %%"REG_a" \n\t"
2056 "movd %%mm4, (%%"REG_a", %0) \n\t"
2057 "add $4, %0 \n\t"
2058 " jnc 1b \n\t"
2059
2060 : "+r" (counter), "+r" (filter)
2061 : "m" (filterPos), "m" (dst), "m"(offset),
2062 "m" (src), "r" (filterSize*2)
2063 : "%"REG_a, "%"REG_c, "%"REG_d
2064 );
2065 }
2066 #else
2067 #ifdef HAVE_ALTIVEC
2068 hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
2069 #else
2070 int i;
2071 for (i=0; i<dstW; i++)
2072 {
2073 int j;
2074 int srcPos= filterPos[i];
2075 int val=0;
2076 //printf("filterPos: %d\n", filterPos[i]);
2077 for (j=0; j<filterSize; j++)
2078 {
2079 //printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
2080 val += ((int)src[srcPos + j])*filter[filterSize*i + j];
2081 }
2082 //filter += hFilterSize;
2083 dst[i] = FFMIN(val>>7, (1<<15)-1); // the cubic equation does overflow ...
2084 //dst[i] = val>>7;
2085 }
2086 #endif /* HAVE_ALTIVEC */
2087 #endif /* HAVE_MMX */
2088 }
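/* All hScale variants implement the reference semantics of the scalar
 * fallback (a sketch, not a transcript of the asm):
 *
 *   for (i = 0; i < dstW; i++) {
 *       int j, val = 0;
 *       for (j = 0; j < filterSize; j++)
 *           val += src[filterPos[i] + j] * filter[filterSize*i + j];
 *       dst[i] = FFMIN(val >> 7, (1<<15) - 1);
 *   }
 *
 * The MMX paths compute two output samples per iteration via
 * pmaddwd / psrad $7, with packssdw providing the equivalent signed
 * 16-bit saturation. */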
2089 // *** horizontal scale Y line to temp buffer
2090 static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
2091 int flags, int canMMX2BeUsed, int16_t *hLumFilter,
2092 int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
2093 int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
2094 int32_t *mmx2FilterPos, uint32_t *pal)
2095 {
2096 if (srcFormat==PIX_FMT_YUYV422 || srcFormat==PIX_FMT_GRAY16BE)
2097 {
2098 RENAME(yuy2ToY)(formatConvBuffer, src, srcW, pal);
2099 src= formatConvBuffer;
2100 }
2101 else if (srcFormat==PIX_FMT_UYVY422 || srcFormat==PIX_FMT_GRAY16LE)
2102 {
2103 RENAME(uyvyToY)(formatConvBuffer, src, srcW, pal);
2104 src= formatConvBuffer;
2105 }
2106 else if (srcFormat==PIX_FMT_RGB32)
2107 {
2108 RENAME(bgr32ToY)(formatConvBuffer, src, srcW, pal);
2109 src= formatConvBuffer;
2110 }
2111 else if (srcFormat==PIX_FMT_RGB32_1)
2112 {
2113 RENAME(bgr32ToY)(formatConvBuffer, src+ALT32_CORR, srcW, pal);
2114 src= formatConvBuffer;
2115 }
2116 else if (srcFormat==PIX_FMT_BGR24)
2117 {
2118 RENAME(bgr24ToY)(formatConvBuffer, src, srcW, pal);
2119 src= formatConvBuffer;
2120 }
2121 else if (srcFormat==PIX_FMT_BGR565)
2122 {
2123 RENAME(bgr16ToY)(formatConvBuffer, src, srcW, pal);
2124 src= formatConvBuffer;
2125 }
2126 else if (srcFormat==PIX_FMT_BGR555)
2127 {
2128 RENAME(bgr15ToY)(formatConvBuffer, src, srcW, pal);
2129 src= formatConvBuffer;
2130 }
2131 else if (srcFormat==PIX_FMT_BGR32)
2132 {
2133 RENAME(rgb32ToY)(formatConvBuffer, src, srcW, pal);
2134 src= formatConvBuffer;
2135 }
2136 else if (srcFormat==PIX_FMT_BGR32_1)
2137 {
2138 RENAME(rgb32ToY)(formatConvBuffer, src+ALT32_CORR, srcW, pal);
2139 src= formatConvBuffer;
2140 }
2141 else if (srcFormat==PIX_FMT_RGB24)
2142 {
2143 RENAME(rgb24ToY)(formatConvBuffer, src, srcW, pal);
2144 src= formatConvBuffer;
2145 }
2146 else if (srcFormat==PIX_FMT_RGB565)
2147 {
2148 RENAME(rgb16ToY)(formatConvBuffer, src, srcW, pal);
2149 src= formatConvBuffer;
2150 }
2151 else if (srcFormat==PIX_FMT_RGB555)
2152 {
2153 RENAME(rgb15ToY)(formatConvBuffer, src, srcW, pal);
2154 src= formatConvBuffer;
2155 }
2156 else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
2157 {
2158 RENAME(palToY)(formatConvBuffer, src, srcW, pal);
2159 src= formatConvBuffer;
2160 }
2161 else if (srcFormat==PIX_FMT_MONOBLACK)
2162 {
2163 RENAME(monoblack2Y)(formatConvBuffer, src, srcW, pal);
2164 src= formatConvBuffer;
2165 }
2166 else if (srcFormat==PIX_FMT_MONOWHITE)
2167 {
2168 RENAME(monowhite2Y)(formatConvBuffer, src, srcW, pal);
2169 src= formatConvBuffer;
2170 }
2171
2172 #ifdef HAVE_MMX
2173     // Use the new MMX scaler if the MMX2 one can't be used (the MMX one is faster than the plain x86 asm one).
2174 if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
2175 #else
2176 if (!(flags&SWS_FAST_BILINEAR))
2177 #endif
2178 {
2179 RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
2180 }
2181 else // fast bilinear upscale / crap downscale
2182 {
2183 #if defined(ARCH_X86)
2184 #ifdef HAVE_MMX2
2185 int i;
2186 #if defined(PIC)
2187 uint64_t ebxsave __attribute__((aligned(8)));
2188 #endif
2189 if (canMMX2BeUsed)
2190 {
2191 __asm__ volatile(
2192 #if defined(PIC)
2193 "mov %%"REG_b", %5 \n\t"
2194 #endif
2195 "pxor %%mm7, %%mm7 \n\t"
2196 "mov %0, %%"REG_c" \n\t"
2197 "mov %1, %%"REG_D" \n\t"
2198 "mov %2, %%"REG_d" \n\t"
2199 "mov %3, %%"REG_b" \n\t"
2200 "xor %%"REG_a", %%"REG_a" \n\t" // i
2201 PREFETCH" (%%"REG_c") \n\t"
2202 PREFETCH" 32(%%"REG_c") \n\t"
2203 PREFETCH" 64(%%"REG_c") \n\t"
2204
2205 #ifdef ARCH_X86_64
2206
2207 #define FUNNY_Y_CODE \
2208 "movl (%%"REG_b"), %%esi \n\t"\
2209 "call *%4 \n\t"\
2210 "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
2211 "add %%"REG_S", %%"REG_c" \n\t"\
2212 "add %%"REG_a", %%"REG_D" \n\t"\
2213 "xor %%"REG_a", %%"REG_a" \n\t"\
2214
2215 #else
2216
2217 #define FUNNY_Y_CODE \
2218 "movl (%%"REG_b"), %%esi \n\t"\
2219 "call *%4 \n\t"\
2220 "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
2221 "add %%"REG_a", %%"REG_D" \n\t"\
2222 "xor %%"REG_a", %%"REG_a" \n\t"\
2223
2224 #endif /* ARCH_X86_64 */
2225
2226 FUNNY_Y_CODE
2227 FUNNY_Y_CODE
2228 FUNNY_Y_CODE
2229 FUNNY_Y_CODE
2230 FUNNY_Y_CODE
2231 FUNNY_Y_CODE
2232 FUNNY_Y_CODE
2233 FUNNY_Y_CODE
2234
2235 #if defined(PIC)
2236 "mov %5, %%"REG_b" \n\t"
2237 #endif
2238 :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
2239 "m" (funnyYCode)
2240 #if defined(PIC)
2241 ,"m" (ebxsave)
2242 #endif
2243 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2244 #if !defined(PIC)
2245 ,"%"REG_b
2246 #endif
2247 );
2248 for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
2249 }
2250 else
2251 {
2252 #endif /* HAVE_MMX2 */
2253 long xInc_shr16 = xInc >> 16;
2254 uint16_t xInc_mask = xInc & 0xffff;
2255         //no MMX, just normal asm ...
2256 __asm__ volatile(
2257 "xor %%"REG_a", %%"REG_a" \n\t" // i
2258 "xor %%"REG_d", %%"REG_d" \n\t" // xx
2259 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
2260 ASMALIGN(4)
2261 "1: \n\t"
2262 "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
2263 "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
2264 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
2265 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
2266 "shll $16, %%edi \n\t"
2267 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2268 "mov %1, %%"REG_D" \n\t"
2269 "shrl $9, %%esi \n\t"
2270 "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
2271 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
2272 "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
2273
2274 "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
2275 "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
2276 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
2277 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
2278 "shll $16, %%edi \n\t"
2279 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2280 "mov %1, %%"REG_D" \n\t"
2281 "shrl $9, %%esi \n\t"
2282 "movw %%si, 2(%%"REG_D", %%"REG_a", 2) \n\t"
2283 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
2284 "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
2285
2286
2287 "add $2, %%"REG_a" \n\t"
2288 "cmp %2, %%"REG_a" \n\t"
2289 " jb 1b \n\t"
2290
2291
2292 :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
2293 : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
2294 );
2295 #ifdef HAVE_MMX2
2296 } //if MMX2 can't be used
2297 #endif
2298 #else
2299 int i;
2300 unsigned int xpos=0;
2301 for (i=0;i<dstWidth;i++)
2302 {
2303 register unsigned int xx=xpos>>16;
2304 register unsigned int xalpha=(xpos&0xFFFF)>>9;
2305 dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
2306 xpos+=xInc;
2307 }
2308 #endif /* defined(ARCH_X86) */
2309 }
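    /* All three fast-bilinear paths above compute, in 16.16 fixed
     * point (a sketch following the C fallback):
     *
     *   xx     = xpos >> 16;            // integer source position
     *   xalpha = (xpos & 0xFFFF) >> 9;  // 7-bit blend factor
     *   dst[i] = (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
     *   xpos  += xInc;
     *
     * so dst holds luma scaled by 128, matching the >>7 of hScale. */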
2310
2311 if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
2312 int i;
2313     //FIXME all pal and rgb srcFormats could do this conversion as well
2314 //FIXME all scalers more complex than bilinear could do half of this transform
2315 if(c->srcRange){
2316 for (i=0; i<dstWidth; i++)
2317 dst[i]= (dst[i]*14071 + 33561947)>>14;
2318 }else{
2319 for (i=0; i<dstWidth; i++)
2320 dst[i]= (FFMIN(dst[i],30189)*19077 - 39057361)>>14;
2321 }
2322 }
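    /* The magic numbers above follow from luma being stored as
     * value<<7: JPEG->MPEG range is dst*219/255 + (16<<7), with
     * 14071 ~= (219/255)*2^14 and 33561947 ~= 2048.5*2^14 (offset plus
     * rounding).  The inverse direction uses 19077 ~= (255/219)*2^14
     * with the matching offset, and the FFMIN clamp keeps the 32-bit
     * multiply from overflowing. */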
2323 }
2324
2325 inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
2326 int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
2327 int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
2328 int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
2329 int32_t *mmx2FilterPos, uint32_t *pal)
2330 {
2331 if (srcFormat==PIX_FMT_YUYV422)
2332 {
2333 RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2334 src1= formatConvBuffer;
2335 src2= formatConvBuffer+VOFW;
2336 }
2337 else if (srcFormat==PIX_FMT_UYVY422)
2338 {
2339 RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2340 src1= formatConvBuffer;
2341 src2= formatConvBuffer+VOFW;
2342 }
2343 else if (srcFormat==PIX_FMT_RGB32)
2344 {
2345 if(c->chrSrcHSubSample)
2346 RENAME(bgr32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2347 else
2348 RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2349 src1= formatConvBuffer;
2350 src2= formatConvBuffer+VOFW;
2351 }
2352 else if (srcFormat==PIX_FMT_RGB32_1)
2353 {
2354 if(c->chrSrcHSubSample)
2355 RENAME(bgr32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
2356 else
2357 RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
2358 src1= formatConvBuffer;
2359 src2= formatConvBuffer+VOFW;
2360 }
2361 else if (srcFormat==PIX_FMT_BGR24)
2362 {
2363 if(c->chrSrcHSubSample)
2364 RENAME(bgr24ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2365 else
2366 RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2367 src1= formatConvBuffer;
2368 src2= formatConvBuffer+VOFW;
2369 }
2370 else if (srcFormat==PIX_FMT_BGR565)
2371 {
2372 if(c->chrSrcHSubSample)
2373 RENAME(bgr16ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2374 else
2375 RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2376 src1= formatConvBuffer;
2377 src2= formatConvBuffer+VOFW;
2378 }
2379 else if (srcFormat==PIX_FMT_BGR555)
2380 {
2381 if(c->chrSrcHSubSample)
2382 RENAME(bgr15ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2383 else
2384 RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2385 src1= formatConvBuffer;
2386 src2= formatConvBuffer+VOFW;
2387 }
2388 else if (srcFormat==PIX_FMT_BGR32)
2389 {
2390 if(c->chrSrcHSubSample)
2391 RENAME(rgb32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2392 else
2393 RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2394 src1= formatConvBuffer;
2395 src2= formatConvBuffer+VOFW;
2396 }
2397 else if (srcFormat==PIX_FMT_BGR32_1)
2398 {
2399 if(c->chrSrcHSubSample)
2400 RENAME(rgb32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
2401 else
2402 RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
2403 src1= formatConvBuffer;
2404 src2= formatConvBuffer+VOFW;
2405 }
2406 else if (srcFormat==PIX_FMT_RGB24)
2407 {
2408 if(c->chrSrcHSubSample)
2409 RENAME(rgb24ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2410 else
2411 RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2412 src1= formatConvBuffer;
2413 src2= formatConvBuffer+VOFW;
2414 }
2415 else if (srcFormat==PIX_FMT_RGB565)
2416 {
2417 if(c->chrSrcHSubSample)
2418 RENAME(rgb16ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2419 else
2420 RENAME(rgb16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2421 src1= formatConvBuffer;
2422 src2= formatConvBuffer+VOFW;
2423 }
2424 else if (srcFormat==PIX_FMT_RGB555)
2425 {
2426 if(c->chrSrcHSubSample)
2427 RENAME(rgb15ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2428 else
2429 RENAME(rgb15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2430 src1= formatConvBuffer;
2431 src2= formatConvBuffer+VOFW;
2432 }
2433 else if (isGray(srcFormat) || srcFormat==PIX_FMT_MONOBLACK || srcFormat==PIX_FMT_MONOWHITE)
2434 {
2435 return;
2436 }
2437 else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
2438 {
2439 RENAME(palToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2440 src1= formatConvBuffer;
2441 src2= formatConvBuffer+VOFW;
2442 }
2443
2444 #ifdef HAVE_MMX
2445     // Use the new MMX scaler if the MMX2 one can't be used (the MMX one is faster than the plain x86 asm one).
2446 if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
2447 #else
2448 if (!(flags&SWS_FAST_BILINEAR))
2449 #endif
2450 {
2451 RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
2452 RENAME(hScale)(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
2453 }
2454 else // fast bilinear upscale / crap downscale
2455 {
2456 #if defined(ARCH_X86)
2457 #ifdef HAVE_MMX2
2458 int i;
2459 #if defined(PIC)
2460 uint64_t ebxsave __attribute__((aligned(8)));
2461 #endif
2462 if (canMMX2BeUsed)
2463 {
2464 __asm__ volatile(
2465 #if defined(PIC)
2466 "mov %%"REG_b", %6 \n\t"
2467 #endif
2468 "pxor %%mm7, %%mm7 \n\t"
2469 "mov %0, %%"REG_c" \n\t"
2470 "mov %1, %%"REG_D" \n\t"
2471 "mov %2, %%"REG_d" \n\t"
2472 "mov %3, %%"REG_b" \n\t"
2473 "xor %%"REG_a", %%"REG_a" \n\t" // i
2474 PREFETCH" (%%"REG_c") \n\t"
2475 PREFETCH" 32(%%"REG_c") \n\t"
2476 PREFETCH" 64(%%"REG_c") \n\t"
2477
2478 #ifdef ARCH_X86_64
2479
2480 #define FUNNY_UV_CODE \
2481 "movl (%%"REG_b"), %%esi \n\t"\
2482 "call *%4 \n\t"\
2483 "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
2484 "add %%"REG_S", %%"REG_c" \n\t"\
2485 "add %%"REG_a", %%"REG_D" \n\t"\
2486 "xor %%"REG_a", %%"REG_a" \n\t"\
2487
2488 #else
2489
2490 #define FUNNY_UV_CODE \
2491 "movl (%%"REG_b"), %%esi \n\t"\
2492 "call *%4 \n\t"\
2493 "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
2494 "add %%"REG_a", %%"REG_D" \n\t"\
2495 "xor %%"REG_a", %%"REG_a" \n\t"\
2496
2497 #endif /* ARCH_X86_64 */
2498
2499 FUNNY_UV_CODE
2500 FUNNY_UV_CODE
2501 FUNNY_UV_CODE
2502 FUNNY_UV_CODE
2503 "xor %%"REG_a", %%"REG_a" \n\t" // i
2504 "mov %5, %%"REG_c" \n\t" // src
2505 "mov %1, %%"REG_D" \n\t" // buf1
2506 "add $"AV_STRINGIFY(VOF)", %%"REG_D" \n\t"
2507 PREFETCH" (%%"REG_c") \n\t"
2508 PREFETCH" 32(%%"REG_c") \n\t"
2509 PREFETCH" 64(%%"REG_c") \n\t"
2510
2511 FUNNY_UV_CODE
2512 FUNNY_UV_CODE
2513 FUNNY_UV_CODE
2514 FUNNY_UV_CODE
2515
2516 #if defined(PIC)
2517 "mov %6, %%"REG_b" \n\t"
2518 #endif
2519 :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
2520 "m" (funnyUVCode), "m" (src2)
2521 #if defined(PIC)
2522 ,"m" (ebxsave)
2523 #endif
2524 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2525 #if !defined(PIC)
2526 ,"%"REG_b
2527 #endif
2528 );
2529 for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
2530 {
2531 //printf("%d %d %d\n", dstWidth, i, srcW);
2532 dst[i] = src1[srcW-1]*128;
2533 dst[i+VOFW] = src2[srcW-1]*128;
2534 }
2535 }
2536 else
2537 {
2538 #endif /* HAVE_MMX2 */
2539 long xInc_shr16 = (long) (xInc >> 16);
2540 uint16_t xInc_mask = xInc & 0xffff;
2541 __asm__ volatile(
2542 "xor %%"REG_a", %%"REG_a" \n\t" // i
2543 "xor %%"REG_d", %%"REG_d" \n\t" // xx
2544 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
2545 ASMALIGN(4)
2546 "1: \n\t"
2547 "mov %0, %%"REG_S" \n\t"
2548 "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
2549 "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
2550 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
2551 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
2552 "shll $16, %%edi \n\t"
2553 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2554 "mov %1, %%"REG_D" \n\t"
2555 "shrl $9, %%esi \n\t"
2556 "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
2557
2558 "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
2559 "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
2560 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
2561 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
2562 "shll $16, %%edi \n\t"
2563 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2564 "mov %1, %%"REG_D" \n\t"
2565 "shrl $9, %%esi \n\t"
2566 "movw %%si, "AV_STRINGIFY(VOF)"(%%"REG_D", %%"REG_a", 2) \n\t"
2567
2568 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
2569 "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
2570 "add $1, %%"REG_a" \n\t"
2571 "cmp %2, %%"REG_a" \n\t"
2572 " jb 1b \n\t"
2573
2574 /* The "g" operand is needed to support GCC 4.0, but GCC 3.3 makes MPlayer
2575    crash on IA-32 machines when it is used here. */
2576 #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
2577 :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
2578 #else
2579 :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
2580 #endif
2581 "r" (src2)
2582 : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
2583 );
2584 #ifdef HAVE_MMX2
2585 } //if MMX2 can't be used
2586 #endif
2587 #else
2588 int i;
2589 unsigned int xpos=0;
2590 for (i=0;i<dstWidth;i++)
2591 {
2592 register unsigned int xx=xpos>>16;
2593 register unsigned int xalpha=(xpos&0xFFFF)>>9;
2594 dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
2595 dst[i+VOFW]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
2596 /* slower
2597 dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
2598 dst[i+VOFW]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
2599 */
2600 xpos+=xInc;
2601 }
2602 #endif /* defined(ARCH_X86) */
2603 }
2604 if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
2605 int i;
2606     //FIXME all pal and rgb srcFormats could do this conversion as well
2607 //FIXME all scalers more complex than bilinear could do half of this transform
2608 if(c->srcRange){
2609 for (i=0; i<dstWidth; i++){
2610 dst[i ]= (dst[i ]*1799 + 4081085)>>11; //1469
2611 dst[i+VOFW]= (dst[i+VOFW]*1799 + 4081085)>>11; //1469
2612 }
2613 }else{
2614 for (i=0; i<dstWidth; i++){
2615 dst[i ]= (FFMIN(dst[i ],30775)*4663 - 9289992)>>12; //-264
2616 dst[i+VOFW]= (FFMIN(dst[i+VOFW],30775)*4663 - 9289992)>>12; //-264
2617 }
2618 }
2619 }
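    /* Same idea as the luma case in hyscale(), with chroma stored as
     * value<<7 around the 128<<7 bias: 1799 ~= (224/255)*2^11 and
     * 4663 ~= (255/224)*2^12, with offsets chosen so the 128 bias maps
     * to itself, and FFMIN again guarding the 32-bit multiply. */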
2620 }
2621
2622 static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
2623 int srcSliceH, uint8_t* dst[], int dstStride[]){
2624
2625     /* load a few things into local vars to make the code more readable and faster */
2626 const int srcW= c->srcW;
2627 const int dstW= c->dstW;
2628 const int dstH= c->dstH;
2629 const int chrDstW= c->chrDstW;
2630 const int chrSrcW= c->chrSrcW;
2631 const int lumXInc= c->lumXInc;
2632 const int chrXInc= c->chrXInc;
2633 const int dstFormat= c->dstFormat;
2634 const int srcFormat= c->srcFormat;
2635 const int flags= c->flags;
2636 const int canMMX2BeUsed= c->canMMX2BeUsed;
2637 int16_t *vLumFilterPos= c->vLumFilterPos;
2638 int16_t *vChrFilterPos= c->vChrFilterPos;
2639 int16_t *hLumFilterPos= c->hLumFilterPos;
2640 int16_t *hChrFilterPos= c->hChrFilterPos;
2641 int16_t *vLumFilter= c->vLumFilter;
2642 int16_t *vChrFilter= c->vChrFilter;
2643 int16_t *hLumFilter= c->hLumFilter;
2644 int16_t *hChrFilter= c->hChrFilter;
2645 int32_t *lumMmxFilter= c->lumMmxFilter;
2646 int32_t *chrMmxFilter= c->chrMmxFilter;
2647 const int vLumFilterSize= c->vLumFilterSize;
2648 const int vChrFilterSize= c->vChrFilterSize;
2649 const int hLumFilterSize= c->hLumFilterSize;
2650 const int hChrFilterSize= c->hChrFilterSize;
2651 int16_t **lumPixBuf= c->lumPixBuf;
2652 int16_t **chrPixBuf= c->chrPixBuf;
2653 const int vLumBufSize= c->vLumBufSize;
2654 const int vChrBufSize= c->vChrBufSize;
2655 uint8_t *funnyYCode= c->funnyYCode;
2656 uint8_t *funnyUVCode= c->funnyUVCode;
2657 uint8_t *formatConvBuffer= c->formatConvBuffer;
2658 const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
2659 const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
2660 int lastDstY;
2661 uint32_t *pal=c->pal_yuv;
2662
2663 /* vars which will change and which we need to store back in the context */
2664 int dstY= c->dstY;
2665 int lumBufIndex= c->lumBufIndex;
2666 int chrBufIndex= c->chrBufIndex;
2667 int lastInLumBuf= c->lastInLumBuf;
2668 int lastInChrBuf= c->lastInChrBuf;
2669
2670 if (isPacked(c->srcFormat)){
2671 src[0]=
2672 src[1]=
2673 src[2]= src[0];
2674 srcStride[0]=
2675 srcStride[1]=
2676 srcStride[2]= srcStride[0];
2677 }
2678 srcStride[1]<<= c->vChrDrop;
2679 srcStride[2]<<= c->vChrDrop;
2680
2681 //printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
2682 // (int)dst[0], (int)dst[1], (int)dst[2]);
2683
2684 #if 0 //self test FIXME move to a vfilter or something
2685 {
2686 static volatile int i=0;
2687 i++;
2688 if (srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH)
2689 selfTest(src, srcStride, c->srcW, c->srcH);
2690 i--;
2691 }
2692 #endif
2693
2694 //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
2695 //dstStride[0],dstStride[1],dstStride[2]);
2696
2697 if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
2698 {
2699 static int warnedAlready=0; //FIXME move this into the context perhaps
2700 if (flags & SWS_PRINT_INFO && !warnedAlready)
2701 {
2702 av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n"
2703 " ->cannot do aligned memory accesses anymore\n");
2704 warnedAlready=1;
2705 }
2706 }
2707
2708     /* Note that the user might start scaling the picture in the middle, so
2709        this will not get executed. This is not really intended, but it works
2710        currently, so people might do it. */
2711 if (srcSliceY ==0){
2712 lumBufIndex=0;
2713 chrBufIndex=0;
2714 dstY=0;
2715 lastInLumBuf= -1;
2716 lastInChrBuf= -1;
2717 }
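    /* lumPixBuf / chrPixBuf act as ring buffers of vLumBufSize /
     * vChrBufSize horizontally scaled lines, so the vertical filter
     * can reuse lines delivered by earlier slices; lastInLumBuf and
     * lastInChrBuf track the newest source line already present, and
     * the indices wrap further down. */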
2718
2719 lastDstY= dstY;
2720
2721 for (;dstY < dstH; dstY++){
2722 unsigned char *dest =dst[0]+dstStride[0]*dstY;
2723 const int chrDstY= dstY>>c->chrDstVSubSample;
2724 unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
2725 unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
2726
2727 const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
2728 const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
2729 const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
2730 const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
2731
2732 //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
2733 // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
2734 //handle holes (FAST_BILINEAR & weird filters)
2735 if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
2736 if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
2737 //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
2738 assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1);
2739 assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1);
2740
2741         // Do we have enough lines in this slice to output the dstY line?
2742 if (lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
2743 {
2744 //Do horizontal scaling
2745 while(lastInLumBuf < lastLumSrcY)
2746 {
2747 uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
2748 lumBufIndex++;
2749 //printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
2750 assert(lumBufIndex < 2*vLumBufSize);
2751 assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
2752 assert(lastInLumBuf + 1 - srcSliceY >= 0);
2753 //printf("%d %d\n", lumBufIndex, vLumBufSize);
2754 RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
2755 flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
2756 funnyYCode, c->srcFormat, formatConvBuffer,
2757 c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
2758 lastInLumBuf++;
2759 }
2760 while(lastInChrBuf < lastChrSrcY)
2761 {
2762 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
2763 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
2764 chrBufIndex++;
2765 assert(chrBufIndex < 2*vChrBufSize);
2766 assert(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH));
2767 assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
2768 //FIXME replace parameters through context struct (some at least)
2769
2770 if (!(isGray(srcFormat) || isGray(dstFormat)))
2771 RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
2772 flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
2773 funnyUVCode, c->srcFormat, formatConvBuffer,
2774 c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
2775 lastInChrBuf++;
2776 }
2777 //wrap buf index around to stay inside the ring buffer
2778 if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
2779 if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
2780 }
2781 else // not enough lines left in this slice -> load the rest in the buffer
2782 {
2783 /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
2784 firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
2785 lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
2786 vChrBufSize, vLumBufSize);*/
2787
2788 //Do horizontal scaling
2789 while(lastInLumBuf+1 < srcSliceY + srcSliceH)
2790 {
2791 uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
2792 lumBufIndex++;
2793 assert(lumBufIndex < 2*vLumBufSize);
2794 assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
2795 assert(lastInLumBuf + 1 - srcSliceY >= 0);
2796 RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
2797 flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
2798 funnyYCode, c->srcFormat, formatConvBuffer,
2799 c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
2800 lastInLumBuf++;
2801 }
2802 while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
2803 {
2804 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
2805 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
2806 chrBufIndex++;
2807 assert(chrBufIndex < 2*vChrBufSize);
2808 assert(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH);
2809 assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
2810
2811 if (!(isGray(srcFormat) || isGray(dstFormat)))
2812 RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
2813 flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
2814 funnyUVCode, c->srcFormat, formatConvBuffer,
2815 c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
2816 lastInChrBuf++;
2817 }
2818 //wrap buf index around to stay inside the ring buffer
2819 if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
2820 if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
2821 break; //we can't output a dstY line so let's try with the next slice
2822 }
2823
2824 #ifdef HAVE_MMX
2825 c->blueDither= ff_dither8[dstY&1];
2826 if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
2827 c->greenDither= ff_dither8[dstY&1];
2828 else
2829 c->greenDither= ff_dither4[dstY&1];
2830 c->redDither= ff_dither8[(dstY+1)&1];
2831 #endif
2832 if (dstY < dstH-2)
2833 {
2834 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
2835 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
2836 #ifdef HAVE_MMX
2837 int i;
2838 if (flags & SWS_ACCURATE_RND){
2839 int s= APCK_SIZE / 8;
2840 for (i=0; i<vLumFilterSize; i+=2){
2841 *(void**)&lumMmxFilter[s*i ]= lumSrcPtr[i ];
2842 *(void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)];
2843 lumMmxFilter[s*i+APCK_COEF/4 ]=
2844 lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ]
2845 + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
2846 }
2847 for (i=0; i<vChrFilterSize; i+=2){
2848 *(void**)&chrMmxFilter[s*i ]= chrSrcPtr[i ];
2849 *(void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrSrcPtr[i+(vChrFilterSize>1)];
2850 chrMmxFilter[s*i+APCK_COEF/4 ]=
2851 chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ]
2852 + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
2853 }
2854 }else{
2855 for (i=0; i<vLumFilterSize; i++)
2856 {
2857 lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
2858 lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
2859 lumMmxFilter[4*i+2]=
2860 lumMmxFilter[4*i+3]=
2861 ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
2862 }
2863 for (i=0; i<vChrFilterSize; i++)
2864 {
2865 chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
2866 chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
2867 chrMmxFilter[4*i+2]=
2868 chrMmxFilter[4*i+3]=
2869 ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
2870 }
2871 }
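            /* Layout of the per-tap MMX filter data built above: each
             * tap occupies 4 int32 slots, [0]/[1] holding the line
             * pointer split into low/high 32-bit halves (valid on both
             * 32- and 64-bit hosts) and [2]/[3] replicating the 16-bit
             * coefficient via *0x10001, which yields one 64-bit word
             * with the coefficient in all four lanes for pmulhw.  The
             * SWS_ACCURATE_RND branch instead packs two taps per APCK
             * block: two line pointers plus both coefficients in one
             * replicated dword. */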
2872 #endif
2873 if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
2874 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
2875 if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
2876 RENAME(yuv2nv12X)(c,
2877 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2878 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2879 dest, uDest, dstW, chrDstW, dstFormat);
2880 }
2881 else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12 like
2882 {
2883 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
2884 if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
2885 if (vLumFilterSize == 1 && vChrFilterSize == 1) // unscaled YV12
2886 {
2887 int16_t *lumBuf = lumPixBuf[0];
2888 int16_t *chrBuf= chrPixBuf[0];
2889 RENAME(yuv2yuv1)(c, lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
2890 }
2891 else //General YV12
2892 {
2893 RENAME(yuv2yuvX)(c,
2894 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2895 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2896 dest, uDest, vDest, dstW, chrDstW);
2897 }
2898 }
2899 else
2900 {
2901 assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
2902 assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
2903 if (vLumFilterSize == 1 && vChrFilterSize == 2) //unscaled RGB
2904 {
2905 int chrAlpha= vChrFilter[2*dstY+1];
2906 if(flags & SWS_FULL_CHR_H_INT){
2907 yuv2rgbXinC_full(c, //FIXME write a packed1_full function
2908 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2909 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2910 dest, dstW, dstY);
2911 }else{
2912 RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
2913 dest, dstW, chrAlpha, dstFormat, flags, dstY);
2914 }
2915 }
2916 else if (vLumFilterSize == 2 && vChrFilterSize == 2) //bilinear upscale RGB
2917 {
2918 int lumAlpha= vLumFilter[2*dstY+1];
2919 int chrAlpha= vChrFilter[2*dstY+1];
2920 lumMmxFilter[2]=
2921 lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
2922 chrMmxFilter[2]=
2923 chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
2924 if(flags & SWS_FULL_CHR_H_INT){
2925 yuv2rgbXinC_full(c, //FIXME write a packed2_full function
2926 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2927 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2928 dest, dstW, dstY);
2929 }else{
2930 RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
2931 dest, dstW, lumAlpha, chrAlpha, dstY);
2932 }
2933 }
2934 else //general RGB
2935 {
2936 if(flags & SWS_FULL_CHR_H_INT){
2937 yuv2rgbXinC_full(c,
2938 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2939 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2940 dest, dstW, dstY);
2941 }else{
2942 RENAME(yuv2packedX)(c,
2943 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2944 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2945 dest, dstW, dstY);
2946 }
2947 }
2948 }
2949 }
2950 else // hmm looks like we can't use MMX here without overwriting this array's tail
2951 {
2952 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
2953 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
2954 if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
2955 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
2956 if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
2957 yuv2nv12XinC(
2958 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2959 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2960 dest, uDest, dstW, chrDstW, dstFormat);
2961 }
2962 else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12
2963 {
2964 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
2965 if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
2966 yuv2yuvXinC(
2967 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2968 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2969 dest, uDest, vDest, dstW, chrDstW);
2970 }
2971 else
2972 {
2973 assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
2974 assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
2975 if(flags & SWS_FULL_CHR_H_INT){
2976 yuv2rgbXinC_full(c,
2977 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2978 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2979 dest, dstW, dstY);
2980 }else{
2981 yuv2packedXinC(c,
2982 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
2983 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2984 dest, dstW, dstY);
2985 }
2986 }
2987 }
2988 }
2989
2990 #ifdef HAVE_MMX
2991 __asm__ volatile(SFENCE:::"memory");
2992 __asm__ volatile(EMMS:::"memory");
2993 #endif
2994 /* store changed local vars back in the context */
2995 c->dstY= dstY;
2996 c->lumBufIndex= lumBufIndex;
2997 c->chrBufIndex= chrBufIndex;
2998 c->lastInLumBuf= lastInLumBuf;
2999 c->lastInChrBuf= lastInChrBuf;
3000
3001 return dstY - lastDstY;
3002 }