more logic behavior if the alternative deblock filters are used (turning an alt filter...
[libav.git] / postproc / swscale_template.c
CommitLineData
31190492
A
1
2// Software scaling and colorspace conversion routines for MPlayer
3
afa569af 4// Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
783e9cc9
MN
5// current version mostly by Michael Niedermayer (michaelni@gmx.at)
6
d3f41512
MN
7#include <inttypes.h>
8#include "../config.h"
9
783e9cc9 10//#undef HAVE_MMX2
d3f41512 11//#undef HAVE_MMX
783e9cc9
MN
12//#undef ARCH_X86
13#define DITHER16BPP
0f25d72b 14//#define ALT_ERROR
d3f41512
MN
15
16#define RET 0xC3 //near return opcode
783e9cc9
MN
17/*
18NOTES
d3f41512 19
783e9cc9 20known BUGS with known cause (no bugreports please!)
783e9cc9 21code reads 1 sample too much (might cause a sig11)
31190492 22
783e9cc9
MN
23TODO
24check alignment of everything
25*/
31190492 26
d3f41512
MN
27static uint64_t yCoeff= 0x2568256825682568LL;
28static uint64_t ubCoeff= 0x3343334333433343LL;
29static uint64_t vrCoeff= 0x40cf40cf40cf40cfLL;
30static uint64_t ugCoeff= 0xE5E2E5E2E5E2E5E2LL;
31static uint64_t vgCoeff= 0xF36EF36EF36EF36ELL;
32static uint64_t w80= 0x0080008000800080LL;
33static uint64_t w10= 0x0010001000100010LL;
d9cf0d33
MN
34static uint64_t bm00000111=0x0000000000FFFFFFLL;
35static uint64_t bm11111000=0xFFFFFFFFFF000000LL;
d3f41512
MN
36
37static uint64_t b16Dither= 0x0004000400040004LL;
38static uint64_t b16Dither1=0x0004000400040004LL;
39static uint64_t b16Dither2=0x0602060206020602LL;
40static uint64_t g16Dither= 0x0002000200020002LL;
41static uint64_t g16Dither1=0x0002000200020002LL;
42static uint64_t g16Dither2=0x0301030103010301LL;
43
44static uint64_t b16Mask= 0x001F001F001F001FLL;
45static uint64_t g16Mask= 0x07E007E007E007E0LL;
46static uint64_t r16Mask= 0xF800F800F800F800LL;
47static uint64_t temp0;
48
783e9cc9
MN
49
50// temporary storage for 4 yuv lines:
51// 16bit for now (mmx likes it more compact)
52static uint16_t pix_buf_y[4][2048];
53static uint16_t pix_buf_uv[2][2048*2];
54
55// clipping helper table for C implementations:
56static unsigned char clip_table[768];
57
58// yuv->rgb conversion tables:
59static int yuvtab_2568[256];
60static int yuvtab_3343[256];
61static int yuvtab_0c92[256];
62static int yuvtab_1a1e[256];
63static int yuvtab_40cf[256];
64
65
d3f41512
MN
66static uint8_t funnyYCode[10000];
67static uint8_t funnyUVCode[10000];
68
69
31190492
A
70// *** bilinear scaling and yuv->rgb conversion of yv12 slices:
71// *** Note: it's called multiple times while decoding a frame, first time y==0
72// *** Designed to upscale, but may work for downscale too.
44f9179b 73// s_xinc = (src_width << 16) / dst_width
31190492
A
74// s_yinc = (src_height << 16) / dst_height
75void SwScale_YV12slice_brg24(unsigned char* srcptr[],int stride[], int y, int h,
76 unsigned char* dstptr, int dststride, int dstw, int dstbpp,
77 unsigned int s_xinc,unsigned int s_yinc){
78
79// scaling factors:
80//static int s_yinc=(vo_dga_src_height<<16)/vo_dga_vp_height;
81//static int s_xinc=(vo_dga_src_width<<8)/vo_dga_vp_width;
82
783e9cc9 83unsigned int s_xinc2;
31190492 84
783e9cc9 85static int s_srcypos; // points to the dst Pixels center in the source (0 is the center of pixel 0,0 in src)
31190492 86static int s_ypos;
783e9cc9
MN
87
88// last horzontally interpolated lines, used to avoid unnecessary calculations
31190492 89static int s_last_ypos;
783e9cc9
MN
90static int s_last_y1pos;
91
d3f41512
MN
92static int static_dstw;
93
94#ifdef HAVE_MMX2
783e9cc9 95// used to detect a horizontal size change
d3f41512
MN
96static int old_dstw= -1;
97static int old_s_xinc= -1;
783e9cc9 98
d3f41512 99#endif
0f25d72b 100int canMMX2BeUsed=0;
0f25d72b 101int srcWidth= (dstw*s_xinc + 0x8000)>>16;
31190492 102
d3fda508 103#ifdef HAVE_MMX2
0f25d72b 104canMMX2BeUsed= (s_xinc <= 0x10000 && (dstw&31)==0 && (srcWidth&15)==0) ? 1 : 0;
d3fda508
MN
105#endif
106
0f25d72b
MN
107// match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
108// n-2 is the last chrominance sample available
109// FIXME this is not perfect, but noone shuld notice the difference, the more correct variant
110// would be like the vertical one, but that would require some special code for the
111// first and last pixel
112if(canMMX2BeUsed) s_xinc+= 20;
113else s_xinc = ((srcWidth-2)<<16)/(dstw-2) - 20;
114s_xinc2=s_xinc>>1;
115
b3a134b6
MN
116 // force calculation of the horizontal interpolation of the first line
117 s_last_ypos=-99;
118 s_last_y1pos=-99;
119
31190492 120 if(y==0){
84adc106 121 s_srcypos=-0x8000;
783e9cc9 122 s_ypos=0;
d3f41512
MN
123#ifdef HAVE_MMX2
124// cant downscale !!!
783e9cc9 125 if((old_s_xinc != s_xinc || old_dstw!=dstw) && canMMX2BeUsed)
d3f41512
MN
126 {
127 uint8_t *fragment;
128 int imm8OfPShufW1;
129 int imm8OfPShufW2;
130 int fragmentLength;
131
132 int xpos, xx, xalpha, i;
133
134 old_s_xinc= s_xinc;
135 old_dstw= dstw;
136
137 static_dstw= dstw;
138
139 // create an optimized horizontal scaling routine
140
141 //code fragment
142
d3f41512
MN
143 asm volatile(
144 "jmp 9f \n\t"
145 // Begin
146 "0: \n\t"
783e9cc9 147 "movq (%%esi), %%mm0 \n\t" //FIXME Alignment
d3f41512
MN
148 "movq %%mm0, %%mm1 \n\t"
149 "psrlq $8, %%mm0 \n\t"
150 "punpcklbw %%mm7, %%mm1 \n\t"
783e9cc9 151 "movq %%mm2, %%mm3 \n\t"
d3f41512 152 "punpcklbw %%mm7, %%mm0 \n\t"
783e9cc9 153 "addw %%bx, %%cx \n\t" //2*xalpha += (4*s_xinc)&0xFFFF
d3f41512
MN
154 "pshufw $0xFF, %%mm1, %%mm1 \n\t"
155 "1: \n\t"
783e9cc9 156 "adcl %%edx, %%esi \n\t" //xx+= (4*s_xinc)>>16 + carry
d3f41512
MN
157 "pshufw $0xFF, %%mm0, %%mm0 \n\t"
158 "2: \n\t"
783e9cc9 159 "psrlw $9, %%mm3 \n\t"
d3f41512 160 "psubw %%mm1, %%mm0 \n\t"
783e9cc9
MN
161 "pmullw %%mm3, %%mm0 \n\t"
162 "paddw %%mm6, %%mm2 \n\t" // 2*alpha += xpos&0xFFFF
d3f41512
MN
163 "psllw $7, %%mm1 \n\t"
164 "paddw %%mm1, %%mm0 \n\t"
d3f41512 165
783e9cc9 166 "movq %%mm0, (%%edi, %%eax) \n\t"
d3f41512
MN
167
168 "addl $8, %%eax \n\t"
169 // End
170 "9: \n\t"
171// "int $3\n\t"
172 "leal 0b, %0 \n\t"
173 "leal 1b, %1 \n\t"
174 "leal 2b, %2 \n\t"
175 "decl %1 \n\t"
176 "decl %2 \n\t"
177 "subl %0, %1 \n\t"
178 "subl %0, %2 \n\t"
179 "leal 9b, %3 \n\t"
180 "subl %0, %3 \n\t"
181 :"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
182 "=r" (fragmentLength)
183 );
184
0f25d72b 185 xpos= 0; //s_xinc/2 - 0x8000; // difference between pixel centers
783e9cc9
MN
186
187 /* choose xinc so that all 8 parts fit exactly
188 Note: we cannot use just 1 part because it would not fit in the code cache */
0f25d72b 189// s_xinc2_diff= -((((s_xinc2*(dstw/8))&0xFFFF))/(dstw/8))-10;
783e9cc9
MN
190// s_xinc_diff= -((((s_xinc*(dstw/8))&0xFFFF))/(dstw/8));
191#ifdef ALT_ERROR
0f25d72b 192// s_xinc2_diff+= ((0x10000/(dstw/8)));
783e9cc9 193#endif
0f25d72b 194// s_xinc_diff= s_xinc2_diff*2;
783e9cc9 195
0f25d72b
MN
196// s_xinc2+= s_xinc2_diff;
197// s_xinc+= s_xinc_diff;
d3fda508 198
0f25d72b 199// old_s_xinc= s_xinc;
d3fda508 200
d3f41512
MN
201 for(i=0; i<dstw/8; i++)
202 {
783e9cc9 203 int xx=xpos>>16;
d3f41512
MN
204
205 if((i&3) == 0)
206 {
207 int a=0;
783e9cc9
MN
208 int b=((xpos+s_xinc)>>16) - xx;
209 int c=((xpos+s_xinc*2)>>16) - xx;
210 int d=((xpos+s_xinc*3)>>16) - xx;
d3f41512
MN
211
212 memcpy(funnyYCode + fragmentLength*i/4, fragment, fragmentLength);
213
214 funnyYCode[fragmentLength*i/4 + imm8OfPShufW1]=
215 funnyYCode[fragmentLength*i/4 + imm8OfPShufW2]=
216 a | (b<<2) | (c<<4) | (d<<6);
217
218 funnyYCode[fragmentLength*(i+4)/4]= RET;
219 }
220 xpos+=s_xinc;
221 }
222
0f25d72b 223 xpos= 0; //s_xinc2/2 - 0x10000; // difference between centers of chrom samples
d3f41512
MN
224 for(i=0; i<dstw/8; i++)
225 {
783e9cc9 226 int xx=xpos>>16;
d3f41512
MN
227
228 if((i&3) == 0)
229 {
230 int a=0;
783e9cc9
MN
231 int b=((xpos+s_xinc2)>>16) - xx;
232 int c=((xpos+s_xinc2*2)>>16) - xx;
233 int d=((xpos+s_xinc2*3)>>16) - xx;
d3f41512
MN
234
235 memcpy(funnyUVCode + fragmentLength*i/4, fragment, fragmentLength);
236
237 funnyUVCode[fragmentLength*i/4 + imm8OfPShufW1]=
238 funnyUVCode[fragmentLength*i/4 + imm8OfPShufW2]=
239 a | (b<<2) | (c<<4) | (d<<6);
240
241 funnyUVCode[fragmentLength*(i+4)/4]= RET;
242 }
243 xpos+=s_xinc2;
244 }
245// funnyCode[0]= RET;
d3f41512 246 }
783e9cc9 247
783e9cc9 248#endif // HAVE_MMX2
31190492 249 } // reset counters
d3f41512 250
d3fda508 251
31190492
A
252 while(1){
253 unsigned char *dest=dstptr+dststride*s_ypos;
783e9cc9
MN
254 int y0=(s_srcypos + 0xFFFF)>>16; // first luminance source line number below the dst line
255 // points to the dst Pixels center in the source (0 is the center of pixel 0,0 in src)
256 int srcuvpos= s_srcypos + s_yinc/2 - 0x8000;
257 int y1=(srcuvpos + 0x1FFFF)>>17; // first chrominance source line number below the dst line
cd410226 258 int yalpha=((s_srcypos-1)&0xFFFF)>>7;
d3f41512 259 int yalpha1=yalpha^511;
cd410226 260 int uvalpha=((srcuvpos-1)&0x1FFFF)>>8;
d3f41512 261 int uvalpha1=uvalpha^511;
783e9cc9
MN
262 uint16_t *buf0=pix_buf_y[y0&1]; // top line of the interpolated slice
263 uint16_t *buf1=pix_buf_y[((y0+1)&1)]; // bottom line of the interpolated slice
264 uint16_t *uvbuf0=pix_buf_uv[y1&1]; // top line of the interpolated slice
265 uint16_t *uvbuf1=pix_buf_uv[(y1+1)&1]; // bottom line of the interpolated slice
31190492
A
266 int i;
267
783e9cc9
MN
268 // if this is before the first line than use only the first src line
269 if(y0==0) buf0= buf1;
270 if(y1==0) uvbuf0= uvbuf1; // yes we do have to check this, its not the same as y0==0
271
272 if(y0>=y+h) break; // FIXME wrong, skips last lines, but they are dupliactes anyway
273
274 // if this is after the last line than use only the last src line
275 if(y0>=y+h)
276 {
277 buf1= buf0;
278 s_last_ypos=y0;
279 }
280 if(y1>=(y+h)/2)
281 {
282 uvbuf1= uvbuf0;
283 s_last_y1pos=y1;
284 }
285
31190492
A
286
287 s_ypos++; s_srcypos+=s_yinc;
288
783e9cc9 289 //only interpolate the src line horizontally if we didnt do it allready
31190492
A
290 if(s_last_ypos!=y0){
291 unsigned char *src=srcptr[0]+(y0-y)*stride[0];
292 unsigned int xpos=0;
293 s_last_ypos=y0;
294 // *** horizontal scale Y line to temp buffer
783e9cc9 295#ifdef ARCH_X86
d3f41512 296
783e9cc9
MN
297#ifdef HAVE_MMX2
298 if(canMMX2BeUsed)
299 {
300 asm volatile(
301 "pxor %%mm7, %%mm7 \n\t"
302 "pxor %%mm2, %%mm2 \n\t" // 2*xalpha
303 "movd %5, %%mm6 \n\t" // s_xinc&0xFFFF
304 "punpcklwd %%mm6, %%mm6 \n\t"
305 "punpcklwd %%mm6, %%mm6 \n\t"
306 "movq %%mm6, %%mm2 \n\t"
307 "psllq $16, %%mm2 \n\t"
308 "paddw %%mm6, %%mm2 \n\t"
309 "psllq $16, %%mm2 \n\t"
310 "paddw %%mm6, %%mm2 \n\t"
311 "psllq $16, %%mm2 \n\t" //0,t,2t,3t t=s_xinc&0xFF
312 "movq %%mm2, temp0 \n\t"
313 "movd %4, %%mm6 \n\t" //(s_xinc*4)&0xFFFF
314 "punpcklwd %%mm6, %%mm6 \n\t"
315 "punpcklwd %%mm6, %%mm6 \n\t"
316 "xorl %%eax, %%eax \n\t" // i
317 "movl %0, %%esi \n\t" // src
318 "movl %1, %%edi \n\t" // buf1
319 "movl %3, %%edx \n\t" // (s_xinc*4)>>16
320 "xorl %%ecx, %%ecx \n\t"
321 "xorl %%ebx, %%ebx \n\t"
322 "movw %4, %%bx \n\t" // (s_xinc*4)&0xFFFF
323 // "int $3\n\t"
324 "call funnyYCode \n\t"
325 "movq temp0, %%mm2 \n\t"
326 "xorl %%ecx, %%ecx \n\t"
327 "call funnyYCode \n\t"
328 "movq temp0, %%mm2 \n\t"
329 "xorl %%ecx, %%ecx \n\t"
330 "call funnyYCode \n\t"
331 "movq temp0, %%mm2 \n\t"
332 "xorl %%ecx, %%ecx \n\t"
333 "call funnyYCode \n\t"
334 "movq temp0, %%mm2 \n\t"
335 "xorl %%ecx, %%ecx \n\t"
336 "call funnyYCode \n\t"
337 "movq temp0, %%mm2 \n\t"
338 "xorl %%ecx, %%ecx \n\t"
339 "call funnyYCode \n\t"
340 "movq temp0, %%mm2 \n\t"
341 "xorl %%ecx, %%ecx \n\t"
342 "call funnyYCode \n\t"
343 "movq temp0, %%mm2 \n\t"
344 "xorl %%ecx, %%ecx \n\t"
345 "call funnyYCode \n\t"
346 :: "m" (src), "m" (buf1), "m" (dstw), "m" ((s_xinc*4)>>16),
347 "m" ((s_xinc*4)&0xFFFF), "m" (s_xinc&0xFFFF)
348 : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
349 );
0f25d72b 350 for(i=dstw-1; (i*s_xinc)>>16 >=srcWidth-1; i--) buf1[i] = src[srcWidth-1]*128;
783e9cc9
MN
351 }
352 else
353 {
354#endif
d3f41512
MN
355 //NO MMX just normal asm ... FIXME try/write funny MMX2 variant
356 //FIXME add prefetch
357 asm volatile(
358 "xorl %%eax, %%eax \n\t" // i
359 "xorl %%ebx, %%ebx \n\t" // xx
360 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
361 "1: \n\t"
362 "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
363 "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
364 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
365 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
783e9cc9 366 "shll $16, %%edi \n\t"
d3f41512 367 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
a6e972a2 368 "movl %1, %%edi \n\t"
783e9cc9 369 "shrl $9, %%esi \n\t"
a6e972a2 370 "movw %%si, (%%edi, %%eax, 2) \n\t"
783e9cc9 371 "addw %4, %%cx \n\t" //2*xalpha += s_xinc&0xFF
d3f41512
MN
372 "adcl %3, %%ebx \n\t" //xx+= s_xinc>>8 + carry
373
374 "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
375 "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
376 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
377 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
783e9cc9 378 "shll $16, %%edi \n\t"
d3f41512 379 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
a6e972a2 380 "movl %1, %%edi \n\t"
783e9cc9 381 "shrl $9, %%esi \n\t"
a6e972a2 382 "movw %%si, 2(%%edi, %%eax, 2) \n\t"
783e9cc9 383 "addw %4, %%cx \n\t" //2*xalpha += s_xinc&0xFF
d3f41512
MN
384 "adcl %3, %%ebx \n\t" //xx+= s_xinc>>8 + carry
385
386
387 "addl $2, %%eax \n\t"
388 "cmpl %2, %%eax \n\t"
389 " jb 1b \n\t"
390
391
783e9cc9 392 :: "r" (src), "m" (buf1), "m" (dstw), "m" (s_xinc>>16), "m" (s_xinc&0xFFFF)
d3f41512
MN
393 : "%eax", "%ebx", "%ecx", "%edi", "%esi"
394 );
783e9cc9
MN
395#ifdef HAVE_MMX2
396 } //if MMX2 cant be used
397#endif
d3f41512 398#else
31190492 399 for(i=0;i<dstw;i++){
783e9cc9
MN
400 register unsigned int xx=xpos>>16;
401 register unsigned int xalpha=(xpos&0xFFFF)>>9;
d3f41512 402 buf1[i]=(src[xx]*(xalpha^127)+src[xx+1]*xalpha);
31190492
A
403 xpos+=s_xinc;
404 }
d3f41512 405#endif
783e9cc9 406 }
31190492 407 // *** horizontal scale U and V lines to temp buffer
783e9cc9 408 if(s_last_y1pos!=y1){
31190492
A
409 unsigned char *src1=srcptr[1]+(y1-y/2)*stride[1];
410 unsigned char *src2=srcptr[2]+(y1-y/2)*stride[2];
783e9cc9
MN
411 int xpos=0;
412 s_last_y1pos= y1;
413#ifdef ARCH_X86
d3f41512 414#ifdef HAVE_MMX2
783e9cc9
MN
415 if(canMMX2BeUsed)
416 {
417 asm volatile(
d3f41512
MN
418 "pxor %%mm7, %%mm7 \n\t"
419 "pxor %%mm2, %%mm2 \n\t" // 2*xalpha
783e9cc9 420 "movd %5, %%mm6 \n\t" // s_xinc&0xFFFF
d3f41512
MN
421 "punpcklwd %%mm6, %%mm6 \n\t"
422 "punpcklwd %%mm6, %%mm6 \n\t"
423 "movq %%mm6, %%mm2 \n\t"
424 "psllq $16, %%mm2 \n\t"
783e9cc9 425 "paddw %%mm6, %%mm2 \n\t"
d3f41512 426 "psllq $16, %%mm2 \n\t"
783e9cc9
MN
427 "paddw %%mm6, %%mm2 \n\t"
428 "psllq $16, %%mm2 \n\t" //0,t,2t,3t t=s_xinc&0xFFFF
d3f41512 429 "movq %%mm2, temp0 \n\t"
783e9cc9 430 "movd %4, %%mm6 \n\t" //(s_xinc*4)&0xFFFF
d3f41512
MN
431 "punpcklwd %%mm6, %%mm6 \n\t"
432 "punpcklwd %%mm6, %%mm6 \n\t"
433 "xorl %%eax, %%eax \n\t" // i
d3f41512
MN
434 "movl %0, %%esi \n\t" // src
435 "movl %1, %%edi \n\t" // buf1
783e9cc9 436 "movl %3, %%edx \n\t" // (s_xinc*4)>>16
d3f41512 437 "xorl %%ecx, %%ecx \n\t"
783e9cc9
MN
438 "xorl %%ebx, %%ebx \n\t"
439 "movw %4, %%bx \n\t" // (s_xinc*4)&0xFFFF
440
d3f41512 441// "int $3\n\t"
783e9cc9
MN
442#define FUNNYUVCODE \
443 "call funnyUVCode \n\t"\
444 "movq temp0, %%mm2 \n\t"\
445 "xorl %%ecx, %%ecx \n\t"
446
447FUNNYUVCODE
448FUNNYUVCODE
449FUNNYUVCODE
450FUNNYUVCODE
451
452FUNNYUVCODE
453FUNNYUVCODE
454FUNNYUVCODE
455FUNNYUVCODE
456
457
d3f41512
MN
458
459 "xorl %%eax, %%eax \n\t" // i
d3f41512
MN
460 "movl %6, %%esi \n\t" // src
461 "movl %1, %%edi \n\t" // buf1
462 "addl $4096, %%edi \n\t"
463
783e9cc9
MN
464FUNNYUVCODE
465FUNNYUVCODE
466FUNNYUVCODE
467FUNNYUVCODE
468
469FUNNYUVCODE
470FUNNYUVCODE
471FUNNYUVCODE
472FUNNYUVCODE
473
474 :: "m" (src1), "m" (uvbuf1), "m" (dstw), "m" ((s_xinc2*4)>>16),
475 "m" ((s_xinc2*4)&0xFFFF), "m" (s_xinc2&0xFFFF), "m" (src2)
d3f41512
MN
476 : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
477 );
0f25d72b
MN
478 for(i=dstw-1; (i*s_xinc2)>>16 >=srcWidth/2-1; i--)
479 {
480 uvbuf1[i] = src1[srcWidth/2-1]*128;
481 uvbuf1[i+2048] = src2[srcWidth/2-1]*128;
482 }
783e9cc9
MN
483 }
484 else
485 {
486#endif
d3f41512
MN
487 asm volatile(
488 "xorl %%eax, %%eax \n\t" // i
489 "xorl %%ebx, %%ebx \n\t" // xx
490 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
491 "1: \n\t"
a6e972a2
MN
492 "movl %0, %%esi \n\t"
493 "movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
494 "movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
d3f41512
MN
495 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
496 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
783e9cc9 497 "shll $16, %%edi \n\t"
d3f41512
MN
498 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
499 "movl %1, %%edi \n\t"
783e9cc9 500 "shrl $9, %%esi \n\t"
d3f41512
MN
501 "movw %%si, (%%edi, %%eax, 2) \n\t"
502
503 "movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
504 "movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
505 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
506 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
783e9cc9 507 "shll $16, %%edi \n\t"
d3f41512
MN
508 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
509 "movl %1, %%edi \n\t"
783e9cc9 510 "shrl $9, %%esi \n\t"
d3f41512
MN
511 "movw %%si, 4096(%%edi, %%eax, 2)\n\t"
512
783e9cc9 513 "addw %4, %%cx \n\t" //2*xalpha += s_xinc&0xFF
d3f41512
MN
514 "adcl %3, %%ebx \n\t" //xx+= s_xinc>>8 + carry
515 "addl $1, %%eax \n\t"
516 "cmpl %2, %%eax \n\t"
517 " jb 1b \n\t"
518
519
783e9cc9 520 :: "m" (src1), "m" (uvbuf1), "m" (dstw), "m" (s_xinc2>>16), "m" (s_xinc2&0xFFFF),
d3f41512
MN
521 "r" (src2)
522 : "%eax", "%ebx", "%ecx", "%edi", "%esi"
523 );
783e9cc9
MN
524#ifdef HAVE_MMX2
525 } //if MMX2 cant be used
526#endif
d3f41512 527#else
783e9cc9
MN
528 for(i=0;i<dstw;i++){
529 register unsigned int xx=xpos>>16;
530 register unsigned int xalpha=(xpos&0xFFFF)>>9;
d3f41512
MN
531 uvbuf1[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
532 uvbuf1[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
31190492 533 xpos+=s_xinc2;
31190492 534 }
783e9cc9 535#endif
84adc106
MN
536 // if this is the line before the first line
537 if(s_srcypos == s_xinc - 0x8000)
538 {
539 s_srcypos= s_yinc/2 - 0x8000;
540 continue;
541 }
31190492
A
542 }
543
783e9cc9 544
31190492 545 // Note1: this code can be resticted to n*8 (or n*16) width lines to simplify optimization...
d3f41512 546 // Re: Note1: ok n*4 for now
31190492 547 // Note2: instead of using lookup tabs, mmx version could do the multiply...
d3f41512 548 // Re: Note2: yep
31190492 549 // Note3: maybe we should make separated 15/16, 24 and 32bpp version of this:
d3f41512
MN
550 // Re: done (32 & 16) and 16 has dithering :) but 16 is untested
551#ifdef HAVE_MMX
552 //FIXME write lq version with less uv ...
553 //FIXME reorder / optimize
e72c545c 554 if(dstbpp == 32)
d3f41512
MN
555 {
556 asm volatile(
557
558#define YSCALEYUV2RGB \
559 "pxor %%mm7, %%mm7 \n\t"\
560 "movd %6, %%mm6 \n\t" /*yalpha1*/\
561 "punpcklwd %%mm6, %%mm6 \n\t"\
562 "punpcklwd %%mm6, %%mm6 \n\t"\
563 "movd %7, %%mm5 \n\t" /*uvalpha1*/\
564 "punpcklwd %%mm5, %%mm5 \n\t"\
565 "punpcklwd %%mm5, %%mm5 \n\t"\
566 "xorl %%eax, %%eax \n\t"\
567 "1: \n\t"\
568 "movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
569 "movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
d3f41512
MN
570 "movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
571 "movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
783e9cc9 572 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
d3f41512 573 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
783e9cc9 574 "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
d3f41512 575 "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
783e9cc9
MN
576 "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>7*/\
577 "movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
d3f41512 578 "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>7*/\
783e9cc9
MN
579 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
580 "movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
d3f41512 581 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
783e9cc9
MN
582 "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
583 "psubw w10, %%mm1 \n\t" /* Y-16*/\
d3f41512 584 "psubw w80, %%mm3 \n\t" /* (U-128)*/\
783e9cc9 585 "psllw $3, %%mm1 \n\t" /* (y-16)*8*/\
d3f41512 586 "psllw $3, %%mm3 \n\t" /*(U-128)8*/\
783e9cc9
MN
587 "pmulhw yCoeff, %%mm1 \n\t"\
588\
d3f41512 589\
d3f41512 590 "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
783e9cc9
MN
591 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
592 "pmulhw ubCoeff, %%mm3 \n\t"\
d3f41512 593 "psraw $7, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>7*/\
783e9cc9 594 "pmulhw ugCoeff, %%mm2 \n\t"\
d3f41512
MN
595 "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
596 "psubw w80, %%mm0 \n\t" /* (V-128)*/\
597 "psllw $3, %%mm0 \n\t" /* (V-128)8*/\
598\
d3f41512
MN
599\
600 "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
601 "pmulhw vrCoeff, %%mm0 \n\t"\
783e9cc9
MN
602 "pmulhw vgCoeff, %%mm4 \n\t"\
603 "paddw %%mm1, %%mm3 \n\t" /* B*/\
d3f41512 604 "paddw %%mm1, %%mm0 \n\t" /* R*/\
783e9cc9 605 "packuswb %%mm3, %%mm3 \n\t"\
d3f41512 606\
783e9cc9 607 "packuswb %%mm0, %%mm0 \n\t"\
d3f41512
MN
608 "paddw %%mm4, %%mm2 \n\t"\
609 "paddw %%mm2, %%mm1 \n\t" /* G*/\
610\
d3f41512
MN
611 "packuswb %%mm1, %%mm1 \n\t"
612
613YSCALEYUV2RGB
614 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
615 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
616
617 "movq %%mm3, %%mm1 \n\t"
618 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
619 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
620#ifdef HAVE_MMX2
621 "movntq %%mm3, (%4, %%eax, 4) \n\t"
622 "movntq %%mm1, 8(%4, %%eax, 4) \n\t"
62ac0b01 623#else
d3f41512
MN
624 "movq %%mm3, (%4, %%eax, 4) \n\t"
625 "movq %%mm1, 8(%4, %%eax, 4) \n\t"
62ac0b01 626#endif
d3f41512
MN
627 "addl $4, %%eax \n\t"
628 "cmpl %5, %%eax \n\t"
629 " jb 1b \n\t"
630
631
632 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
633 "m" (yalpha1), "m" (uvalpha1)
634 : "%eax"
635 );
636 }
d9cf0d33
MN
637 else if(dstbpp==24)
638 {
639 asm volatile(
640
641YSCALEYUV2RGB
642
643 // lsb ... msb
644 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
645 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
646
647 "movq %%mm3, %%mm1 \n\t"
648 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
649 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
650
651 "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
652 "psrlq $8, %%mm3 \n\t" // GR0BGR00
653 "pand bm00000111, %%mm2 \n\t" // BGR00000
654 "pand bm11111000, %%mm3 \n\t" // 000BGR00
655 "por %%mm2, %%mm3 \n\t" // BGRBGR00
656 "movq %%mm1, %%mm2 \n\t"
657 "psllq $48, %%mm1 \n\t" // 000000BG
658 "por %%mm1, %%mm3 \n\t" // BGRBGRBG
659
660 "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
661 "psrld $16, %%mm2 \n\t" // R000R000
662 "psrlq $24, %%mm1 \n\t" // 0BGR0000
663 "por %%mm2, %%mm1 \n\t" // RBGRR000
664
665 "movl %4, %%ebx \n\t"
666 "addl %%eax, %%ebx \n\t"
667#ifdef HAVE_MMX2
668 //FIXME Alignment
669 "movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
670 "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
671#else
672 "movd %%mm3, (%%ebx, %%eax, 2) \n\t"
673 "psrlq $32, %%mm3 \n\t"
674 "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
675 "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
676#endif
677 "addl $4, %%eax \n\t"
678 "cmpl %5, %%eax \n\t"
679 " jb 1b \n\t"
680
681 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
682 "m" (yalpha1), "m" (uvalpha1)
683 : "%eax", "%ebx"
684 );
685 }
e72c545c 686 else if(dstbpp==16)
d3f41512
MN
687 {
688 asm volatile(
689
690YSCALEYUV2RGB
783e9cc9 691#ifdef DITHER16BPP
d3f41512
MN
692 "paddusb g16Dither, %%mm1 \n\t"
693 "paddusb b16Dither, %%mm0 \n\t"
694 "paddusb b16Dither, %%mm3 \n\t"
783e9cc9 695#endif
d3f41512
MN
696 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
697 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
698 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
699
700 "psrlw $3, %%mm3 \n\t"
701 "psllw $3, %%mm1 \n\t"
702 "psllw $8, %%mm0 \n\t"
703 "pand g16Mask, %%mm1 \n\t"
704 "pand r16Mask, %%mm0 \n\t"
705
706 "por %%mm3, %%mm1 \n\t"
707 "por %%mm1, %%mm0 \n\t"
708#ifdef HAVE_MMX2
709 "movntq %%mm0, (%4, %%eax, 2) \n\t"
710#else
711 "movq %%mm0, (%4, %%eax, 2) \n\t"
712#endif
713 "addl $4, %%eax \n\t"
714 "cmpl %5, %%eax \n\t"
715 " jb 1b \n\t"
716
717 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
718 "m" (yalpha1), "m" (uvalpha1)
719 : "%eax"
720 );
721 }
722#else
e72c545c 723 if(dstbpp==32 || dstbpp==24)
d3f41512
MN
724 {
725 for(i=0;i<dstw;i++){
726 // vertical linear interpolation && yuv2rgb in a single step:
727 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>16)];
728 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>16);
729 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>16);
730 dest[0]=clip_table[((Y + yuvtab_3343[U]) >>13)];
731 dest[1]=clip_table[((Y + yuvtab_0c92[V] + yuvtab_1a1e[U]) >>13)];
732 dest[2]=clip_table[((Y + yuvtab_40cf[V]) >>13)];
e72c545c 733 dest+=dstbpp>>3;
d3f41512
MN
734 }
735 }
d9cf0d33 736 else if(dstbpp==16)
d3f41512
MN
737 {
738 for(i=0;i<dstw;i++){
739 // vertical linear interpolation && yuv2rgb in a single step:
740 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>16)];
741 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>16);
742 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>16);
743
744 ((uint16_t*)dest)[0] =
745 (clip_table[((Y + yuvtab_3343[U]) >>13)]>>3) |
746 (clip_table[((Y + yuvtab_0c92[V] + yuvtab_1a1e[U]) >>13)]<<3)&0x07E0 |
747 (clip_table[((Y + yuvtab_40cf[V]) >>13)]<<8)&0xF800;
e72c545c 748 dest+=2;
d3f41512
MN
749 }
750 }
e72c545c 751 else if(dstbpp==15) //15bit FIXME how do i figure out if its 15 or 16?
d3f41512
MN
752 {
753 for(i=0;i<dstw;i++){
754 // vertical linear interpolation && yuv2rgb in a single step:
755 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>16)];
756 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>16);
757 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>16);
758
759 ((uint16_t*)dest)[0] =
760 (clip_table[((Y + yuvtab_3343[U]) >>13)]>>3) |
761 (clip_table[((Y + yuvtab_0c92[V] + yuvtab_1a1e[U]) >>13)]<<2)&0x03E0 |
762 (clip_table[((Y + yuvtab_40cf[V]) >>13)]<<7)&0x7C00;
e72c545c 763 dest+=2;
d3f41512
MN
764 }
765 }
766#endif
767
768 b16Dither= b16Dither1;
769 b16Dither1= b16Dither2;
770 b16Dither2= b16Dither;
771
772 g16Dither= g16Dither1;
773 g16Dither1= g16Dither2;
774 g16Dither2= g16Dither;
31190492
A
775 }
776
fffd2e0a
A
777#ifdef HAVE_3DNOW
778 asm volatile("femms");
779#elif defined (HAVE_MMX)
780 asm volatile("emms");
781#endif
31190492
A
782}
783
784
785void SwScale_Init(){
786 // generating tables:
787 int i;
788 for(i=0;i<256;i++){
789 clip_table[i]=0;
790 clip_table[i+256]=i;
791 clip_table[i+512]=255;
792 yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
793 yuvtab_3343[i]=0x3343*(i-128);
794 yuvtab_0c92[i]=-0x0c92*(i-128);
795 yuvtab_1a1e[i]=-0x1a1e*(i-128);
796 yuvtab_40cf[i]=0x40cf*(i-128);
797 }
798
799}