/*
 * GMC (Global Motion Compensation)
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "../dsputil.h"

#include "dsputil_altivec.h"
/*
  altivec-enhanced gmc1. ATM this code assumes stride is a multiple of 8,
  to preserve proper dst alignment.
*/
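/*
  For reference, each output pixel is a bilinear blend of four neighbouring
  source pixels with 1/16-pel weights derived from (x16, y16), exactly as in
  the scalar reference branch below:

      A = (16 - x16) * (16 - y16)
      B = (     x16) * (16 - y16)
      C = (16 - x16) * (     y16)
      D = (     x16) * (     y16)

      dst[i] = (A*src[i] + B*src[i+1] + C*src[stride+i] + D*src[stride+i+1] + rounder) >> 8

  Since A + B + C + D == 256, the final >> 8 brings the weighted sum back
  into the 0..255 pixel range.
*/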
void gmc1_altivec(UINT8 *dst /* align 8 */, UINT8 *src /* align1 */, int stride, int h, int x16, int y16, int rounder)
{
    POWERPC_TBL_DECLARE(altivec_gmc1_num, h == 8);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    const int A=(16-x16)*(16-y16);
    const int B=(   x16)*(16-y16);
    const int C=(16-x16)*(   y16);
    const int D=(   x16)*(   y16);
    int i;

    POWERPC_TBL_START_COUNT(altivec_gmc1_num, h == 8);

    for(i=0; i<h; i++)
    {
        dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8;
        dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8;
        dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8;
        dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8;
        dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8;
        dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8;
        dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8;
        dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8;
        dst += stride;
        src += stride;
    }

    POWERPC_TBL_STOP_COUNT(altivec_gmc1_num, h == 8);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    const unsigned short __attribute__ ((aligned(16))) rounder_a[8] =
      {rounder, rounder, rounder, rounder,
       rounder, rounder, rounder, rounder};
    const unsigned short __attribute__ ((aligned(16))) ABCD[8] =
      {
        (16-x16)*(16-y16), /* A */
        (   x16)*(16-y16), /* B */
        (16-x16)*(   y16), /* C */
        (   x16)*(   y16), /* D */
        0, 0, 0, 0         /* padding */
      };
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;
    POWERPC_TBL_START_COUNT(altivec_gmc1_num, h == 8);
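    // load all four 16-bit weights (plus padding) with one 16-byte load,
    // then broadcast each weight across the eight lanes of its own vector.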
    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_ld(0, (unsigned short*)rounder_a);
    // we'll be able to pick up our 9 char elements
    // at src from those 32 bytes.
    // we load the first batch here, as inside the loop
    // we can reuse 'src+stride' from one iteration
    // as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F)
    { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    }
    else
    {
        srcvB = src_1;
    }
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);
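    // note: vec_mergeh with the zero vector interleaves a 0x00 byte in front of
    // each of the 8 low source bytes, i.e. it zero-extends them into the eight
    // unsigned 16-bit lanes that vec_mladd operates on inside the loop.
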
    for(i=0; i<h; i++)
    {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);
        // we'll be able to pick up our 9 char elements
        // at src + stride from those 32 bytes,
        // then reuse the resulting 2 vectors srcvC and srcvD
        // as the next srcvA and srcvB.
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F)
        { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        }
        else
        {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        // OK, now we (finally) do the math :-)
        // those four instructions replace 32 int muls & 32 int adds.
        // isn't AltiVec nice?
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);
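        // each 16-bit lane of tempD now holds
        // A*src[i] + B*src[i+1] + C*src[stride+i] + D*src[stride+i+1] + rounder.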
        // carry the current line over as the next iteration's top line.
        srcvA = srcvC;
        srcvB = srcvD;

        tempD = vec_sr(tempD, vcsr8);
        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);
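        // the 8 result bytes sit in the low half of dstv2; depending on whether
        // dst falls on the low or high half of its 16-byte line, merge them with
        // the untouched half of the previously loaded dstv before storing.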
        if (dst_odd)
        {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
        }
        else
        {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
        }

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }

    POWERPC_TBL_STOP_COUNT(altivec_gmc1_num, h == 8);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
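
/*
  For context: this routine is meant to be selected at init time through the
  DSP function table. A minimal sketch, assuming a capability check and field
  names along these lines (both are assumptions, not defined in this file):

      if (has_altivec()) {
          c->gmc1 = gmc1_altivec;   // c: the DSPContext being initialized
      }
*/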