/*
 * Alpha optimized DSP utils
 * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "asm.h"
#include "../dsputil.h"

void get_pixels_mvi(DCTELEM *restrict block,
                    const uint8_t *restrict pixels, int line_size)
{
    int h = 8;

    do {
        uint64_t p;

        p = ldq(pixels);
        stq(unpkbw(p),       block);
        stq(unpkbw(p >> 32), block + 4);

        pixels += line_size;
        block += 8;
    } while (--h);
}
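
/* An illustrative scalar sketch of what get_pixels_mvi computes: unpkbw
 * zero-extends four bytes into four 16-bit lanes, so each row of eight
 * pixels is widened to eight DCTELEMs.  Kept out of the build, for
 * reference only. */
#if 0
static void get_pixels_ref(DCTELEM *restrict block,
                           const uint8_t *restrict pixels, int line_size)
{
    int i, j;

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[j] = pixels[j];
        pixels += line_size;
        block += 8;
    }
}
#endif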

void diff_pixels_mvi(DCTELEM *block, const uint8_t *s1, const uint8_t *s2,
                     int stride)
{
    int h = 8;
    uint64_t mask = 0x4040;

    mask |= mask << 16;
    mask |= mask << 32;
    do {
        uint64_t x, y, c, d, a;
        uint64_t signs;

        x = ldq(s1);
        y = ldq(s2);
        c = cmpbge(x, y);
        d = x - y;
        a = zap(mask, c); /* We use 0x4040404040404040 here... */
        d += 4 * a;       /* ...so we can use s4addq here.     */
        signs = zap(-1, c);

        stq(unpkbw(d)       | (unpkbw(signs)       << 8), block);
        stq(unpkbw(d >> 32) | (unpkbw(signs >> 32) << 8), block + 4);

        s1 += stride;
        s2 += stride;
        block += 8;
    } while (--h);
}
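
/* The trick above: d = x - y is a full 64-bit subtraction, so bytes
 * where x < y borrow from the lane above.  cmpbge records exactly the
 * borrow-free bytes, zap leaves 0x40 at every borrowing byte, and
 * d += 4 * a then adds 0x100 at each such byte, handing the borrowed
 * bit back to the lane above.  The same cmpbge mask yields 0xff sign
 * bytes for the unpkbw merge, producing sign-extended 16-bit
 * differences.  An illustrative scalar sketch of the result (reference
 * only, not built): */
#if 0
static void diff_pixels_ref(DCTELEM *block, const uint8_t *s1,
                            const uint8_t *s2, int stride)
{
    int i, j;

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[j] = s1[j] - s2[j];
        s1 += stride;
        s2 += stride;
        block += 8;
    }
}
#endif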

static inline uint64_t avg2(uint64_t a, uint64_t b)
{
    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}
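
/* avg2 computes the per-byte rounding-up average (a + b + 1) >> 1.
 * This follows from a + b == (a | b) + (a & b); the BYTE_VEC(0xfe)
 * mask keeps the shift from pulling bits across byte lanes. */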

static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
{
    uint64_t r1 = ((l1 & ~BYTE_VEC(0x03)) >> 2)
                + ((l2 & ~BYTE_VEC(0x03)) >> 2)
                + ((l3 & ~BYTE_VEC(0x03)) >> 2)
                + ((l4 & ~BYTE_VEC(0x03)) >> 2);
    uint64_t r2 = ((  (l1 & BYTE_VEC(0x03))
                    + (l2 & BYTE_VEC(0x03))
                    + (l3 & BYTE_VEC(0x03))
                    + (l4 & BYTE_VEC(0x03))
                    + BYTE_VEC(0x02)) >> 2) & BYTE_VEC(0x03);
    return r1 + r2;
}
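
/* avg4 computes the per-byte (l1 + l2 + l3 + l4 + 2) >> 2.  The top
 * six bits of each byte are pre-shifted and summed (four such terms
 * cannot carry out of a lane), the low two bits are summed and rounded
 * separately, and the two partial sums are recombined. */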

int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    if ((size_t) pix2 & 0x7) {
        /* works only when pix2 is actually unaligned */
        do { /* do 8 pixels at a time */
            uint64_t p1, p2;

            p1 = ldq(pix1);
            p2 = uldq(pix2);
            result += perr(p1, p2);

            pix1 += line_size;
            pix2 += line_size;
        } while (--h);
    } else {
        do {
            uint64_t p1, p2;

            p1 = ldq(pix1);
            p2 = ldq(pix2);
            result += perr(p1, p2);

            pix1 += line_size;
            pix2 += line_size;
        } while (--h);
    }

    return result;
}
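
/* perr is the MVI pixel-error instruction: it sums the absolute
 * differences of the eight byte pairs in its operands.  An
 * illustrative scalar sketch of pix_abs8x8_mvi (reference only, not
 * built): */
#if 0
static int pix_abs8x8_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;
    int i;

    do {
        for (i = 0; i < 8; i++)
            result += pix1[i] > pix2[i] ? pix1[i] - pix2[i]
                                        : pix2[i] - pix1[i];
        pix1 += line_size;
        pix2 += line_size;
    } while (--h);

    return result;
}
#endif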

#if 0 /* now done in assembly */
int pix_abs16x16_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
{
    int result = 0;
    int h = 16;

    if ((size_t) pix2 & 0x7) {
        /* works only when pix2 is actually unaligned */
        do { /* do 16 pixels at a time */
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t t;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            t    = ldq_u(pix2 + 8);
            p2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
            p2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
    } else {
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            p2_l = ldq(pix2);
            p2_r = ldq(pix2 + 8);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
    }

    return result;
}
#endif

int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;
    uint64_t disalign = (size_t) pix2 & 0x7;

    switch (disalign) {
    case 0:
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq(pix2);
            r    = ldq(pix2 + 8);
            p2_l = avg2(l, (l >> 8) | ((uint64_t) r << 56));
            p2_r = avg2(r, (r >> 8) | ((uint64_t) pix2[16] << 56));
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    case 7:
        /* |.......l|lllllllr|rrrrrrr*|
           This case is special because disalign1 would be 8, which
           gets treated as 0 by extqh.  At least it is a bit faster
           that way :) */
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, m, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq_u(pix2);
            m    = ldq_u(pix2 + 8);
            r    = ldq_u(pix2 + 16);
            p2_l = avg2(extql(l, disalign) | extqh(m, disalign), m);
            p2_r = avg2(extql(m, disalign) | extqh(r, disalign), r);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    default:
        do {
            uint64_t disalign1 = disalign + 1;
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, m, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq_u(pix2);
            m    = ldq_u(pix2 + 8);
            r    = ldq_u(pix2 + 16);
            p2_l = avg2(extql(l, disalign)  | extqh(m, disalign),
                        extql(l, disalign1) | extqh(m, disalign1));
            p2_r = avg2(extql(m, disalign)  | extqh(r, disalign),
                        extql(m, disalign1) | extqh(r, disalign1));
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    }
    return result;
}
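
/* Horizontal half-pel SAD compares pix1 against pix2 averaged with its
 * right neighbour; the unaligned cases splice three quadwords with
 * extql/extqh, and disalign + 1 extracts the bytes shifted one to the
 * right.  An illustrative scalar sketch of the whole routine
 * (reference only, not built): */
#if 0
static int pix_abs16x16_x2_ref(uint8_t *pix1, uint8_t *pix2,
                               int line_size, int h)
{
    int result = 0;
    int i;

    do {
        for (i = 0; i < 16; i++) {
            int p = (pix2[i] + pix2[i + 1] + 1) >> 1;
            result += p > pix1[i] ? p - pix1[i] : pix1[i] - p;
        }
        pix1 += line_size;
        pix2 += line_size;
    } while (--h);

    return result;
}
#endif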

int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    if ((size_t) pix2 & 0x7) {
        uint64_t t, p2_l, p2_r;
        t    = ldq_u(pix2 + 8);
        p2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
        p2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);

        do {
            uint64_t p1_l, p1_r, np2_l, np2_r;
            uint64_t t;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            pix2 += line_size;
            t     = ldq_u(pix2 + 8);
            np2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
            np2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);

            result += perr(p1_l, avg2(p2_l, np2_l))
                    + perr(p1_r, avg2(p2_r, np2_r));

            pix1 += line_size;
            p2_l = np2_l;
            p2_r = np2_r;
        } while (--h);
    } else {
        uint64_t p2_l, p2_r;
        p2_l = ldq(pix2);
        p2_r = ldq(pix2 + 8);
        do {
            uint64_t p1_l, p1_r, np2_l, np2_r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            pix2 += line_size;
            np2_l = ldq(pix2);
            np2_r = ldq(pix2 + 8);

            result += perr(p1_l, avg2(p2_l, np2_l))
                    + perr(p1_r, avg2(p2_r, np2_r));

            pix1 += line_size;
            p2_l = np2_l;
            p2_r = np2_r;
        } while (--h);
    }
    return result;
}
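
/* Vertical half-pel SAD: each source row is loaded only once; the
 * previous row is carried across iterations in p2_l/p2_r and averaged
 * with the freshly loaded one via avg2. */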

int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    uint64_t p1_l, p1_r;
    uint64_t p2_l, p2_r, p2_x;

    p1_l = ldq(pix1);
    p1_r = ldq(pix1 + 8);

    if ((size_t) pix2 & 0x7) { /* could be optimized a lot */
        p2_l = uldq(pix2);
        p2_r = uldq(pix2 + 8);
        p2_x = (uint64_t) pix2[16] << 56;
    } else {
        p2_l = ldq(pix2);
        p2_r = ldq(pix2 + 8);
        p2_x = ldq(pix2 + 16) << 56;
    }

    do {
        uint64_t np1_l, np1_r;
        uint64_t np2_l, np2_r, np2_x;

        pix1 += line_size;
        pix2 += line_size;

        np1_l = ldq(pix1);
        np1_r = ldq(pix1 + 8);

        if ((size_t) pix2 & 0x7) { /* could be optimized a lot */
            np2_l = uldq(pix2);
            np2_r = uldq(pix2 + 8);
            np2_x = (uint64_t) pix2[16] << 56;
        } else {
            np2_l = ldq(pix2);
            np2_r = ldq(pix2 + 8);
            np2_x = ldq(pix2 + 16) << 56;
        }

        result += perr(p1_l,
                       avg4( p2_l, ( p2_l >> 8) | ((uint64_t)  p2_r << 56),
                            np2_l, (np2_l >> 8) | ((uint64_t) np2_r << 56)))
                + perr(p1_r,
                       avg4( p2_r, ( p2_r >> 8) |  p2_x,
                            np2_r, (np2_r >> 8) | np2_x));

        p1_l = np1_l;
        p1_r = np1_r;
        p2_l = np2_l;
        p2_r = np2_r;
        p2_x = np2_x;
    } while (--h);

    return result;
}
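
/* Diagonal half-pel SAD: each 16x16 comparison needs a 17x17 source
 * area, so p2_x carries the 17th pixel of the current row pre-shifted
 * into the top byte lane, and avg4 blends the four neighbours.  An
 * illustrative scalar sketch (reference only, not built): */
#if 0
static int pix_abs16x16_xy2_ref(uint8_t *pix1, uint8_t *pix2,
                                int line_size, int h)
{
    int result = 0;
    int i;

    do {
        for (i = 0; i < 16; i++) {
            int p = (pix2[i] + pix2[i + 1]
                     + pix2[line_size + i] + pix2[line_size + i + 1] + 2) >> 2;
            result += p > pix1[i] ? p - pix1[i] : pix1[i] - p;
        }
        pix1 += line_size;
        pix2 += line_size;
    } while (--h);

    return result;
}
#endif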