/*
 * MMX and SSE2 optimized snow DSP utils
 * Copyright (c) 2005-2006 Robert Edele <yartrebo@earthlink.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/snow.h"
#include "dsputil_mmx.h"

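/* The horizontal compose functions below undo the four lifting steps of
 * snow's integer 9/7 wavelet in place. A rough scalar sketch of what the
 * "Lift" blocks vectorize (exact edge handling lives in the lead-out
 * helpers in snow.h; W_AM..W_DS are the lifting constants defined there):
 *
 *     b[i]    -= (W_DM*(ref[i] + ref[i+1]) + W_DO) >> W_DS;       // Lift 0
 *     dst[i]  -=  b[i] + b[i+1];                                  // Lift 1
 *     b[i]    += (ref[i] + ref[i+1] + W_BO + 4*b[i]) >> W_BS;     // Lift 2
 *     temp[i]  = src[i] - ((-W_AM*(b[i] + b[i+1])) >> W_AS);      // Lift 3
 *
 * A final pass interleaves the low-pass half (b) with the high-pass half
 * (temp) back into b. */
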
void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){
    const int w2= (width+1)>>1;
    DECLARE_ALIGNED(16, IDWTELEM, temp)[width>>1];
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

    { // Lift 0
        IDWTELEM * const ref = b + w2 - 1;
        IDWTELEM b_0 = b[0]; // By letting b[0] be calculated twice (the first
        // time erroneously), the SSE2 loop can run one extra pass. The savings
        // in code and time are well worth having to store this value and
        // compute b[0] correctly afterwards.

        i = 0;
        __asm__ volatile(
            "pcmpeqd %%xmm7, %%xmm7 \n\t"
            "pcmpeqd %%xmm3, %%xmm3 \n\t"
            "psllw $1, %%xmm3 \n\t"
            "paddw %%xmm7, %%xmm3 \n\t"
            "psllw $13, %%xmm3 \n\t"
        ::);
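        /* The block above builds the constants for Lift 0: xmm7 holds -1 in
         * every word and xmm3 holds -3 << 13. Assuming the snow.h lifting
         * constants W_DM = 3, W_DO = 4, W_DS = 3, pmulhw by -3*2^13 computes
         * (-3*x) >> 3, so pre-adding -1 to ref[i]+ref[i+1] makes the loop
         * below evaluate b[i] - ((3*(ref[i]+ref[i+1]) + 4) >> 3) exactly. */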
        for(; i<w_l-15; i+=16){
            __asm__ volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 2(%1), %%xmm2 \n\t"
                "movdqu 18(%1), %%xmm6 \n\t"
                "paddw %%xmm1, %%xmm2 \n\t"
                "paddw %%xmm5, %%xmm6 \n\t"
                "paddw %%xmm7, %%xmm2 \n\t"
                "paddw %%xmm7, %%xmm6 \n\t"
                "pmulhw %%xmm3, %%xmm2 \n\t"
                "pmulhw %%xmm3, %%xmm6 \n\t"
                "paddw (%0), %%xmm2 \n\t"
                "paddw 16(%0), %%xmm6 \n\t"
                "movdqa %%xmm2, (%0) \n\t"
                "movdqa %%xmm6, 16(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
    }

    { // Lift 1
        IDWTELEM * const dst = b+w2;

        i = 0;
        for(; (((x86_reg)&dst[i]) & 0x1F) && i<w_r; i++){
            dst[i] = dst[i] - (b[i] + b[i + 1]);
        }
        for(; i<w_r-15; i+=16){
            __asm__ volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 2(%1), %%xmm2 \n\t"
                "movdqu 18(%1), %%xmm6 \n\t"
                "paddw %%xmm1, %%xmm2 \n\t"
                "paddw %%xmm5, %%xmm6 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "psubw %%xmm2, %%xmm0 \n\t"
                "psubw %%xmm6, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
                :: "r"(&dst[i]), "r"(&b[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        IDWTELEM * const ref = b+w2 - 1;
        IDWTELEM b_0 = b[0];

        i = 0;
        __asm__ volatile(
            "psllw $15, %%xmm7 \n\t"
            "pcmpeqw %%xmm6, %%xmm6 \n\t"
            "psrlw $13, %%xmm6 \n\t"
            "paddw %%xmm7, %%xmm6 \n\t"
        ::);
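        /* Constants for Lift 2: xmm7 = 0x8000 per word (the signed/unsigned
         * bias) and xmm6 = 0x8007. pavgw is an unsigned rounding average, so
         * biasing both inputs by 0x8000 (one with the +7 rounding term folded
         * in) and un-biasing afterwards yields (ref[i]+ref[i+1]+8) >> 1 on
         * signed words. Assuming W_BO = 8 and W_BS = 4 from snow.h, the loop
         * below computes b[i] += (ref[i] + ref[i+1] + 8 + 4*b[i]) >> 4.
         * Note that "psllw $15, %%xmm7" relies on xmm7 still holding the
         * all-ones pattern left there by the Lift 0 setup. */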
        for(; i<w_l-15; i+=16){
            __asm__ volatile(
                "movdqu (%1), %%xmm0 \n\t"
                "movdqu 16(%1), %%xmm4 \n\t"
                "movdqu 2(%1), %%xmm1 \n\t"
                "movdqu 18(%1), %%xmm5 \n\t" //FIXME try aligned reads and shifts
                "paddw %%xmm6, %%xmm0 \n\t"
                "paddw %%xmm6, %%xmm4 \n\t"
                "paddw %%xmm7, %%xmm1 \n\t"
                "paddw %%xmm7, %%xmm5 \n\t"
                "pavgw %%xmm1, %%xmm0 \n\t"
                "pavgw %%xmm5, %%xmm4 \n\t"
                "psubw %%xmm7, %%xmm0 \n\t"
                "psubw %%xmm7, %%xmm4 \n\t"
                "psraw $1, %%xmm0 \n\t"
                "psraw $1, %%xmm4 \n\t"
                "movdqa (%0), %%xmm1 \n\t"
                "movdqa 16(%0), %%xmm5 \n\t"
                "paddw %%xmm1, %%xmm0 \n\t"
                "paddw %%xmm5, %%xmm4 \n\t"
                "psraw $2, %%xmm0 \n\t"
                "psraw $2, %%xmm4 \n\t"
                "paddw %%xmm1, %%xmm0 \n\t"
                "paddw %%xmm5, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
        b[0] = b_0 + ((2 * ref[1] + W_BO-1 + 4 * b_0) >> W_BS);
    }

    { // Lift 3
        IDWTELEM * const src = b+w2;

        i = 0;
        for(; (((x86_reg)&temp[i]) & 0x1F) && i<w_r; i++){
            temp[i] = src[i] - ((-W_AM*(b[i] + b[i+1]))>>W_AS);
        }
        for(; i<w_r-15; i+=16){
            __asm__ volatile(
                "movdqu 2(%1), %%xmm2 \n\t"
                "movdqu 18(%1), %%xmm6 \n\t"
                "paddw (%1), %%xmm2 \n\t"
                "paddw 16(%1), %%xmm6 \n\t"
                "movdqu (%0), %%xmm0 \n\t"
                "movdqu 16(%0), %%xmm4 \n\t"
                "paddw %%xmm2, %%xmm0 \n\t"
                "paddw %%xmm6, %%xmm4 \n\t"
                "psraw $1, %%xmm2 \n\t"
                "psraw $1, %%xmm6 \n\t"
                "paddw %%xmm0, %%xmm2 \n\t"
                "paddw %%xmm4, %%xmm6 \n\t"
                "movdqa %%xmm2, (%2) \n\t"
                "movdqa %%xmm6, 16(%2) \n\t"
                :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS);
    }

    {
        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0x3E) != 0x3E; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
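        /* At this point b[0..w2-1] holds the low-pass samples and temp[] the
         * high-pass samples; the loops interleave them back into b as
         * b[2k] = b[k], b[2k+1] = temp[k], working downwards so no source
         * element is overwritten before it is read. The SIMD loop below emits
         * 64 output elements (32 low/high pairs) per iteration using
         * punpcklwd/punpckhwd. */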
        for (i-=62; i>=0; i-=64){
            __asm__ volatile(
                "movdqa (%1), %%xmm0 \n\t"
                "movdqa 16(%1), %%xmm2 \n\t"
                "movdqa 32(%1), %%xmm4 \n\t"
                "movdqa 48(%1), %%xmm6 \n\t"
                "movdqa (%1), %%xmm1 \n\t"
                "movdqa 16(%1), %%xmm3 \n\t"
                "movdqa 32(%1), %%xmm5 \n\t"
                "movdqa 48(%1), %%xmm7 \n\t"
                "punpcklwd (%2), %%xmm0 \n\t"
                "punpcklwd 16(%2), %%xmm2 \n\t"
                "punpcklwd 32(%2), %%xmm4 \n\t"
                "punpcklwd 48(%2), %%xmm6 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm2, 32(%0) \n\t"
                "movdqa %%xmm4, 64(%0) \n\t"
                "movdqa %%xmm6, 96(%0) \n\t"
                "punpckhwd (%2), %%xmm1 \n\t"
                "punpckhwd 16(%2), %%xmm3 \n\t"
                "punpckhwd 32(%2), %%xmm5 \n\t"
                "punpckhwd 48(%2), %%xmm7 \n\t"
                "movdqa %%xmm1, 16(%0) \n\t"
                "movdqa %%xmm3, 48(%0) \n\t"
                "movdqa %%xmm5, 80(%0) \n\t"
                "movdqa %%xmm7, 112(%0) \n\t"
                :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1])
                : "memory"
            );
        }
    }
}

void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width){
    const int w2= (width+1)>>1;
    IDWTELEM temp[width >> 1];
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

    { // Lift 0
        IDWTELEM * const ref = b + w2 - 1;

        i = 1;
        b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "pcmpeqw %%mm3, %%mm3 \n\t"
            "psllw $1, %%mm3 \n\t"
            "paddw %%mm7, %%mm3 \n\t"
            "psllw $13, %%mm3 \n\t"
        ::);
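        /* Same constant trick as in the SSE2 Lift 0 above: mm7 = -1 in every
         * word and mm3 = -3 << 13, so that, together with the -1 pre-add,
         * pmulhw yields the -((3*x + 4) >> 3) lifting term. */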
        for(; i<w_l-7; i+=8){
            __asm__ volatile(
                "movq (%1), %%mm2 \n\t"
                "movq 8(%1), %%mm6 \n\t"
                "paddw 2(%1), %%mm2 \n\t"
                "paddw 10(%1), %%mm6 \n\t"
                "paddw %%mm7, %%mm2 \n\t"
                "paddw %%mm7, %%mm6 \n\t"
                "pmulhw %%mm3, %%mm2 \n\t"
                "pmulhw %%mm3, %%mm6 \n\t"
                "paddw (%0), %%mm2 \n\t"
                "paddw 8(%0), %%mm6 \n\t"
                "movq %%mm2, (%0) \n\t"
                "movq %%mm6, 8(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
    }

    { // Lift 1
        IDWTELEM * const dst = b+w2;

        i = 0;
        for(; i<w_r-7; i+=8){
            __asm__ volatile(
                "movq (%1), %%mm2 \n\t"
                "movq 8(%1), %%mm6 \n\t"
                "paddw 2(%1), %%mm2 \n\t"
                "paddw 10(%1), %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "psubw %%mm2, %%mm0 \n\t"
                "psubw %%mm6, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
                :: "r"(&dst[i]), "r"(&b[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        IDWTELEM * const ref = b+w2 - 1;

        i = 1;
        b[0] = b[0] + (((2 * ref[1] + W_BO) + 4 * b[0]) >> W_BS);
        __asm__ volatile(
            "psllw $15, %%mm7 \n\t"
            "pcmpeqw %%mm6, %%mm6 \n\t"
            "psrlw $13, %%mm6 \n\t"
            "paddw %%mm7, %%mm6 \n\t"
        ::);
        for(; i<w_l-7; i+=8){
            __asm__ volatile(
                "movq (%1), %%mm0 \n\t"
                "movq 8(%1), %%mm4 \n\t"
                "movq 2(%1), %%mm1 \n\t"
                "movq 10(%1), %%mm5 \n\t"
                "paddw %%mm6, %%mm0 \n\t"
                "paddw %%mm6, %%mm4 \n\t"
                "paddw %%mm7, %%mm1 \n\t"
                "paddw %%mm7, %%mm5 \n\t"
                "pavgw %%mm1, %%mm0 \n\t"
                "pavgw %%mm5, %%mm4 \n\t"
                "psubw %%mm7, %%mm0 \n\t"
                "psubw %%mm7, %%mm4 \n\t"
                "psraw $1, %%mm0 \n\t"
                "psraw $1, %%mm4 \n\t"
                "movq (%0), %%mm1 \n\t"
                "movq 8(%0), %%mm5 \n\t"
                "paddw %%mm1, %%mm0 \n\t"
                "paddw %%mm5, %%mm4 \n\t"
                "psraw $2, %%mm0 \n\t"
                "psraw $2, %%mm4 \n\t"
                "paddw %%mm1, %%mm0 \n\t"
                "paddw %%mm5, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
    }

    { // Lift 3
        IDWTELEM * const src = b+w2;
        i = 0;

        for(; i<w_r-7; i+=8){
            __asm__ volatile(
                "movq 2(%1), %%mm2 \n\t"
                "movq 10(%1), %%mm6 \n\t"
                "paddw (%1), %%mm2 \n\t"
                "paddw 8(%1), %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "paddw %%mm2, %%mm0 \n\t"
                "paddw %%mm6, %%mm4 \n\t"
                "psraw $1, %%mm2 \n\t"
                "psraw $1, %%mm6 \n\t"
                "paddw %%mm0, %%mm2 \n\t"
                "paddw %%mm4, %%mm6 \n\t"
                "movq %%mm2, (%2) \n\t"
                "movq %%mm6, 8(%2) \n\t"
                :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS);
    }

    {
        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0x1E) != 0x1E; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=30; i>=0; i-=32){
            __asm__ volatile(
                "movq (%1), %%mm0 \n\t"
                "movq 8(%1), %%mm2 \n\t"
                "movq 16(%1), %%mm4 \n\t"
                "movq 24(%1), %%mm6 \n\t"
                "movq (%1), %%mm1 \n\t"
                "movq 8(%1), %%mm3 \n\t"
                "movq 16(%1), %%mm5 \n\t"
                "movq 24(%1), %%mm7 \n\t"
                "punpcklwd (%2), %%mm0 \n\t"
                "punpcklwd 8(%2), %%mm2 \n\t"
                "punpcklwd 16(%2), %%mm4 \n\t"
                "punpcklwd 24(%2), %%mm6 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm2, 16(%0) \n\t"
                "movq %%mm4, 32(%0) \n\t"
                "movq %%mm6, 48(%0) \n\t"
                "punpckhwd (%2), %%mm1 \n\t"
                "punpckhwd 8(%2), %%mm3 \n\t"
                "punpckhwd 16(%2), %%mm5 \n\t"
                "punpckhwd 24(%2), %%mm7 \n\t"
                "movq %%mm1, 8(%0) \n\t"
                "movq %%mm3, 24(%0) \n\t"
                "movq %%mm5, 40(%0) \n\t"
                "movq %%mm7, 56(%0) \n\t"
                :: "r"(&b[i]), "r"(&b[i>>1]), "r"(&temp[i>>1])
                : "memory"
            );
        }
    }
}

#if HAVE_7REGS
#define snow_vertical_compose_sse2_load_add(op,r,t0,t1,t2,t3)\
        ""op" ("r",%%"REG_d"), %%"t0" \n\t"\
        ""op" 16("r",%%"REG_d"), %%"t1" \n\t"\
        ""op" 32("r",%%"REG_d"), %%"t2" \n\t"\
        ""op" 48("r",%%"REG_d"), %%"t3" \n\t"

#define snow_vertical_compose_sse2_load(r,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_load_add("movdqa",r,t0,t1,t2,t3)

#define snow_vertical_compose_sse2_add(r,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_load_add("paddw",r,t0,t1,t2,t3)

#define snow_vertical_compose_r2r_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
        "psubw %%"s0", %%"t0" \n\t"\
        "psubw %%"s1", %%"t1" \n\t"\
        "psubw %%"s2", %%"t2" \n\t"\
        "psubw %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_sse2_store(w,s0,s1,s2,s3)\
        "movdqa %%"s0", ("w",%%"REG_d") \n\t"\
        "movdqa %%"s1", 16("w",%%"REG_d") \n\t"\
        "movdqa %%"s2", 32("w",%%"REG_d") \n\t"\
        "movdqa %%"s3", 48("w",%%"REG_d") \n\t"

#define snow_vertical_compose_sra(n,t0,t1,t2,t3)\
        "psraw $"n", %%"t0" \n\t"\
        "psraw $"n", %%"t1" \n\t"\
        "psraw $"n", %%"t2" \n\t"\
        "psraw $"n", %%"t3" \n\t"

#define snow_vertical_compose_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
        "paddw %%"s0", %%"t0" \n\t"\
        "paddw %%"s1", %%"t1" \n\t"\
        "paddw %%"s2", %%"t2" \n\t"\
        "paddw %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_r2r_pmulhw(s0,s1,s2,s3,t0,t1,t2,t3)\
        "pmulhw %%"s0", %%"t0" \n\t"\
        "pmulhw %%"s1", %%"t1" \n\t"\
        "pmulhw %%"s2", %%"t2" \n\t"\
        "pmulhw %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_sse2_move(s0,s1,s2,s3,t0,t1,t2,t3)\
        "movdqa %%"s0", %%"t0" \n\t"\
        "movdqa %%"s1", %%"t1" \n\t"\
        "movdqa %%"s2", %%"t2" \n\t"\
        "movdqa %%"s3", %%"t3" \n\t"

void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){
    x86_reg i = width;

    while(i & 0x1F)
    {
        i--;
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
    i+=i;
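    /* The loop above peels elements until i is a multiple of 32, and "i+=i"
     * converts the remaining element count into a byte offset (IDWTELEM is
     * 16 bits wide). The asm below then applies all four lifting steps to
     * one 64-byte column strip per iteration, counting REG_d down to 0. */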

    __asm__ volatile (
        "jmp 2f \n\t"
        "1: \n\t"
        snow_vertical_compose_sse2_load("%4","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_add("%6","xmm1","xmm3","xmm5","xmm7")

459 "pcmpeqw %%xmm0, %%xmm0 \n\t"
460 "pcmpeqw %%xmm2, %%xmm2 \n\t"
461 "paddw %%xmm2, %%xmm2 \n\t"
462 "paddw %%xmm0, %%xmm2 \n\t"
463 "psllw $13, %%xmm2 \n\t"
464 snow_vertical_compose_r2r_add("xmm0","xmm0","xmm0","xmm0","xmm1","xmm3","xmm5","xmm7")
465 snow_vertical_compose_r2r_pmulhw("xmm2","xmm2","xmm2","xmm2","xmm1","xmm3","xmm5","xmm7")
466 snow_vertical_compose_sse2_add("%5","xmm1","xmm3","xmm5","xmm7")
467 snow_vertical_compose_sse2_store("%5","xmm1","xmm3","xmm5","xmm7")
468 snow_vertical_compose_sse2_load("%4","xmm0","xmm2","xmm4","xmm6")
469 snow_vertical_compose_sse2_add("%3","xmm1","xmm3","xmm5","xmm7")
470 snow_vertical_compose_r2r_sub("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
471 snow_vertical_compose_sse2_store("%4","xmm0","xmm2","xmm4","xmm6")
472
473 "pcmpeqw %%xmm7, %%xmm7 \n\t"
474 "pcmpeqw %%xmm5, %%xmm5 \n\t"
475 "psllw $15, %%xmm7 \n\t"
476 "psrlw $13, %%xmm5 \n\t"
477 "paddw %%xmm7, %%xmm5 \n\t"
478 snow_vertical_compose_r2r_add("xmm5","xmm5","xmm5","xmm5","xmm0","xmm2","xmm4","xmm6")
479 "movq (%2,%%"REG_d"), %%xmm1 \n\t"
480 "movq 8(%2,%%"REG_d"), %%xmm3 \n\t"
481 "paddw %%xmm7, %%xmm1 \n\t"
482 "paddw %%xmm7, %%xmm3 \n\t"
483 "pavgw %%xmm1, %%xmm0 \n\t"
484 "pavgw %%xmm3, %%xmm2 \n\t"
485 "movq 16(%2,%%"REG_d"), %%xmm1 \n\t"
486 "movq 24(%2,%%"REG_d"), %%xmm3 \n\t"
487 "paddw %%xmm7, %%xmm1 \n\t"
488 "paddw %%xmm7, %%xmm3 \n\t"
489 "pavgw %%xmm1, %%xmm4 \n\t"
490 "pavgw %%xmm3, %%xmm6 \n\t"
491 snow_vertical_compose_r2r_sub("xmm7","xmm7","xmm7","xmm7","xmm0","xmm2","xmm4","xmm6")
492 snow_vertical_compose_sra("1","xmm0","xmm2","xmm4","xmm6")
493 snow_vertical_compose_sse2_add("%3","xmm0","xmm2","xmm4","xmm6")
494
495 snow_vertical_compose_sra("2","xmm0","xmm2","xmm4","xmm6")
496 snow_vertical_compose_sse2_add("%3","xmm0","xmm2","xmm4","xmm6")
497 snow_vertical_compose_sse2_store("%3","xmm0","xmm2","xmm4","xmm6")
498 snow_vertical_compose_sse2_add("%1","xmm0","xmm2","xmm4","xmm6")
499 snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
500 snow_vertical_compose_sra("1","xmm0","xmm2","xmm4","xmm6")
501 snow_vertical_compose_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
502 snow_vertical_compose_sse2_add("%2","xmm0","xmm2","xmm4","xmm6")
503 snow_vertical_compose_sse2_store("%2","xmm0","xmm2","xmm4","xmm6")
504
505 "2: \n\t"
506 "sub $64, %%"REG_d" \n\t"
507 "jge 1b \n\t"
508 :"+d"(i)
509 :"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5));
510 }
511
512 #define snow_vertical_compose_mmx_load_add(op,r,t0,t1,t2,t3)\
513 ""op" ("r",%%"REG_d"), %%"t0" \n\t"\
514 ""op" 8("r",%%"REG_d"), %%"t1" \n\t"\
515 ""op" 16("r",%%"REG_d"), %%"t2" \n\t"\
516 ""op" 24("r",%%"REG_d"), %%"t3" \n\t"
517
518 #define snow_vertical_compose_mmx_load(r,t0,t1,t2,t3)\
519 snow_vertical_compose_mmx_load_add("movq",r,t0,t1,t2,t3)
520
521 #define snow_vertical_compose_mmx_add(r,t0,t1,t2,t3)\
522 snow_vertical_compose_mmx_load_add("paddw",r,t0,t1,t2,t3)
523
524 #define snow_vertical_compose_mmx_store(w,s0,s1,s2,s3)\
525 "movq %%"s0", ("w",%%"REG_d") \n\t"\
526 "movq %%"s1", 8("w",%%"REG_d") \n\t"\
527 "movq %%"s2", 16("w",%%"REG_d") \n\t"\
528 "movq %%"s3", 24("w",%%"REG_d") \n\t"
529
530 #define snow_vertical_compose_mmx_move(s0,s1,s2,s3,t0,t1,t2,t3)\
531 "movq %%"s0", %%"t0" \n\t"\
532 "movq %%"s1", %%"t1" \n\t"\
533 "movq %%"s2", %%"t2" \n\t"\
534 "movq %%"s3", %%"t3" \n\t"
535
536
537 void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){
538 x86_reg i = width;
539 while(i & 15)
540 {
541 i--;
542 b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
543 b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
544 b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
545 b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
546 }
547 i+=i;
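    /* As in the SSE2 version: peel to a multiple of 16 elements, then turn
     * the element count into a byte offset for the 32-bytes-per-iteration
     * loop below. */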
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"

        snow_vertical_compose_mmx_load("%4","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_add("%6","mm1","mm3","mm5","mm7")
        "pcmpeqw %%mm0, %%mm0 \n\t"
        "pcmpeqw %%mm2, %%mm2 \n\t"
        "paddw %%mm2, %%mm2 \n\t"
        "paddw %%mm0, %%mm2 \n\t"
        "psllw $13, %%mm2 \n\t"
        snow_vertical_compose_r2r_add("mm0","mm0","mm0","mm0","mm1","mm3","mm5","mm7")
        snow_vertical_compose_r2r_pmulhw("mm2","mm2","mm2","mm2","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_add("%5","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_store("%5","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_load("%4","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%3","mm1","mm3","mm5","mm7")
        snow_vertical_compose_r2r_sub("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store("%4","mm0","mm2","mm4","mm6")
        "pcmpeqw %%mm7, %%mm7 \n\t"
        "pcmpeqw %%mm5, %%mm5 \n\t"
        "psllw $15, %%mm7 \n\t"
        "psrlw $13, %%mm5 \n\t"
        "paddw %%mm7, %%mm5 \n\t"
        snow_vertical_compose_r2r_add("mm5","mm5","mm5","mm5","mm0","mm2","mm4","mm6")
        "movq (%2,%%"REG_d"), %%mm1 \n\t"
        "movq 8(%2,%%"REG_d"), %%mm3 \n\t"
        "paddw %%mm7, %%mm1 \n\t"
        "paddw %%mm7, %%mm3 \n\t"
        "pavgw %%mm1, %%mm0 \n\t"
        "pavgw %%mm3, %%mm2 \n\t"
        "movq 16(%2,%%"REG_d"), %%mm1 \n\t"
        "movq 24(%2,%%"REG_d"), %%mm3 \n\t"
        "paddw %%mm7, %%mm1 \n\t"
        "paddw %%mm7, %%mm3 \n\t"
        "pavgw %%mm1, %%mm4 \n\t"
        "pavgw %%mm3, %%mm6 \n\t"
        snow_vertical_compose_r2r_sub("mm7","mm7","mm7","mm7","mm0","mm2","mm4","mm6")
        snow_vertical_compose_sra("1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%3","mm0","mm2","mm4","mm6")

        snow_vertical_compose_sra("2","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%3","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store("%3","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
        snow_vertical_compose_sra("1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%2","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store("%2","mm0","mm2","mm4","mm6")

        "2: \n\t"
        "sub $32, %%"REG_d" \n\t"
        "jge 1b \n\t"
        :"+d"(i)
        :"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5));
}
#endif //HAVE_7REGS

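/* The inner_add_yblock routines below perform snow's OBMC accumulation:
 * each output pixel is the sum of the four overlapping block predictions
 * weighted by the OBMC window, combined with the IDWT coefficients from the
 * slice buffer, rounded down by FRAC_BITS and clipped to 8 bits. Roughly,
 * per pixel (a simplified sketch of the C fallback ff_snow_inner_add_yblock,
 * with obmc1..obmc4 the four quadrants of the window and the OBMC
 * normalization shift omitted):
 *
 *     v = obmc1[x]*block[3][x] + obmc2[x]*block[2][x]
 *       + obmc3[x]*block[1][x] + obmc4[x]*block[0][x];
 *     dst8[x] = av_clip_uint8((v + dst[x] + round) >> FRAC_BITS);
 *
 * The xmm3/mm3 constant built in the header macros is that rounding term. */
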
#define snow_inner_add_yblock_sse2_header \
    IDWTELEM * * dst_array = sb->line + src_y;\
    x86_reg tmp;\
    __asm__ volatile(\
        "mov %7, %%"REG_c" \n\t"\
        "mov %6, %2 \n\t"\
        "mov %4, %%"REG_S" \n\t"\
        "pxor %%xmm7, %%xmm7 \n\t" /* 0 */\
        "pcmpeqd %%xmm3, %%xmm3 \n\t"\
        "psllw $15, %%xmm3 \n\t"\
        "psrlw $12, %%xmm3 \n\t" /* FRAC_BITS >> 1 */\
        "1: \n\t"\
        "mov %1, %%"REG_D" \n\t"\
        "mov (%%"REG_D"), %%"REG_D" \n\t"\
        "add %3, %%"REG_D" \n\t"

#define snow_inner_add_yblock_sse2_start_8(out_reg1, out_reg2, ptr_offset, s_offset)\
        "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
        "movq (%%"REG_d"), %%"out_reg1" \n\t"\
        "movq (%%"REG_d", %%"REG_c"), %%"out_reg2" \n\t"\
        "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
        "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
        "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
        "movq "s_offset"+16(%%"REG_S"), %%xmm4 \n\t"\
        "punpcklbw %%xmm7, %%xmm0 \n\t"\
        "punpcklbw %%xmm7, %%xmm4 \n\t"\
        "pmullw %%xmm0, %%"out_reg1" \n\t"\
        "pmullw %%xmm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_sse2_start_16(out_reg1, out_reg2, ptr_offset, s_offset)\
        "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
        "movq (%%"REG_d"), %%"out_reg1" \n\t"\
        "movq 8(%%"REG_d"), %%"out_reg2" \n\t"\
        "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
        "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
        "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
        "movq "s_offset"+8(%%"REG_S"), %%xmm4 \n\t"\
        "punpcklbw %%xmm7, %%xmm0 \n\t"\
        "punpcklbw %%xmm7, %%xmm4 \n\t"\
        "pmullw %%xmm0, %%"out_reg1" \n\t"\
        "pmullw %%xmm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_sse2_accum_8(ptr_offset, s_offset) \
        snow_inner_add_yblock_sse2_start_8("xmm2", "xmm6", ptr_offset, s_offset)\
        "paddusw %%xmm2, %%xmm1 \n\t"\
        "paddusw %%xmm6, %%xmm5 \n\t"

#define snow_inner_add_yblock_sse2_accum_16(ptr_offset, s_offset) \
        snow_inner_add_yblock_sse2_start_16("xmm2", "xmm6", ptr_offset, s_offset)\
        "paddusw %%xmm2, %%xmm1 \n\t"\
        "paddusw %%xmm6, %%xmm5 \n\t"

#define snow_inner_add_yblock_sse2_end_common1\
        "add $32, %%"REG_S" \n\t"\
        "add %%"REG_c", %0 \n\t"\
        "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
        "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
        "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
        "add %%"REG_c", (%%"REG_a") \n\t"

#define snow_inner_add_yblock_sse2_end_common2\
        "jnz 1b \n\t"\
        :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\
        :\
        "rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"(b_h),"m"(src_stride):\
        "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");

#define snow_inner_add_yblock_sse2_end_8\
        "sal $1, %%"REG_c" \n\t"\
        "add $"PTR_SIZE"*2, %1 \n\t"\
        snow_inner_add_yblock_sse2_end_common1\
        "sar $1, %%"REG_c" \n\t"\
        "sub $2, %2 \n\t"\
        snow_inner_add_yblock_sse2_end_common2

#define snow_inner_add_yblock_sse2_end_16\
        "add $"PTR_SIZE"*1, %1 \n\t"\
        snow_inner_add_yblock_sse2_end_common1\
        "dec %2 \n\t"\
        snow_inner_add_yblock_sse2_end_common2

static void inner_add_yblock_bw_8_obmc_16_bh_even_sse2(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h,
                                                       int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_sse2_header
    snow_inner_add_yblock_sse2_start_8("xmm1", "xmm5", "3", "0")
    snow_inner_add_yblock_sse2_accum_8("2", "8")
    snow_inner_add_yblock_sse2_accum_8("1", "128")
    snow_inner_add_yblock_sse2_accum_8("0", "136")

    "mov %0, %%"REG_d" \n\t"
    "movdqa (%%"REG_D"), %%xmm0 \n\t"
    "movdqa %%xmm1, %%xmm2 \n\t"

    "punpckhwd %%xmm7, %%xmm1 \n\t"
    "punpcklwd %%xmm7, %%xmm2 \n\t"
    "paddd %%xmm2, %%xmm0 \n\t"
    "movdqa 16(%%"REG_D"), %%xmm2 \n\t"
    "paddd %%xmm1, %%xmm2 \n\t"
    "paddd %%xmm3, %%xmm0 \n\t"
    "paddd %%xmm3, %%xmm2 \n\t"

    "mov %1, %%"REG_D" \n\t"
    "mov "PTR_SIZE"(%%"REG_D"), %%"REG_D";\n\t"
    "add %3, %%"REG_D" \n\t"

    "movdqa (%%"REG_D"), %%xmm4 \n\t"
    "movdqa %%xmm5, %%xmm6 \n\t"
    "punpckhwd %%xmm7, %%xmm5 \n\t"
    "punpcklwd %%xmm7, %%xmm6 \n\t"
    "paddd %%xmm6, %%xmm4 \n\t"
    "movdqa 16(%%"REG_D"), %%xmm6 \n\t"
    "paddd %%xmm5, %%xmm6 \n\t"
    "paddd %%xmm3, %%xmm4 \n\t"
    "paddd %%xmm3, %%xmm6 \n\t"

    "psrad $8, %%xmm0 \n\t" /* FRAC_BITS. */
    "psrad $8, %%xmm2 \n\t" /* FRAC_BITS. */
    "packssdw %%xmm2, %%xmm0 \n\t"
    "packuswb %%xmm7, %%xmm0 \n\t"
    "movq %%xmm0, (%%"REG_d") \n\t"

    "psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */
    "psrad $8, %%xmm6 \n\t" /* FRAC_BITS. */
    "packssdw %%xmm6, %%xmm4 \n\t"
    "packuswb %%xmm7, %%xmm4 \n\t"
    "movq %%xmm4, (%%"REG_d",%%"REG_c");\n\t"
    snow_inner_add_yblock_sse2_end_8
}

static void inner_add_yblock_bw_16_obmc_32_sse2(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h,
                                                int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_sse2_header
    snow_inner_add_yblock_sse2_start_16("xmm1", "xmm5", "3", "0")
    snow_inner_add_yblock_sse2_accum_16("2", "16")
    snow_inner_add_yblock_sse2_accum_16("1", "512")
    snow_inner_add_yblock_sse2_accum_16("0", "528")

    "mov %0, %%"REG_d" \n\t"
    "psrlw $4, %%xmm1 \n\t"
    "psrlw $4, %%xmm5 \n\t"
    "paddw (%%"REG_D"), %%xmm1 \n\t"
    "paddw 16(%%"REG_D"), %%xmm5 \n\t"
    "paddw %%xmm3, %%xmm1 \n\t"
    "paddw %%xmm3, %%xmm5 \n\t"
    "psraw $4, %%xmm1 \n\t" /* FRAC_BITS. */
    "psraw $4, %%xmm5 \n\t" /* FRAC_BITS. */
    "packuswb %%xmm5, %%xmm1 \n\t"

    "movdqu %%xmm1, (%%"REG_d") \n\t"

    snow_inner_add_yblock_sse2_end_16
}

#define snow_inner_add_yblock_mmx_header \
    IDWTELEM * * dst_array = sb->line + src_y;\
    x86_reg tmp;\
    __asm__ volatile(\
        "mov %7, %%"REG_c" \n\t"\
        "mov %6, %2 \n\t"\
        "mov %4, %%"REG_S" \n\t"\
        "pxor %%mm7, %%mm7 \n\t" /* 0 */\
        "pcmpeqd %%mm3, %%mm3 \n\t"\
        "psllw $15, %%mm3 \n\t"\
        "psrlw $12, %%mm3 \n\t" /* FRAC_BITS >> 1 */\
        "1: \n\t"\
        "mov %1, %%"REG_D" \n\t"\
        "mov (%%"REG_D"), %%"REG_D" \n\t"\
        "add %3, %%"REG_D" \n\t"

#define snow_inner_add_yblock_mmx_start(out_reg1, out_reg2, ptr_offset, s_offset, d_offset)\
        "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
        "movd "d_offset"(%%"REG_d"), %%"out_reg1" \n\t"\
        "movd "d_offset"+4(%%"REG_d"), %%"out_reg2" \n\t"\
        "punpcklbw %%mm7, %%"out_reg1" \n\t"\
        "punpcklbw %%mm7, %%"out_reg2" \n\t"\
        "movd "s_offset"(%%"REG_S"), %%mm0 \n\t"\
        "movd "s_offset"+4(%%"REG_S"), %%mm4 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        "pmullw %%mm0, %%"out_reg1" \n\t"\
        "pmullw %%mm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_mmx_accum(ptr_offset, s_offset, d_offset) \
        snow_inner_add_yblock_mmx_start("mm2", "mm6", ptr_offset, s_offset, d_offset)\
        "paddusw %%mm2, %%mm1 \n\t"\
        "paddusw %%mm6, %%mm5 \n\t"

#define snow_inner_add_yblock_mmx_mix(read_offset, write_offset)\
        "mov %0, %%"REG_d" \n\t"\
        "psrlw $4, %%mm1 \n\t"\
        "psrlw $4, %%mm5 \n\t"\
        "paddw "read_offset"(%%"REG_D"), %%mm1 \n\t"\
        "paddw "read_offset"+8(%%"REG_D"), %%mm5 \n\t"\
        "paddw %%mm3, %%mm1 \n\t"\
        "paddw %%mm3, %%mm5 \n\t"\
        "psraw $4, %%mm1 \n\t"\
        "psraw $4, %%mm5 \n\t"\
        "packuswb %%mm5, %%mm1 \n\t"\
        "movq %%mm1, "write_offset"(%%"REG_d") \n\t"

#define snow_inner_add_yblock_mmx_end(s_step)\
        "add $"s_step", %%"REG_S" \n\t"\
        "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
        "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
        "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
        "add %%"REG_c", (%%"REG_a") \n\t"\
        "add $"PTR_SIZE"*1, %1 \n\t"\
        "add %%"REG_c", %0 \n\t"\
        "dec %2 \n\t"\
        "jnz 1b \n\t"\
        :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\
        :\
        "rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"(b_h),"m"(src_stride):\
        "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");

static void inner_add_yblock_bw_8_obmc_16_mmx(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h,
                                              int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_mmx_header
    snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
    snow_inner_add_yblock_mmx_accum("2", "8", "0")
    snow_inner_add_yblock_mmx_accum("1", "128", "0")
    snow_inner_add_yblock_mmx_accum("0", "136", "0")
    snow_inner_add_yblock_mmx_mix("0", "0")
    snow_inner_add_yblock_mmx_end("16")
}

static void inner_add_yblock_bw_16_obmc_32_mmx(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h,
                                               int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_mmx_header
    snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
    snow_inner_add_yblock_mmx_accum("2", "16", "0")
    snow_inner_add_yblock_mmx_accum("1", "512", "0")
    snow_inner_add_yblock_mmx_accum("0", "528", "0")
    snow_inner_add_yblock_mmx_mix("0", "0")

    snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "8", "8")
    snow_inner_add_yblock_mmx_accum("2", "24", "8")
    snow_inner_add_yblock_mmx_accum("1", "520", "8")
    snow_inner_add_yblock_mmx_accum("0", "536", "8")
    snow_inner_add_yblock_mmx_mix("16", "8")
    snow_inner_add_yblock_mmx_end("32")
}

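/* Dispatch: 16-pixel-wide blocks take the SSE2 16-wide path; 8-wide blocks
 * with an even height take the SSE2 path that produces two output rows per
 * iteration, while odd heights fall back to the one-row-at-a-time MMX
 * version; everything else goes to the generic C implementation. */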
void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                   int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){

    if (b_w == 16)
        inner_add_yblock_bw_16_obmc_32_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else if (b_w == 8 && obmc_stride == 16) {
        if (!(b_h & 1))
            inner_add_yblock_bw_8_obmc_16_bh_even_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
        else
            inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    } else
        ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
}

void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                  int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    if (b_w == 16)
        inner_add_yblock_bw_16_obmc_32_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else if (b_w == 8 && obmc_stride == 16)
        inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else
        ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
}