/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil_mmx.h"

DECLARE_ALIGNED_8 (static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL;
DECLARE_ALIGNED_8 (static const uint64_t, ff_pb_7_3 ) = 0x0307030703070307ULL;

/***********************************/
/* IDCT */

#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"

#define SUMSUBD2_AB( a, b, t ) \
    "movq  "#b", "#t" \n\t"\
    "psraw  $1 , "#b" \n\t"\
    "paddw "#a", "#b" \n\t"\
    "psraw  $1 , "#a" \n\t"\
    "psubw "#t", "#a" \n\t"

#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA  ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )

#define STORE_DIFF_4P( p, t, z ) \
    "psraw $6, "#p" \n\t"\
    "movd (%0), "#t" \n\t"\
    "punpcklbw "#z", "#t" \n\t"\
    "paddsw "#t", "#p" \n\t"\
    "packuswb "#z", "#p" \n\t"\
    "movd "#p", (%0) \n\t"

static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    asm volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
    :: "r"(block) );

    asm volatile(
        /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )

        "movq %0, %%mm6 \n\t"
        /* in: 1,4,0,2 out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )

        "paddw %%mm6, %%mm3 \n\t"

        /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )

        "pxor %%mm7, %%mm7 \n\t"
    :: "m"(ff_pw_32));

    asm volatile(
        STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((x86_reg)stride)
    );
}
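
/* For reference, a scalar sketch of what ff_h264_idct_add_mmx computes: the
 * H.264 4x4 inverse integer transform followed by clipped reconstruction.
 * Illustrative only, hence #if 0; the helper name is this sketch's own, and
 * av_clip_uint8() from libavutil is assumed to be available.  The +32
 * rounding that the asm adds once before the second pass is folded into
 * each output here, which is arithmetically equivalent. */
#if 0
static void h264_idct_add_c_sketch(uint8_t *dst, int16_t *block, int stride)
{
    int i, j, tmp[16];
    for (i = 0; i < 4; i++) { /* horizontal pass */
        const int z0 = block[4*i+0] + block[4*i+2];
        const int z1 = block[4*i+0] - block[4*i+2];
        const int z2 = (block[4*i+1] >> 1) - block[4*i+3];
        const int z3 = block[4*i+1] + (block[4*i+3] >> 1);
        tmp[4*i+0] = z0 + z3;
        tmp[4*i+1] = z1 + z2;
        tmp[4*i+2] = z1 - z2;
        tmp[4*i+3] = z0 - z3;
    }
    for (j = 0; j < 4; j++) { /* vertical pass, round, add to dst */
        const int z0 = tmp[j+0] + tmp[j+ 8];
        const int z1 = tmp[j+0] - tmp[j+ 8];
        const int z2 = (tmp[j+4] >> 1) - tmp[j+12];
        const int z3 = tmp[j+4] + (tmp[j+12] >> 1);
        dst[j+0*stride] = av_clip_uint8(dst[j+0*stride] + ((z0+z3+32) >> 6));
        dst[j+1*stride] = av_clip_uint8(dst[j+1*stride] + ((z1+z2+32) >> 6));
        dst[j+2*stride] = av_clip_uint8(dst[j+2*stride] + ((z1-z2+32) >> 6));
        dst[j+3*stride] = av_clip_uint8(dst[j+3*stride] + ((z0-z3+32) >> 6));
    }
}
#endif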

static inline void h264_idct8_1d(int16_t *block)
{
    asm volatile(
        "movq 112(%0), %%mm7 \n\t"
        "movq  80(%0), %%mm0 \n\t"
        "movq  48(%0), %%mm3 \n\t"
        "movq  16(%0), %%mm5 \n\t"

        "movq  %%mm0, %%mm4 \n\t"
        "movq  %%mm5, %%mm1 \n\t"
        "psraw $1,    %%mm4 \n\t"
        "psraw $1,    %%mm1 \n\t"
        "paddw %%mm0, %%mm4 \n\t"
        "paddw %%mm5, %%mm1 \n\t"
        "paddw %%mm7, %%mm4 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "psubw %%mm5, %%mm4 \n\t"
        "paddw %%mm3, %%mm1 \n\t"

        "psubw %%mm3, %%mm5 \n\t"
        "psubw %%mm3, %%mm0 \n\t"
        "paddw %%mm7, %%mm5 \n\t"
        "psubw %%mm7, %%mm0 \n\t"
        "psraw $1,    %%mm3 \n\t"
        "psraw $1,    %%mm7 \n\t"
        "psubw %%mm3, %%mm5 \n\t"
        "psubw %%mm7, %%mm0 \n\t"

        "movq  %%mm4, %%mm3 \n\t"
        "movq  %%mm1, %%mm7 \n\t"
        "psraw $2,    %%mm1 \n\t"
        "psraw $2,    %%mm3 \n\t"
        "paddw %%mm5, %%mm3 \n\t"
        "psraw $2,    %%mm5 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "psraw $2,    %%mm0 \n\t"
        "psubw %%mm4, %%mm5 \n\t"
        "psubw %%mm0, %%mm7 \n\t"

        "movq 32(%0), %%mm2 \n\t"
        "movq 96(%0), %%mm6 \n\t"
        "movq %%mm2,  %%mm4 \n\t"
        "movq %%mm6,  %%mm0 \n\t"
        "psraw $1,    %%mm4 \n\t"
        "psraw $1,    %%mm6 \n\t"
        "psubw %%mm0, %%mm4 \n\t"
        "paddw %%mm2, %%mm6 \n\t"

        "movq   (%0), %%mm2 \n\t"
        "movq 64(%0), %%mm0 \n\t"
        SUMSUB_BA( %%mm0, %%mm2 )
        SUMSUB_BA( %%mm6, %%mm0 )
        SUMSUB_BA( %%mm4, %%mm2 )
        SUMSUB_BA( %%mm7, %%mm6 )
        SUMSUB_BA( %%mm5, %%mm4 )
        SUMSUB_BA( %%mm3, %%mm2 )
        SUMSUB_BA( %%mm1, %%mm0 )
    :: "r"(block)
    );
}
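
/* h264_idct8_1d runs one 1-D pass of the 8-point transform on four columns
 * at a time (one 16-bit lane per column): the odd inputs (rows 1,3,5,7) are
 * combined first, then the even inputs, with the standard's >>1 and >>2
 * scalings folded into the butterflies.  The eight results are left spread
 * across mm0..mm7 for the caller to transpose or store. */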

static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    int16_t __attribute__ ((aligned(8))) b2[64];

    block[0] += 32;

    for(i=0; i<2; i++){
        DECLARE_ALIGNED_8(uint64_t, tmp);

        h264_idct8_1d(block+4*i);

        asm volatile(
            "movq %%mm7, %0 \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq %%mm0,  8(%1) \n\t"
            "movq %%mm6, 24(%1) \n\t"
            "movq %%mm7, 40(%1) \n\t"
            "movq %%mm4, 56(%1) \n\t"
            "movq %0, %%mm7 \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq %%mm7,   (%1) \n\t"
            "movq %%mm1, 16(%1) \n\t"
            "movq %%mm0, 32(%1) \n\t"
            "movq %%mm3, 48(%1) \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        h264_idct8_1d(b2+4*i);

        asm volatile(
            "psraw $6, %%mm7 \n\t"
            "psraw $6, %%mm6 \n\t"
            "psraw $6, %%mm5 \n\t"
            "psraw $6, %%mm4 \n\t"
            "psraw $6, %%mm3 \n\t"
            "psraw $6, %%mm2 \n\t"
            "psraw $6, %%mm1 \n\t"
            "psraw $6, %%mm0 \n\t"

            "movq %%mm7,    (%0) \n\t"
            "movq %%mm5,  16(%0) \n\t"
            "movq %%mm3,  32(%0) \n\t"
            "movq %%mm1,  48(%0) \n\t"
            "movq %%mm0,  64(%0) \n\t"
            "movq %%mm2,  80(%0) \n\t"
            "movq %%mm4,  96(%0) \n\t"
            "movq %%mm6, 112(%0) \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    add_pixels_clamped_mmx(b2, dst, stride);
}
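
/* The 8x8 transform above is processed as two 4-column halves because an
 * MMX register holds only four 16-bit coefficients; b2[] buffers the
 * transposed intermediate between the two 1-D passes, and the final clipped
 * add to dst is left to add_pixels_clamped_mmx(). */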

#define STORE_DIFF_8P( p, d, t, z )\
    "movq      "#d", "#t" \n"\
    "psraw      $6,  "#p" \n"\
    "punpcklbw "#z", "#t" \n"\
    "paddsw    "#t", "#p" \n"\
    "packuswb  "#p", "#p" \n"\
    "movq      "#p", "#d" \n"

#define H264_IDCT8_1D_SSE2(a,b,c,d,e,f,g,h)\
    "movdqa "#c", "#a" \n"\
    "movdqa "#g", "#e" \n"\
    "psraw   $1,  "#c" \n"\
    "psraw   $1,  "#g" \n"\
    "psubw  "#e", "#c" \n"\
    "paddw  "#a", "#g" \n"\
    "movdqa "#b", "#e" \n"\
    "psraw   $1,  "#e" \n"\
    "paddw  "#b", "#e" \n"\
    "paddw  "#d", "#e" \n"\
    "paddw  "#f", "#e" \n"\
    "movdqa "#f", "#a" \n"\
    "psraw   $1,  "#a" \n"\
    "paddw  "#f", "#a" \n"\
    "paddw  "#h", "#a" \n"\
    "psubw  "#b", "#a" \n"\
    "psubw  "#d", "#b" \n"\
    "psubw  "#d", "#f" \n"\
    "paddw  "#h", "#b" \n"\
    "psubw  "#h", "#f" \n"\
    "psraw   $1,  "#d" \n"\
    "psraw   $1,  "#h" \n"\
    "psubw  "#d", "#b" \n"\
    "psubw  "#h", "#f" \n"\
    "movdqa "#e", "#d" \n"\
    "movdqa "#a", "#h" \n"\
    "psraw   $2,  "#d" \n"\
    "psraw   $2,  "#h" \n"\
    "paddw  "#f", "#d" \n"\
    "paddw  "#b", "#h" \n"\
    "psraw   $2,  "#f" \n"\
    "psraw   $2,  "#b" \n"\
    "psubw  "#f", "#e" \n"\
    "psubw  "#a", "#b" \n"\
    "movdqa 0x00(%1), "#a" \n"\
    "movdqa 0x40(%1), "#f" \n"\
    SUMSUB_BA(f, a)\
    SUMSUB_BA(g, f)\
    SUMSUB_BA(c, a)\
    SUMSUB_BA(e, g)\
    SUMSUB_BA(b, c)\
    SUMSUB_BA(h, a)\
    SUMSUB_BA(d, f)

static void ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
{
    asm volatile(
        "movdqa 0x10(%1), %%xmm1 \n"
        "movdqa 0x20(%1), %%xmm2 \n"
        "movdqa 0x30(%1), %%xmm3 \n"
        "movdqa 0x50(%1), %%xmm5 \n"
        "movdqa 0x60(%1), %%xmm6 \n"
        "movdqa 0x70(%1), %%xmm7 \n"
        H264_IDCT8_1D_SSE2(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)
        TRANSPOSE8(%%xmm4, %%xmm1, %%xmm7, %%xmm3, %%xmm5, %%xmm0, %%xmm2, %%xmm6, (%1))
        "paddw %4, %%xmm4 \n"
        "movdqa %%xmm4, 0x00(%1) \n"
        "movdqa %%xmm2, 0x40(%1) \n"
        H264_IDCT8_1D_SSE2(%%xmm4, %%xmm0, %%xmm6, %%xmm3, %%xmm2, %%xmm5, %%xmm7, %%xmm1)
        "movdqa %%xmm6, 0x60(%1) \n"
        "movdqa %%xmm7, 0x70(%1) \n"
        "pxor %%xmm7, %%xmm7 \n"
        STORE_DIFF_8P(%%xmm2, (%0),      %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm0, (%0,%2),   %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm3, (%0,%3),   %%xmm6, %%xmm7)
        "lea (%0,%2,4), %0 \n"
        STORE_DIFF_8P(%%xmm5, (%0),      %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm4, (%0,%2),   %%xmm6, %%xmm7)
        "movdqa 0x60(%1), %%xmm0 \n"
        "movdqa 0x70(%1), %%xmm1 \n"
        STORE_DIFF_8P(%%xmm0, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%3),   %%xmm6, %%xmm7)
        :"+r"(dst)
        :"r"(block), "r"((x86_reg)stride), "r"((x86_reg)3L*stride), "m"(ff_pw_32)
    );
}

static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    asm volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    asm volatile(
        "movd %0, %%mm2 \n\t"
        "movd %1, %%mm3 \n\t"
        "movd %2, %%mm4 \n\t"
        "movd %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movd %%mm2, %0 \n\t"
        "movd %%mm3, %1 \n\t"
        "movd %%mm4, %2 \n\t"
        "movd %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dst+0*stride)),
         "+m"(*(uint32_t*)(dst+1*stride)),
         "+m"(*(uint32_t*)(dst+2*stride)),
         "+m"(*(uint32_t*)(dst+3*stride))
    );
}
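
/* Scalar sketch of the DC-only path above: one rounded DC value is added to
 * every pixel of the block with saturation.  MMX has no signed saturated
 * add on bytes, so the asm splits dc into a non-negative part (mm0) and a
 * negated part (mm1); paddusb followed by psubusb then emulates the signed
 * clip.  The sketch name is hypothetical and the block is not compiled. */
#if 0
static void h264_idct_dc_add_c_sketch(uint8_t *dst, int16_t *block, int stride)
{
    const int dc = (block[0] + 32) >> 6;
    int x, y;
    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++)
            dst[y*stride + x] = av_clip_uint8(dst[y*stride + x] + dc);
}
#endif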

static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    int y;
    asm volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    for(y=2; y--; dst += 4*stride){
        asm volatile(
            "movq %0, %%mm2 \n\t"
            "movq %1, %%mm3 \n\t"
            "movq %2, %%mm4 \n\t"
            "movq %3, %%mm5 \n\t"
            "paddusb %%mm0, %%mm2 \n\t"
            "paddusb %%mm0, %%mm3 \n\t"
            "paddusb %%mm0, %%mm4 \n\t"
            "paddusb %%mm0, %%mm5 \n\t"
            "psubusb %%mm1, %%mm2 \n\t"
            "psubusb %%mm1, %%mm3 \n\t"
            "psubusb %%mm1, %%mm4 \n\t"
            "psubusb %%mm1, %%mm5 \n\t"
            "movq %%mm2, %0 \n\t"
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %3 \n\t"
            :"+m"(*(uint64_t*)(dst+0*stride)),
             "+m"(*(uint64_t*)(dst+1*stride)),
             "+m"(*(uint64_t*)(dst+2*stride)),
             "+m"(*(uint64_t*)(dst+3*stride))
        );
    }
}


/***********************************/
/* deblocking */

// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq    "#y", "#t" \n\t"\
    "movq    "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "por     "#t", "#o" \n\t"\
    "psubusb "#a", "#o" \n\t"
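
/* How DIFF_GT_MMX works: psubusb saturates at zero, so t = max(y-x,0) and
 * o = max(x-y,0); one of the two is always zero, so their por is exactly
 * |x-y|.  The final saturating subtract of a leaves a nonzero byte
 * precisely where |x-y| > a. */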

// out: o = 0xff where |x-y|<=a, 0 where |x-y|>a (inverted sense vs DIFF_GT_MMX)
// clobbers: t
#define DIFF_GT2_MMX(x,y,a,o,t)\
    "movq    "#y", "#t" \n\t"\
    "movq    "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "psubusb "#a", "#t" \n\t"\
    "psubusb "#a", "#o" \n\t"\
    "pcmpeqb "#t", "#o" \n\t"\

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpeqb %%mm6, %%mm7 \n\t"
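
/* The resulting mask is the standard filtering condition
 *   |p0-q0| < alpha && |p1-p0| < beta && |q1-q0| < beta:
 * alpha1/beta1 hold alpha-1/beta-1, so each DIFF_GT "> a" is the negation
 * of the corresponding "<" test, and the final pcmpeqb against zero
 * inverts the OR of the three negated tests back into the wanted mask. */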

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
    "movq    %%mm1, %%mm5 \n\t"\
    "pxor    %%mm2, %%mm5 \n\t" /* p0^q0 */\
    "pand "#pb_01", %%mm5 \n\t" /* (p0^q0)&1 */\
    "pcmpeqb %%mm4, %%mm4 \n\t"\
    "pxor    %%mm4, %%mm3 \n\t"\
    "pavgb   %%mm0, %%mm3 \n\t" /* (p1 - q1 + 256)>>1 */\
    "pavgb "MANGLE(ff_pb_3)", %%mm3 \n\t" /* (((p1-q1+256)>>1)+4)>>1 = 66+((p1-q1)>>2) */\
    "pxor    %%mm1, %%mm4 \n\t"\
    "pavgb   %%mm2, %%mm4 \n\t" /* (q0 - p0 + 256)>>1 */\
    "pavgb   %%mm5, %%mm3 \n\t"\
    "paddusb %%mm4, %%mm3 \n\t" /* d+128+33 */\
    "movq "MANGLE(ff_pb_A1)", %%mm6 \n\t"\
    "psubusb %%mm3, %%mm6 \n\t"\
    "psubusb "MANGLE(ff_pb_A1)", %%mm3 \n\t"\
    "pminub  %%mm7, %%mm6 \n\t"\
    "pminub  %%mm7, %%mm3 \n\t"\
    "psubusb %%mm6, %%mm1 \n\t"\
    "psubusb %%mm3, %%mm2 \n\t"\
    "paddusb %%mm3, %%mm1 \n\t"\
    "paddusb %%mm6, %%mm2 \n\t"
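
/* Scalar sketch of the p0/q0 update above (the normal, bS < 4 filter):
 *   delta = av_clip(((q0-p0)*4 + (p1-q1) + 4) >> 3, -tc, tc);
 *   p0' = av_clip_uint8(p0 + delta);
 *   q0' = av_clip_uint8(q0 - delta);
 * The pavgb-based code computes the same delta entirely in bytes, with the
 * clip to +/-tc performed by the pminub/psubusb/paddusb tail. */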

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=ff_bone
// out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
    "movq     %%mm1,  "#tmp" \n\t"\
    "pavgb    %%mm2,  "#tmp" \n\t"\
    "pavgb   "#tmp",  "#q2"  \n\t" /* avg(p2,avg(p0,q0)) */\
    "pxor   "q2addr", "#tmp" \n\t"\
    "pand     %8,     "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
    "psubusb "#tmp",  "#q2"  \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
    "movq    "#p1",   "#tmp" \n\t"\
    "psubusb "#tc0",  "#tmp" \n\t"\
    "paddusb "#p1",   "#tc0" \n\t"\
    "pmaxub  "#tmp",  "#q2"  \n\t"\
    "pminub  "#tc0",  "#q2"  \n\t"\
    "movq    "#q2",  "q1addr" \n\t"

static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    DECLARE_ALIGNED_8(uint64_t, tmp0[2]);

    asm volatile(
        "movq (%1,%3), %%mm0   \n\t" //p1
        "movq (%1,%3,2), %%mm1 \n\t" //p0
        "movq (%2), %%mm2      \n\t" //q0
        "movq (%2,%3), %%mm3   \n\t" //q1
        H264_DEBLOCK_MASK(%6, %7)

        "movd %5, %%mm4 \n\t"
        "punpcklbw %%mm4, %%mm4 \n\t"
        "punpcklwd %%mm4, %%mm4 \n\t"
        "pcmpeqb %%mm3, %%mm3 \n\t"
        "movq %%mm4, %%mm6 \n\t"
        "pcmpgtb %%mm3, %%mm4 \n\t"
        "movq %%mm6, 8+%0 \n\t"
        "pand %%mm4, %%mm7 \n\t"
        "movq %%mm7, %0 \n\t"

        /* filter p1 */
        "movq (%1), %%mm3 \n\t" //p2
        DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
        "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
        "pand 8+%0, %%mm7 \n\t" // mask & tc0
        "movq %%mm7, %%mm4 \n\t"
        "psubb %%mm6, %%mm7 \n\t"
        "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
        H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)

        /* filter q1 */
        "movq (%2,%3,2), %%mm4 \n\t" //q2
        DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
        "pand %0, %%mm6 \n\t"
        "movq 8+%0, %%mm5 \n\t" // could be merged with the pand below, but that is slower
        "pand %%mm6, %%mm5 \n\t"
        "psubb %%mm6, %%mm7 \n\t"
        "movq (%2,%3), %%mm3 \n\t"
        H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)

        /* filter p0, q0 */
        H264_DEBLOCK_P0_Q0(%8, unused)
        "movq %%mm1, (%1,%3,2) \n\t"
        "movq %%mm2, (%2) \n\t"

        : "=m"(*tmp0)
        : "r"(pix-3*stride), "r"(pix), "r"((x86_reg)stride),
          "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1),
          "m"(ff_bone)
    );
}

static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
}
static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    // also, it only needs to transpose 6x8
    DECLARE_ALIGNED_8(uint8_t, trans[8*8]);
    int i;
    for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
        if((tc0[0] & tc0[1]) < 0)
            continue;
        transpose4x4(trans,       pix-4,          8, stride);
        transpose4x4(trans  +4*8, pix,            8, stride);
        transpose4x4(trans+4,     pix-4+4*stride, 8, stride);
        transpose4x4(trans+4+4*8, pix  +4*stride, 8, stride);
        h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
        transpose4x4(pix-2,          trans  +2*8, stride, 8);
        transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
    }
}

static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    asm volatile(
        "movq (%0), %%mm0    \n\t" //p1
        "movq (%0,%2), %%mm1 \n\t" //p0
        "movq (%1), %%mm2    \n\t" //q0
        "movq (%1,%2), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%4, %5)
        "movd %3, %%mm6 \n\t"
        "punpcklbw %%mm6, %%mm6 \n\t"
        "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
        H264_DEBLOCK_P0_Q0(%6, %7)
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"

        :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
           "r"(*(uint32_t*)tc0),
           "m"(alpha1), "m"(beta1), "m"(ff_bone), "m"(ff_pb_3F)
    );
}

static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
}

static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

// p0 = (p0 + q1 + 2*p1 + 2) >> 2
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
    "movq  "#p0", %%mm4  \n\t"\
    "pxor  "#q1", %%mm4  \n\t"\
    "pand  "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
    "pavgb "#q1", "#p0"  \n\t"\
    "psubusb %%mm4, "#p0" \n\t"\
    "pavgb "#p1", "#p0"  \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\

static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
    asm volatile(
        "movq (%0), %%mm0    \n\t"
        "movq (%0,%2), %%mm1 \n\t"
        "movq (%1), %%mm2    \n\t"
        "movq (%1,%2), %%mm3 \n\t"
        H264_DEBLOCK_MASK(%3, %4)
        "movq %%mm1, %%mm5 \n\t"
        "movq %%mm2, %%mm6 \n\t"
        H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
        H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
        "psubb %%mm5, %%mm1 \n\t"
        "psubb %%mm6, %%mm2 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm7, %%mm2 \n\t"
        "paddb %%mm5, %%mm1 \n\t"
        "paddb %%mm6, %%mm2 \n\t"
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
           "m"(alpha1), "m"(beta1), "m"(ff_bone)
    );
}

static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
}

static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    int dir;
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "movq %0, %%mm6 \n\t"
        "movq %1, %%mm5 \n\t"
        "movq %2, %%mm4 \n\t"
        ::"m"(ff_pb_1), "m"(ff_pb_3), "m"(ff_pb_7)
    );
    if(field)
        asm volatile(
            "movq %0, %%mm5 \n\t"
            "movq %1, %%mm4 \n\t"
            ::"m"(ff_pb_3_1), "m"(ff_pb_7_3)
        );

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    for( dir=1; dir>=0; dir-- ) {
        const int d_idx = dir ? -8 : -1;
        const int mask_mv = dir ? mask_mv1 : mask_mv0;
        DECLARE_ALIGNED_8(const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
        int b_idx, edge, l;
        for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
            asm volatile(
                "pand %0, %%mm0 \n\t"
                ::"m"(mask_dir)
            );
            if(!(mask_mv & edge)) {
                asm volatile("pxor %%mm0, %%mm0 \n\t":);
                for( l = bidir; l >= 0; l-- ) {
                    asm volatile(
                        "movd %0, %%mm1 \n\t"
                        "punpckldq %1, %%mm1 \n\t"
                        "movq %%mm1, %%mm2 \n\t"
                        "psrlw $7, %%mm2 \n\t"
                        "pand %%mm6, %%mm2 \n\t"
                        "por %%mm2, %%mm1 \n\t" // ref_cache with -2 mapped to -1
                        "punpckldq %%mm1, %%mm2 \n\t"
                        "pcmpeqb %%mm2, %%mm1 \n\t"
                        "paddb %%mm6, %%mm1 \n\t"
                        "punpckhbw %%mm7, %%mm1 \n\t" // ref[b] != ref[bn]
                        "por %%mm1, %%mm0 \n\t"

                        "movq %2, %%mm1 \n\t"
                        "movq %3, %%mm2 \n\t"
                        "psubw %4, %%mm1 \n\t"
                        "psubw %5, %%mm2 \n\t"
                        "packsswb %%mm2, %%mm1 \n\t"
                        "paddb %%mm5, %%mm1 \n\t"
                        "pminub %%mm4, %%mm1 \n\t"
                        "pcmpeqb %%mm4, %%mm1 \n\t" // abs(mv[b] - mv[bn]) >= limit
                        "por %%mm1, %%mm0 \n\t"
                        ::"m"(ref[l][b_idx]),
                          "m"(ref[l][b_idx+d_idx]),
                          "m"(mv[l][b_idx][0]),
                          "m"(mv[l][b_idx+2][0]),
                          "m"(mv[l][b_idx+d_idx][0]),
                          "m"(mv[l][b_idx+d_idx+2][0])
                    );
                }
            }
            asm volatile(
                "movd %0, %%mm1 \n\t"
                "por %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pcmpgtw %%mm7, %%mm1 \n\t" // nnz[b] || nnz[bn]
                ::"m"(nnz[b_idx]),
                  "m"(nnz[b_idx+d_idx])
            );
            asm volatile(
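                /* applying pcmpeqw against zero twice is a double negation:
                   mm0 ends up all-ones exactly in the words that were nonzero */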
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "psrlw $15, %%mm0 \n\t" // nonzero -> 1
                "psrlw $14, %%mm1 \n\t"
                "movq %%mm0, %%mm2 \n\t"
                "por %%mm1, %%mm2 \n\t"
                "psrlw $1, %%mm1 \n\t"
                "pandn %%mm2, %%mm1 \n\t"
                "movq %%mm1, %0 \n\t"
                :"=m"(*bS[dir][edge])
                ::"memory"
            );
        }
        edges = 4;
        step = 1;
    }
    asm volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0,   (%0) \n\t"
        "movq %%mm3,  8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}

/***********************************/
/* motion compensation */

#define QPEL_H264V_MM(A,B,C,D,E,F,OP,T,Z,d,q)\
    "mov"#q" "#C", "#T" \n\t"\
    "mov"#d" (%0), "#F" \n\t"\
    "paddw "#D", "#T" \n\t"\
    "psllw $2, "#T" \n\t"\
    "psubw "#B", "#T" \n\t"\
    "psubw "#E", "#T" \n\t"\
    "punpcklbw "#Z", "#F" \n\t"\
    "pmullw %4, "#T" \n\t"\
    "paddw %5, "#A" \n\t"\
    "add %2, %0 \n\t"\
    "paddw "#F", "#A" \n\t"\
    "paddw "#A", "#T" \n\t"\
    "psraw $5, "#T" \n\t"\
    "packuswb "#T", "#T" \n\t"\
    OP(T, (%1), A, d)\
    "add %3, %1 \n\t"

#define QPEL_H264HV_MM(A,B,C,D,E,F,OF,T,Z,d,q)\
    "mov"#q" "#C", "#T" \n\t"\
    "mov"#d" (%0), "#F" \n\t"\
    "paddw "#D", "#T" \n\t"\
    "psllw $2, "#T" \n\t"\
    "paddw %4, "#A" \n\t"\
    "psubw "#B", "#T" \n\t"\
    "psubw "#E", "#T" \n\t"\
    "punpcklbw "#Z", "#F" \n\t"\
    "pmullw %3, "#T" \n\t"\
    "paddw "#F", "#A" \n\t"\
    "add %2, %0 \n\t"\
    "paddw "#A", "#T" \n\t"\
    "mov"#q" "#T", "#OF"(%1) \n\t"

#define QPEL_H264V(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%mm6,%%mm7,d,q)
#define QPEL_H264HV(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%mm6,%%mm7,d,q)
#define QPEL_H264V_XMM(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%xmm6,%%xmm7,q,dqa)
#define QPEL_H264HV_XMM(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%xmm6,%%xmm7,q,dqa)
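
/* All the qpel lowpass kernels below evaluate the H.264 6-tap half-pel
 * filter (1,-5,20,20,-5,1) with rounding.  Scalar sketch for one output
 * sample (local names only):
 *   int v = src[-2] - 5*src[-1] + 20*src[0] + 20*src[1] - 5*src[2] + src[3];
 *   dst[0] = av_clip_uint8((v + 16) >> 5);
 * With A..F holding six consecutive taps, the asm reaches the +/-20 and
 * +/-5 weights through a single pmullw by 5:
 *   T = (((C+D)<<2) - B - E)*5 + A + F = 20*(C+D) - 5*(B+E) + A + F. */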


#define QPEL_H264(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=4;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "1: \n\t"\
        "movd -1(%0), %%mm1 \n\t"\
        "movd   (%0), %%mm2 \n\t"\
        "movd  1(%0), %%mm3 \n\t"\
        "movd  2(%0), %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "paddw %%mm0, %%mm1 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "movd -2(%0), %%mm0 \n\t"\
        "movd  3(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm3, %%mm0 \n\t"\
        "psllw $2, %%mm2 \n\t"\
        "psubw %%mm1, %%mm2 \n\t"\
        "pmullw %%mm4, %%mm2 \n\t"\
        "paddw %%mm5, %%mm0 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm6, d)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=4;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %0, %%mm4 \n\t"\
        "movq %1, %%mm5 \n\t"\
        :: "m"(ff_pw_5), "m"(ff_pw_16)\
    );\
    do{\
        asm volatile(\
            "movd -1(%0), %%mm1 \n\t"\
            "movd   (%0), %%mm2 \n\t"\
            "movd  1(%0), %%mm3 \n\t"\
            "movd  2(%0), %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "paddw %%mm0, %%mm1 \n\t"\
            "paddw %%mm3, %%mm2 \n\t"\
            "movd -2(%0), %%mm0 \n\t"\
            "movd  3(%0), %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "paddw %%mm3, %%mm0 \n\t"\
            "psllw $2, %%mm2 \n\t"\
            "psubw %%mm1, %%mm2 \n\t"\
            "pmullw %%mm4, %%mm2 \n\t"\
            "paddw %%mm5, %%mm0 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "movd (%2), %%mm3 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "packuswb %%mm0, %%mm0 \n\t"\
            PAVGB" %%mm3, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm6, d)\
            "add %4, %0 \n\t"\
            "add %4, %1 \n\t"\
            "add %3, %2 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\
            : "memory"\
        );\
    }while(--h);\
}\
static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    src -= 2*srcStride;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movd (%0), %%mm0 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm1 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm3 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm4 \n\t"\
        "add %2, %0 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
        QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
        QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
\
        : "+a"(src), "+c"(dst)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    int h=4;\
    int w=3;\
    src -= 2*srcStride+2;\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
\
            : "+a"(src)\
            : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        tmp += 4;\
        src += 4 - 9*srcStride;\
    }\
    tmp -= 3*4;\
    asm volatile(\
        "1: \n\t"\
        "movq     (%0), %%mm0 \n\t"\
        "paddw  10(%0), %%mm0 \n\t"\
        "movq    2(%0), %%mm1 \n\t"\
        "paddw   8(%0), %%mm1 \n\t"\
        "movq    4(%0), %%mm2 \n\t"\
        "paddw   6(%0), %%mm2 \n\t"\
        "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
        "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
        "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
        "paddsw %%mm2, %%mm0 \n\t"\
        "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\
        "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 */\
        "psraw $6, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm7, d)\
        "add $24, %0 \n\t"\
        "add %3, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(tmp), "+c"(dst), "+g"(h)\
        : "S"((x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm6 \n\t"\
        "1: \n\t"\
        "movq  (%0), %%mm0 \n\t"\
        "movq 1(%0), %%mm2 \n\t"\
        "movq %%mm0, %%mm1 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm3, %%mm1 \n\t"\
        "psllw $2, %%mm0 \n\t"\
        "psllw $2, %%mm1 \n\t"\
        "movq -1(%0), %%mm2 \n\t"\
        "movq  2(%0), %%mm4 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "movq %%mm4, %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        "punpckhbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm4, %%mm2 \n\t"\
        "paddw %%mm3, %%mm5 \n\t"\
        "psubw %%mm2, %%mm0 \n\t"\
        "psubw %%mm5, %%mm1 \n\t"\
        "pmullw %%mm6, %%mm0 \n\t"\
        "pmullw %%mm6, %%mm1 \n\t"\
        "movd -2(%0), %%mm2 \n\t"\
        "movd  7(%0), %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "paddw %%mm5, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm4, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm1, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=8;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %0, %%mm6 \n\t"\
        :: "m"(ff_pw_5)\
    );\
    do{\
        asm volatile(\
            "movq  (%0), %%mm0 \n\t"\
            "movq 1(%0), %%mm2 \n\t"\
            "movq %%mm0, %%mm1 \n\t"\
            "movq %%mm2, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpckhbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpckhbw %%mm7, %%mm3 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm3, %%mm1 \n\t"\
            "psllw $2, %%mm0 \n\t"\
            "psllw $2, %%mm1 \n\t"\
            "movq -1(%0), %%mm2 \n\t"\
            "movq  2(%0), %%mm4 \n\t"\
            "movq %%mm2, %%mm3 \n\t"\
            "movq %%mm4, %%mm5 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpckhbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            "punpckhbw %%mm7, %%mm5 \n\t"\
            "paddw %%mm4, %%mm2 \n\t"\
            "paddw %%mm3, %%mm5 \n\t"\
            "psubw %%mm2, %%mm0 \n\t"\
            "psubw %%mm5, %%mm1 \n\t"\
            "pmullw %%mm6, %%mm0 \n\t"\
            "pmullw %%mm6, %%mm1 \n\t"\
            "movd -2(%0), %%mm2 \n\t"\
            "movd  7(%0), %%mm5 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm5 \n\t"\
            "paddw %%mm3, %%mm2 \n\t"\
            "paddw %%mm5, %%mm4 \n\t"\
            "movq %5, %%mm5 \n\t"\
            "paddw %%mm5, %%mm2 \n\t"\
            "paddw %%mm5, %%mm4 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm4, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "movq (%2), %%mm4 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            PAVGB" %%mm4, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm5, q)\
            "add %4, %0 \n\t"\
            "add %4, %1 \n\t"\
            "add %3, %2 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
              "m"(ff_pw_16)\
            : "memory"\
        );\
    }while(--h);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int w= 2;\
    src -= 2*srcStride;\
\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
            QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
\
            : "+a"(src), "+c"(dst)\
            : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        if(h==16){\
            asm volatile(\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
                QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
                QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
                QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
                QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
\
                : "+a"(src), "+c"(dst)\
                : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
                : "memory"\
            );\
        }\
        src += 4-(h+5)*srcStride;\
        dst += 4-h*dstStride;\
    }\
}\
static av_always_inline void OPNAME ## h264_qpel8or16_hv1_lowpass_ ## MMX(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){\
    int w = (size+8)>>2;\
    src -= 2*srcStride+2;\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
            QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
            QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
            : "+a"(src)\
            : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        if(size==16){\
            asm volatile(\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1,  8*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2,  9*48)\
                QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
                QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
                QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
                QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
                : "+a"(src)\
                : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
                : "memory"\
            );\
        }\
        tmp += 4;\
        src += 4 - (size+5)*srcStride;\
    }\
}\
static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
    int w = size>>4;\
    do{\
        int h = size;\
        asm volatile(\
            "1: \n\t"\
            "movq     (%0), %%mm0 \n\t"\
            "movq    8(%0), %%mm3 \n\t"\
            "movq    2(%0), %%mm1 \n\t"\
            "movq   10(%0), %%mm4 \n\t"\
            "paddw   %%mm4, %%mm0 \n\t"\
            "paddw   %%mm3, %%mm1 \n\t"\
            "paddw  18(%0), %%mm3 \n\t"\
            "paddw  16(%0), %%mm4 \n\t"\
            "movq    4(%0), %%mm2 \n\t"\
            "movq   12(%0), %%mm5 \n\t"\
            "paddw   6(%0), %%mm2 \n\t"\
            "paddw  14(%0), %%mm5 \n\t"\
            "psubw %%mm1, %%mm0 \n\t"\
            "psubw %%mm4, %%mm3 \n\t"\
            "psraw $2, %%mm0 \n\t"\
            "psraw $2, %%mm3 \n\t"\
            "psubw %%mm1, %%mm0 \n\t"\
            "psubw %%mm4, %%mm3 \n\t"\
            "paddsw %%mm2, %%mm0 \n\t"\
            "paddsw %%mm5, %%mm3 \n\t"\
            "psraw $2, %%mm0 \n\t"\
            "psraw $2, %%mm3 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm5, %%mm3 \n\t"\
            "psraw $6, %%mm0 \n\t"\
            "psraw $6, %%mm3 \n\t"\
            "packuswb %%mm3, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm7, q)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((x86_reg)dstStride)\
            : "memory"\
        );\
        tmp += 8 - size*24;\
        dst += 8 - size*dstStride;\
    }while(w--);\
}\
\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
}\
static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    put_h264_qpel8or16_hv1_lowpass_ ## MMX(tmp, src, tmpStride, srcStride, size);\
    OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\
}\
\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\
}\
\
static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    asm volatile(\
        "movq   (%1), %%mm0 \n\t"\
        "movq 24(%1), %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        "packuswb %%mm1, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        "lea (%0,%3,2), %0 \n\t"\
        "lea (%2,%4,2), %2 \n\t"\
        "movq 48(%1), %%mm0 \n\t"\
        "movq 72(%1), %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        "packuswb %%mm1, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        :"+a"(src8), "+c"(src16), "+d"(dst)\
        :"S"((x86_reg)src8Stride), "D"((x86_reg)dstStride)\
        :"memory");\
}\
static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    do{\
        asm volatile(\
            "movq      (%1), %%mm0 \n\t"\
            "movq     8(%1), %%mm1 \n\t"\
            "movq    48(%1), %%mm2 \n\t"\
            "movq  8+48(%1), %%mm3 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "psraw $5, %%mm2 \n\t"\
            "psraw $5, %%mm3 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            "packuswb %%mm3, %%mm2 \n\t"\
            PAVGB" (%0), %%mm0 \n\t"\
            PAVGB" (%0,%3), %%mm2 \n\t"\
            OP(%%mm0, (%2), %%mm5, q)\
            OP(%%mm2, (%2,%4), %%mm5, q)\
            ::"a"(src8), "c"(src16), "d"(dst),\
              "r"((x86_reg)src8Stride), "r"((x86_reg)dstStride)\
            :"memory");\
        src8 += 2L*src8Stride;\
        src16 += 48;\
        dst += 2L*dstStride;\
    }while(h-=2);\
}\
static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst  , src16  , src8  , dstStride, src8Stride, h);\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
}\


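/* The *_l2_shift5 helpers take 16-bit intermediates from the hv pass
 * (stored 24 words per row, hence the 48-byte offsets), shift them down by
 * 5, clamp to bytes and average the result with an 8-bit source row; this
 * serves the qpel positions that blend the hv midpoint with a half-pel
 * sample. */
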
#ifdef ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=16;\
    asm volatile(\
        "pxor %%xmm15, %%xmm15 \n\t"\
        "movdqa %6, %%xmm14 \n\t"\
        "movdqa %7, %%xmm13 \n\t"\
        "1: \n\t"\
        "lddqu  3(%0), %%xmm1 \n\t"\
        "lddqu -5(%0), %%xmm7 \n\t"\
        "movdqa %%xmm1, %%xmm0 \n\t"\
        "punpckhbw %%xmm15, %%xmm1 \n\t"\
        "punpcklbw %%xmm15, %%xmm0 \n\t"\
        "punpcklbw %%xmm15, %%xmm7 \n\t"\
        "movdqa %%xmm1, %%xmm2 \n\t"\
        "movdqa %%xmm0, %%xmm6 \n\t"\
        "movdqa %%xmm1, %%xmm3 \n\t"\
        "movdqa %%xmm0, %%xmm8 \n\t"\
        "movdqa %%xmm1, %%xmm4 \n\t"\
        "movdqa %%xmm0, %%xmm9 \n\t"\
        "movdqa %%xmm1, %%xmm5 \n\t"\
        "movdqa %%xmm0, %%xmm10 \n\t"\
        "palignr $6, %%xmm0, %%xmm5  \n\t"\
        "palignr $6, %%xmm7, %%xmm10 \n\t"\
        "palignr $8, %%xmm0, %%xmm4  \n\t"\
        "palignr $8, %%xmm7, %%xmm9  \n\t"\
        "palignr $10,%%xmm0, %%xmm3  \n\t"\
        "palignr $10,%%xmm7, %%xmm8  \n\t"\
        "paddw %%xmm1, %%xmm5  \n\t"\
        "paddw %%xmm0, %%xmm10 \n\t"\
        "palignr $12,%%xmm0, %%xmm2 \n\t"\
        "palignr $12,%%xmm7, %%xmm6 \n\t"\
        "palignr $14,%%xmm0, %%xmm1 \n\t"\
        "palignr $14,%%xmm7, %%xmm0 \n\t"\
        "paddw %%xmm3, %%xmm2 \n\t"\
        "paddw %%xmm8, %%xmm6 \n\t"\
        "paddw %%xmm4, %%xmm1 \n\t"\
        "paddw %%xmm9, %%xmm0 \n\t"\
        "psllw $2, %%xmm2 \n\t"\
        "psllw $2, %%xmm6 \n\t"\
        "psubw %%xmm1, %%xmm2 \n\t"\
        "psubw %%xmm0, %%xmm6 \n\t"\
        "paddw %%xmm13,%%xmm5  \n\t"\
        "paddw %%xmm13,%%xmm10 \n\t"\
        "pmullw %%xmm14,%%xmm2 \n\t"\
        "pmullw %%xmm14,%%xmm6 \n\t"\
        "lddqu (%2), %%xmm3 \n\t"\
        "paddw %%xmm5, %%xmm2  \n\t"\
        "paddw %%xmm10,%%xmm6  \n\t"\
        "psraw $5, %%xmm2 \n\t"\
        "psraw $5, %%xmm6 \n\t"\
        "packuswb %%xmm2,%%xmm6 \n\t"\
        "pavgb %%xmm3, %%xmm6 \n\t"\
        OP(%%xmm6, (%1), %%xmm4, dqa)\
        "add %5, %0 \n\t"\
        "add %5, %1 \n\t"\
        "add %4, %2 \n\t"\
        "decl %3 \n\t"\
        "jg 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\
        : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
          "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}
#else // ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}
#endif // ARCH_X86_64
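
/* On x86-64 the extra xmm8-xmm15 registers make the single-pass,
 * 16-pixel-wide version above possible; 32-bit builds instead decompose
 * the 16-wide case into two 8-wide calls. */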
1402
1403 #define QPEL_H264_H_XMM(OPNAME, OP, MMX)\
1404 static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
1405 int h=8;\
1406 asm volatile(\
1407 "pxor %%xmm7, %%xmm7 \n\t"\
1408 "movdqa %0, %%xmm6 \n\t"\
1409 :: "m"(ff_pw_5)\
1410 );\
1411 do{\
1412 asm volatile(\
1413 "lddqu -5(%0), %%xmm1 \n\t"\
1414 "movdqa %%xmm1, %%xmm0 \n\t"\
1415 "punpckhbw %%xmm7, %%xmm1 \n\t"\
1416 "punpcklbw %%xmm7, %%xmm0 \n\t"\
1417 "movdqa %%xmm1, %%xmm2 \n\t"\
1418 "movdqa %%xmm1, %%xmm3 \n\t"\
1419 "movdqa %%xmm1, %%xmm4 \n\t"\
1420 "movdqa %%xmm1, %%xmm5 \n\t"\
1421 "palignr $6, %%xmm0, %%xmm5 \n\t"\
1422 "palignr $8, %%xmm0, %%xmm4 \n\t"\
1423 "palignr $10,%%xmm0, %%xmm3 \n\t"\
1424 "paddw %%xmm1, %%xmm5 \n\t"\
1425 "palignr $12,%%xmm0, %%xmm2 \n\t"\
1426 "palignr $14,%%xmm0, %%xmm1 \n\t"\
1427 "paddw %%xmm3, %%xmm2 \n\t"\
1428 "paddw %%xmm4, %%xmm1 \n\t"\
1429 "psllw $2, %%xmm2 \n\t"\
1430 "movq (%2), %%xmm3 \n\t"\
1431 "psubw %%xmm1, %%xmm2 \n\t"\
1432 "paddw %5, %%xmm5 \n\t"\
1433 "pmullw %%xmm6, %%xmm2 \n\t"\
1434 "paddw %%xmm5, %%xmm2 \n\t"\
1435 "psraw $5, %%xmm2 \n\t"\
1436 "packuswb %%xmm2, %%xmm2 \n\t"\
1437 "pavgb %%xmm3, %%xmm2 \n\t"\
1438 OP(%%xmm2, (%1), %%xmm4, q)\
1439 "add %4, %0 \n\t"\
1440 "add %4, %1 \n\t"\
1441 "add %3, %2 \n\t"\
1442 : "+a"(src), "+c"(dst), "+d"(src2)\
1443 : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
1444 "m"(ff_pw_16)\
1445 : "memory"\
1446 );\
1447 }while(--h);\
1448 }\
1449 QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
1450 \
1451 static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1452 int h=8;\
1453 asm volatile(\
1454 "pxor %%xmm7, %%xmm7 \n\t"\
1455 "movdqa %5, %%xmm6 \n\t"\
1456 "1: \n\t"\
1457 "lddqu -5(%0), %%xmm1 \n\t"\
1458 "movdqa %%xmm1, %%xmm0 \n\t"\
1459 "punpckhbw %%xmm7, %%xmm1 \n\t"\
1460 "punpcklbw %%xmm7, %%xmm0 \n\t"\
1461 "movdqa %%xmm1, %%xmm2 \n\t"\
1462 "movdqa %%xmm1, %%xmm3 \n\t"\
1463 "movdqa %%xmm1, %%xmm4 \n\t"\
1464 "movdqa %%xmm1, %%xmm5 \n\t"\
1465 "palignr $6, %%xmm0, %%xmm5 \n\t"\
1466 "palignr $8, %%xmm0, %%xmm4 \n\t"\
1467 "palignr $10,%%xmm0, %%xmm3 \n\t"\
1468 "paddw %%xmm1, %%xmm5 \n\t"\
1469 "palignr $12,%%xmm0, %%xmm2 \n\t"\
1470 "palignr $14,%%xmm0, %%xmm1 \n\t"\
1471 "paddw %%xmm3, %%xmm2 \n\t"\
1472 "paddw %%xmm4, %%xmm1 \n\t"\
1473 "psllw $2, %%xmm2 \n\t"\
1474 "psubw %%xmm1, %%xmm2 \n\t"\
1475 "paddw %6, %%xmm5 \n\t"\
1476 "pmullw %%xmm6, %%xmm2 \n\t"\
1477 "paddw %%xmm5, %%xmm2 \n\t"\
1478 "psraw $5, %%xmm2 \n\t"\
1479 "packuswb %%xmm2, %%xmm2 \n\t"\
1480 OP(%%xmm2, (%1), %%xmm4, q)\
1481 "add %3, %0 \n\t"\
1482 "add %4, %1 \n\t"\
1483 "decl %2 \n\t"\
1484 " jnz 1b \n\t"\
1485 : "+a"(src), "+c"(dst), "+g"(h)\
1486 : "D"((x86_reg)srcStride), "S"((x86_reg)dstStride),\
1487 "m"(ff_pw_5), "m"(ff_pw_16)\
1488 : "memory"\
1489 );\
1490 }\
1491 static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1492 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
1493 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
1494 src += 8*srcStride;\
1495 dst += 8*dstStride;\
1496 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
1497 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
1498 }\
1499
1500 #define QPEL_H264_V_XMM(OPNAME, OP, MMX)\
1501 static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1502 src -= 2*srcStride;\
1503 \
1504 asm volatile(\
1505 "pxor %%xmm7, %%xmm7 \n\t"\
1506 "movq (%0), %%xmm0 \n\t"\
1507 "add %2, %0 \n\t"\
1508 "movq (%0), %%xmm1 \n\t"\
1509 "add %2, %0 \n\t"\
1510 "movq (%0), %%xmm2 \n\t"\
1511 "add %2, %0 \n\t"\
1512 "movq (%0), %%xmm3 \n\t"\
1513 "add %2, %0 \n\t"\
1514 "movq (%0), %%xmm4 \n\t"\
1515 "add %2, %0 \n\t"\
1516 "punpcklbw %%xmm7, %%xmm0 \n\t"\
1517 "punpcklbw %%xmm7, %%xmm1 \n\t"\
1518 "punpcklbw %%xmm7, %%xmm2 \n\t"\
1519 "punpcklbw %%xmm7, %%xmm3 \n\t"\
1520 "punpcklbw %%xmm7, %%xmm4 \n\t"\
1521 QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
1522 QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
1523 QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
1524 QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
1525 QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
1526 QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
1527 QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
1528 QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
1529 \
1530 : "+a"(src), "+c"(dst)\
1531 : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
1532 : "memory"\
1533 );\
1534 if(h==16){\
1535 asm volatile(\
1536 QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
1537 QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
1538 QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
1539 QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
1540 QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
1541 QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
1542 QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
1543 QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
1544 \
1545 : "+a"(src), "+c"(dst)\
1546 : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
1547 : "memory"\
1548 );\
1549 }\
1550 }\
1551 static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1552 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
1553 }\
1554 static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1555 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
1556 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
1557 }
1558
1559 static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){
1560 int w = (size+8)>>3;
1561 src -= 2*srcStride+2;
1562 while(w--){
1563 asm volatile(
1564 "pxor %%xmm7, %%xmm7 \n\t"
1565 "movq (%0), %%xmm0 \n\t"
1566 "add %2, %0 \n\t"
1567 "movq (%0), %%xmm1 \n\t"
1568 "add %2, %0 \n\t"
1569 "movq (%0), %%xmm2 \n\t"
1570 "add %2, %0 \n\t"
1571 "movq (%0), %%xmm3 \n\t"
1572 "add %2, %0 \n\t"
1573 "movq (%0), %%xmm4 \n\t"
1574 "add %2, %0 \n\t"
1575 "punpcklbw %%xmm7, %%xmm0 \n\t"
1576 "punpcklbw %%xmm7, %%xmm1 \n\t"
1577 "punpcklbw %%xmm7, %%xmm2 \n\t"
1578 "punpcklbw %%xmm7, %%xmm3 \n\t"
1579 "punpcklbw %%xmm7, %%xmm4 \n\t"
1580 QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 0*48)
1581 QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 1*48)
1582 QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 2*48)
1583 QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 3*48)
1584 QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 4*48)
1585 QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 5*48)
1586 QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 6*48)
1587 QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 7*48)
1588 : "+a"(src)
1589 : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
1590 : "memory"
1591 );
1592 if(size==16){
1593 asm volatile(
1594 QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 8*48)
1595 QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 9*48)
1596 QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 10*48)
1597 QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 11*48)
1598 QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 12*48)
1599 QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 13*48)
1600 QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 14*48)
1601 QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 15*48)
1602 : "+a"(src)
1603 : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
1604 : "memory"
1605 );
1606 }
1607 tmp += 8;
1608 src += 8 - (size+5)*srcStride;
1609 }
1610 }
1611
#define QPEL_H264_HV2_XMM(OPNAME, OP, MMX)\
static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
    int h = size;\
    if(size == 16){\
        asm volatile(\
            "1: \n\t"\
            "movdqa 32(%0), %%xmm4 \n\t"\
            "movdqa 16(%0), %%xmm5 \n\t"\
            "movdqa (%0), %%xmm7 \n\t"\
            "movdqa %%xmm4, %%xmm3 \n\t"\
            "movdqa %%xmm4, %%xmm2 \n\t"\
            "movdqa %%xmm4, %%xmm1 \n\t"\
            "movdqa %%xmm4, %%xmm0 \n\t"\
            "palignr $10, %%xmm5, %%xmm0 \n\t"\
            "palignr $8, %%xmm5, %%xmm1 \n\t"\
            "palignr $6, %%xmm5, %%xmm2 \n\t"\
            "palignr $4, %%xmm5, %%xmm3 \n\t"\
            "palignr $2, %%xmm5, %%xmm4 \n\t"\
            "paddw %%xmm5, %%xmm0 \n\t"\
            "paddw %%xmm4, %%xmm1 \n\t"\
            "paddw %%xmm3, %%xmm2 \n\t"\
            "movdqa %%xmm5, %%xmm6 \n\t"\
            "movdqa %%xmm5, %%xmm4 \n\t"\
            "movdqa %%xmm5, %%xmm3 \n\t"\
            "palignr $8, %%xmm7, %%xmm4 \n\t"\
            "palignr $2, %%xmm7, %%xmm6 \n\t"\
            "palignr $10, %%xmm7, %%xmm3 \n\t"\
            "paddw %%xmm6, %%xmm4 \n\t"\
            "movdqa %%xmm5, %%xmm6 \n\t"\
            "palignr $6, %%xmm7, %%xmm5 \n\t"\
            "palignr $4, %%xmm7, %%xmm6 \n\t"\
            "paddw %%xmm7, %%xmm3 \n\t"\
            "paddw %%xmm6, %%xmm5 \n\t"\
            \
            "psubw %%xmm1, %%xmm0 \n\t"\
            "psubw %%xmm4, %%xmm3 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "psraw $2, %%xmm3 \n\t"\
            "psubw %%xmm1, %%xmm0 \n\t"\
            "psubw %%xmm4, %%xmm3 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "paddw %%xmm5, %%xmm3 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "psraw $2, %%xmm3 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "paddw %%xmm5, %%xmm3 \n\t"\
            "psraw $6, %%xmm0 \n\t"\
            "psraw $6, %%xmm3 \n\t"\
            "packuswb %%xmm0, %%xmm3 \n\t"\
            OP(%%xmm3, (%1), %%xmm7, dqa)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((x86_reg)dstStride)\
            : "memory"\
        );\
    }else{\
        asm volatile(\
            "1: \n\t"\
            "movdqa 16(%0), %%xmm1 \n\t"\
            "movdqa (%0), %%xmm0 \n\t"\
            "movdqa %%xmm1, %%xmm2 \n\t"\
            "movdqa %%xmm1, %%xmm3 \n\t"\
            "movdqa %%xmm1, %%xmm4 \n\t"\
            "movdqa %%xmm1, %%xmm5 \n\t"\
            "palignr $10, %%xmm0, %%xmm5 \n\t"\
            "palignr $8, %%xmm0, %%xmm4 \n\t"\
            "palignr $6, %%xmm0, %%xmm3 \n\t"\
            "palignr $4, %%xmm0, %%xmm2 \n\t"\
            "palignr $2, %%xmm0, %%xmm1 \n\t"\
            "paddw %%xmm5, %%xmm0 \n\t"\
            "paddw %%xmm4, %%xmm1 \n\t"\
            "paddw %%xmm3, %%xmm2 \n\t"\
            "psubw %%xmm1, %%xmm0 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "psubw %%xmm1, %%xmm0 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "psraw $6, %%xmm0 \n\t"\
            "packuswb %%xmm0, %%xmm0 \n\t"\
            OP(%%xmm0, (%1), %%xmm7, q)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((x86_reg)dstStride)\
            : "memory"\
        );\
    }\
}

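/* hv = the shared SSE2 vertical first pass into tmp, followed by the
 * instantiation-specific horizontal second pass over the intermediates. */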
#define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
    OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
}\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
}\

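/* These helpers operate on 8-byte-wide rows, so the SSE2/SSSE3 builds
 * simply alias the MMX2 versions; wider XMM registers presumably buy
 * nothing here. */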
#define put_pixels8_l2_sse2 put_pixels8_l2_mmx2
#define avg_pixels8_l2_sse2 avg_pixels8_l2_mmx2
#define put_pixels16_l2_sse2 put_pixels16_l2_mmx2
#define avg_pixels16_l2_sse2 avg_pixels16_l2_mmx2
#define put_pixels8_l2_ssse3 put_pixels8_l2_mmx2
#define avg_pixels8_l2_ssse3 avg_pixels8_l2_mmx2
#define put_pixels16_l2_ssse3 put_pixels16_l2_mmx2
#define avg_pixels16_l2_ssse3 avg_pixels16_l2_mmx2

#define put_pixels8_l2_shift5_sse2 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_sse2 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_sse2 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_sse2 avg_pixels16_l2_shift5_mmx2
#define put_pixels8_l2_shift5_ssse3 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_ssse3 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_ssse3 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_ssse3 avg_pixels16_l2_shift5_mmx2

#define put_h264_qpel8_h_lowpass_l2_sse2 put_h264_qpel8_h_lowpass_l2_mmx2
#define avg_h264_qpel8_h_lowpass_l2_sse2 avg_h264_qpel8_h_lowpass_l2_mmx2
#define put_h264_qpel16_h_lowpass_l2_sse2 put_h264_qpel16_h_lowpass_l2_mmx2
#define avg_h264_qpel16_h_lowpass_l2_sse2 avg_h264_qpel16_h_lowpass_l2_mmx2

#define put_h264_qpel8_v_lowpass_ssse3 put_h264_qpel8_v_lowpass_sse2
#define avg_h264_qpel8_v_lowpass_ssse3 avg_h264_qpel8_v_lowpass_sse2
#define put_h264_qpel16_v_lowpass_ssse3 put_h264_qpel16_v_lowpass_sse2
#define avg_h264_qpel16_v_lowpass_ssse3 avg_h264_qpel16_v_lowpass_sse2

#define put_h264_qpel8or16_hv2_lowpass_sse2 put_h264_qpel8or16_hv2_lowpass_mmx2
#define avg_h264_qpel8or16_hv2_lowpass_sse2 avg_h264_qpel8or16_hv2_lowpass_mmx2

#define H264_MC(OPNAME, SIZE, MMX, ALIGN) \
H264_MC_C(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_V(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_H(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_HV(OPNAME, SIZE, MMX, ALIGN)\

static void put_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    put_pixels16_sse2(dst, src, stride, 16);
}
static void avg_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    avg_pixels16_sse2(dst, src, stride, 16);
}
#define put_h264_qpel8_mc00_sse2 put_h264_qpel8_mc00_mmx2
#define avg_h264_qpel8_mc00_sse2 avg_h264_qpel8_mc00_mmx2

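/* The _mcXY suffix is the quarter-pel fractional position of the motion
 * vector: X horizontal, Y vertical, in quarter-pel units. mc20 is the
 * horizontal half-pel, mc02 the vertical half-pel, mc22 the 2D half-pel;
 * quarter positions are formed by averaging a half-pel plane with the
 * nearest full-pel (or other half-pel) samples via the *_l2 helpers. */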
#define H264_MC_C(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## MMX(dst, src, stride, SIZE);\
}\

#define H264_MC_H(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}\

#define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
}\

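/* mc12/mc32 avoid refiltering: halfV keeps the 16-bit vertical-pass
 * intermediates produced while computing halfHV, so the vertical half-pel
 * plane can be recovered with a plain >> 5 (the rounding bias being
 * already folded into the intermediates) and averaged with halfHV by
 * *_l2_shift5. The +2/+3 skip the leading intermediate columns to line up
 * with the requested quarter-pel position (our reading of the offsets). */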
#define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint16_t, temp[SIZE*(SIZE<8?12:24)]);\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((uintptr_t)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}\

#define H264_MC_4816(MMX)\
H264_MC(put_, 4, MMX, 8)\
H264_MC(put_, 8, MMX, 8)\
H264_MC(put_, 16,MMX, 8)\
H264_MC(avg_, 4, MMX, 8)\
H264_MC(avg_, 8, MMX, 8)\
H264_MC(avg_, 16,MMX, 8)\

#define H264_MC_816(QPEL, XMM)\
QPEL(put_, 8, XMM, 16)\
QPEL(put_, 16,XMM, 16)\
QPEL(avg_, 8, XMM, 16)\
QPEL(avg_, 16,XMM, 16)\


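/* Store hooks for the template instantiations below: PUT_OP writes the
 * result straight out, while the AVG ops first blend it with the existing
 * destination using the ISA's rounded byte average (pavgusb on 3DNow!,
 * pavgb on MMX2/SSE2). */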
#define AVG_3DNOW_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgusb " #temp ", " #a " \n\t"\
    "mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgb " #temp ", " #a " \n\t"\
    "mov" #size " " #a ", " #b " \n\t"

#define PAVGB "pavgusb"
QPEL_H264(put_, PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_, PUT_OP, mmx2)
QPEL_H264(avg_, AVG_MMX2_OP, mmx2)
QPEL_H264_V_XMM(put_, PUT_OP, sse2)
QPEL_H264_V_XMM(avg_, AVG_MMX2_OP, sse2)
QPEL_H264_HV_XMM(put_, PUT_OP, sse2)
QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, sse2)
#ifdef HAVE_SSSE3
QPEL_H264_H_XMM(put_, PUT_OP, ssse3)
QPEL_H264_H_XMM(avg_, AVG_MMX2_OP, ssse3)
QPEL_H264_HV2_XMM(put_, PUT_OP, ssse3)
QPEL_H264_HV2_XMM(avg_, AVG_MMX2_OP, ssse3)
QPEL_H264_HV_XMM(put_, PUT_OP, ssse3)
QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, ssse3)
#endif
#undef PAVGB

H264_MC_4816(3dnow)
H264_MC_4816(mmx2)
H264_MC_816(H264_MC_V, sse2)
H264_MC_816(H264_MC_HV, sse2)
#ifdef HAVE_SSSE3
H264_MC_816(H264_MC_H, ssse3)
H264_MC_816(H264_MC_HV, ssse3)
#endif


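/* Chroma MC. The included templates implement H.264's bilinear chroma
 * interpolation, roughly
 *     dst = ((8-x)*(8-y)*A + x*(8-y)*B + (8-x)*y*C + x*y*D + 32) >> 6
 * for the four neighbouring source samples A..D, instantiated once per
 * store op and ISA through the H264_CHROMA_* hooks; the _rnd/_nornd
 * wrappers just pin the template's rounding argument. */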
#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"

static void put_h264_chroma_mc8_mmx_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_mmx(dst, src, stride, h, x, y, 1);
}
static void put_h264_chroma_mc8_mmx_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_mmx(dst, src, stride, h, x, y, 0);
}

#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2
#define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_mmx.c"
static void avg_h264_chroma_mc8_mmx2_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_mmx2(dst, src, stride, h, x, y, 1);
}
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgusb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_3dnow
#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
#include "dsputil_h264_template_mmx.c"
static void avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_3dnow(dst, src, stride, h, x, y, 1);
}
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

#ifdef HAVE_SSSE3
#define AVG_OP(X)
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_ssse3.c"
static void put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
}
static void put_h264_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
}

#undef AVG_OP
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
#define AVG_OP(X) X
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_ssse3.c"
static void avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
}
#undef AVG_OP
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
#endif

/***********************************/
/* weighted prediction */

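/* A scalar sketch of what the loop below computes, with the spec's
 * rounding term pre-folded into the offset (clipping comes from packuswb;
 * paddsw guards the intermediate against overflow):
 *     dst[i] = av_clip_uint8((dst[i]*weight + (offset << log2_denom)
 *                             + ((1 << log2_denom) >> 1)) >> log2_denom)
 */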
static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int x, y;
    offset <<= log2_denom;
    offset += (1 << log2_denom) >> 1;
    asm volatile(
        "movd %0, %%mm4 \n\t"
        "movd %1, %%mm5 \n\t"
        "movd %2, %%mm6 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"
        "pshufw $0, %%mm5, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weight), "g"(offset), "g"(log2_denom)
    );
    for(y=0; y<h; y+=2){
        for(x=0; x<w; x+=4){
            asm volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm4, %%mm0 \n\t"
                "pmullw %%mm4, %%mm1 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"
                "paddsw %%mm5, %%mm1 \n\t"
                "psraw %%mm6, %%mm0 \n\t"
                "psraw %%mm6, %%mm1 \n\t"
                "packuswb %%mm7, %%mm0 \n\t"
                "packuswb %%mm7, %%mm1 \n\t"
                "movd %%mm0, %0 \n\t"
                "movd %%mm1, %1 \n\t"
                : "+m"(*(uint32_t*)(dst+x)),
                  "+m"(*(uint32_t*)(dst+x+stride))
            );
        }
        dst += 2*stride;
    }
}

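/* Bidirectional variant. ((offset+1)|1) == 2*((offset+1)>>1) + 1, so the
 * constant below folds the averaged offset and the +2^log2_denom rounding
 * into a single term (assuming offset arrives as the sum of the two
 * per-reference offsets); a scalar sketch:
 *     dst[i] = av_clip_uint8((dst[i]*weightd + src[i]*weights +
 *                             (((offset+1)|1) << log2_denom))
 *                            >> (log2_denom + 1))
 */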
static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
{
    int x, y;
    offset = ((offset + 1) | 1) << log2_denom;
    asm volatile(
        "movd %0, %%mm3 \n\t"
        "movd %1, %%mm4 \n\t"
        "movd %2, %%mm5 \n\t"
        "movd %3, %%mm6 \n\t"
        "pshufw $0, %%mm3, %%mm3 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"
        "pshufw $0, %%mm5, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
    );
    for(y=0; y<h; y++){
        for(x=0; x<w; x+=4){
            asm volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm3, %%mm0 \n\t"
                "pmullw %%mm4, %%mm1 \n\t"
                "paddsw %%mm1, %%mm0 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"
                "psraw %%mm6, %%mm0 \n\t"
                "packuswb %%mm0, %%mm0 \n\t"
                "movd %%mm0, %0 \n\t"
                : "+m"(*(uint32_t*)(dst+x))
                : "m"(*(uint32_t*)(src+x))
            );
        }
        src += stride;
        dst += stride;
    }
}

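/* Stamp out the fixed-size entry points the decoder expects, one
 * weight/biweight pair per partition size. */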
#define H264_WEIGHT(W,H) \
static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
} \
static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
    ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
H264_WEIGHT( 4, 8)
H264_WEIGHT( 4, 4)
H264_WEIGHT( 4, 2)
