sort H.264 mmx dsp functions into their own file
libavcodec/i386/dsputil_h264_template_mmx.c
/*
 * Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/**
 * MMX optimized version of (put|avg)_h264_chroma_mc8.
 * H264_CHROMA_MC8_TMPL must be defined to the desired function name and
 * H264_CHROMA_OP must be defined to empty for put and pavgb/pavgusb for avg.
 */
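/*
 * Typical instantiation, from the file that #includes this template
 * (a sketch; the exact macro values live in the including file):
 *
 *   #define H264_CHROMA_OP(S, D)
 *   #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
 *   #include "dsputil_h264_template_mmx.c"
 *   #undef H264_CHROMA_OP
 *   #undef H264_CHROMA_MC8_TMPL
 */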
static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    uint64_t AA __align8;
    uint64_t DD __align8;
    unsigned long srcos = (long)src & 7;
    uint64_t sh1 __align8 = srcos * 8;
    uint64_t sh2 __align8 = 56 - sh1;
    int i;

    assert(x<8 && y<8 && x>=0 && y>=0);

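    /* 1/8-pel bilinear chroma filter: with A = (8-x)*(8-y), B = x*(8-y),
     * C = (8-x)*y and D = x*y, each output pixel is
     *   dst[i] = (A*src[i] + B*src[i+1] + C*src[i+stride]
     *             + D*src[i+stride+1] + 32) >> 6;
     * note that A = xy - (8x+8y) + 64, which is how mm4 is built below. */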
    asm volatile("movd %1, %%mm4\n\t"
                 "movd %2, %%mm6\n\t"
                 "punpcklwd %%mm4, %%mm4\n\t"
                 "punpcklwd %%mm6, %%mm6\n\t"
                 "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
                 "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */
                 "movq %%mm4, %%mm5\n\t"
                 "pmullw %%mm6, %%mm4\n\t"    /* mm4 = x * y */
                 "psllw $3, %%mm5\n\t"
                 "psllw $3, %%mm6\n\t"
                 "movq %%mm5, %%mm7\n\t"
                 "paddw %%mm6, %%mm7\n\t"
                 "movq %%mm4, %0\n\t"         /* DD = x * y */
                 "psubw %%mm4, %%mm5\n\t"     /* mm5 = B = 8x - xy */
                 "psubw %%mm4, %%mm6\n\t"     /* mm6 = C = 8y - xy */
                 "paddw %3, %%mm4\n\t"
                 "psubw %%mm7, %%mm4\n\t"     /* mm4 = A = xy - (8x+8y) + 64 */
                 "pxor %%mm7, %%mm7\n\t"
                 : "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));

    asm volatile("movq %%mm4, %0" : "=m" (AA));
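    /* A is parked in AA (and D in DD): all eight MMX registers are live
     * inside the loop below, so A and D are reloaded as memory operands. */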

    src -= srcos;
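    /* src now points to the previous 8-byte boundary; the shift/or
     * sequences below rebuild the unaligned 8 bytes from two aligned
     * movq loads (sh1 = 8*srcos bits, sh2 = 56 - sh1). */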
    asm volatile(
        /* mm0 = src[0..7], mm1 = src[1..8] */
        "movq %0, %%mm1\n\t"
        "movq %1, %%mm0\n\t"
        "psrlq %2, %%mm1\n\t"
        "psllq %3, %%mm0\n\t"
        "movq %%mm0, %%mm4\n\t"
        "psllq $8, %%mm0\n\t"
        "por %%mm1, %%mm0\n\t"
        "psrlq $8, %%mm1\n\t"
        "por %%mm4, %%mm1\n\t"
        : : "m" (src[0]), "m" (src[8]), "m" (sh1), "m" (sh2));

    for(i=0; i<h; i++) {
        asm volatile(
            /* [mm2,mm3] = A * src[0..7] */
            "movq %%mm0, %%mm2\n\t"
            "punpcklbw %%mm7, %%mm2\n\t"
            "pmullw %0, %%mm2\n\t"
            "movq %%mm0, %%mm3\n\t"
            "punpckhbw %%mm7, %%mm3\n\t"
            "pmullw %0, %%mm3\n\t"

            /* [mm2,mm3] += B * src[1..8] */
            "movq %%mm1, %%mm0\n\t"
            "punpcklbw %%mm7, %%mm0\n\t"
            "pmullw %%mm5, %%mm0\n\t"
            "punpckhbw %%mm7, %%mm1\n\t"
            "pmullw %%mm5, %%mm1\n\t"
            "paddw %%mm0, %%mm2\n\t"
            "paddw %%mm1, %%mm3\n\t"
            : : "m" (AA));

        src += stride;
        asm volatile(
            /* mm0 = src[0..7], mm1 = src[1..8] */
            "movq %0, %%mm1\n\t"
            "movq %1, %%mm0\n\t"
            "psrlq %2, %%mm1\n\t"
            "psllq %3, %%mm0\n\t"
            "movq %%mm0, %%mm4\n\t"
            "psllq $8, %%mm0\n\t"
            "por %%mm1, %%mm0\n\t"
            "psrlq $8, %%mm1\n\t"
            "por %%mm4, %%mm1\n\t"
            : : "m" (src[0]), "m" (src[8]), "m" (sh1), "m" (sh2));

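        /* note: the next block has no operand list (basic asm), so the
         * MMX registers are written with a single '%' */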
        asm volatile(
            /* [mm2,mm3] += C * src[0..7] */
            "movq %mm0, %mm4\n\t"
            "punpcklbw %mm7, %mm4\n\t"
            "pmullw %mm6, %mm4\n\t"
            "paddw %mm4, %mm2\n\t"
            "movq %mm0, %mm4\n\t"
            "punpckhbw %mm7, %mm4\n\t"
            "pmullw %mm6, %mm4\n\t"
            "paddw %mm4, %mm3\n\t");

        asm volatile(
            /* [mm2,mm3] += D * src[1..8] */
            "movq %%mm1, %%mm4\n\t"
            "punpcklbw %%mm7, %%mm4\n\t"
            "pmullw %0, %%mm4\n\t"
            "paddw %%mm4, %%mm2\n\t"
            "movq %%mm1, %%mm4\n\t"
            "punpckhbw %%mm7, %%mm4\n\t"
            "pmullw %0, %%mm4\n\t"
            "paddw %%mm4, %%mm3\n\t"
            : : "m" (DD));
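
        /* the four weights sum to 64, so round with +32 and shift right
         * by 6; for the avg variant H264_CHROMA_OP averages the result
         * with the bytes already in dst */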
        asm volatile(
            /* dst[0..7] = pack(([mm2,mm3] + 32) >> 6) */
            "paddw %1, %%mm2\n\t"
            "paddw %1, %%mm3\n\t"
            "psrlw $6, %%mm2\n\t"
            "psrlw $6, %%mm3\n\t"
            "packuswb %%mm3, %%mm2\n\t"
            H264_CHROMA_OP(%0, %%mm2)
            "movq %%mm2, %0\n\t"
            : "=m" (dst[0]) : "m" (ff_pw_32));
        dst += stride;
    }
}