x86: deduplicate some constants
libavcodec/x86/vp9dsp.asm
;******************************************************************************
;* VP9 SIMD optimizations
;*
;* Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

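; rounding constant for pmulhrsw, shared with the rest of the x86 DSP code
; (hence the cextern) rather than duplicated in this file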
cextern pw_256

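; expands one 8-tap filter phase into four 16-byte vectors of interleaved
; tap pairs (0,1), (2,3), (4,5), (6,7), the operand layout pmaddubsw expects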
%macro F8_TAPS 8
times 8 db %1, %2
times 8 db %3, %4
times 8 db %5, %6
times 8 db %7, %8
%endmacro
; int8_t ff_filters_ssse3[3][15][4][16]
const filters_ssse3 ; smooth
F8_TAPS  -3,  -1,  32,  64,  38,   1,  -3,   0
F8_TAPS  -2,  -2,  29,  63,  41,   2,  -3,   0
F8_TAPS  -2,  -2,  26,  63,  43,   4,  -4,   0
F8_TAPS  -2,  -3,  24,  62,  46,   5,  -4,   0
F8_TAPS  -2,  -3,  21,  60,  49,   7,  -4,   0
F8_TAPS  -1,  -4,  18,  59,  51,   9,  -4,   0
F8_TAPS  -1,  -4,  16,  57,  53,  12,  -4,  -1
F8_TAPS  -1,  -4,  14,  55,  55,  14,  -4,  -1
F8_TAPS  -1,  -4,  12,  53,  57,  16,  -4,  -1
F8_TAPS   0,  -4,   9,  51,  59,  18,  -4,  -1
F8_TAPS   0,  -4,   7,  49,  60,  21,  -3,  -2
F8_TAPS   0,  -4,   5,  46,  62,  24,  -3,  -2
F8_TAPS   0,  -4,   4,  43,  63,  26,  -2,  -2
F8_TAPS   0,  -3,   2,  41,  63,  29,  -2,  -2
F8_TAPS   0,  -3,   1,  38,  64,  32,  -1,  -3
; regular
F8_TAPS   0,   1,  -5, 126,   8,  -3,   1,   0
F8_TAPS  -1,   3, -10, 122,  18,  -6,   2,   0
F8_TAPS  -1,   4, -13, 118,  27,  -9,   3,  -1
F8_TAPS  -1,   4, -16, 112,  37, -11,   4,  -1
F8_TAPS  -1,   5, -18, 105,  48, -14,   4,  -1
F8_TAPS  -1,   5, -19,  97,  58, -16,   5,  -1
F8_TAPS  -1,   6, -19,  88,  68, -18,   5,  -1
F8_TAPS  -1,   6, -19,  78,  78, -19,   6,  -1
F8_TAPS  -1,   5, -18,  68,  88, -19,   6,  -1
F8_TAPS  -1,   5, -16,  58,  97, -19,   5,  -1
F8_TAPS  -1,   4, -14,  48, 105, -18,   5,  -1
F8_TAPS  -1,   4, -11,  37, 112, -16,   4,  -1
F8_TAPS  -1,   3,  -9,  27, 118, -13,   4,  -1
F8_TAPS   0,   2,  -6,  18, 122, -10,   3,  -1
F8_TAPS   0,   1,  -3,   8, 126,  -5,   1,   0
; sharp
F8_TAPS  -1,   3,  -7, 127,   8,  -3,   1,   0
F8_TAPS  -2,   5, -13, 125,  17,  -6,   3,  -1
F8_TAPS  -3,   7, -17, 121,  27, -10,   5,  -2
F8_TAPS  -4,   9, -20, 115,  37, -13,   6,  -2
F8_TAPS  -4,  10, -23, 108,  48, -16,   8,  -3
F8_TAPS  -4,  10, -24, 100,  59, -19,   9,  -3
F8_TAPS  -4,  11, -24,  90,  70, -21,  10,  -4
F8_TAPS  -4,  11, -23,  80,  80, -23,  11,  -4
F8_TAPS  -4,  10, -21,  70,  90, -24,  11,  -4
F8_TAPS  -3,   9, -19,  59, 100, -24,  10,  -4
F8_TAPS  -3,   8, -16,  48, 108, -23,  10,  -4
F8_TAPS  -2,   6, -13,  37, 115, -20,   9,  -4
F8_TAPS  -2,   5, -10,  27, 121, -17,   7,  -3
F8_TAPS  -1,   3,  -6,  17, 125, -13,   5,  -2
F8_TAPS   0,   1,  -3,   8, 127,  -7,   3,  -1

SECTION .text

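; 8-tap horizontal subpel filter, %1 = put or avg; operates on rows of
; mmsize/2 (4 or 8) pixels. Source bytes srcq-3..srcq+4 are interleaved per
; tap pair and multiply-accumulated with pmaddubsw; pmulhrsw by pw_256 then
; applies the (sum + 64) >> 7 rounding the VP9 filter requires.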
%macro filter_h_fn 1
%assign %%px mmsize/2
cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filtery
    mova       m6, [pw_256]
    mova       m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
    mova       m8, [filteryq+16]
    mova       m9, [filteryq+32]
    mova       m10, [filteryq+48]
%endif
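; m8-m10 only exist on x86-64 with XMM registers; in the other cases the
; remaining three tap pairs are reloaded from memory on every iteration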
.loop:
    movh       m0, [srcq-3]
    movh       m1, [srcq-2]
    movh       m2, [srcq-1]
    movh       m3, [srcq+0]
    movh       m4, [srcq+1]
    movh       m5, [srcq+2]
    punpcklbw  m0, m1
    punpcklbw  m2, m3
    movh       m1, [srcq+3]
    movh       m3, [srcq+4]
    add        srcq, sstrideq
    punpcklbw  m4, m5
    punpcklbw  m1, m3
    pmaddubsw  m0, m7
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw  m2, m8
    pmaddubsw  m4, m9
    pmaddubsw  m1, m10
%else
    pmaddubsw  m2, [filteryq+16]
    pmaddubsw  m4, [filteryq+32]
    pmaddubsw  m1, [filteryq+48]
%endif
    paddw      m0, m2
    paddw      m4, m1
    paddsw     m0, m4
    pmulhrsw   m0, m6
%ifidn %1, avg
    movh       m1, [dstq]
%endif
    packuswb   m0, m0
%ifidn %1, avg
    pavgb      m0, m1
%endif
    movh       [dstq], m0
    add        dstq, dstrideq
    dec        hd
    jg         .loop
    RET
%endmacro

INIT_MMX ssse3
filter_h_fn put
filter_h_fn avg

INIT_XMM ssse3
filter_h_fn put
filter_h_fn avg

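; 8-tap vertical subpel filter, %1 = put or avg; same accumulation and
; (sum + 64) >> 7 rounding as the horizontal filter above, but reading the
; 8 taps from consecutive source rows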
%macro filter_v_fn 1
%assign %%px mmsize/2
%if ARCH_X86_64
cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, src, dstride, sstride, h, filtery, src4, sstride3
%else
cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery, src4, sstride3
    mov        filteryq, r5mp
%define hd r4mp
%endif
    sub        srcq, sstrideq
    lea        sstride3q, [sstrideq*3]
    sub        srcq, sstrideq
    mova       m6, [pw_256]
    sub        srcq, sstrideq
    mova       m7, [filteryq+ 0]
    lea        src4q, [srcq+sstrideq*4]
%if ARCH_X86_64 && mmsize > 8
    mova       m8, [filteryq+16]
    mova       m9, [filteryq+32]
    mova       m10, [filteryq+48]
%endif
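; srcq points at tap row 0 (3 rows above the output row) and src4q at tap
; row 4, so all 8 source rows are addressable using just sstrideq and
; sstride3q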
.loop:
    ; FIXME maybe reuse loads from previous rows, or just more generally
    ; unroll this to prevent multiple loads of the same data?
    movh       m0, [srcq]
    movh       m1, [srcq+sstrideq]
    movh       m2, [srcq+sstrideq*2]
    movh       m3, [srcq+sstride3q]
    movh       m4, [src4q]
    movh       m5, [src4q+sstrideq]
    punpcklbw  m0, m1
    punpcklbw  m2, m3
    movh       m1, [src4q+sstrideq*2]
    movh       m3, [src4q+sstride3q]
    add        srcq, sstrideq
    add        src4q, sstrideq
    punpcklbw  m4, m5
    punpcklbw  m1, m3
    pmaddubsw  m0, m7
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw  m2, m8
    pmaddubsw  m4, m9
    pmaddubsw  m1, m10
%else
    pmaddubsw  m2, [filteryq+16]
    pmaddubsw  m4, [filteryq+32]
    pmaddubsw  m1, [filteryq+48]
%endif
    paddw      m0, m2
    paddw      m4, m1
    paddsw     m0, m4
    pmulhrsw   m0, m6
%ifidn %1, avg
    movh       m1, [dstq]
%endif
    packuswb   m0, m0
%ifidn %1, avg
    pavgb      m0, m1
%endif
    movh       [dstq], m0
    add        dstq, dstrideq
    dec        hd
    jg         .loop
    RET
%endmacro

INIT_MMX ssse3
filter_v_fn put
filter_v_fn avg

INIT_XMM ssse3
filter_v_fn put
filter_v_fn avg

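; fullpel copy (put) or average (avg): %1 = op, %2 = block width, %3-%5 =
; the three extra load/store offsets of an iteration (an s or d prefix is
; pasted on so that stride expressions resolve to the source or destination
; stride), %6 = number of rows stepped per iteration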
%macro fpel_fn 6
%if %2 == 4
%define %%srcfn movh
%define %%dstfn movh
%else
%define %%srcfn movu
%define %%dstfn mova
%endif

%if %2 <= 16
cglobal %1%2, 5, 7, 4, dst, src, dstride, sstride, h, dstride3, sstride3
    lea        sstride3q, [sstrideq*3]
    lea        dstride3q, [dstrideq*3]
%else
cglobal %1%2, 5, 5, 4, dst, src, dstride, sstride, h
%endif
.loop:
    %%srcfn    m0, [srcq]
    %%srcfn    m1, [srcq+s%3]
    %%srcfn    m2, [srcq+s%4]
    %%srcfn    m3, [srcq+s%5]
    lea        srcq, [srcq+sstrideq*%6]
%ifidn %1, avg
    pavgb      m0, [dstq]
    pavgb      m1, [dstq+d%3]
    pavgb      m2, [dstq+d%4]
    pavgb      m3, [dstq+d%5]
%endif
    %%dstfn    [dstq], m0
    %%dstfn    [dstq+d%3], m1
    %%dstfn    [dstq+d%4], m2
    %%dstfn    [dstq+d%5], m3
    lea        dstq, [dstq+dstrideq*%6]
    sub        hd, %6
    jnz        .loop
    RET
%endmacro

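; fpel_fn pastes s/d onto its offset arguments; mmsize is already expanded
; to 16 on the invocation lines below, so s16/d16 must be defined for the
; 32- and 64-pixel-wide versions to assemble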
%define d16 16
%define s16 16
INIT_MMX mmx
fpel_fn put, 4,  strideq, strideq*2, stride3q, 4
fpel_fn put, 8,  strideq, strideq*2, stride3q, 4
INIT_MMX sse
fpel_fn avg, 4,  strideq, strideq*2, stride3q, 4
fpel_fn avg, 8,  strideq, strideq*2, stride3q, 4
INIT_XMM sse
fpel_fn put, 16, strideq, strideq*2, stride3q, 4
fpel_fn put, 32, mmsize,  strideq,   strideq+mmsize, 2
fpel_fn put, 64, mmsize,  mmsize*2,  mmsize*3, 1
INIT_XMM sse2
fpel_fn avg, 16, strideq, strideq*2, stride3q, 4
fpel_fn avg, 32, mmsize,  strideq,   strideq+mmsize, 2
fpel_fn avg, 64, mmsize,  mmsize*2,  mmsize*3, 1
%undef s16
%undef d16