;******************************************************************************
;* VP9 SIMD optimizations
;*
;* Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

; FIXME share with vp8dsp.asm
pw_256: times 8 dw 256
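; pmulhrsw against pw_256 computes ((x * 256 + 0x4000) >> 15), i.e. the
; round-and-shift (x + 64) >> 7 that normalizes the 7-bit filter taps below.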

%macro F8_TAPS 8
times 8 db %1, %2
times 8 db %3, %4
times 8 db %5, %6
times 8 db %7, %8
%endmacro
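; Each 8-tap filter is stored as four 16-byte rows of interleaved tap pairs
; (t0,t1), (t2,t3), (t4,t5), (t6,t7), each pair repeated 8 times, so that a
; single pmaddubsw per row multiplies two neighboring source bytes by their
; taps and sums the products.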
; int8_t ff_filters_ssse3[3][15][4][16]
const filters_ssse3 ; smooth
F8_TAPS -3, -1,  32,  64,  38,   1, -3,  0
F8_TAPS -2, -2,  29,  63,  41,   2, -3,  0
F8_TAPS -2, -2,  26,  63,  43,   4, -4,  0
F8_TAPS -2, -3,  24,  62,  46,   5, -4,  0
F8_TAPS -2, -3,  21,  60,  49,   7, -4,  0
F8_TAPS -1, -4,  18,  59,  51,   9, -4,  0
F8_TAPS -1, -4,  16,  57,  53,  12, -4, -1
F8_TAPS -1, -4,  14,  55,  55,  14, -4, -1
F8_TAPS -1, -4,  12,  53,  57,  16, -4, -1
F8_TAPS  0, -4,   9,  51,  59,  18, -4, -1
F8_TAPS  0, -4,   7,  49,  60,  21, -3, -2
F8_TAPS  0, -4,   5,  46,  62,  24, -3, -2
F8_TAPS  0, -4,   4,  43,  63,  26, -2, -2
F8_TAPS  0, -3,   2,  41,  63,  29, -2, -2
F8_TAPS  0, -3,   1,  38,  64,  32, -1, -3
; regular
F8_TAPS  0,  1,  -5, 126,   8,  -3,  1,  0
F8_TAPS -1,  3, -10, 122,  18,  -6,  2,  0
F8_TAPS -1,  4, -13, 118,  27,  -9,  3, -1
F8_TAPS -1,  4, -16, 112,  37, -11,  4, -1
F8_TAPS -1,  5, -18, 105,  48, -14,  4, -1
F8_TAPS -1,  5, -19,  97,  58, -16,  5, -1
F8_TAPS -1,  6, -19,  88,  68, -18,  5, -1
F8_TAPS -1,  6, -19,  78,  78, -19,  6, -1
F8_TAPS -1,  5, -18,  68,  88, -19,  6, -1
F8_TAPS -1,  5, -16,  58,  97, -19,  5, -1
F8_TAPS -1,  4, -14,  48, 105, -18,  5, -1
F8_TAPS -1,  4, -11,  37, 112, -16,  4, -1
F8_TAPS -1,  3,  -9,  27, 118, -13,  4, -1
F8_TAPS  0,  2,  -6,  18, 122, -10,  3, -1
F8_TAPS  0,  1,  -3,   8, 126,  -5,  1,  0
; sharp
F8_TAPS -1,  3,  -7, 127,   8,  -3,  1,  0
F8_TAPS -2,  5, -13, 125,  17,  -6,  3, -1
F8_TAPS -3,  7, -17, 121,  27, -10,  5, -2
F8_TAPS -4,  9, -20, 115,  37, -13,  6, -2
F8_TAPS -4, 10, -23, 108,  48, -16,  8, -3
F8_TAPS -4, 10, -24, 100,  59, -19,  9, -3
F8_TAPS -4, 11, -24,  90,  70, -21, 10, -4
F8_TAPS -4, 11, -23,  80,  80, -23, 11, -4
F8_TAPS -4, 10, -21,  70,  90, -24, 11, -4
F8_TAPS -3,  9, -19,  59, 100, -24, 10, -4
F8_TAPS -3,  8, -16,  48, 108, -23, 10, -4
F8_TAPS -2,  6, -13,  37, 115, -20,  9, -4
F8_TAPS -2,  5, -10,  27, 121, -17,  7, -3
F8_TAPS -1,  3,  -6,  17, 125, -13,  5, -2
F8_TAPS  0,  1,  -3,   8, 127,  -7,  3, -1
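; The table is laid out as 3 filter types (smooth, regular, sharp) x 15
; subpel positions x 4 tap-pair rows x 16 bytes. A caller presumably selects
; a tap set as &filters_ssse3[type][subpel - 1][0][0] (a sketch; subpel 0,
; the full-pel case, is served by the fpel functions at the end of the file).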

SECTION .text

%macro filter_h_fn 1
%assign %%px mmsize/2
cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filtery
    mova            m6, [pw_256]
    mova            m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
    mova            m8, [filteryq+16]
    mova            m9, [filteryq+32]
    mova           m10, [filteryq+48]
%endif
.loop:
    movh            m0, [srcq-3]
    movh            m1, [srcq-2]
    movh            m2, [srcq-1]
    movh            m3, [srcq+0]
    movh            m4, [srcq+1]
    movh            m5, [srcq+2]
    punpcklbw       m0, m1
    punpcklbw       m2, m3
    movh            m1, [srcq+3]
    movh            m3, [srcq+4]
    add           srcq, sstrideq
    punpcklbw       m4, m5
    punpcklbw       m1, m3
    pmaddubsw       m0, m7
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw       m2, m8
    pmaddubsw       m4, m9
    pmaddubsw       m1, m10
%else
    pmaddubsw       m2, [filteryq+16]
    pmaddubsw       m4, [filteryq+32]
    pmaddubsw       m1, [filteryq+48]
%endif
    paddw           m0, m2
    paddw           m4, m1
    paddsw          m0, m4
    pmulhrsw        m0, m6
%ifidn %1, avg
    movh            m1, [dstq]
%endif
    packuswb        m0, m0
%ifidn %1, avg
    pavgb           m0, m1
%endif
    movh        [dstq], m0
    add           dstq, dstrideq
    dec             hd
    jg .loop
    RET
%endmacro
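; A rough C model of one output pixel, assuming taps t[0..7] for the chosen
; subpel position (a sketch for orientation, not the exact libav code):
;
;     int sum = (t[0]*src[-3] + t[1]*src[-2])      /* pmaddubsw m0, row 0 */
;             + (t[2]*src[-1] + t[3]*src[ 0])      /* pmaddubsw m2, row 1 */
;             + (t[4]*src[ 1] + t[5]*src[ 2])      /* pmaddubsw m4, row 2 */
;             + (t[6]*src[ 3] + t[7]*src[ 4]);     /* pmaddubsw m1, row 3 */
;     dst[0] = av_clip_uint8((sum + 64) >> 7);     /* pmulhrsw + packuswb */
;
; The first two pairwise sums use plain paddw; the final paddsw saturates, so
; a 16-bit overflow clamps rather than wraps before the rounding shift.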

INIT_MMX ssse3
filter_h_fn put
filter_h_fn avg

INIT_XMM ssse3
filter_h_fn put
filter_h_fn avg

%macro filter_v_fn 1
%assign %%px mmsize/2
%if ARCH_X86_64
cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, src, dstride, sstride, h, filtery, src4, sstride3
%else
cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery, src4, sstride3
    mov       filteryq, r5mp
%define hd r4mp
%endif
    sub           srcq, sstrideq
    lea      sstride3q, [sstrideq*3]
    sub           srcq, sstrideq
    mova            m6, [pw_256]
    sub           srcq, sstrideq
    mova            m7, [filteryq+ 0]
    lea          src4q, [srcq+sstrideq*4]
%if ARCH_X86_64 && mmsize > 8
    mova            m8, [filteryq+16]
    mova            m9, [filteryq+32]
    mova           m10, [filteryq+48]
%endif
.loop:
    ; FIXME maybe reuse loads from previous rows, or just more generally
    ; unroll this to prevent multiple loads of the same data?
    movh            m0, [srcq]
    movh            m1, [srcq+sstrideq]
    movh            m2, [srcq+sstrideq*2]
    movh            m3, [srcq+sstride3q]
    movh            m4, [src4q]
    movh            m5, [src4q+sstrideq]
    punpcklbw       m0, m1
    punpcklbw       m2, m3
    movh            m1, [src4q+sstrideq*2]
    movh            m3, [src4q+sstride3q]
    add           srcq, sstrideq
    add          src4q, sstrideq
    punpcklbw       m4, m5
    punpcklbw       m1, m3
    pmaddubsw       m0, m7
%if ARCH_X86_64 && mmsize > 8
    pmaddubsw       m2, m8
    pmaddubsw       m4, m9
    pmaddubsw       m1, m10
%else
    pmaddubsw       m2, [filteryq+16]
    pmaddubsw       m4, [filteryq+32]
    pmaddubsw       m1, [filteryq+48]
%endif
    paddw           m0, m2
    paddw           m4, m1
    paddsw          m0, m4
    pmulhrsw        m0, m6
%ifidn %1, avg
    movh            m1, [dstq]
%endif
    packuswb        m0, m0
%ifidn %1, avg
    pavgb           m0, m1
%endif
    movh        [dstq], m0
    add           dstq, dstrideq
    dec             hd
    jg .loop
    RET
%endmacro
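; Same arithmetic as the horizontal filter, with the eight inputs taken from
; eight consecutive rows instead of neighboring columns: srcq covers rows
; 0..3 via sstrideq and sstride3q, src4q covers rows 4..7, so advancing one
; row costs two adds and no multiplies. On 32-bit x86 there are not enough
; GPRs for all six arguments, hence filtery and h are read from the stack
; (r5mp / r4mp).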

INIT_MMX ssse3
filter_v_fn put
filter_v_fn avg

INIT_XMM ssse3
filter_v_fn put
filter_v_fn avg

%macro fpel_fn 6
%if %2 == 4
%define %%srcfn movh
%define %%dstfn movh
%else
%define %%srcfn movu
%define %%dstfn mova
%endif

%if %2 <= 16
cglobal %1%2, 5, 7, 4, dst, src, dstride, sstride, h, dstride3, sstride3
    lea      sstride3q, [sstrideq*3]
    lea      dstride3q, [dstrideq*3]
%else
cglobal %1%2, 5, 5, 4, dst, src, dstride, sstride, h
%endif
.loop:
    %%srcfn         m0, [srcq]
    %%srcfn         m1, [srcq+s%3]
    %%srcfn         m2, [srcq+s%4]
    %%srcfn         m3, [srcq+s%5]
    lea           srcq, [srcq+sstrideq*%6]
%ifidn %1, avg
    pavgb           m0, [dstq]
    pavgb           m1, [dstq+d%3]
    pavgb           m2, [dstq+d%4]
    pavgb           m3, [dstq+d%5]
%endif
    %%dstfn     [dstq], m0
    %%dstfn [dstq+d%3], m1
    %%dstfn [dstq+d%4], m2
    %%dstfn [dstq+d%5], m3
    lea           dstq, [dstq+dstrideq*%6]
    sub             hd, %6
    jnz .loop
    RET
%endmacro
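; Full-pel copy/average for block widths 4..64. Four loads and four stores
; per iteration cover %6 rows: four rows for widths <= 16, two 32-byte rows
; for width 32, one 64-byte row for width 64. The s%3/d%3 token pasting
; prefixes each offset with its source or destination stride name: strideq
; becomes sstrideq/dstrideq, while mmsize (16 under INIT_XMM) becomes
; s16/d16, which the %defines below map back to the literal 16.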

%define d16 16
%define s16 16
INIT_MMX mmx
fpel_fn put, 4,  strideq, strideq*2, stride3q, 4
fpel_fn put, 8,  strideq, strideq*2, stride3q, 4
INIT_MMX sse
fpel_fn avg, 4,  strideq, strideq*2, stride3q, 4
fpel_fn avg, 8,  strideq, strideq*2, stride3q, 4
INIT_XMM sse
fpel_fn put, 16, strideq, strideq*2, stride3q, 4
fpel_fn put, 32, mmsize,  strideq,   strideq+mmsize, 2
fpel_fn put, 64, mmsize,  mmsize*2,  mmsize*3, 1
INIT_XMM sse2
fpel_fn avg, 16, strideq, strideq*2, stride3q, 4
fpel_fn avg, 32, mmsize,  strideq,   strideq+mmsize, 2
fpel_fn avg, 64, mmsize,  mmsize*2,  mmsize*3, 1
%undef s16
%undef d16