arm/aarch64: vp9lpf: Calculate !hev directly
[libav.git] / libavcodec / aarch64 / vp9lpf_neon.S
1/*
2 * Copyright (c) 2016 Google Inc.
3 *
4 * This file is part of Libav.
5 *
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21#include "libavutil/aarch64/asm.S"
22#include "neon.S"
23
24
25// The main loop filter macro is templated and can produce filters for
26// vectors of 8 or 16 bytes. The register mapping throughout the filter
27// is close to identical to the arm version (please try to maintain this,
28// if either is changed!). When the arm version uses e.g. d20 for the
29// input variable p3, the aarch64 version uses v20.8b or v20.16b, depending
30// on vector length.
31//
32// The number of elements in the vector is passed in via the macro parameter
 33// \sz, which is either .8b or .16b. For simple instructions that don't
34// lengthen or narrow things, this can easily be templated like this:
35// uabd v4\sz, v20\sz, v21\sz
36//
37// For instructions that lengthen or narrow content, the arm version would
38// have used q registers. For these instructions, we have macros that expand
39// into either a single e.g. uaddl instruction, or into a uaddl + uaddl2
40// pair, depending on the \sz parameter. Wherever the arm version would have
41// used a q register, these macros instead take two v registers, i.e. q3
42// is mapped to v6+v7. For the case with 8 byte input vectors, such a
43// lengthening operation is only stored in v6.8h (what was in q3 in the arm
44// case), while the 16 byte input vectors will use v6.8h + v7.8h.
45// Such a macro invocation would look like this:
46// uaddl_sz v8.8h, v9.8h, v17, v18, \sz
47//
48// That is, in the 8 byte input vector case, the second register in these
49// register pairs will be unused.
50// Unfortunately, this makes the code quite hard to read. For readability,
51// see the arm version instead.
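//
// As an illustration (not from the original source), an invocation like
//     uaddl_sz        v16.8h,  v17.8h,  v22, v23, \sz
// expands to a single
//     uaddl           v16.8h,  v22.8b,  v23.8b
// when \sz is .8b, and to the pair
//     uaddl           v16.8h,  v22.8b,  v23.8b
//     uaddl2          v17.8h,  v22.16b, v23.16b
// when \sz is .16b.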
52
53
54.macro uabdl_sz dst1, dst2, in1, in2, sz
55 uabdl \dst1, \in1\().8b, \in2\().8b
56.ifc \sz, .16b
57 uabdl2 \dst2, \in1\().16b, \in2\().16b
58.endif
59.endm
60
61.macro add_sz dst1, dst2, in1, in2, in3, in4, sz
62 add \dst1, \in1, \in3
63.ifc \sz, .16b
64 add \dst2, \in2, \in4
65.endif
66.endm
67
68.macro sub_sz dst1, dst2, in1, in2, in3, in4, sz
69 sub \dst1, \in1, \in3
70.ifc \sz, .16b
71 sub \dst2, \in2, \in4
72.endif
73.endm
74
75.macro uaddw_sz dst1, dst2, in1, in2, in3, sz
76 uaddw \dst1, \in1, \in3\().8b
77.ifc \sz, .16b
78 uaddw2 \dst2, \in2, \in3\().16b
79.endif
80.endm
81
82.macro usubw_sz dst1, dst2, in1, in2, in3, sz
83 usubw \dst1, \in1, \in3\().8b
84.ifc \sz, .16b
85 usubw2 \dst2, \in2, \in3\().16b
86.endif
87.endm
88
89.macro cmhs_sz dst1, dst2, in1, in2, in3, in4, sz
90 cmhs \dst1, \in1, \in3
91.ifc \sz, .16b
92 cmhs \dst2, \in2, \in4
93.endif
94.endm
95
96.macro xtn_sz dst, in1, in2, sz
97 xtn \dst\().8b, \in1
98.ifc \sz, .16b
99 xtn2 \dst\().16b, \in2
100.endif
101.endm
102
103.macro usubl_sz dst1, dst2, in1, in2, sz
104 usubl \dst1, \in1\().8b, \in2\().8b
105.ifc \sz, .16b
106 usubl2 \dst2, \in1\().16b, \in2\().16b
107.endif
108.endm
109
110.macro sqxtn_sz dst, in1, in2, sz
111 sqxtn \dst\().8b, \in1
112.ifc \sz, .16b
113 sqxtn2 \dst\().16b, \in2
114.endif
115.endm
116
117.macro sqxtun_sz dst, in1, in2, sz
118 sqxtun \dst\().8b, \in1
119.ifc \sz, .16b
120 sqxtun2 \dst\().16b, \in2
121.endif
122.endm
123
124.macro mul_sz dst1, dst2, in1, in2, in3, in4, sz
125 mul \dst1, \in1, \in3
126.ifc \sz, .16b
127 mul \dst2, \in2, \in4
128.endif
129.endm
130
131.macro saddw_sz dst1, dst2, in1, in2, in3, sz
132 saddw \dst1, \in1, \in3\().8b
133.ifc \sz, .16b
134 saddw2 \dst2, \in2, \in3\().16b
135.endif
136.endm
137
138.macro ssubw_sz dst1, dst2, in1, in2, in3, sz
139 ssubw \dst1, \in1, \in3\().8b
140.ifc \sz, .16b
141 ssubw2 \dst2, \in2, \in3\().16b
142.endif
143.endm
144
145.macro uxtl_sz dst1, dst2, in, sz
146 uxtl \dst1, \in\().8b
147.ifc \sz, .16b
148 uxtl2 \dst2, \in\().16b
149.endif
150.endm
151
152.macro uaddl_sz dst1, dst2, in1, in2, sz
153 uaddl \dst1, \in1\().8b, \in2\().8b
154.ifc \sz, .16b
155 uaddl2 \dst2, \in1\().16b, \in2\().16b
156.endif
157.endm
158
159.macro rshrn_sz dst, in1, in2, shift, sz
160 rshrn \dst\().8b, \in1, \shift
161.ifc \sz, .16b
162 rshrn2 \dst\().16b, \in2, \shift
163.endif
164.endm
165
166.macro ushll_sz dst1, dst2, in, shift, sz
167 ushll \dst1, \in\().8b, \shift
168.ifc \sz, .16b
169 ushll2 \dst2, \in\().16b, \shift
170.endif
171.endm
172
173// The input to and output from this macro is in the registers v16-v31,
174// and v0-v7 are used as scratch registers.
175// p7 = v16 .. p3 = v20, p0 = v23, q0 = v24, q3 = v27, q7 = v31
176// Depending on the width of the loop filter, we either use v16-v19
177// and v28-v31 as temp registers, or v8-v15.
178// When comparing to the arm version, tmpq1 == tmp1 + tmp2,
179// tmpq2 == tmp3 + tmp4, etc.
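// For example, in the wd=16 case below (where tmp1-tmp8 are v8-v15), what
// the arm version keeps in its first temporary q register (tmpq1)
// corresponds to v8.8h + v9.8h here.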
180.macro loop_filter wd, sz, mix, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8
181.if \mix == 0
182 dup v0.8h, w2 // E
183 dup v1.8h, w2 // E
184 dup v2\sz, w3 // I
185 dup v3\sz, w4 // H
186.else
187 dup v0.8h, w2 // E
188 dup v2.8b, w3 // I
189 dup v3.8b, w4 // H
190 lsr w6, w3, #8
191 lsr w7, w4, #8
192 ushr v1.8h, v0.8h, #8 // E
193 dup v4.8b, w6 // I
194 bic v0.8h, #255, lsl 8 // E
195 dup v5.8b, w7 // H
196 trn1 v2.2d, v2.2d, v4.2d
197 trn1 v3.2d, v3.2d, v5.2d
198.endif
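// In the mix case, E, I and H are assumed to arrive with the values for the
// two 8 pixel halves packed into the low and high byte of each parameter;
// the lsr/ushr/bic above unpack them so that v0.8h/v1.8h hold E for the
// first/second half, while v2 and v3 hold I and H with both halves combined
// into one 16 byte vector.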
199
200 uabd v4\sz, v20\sz, v21\sz // abs(p3 - p2)
201 uabd v5\sz, v21\sz, v22\sz // abs(p2 - p1)
202 uabd v6\sz, v22\sz, v23\sz // abs(p1 - p0)
203 uabd v7\sz, v24\sz, v25\sz // abs(q0 - q1)
204 uabd \tmp1\sz, v25\sz, v26\sz // abs(q1 - q2)
205 uabd \tmp2\sz, v26\sz, v27\sz // abs(q2 - q3)
206 umax v4\sz, v4\sz, v5\sz
207 umax v5\sz, v6\sz, v7\sz
208 umax \tmp1\sz, \tmp1\sz, \tmp2\sz
209 uabdl_sz v6.8h, v7.8h, v23, v24, \sz // abs(p0 - q0)
210 umax v4\sz, v4\sz, v5\sz
211 add_sz v6.8h, v7.8h, v6.8h, v7.8h, v6.8h, v7.8h, \sz // abs(p0 - q0) * 2
212 uabd v5\sz, v22\sz, v25\sz // abs(p1 - q1)
213 umax v4\sz, v4\sz, \tmp1\sz // max(abs(p3 - p2), ..., abs(q2 - q3))
214 ushr v5\sz, v5\sz, #1
215 cmhs v4\sz, v2\sz, v4\sz // max(abs()) <= I
 216 uaddw_sz v6.8h, v7.8h, v6.8h, v7.8h, v5, \sz // abs(p0 - q0) * 2 + (abs(p1 - q1) >> 1)
217 cmhs_sz v6.8h, v7.8h, v0.8h, v1.8h, v6.8h, v7.8h, \sz
218 xtn_sz v5, v6.8h, v7.8h, \sz
219 and v4\sz, v4\sz, v5\sz // fm
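// In C terms, fm corresponds to the VP9 filter mask condition, roughly:
//     fm = max(abs(p3 - p2), ..., abs(q2 - q3)) <= I &&
//          abs(p0 - q0) * 2 + (abs(p1 - q1) >> 1) <= E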
220
 221 // If no pixels need filtering, just exit as soon as possible
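// (Rather than doing a horizontal reduction in the vector unit, the check
// moves the 64 bit half/halves of the fm mask to general purpose registers
// and tests them there.)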
222 mov x5, v4.d[0]
223.ifc \sz, .16b
224 mov x6, v4.d[1]
225 adds x5, x5, x6
226 b.eq 9f
227.else
 228 cbz x5, 9f
 229.endif
230
231.if \wd >= 8
232 movi v0\sz, #1
233
234 uabd v6\sz, v20\sz, v23\sz // abs(p3 - p0)
235 uabd v2\sz, v21\sz, v23\sz // abs(p2 - p0)
236 uabd v1\sz, v22\sz, v23\sz // abs(p1 - p0)
237 uabd \tmp1\sz, v25\sz, v24\sz // abs(q1 - q0)
238 uabd \tmp2\sz, v26\sz, v24\sz // abs(q2 - q0)
239 uabd \tmp3\sz, v27\sz, v24\sz // abs(q3 - q0)
240 umax v6\sz, v6\sz, v2\sz
241 umax v1\sz, v1\sz, \tmp1\sz
242 umax \tmp2\sz, \tmp2\sz, \tmp3\sz
243.if \wd == 16
244 uabd v7\sz, v16\sz, v23\sz // abs(p7 - p0)
245 umax v6\sz, v6\sz, v1\sz
246 uabd v2\sz, v17\sz, v23\sz // abs(p6 - p0)
247 umax v6\sz, v6\sz, \tmp2\sz
248 uabd v1\sz, v18\sz, v23\sz // abs(p5 - p0)
249 cmhs v6\sz, v0\sz, v6\sz // flat8in
250 uabd v8\sz, v19\sz, v23\sz // abs(p4 - p0)
251 and v6\sz, v6\sz, v4\sz // flat8in && fm
252 uabd v9\sz, v28\sz, v24\sz // abs(q4 - q0)
253 bic v4\sz, v4\sz, v6\sz // fm && !flat8in
254 uabd v10\sz, v29\sz, v24\sz // abs(q5 - q0)
255 uabd v11\sz, v30\sz, v24\sz // abs(q6 - q0)
256 uabd v12\sz, v31\sz, v24\sz // abs(q7 - q0)
257
258 umax v7\sz, v7\sz, v2\sz
259 umax v1\sz, v1\sz, v8\sz
260 umax v9\sz, v9\sz, v10\sz
261 umax v11\sz, v11\sz, v12\sz
262 // The rest of the calculation of flat8out is interleaved below
263.else
264 // The rest of the calculation of flat8in is interleaved below
265.endif
266.endif
267
268 // Calculate the normal inner loop filter for 2 or 4 pixels
269 uabd v5\sz, v22\sz, v23\sz // abs(p1 - p0)
270.if \wd == 16
271 umax v7\sz, v7\sz, v1\sz
272 umax v9\sz, v9\sz, v11\sz
273.elseif \wd == 8
274 umax v6\sz, v6\sz, v1\sz
275.endif
276 uabd v1\sz, v25\sz, v24\sz // abs(q1 - q0)
277.if \wd == 16
278 umax v7\sz, v7\sz, v9\sz
279.elseif \wd == 8
280 umax v6\sz, v6\sz, \tmp2\sz
281.endif
282 usubl_sz \tmp1\().8h, \tmp2\().8h, v22, v25, \sz // p1 - q1
283 umax v5\sz, v5\sz, v1\sz // max(abs(p1 - p0), abs(q1 - q0))
284.if \mix != 0
285 mov v1.d[0], x11
286.endif
287 usubl_sz \tmp3\().8h, \tmp4\().8h, v24, v23, \sz // q0 - p0
288 movi \tmp5\().8h, #3
289.if \wd == 8
290 cmhs v6\sz, v0\sz, v6\sz // flat8in
291.endif
292.if \mix != 0
293 sxtl v1.8h, v1.8b
294.endif
 295 cmhs v5\sz, v3\sz, v5\sz // !hev
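// hev means abs(p1 - p0) > H || abs(q1 - q0) > H; since v5 already holds
// max(abs(p1 - p0), abs(q1 - q0)), the cmhs above yields !hev directly.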
296.if \wd == 8
297 // If a 4/8 or 8/4 mix is used, clear the relevant half of v6
298.if \mix != 0
299 and v6\sz, v6\sz, v1.16b
300.endif
301 and v6\sz, v6\sz, v4\sz // flat8in && fm
302.endif
303 sqxtn_sz \tmp1, \tmp1\().8h, \tmp2\().8h, \sz // av_clip_int8(p1 - q1)
304.if \wd == 16
305 cmhs v7\sz, v0\sz, v7\sz // flat8out
306.elseif \wd == 8
307 bic v4\sz, v4\sz, v6\sz // fm && !flat8in
308.endif
 309 and v5\sz, v5\sz, v4\sz // !hev && fm && !flat8in
310.if \wd == 16
311 and v7\sz, v7\sz, v6\sz // flat8out && flat8in && fm
312.endif
313
314 mul_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp5\().8h, \tmp5\().8h, \sz // 3 * (q0 - p0)
315 bic \tmp1\sz, \tmp1\sz, v5\sz // if (!hev) av_clip_int8 = 0
316 movi v2\sz, #4
317 saddw_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp1, \sz // 3 * (q0 - p0) [+ av_clip_int8(p1 - q1)]
318 movi v3\sz, #3
319 sqxtn_sz \tmp1, \tmp3\().8h, \tmp4\().8h, \sz // f
320.if \wd == 16
321 bic v6\sz, v6\sz, v7\sz // fm && flat8in && !flat8out
322.endif
323
324 sqadd \tmp3\sz, \tmp1\sz, v2\sz // FFMIN(f + 4, 127)
325 sqadd \tmp4\sz, \tmp1\sz, v3\sz // FFMIN(f + 3, 127)
326 uxtl_sz v0.8h, v1.8h, v23, \sz // p0
327 sshr \tmp3\sz, \tmp3\sz, #3 // f1
328 sshr \tmp4\sz, \tmp4\sz, #3 // f2
329
330 uxtl_sz v2.8h, v3.8h, v24, \sz // q0
331 saddw_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp4, \sz // p0 + f2
332 ssubw_sz v2.8h, v3.8h, v2.8h, v3.8h, \tmp3, \sz // q0 - f1
333 sqxtun_sz v0, v0.8h, v1.8h, \sz // out p0
334 sqxtun_sz v1, v2.8h, v3.8h, \sz // out q0
335 srshr \tmp3\sz, \tmp3\sz, #1 // f = (f1 + 1) >> 1
336 bit v23\sz, v0\sz, v4\sz // if (fm && !flat8in)
337 bit v24\sz, v1\sz, v4\sz
338
339 uxtl_sz v0.8h, v1.8h, v22, \sz // p1
340 uxtl_sz v2.8h, v3.8h, v25, \sz // q1
341 saddw_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp3, \sz // p1 + f
342 ssubw_sz v2.8h, v3.8h, v2.8h, v3.8h, \tmp3, \sz // q1 - f
343 sqxtun_sz v0, v0.8h, v1.8h, \sz // out p1
344 sqxtun_sz v2, v2.8h, v3.8h, \sz // out q1
345 bit v22\sz, v0\sz, v5\sz // if (!hev && fm && !flat8in)
346 bit v25\sz, v2\sz, v5\sz
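// In C terms, the normal filter applied above is roughly:
//     f  = av_clip_int8(3 * (q0 - p0) + (hev ? av_clip_int8(p1 - q1) : 0));
//     f1 = FFMIN(f + 4, 127) >> 3;
//     f2 = FFMIN(f + 3, 127) >> 3;
//     p0 = av_clip_uint8(p0 + f2);
//     q0 = av_clip_uint8(q0 - f1);
// and, only for the pixels where !hev:
//     f  = (f1 + 1) >> 1;
//     p1 = av_clip_uint8(p1 + f);
//     q1 = av_clip_uint8(q1 - f);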
347
348 // If no pixels need flat8in, jump to flat8out
349 // (or to a writeout of the inner 4 pixels, for wd=8)
350.if \wd >= 8
351 mov x5, v6.d[0]
352.ifc \sz, .16b
353 mov x6, v6.d[1]
354 adds x5, x5, x6
355 b.eq 6f
356.else
 357 cbz x5, 6f
 358.endif
359
360 // flat8in
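// The 7 tap filter computed here is, in C terms, roughly:
//     out p2 = (p3 + p3 + p3 + 2 * p2 + p1 + p0 + q0 + 4) >> 3
// with each following output formed by sliding the window one pixel
// (subtracting the leftmost input and adding the next one on the right).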
361 uaddl_sz \tmp1\().8h, \tmp2\().8h, v20, v21, \sz
362 uaddl_sz \tmp3\().8h, \tmp4\().8h, v22, v25, \sz
363 uaddl_sz \tmp5\().8h, \tmp6\().8h, v20, v22, \sz
364 uaddl_sz \tmp7\().8h, \tmp8\().8h, v23, v26, \sz
365 add_sz v0.8h, v1.8h, \tmp1\().8h, \tmp2\().8h, \tmp1\().8h, \tmp2\().8h, \sz
366 uaddw_sz v0.8h, v1.8h, v0.8h, v1.8h, v23, \sz
367 uaddw_sz v0.8h, v1.8h, v0.8h, v1.8h, v24, \sz
368 add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp5\().8h, \tmp6\().8h, \sz
369 sub_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp1\().8h, \tmp2\().8h, \sz
370 sub_sz \tmp7\().8h, \tmp8\().8h, \tmp7\().8h, \tmp8\().8h, \tmp5\().8h, \tmp6\().8h, \sz
371 rshrn_sz v2, v0.8h, v1.8h, #3, \sz // out p2
372
373 add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp3\().8h, \tmp4\().8h, \sz
374 uaddl_sz \tmp1\().8h, \tmp2\().8h, v20, v23, \sz
375 uaddl_sz \tmp3\().8h, \tmp4\().8h, v24, v27, \sz
376 rshrn_sz v3, v0.8h, v1.8h, #3, \sz // out p1
377
378 add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp7\().8h, \tmp8\().8h, \sz
379 sub_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp1\().8h, \tmp2\().8h, \sz
380 uaddl_sz \tmp5\().8h, \tmp6\().8h, v21, v24, \sz
381 uaddl_sz \tmp7\().8h, \tmp8\().8h, v25, v27, \sz
382 rshrn_sz v4, v0.8h, v1.8h, #3, \sz // out p0
383
384 add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp3\().8h, \tmp4\().8h, \sz
385 sub_sz \tmp7\().8h, \tmp8\().8h, \tmp7\().8h, \tmp8\().8h, \tmp5\().8h, \tmp6\().8h, \sz
386 uaddl_sz \tmp1\().8h, \tmp2\().8h, v22, v25, \sz
387 uaddl_sz \tmp3\().8h, \tmp4\().8h, v26, v27, \sz
388 rshrn_sz v5, v0.8h, v1.8h, #3, \sz // out q0
389
390 add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp7\().8h, \tmp8\().8h, \sz
391 sub_sz \tmp3\().8h, \tmp4\().8h, \tmp3\().8h, \tmp4\().8h, \tmp1\().8h, \tmp2\().8h, \sz
392 rshrn_sz \tmp5, v0.8h, v1.8h, #3, \sz // out q1
393
394 add_sz v0.8h, v1.8h, v0.8h, v1.8h, \tmp3\().8h, \tmp4\().8h, \sz
395 // The output here is written back into the input registers. This doesn't
 396 // matter for the flat8out part below, since we only update those pixels
397 // which won't be touched below.
398 bit v21\sz, v2\sz, v6\sz
399 bit v22\sz, v3\sz, v6\sz
400 bit v23\sz, v4\sz, v6\sz
401 rshrn_sz \tmp6, v0.8h, v1.8h, #3, \sz // out q2
402 bit v24\sz, v5\sz, v6\sz
403 bit v25\sz, \tmp5\sz, v6\sz
404 bit v26\sz, \tmp6\sz, v6\sz
405.endif
406.if \wd == 16
4076:
408 orr v2\sz, v6\sz, v7\sz
409 mov x5, v2.d[0]
410.ifc \sz, .16b
411 mov x6, v2.d[1]
412 adds x5, x5, x6
413 b.ne 1f
414.else
415 cbnz x5, 1f
416.endif
 417 // If no pixels need flat8in or flat8out, jump to a
418 // writeout of the inner 4 pixels
419 br x14
4201:
 421
422 mov x5, v7.d[0]
423.ifc \sz, .16b
 424 mov x6, v7.d[1]
425 adds x5, x5, x6
426 b.ne 1f
427.else
428 cbnz x5, 1f
429.endif
430 // If no pixels need flat8out, jump to a writeout of the inner 6 pixels
 431 br x15
 432
 4331:
434 // flat8out
 435 // This writes all outputs into v2-v17 (skipping v7 and v16).
436 // If this part is skipped, the output is read from v21-v26 (which is the input
437 // to this section).
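// The 15 tap filter is built the same way as the 7 tap one; in C terms the
// first output is roughly:
//     out p6 = (7 * p7 + 2 * p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 + 8) >> 4
// and each following output again slides the window by one pixel.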
438 ushll_sz v0.8h, v1.8h, v16, #3, \sz // 8 * v16
439 usubw_sz v0.8h, v1.8h, v0.8h, v1.8h, v16, \sz // 7 * v16
440 uaddw_sz v0.8h, v1.8h, v0.8h, v1.8h, v17, \sz
441 uaddl_sz v8.8h, v9.8h, v17, v18, \sz
442 uaddl_sz v10.8h, v11.8h, v19, v20, \sz
443 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v8.8h, v9.8h, \sz
444 uaddl_sz v8.8h, v9.8h, v16, v17, \sz
445 uaddl_sz v12.8h, v13.8h, v21, v22, \sz
446 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
447 uaddl_sz v10.8h, v11.8h, v18, v25, \sz
448 uaddl_sz v14.8h, v15.8h, v23, v24, \sz
449 sub_sz v10.8h, v11.8h, v10.8h, v11.8h, v8.8h, v9.8h, \sz
450 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v12.8h, v13.8h, \sz
451 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
452 uaddl_sz v12.8h, v13.8h, v16, v18, \sz
453 uaddl_sz v14.8h, v15.8h, v19, v26, \sz
454 rshrn_sz v2, v0.8h, v1.8h, #4, \sz
455
456 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
457 uaddl_sz v8.8h, v9.8h, v16, v19, \sz
458 uaddl_sz v10.8h, v11.8h, v20, v27, \sz
459 sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
460 bif v2\sz, v17\sz, v7\sz
461 rshrn_sz v3, v0.8h, v1.8h, #4, \sz
462
463 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
464 uaddl_sz v12.8h, v13.8h, v16, v20, \sz
465 uaddl_sz v14.8h, v15.8h, v21, v28, \sz
466 sub_sz v10.8h, v11.8h, v10.8h, v11.8h, v8.8h, v9.8h, \sz
467 bif v3\sz, v18\sz, v7\sz
468 rshrn_sz v4, v0.8h, v1.8h, #4, \sz
469
470 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
471 uaddl_sz v8.8h, v9.8h, v16, v21, \sz
472 uaddl_sz v10.8h, v11.8h, v22, v29, \sz
473 sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
474 bif v4\sz, v19\sz, v7\sz
475 rshrn_sz v5, v0.8h, v1.8h, #4, \sz
476
477 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
478 uaddl_sz v12.8h, v13.8h, v16, v22, \sz
479 uaddl_sz v14.8h, v15.8h, v23, v30, \sz
480 sub_sz v10.8h, v11.8h, v10.8h, v11.8h, v8.8h, v9.8h, \sz
481 bif v5\sz, v20\sz, v7\sz
482 rshrn_sz v6, v0.8h, v1.8h, #4, \sz
483
484 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
485 uaddl_sz v10.8h, v11.8h, v16, v23, \sz
486 sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
487 uaddl_sz v12.8h, v13.8h, v24, v31, \sz
488 bif v6\sz, v21\sz, v7\sz
489 rshrn_sz v8, v0.8h, v1.8h, #4, \sz
490
491 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
492 sub_sz v10.8h, v11.8h, v12.8h, v13.8h, v10.8h, v11.8h, \sz
493 uaddl_sz v12.8h, v13.8h, v17, v24, \sz
494 uaddl_sz v14.8h, v15.8h, v25, v31, \sz
495 bif v8\sz, v22\sz, v7\sz
496 rshrn_sz v9, v0.8h, v1.8h, #4, \sz
497
498 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v10.8h, v11.8h, \sz
499 sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
500 uaddl_sz v12.8h, v13.8h, v26, v31, \sz
501 bif v9\sz, v23\sz, v7\sz
502 rshrn_sz v10, v0.8h, v1.8h, #4, \sz
503
504 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
505 uaddl_sz v14.8h, v15.8h, v18, v25, \sz
506 uaddl_sz v18.8h, v19.8h, v19, v26, \sz
507 sub_sz v12.8h, v13.8h, v12.8h, v13.8h, v14.8h, v15.8h, \sz
508 uaddl_sz v14.8h, v15.8h, v27, v31, \sz
509 bif v10\sz, v24\sz, v7\sz
510 rshrn_sz v11, v0.8h, v1.8h, #4, \sz
511
512 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v12.8h, v13.8h, \sz
513 uaddl_sz v12.8h, v13.8h, v20, v27, \sz
514 sub_sz v14.8h, v15.8h, v14.8h, v15.8h, v18.8h, v19.8h, \sz
515 uaddl_sz v18.8h, v19.8h, v28, v31, \sz
516 bif v11\sz, v25\sz, v7\sz
517 sub_sz v18.8h, v19.8h, v18.8h, v19.8h, v12.8h, v13.8h, \sz
518 rshrn_sz v12, v0.8h, v1.8h, #4, \sz
519
520 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v14.8h, v15.8h, \sz
521 uaddl_sz v14.8h, v15.8h, v21, v28, \sz
522 uaddl_sz v20.8h, v21.8h, v29, v31, \sz
523 bif v12\sz, v26\sz, v7\sz
524 rshrn_sz v13, v0.8h, v1.8h, #4, \sz
525
526 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v18.8h, v19.8h, \sz
527 sub_sz v20.8h, v21.8h, v20.8h, v21.8h, v14.8h, v15.8h, \sz
528 uaddl_sz v18.8h, v19.8h, v22, v29, \sz
529 uaddl_sz v22.8h, v23.8h, v30, v31, \sz
530 bif v13\sz, v27\sz, v7\sz
531 rshrn_sz v14, v0.8h, v1.8h, #4, \sz
532
533 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v20.8h, v21.8h, \sz
534 sub_sz v22.8h, v23.8h, v22.8h, v23.8h, v18.8h, v19.8h, \sz
535 bif v14\sz, v28\sz, v7\sz
536 rshrn_sz v15, v0.8h, v1.8h, #4, \sz
537
538 add_sz v0.8h, v1.8h, v0.8h, v1.8h, v22.8h, v23.8h, \sz
539 bif v15\sz, v29\sz, v7\sz
540 rshrn_sz v17, v0.8h, v1.8h, #4, \sz
541 bif v17\sz, v30\sz, v7\sz
542.endif
543.endm
544
545// For wd <= 8, we use v16-v19 and v28-v31 for temp registers,
546// while we need those for inputs/outputs in wd=16 and use v8-v15
547// for temp registers there instead.
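// Note that v8-v15 (d8-d15) are callee saved in the AAPCS64 ABI, which is
// why the wd=16 entry points below save and restore d8-d15 around the
// filter.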
548function vp9_loop_filter_4
549 loop_filter 4, .8b, 0, v16, v17, v18, v19, v28, v29, v30, v31
550 ret
5519:
552 br x10
553endfunc
554
555function vp9_loop_filter_4_16b_mix_44
556 loop_filter 4, .16b, 44, v16, v17, v18, v19, v28, v29, v30, v31
557 ret
5589:
559 br x10
560endfunc
561
562function vp9_loop_filter_8
563 loop_filter 8, .8b, 0, v16, v17, v18, v19, v28, v29, v30, v31
564 ret
5656:
 566 br x13
5679:
568 br x10
569endfunc
570
571function vp9_loop_filter_8_16b_mix
572 loop_filter 8, .16b, 88, v16, v17, v18, v19, v28, v29, v30, v31
573 ret
5746:
 575 br x13
5769:
577 br x10
578endfunc
579
580function vp9_loop_filter_16
581 loop_filter 16, .8b, 0, v8, v9, v10, v11, v12, v13, v14, v15
582 ret
5839:
584 ldp d8, d9, [sp], 0x10
585 ldp d10, d11, [sp], 0x10
586 ldp d12, d13, [sp], 0x10
587 ldp d14, d15, [sp], 0x10
588 br x10
589endfunc
590
591function vp9_loop_filter_16_16b
592 loop_filter 16, .16b, 0, v8, v9, v10, v11, v12, v13, v14, v15
593 ret
5949:
595 ldp d8, d9, [sp], 0x10
596 ldp d10, d11, [sp], 0x10
597 ldp d12, d13, [sp], 0x10
598 ldp d14, d15, [sp], 0x10
599 br x10
600endfunc
601
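// The wrapper macros below call the shared filter functions above. Since
// the filter body can bail out at several points, the return paths are
// passed in registers set up by the caller: x10 holds the return address
// of the public function (used by the early exit at 9:, which skips all
// writeback), while x13 (wd=8) and x14/x15 (wd=16) point at alternative
// writeout paths set up with adr below.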
602.macro loop_filter_4
603 bl vp9_loop_filter_4
604.endm
605
606.macro loop_filter_4_16b_mix mix
607 bl vp9_loop_filter_4_16b_mix_\mix
608.endm
609
610.macro loop_filter_8
611 // calculate alternative 'return' targets
612 adr x13, 6f
 613 bl vp9_loop_filter_8
614.endm
615
616.macro loop_filter_8_16b_mix mix
617 // calculate alternative 'return' targets
618 adr x13, 6f
619.if \mix == 48
620 mov x11, #0xffffffff00000000
621.elseif \mix == 84
622 mov x11, #0x00000000ffffffff
623.else
624 mov x11, #0xffffffffffffffff
625.endif
626 bl vp9_loop_filter_8_16b_mix
627.endm
628
629.macro loop_filter_16
630 // calculate alternative 'return' targets
631 adr x14, 7f
632 adr x15, 8f
 633 bl vp9_loop_filter_16
634.endm
635
636.macro loop_filter_16_16b
637 // calculate alternative 'return' targets
638 adr x14, 7f
639 adr x15, 8f
 640 bl vp9_loop_filter_16_16b
641.endm
642
643
 644// The public functions in this file have the following signature:
645// void loop_filter(uint8_t *dst, ptrdiff_t stride, int mb_lim, int lim, int hev_thr);
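// For the mix (48/84/88) variants operating on 16 pixels, mb_lim (E),
// lim (I) and hev_thr (H) are assumed to carry the values for the two
// 8 pixel halves packed into the low and high byte of each parameter.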
646
647function ff_vp9_loop_filter_v_4_8_neon, export=1
648 mov x10, x30
649 sub x9, x0, x1, lsl #2
650 ld1 {v20.8b}, [x9], x1 // p3
651 ld1 {v24.8b}, [x0], x1 // q0
652 ld1 {v21.8b}, [x9], x1 // p2
653 ld1 {v25.8b}, [x0], x1 // q1
654 ld1 {v22.8b}, [x9], x1 // p1
655 ld1 {v26.8b}, [x0], x1 // q2
656 ld1 {v23.8b}, [x9], x1 // p0
657 ld1 {v27.8b}, [x0], x1 // q3
658 sub x0, x0, x1, lsl #2
659 sub x9, x9, x1, lsl #1
660
661 loop_filter_4
662
663 st1 {v22.8b}, [x9], x1
664 st1 {v24.8b}, [x0], x1
665 st1 {v23.8b}, [x9], x1
666 st1 {v25.8b}, [x0], x1
667
668 br x10
669endfunc
670
671function ff_vp9_loop_filter_v_44_16_neon, export=1
672 mov x10, x30
673 sub x9, x0, x1, lsl #2
674 ld1 {v20.16b}, [x9], x1 // p3
675 ld1 {v24.16b}, [x0], x1 // q0
676 ld1 {v21.16b}, [x9], x1 // p2
677 ld1 {v25.16b}, [x0], x1 // q1
678 ld1 {v22.16b}, [x9], x1 // p1
679 ld1 {v26.16b}, [x0], x1 // q2
680 ld1 {v23.16b}, [x9], x1 // p0
681 ld1 {v27.16b}, [x0], x1 // q3
682 sub x0, x0, x1, lsl #2
683 sub x9, x9, x1, lsl #1
684
685 loop_filter_4_16b_mix 44
686
687 st1 {v22.16b}, [x9], x1
688 st1 {v24.16b}, [x0], x1
689 st1 {v23.16b}, [x9], x1
690 st1 {v25.16b}, [x0], x1
691
692 br x10
693endfunc
694
695function ff_vp9_loop_filter_h_4_8_neon, export=1
696 mov x10, x30
697 sub x9, x0, #4
698 add x0, x9, x1, lsl #2
699 ld1 {v20.8b}, [x9], x1
700 ld1 {v24.8b}, [x0], x1
701 ld1 {v21.8b}, [x9], x1
702 ld1 {v25.8b}, [x0], x1
703 ld1 {v22.8b}, [x9], x1
704 ld1 {v26.8b}, [x0], x1
705 ld1 {v23.8b}, [x9], x1
706 ld1 {v27.8b}, [x0], x1
707
708 sub x9, x9, x1, lsl #2
709 sub x0, x0, x1, lsl #2
710 // Move x0/x9 forward by 2 pixels; we don't need to rewrite the
711 // outermost 2 pixels since they aren't changed.
712 add x9, x9, #2
713 add x0, x0, #2
714
715 transpose_8x8B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
716
717 loop_filter_4
718
 719 // We will only write back the middle 4 pixels; after the loop filter,
 720 // these are in v22, v23, v24, v25, ordered as rows (8x4 pixels).
 721 // They need to be transposed to columns, done with a 4x8 transpose
 722 // (which in practice is two 4x4 transposes of the two 4x4 halves
 723 // of the 8x4 pixels, giving 4x8 pixels).
724 transpose_4x8B v22, v23, v24, v25, v26, v27, v28, v29
725 st1 {v22.s}[0], [x9], x1
726 st1 {v22.s}[1], [x0], x1
727 st1 {v23.s}[0], [x9], x1
728 st1 {v23.s}[1], [x0], x1
729 st1 {v24.s}[0], [x9], x1
730 st1 {v24.s}[1], [x0], x1
731 st1 {v25.s}[0], [x9], x1
732 st1 {v25.s}[1], [x0], x1
733
734 br x10
735endfunc
736
737function ff_vp9_loop_filter_h_44_16_neon, export=1
738 mov x10, x30
739 sub x9, x0, #4
740 add x0, x9, x1, lsl #3
741 ld1 {v20.8b}, [x9], x1
742 ld1 {v20.d}[1], [x0], x1
743 ld1 {v21.8b}, [x9], x1
744 ld1 {v21.d}[1], [x0], x1
745 ld1 {v22.8b}, [x9], x1
746 ld1 {v22.d}[1], [x0], x1
747 ld1 {v23.8b}, [x9], x1
748 ld1 {v23.d}[1], [x0], x1
749 ld1 {v24.8b}, [x9], x1
750 ld1 {v24.d}[1], [x0], x1
751 ld1 {v25.8b}, [x9], x1
752 ld1 {v25.d}[1], [x0], x1
753 ld1 {v26.8b}, [x9], x1
754 ld1 {v26.d}[1], [x0], x1
755 ld1 {v27.8b}, [x9], x1
756 ld1 {v27.d}[1], [x0], x1
757
758 sub x9, x9, x1, lsl #3
759 sub x0, x0, x1, lsl #3
760 add x9, x9, #2
761 add x0, x0, #2
762
763 transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
764
765 loop_filter_4_16b_mix 44
766
767 transpose_4x16B v22, v23, v24, v25, v26, v27, v28, v29
768
769 st1 {v22.s}[0], [x9], x1
770 st1 {v22.s}[2], [x0], x1
771 st1 {v23.s}[0], [x9], x1
772 st1 {v23.s}[2], [x0], x1
773 st1 {v24.s}[0], [x9], x1
774 st1 {v24.s}[2], [x0], x1
775 st1 {v25.s}[0], [x9], x1
776 st1 {v25.s}[2], [x0], x1
777 st1 {v22.s}[1], [x9], x1
778 st1 {v22.s}[3], [x0], x1
779 st1 {v23.s}[1], [x9], x1
780 st1 {v23.s}[3], [x0], x1
781 st1 {v24.s}[1], [x9], x1
782 st1 {v24.s}[3], [x0], x1
783 st1 {v25.s}[1], [x9], x1
784 st1 {v25.s}[3], [x0], x1
785
786 br x10
787endfunc
788
789function ff_vp9_loop_filter_v_8_8_neon, export=1
790 mov x10, x30
791 sub x9, x0, x1, lsl #2
792 ld1 {v20.8b}, [x9], x1 // p3
793 ld1 {v24.8b}, [x0], x1 // q0
794 ld1 {v21.8b}, [x9], x1 // p2
795 ld1 {v25.8b}, [x0], x1 // q1
796 ld1 {v22.8b}, [x9], x1 // p1
797 ld1 {v26.8b}, [x0], x1 // q2
798 ld1 {v23.8b}, [x9], x1 // p0
799 ld1 {v27.8b}, [x0], x1 // q3
800 sub x9, x9, x1, lsl #2
801 sub x0, x0, x1, lsl #2
802 add x9, x9, x1
803
804 loop_filter_8
805
806 st1 {v21.8b}, [x9], x1
807 st1 {v24.8b}, [x0], x1
808 st1 {v22.8b}, [x9], x1
809 st1 {v25.8b}, [x0], x1
810 st1 {v23.8b}, [x9], x1
811 st1 {v26.8b}, [x0], x1
812
813 br x10
8146:
815 sub x9, x0, x1, lsl #1
816 st1 {v22.8b}, [x9], x1
817 st1 {v24.8b}, [x0], x1
818 st1 {v23.8b}, [x9], x1
819 st1 {v25.8b}, [x0], x1
820 br x10
821endfunc
822
823.macro mix_v_16 mix
824function ff_vp9_loop_filter_v_\mix\()_16_neon, export=1
825 mov x10, x30
826 sub x9, x0, x1, lsl #2
827 ld1 {v20.16b}, [x9], x1 // p3
828 ld1 {v24.16b}, [x0], x1 // q0
829 ld1 {v21.16b}, [x9], x1 // p2
830 ld1 {v25.16b}, [x0], x1 // q1
831 ld1 {v22.16b}, [x9], x1 // p1
832 ld1 {v26.16b}, [x0], x1 // q2
833 ld1 {v23.16b}, [x9], x1 // p0
834 ld1 {v27.16b}, [x0], x1 // q3
835 sub x9, x9, x1, lsl #2
836 sub x0, x0, x1, lsl #2
837 add x9, x9, x1
838
839 loop_filter_8_16b_mix \mix
840
841 st1 {v21.16b}, [x9], x1
842 st1 {v24.16b}, [x0], x1
843 st1 {v22.16b}, [x9], x1
844 st1 {v25.16b}, [x0], x1
845 st1 {v23.16b}, [x9], x1
846 st1 {v26.16b}, [x0], x1
847
848 br x10
8496:
850 sub x9, x0, x1, lsl #1
851 st1 {v22.16b}, [x9], x1
852 st1 {v24.16b}, [x0], x1
853 st1 {v23.16b}, [x9], x1
854 st1 {v25.16b}, [x0], x1
855 br x10
856endfunc
857.endm
858
859mix_v_16 48
860mix_v_16 84
861mix_v_16 88
862
863function ff_vp9_loop_filter_h_8_8_neon, export=1
864 mov x10, x30
865 sub x9, x0, #4
866 add x0, x9, x1, lsl #2
867 ld1 {v20.8b}, [x9], x1
868 ld1 {v24.8b}, [x0], x1
869 ld1 {v21.8b}, [x9], x1
870 ld1 {v25.8b}, [x0], x1
871 ld1 {v22.8b}, [x9], x1
872 ld1 {v26.8b}, [x0], x1
873 ld1 {v23.8b}, [x9], x1
874 ld1 {v27.8b}, [x0], x1
875
876 sub x9, x9, x1, lsl #2
877 sub x0, x0, x1, lsl #2
878
879 transpose_8x8B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
880
881 loop_filter_8
882
883 // Even though only 6 pixels per row have been changed, we write the
884 // full 8 pixel registers.
885 transpose_8x8B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
886
887 st1 {v20.8b}, [x9], x1
888 st1 {v24.8b}, [x0], x1
889 st1 {v21.8b}, [x9], x1
890 st1 {v25.8b}, [x0], x1
891 st1 {v22.8b}, [x9], x1
892 st1 {v26.8b}, [x0], x1
893 st1 {v23.8b}, [x9], x1
894 st1 {v27.8b}, [x0], x1
895
896 br x10
8976:
898 // If we didn't need to do the flat8in part, we use the same writeback
899 // as in loop_filter_h_4_8.
900 add x9, x9, #2
901 add x0, x0, #2
902 transpose_4x8B v22, v23, v24, v25, v26, v27, v28, v29
903 st1 {v22.s}[0], [x9], x1
904 st1 {v22.s}[1], [x0], x1
905 st1 {v23.s}[0], [x9], x1
906 st1 {v23.s}[1], [x0], x1
907 st1 {v24.s}[0], [x9], x1
908 st1 {v24.s}[1], [x0], x1
909 st1 {v25.s}[0], [x9], x1
910 st1 {v25.s}[1], [x0], x1
911 br x10
912endfunc
913
914.macro mix_h_16 mix
915function ff_vp9_loop_filter_h_\mix\()_16_neon, export=1
916 mov x10, x30
917 sub x9, x0, #4
918 add x0, x9, x1, lsl #3
919 ld1 {v20.8b}, [x9], x1
920 ld1 {v20.d}[1], [x0], x1
921 ld1 {v21.8b}, [x9], x1
922 ld1 {v21.d}[1], [x0], x1
923 ld1 {v22.8b}, [x9], x1
924 ld1 {v22.d}[1], [x0], x1
925 ld1 {v23.8b}, [x9], x1
926 ld1 {v23.d}[1], [x0], x1
927 ld1 {v24.8b}, [x9], x1
928 ld1 {v24.d}[1], [x0], x1
929 ld1 {v25.8b}, [x9], x1
930 ld1 {v25.d}[1], [x0], x1
931 ld1 {v26.8b}, [x9], x1
932 ld1 {v26.d}[1], [x0], x1
933 ld1 {v27.8b}, [x9], x1
934 ld1 {v27.d}[1], [x0], x1
935
936 sub x9, x9, x1, lsl #3
937 sub x0, x0, x1, lsl #3
938
939 transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
940
941 loop_filter_8_16b_mix \mix
942
943 transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
944
945 st1 {v20.8b}, [x9], x1
946 st1 {v20.d}[1], [x0], x1
947 st1 {v21.8b}, [x9], x1
948 st1 {v21.d}[1], [x0], x1
949 st1 {v22.8b}, [x9], x1
950 st1 {v22.d}[1], [x0], x1
951 st1 {v23.8b}, [x9], x1
952 st1 {v23.d}[1], [x0], x1
953 st1 {v24.8b}, [x9], x1
954 st1 {v24.d}[1], [x0], x1
955 st1 {v25.8b}, [x9], x1
956 st1 {v25.d}[1], [x0], x1
957 st1 {v26.8b}, [x9], x1
958 st1 {v26.d}[1], [x0], x1
959 st1 {v27.8b}, [x9], x1
960 st1 {v27.d}[1], [x0], x1
961
962 br x10
9636:
964 add x9, x9, #2
965 add x0, x0, #2
966 transpose_4x16B v22, v23, v24, v25, v26, v27, v28, v29
967 st1 {v22.s}[0], [x9], x1
968 st1 {v22.s}[2], [x0], x1
969 st1 {v23.s}[0], [x9], x1
970 st1 {v23.s}[2], [x0], x1
971 st1 {v24.s}[0], [x9], x1
972 st1 {v24.s}[2], [x0], x1
973 st1 {v25.s}[0], [x9], x1
974 st1 {v25.s}[2], [x0], x1
975 st1 {v22.s}[1], [x9], x1
976 st1 {v22.s}[3], [x0], x1
977 st1 {v23.s}[1], [x9], x1
978 st1 {v23.s}[3], [x0], x1
979 st1 {v24.s}[1], [x9], x1
980 st1 {v24.s}[3], [x0], x1
981 st1 {v25.s}[1], [x9], x1
982 st1 {v25.s}[3], [x0], x1
983 br x10
984endfunc
985.endm
986
987mix_h_16 48
988mix_h_16 84
989mix_h_16 88
990
991function ff_vp9_loop_filter_v_16_8_neon, export=1
992 mov x10, x30
993 stp d14, d15, [sp, #-0x10]!
994 stp d12, d13, [sp, #-0x10]!
995 stp d10, d11, [sp, #-0x10]!
996 stp d8, d9, [sp, #-0x10]!
997 sub x9, x0, x1, lsl #3
998 ld1 {v16.8b}, [x9], x1 // p7
999 ld1 {v24.8b}, [x0], x1 // q0
1000 ld1 {v17.8b}, [x9], x1 // p6
1001 ld1 {v25.8b}, [x0], x1 // q1
1002 ld1 {v18.8b}, [x9], x1 // p5
1003 ld1 {v26.8b}, [x0], x1 // q2
1004 ld1 {v19.8b}, [x9], x1 // p4
1005 ld1 {v27.8b}, [x0], x1 // q3
1006 ld1 {v20.8b}, [x9], x1 // p3
1007 ld1 {v28.8b}, [x0], x1 // q4
1008 ld1 {v21.8b}, [x9], x1 // p2
1009 ld1 {v29.8b}, [x0], x1 // q5
1010 ld1 {v22.8b}, [x9], x1 // p1
1011 ld1 {v30.8b}, [x0], x1 // q6
1012 ld1 {v23.8b}, [x9], x1 // p0
1013 ld1 {v31.8b}, [x0], x1 // q7
1014 sub x9, x9, x1, lsl #3
1015 sub x0, x0, x1, lsl #3
1016 add x9, x9, x1
1017
1018 loop_filter_16
1019
1020 // If we did the flat8out part, we get the output in
1021 // v2-v17 (skipping v7 and v16). x9 points to x0 - 7 * stride,
1022 // store v2-v9 there, and v10-v17 into x0.
1023 st1 {v2.8b}, [x9], x1
1024 st1 {v10.8b}, [x0], x1
1025 st1 {v3.8b}, [x9], x1
1026 st1 {v11.8b}, [x0], x1
1027 st1 {v4.8b}, [x9], x1
1028 st1 {v12.8b}, [x0], x1
1029 st1 {v5.8b}, [x9], x1
1030 st1 {v13.8b}, [x0], x1
1031 st1 {v6.8b}, [x9], x1
1032 st1 {v14.8b}, [x0], x1
1033 st1 {v8.8b}, [x9], x1
1034 st1 {v15.8b}, [x0], x1
1035 st1 {v9.8b}, [x9], x1
1036 st1 {v17.8b}, [x0], x1
10379:
1038 ldp d8, d9, [sp], 0x10
1039 ldp d10, d11, [sp], 0x10
1040 ldp d12, d13, [sp], 0x10
1041 ldp d14, d15, [sp], 0x10
1042 br x10
10438:
1044 add x9, x9, x1, lsl #2
1045 // If we didn't do the flat8out part, the output is left in the
1046 // input registers.
1047 st1 {v21.8b}, [x9], x1
1048 st1 {v24.8b}, [x0], x1
1049 st1 {v22.8b}, [x9], x1
1050 st1 {v25.8b}, [x0], x1
1051 st1 {v23.8b}, [x9], x1
1052 st1 {v26.8b}, [x0], x1
1053 b 9b
10547:
1055 sub x9, x0, x1, lsl #1
1056 st1 {v22.8b}, [x9], x1
1057 st1 {v24.8b}, [x0], x1
1058 st1 {v23.8b}, [x9], x1
1059 st1 {v25.8b}, [x0], x1
1060 b 9b
1061endfunc
1062
1063function ff_vp9_loop_filter_v_16_16_neon, export=1
1064 mov x10, x30
1065 stp d14, d15, [sp, #-0x10]!
1066 stp d12, d13, [sp, #-0x10]!
1067 stp d10, d11, [sp, #-0x10]!
1068 stp d8, d9, [sp, #-0x10]!
1069 sub x9, x0, x1, lsl #3
1070 ld1 {v16.16b}, [x9], x1 // p7
1071 ld1 {v24.16b}, [x0], x1 // q0
1072 ld1 {v17.16b}, [x9], x1 // p6
1073 ld1 {v25.16b}, [x0], x1 // q1
1074 ld1 {v18.16b}, [x9], x1 // p5
1075 ld1 {v26.16b}, [x0], x1 // q2
1076 ld1 {v19.16b}, [x9], x1 // p4
1077 ld1 {v27.16b}, [x0], x1 // q3
1078 ld1 {v20.16b}, [x9], x1 // p3
1079 ld1 {v28.16b}, [x0], x1 // q4
1080 ld1 {v21.16b}, [x9], x1 // p2
1081 ld1 {v29.16b}, [x0], x1 // q5
1082 ld1 {v22.16b}, [x9], x1 // p1
1083 ld1 {v30.16b}, [x0], x1 // q6
1084 ld1 {v23.16b}, [x9], x1 // p0
1085 ld1 {v31.16b}, [x0], x1 // q7
1086 sub x9, x9, x1, lsl #3
1087 sub x0, x0, x1, lsl #3
1088 add x9, x9, x1
1089
1090 loop_filter_16_16b
1091
1092 st1 {v2.16b}, [x9], x1
1093 st1 {v10.16b}, [x0], x1
1094 st1 {v3.16b}, [x9], x1
1095 st1 {v11.16b}, [x0], x1
1096 st1 {v4.16b}, [x9], x1
1097 st1 {v12.16b}, [x0], x1
1098 st1 {v5.16b}, [x9], x1
1099 st1 {v13.16b}, [x0], x1
1100 st1 {v6.16b}, [x9], x1
1101 st1 {v14.16b}, [x0], x1
1102 st1 {v8.16b}, [x9], x1
1103 st1 {v15.16b}, [x0], x1
1104 st1 {v9.16b}, [x9], x1
1105 st1 {v17.16b}, [x0], x1
11069:
1107 ldp d8, d9, [sp], 0x10
1108 ldp d10, d11, [sp], 0x10
1109 ldp d12, d13, [sp], 0x10
1110 ldp d14, d15, [sp], 0x10
1111 br x10
11128:
1113 add x9, x9, x1, lsl #2
1114 st1 {v21.16b}, [x9], x1
1115 st1 {v24.16b}, [x0], x1
1116 st1 {v22.16b}, [x9], x1
1117 st1 {v25.16b}, [x0], x1
1118 st1 {v23.16b}, [x9], x1
1119 st1 {v26.16b}, [x0], x1
1120 b 9b
11217:
1122 sub x9, x0, x1, lsl #1
1123 st1 {v22.16b}, [x9], x1
1124 st1 {v24.16b}, [x0], x1
1125 st1 {v23.16b}, [x9], x1
1126 st1 {v25.16b}, [x0], x1
1127 b 9b
1128endfunc
1129
1130function ff_vp9_loop_filter_h_16_8_neon, export=1
1131 mov x10, x30
1132 stp d14, d15, [sp, #-0x10]!
1133 stp d12, d13, [sp, #-0x10]!
1134 stp d10, d11, [sp, #-0x10]!
1135 stp d8, d9, [sp, #-0x10]!
1136 sub x9, x0, #8
1137 ld1 {v16.8b}, [x9], x1
1138 ld1 {v24.8b}, [x0], x1
1139 ld1 {v17.8b}, [x9], x1
1140 ld1 {v25.8b}, [x0], x1
1141 ld1 {v18.8b}, [x9], x1
1142 ld1 {v26.8b}, [x0], x1
1143 ld1 {v19.8b}, [x9], x1
1144 ld1 {v27.8b}, [x0], x1
1145 ld1 {v20.8b}, [x9], x1
1146 ld1 {v28.8b}, [x0], x1
1147 ld1 {v21.8b}, [x9], x1
1148 ld1 {v29.8b}, [x0], x1
1149 ld1 {v22.8b}, [x9], x1
1150 ld1 {v30.8b}, [x0], x1
1151 ld1 {v23.8b}, [x9], x1
1152 ld1 {v31.8b}, [x0], x1
1153 sub x0, x0, x1, lsl #3
1154 sub x9, x9, x1, lsl #3
1155
 1156 // The 16x8 pixels read above are in two 8x8 blocks; the left
1157 // half in v16-v23, and the right half in v24-v31. Do two 8x8 transposes
1158 // of this, to get one column per register.
1159 transpose_8x8B v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
1160 transpose_8x8B v24, v25, v26, v27, v28, v29, v30, v31, v0, v1
1161
1162 loop_filter_16
1163
1164 transpose_8x8B v16, v2, v3, v4, v5, v6, v8, v9, v0, v1
1165 transpose_8x8B v10, v11, v12, v13, v14, v15, v17, v31, v0, v1
1166
1167 st1 {v16.8b}, [x9], x1
1168 st1 {v10.8b}, [x0], x1
1169 st1 {v2.8b}, [x9], x1
1170 st1 {v11.8b}, [x0], x1
1171 st1 {v3.8b}, [x9], x1
1172 st1 {v12.8b}, [x0], x1
1173 st1 {v4.8b}, [x9], x1
1174 st1 {v13.8b}, [x0], x1
1175 st1 {v5.8b}, [x9], x1
1176 st1 {v14.8b}, [x0], x1
1177 st1 {v6.8b}, [x9], x1
1178 st1 {v15.8b}, [x0], x1
1179 st1 {v8.8b}, [x9], x1
1180 st1 {v17.8b}, [x0], x1
1181 st1 {v9.8b}, [x9], x1
1182 st1 {v31.8b}, [x0], x1
11839:
1184 ldp d8, d9, [sp], 0x10
1185 ldp d10, d11, [sp], 0x10
1186 ldp d12, d13, [sp], 0x10
1187 ldp d14, d15, [sp], 0x10
1188 br x10
11898:
1190 // The same writeback as in loop_filter_h_8_8
1191 sub x9, x0, #4
1192 add x0, x9, x1, lsl #2
1193 transpose_8x8B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
1194
1195 st1 {v20.8b}, [x9], x1
1196 st1 {v24.8b}, [x0], x1
1197 st1 {v21.8b}, [x9], x1
1198 st1 {v25.8b}, [x0], x1
1199 st1 {v22.8b}, [x9], x1
1200 st1 {v26.8b}, [x0], x1
1201 st1 {v23.8b}, [x9], x1
1202 st1 {v27.8b}, [x0], x1
1203 b 9b
12047:
1205 // The same writeback as in loop_filter_h_4_8
1206 sub x9, x0, #2
1207 add x0, x9, x1, lsl #2
1208 transpose_4x8B v22, v23, v24, v25, v26, v27, v28, v29
1209 st1 {v22.s}[0], [x9], x1
1210 st1 {v22.s}[1], [x0], x1
1211 st1 {v23.s}[0], [x9], x1
1212 st1 {v23.s}[1], [x0], x1
1213 st1 {v24.s}[0], [x9], x1
1214 st1 {v24.s}[1], [x0], x1
1215 st1 {v25.s}[0], [x9], x1
1216 st1 {v25.s}[1], [x0], x1
1217 b 9b
1218endfunc
1219
1220function ff_vp9_loop_filter_h_16_16_neon, export=1
1221 mov x10, x30
1222 stp d14, d15, [sp, #-0x10]!
1223 stp d12, d13, [sp, #-0x10]!
1224 stp d10, d11, [sp, #-0x10]!
1225 stp d8, d9, [sp, #-0x10]!
1226 sub x9, x0, #8
1227 ld1 {v16.8b}, [x9], x1
1228 ld1 {v24.8b}, [x0], x1
1229 ld1 {v17.8b}, [x9], x1
1230 ld1 {v25.8b}, [x0], x1
1231 ld1 {v18.8b}, [x9], x1
1232 ld1 {v26.8b}, [x0], x1
1233 ld1 {v19.8b}, [x9], x1
1234 ld1 {v27.8b}, [x0], x1
1235 ld1 {v20.8b}, [x9], x1
1236 ld1 {v28.8b}, [x0], x1
1237 ld1 {v21.8b}, [x9], x1
1238 ld1 {v29.8b}, [x0], x1
1239 ld1 {v22.8b}, [x9], x1
1240 ld1 {v30.8b}, [x0], x1
1241 ld1 {v23.8b}, [x9], x1
1242 ld1 {v31.8b}, [x0], x1
1243 ld1 {v16.d}[1], [x9], x1
1244 ld1 {v24.d}[1], [x0], x1
1245 ld1 {v17.d}[1], [x9], x1
1246 ld1 {v25.d}[1], [x0], x1
1247 ld1 {v18.d}[1], [x9], x1
1248 ld1 {v26.d}[1], [x0], x1
1249 ld1 {v19.d}[1], [x9], x1
1250 ld1 {v27.d}[1], [x0], x1
1251 ld1 {v20.d}[1], [x9], x1
1252 ld1 {v28.d}[1], [x0], x1
1253 ld1 {v21.d}[1], [x9], x1
1254 ld1 {v29.d}[1], [x0], x1
1255 ld1 {v22.d}[1], [x9], x1
1256 ld1 {v30.d}[1], [x0], x1
1257 ld1 {v23.d}[1], [x9], x1
1258 ld1 {v31.d}[1], [x0], x1
1259 sub x0, x0, x1, lsl #4
1260 sub x9, x9, x1, lsl #4
1261
1262 transpose_8x16B v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
1263 transpose_8x16B v24, v25, v26, v27, v28, v29, v30, v31, v0, v1
1264
1265 loop_filter_16_16b
1266
1267 transpose_8x16B v16, v2, v3, v4, v5, v6, v8, v9, v0, v1
1268 transpose_8x16B v10, v11, v12, v13, v14, v15, v17, v31, v0, v1
1269
1270 st1 {v16.8b}, [x9], x1
1271 st1 {v10.8b}, [x0], x1
1272 st1 {v2.8b}, [x9], x1
1273 st1 {v11.8b}, [x0], x1
1274 st1 {v3.8b}, [x9], x1
1275 st1 {v12.8b}, [x0], x1
1276 st1 {v4.8b}, [x9], x1
1277 st1 {v13.8b}, [x0], x1
1278 st1 {v5.8b}, [x9], x1
1279 st1 {v14.8b}, [x0], x1
1280 st1 {v6.8b}, [x9], x1
1281 st1 {v15.8b}, [x0], x1
1282 st1 {v8.8b}, [x9], x1
1283 st1 {v17.8b}, [x0], x1
1284 st1 {v9.8b}, [x9], x1
1285 st1 {v31.8b}, [x0], x1
1286 st1 {v16.d}[1], [x9], x1
1287 st1 {v10.d}[1], [x0], x1
1288 st1 {v2.d}[1], [x9], x1
1289 st1 {v11.d}[1], [x0], x1
1290 st1 {v3.d}[1], [x9], x1
1291 st1 {v12.d}[1], [x0], x1
1292 st1 {v4.d}[1], [x9], x1
1293 st1 {v13.d}[1], [x0], x1
1294 st1 {v5.d}[1], [x9], x1
1295 st1 {v14.d}[1], [x0], x1
1296 st1 {v6.d}[1], [x9], x1
1297 st1 {v15.d}[1], [x0], x1
1298 st1 {v8.d}[1], [x9], x1
1299 st1 {v17.d}[1], [x0], x1
1300 st1 {v9.d}[1], [x9], x1
1301 st1 {v31.d}[1], [x0], x1
13029:
1303 ldp d8, d9, [sp], 0x10
1304 ldp d10, d11, [sp], 0x10
1305 ldp d12, d13, [sp], 0x10
1306 ldp d14, d15, [sp], 0x10
1307 br x10
13088:
1309 sub x9, x0, #4
1310 add x0, x9, x1, lsl #3
1311 transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
1312
1313 st1 {v20.8b}, [x9], x1
1314 st1 {v20.d}[1], [x0], x1
1315 st1 {v21.8b}, [x9], x1
1316 st1 {v21.d}[1], [x0], x1
1317 st1 {v22.8b}, [x9], x1
1318 st1 {v22.d}[1], [x0], x1
1319 st1 {v23.8b}, [x9], x1
1320 st1 {v23.d}[1], [x0], x1
1321 st1 {v24.8b}, [x9], x1
1322 st1 {v24.d}[1], [x0], x1
1323 st1 {v25.8b}, [x9], x1
1324 st1 {v25.d}[1], [x0], x1
1325 st1 {v26.8b}, [x9], x1
1326 st1 {v26.d}[1], [x0], x1
1327 st1 {v27.8b}, [x9], x1
1328 st1 {v27.d}[1], [x0], x1
1329 b 9b
13307:
1331 sub x9, x0, #2
1332 add x0, x9, x1, lsl #3
1333 transpose_4x16B v22, v23, v24, v25, v26, v27, v28, v29
1334 st1 {v22.s}[0], [x9], x1
1335 st1 {v22.s}[2], [x0], x1
1336 st1 {v23.s}[0], [x9], x1
1337 st1 {v23.s}[2], [x0], x1
1338 st1 {v24.s}[0], [x9], x1
1339 st1 {v24.s}[2], [x0], x1
1340 st1 {v25.s}[0], [x9], x1
1341 st1 {v25.s}[2], [x0], x1
1342 st1 {v22.s}[1], [x9], x1
1343 st1 {v22.s}[3], [x0], x1
1344 st1 {v23.s}[1], [x9], x1
1345 st1 {v23.s}[3], [x0], x1
1346 st1 {v24.s}[1], [x9], x1
1347 st1 {v24.s}[3], [x0], x1
1348 st1 {v25.s}[1], [x9], x1
1349 st1 {v25.s}[3], [x0], x1
1350 b 9b
1351endfunc