tests/checkasm/vp8dsp.c
/*
 * Copyright (c) 2016 Martin Storsjo
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with Libav; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <string.h>

#include "libavcodec/avcodec.h"
#include "libavcodec/vp8dsp.h"

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

#include "checkasm.h"

#define PIXEL_STRIDE 16

#define randomize_buffers(src, dst, stride, coef)               \
    do {                                                        \
        int x, y;                                               \
        for (y = 0; y < 4; y++) {                               \
            AV_WN32A((src) + y * (stride), rnd());              \
            AV_WN32A((dst) + y * (stride), rnd());              \
            for (x = 0; x < 4; x++)                             \
                (coef)[y * 4 + x] = (src)[y * (stride) + x] -   \
                                    (dst)[y * (stride) + x];    \
        }                                                       \
    } while (0)

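// Forward 4x4 DCT matching the VP8 one; used to produce realistic input
// coefficients for the idct tests. The constants 2217 and 5352 appear to be
// round(4096 * sqrt(2) * sin(pi / 8)) and round(4096 * sqrt(2) * cos(pi / 8)).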
static void dct4x4(int16_t *coef)
{
    int i;
    for (i = 0; i < 4; i++) {
        const int a1 = (coef[i * 4 + 0] + coef[i * 4 + 3]) * 8;
        const int b1 = (coef[i * 4 + 1] + coef[i * 4 + 2]) * 8;
        const int c1 = (coef[i * 4 + 1] - coef[i * 4 + 2]) * 8;
        const int d1 = (coef[i * 4 + 0] - coef[i * 4 + 3]) * 8;
        coef[i * 4 + 0] =  a1 + b1;
        coef[i * 4 + 1] = (c1 * 2217 + d1 * 5352 + 14500) >> 12;
        coef[i * 4 + 2] =  a1 - b1;
        coef[i * 4 + 3] = (d1 * 2217 - c1 * 5352 +  7500) >> 12;
    }
    for (i = 0; i < 4; i++) {
        const int a1 = coef[i + 0 * 4] + coef[i + 3 * 4];
        const int b1 = coef[i + 1 * 4] + coef[i + 2 * 4];
        const int c1 = coef[i + 1 * 4] - coef[i + 2 * 4];
        const int d1 = coef[i + 0 * 4] - coef[i + 3 * 4];
        coef[i + 0 * 4] =  (a1 + b1 + 7) >> 4;
        coef[i + 1 * 4] = ((c1 * 2217 + d1 * 5352 + 12000) >> 16) + !!d1;
        coef[i + 2 * 4] =  (a1 - b1 + 7) >> 4;
        coef[i + 3 * 4] =  (d1 * 2217 - c1 * 5352 + 51000) >> 16;
    }
}

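// Forward Walsh-Hadamard transform; used to produce plausible input for the
// vp8_luma_dc_wht (inverse WHT) tests below.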
static void wht4x4(int16_t *coef)
{
    int i;
    for (i = 0; i < 4; i++) {
        int a1 = coef[0 * 4 + i];
        int b1 = coef[1 * 4 + i];
        int c1 = coef[2 * 4 + i];
        int d1 = coef[3 * 4 + i];
        int e1;
        a1 += b1;
        d1 -= c1;
        e1  = (a1 - d1) >> 1;
        b1  = e1 - b1;
        c1  = e1 - c1;
        a1 -= c1;
        d1 += b1;
        coef[0 * 4 + i] = a1;
        coef[1 * 4 + i] = c1;
        coef[2 * 4 + i] = d1;
        coef[3 * 4 + i] = b1;
    }
    for (i = 0; i < 4; i++) {
        int a1 = coef[i * 4 + 0];
        int b1 = coef[i * 4 + 1];
        int c1 = coef[i * 4 + 2];
        int d1 = coef[i * 4 + 3];
        int e1;
        a1 += b1;
        d1 -= c1;
        e1  = (a1 - d1) >> 1;
        b1  = e1 - b1;
        c1  = e1 - c1;
        a1 -= c1;
        d1 += b1;
        coef[i * 4 + 0] = a1 * 2;
        coef[i * 4 + 1] = c1 * 2;
        coef[i * 4 + 2] = d1 * 2;
        coef[i * 4 + 3] = b1 * 2;
    }
}

static void check_idct(void)
{
    LOCAL_ALIGNED_16(uint8_t, src,  [4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst,  [4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst0, [4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst1, [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, coef, [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, subcoef0, [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, subcoef1, [4 * 4]);
    VP8DSPContext d;
    int dc;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, int16_t *block,
                      ptrdiff_t stride);

    ff_vp8dsp_init(&d);
    randomize_buffers(src, dst, 4, coef);

    dct4x4(coef);

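    // Test both the full idct and the DC-only variant; for the latter, only
    // the DC coefficient is kept, as in a DC-only block in the decoder.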
    for (dc = 0; dc <= 1; dc++) {
        void (*idct)(uint8_t *, int16_t *, ptrdiff_t) = dc ? d.vp8_idct_dc_add
                                                           : d.vp8_idct_add;

        if (check_func(idct, "vp8_idct_%sadd", dc ? "dc_" : "")) {
            if (dc) {
                memset(subcoef0, 0, 4 * 4 * sizeof(int16_t));
                subcoef0[0] = coef[0];
            } else {
                memcpy(subcoef0, coef, 4 * 4 * sizeof(int16_t));
            }
            memcpy(dst0, dst, 4 * 4);
            memcpy(dst1, dst, 4 * 4);
            memcpy(subcoef1, subcoef0, 4 * 4 * sizeof(int16_t));
            // Note that this uses a pixel stride of 4, even though the real
            // decoder uses a stride that is a multiple of 16. If
            // optimizations want to take advantage of that, this test needs
            // to be updated to make it more like the h264dsp tests.
            call_ref(dst0, subcoef0, 4);
            call_new(dst1, subcoef1, 4);
            if (memcmp(dst0, dst1, 4 * 4) ||
                memcmp(subcoef0, subcoef1, 4 * 4 * sizeof(int16_t)))
                fail();

            bench_new(dst1, subcoef1, 4);
        }
    }
}

static void check_idct_dc4(void)
{
    LOCAL_ALIGNED_16(uint8_t, src,  [4 * 4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst,  [4 * 4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst0, [4 * 4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst1, [4 * 4 * 4]);
    LOCAL_ALIGNED_16(int16_t, coef, [4], [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, subcoef0, [4], [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, subcoef1, [4], [4 * 4]);
    VP8DSPContext d;
    int i, chroma;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst,
                      int16_t block[4][16], ptrdiff_t stride);

    ff_vp8dsp_init(&d);

    for (chroma = 0; chroma <= 1; chroma++) {
        void (*idct4dc)(uint8_t *, int16_t[4][16], ptrdiff_t) = chroma ? d.vp8_idct_dc_add4uv
                                                                       : d.vp8_idct_dc_add4y;
        if (check_func(idct4dc, "vp8_idct_dc_add4%s", chroma ? "uv" : "y")) {
            int stride = chroma ? 8 : 16;
            int w      = chroma ? 2 : 4;
            for (i = 0; i < 4; i++) {
                int blockx = 4 * (i % w);
                int blocky = 4 * (i / w);
                randomize_buffers(src + stride * blocky + blockx,
                                  dst + stride * blocky + blockx,
                                  stride, coef[i]);
                dct4x4(coef[i]);
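                // The dc_add4 functions only handle DC-only blocks, so zero
                // everything but the DC coefficient of each subblock.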
                memset(&coef[i][1], 0, 15 * sizeof(int16_t));
            }

            memcpy(dst0, dst, 4 * 4 * 4);
            memcpy(dst1, dst, 4 * 4 * 4);
            memcpy(subcoef0, coef, 4 * 4 * 4 * sizeof(int16_t));
            memcpy(subcoef1, coef, 4 * 4 * 4 * sizeof(int16_t));
            call_ref(dst0, subcoef0, stride);
            call_new(dst1, subcoef1, stride);
            if (memcmp(dst0, dst1, 4 * 4 * 4) ||
                memcmp(subcoef0, subcoef1, 4 * 4 * 4 * sizeof(int16_t)))
                fail();
            bench_new(dst1, subcoef1, stride);
        }
    }
}

static void check_luma_dc_wht(void)
{
    LOCAL_ALIGNED_16(int16_t, dc, [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, dc0, [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, dc1, [4 * 4]);
    int16_t block[4][4][16];
    LOCAL_ALIGNED_16(int16_t, block0, [4], [4][16]);
    LOCAL_ALIGNED_16(int16_t, block1, [4], [4][16]);
    VP8DSPContext d;
    int dc_only;
    int blockx, blocky;
    declare_func_emms(AV_CPU_FLAG_MMX, void, int16_t block[4][4][16],
                      int16_t dc[16]);

    ff_vp8dsp_init(&d);

    for (blocky = 0; blocky < 4; blocky++) {
        for (blockx = 0; blockx < 4; blockx++) {
            uint8_t src[16], dst[16];
            randomize_buffers(src, dst, 4, block[blocky][blockx]);

            dct4x4(block[blocky][blockx]);
            dc[blocky * 4 + blockx] = block[blocky][blockx][0];
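            // The WHT should write the transformed DC back into each block's
            // first coefficient, so prefill it with a random value to verify
            // that it really is overwritten.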
            block[blocky][blockx][0] = rnd();
        }
    }
    wht4x4(dc);

    for (dc_only = 0; dc_only <= 1; dc_only++) {
        void (*idct)(int16_t [4][4][16], int16_t [16]) = dc_only ? d.vp8_luma_dc_wht_dc
                                                                 : d.vp8_luma_dc_wht;

        if (check_func(idct, "vp8_luma_dc_wht%s", dc_only ? "_dc" : "")) {
            if (dc_only) {
                memset(dc0, 0, 16 * sizeof(int16_t));
                dc0[0] = dc[0];
            } else {
                memcpy(dc0, dc, 16 * sizeof(int16_t));
            }
            memcpy(dc1, dc0, 16 * sizeof(int16_t));
            memcpy(block0, block, 4 * 4 * 16 * sizeof(int16_t));
            memcpy(block1, block, 4 * 4 * 16 * sizeof(int16_t));
            call_ref(block0, dc0);
            call_new(block1, dc1);
            if (memcmp(block0, block1, 4 * 4 * 16 * sizeof(int16_t)) ||
                memcmp(dc0, dc1, 16 * sizeof(int16_t)))
                fail();
            bench_new(block1, dc1);
        }
    }
}

#define SRC_BUF_STRIDE 32
#define SRC_BUF_SIZE (((size << (size < 16)) + 5) * SRC_BUF_STRIDE)
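// Size of the source buffer: the largest tested block height (twice the
// block width, except for 16 pixel wide blocks), plus 5 extra rows for the
// 6-tap filter (2 above and 3 below the block).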
// The mc subpixel interpolation filter needs the 2 previous pixels in either
// direction; the +1 makes sure that the actual load addresses are always
// unaligned.
#define src (buf + 2 * SRC_BUF_STRIDE + 2 + 1)

#undef randomize_buffers
#define randomize_buffers()                         \
    do {                                            \
        int k;                                      \
        for (k = 0; k < SRC_BUF_SIZE; k += 4) {     \
            AV_WN32A(buf + k, rnd());               \
        }                                           \
    } while (0)

static void check_mc(void)
{
    LOCAL_ALIGNED_16(uint8_t, buf, [32 * 32]);
    LOCAL_ALIGNED_16(uint8_t, dst0, [16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, dst1, [16 * 16]);
    VP8DSPContext d;
    int type, k, dx, dy;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *, ptrdiff_t,
                      uint8_t *, ptrdiff_t, int, int, int);

    ff_vp78dsp_init(&d);

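    // Test the epel (4/6-tap) and bilinear filters for every block width
    // (16, 8 and 4, selected via hsize), block height and subpel filter
    // combination in the function tables.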
    for (type = 0; type < 2; type++) {
        vp8_mc_func (*tab)[3][3] = type ? d.put_vp8_bilinear_pixels_tab
                                        : d.put_vp8_epel_pixels_tab;
        for (k = 1; k < 8; k++) {
            int hsize  = k / 3;
            int size   = 16 >> hsize;
            int height = (size << 1) >> (k % 3);
            for (dy = 0; dy < 3; dy++) {
                for (dx = 0; dx < 3; dx++) {
                    char str[100];
                    if (dx || dy) {
                        if (type == 0) {
                            static const char *dx_names[] = { "", "h4", "h6" };
                            static const char *dy_names[] = { "", "v4", "v6" };
                            snprintf(str, sizeof(str), "epel%d_%s%s", size,
                                     dx_names[dx], dy_names[dy]);
                        } else {
                            snprintf(str, sizeof(str), "bilin%d_%s%s", size,
                                     dx ? "h" : "", dy ? "v" : "");
                        }
                    } else {
                        snprintf(str, sizeof(str), "pixels%d", size);
                    }
                    if (check_func(tab[hsize][dy][dx], "vp8_put_%s", str)) {
                        int mx, my;
                        int i;
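                        // Pick a subpel position matching the filter under
                        // test: for the epel functions, the even positions
                        // (2, 4, 6) use the 6-tap filter and the odd ones
                        // (1, 3, 5, 7) the 4-tap filter.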
                        if (type == 0) {
                            mx = dx == 2 ? 2 + 2 * (rnd() % 3) :
                                 dx == 1 ? 1 + 2 * (rnd() % 4) : 0;
                            my = dy == 2 ? 2 + 2 * (rnd() % 3) :
                                 dy == 1 ? 1 + 2 * (rnd() % 4) : 0;
                        } else {
                            mx = dx ? 1 + (rnd() % 7) : 0;
                            my = dy ? 1 + (rnd() % 7) : 0;
                        }
                        randomize_buffers();
                        for (i = -2; i <= 3; i++) {
                            int val = (i == -1 || i == 2) ? 0 : 0xff;
                            // Set pixels in the first row and column to the
                            // maximum pattern, to test for potential
                            // overflows in the filter.
                            src[i]                  = val;
                            src[i * SRC_BUF_STRIDE] = val;
                        }
                        call_ref(dst0, size, src, SRC_BUF_STRIDE, height, mx, my);
                        call_new(dst1, size, src, SRC_BUF_STRIDE, height, mx, my);
                        if (memcmp(dst0, dst1, size * height))
                            fail();
                        bench_new(dst1, size, src, SRC_BUF_STRIDE, height, mx, my);
                    }
                }
            }
        }
    }
}

#undef randomize_buffers

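// Set the pixel at offset b across the filtered edge (at position a along
// it) to c, clipped to the 0-255 range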
#define setpx(a, b, c) buf[(a) + (b) * jstride] = av_clip_uint8(c)
// Set the pixel to c +/- [0,d]
#define setdx(a, b, c, d) setpx(a, b, c - (d) + (rnd() % ((d) * 2 + 1)))
// Set the pixel to c +/- [d,d+e) (making sure it won't be clipped)
#define setdx2(a, b, o, c, d, e) setpx(a, b, o = c + ((d) + (rnd() % (e))) * (c >= 128 ? -1 : 1))

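// Generate the pixel rows crossing the edge to be filtered, keeping the
// differences between neighboring pixels within flim_E/flim_I so that the
// filter triggers, and either forcing or avoiding the high edge variance
// (hev) condition depending on force_hev.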
static void randomize_loopfilter_buffers(int lineoff, int str,
                                         int dir, int flim_E, int flim_I,
                                         int hev_thresh, uint8_t *buf,
                                         int force_hev)
{
    uint32_t mask = 0xff;
    int off = dir ? lineoff : lineoff * str;
    int istride = dir ? 1 : str;
    int jstride = dir ? str : 1;
    int i;
    for (i = 0; i < 8; i += 2) {
        // Row 0 will trigger hev for q0/q1, row 2 will trigger hev for p0/p1,
        // rows 4 and 6 will not trigger hev.
        // force_hev 1 will make sure all rows trigger hev, while force_hev -1
        // makes none of them trigger it.
        int idx = off + i * istride, p2, p1, p0, q0, q1, q2;
        setpx(idx, 0, q0 = rnd() & mask);
        if ((i == 0 && force_hev >= 0) || force_hev > 0)
            setdx2(idx, 1, q1, q0, hev_thresh + 1, flim_I - hev_thresh - 1);
        else
            setdx(idx, 1, q1 = q0, hev_thresh);
        setdx(idx, 2, q2 = q1, flim_I);
        setdx(idx, 3, q2, flim_I);
        setdx(idx, -1, p0 = q0, flim_E >> 2);
        if ((i == 2 && force_hev >= 0) || force_hev > 0)
            setdx2(idx, -2, p1, p0, hev_thresh + 1, flim_I - hev_thresh - 1);
        else
            setdx(idx, -2, p1 = p0, hev_thresh);
        setdx(idx, -3, p2 = p1, flim_I);
        setdx(idx, -4, p2, flim_I);
    }
}

// Fill the buffer with random pixels
static void fill_loopfilter_buffers(uint8_t *buf, int stride, int w, int h)
{
    int x, y;
    for (y = 0; y < h; y++)
        for (x = 0; x < w; x++)
            buf[y * stride + x] = rnd() & 0xff;
}

#define randomize_buffers(buf, lineoff, str, force_hev)             \
    randomize_loopfilter_buffers(lineoff, str, dir, flim_E, flim_I, \
                                 hev_thresh, buf, force_hev)

static void check_loopfilter_16y(void)
{
    LOCAL_ALIGNED_16(uint8_t, base0, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base1, [32 + 16 * 16]);
    VP8DSPContext d;
    int dir, edge, force_hev;
    int flim_E = 20, flim_I = 10, hev_thresh = 7;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *, ptrdiff_t, int, int, int);

    ff_vp8dsp_init(&d);

    for (dir = 0; dir < 2; dir++) {
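        // The loop filter reads and writes up to 4 pixels on each side of
        // the edge; the edge is placed 4 pixels (or 4 rows) into the
        // randomized area, while the pointer passed to the function is kept
        // 16-byte aligned.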
        int midoff = dir ? 4 * 16 : 4;
        int midoff_aligned = dir ? 4 * 16 : 16;
        uint8_t *buf0 = base0 + midoff_aligned;
        uint8_t *buf1 = base1 + midoff_aligned;
        for (edge = 0; edge < 2; edge++) {
            void (*func)(uint8_t *, ptrdiff_t, int, int, int) = NULL;
            switch (dir << 1 | edge) {
            case (0 << 1) | 0: func = d.vp8_h_loop_filter16y;       break;
            case (1 << 1) | 0: func = d.vp8_v_loop_filter16y;       break;
            case (0 << 1) | 1: func = d.vp8_h_loop_filter16y_inner; break;
            case (1 << 1) | 1: func = d.vp8_v_loop_filter16y_inner; break;
            }
            if (check_func(func, "vp8_loop_filter16y%s_%s", edge ? "_inner" : "",
                           dir ? "v" : "h")) {
                for (force_hev = -1; force_hev <= 1; force_hev++) {
                    fill_loopfilter_buffers(buf0 - midoff, 16, 16, 16);
                    randomize_buffers(buf0, 0, 16, force_hev);
                    randomize_buffers(buf0, 8, 16, force_hev);
                    memcpy(buf1 - midoff, buf0 - midoff, 16 * 16);
                    call_ref(buf0, 16, flim_E, flim_I, hev_thresh);
                    call_new(buf1, 16, flim_E, flim_I, hev_thresh);
                    if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16))
                        fail();
                }
                fill_loopfilter_buffers(buf0 - midoff, 16, 16, 16);
                randomize_buffers(buf0, 0, 16, 0);
                randomize_buffers(buf0, 8, 16, 0);
                bench_new(buf0, 16, flim_E, flim_I, hev_thresh);
            }
        }
    }
}

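// Same as the check_loopfilter_16y test, except that the chroma functions
// filter the u and v planes in a single call.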
static void check_loopfilter_8uv(void)
{
    LOCAL_ALIGNED_16(uint8_t, base0u, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base0v, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base1u, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base1v, [32 + 16 * 16]);
    VP8DSPContext d;
    int dir, edge, force_hev;
    int flim_E = 20, flim_I = 10, hev_thresh = 7;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *, uint8_t *, ptrdiff_t,
                      int, int, int);

    ff_vp8dsp_init(&d);

    for (dir = 0; dir < 2; dir++) {
        int midoff = dir ? 4 * 16 : 4;
        int midoff_aligned = dir ? 4 * 16 : 16;
        uint8_t *buf0u = base0u + midoff_aligned;
        uint8_t *buf0v = base0v + midoff_aligned;
        uint8_t *buf1u = base1u + midoff_aligned;
        uint8_t *buf1v = base1v + midoff_aligned;
        for (edge = 0; edge < 2; edge++) {
            void (*func)(uint8_t *, uint8_t *, ptrdiff_t, int, int, int) = NULL;
            switch (dir << 1 | edge) {
            case (0 << 1) | 0: func = d.vp8_h_loop_filter8uv;       break;
            case (1 << 1) | 0: func = d.vp8_v_loop_filter8uv;       break;
            case (0 << 1) | 1: func = d.vp8_h_loop_filter8uv_inner; break;
            case (1 << 1) | 1: func = d.vp8_v_loop_filter8uv_inner; break;
            }
            if (check_func(func, "vp8_loop_filter8uv%s_%s", edge ? "_inner" : "",
                           dir ? "v" : "h")) {
                for (force_hev = -1; force_hev <= 1; force_hev++) {
                    fill_loopfilter_buffers(buf0u - midoff, 16, 16, 16);
                    fill_loopfilter_buffers(buf0v - midoff, 16, 16, 16);
                    randomize_buffers(buf0u, 0, 16, force_hev);
                    randomize_buffers(buf0v, 0, 16, force_hev);
                    memcpy(buf1u - midoff, buf0u - midoff, 16 * 16);
                    memcpy(buf1v - midoff, buf0v - midoff, 16 * 16);

                    call_ref(buf0u, buf0v, 16, flim_E, flim_I, hev_thresh);
                    call_new(buf1u, buf1v, 16, flim_E, flim_I, hev_thresh);
                    if (memcmp(buf0u - midoff, buf1u - midoff, 16 * 16) ||
                        memcmp(buf0v - midoff, buf1v - midoff, 16 * 16))
                        fail();
                }
                fill_loopfilter_buffers(buf0u - midoff, 16, 16, 16);
                fill_loopfilter_buffers(buf0v - midoff, 16, 16, 16);
                randomize_buffers(buf0u, 0, 16, 0);
                randomize_buffers(buf0v, 0, 16, 0);
                bench_new(buf0u, buf0v, 16, flim_E, flim_I, hev_thresh);
            }
        }
    }
}

static void check_loopfilter_simple(void)
{
    LOCAL_ALIGNED_16(uint8_t, base0, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base1, [32 + 16 * 16]);
    VP8DSPContext d;
    int dir;
    int flim_E = 20, flim_I = 30, hev_thresh = 0;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *, ptrdiff_t, int);

    ff_vp8dsp_init(&d);

    for (dir = 0; dir < 2; dir++) {
        int midoff = dir ? 4 * 16 : 4;
        int midoff_aligned = dir ? 4 * 16 : 16;
        uint8_t *buf0 = base0 + midoff_aligned;
        uint8_t *buf1 = base1 + midoff_aligned;
        void (*func)(uint8_t *, ptrdiff_t, int) = dir ? d.vp8_v_loop_filter_simple
                                                      : d.vp8_h_loop_filter_simple;
        if (check_func(func, "vp8_loop_filter_simple_%s", dir ? "v" : "h")) {
            fill_loopfilter_buffers(buf0 - midoff, 16, 16, 16);
            randomize_buffers(buf0, 0, 16, -1);
            randomize_buffers(buf0, 8, 16, -1);
            memcpy(buf1 - midoff, buf0 - midoff, 16 * 16);
            call_ref(buf0, 16, flim_E);
            call_new(buf1, 16, flim_E);
            if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16))
                fail();
            bench_new(buf0, 16, flim_E);
        }
    }
}

void checkasm_check_vp8dsp(void)
{
    check_idct();
    check_idct_dc4();
    check_luma_dc_wht();
    report("idct");
    check_mc();
    report("mc");
    check_loopfilter_16y();
    check_loopfilter_8uv();
    check_loopfilter_simple();
    report("loopfilter");
}