Add missing multiple inclusion guards.
[libav.git] / libavcodec / i386 / dsputil_mmx_qns.h
1 /*
2 * DSP utils : QNS functions are compiled 3 times for mmx/3dnow/ssse3
3 * Copyright (c) 2004 Michael Niedermayer
4 *
5 * MMX optimization by Michael Niedermayer <michaelni@gmx.at>
6 * 3DNow! and SSSE3 optimization by Zuxy Meng <zuxy.meng@gmail.com>
7 *
8 * This file is part of FFmpeg.
9 *
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 #ifndef FFMPEG_DSPUTIL_MMX_QNS_H
26 #define FFMPEG_DSPUTIL_MMX_QNS_H
27
/* Largest |scale| for which the fixed-point multiplier built below
 * (scale << (16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT)) still fits the
 * signed 16-bit high-multiply done by PMULHRW without overflowing. */
28 #define MAX_ABS (512 >> (SCALE_OFFSET>0 ? SCALE_OFFSET : 0))
29
/**
 * Score the effect of adding 'basis' scaled by 'scale' to the residual 'rem',
 * weighted per-coefficient by 'weight' (QNS quantization-noise-shaping search
 * helper). Compiled once per SIMD flavour (MMX/3DNow!/SSSE3) via the DEF()
 * macro; SET_RND, PMULHRW, PHADDD, ASMALIGN, SCALE_OFFSET, BASIS_SHIFT and
 * RECON_SHIFT are supplied by the including file.
 *
 * @param rem    64 residual coefficients (int16)
 * @param weight 64 per-coefficient weights (int16)
 * @param basis  64 basis-function coefficients (int16)
 * @param scale  scaling factor; |scale| must be < MAX_ABS (asserted)
 * @return sum over all coefficients of ((rem + scale*basis) >> 6 * weight)^2,
 *         accumulated with the >>4 / >>2 shifts visible in the asm
 */
30 static int DEF(try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
31 {
32 long i=0; /* byte offset into the int16 arrays; reused as the int result */
33
34 assert(FFABS(scale) < MAX_ABS); /* guard the fixed-point conversion below */
35 scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT; /* to PMULHRW fixed point */
36
37 SET_RND(mm6); /* mm6 := rounding constant consumed by PMULHRW */
38 asm volatile(
39 "pxor %%mm7, %%mm7 \n\t" /* mm7: dword error accumulator */
40 "movd %4, %%mm5 \n\t" /* broadcast 16-bit scale into all 4 words of mm5 */
41 "punpcklwd %%mm5, %%mm5 \n\t"
42 "punpcklwd %%mm5, %%mm5 \n\t"
43 ASMALIGN(4)
44 "1: \n\t" /* loop: 8 coefficients (16 bytes) per iteration */
45 "movq (%1, %0), %%mm0 \n\t" /* mm0/mm1 := basis[i..i+7] */
46 "movq 8(%1, %0), %%mm1 \n\t"
47 PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6) /* rounded high-word multiply by scale */
48 "paddw (%2, %0), %%mm0 \n\t" /* + rem[i..i+7] */
49 "paddw 8(%2, %0), %%mm1 \n\t"
50 "psraw $6, %%mm0 \n\t"
51 "psraw $6, %%mm1 \n\t"
52 "pmullw (%3, %0), %%mm0 \n\t" /* * weight[i..i+7] */
53 "pmullw 8(%3, %0), %%mm1 \n\t"
54 "pmaddwd %%mm0, %%mm0 \n\t" /* square and pair-sum to dwords */
55 "pmaddwd %%mm1, %%mm1 \n\t"
56 "paddd %%mm1, %%mm0 \n\t"
57 "psrld $4, %%mm0 \n\t" /* scale down partial sums to avoid dword overflow */
58 "paddd %%mm0, %%mm7 \n\t"
59 "add $16, %0 \n\t"
60 "cmp $128, %0 \n\t" //FIXME optimize & bench
61 " jb 1b \n\t" /* until all 64 coefficients (128 bytes) processed */
62 PHADDD(%%mm7, %%mm6) /* horizontal add of the two accumulator dwords */
63 "psrld $2, %%mm7 \n\t"
64 "movd %%mm7, %0 \n\t" /* result out through the loop counter register */
65
66 : "+r" (i)
67 : "r"(basis), "r"(rem), "r"(weight), "g"(scale)
68 );
69 return i;
70 }
71
/**
 * Add 'basis' scaled by 'scale' into the residual 'rem' in place
 * (rem[i] += basis[i]*scale, in the fixed-point convention set by
 * BASIS_SHIFT/RECON_SHIFT). SIMD fast path when |scale| is small enough
 * for the PMULHRW fixed-point trick; exact C fallback otherwise.
 * Compiled per SIMD flavour via DEF(); SET_RND/PMULHRW/ASMALIGN and the
 * *_SHIFT constants come from the including file.
 *
 * @param rem   64 residual coefficients (int16), updated in place
 * @param basis 64 basis-function coefficients (int16)
 * @param scale scaling factor (any int; large values take the C path)
 */
72 static void DEF(add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
73 {
74 long i=0; /* byte offset in the asm path, element index in the C path */
75
76 if(FFABS(scale) < MAX_ABS){ /* fixed-point multiplier fits 16 bits: SIMD path */
77 scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT; /* to PMULHRW fixed point */
78 SET_RND(mm6); /* mm6 := rounding constant consumed by PMULHRW */
79 asm volatile(
80 "movd %3, %%mm5 \n\t" /* broadcast 16-bit scale into all 4 words of mm5 */
81 "punpcklwd %%mm5, %%mm5 \n\t"
82 "punpcklwd %%mm5, %%mm5 \n\t"
83 ASMALIGN(4)
84 "1: \n\t" /* loop: 8 coefficients (16 bytes) per iteration */
85 "movq (%1, %0), %%mm0 \n\t" /* mm0/mm1 := basis[i..i+7] */
86 "movq 8(%1, %0), %%mm1 \n\t"
87 PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6) /* rounded high-word multiply by scale */
88 "paddw (%2, %0), %%mm0 \n\t" /* accumulate into rem[i..i+7] */
89 "paddw 8(%2, %0), %%mm1 \n\t"
90 "movq %%mm0, (%2, %0) \n\t" /* store updated residual back */
91 "movq %%mm1, 8(%2, %0) \n\t"
92 "add $16, %0 \n\t"
93 "cmp $128, %0 \n\t" // FIXME optimize & bench
94 " jb 1b \n\t" /* until all 64 coefficients (128 bytes) processed */
95
96 : "+r" (i)
97 : "r"(basis), "r"(rem), "g"(scale)
98 );
99 }else{
/* Scale too large for the 16-bit fixed-point trick: exact scalar fallback
 * with round-to-nearest on the BASIS_SHIFT -> RECON_SHIFT down-shift. */
100 for(i=0; i<8*8; i++){
101 rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
102 }
103 }
104 }
105
106 #endif /* FFMPEG_DSPUTIL_MMX_QNS_H */