/*
 * Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * miscellaneous integer operations
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "libavcodec/dsputil.h"

#include "dsputil_altivec.h"

#include "types_altivec.h"

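/* Sum of squared differences between an int8_t block and an int16_t block:
 * score = sum((pix1[i] - pix2[i])^2) over `size` elements. */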
static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
                                     int size)
{
    int i, size16;
    vector signed char vpix1;
    vector signed short vpix2, vdiff, vpix1l, vpix1h;
    union {
        vector signed int vscore;
        int32_t score[4];
    } u;

    u.vscore = vec_splat_s32(0);
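    /* Unaligned load idiom for classic AltiVec: vec_ld() ignores the low
     * four address bits, so fetch the two aligned quadwords surrounding
     * the pointer and use vec_lvsl()/vec_perm() to extract the 16 bytes
     * starting at the (possibly unaligned) address. The second load uses
     * offset 15 so that an already-aligned pointer does not read past the
     * end of the buffer. */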
//XXX lazy way, fix it later

#define vec_unaligned_load(b) \
    vec_perm(vec_ld(0, b), vec_ld(15, b), vec_lvsl(0, b))

    size16 = size >> 4;
    while (size16) {
        // score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
        // load pix1 and the first batch of pix2

        vpix1 = vec_unaligned_load(pix1);
        vpix2 = vec_unaligned_load(pix2);
        pix2 += 8;
        // sign-extend the high half of pix1 to 16 bits
        vpix1h = vec_unpackh(vpix1);
        vdiff  = vec_sub(vpix1h, vpix2);
        vpix1l = vec_unpackl(vpix1);
        // load another batch from pix2
        vpix2 = vec_unaligned_load(pix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        vdiff = vec_sub(vpix1l, vpix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        pix1 += 16;
        pix2 += 8;
        size16--;
    }
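    /* vec_sums() adds the four 32-bit partial sums (saturating) and
     * leaves the total in element 3 of the result. */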
    u.vscore = vec_sums(u.vscore, vec_splat_s32(0));

    size %= 16;
    for (i = 0; i < size; i++) {
        u.score[3] += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    }
    return u.score[3];
}

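/* Dot product of v1 and v2, with each vec_msum() partial sum (two adjacent
 * products) shifted right by `shift` before accumulation; note the shift is
 * applied to two-product partials rather than to each individual product.
 * v1 may be unaligned (realigned via vec_lvsl()/vec_perm()); v2 must be
 * 16-byte aligned since it is loaded directly with vec_ld(). */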
static int32_t scalarproduct_int16_altivec(int16_t *v1, const int16_t *v2,
                                           int order, const int shift)
{
    int i;
    LOAD_ZERO;
    register vec_s16 vec1, *pv;
    register vec_s32 res = vec_splat_s32(0), t;
    register vec_u32 shifts;
    int32_t ires;

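    /* vec_splat_u32() only accepts a 5-bit immediate, so the shift count
     * vector is assembled bit by bit; the 16 contribution is built as
     * 8 << 1. */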
    shifts = zero_u32v;
    if (shift & 0x10) shifts = vec_add(shifts, vec_sl(vec_splat_u32(0x08), vec_splat_u32(0x1)));
    if (shift & 0x08) shifts = vec_add(shifts, vec_splat_u32(0x08));
    if (shift & 0x04) shifts = vec_add(shifts, vec_splat_u32(0x04));
    if (shift & 0x02) shifts = vec_add(shifts, vec_splat_u32(0x02));
    if (shift & 0x01) shifts = vec_add(shifts, vec_splat_u32(0x01));

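    /* Eight 16-bit terms per iteration: multiply-sum into four 32-bit
     * partials, shift them, then fold them into element 3 of `res` with
     * vec_sums(). */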
    for (i = 0; i < order; i += 8) {
        pv   = (vec_s16 *) v1;
        vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1));
        t    = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
        t    = vec_sr(t, shifts);
        res  = vec_sums(t, res);
        v1  += 8;
        v2  += 8;
    }
    res = vec_splat(res, 3);
    vec_ste(res, 0, &ires);
    return ires;
}

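/* Combined operation: return the dot product of v1 and v2 while also
 * updating v1 in place as v1[i] += v3[i] * mul. v1 and v3 must be 16-byte
 * aligned (they are indexed directly as vectors); v2 may be unaligned and
 * is realigned with vec_lvsl()/vec_perm(). `order` must be a multiple
 * of 16. */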
static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1,
                                                    const int16_t *v2,
                                                    const int16_t *v3,
                                                    int order, int mul)
{
    LOAD_ZERO;
    vec_s16 *pv1 = (vec_s16 *) v1;
    vec_s16 *pv2 = (vec_s16 *) v2;
    vec_s16 *pv3 = (vec_s16 *) v3;
    register vec_s16 muls = { mul, mul, mul, mul, mul, mul, mul, mul };
    register vec_s16 t0, t1, i0, i1;
    register vec_s16 i2 = pv2[0], i3 = pv3[0];
    register vec_s32 res = zero_s32v;
    register vec_u8 align = vec_lvsl(0, v2);
    int32_t ires;

    order >>= 4;
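    /* Software-pipelined loop: i2/i3 carry the last quadword of the
     * previous block into the next iteration so the vec_perm()
     * realignment does not reload it. */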
    do {
        t0 = vec_perm(i2, pv2[1], align);
        i2 = pv2[2];
        t1 = vec_perm(pv2[1], i2, align);
        i0 = pv1[0];
        i1 = pv1[1];
        res = vec_msum(t0, i0, res);
        res = vec_msum(t1, i1, res);
        t0 = vec_perm(i3, pv3[1], align);
        i3 = pv3[2];
        t1 = vec_perm(pv3[1], i3, align);
        pv1[0] = vec_mladd(t0, muls, i0);
        pv1[1] = vec_mladd(t1, muls, i1);
        pv1 += 2;
        pv2 += 2;
        pv3 += 2;
    } while (--order);
    res = vec_splat(vec_sums(res, zero_s32v), 3);
    vec_ste(res, 0, &ires);
    return ires;
}

void ff_int_init_altivec(DSPContext *c, AVCodecContext *avctx)
{
    c->ssd_int8_vs_int16 = ssd_int8_vs_int16_altivec;
    c->scalarproduct_int16 = scalarproduct_int16_altivec;
    c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_altivec;
}