/*
 * GMC (Global Motion Compensation)
 * AltiVec-enabled
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "dsputil_ppc.h"
#include "util_altivec.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"

/*
 * AltiVec-enhanced gmc1. ATM this code assumes that stride is a multiple
 * of 8, to preserve proper dst alignment.
 */
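/*
 * For reference, a scalar sketch of what this routine computes (cf. the
 * C fallback gmc1_c in dsputil.c): every output pixel is a bilinear
 * blend of four neighboring source pixels, weighted by A, B, C, D as
 * defined below. The weights always sum to 16*16 = 256, hence the final
 * shift right by 8.
 *
 *     for (i = 0; i < h; i++) {
 *         for (j = 0; j < 8; j++)
 *             dst[j] = (A * src[j]          + B * src[j + 1] +
 *                       C * src[j + stride] + D * src[j + stride + 1] +
 *                       rounder) >> 8;
 *         dst += stride;
 *         src += stride;
 *     }
 */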
#define GMC1_PERF_COND (h==8)
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */, int stride, int h, int x16, int y16, int rounder)
{
    POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] =
    {
        (16-x16)*(16-y16), /* A */
        (   x16)*(16-y16), /* B */
        (16-x16)*(   y16), /* C */
        (   x16)*(   y16), /* D */
        0, 0, 0, 0         /* padding */
    };
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;

    POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);

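    // Broadcast each 16-bit weight from ABCD into all eight lanes of
    // its own vector, so a single vec_mladd below covers eight pixels.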
    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

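    // rounder_a is 16-byte aligned, so vec_lde places it in element 0;
    // splatting that element broadcasts the rounder to all lanes.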
    rounderV = vec_splat((vec_u16)vec_lde(0, &rounder_a), 0);

    // we'll be able to pick up our 9 char elements at src
    // from those 32 bytes.
    // We load the first batch here, as inside the loop we can
    // reuse 'src+stride' from one iteration as the 'src' of the next.
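    // Classic AltiVec unaligned-load idiom: vec_ld fetches the two
    // aligned 16-byte blocks covering the data, and vec_perm with a
    // vec_lvsl-generated mask shifts the wanted bytes into place.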
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F) {
        // if src & 0xF == 0xF, then (src+1) is properly aligned
        // on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    } else {
        srcvB = src_1;
    }
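    // Merging with the zero vector interleaves a zero byte before each
    // of the 8 leftmost source bytes, zero-extending them to 16-bit
    // shorts for the vec_mladd arithmetic below.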
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for(i=0; i<h; i++) {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        // we'll be able to pick up our 9 char elements at src + stride
        // from those 32 bytes, then reuse the resulting two vectors
        // srcvC and srcvD as the next srcvA and srcvB.
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F) {
            // if src & 0xF == 0xF, then (src+1) is properly aligned
            // on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        } else {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        // OK, now we (finally) do the math :-)
        // These four instructions replace 32 int muls & 32 int adds.
        // Isn't AltiVec nice?
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);

        srcvA = srcvC;
        srcvB = srcvD;

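        // The weights sum to 256, so shifting the accumulated sum
        // (plus rounder) right by 8 renormalizes it to a pixel value.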
        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);

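        // vec_pack put the 8 result bytes in the left half of dstv2.
        // Since vec_st always stores 16 aligned bytes, merge them into
        // the half of the previously loaded destination vector that
        // corresponds to dst's position within the aligned block.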
        if (dst_odd) {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
        } else {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
        }

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }

    POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
}