/*
 * Copyright (c) 2002 Dieter Shirley
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include "../dsputil.h"
#include "../mpegvideo.h"
#include "dsputil_altivec.h"

// Swaps two variables (used for AltiVec registers)
#define SWAP(a,b) \
do { \
    __typeof__(a) swap_temp = a; \
    a = b; \
    b = swap_temp; \
} while (0)

// Transposes a matrix consisting of four vectors with four elements each
#define TRANSPOSE4(a,b,c,d) \
do { \
    __typeof__(a) _trans_ach = vec_mergeh(a, c); \
    __typeof__(a) _trans_acl = vec_mergel(a, c); \
    __typeof__(a) _trans_bdh = vec_mergeh(b, d); \
    __typeof__(a) _trans_bdl = vec_mergel(b, d); \
    \
    a = vec_mergeh(_trans_ach, _trans_bdh); \
    b = vec_mergel(_trans_ach, _trans_bdh); \
    c = vec_mergeh(_trans_acl, _trans_bdl); \
    d = vec_mergel(_trans_acl, _trans_bdl); \
} while (0)
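// With 32-bit elements, vec_mergeh(a, c) = {a0,c0,a1,c1} and
// vec_mergel(a, c) = {a2,c2,a3,c3}; the second round of merges interleaves
// those results with the b/d merges, leaving row i holding column i of the
// original 4x4 matrix.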

#define TRANSPOSE8(a,b,c,d,e,f,g,h) \
do { \
    __typeof__(a) _A1, _B1, _C1, _D1, _E1, _F1, _G1, _H1; \
    __typeof__(a) _A2, _B2, _C2, _D2, _E2, _F2, _G2, _H2; \
    \
    _A1 = vec_mergeh(a, e); \
    _B1 = vec_mergel(a, e); \
    _C1 = vec_mergeh(b, f); \
    _D1 = vec_mergel(b, f); \
    _E1 = vec_mergeh(c, g); \
    _F1 = vec_mergel(c, g); \
    _G1 = vec_mergeh(d, h); \
    _H1 = vec_mergel(d, h); \
    \
    _A2 = vec_mergeh(_A1, _E1); \
    _B2 = vec_mergel(_A1, _E1); \
    _C2 = vec_mergeh(_B1, _F1); \
    _D2 = vec_mergel(_B1, _F1); \
    _E2 = vec_mergeh(_C1, _G1); \
    _F2 = vec_mergel(_C1, _G1); \
    _G2 = vec_mergeh(_D1, _H1); \
    _H2 = vec_mergel(_D1, _H1); \
    \
    a = vec_mergeh(_A2, _E2); \
    b = vec_mergel(_A2, _E2); \
    c = vec_mergeh(_B2, _F2); \
    d = vec_mergel(_B2, _F2); \
    e = vec_mergeh(_C2, _G2); \
    f = vec_mergel(_C2, _G2); \
    g = vec_mergeh(_D2, _H2); \
    h = vec_mergel(_D2, _H2); \
} while (0)
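// Three rounds of vec_mergeh/vec_mergel (log2 of 8) are enough to transpose
// eight vectors of eight 16-bit elements entirely in registers, with no
// loads or stores.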


// Loads a four-byte value (int or float) from the target address
// into every element in the target vector. Only works if the
// target address is four-byte aligned (which it always should be).
#define LOAD4(vec, address) \
{ \
    __typeof__(vec)* _load_addr = (__typeof__(vec)*)(address); \
    vector unsigned char _perm_vec = vec_lvsl(0, (address)); \
    vec = vec_ld(0, _load_addr); \
    vec = vec_perm(vec, vec, _perm_vec); \
    vec = vec_splat(vec, 0); \
}
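// vec_ld ignores the low four bits of its address, so the wanted value may
// sit at any word offset within the quadword it returns. vec_lvsl builds
// the permute mask that rotates the addressed bytes down to element 0, and
// vec_splat then broadcasts that element to all four lanes: the usual
// AltiVec unaligned-load idiom, here limited to four-byte-aligned scalars.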


#ifdef CONFIG_DARWIN
#define FOUROF(a) (a)
#else
// slower, for dumb non-apple GCC
#define FOUROF(a) {a,a,a,a}
#endif
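// Apple's AltiVec dialect lets a single parenthesized scalar initialize
// every element of a vector literal, so FOUROF(a) can stay a plain (a)
// there; FSF GCC needs the explicit {a,a,a,a} initializer instead.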
int dct_quantize_altivec(MpegEncContext* s,
                         DCTELEM* data, int n,
                         int qscale, int* overflow)
{
    int lastNonZero;
    vector float row0, row1, row2, row3, row4, row5, row6, row7;
    vector float alt0, alt1, alt2, alt3, alt4, alt5, alt6, alt7;
    const vector float zero = (const vector float)FOUROF(0.);

    // Load the data into the row/alt vectors
    {
        vector signed short data0, data1, data2, data3, data4, data5, data6, data7;

        data0 = vec_ld(0, data);
        data1 = vec_ld(16, data);
        data2 = vec_ld(32, data);
        data3 = vec_ld(48, data);
        data4 = vec_ld(64, data);
        data5 = vec_ld(80, data);
        data6 = vec_ld(96, data);
        data7 = vec_ld(112, data);

        // Transpose the data before we start
        TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);

        // Load the data into floating point vectors. We load
        // the high half of each row into the main row vectors
        // and the low half into the alt vectors.
        row0 = vec_ctf(vec_unpackh(data0), 0);
        alt0 = vec_ctf(vec_unpackl(data0), 0);
        row1 = vec_ctf(vec_unpackh(data1), 0);
        alt1 = vec_ctf(vec_unpackl(data1), 0);
        row2 = vec_ctf(vec_unpackh(data2), 0);
        alt2 = vec_ctf(vec_unpackl(data2), 0);
        row3 = vec_ctf(vec_unpackh(data3), 0);
        alt3 = vec_ctf(vec_unpackl(data3), 0);
        row4 = vec_ctf(vec_unpackh(data4), 0);
        alt4 = vec_ctf(vec_unpackl(data4), 0);
        row5 = vec_ctf(vec_unpackh(data5), 0);
        alt5 = vec_ctf(vec_unpackl(data5), 0);
        row6 = vec_ctf(vec_unpackh(data6), 0);
        alt6 = vec_ctf(vec_unpackl(data6), 0);
        row7 = vec_ctf(vec_unpackh(data7), 0);
        alt7 = vec_ctf(vec_unpackl(data7), 0);
    }
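    // vec_unpackh/vec_unpackl sign-extend the high/low four shorts of each
    // row to 32-bit ints, and vec_ctf(..., 0) converts those to floats
    // without scaling (a nonzero second argument would divide by 2^n), so
    // each row of eight 16-bit coefficients becomes two float vectors.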

    // The following block could exist as a separate AltiVec DCT
    // function. However, if we keep it inline, the DCT data can remain
    // in the vector local variables, as floats, which we'll use during the
    // quantize step...
    {
        const vector float vec_0_298631336 = (vector float)FOUROF(0.298631336f);
        const vector float vec_0_390180644 = (vector float)FOUROF(-0.390180644f);
        const vector float vec_0_541196100 = (vector float)FOUROF(0.541196100f);
        const vector float vec_0_765366865 = (vector float)FOUROF(0.765366865f);
        const vector float vec_0_899976223 = (vector float)FOUROF(-0.899976223f);
        const vector float vec_1_175875602 = (vector float)FOUROF(1.175875602f);
        const vector float vec_1_501321110 = (vector float)FOUROF(1.501321110f);
        const vector float vec_1_847759065 = (vector float)FOUROF(-1.847759065f);
        const vector float vec_1_961570560 = (vector float)FOUROF(-1.961570560f);
        const vector float vec_2_053119869 = (vector float)FOUROF(2.053119869f);
        const vector float vec_2_562915447 = (vector float)FOUROF(-2.562915447f);
        const vector float vec_3_072711026 = (vector float)FOUROF(3.072711026f);
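        // These mirror the FIX_* constants of the scalar integer DCT that
        // the commented-out MULTIPLY/DESCALE lines below were ported from;
        // the negated entries fold a "- FIX_..." multiply into the single
        // fused multiply-add that vec_madd provides.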

        int whichPass, whichHalf;

        for(whichPass = 1; whichPass <= 2; whichPass++)
        {
            for(whichHalf = 1; whichHalf <= 2; whichHalf++)
            {
                vector float tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
                vector float tmp10, tmp11, tmp12, tmp13;
                vector float z1, z2, z3, z4, z5;

                tmp0 = vec_add(row0, row7); // tmp0 = dataptr[0] + dataptr[7];
                tmp7 = vec_sub(row0, row7); // tmp7 = dataptr[0] - dataptr[7];
                tmp3 = vec_add(row3, row4); // tmp3 = dataptr[3] + dataptr[4];
                tmp4 = vec_sub(row3, row4); // tmp4 = dataptr[3] - dataptr[4];
                tmp1 = vec_add(row1, row6); // tmp1 = dataptr[1] + dataptr[6];
                tmp6 = vec_sub(row1, row6); // tmp6 = dataptr[1] - dataptr[6];
                tmp2 = vec_add(row2, row5); // tmp2 = dataptr[2] + dataptr[5];
                tmp5 = vec_sub(row2, row5); // tmp5 = dataptr[2] - dataptr[5];

                tmp10 = vec_add(tmp0, tmp3); // tmp10 = tmp0 + tmp3;
                tmp13 = vec_sub(tmp0, tmp3); // tmp13 = tmp0 - tmp3;
                tmp11 = vec_add(tmp1, tmp2); // tmp11 = tmp1 + tmp2;
                tmp12 = vec_sub(tmp1, tmp2); // tmp12 = tmp1 - tmp2;

                // dataptr[0] = (DCTELEM) ((tmp10 + tmp11) << PASS1_BITS);
                row0 = vec_add(tmp10, tmp11);

                // dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS);
                row4 = vec_sub(tmp10, tmp11);

                // z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);
                z1 = vec_madd(vec_add(tmp12, tmp13), vec_0_541196100, (vector float)zero);

                // dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865),
                //                                CONST_BITS-PASS1_BITS);
                row2 = vec_madd(tmp13, vec_0_765366865, z1);

                // dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065),
                //                                CONST_BITS-PASS1_BITS);
                row6 = vec_madd(tmp12, vec_1_847759065, z1);

                z1 = vec_add(tmp4, tmp7); // z1 = tmp4 + tmp7;
                z2 = vec_add(tmp5, tmp6); // z2 = tmp5 + tmp6;
                z3 = vec_add(tmp4, tmp6); // z3 = tmp4 + tmp6;
                z4 = vec_add(tmp5, tmp7); // z4 = tmp5 + tmp7;

                // z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */
                z5 = vec_madd(vec_add(z3, z4), vec_1_175875602, (vector float)zero);

                // z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
                z3 = vec_madd(z3, vec_1_961570560, z5);

                // z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
                z4 = vec_madd(z4, vec_0_390180644, z5);

                // The following adds are rolled into the multiplies above:
                // z3 = vec_add(z3, z5); // z3 += z5;
                // z4 = vec_add(z4, z5); // z4 += z5;

                // z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
                // Wow! It's actually more efficient to roll this multiply
                // into the adds below, even though the multiply gets done twice!
                // z2 = vec_madd(z2, vec_2_562915447, (vector float)zero);

                // z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
                // Same with this one...
                // z1 = vec_madd(z1, vec_0_899976223, (vector float)zero);

                // tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
                // dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS);
                row7 = vec_madd(tmp4, vec_0_298631336, vec_madd(z1, vec_0_899976223, z3));

                // tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
                // dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS);
                row5 = vec_madd(tmp5, vec_2_053119869, vec_madd(z2, vec_2_562915447, z4));

                // tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
                // dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS);
                row3 = vec_madd(tmp6, vec_3_072711026, vec_madd(z2, vec_2_562915447, z3));

                // tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
                // dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS);
                row1 = vec_madd(z1, vec_0_899976223, vec_madd(tmp7, vec_1_501321110, z4));

                // Swap the row values with the alts. If this is the first half,
                // this sets up the low values to be acted on in the second half.
                // If this is the second half, it puts the high values back in
                // the row values where they are expected to be when we're done.
                SWAP(row0, alt0);
                SWAP(row1, alt1);
                SWAP(row2, alt2);
                SWAP(row3, alt3);
                SWAP(row4, alt4);
                SWAP(row5, alt5);
                SWAP(row6, alt6);
                SWAP(row7, alt7);
            }

            if (whichPass == 1)
            {
                // Transpose the data for the second pass.

                // First, block-transpose the upper right with the lower left.
                SWAP(row4, alt0);
                SWAP(row5, alt1);
                SWAP(row6, alt2);
                SWAP(row7, alt3);

                // Now, transpose each block of four.
                TRANSPOSE4(row0, row1, row2, row3);
                TRANSPOSE4(row4, row5, row6, row7);
                TRANSPOSE4(alt0, alt1, alt2, alt3);
                TRANSPOSE4(alt4, alt5, alt6, alt7);
            }
        }
    }

    // used after the quantize step
    int oldBaseValue = 0;

    // Perform the quantize step, using the floating point data
    // still in the row/alt registers.
    {
        const int* biasAddr;
        const vector signed int* qmat;
        vector float bias, negBias;

        if (s->mb_intra)
        {
            vector signed int baseVector;

            // We must cache element 0 in the intra case
            // (it needs special handling).
            baseVector = vec_cts(vec_splat(row0, 0), 0);
            vec_ste(baseVector, 0, &oldBaseValue);

            qmat = (vector signed int*)s->q_intra_matrix[qscale];
            biasAddr = &(s->intra_quant_bias);
        }
        else
        {
            qmat = (vector signed int*)s->q_inter_matrix[qscale];
            biasAddr = &(s->inter_quant_bias);
        }

        // Load the bias vector (We add 0.5 to the bias so that we're
        // rounding when we convert to int, instead of flooring.)
        {
            vector signed int biasInt;
            const vector float negOneFloat = (vector float)FOUROF(-1.0f);
            LOAD4(biasInt, biasAddr);
            bias = vec_ctf(biasInt, QUANT_BIAS_SHIFT);
            negBias = vec_madd(bias, negOneFloat, zero);
        }
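        // vec_ctf with a scale of QUANT_BIAS_SHIFT turns the fixed-point
        // bias into its real value in one step (dividing by
        // 2^QUANT_BIAS_SHIFT), and negBias is formed by a multiply-add
        // against -1.0 since AltiVec float arithmetic is built around
        // vec_madd.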

        {
            vector float q0, q1, q2, q3, q4, q5, q6, q7;

            q0 = vec_ctf(qmat[0], QMAT_SHIFT);
            q1 = vec_ctf(qmat[2], QMAT_SHIFT);
            q2 = vec_ctf(qmat[4], QMAT_SHIFT);
            q3 = vec_ctf(qmat[6], QMAT_SHIFT);
            q4 = vec_ctf(qmat[8], QMAT_SHIFT);
            q5 = vec_ctf(qmat[10], QMAT_SHIFT);
            q6 = vec_ctf(qmat[12], QMAT_SHIFT);
            q7 = vec_ctf(qmat[14], QMAT_SHIFT);

            row0 = vec_sel(vec_madd(row0, q0, negBias), vec_madd(row0, q0, bias),
                           vec_cmpgt(row0, zero));
            row1 = vec_sel(vec_madd(row1, q1, negBias), vec_madd(row1, q1, bias),
                           vec_cmpgt(row1, zero));
            row2 = vec_sel(vec_madd(row2, q2, negBias), vec_madd(row2, q2, bias),
                           vec_cmpgt(row2, zero));
            row3 = vec_sel(vec_madd(row3, q3, negBias), vec_madd(row3, q3, bias),
                           vec_cmpgt(row3, zero));
            row4 = vec_sel(vec_madd(row4, q4, negBias), vec_madd(row4, q4, bias),
                           vec_cmpgt(row4, zero));
            row5 = vec_sel(vec_madd(row5, q5, negBias), vec_madd(row5, q5, bias),
                           vec_cmpgt(row5, zero));
            row6 = vec_sel(vec_madd(row6, q6, negBias), vec_madd(row6, q6, bias),
                           vec_cmpgt(row6, zero));
            row7 = vec_sel(vec_madd(row7, q7, negBias), vec_madd(row7, q7, bias),
                           vec_cmpgt(row7, zero));

            q0 = vec_ctf(qmat[1], QMAT_SHIFT);
            q1 = vec_ctf(qmat[3], QMAT_SHIFT);
            q2 = vec_ctf(qmat[5], QMAT_SHIFT);
            q3 = vec_ctf(qmat[7], QMAT_SHIFT);
            q4 = vec_ctf(qmat[9], QMAT_SHIFT);
            q5 = vec_ctf(qmat[11], QMAT_SHIFT);
            q6 = vec_ctf(qmat[13], QMAT_SHIFT);
            q7 = vec_ctf(qmat[15], QMAT_SHIFT);

            alt0 = vec_sel(vec_madd(alt0, q0, negBias), vec_madd(alt0, q0, bias),
                           vec_cmpgt(alt0, zero));
            alt1 = vec_sel(vec_madd(alt1, q1, negBias), vec_madd(alt1, q1, bias),
                           vec_cmpgt(alt1, zero));
            alt2 = vec_sel(vec_madd(alt2, q2, negBias), vec_madd(alt2, q2, bias),
                           vec_cmpgt(alt2, zero));
            alt3 = vec_sel(vec_madd(alt3, q3, negBias), vec_madd(alt3, q3, bias),
                           vec_cmpgt(alt3, zero));
            alt4 = vec_sel(vec_madd(alt4, q4, negBias), vec_madd(alt4, q4, bias),
                           vec_cmpgt(alt4, zero));
            alt5 = vec_sel(vec_madd(alt5, q5, negBias), vec_madd(alt5, q5, bias),
                           vec_cmpgt(alt5, zero));
            alt6 = vec_sel(vec_madd(alt6, q6, negBias), vec_madd(alt6, q6, bias),
                           vec_cmpgt(alt6, zero));
            alt7 = vec_sel(vec_madd(alt7, q7, negBias), vec_madd(alt7, q7, bias),
                           vec_cmpgt(alt7, zero));
        }
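        // Each lane computes both coeff*q + bias and coeff*q - bias, and
        // vec_cmpgt picks the variant matching the coefficient's sign, so
        // the bias always pushes the magnitude away from zero; combined
        // with the truncating vec_cts below, that rounds |coeff| instead
        // of flooring it.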

    }

    // Store the data back into the original block
    {
        vector signed short data0, data1, data2, data3, data4, data5, data6, data7;

        data0 = vec_pack(vec_cts(row0, 0), vec_cts(alt0, 0));
        data1 = vec_pack(vec_cts(row1, 0), vec_cts(alt1, 0));
        data2 = vec_pack(vec_cts(row2, 0), vec_cts(alt2, 0));
        data3 = vec_pack(vec_cts(row3, 0), vec_cts(alt3, 0));
        data4 = vec_pack(vec_cts(row4, 0), vec_cts(alt4, 0));
        data5 = vec_pack(vec_cts(row5, 0), vec_cts(alt5, 0));
        data6 = vec_pack(vec_cts(row6, 0), vec_cts(alt6, 0));
        data7 = vec_pack(vec_cts(row7, 0), vec_cts(alt7, 0));

        {
            // Clamp for overflow
            vector signed int max_q_int, min_q_int;
            vector signed short max_q, min_q;

            LOAD4(max_q_int, &(s->max_qcoeff));
            LOAD4(min_q_int, &(s->min_qcoeff));

            max_q = vec_pack(max_q_int, max_q_int);
            min_q = vec_pack(min_q_int, min_q_int);

            data0 = vec_max(vec_min(data0, max_q), min_q);
            data1 = vec_max(vec_min(data1, max_q), min_q);
            data2 = vec_max(vec_min(data2, max_q), min_q);
            data3 = vec_max(vec_min(data3, max_q), min_q);
            data4 = vec_max(vec_min(data4, max_q), min_q);
            data5 = vec_max(vec_min(data5, max_q), min_q);
            data6 = vec_max(vec_min(data6, max_q), min_q);
            data7 = vec_max(vec_min(data7, max_q), min_q);
        }

        vector bool char zero_01, zero_23, zero_45, zero_67;
        vector signed char scanIndices_01, scanIndices_23, scanIndices_45, scanIndices_67;
        vector signed char negOne = vec_splat_s8(-1);
        vector signed char* scanPtr =
            (vector signed char*)(s->intra_scantable.inverse);

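        // Each byte of scanPtr holds the scan-order position of the
        // corresponding coefficient; positions of zero coefficients are
        // replaced with -1 below, so a horizontal max over all 64 bytes
        // yields exactly the last nonzero position in scan order.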
        // Determine the largest non-zero index.
        zero_01 = vec_pack(vec_cmpeq(data0, (vector short)zero),
                           vec_cmpeq(data1, (vector short)zero));
        zero_23 = vec_pack(vec_cmpeq(data2, (vector short)zero),
                           vec_cmpeq(data3, (vector short)zero));
        zero_45 = vec_pack(vec_cmpeq(data4, (vector short)zero),
                           vec_cmpeq(data5, (vector short)zero));
        zero_67 = vec_pack(vec_cmpeq(data6, (vector short)zero),
                           vec_cmpeq(data7, (vector short)zero));

        // 64 biggest values
        scanIndices_01 = vec_sel(scanPtr[0], negOne, zero_01);
        scanIndices_23 = vec_sel(scanPtr[1], negOne, zero_23);
        scanIndices_45 = vec_sel(scanPtr[2], negOne, zero_45);
        scanIndices_67 = vec_sel(scanPtr[3], negOne, zero_67);

        // 32 largest values
        scanIndices_01 = vec_max(scanIndices_01, scanIndices_23);
        scanIndices_45 = vec_max(scanIndices_45, scanIndices_67);

        // 16 largest values
        scanIndices_01 = vec_max(scanIndices_01, scanIndices_45);

        // 8 largest values
        scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
                                 vec_mergel(scanIndices_01, negOne));

        // 4 largest values
        scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
                                 vec_mergel(scanIndices_01, negOne));

        // 2 largest values
        scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
                                 vec_mergel(scanIndices_01, negOne));

        // largest value
        scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
                                 vec_mergel(scanIndices_01, negOne));

        scanIndices_01 = vec_splat(scanIndices_01, 0);

        signed char lastNonZeroChar;

        vec_ste(scanIndices_01, 0, &lastNonZeroChar);

        lastNonZero = lastNonZeroChar;

        // While the data is still in vectors we check for the transpose IDCT permute
        // and handle it using the vector unit if we can. This is the permute used
        // by the AltiVec IDCT, so it is common when using the AltiVec DCT.

        if ((lastNonZero > 0) && (s->idct_permutation_type == FF_TRANSPOSE_IDCT_PERM))
        {
            TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);
        }

        vec_st(data0, 0, data);
        vec_st(data1, 16, data);
        vec_st(data2, 32, data);
        vec_st(data3, 48, data);
        vec_st(data4, 64, data);
        vec_st(data5, 80, data);
        vec_st(data6, 96, data);
        vec_st(data7, 112, data);
    }

    // special handling of block[0]
    if (s->mb_intra)
    {
        if (!s->h263_aic)
        {
            if (n < 4)
                oldBaseValue /= s->y_dc_scale;
            else
                oldBaseValue /= s->c_dc_scale;
        }

        // Divide by 8, rounding the result
        data[0] = (oldBaseValue + 4) >> 3;
    }

    // We handled the transpose permutation above and we don't
    // need to permute the "no permutation" case.
    if ((lastNonZero > 0) &&
        (s->idct_permutation_type != FF_TRANSPOSE_IDCT_PERM) &&
        (s->idct_permutation_type != FF_NO_IDCT_PERM))
    {
        ff_block_permute(data, s->idct_permutation,
                         s->intra_scantable.scantable, lastNonZero);
    }

    return lastNonZero;
}
#undef FOUROF

/*
  AltiVec version of dct_unquantize_h263
  this code assumes `block' is 16-byte aligned
*/
void dct_unquantize_h263_altivec(MpegEncContext *s,
                                 DCTELEM *block, int n, int qscale)
{
    POWERPC_TBL_DECLARE(altivec_dct_unquantize_h263_num, 1);
    int i, level, qmul, qadd;
    int nCoeffs;

    assert(s->block_last_index[n] >= 0);

    POWERPC_TBL_START_COUNT(altivec_dct_unquantize_h263_num, 1);

    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;
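    // e.g. qscale = 5 gives qmul = 10 and qadd = 5: a stored level of 3
    // dequantizes to 3*10 + 5 = 35, and -3 to -3*10 - 5 = -35.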

    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                block[0] = block[0] * s->y_dc_scale;
            else
                block[0] = block[0] * s->c_dc_scale;
        } else
            qadd = 0;
        i = 1;
        nCoeffs = 63; // does not always use the zigzag table
    } else {
        i = 0;
        nCoeffs = s->intra_scantable.raster_end[ s->block_last_index[n] ];
    }

#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    for(; i <= nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    {
        register const vector short vczero = (const vector short)vec_splat_s16(0);
        short __attribute__ ((aligned(16))) qmul8[] =
            {
                qmul, qmul, qmul, qmul,
                qmul, qmul, qmul, qmul
            };
        short __attribute__ ((aligned(16))) qadd8[] =
            {
                qadd, qadd, qadd, qadd,
                qadd, qadd, qadd, qadd
            };
        short __attribute__ ((aligned(16))) nqadd8[] =
            {
                -qadd, -qadd, -qadd, -qadd,
                -qadd, -qadd, -qadd, -qadd
            };
        register vector short blockv, qmulv, qaddv, nqaddv, temp1;
        register vector bool short blockv_null, blockv_neg;
        register short backup_0 = block[0];
        register int j = 0;

        qmulv = vec_ld(0, qmul8);
        qaddv = vec_ld(0, qadd8);
        nqaddv = vec_ld(0, nqadd8);

#if 0 // block *is* 16-byte aligned, it seems.
        // first make sure block[j] is 16-byte aligned
        for(j = 0; (j <= nCoeffs) && ((((unsigned long)block) + (j << 1)) & 0x0000000F); j++) {
            level = block[j];
            if (level) {
                if (level < 0) {
                    level = level * qmul - qadd;
                } else {
                    level = level * qmul + qadd;
                }
                block[j] = level;
            }
        }
#endif

        // vectorize all the 16-byte aligned blocks
        // of 8 elements
        for(; (j + 7) <= nCoeffs; j += 8)
        {
            blockv = vec_ld(j << 1, block);
            blockv_neg = vec_cmplt(blockv, vczero);
            blockv_null = vec_cmpeq(blockv, vczero);
            // choose between +qadd or -qadd as the third operand
            temp1 = vec_sel(qaddv, nqaddv, blockv_neg);
            // multiply & add (block[j..j+7] * qmul [+-] qadd)
            temp1 = vec_mladd(blockv, qmulv, temp1);
            // put 0 where block[j..j+7] used to have 0
            blockv = vec_sel(temp1, blockv, blockv_null);
            vec_st(blockv, j << 1, block);
        }
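        // vec_mladd does a 16x16-bit multiply keeping only the low 16 bits
        // of each product before the add, which is fine here since the
        // dequantized levels have to fit in a 16-bit DCTELEM anyway.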

        // if nCoeffs isn't a multiple of 8, finish the job
        // using good old scalar units.
        // (we could do it using a truncated vector,
        // but I'm not sure it's worth the hassle)
        for(; j <= nCoeffs; j++) {
            level = block[j];
            if (level) {
                if (level < 0) {
                    level = level * qmul - qadd;
                } else {
                    level = level * qmul + qadd;
                }
                block[j] = level;
            }
        }

        if (i == 1)
        { // cheat: this avoids special-casing the first iteration
            block[0] = backup_0;
        }
    }
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */

    POWERPC_TBL_STOP_COUNT(altivec_dct_unquantize_h263_num, nCoeffs == 63);
}