#include "avutil.h"
-#ifdef SYS_DARWIN
-#define AVV(x...) (x)
-#else
-#define AVV(x...) {x}
-#endif
-
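/* Illustrative sketch (editor's note, not part of the patch): AVV papered
 * over old Apple GCC's "(x, y, ...)" vector-literal syntax versus FSF
 * GCC's "{x, y, ...}". Removing it assumes the brace form below now
 * compiles on both targets; the variable name is hypothetical. */
#include <altivec.h>
static const vector signed short v_ones_sketch =
    {1, 1, 1, 1, 1, 1, 1, 1}; /* was (vector signed short)AVV(1,1,1,1,1,1,1,1) on Darwin */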
#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
do { \
__typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)v_srcA##i)
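/* Illustrative sketch (editor's note, not part of the patch): the
 * vec_mergeh-with-zero idiom above zero-extends the eight high bytes of a
 * pixel vector into 16-bit lanes, since mergeh interleaves a 0x00 byte
 * before each source byte on big-endian AltiVec. The function name is
 * hypothetical. */
#include <altivec.h>
static vector signed short widen_high8_sketch(vector unsigned char px)
{
    const vector unsigned char zero = vec_splat_u8(0);
    /* result lanes: (0x00 << 8) | px[i]  for i = 0..7 */
    return (vector signed short)vec_mergeh(zero, px);
}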
- // special casing the aligned case is worthwhile, as all call from
- // the (transposed) horizontable deblocks will be aligned, i naddition
- // to the naturraly aligned vertical deblocks.
+ /* Special-casing the aligned case is worthwhile, as all calls from
+ * the (transposed) horizontal deblocks will be aligned, in addition
+ * to the naturally aligned vertical deblocks. */
if (properStride && srcAlign) {
LOAD_LINE_ALIGNED(0);
LOAD_LINE_ALIGNED(1);
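/* Illustrative sketch (editor's note, not part of the patch): what the
 * aligned special case saves. A misaligned AltiVec load costs two vec_ld's
 * plus a vec_perm steered by vec_lvsl, whereas LOAD_LINE_ALIGNED can issue
 * a single vec_ld. The function name is hypothetical. */
#include <altivec.h>
static vector unsigned char load_misaligned_sketch(const unsigned char *p)
{
    vector unsigned char msq  = vec_ld(0, p);   /* quadword holding p[0]  */
    vector unsigned char lsq  = vec_ld(15, p);  /* next quadword          */
    vector unsigned char mask = vec_lvsl(0, p); /* alignment shift vector */
    return vec_perm(msq, lsq, mask);            /* spliced, as if aligned */
}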
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true. Quite a lot of load/stores
- can be removed by assuming proper alignement of
+ can be removed by assuming proper alignment of
src & stride :-(
*/
uint8_t *src2 = src;
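/* Illustrative sketch (editor's note, not part of the patch): why the perm
 * vector is recomputed per line. vec_lvsl depends only on the low four
 * address bits, so two successive lines share a permute vector exactly when
 * stride % 16 == 0, which the comment above notes cannot be assumed.
 * Names are hypothetical. */
#include <altivec.h>
static void perm_reuse_sketch(const unsigned char *src, int stride)
{
    vector unsigned char perm_line0 = vec_lvsl(0, src);
    vector unsigned char perm_line1 = vec_lvsl(stride, src);
    /* perm_line0 == perm_line1 iff stride is a multiple of 16; otherwise
     * every line needs its own vec_lvsl. */
    (void)perm_line0;
    (void)perm_line1;
}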
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)vbT##i)
- // special casing the aligned case is worthwhile, as all call from
- // the (transposed) horizontable deblocks will be aligned, in addition
- // to the naturraly aligned vertical deblocks.
+ /* Special-casing the aligned case is worthwhile, as all calls from
+ * the (transposed) horizontal deblocks will be aligned, in addition
+ * to the naturally aligned vertical deblocks. */
if (properStride && srcAlign) {
LOAD_LINE_ALIGNED(0);
LOAD_LINE_ALIGNED(1);
vec_perm(vf##i, vbT##i, permHH); \
vec_st(vg##i, i * stride, src2)
- // special casing the aligned case is worthwhile, as all call from
- // the (transposed) horizontable deblocks will be aligned, in addition
- // to the naturraly aligned vertical deblocks.
+ /* Special-casing the aligned case is worthwhile, as all calls from
+ * the (transposed) horizontal deblocks will be aligned, in addition
+ * to the naturally aligned vertical deblocks. */
if (properStride && srcAlign) {
PACK_AND_STORE_ALIGNED(1);
PACK_AND_STORE_ALIGNED(2);
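/* Illustrative sketch (editor's note, not part of the patch): the store
 * side mirrors the widening load. Filtered 16-bit values are saturated
 * back down to bytes; vec_perm (with permHH above) then splices them onto
 * the untouched half of the original quadword before the aligned vec_st.
 * The function name is hypothetical. */
#include <altivec.h>
static vector unsigned char pack_row_sketch(vector signed short filtered)
{
    /* vec_packsu saturates each signed short into the 0..255 byte range */
    return vec_packsu(filtered, filtered);
}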
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true. Quite a lot of load/stores
- can be removed by assuming proper alignement of
+ can be removed by assuming proper alignment of
src & stride :-(
*/
uint8_t *src2 = src;
const vector signed short dornotd = vec_sel((vector signed short)zero,
dclampedfinal,
vec_cmplt(absmE, vqp));
- /* add/substract to l4 and l5 */
+ /* add/subtract to l4 and l5 */
const vector signed short vb4minusd = vec_sub(vb4, dornotd);
const vector signed short vb5plusd = vec_add(vb5, dornotd);
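/* Illustrative sketch (editor's note, not part of the patch): the
 * branch-free pattern used above. vec_cmplt builds a per-lane mask,
 * vec_sel picks the clamped delta where |e| < qp and zero elsewhere, and
 * the result is subtracted from line 4 and added to line 5. Names are
 * hypothetical. */
#include <altivec.h>
static void apply_delta_sketch(vector signed short *l4, vector signed short *l5,
                               vector signed short delta,
                               vector signed short abs_e, vector signed short qp)
{
    const vector signed short zero = vec_splat_s16(0);
    const vector bool short mask   = vec_cmplt(abs_e, qp);
    const vector signed short d    = vec_sel(zero, delta, mask);
    *l4 = vec_sub(*l4, d);
    *l5 = vec_add(*l5, d);
}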
/* finally, stores */
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true. Quite a lot of load/stores
- can be removed by assuming proper alignement of
+ can be removed by assuming proper alignment of
src & stride :-(
*/
uint8_t *srcCopy = src;