#if 0
/* 4 and 8 are very common cases (the only ones i've seen). these
- * should be unrolled and optimised
+ * should be unrolled and optimized
*/
if (predictor_coef_num == 4) {
- /* FIXME: optimised general case */
+ /* FIXME: optimized general case */
return;
}
if (predictor_coef_num == 8) {
- /* FIXME: optimised general case */
+ /* FIXME: optimized general case */
return;
}
#endif
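The unrolling that comment asks for would look roughly like this; a minimal sketch, assuming the predictor is a plain FIR filter over the history buffer with a right-shift for fixed-point coefficients (the names and the shift parameter are illustrative, not the decoder's actual interface):

    #include <stdint.h>

    /* Hypothetical order-4 special case: with the tap count fixed, the
     * compiler can keep all four coefficients in registers and drop the
     * inner loop of the general predictor entirely. */
    static int32_t predict_order4(const int32_t *hist, const int16_t coefs[4],
                                  int shift)
    {
        int32_t sum = hist[0] * coefs[0] + hist[1] * coefs[1] +
                      hist[2] * coefs[2] + hist[3] * coefs[3];
        return sum >> shift;
    }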
/* Keep a count of the blocks decoded in this frame */
ctx->blocksdecoded = 0;
- /* Initialise the rice structs */
+ /* Initialize the Rice structs */
ctx->riceX.k = 10;
ctx->riceX.ksum = (1 << ctx->riceX.k) * 16;
ctx->riceY.k = 10;
memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(int32_t));
p->buf = p->historybuffer;
- /* Initialise and zero the co-efficients */
+ /* Initialize and zero the coefficients */
memcpy(p->coeffsA[0], initial_coeffs, sizeof(initial_coeffs));
memcpy(p->coeffsA[1], initial_coeffs, sizeof(initial_coeffs));
memset(p->coeffsB, 0, sizeof(p->coeffsB));
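For context on what those fields do: k is the Rice parameter (the number of literal remainder bits) and ksum is a running magnitude sum used to adapt k as data is decoded. A minimal sketch of that scheme, with hypothetical bit-reader primitives and an illustrative adaptation rule, not the decoder's actual one:

    #include <stdint.h>

    struct rice_state {
        unsigned k;      /* current Rice parameter */
        uint32_t ksum;   /* running sum tracking ~16x the mean value */
    };

    /* Hypothetical bit-reader primitives, stand-ins for the decoder's own. */
    uint32_t get_unary(void *gb);
    uint32_t get_bits(void *gb, unsigned n);

    static void rice_init(struct rice_state *r)
    {
        r->k    = 10;
        r->ksum = (1u << r->k) * 16;   /* same initialization as above */
    }

    static uint32_t rice_decode(struct rice_state *r, void *gb)
    {
        /* unary quotient, then k literal remainder bits */
        uint32_t x = (get_unary(gb) << r->k) | get_bits(gb, r->k);

        /* Illustrative adaptation: nudge ksum toward 16 * mean(x), then
         * move k so that 1 << k stays near that mean. */
        r->ksum += x - (r->ksum >> 4);
        while (r->k > 0 && r->ksum < (16u << (r->k - 1)))
            r->k--;
        while (r->k < 24 && r->ksum > (16u << (r->k + 1)))
            r->k++;
        return x;
    }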
void MPV_common_init_armv4l(MpegEncContext *s)
{
/* IWMMXT support is a superset of armv5te, so
- * allow optimised functions for armv5te unless
+ * allow optimized functions for armv5te unless
* a better iwmmxt function exists
*/
#ifdef HAVE_ARMV5TE
}
/* Return the Picture timestamp as the frame number */
- /* we substract 1 because it is added on utils.c */
+ /* we subtract 1 because it is added in utils.c */
avctx->frame_number = s->picture_number - 1;
#ifdef PRINT_FRAME_TIME
#if 0 //?
/* Return the Picture timestamp as the frame number */
- /* we substract 1 because it is added on utils.c */
+ /* we subtract 1 because it is added in utils.c */
avctx->frame_number = s->picture_number - 1;
#endif
return get_consumed_bytes(s, buf_index, buf_size);
* Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
* Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
*
- * MMX optimised DSP functions, based on H.264 optimisations by
+ * MMX optimized DSP functions, based on H.264 optimizations by
* Michael Niedermayer and Loren Merritt
*
* This file is part of FFmpeg.
"movq (%1,%3),%%mm4\n" /* mm4 = pix2[1][0-7] */
/* todo: mm1-mm2, mm3-mm4 */
- /* algo: substract mm1 from mm2 with saturation and vice versa */
+ /* algo: subtract mm1 from mm2 with saturation and vice versa */
/* OR the results to get absolute difference */
"movq %%mm1,%%mm5\n"
"movq %%mm3,%%mm6\n"
"movq 8(%1),%%mm4\n" /* mm4 = pix2[8-15] */
/* todo: mm1-mm2, mm3-mm4 */
- /* algo: substract mm1 from mm2 with saturation and vice versa */
+ /* algo: subtract mm1 from mm2 with saturation and vice versa */
/* OR the results to get absolute difference */
"movq %%mm1,%%mm5\n"
"movq %%mm3,%%mm6\n"
"movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */
/* todo: mm1-mm2, mm3-mm4 */
- /* algo: substract mm1 from mm2 with saturation and vice versa */
+ /* algo: subtract mm1 from mm2 with saturation and vice versa */
/* OR the results to get absolute difference */
"movdqa %%xmm1,%%xmm5\n"
"movdqa %%xmm3,%%xmm6\n"
static const short _T3[] ATTR_ALIGN(8) = {T3,T3,T3,T3};
static const short _C4[] ATTR_ALIGN(8) = {C4,C4,C4,C4};
- /* column code adapted from peter gubanov */
+ /* column code adapted from Peter Gubanov */
/* http://www.elecard.com/peter/idct.shtml */
movq_m2r (*_T1, mm0); // mm0 = T1
if (level < -2048 || level > 2047)
fprintf(stderr, "unquant error %d %d\n", i, level);
#endif
- We can suppose that result of two multiplications can't be greate of 0xFFFF
+ We can suppose that the result of the two multiplications can't be greater than 0xFFFF
i.e. is 16-bit, so we use here only PMULLW instruction and can avoid
a complex multiplication.
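In other words: when the product is known to fit in 16 bits, the low half of a 16x16 multiply is the whole product, so the pmulhw/unpack steps of a full widening multiply are unnecessary. A sketch with SSE2 intrinsics (the original uses plain MMX):

    #include <emmintrin.h>

    /* Valid only under the stated precondition that every 16-bit product
     * fits in 16 bits; otherwise the high halves from pmulhw would also
     * be needed. */
    static __m128i mul16_low_only(__m128i coefs, __m128i quants)
    {
        return _mm_mullo_epi16(coefs, quants);
    }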
=====================================================
- PIX_FMT_422 must convert to and from PIX_FMT_422P.
- The other conversion functions are just optimisations for common cases.
+ The other conversion functions are just optimizations for common cases.
*/
static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
[PIX_FMT_YUV420P] = {
}
}
-// RGB24 has optimised routines
+// RGB24 has optimized routines
#if !defined(FMT_RGB32) && !defined(FMT_RGB24)
/* alpha support */
}
#endif /* HAVE_MMX */
-/* slow version to handle limit cases. Does not need optimisation */
+/* slow version to handle limit cases. Does not need optimization */
static void h_resample_slow(uint8_t *dst, int dst_width,
const uint8_t *src, int src_width,
int src_start, int src_incr, int16_t *filters)
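A sketch of what such a slow path has to do: clamp every tap index into the source so the filter can run right up to the borders; the fast versions omit the clamping because they are only invoked where all taps are in range. The tap count, fixed-point format, and filter normalization below are illustrative, not the file's actual values:

    #include <stdint.h>

    #define NB_TAPS     4    /* illustrative tap count */
    #define FILTER_BITS 8    /* illustrative filter normalization */

    static void h_resample_slow_sketch(uint8_t *dst, int dst_width,
                                       const uint8_t *src, int src_width,
                                       int pos, int incr,     /* 16.16 fixed point */
                                       const int16_t *filter) /* NB_TAPS coeffs */
    {
        for (int i = 0; i < dst_width; i++) {
            int base = pos >> 16;
            int sum  = 0;
            for (int t = 0; t < NB_TAPS; t++) {
                int j = base + t;
                if (j < 0)          j = 0;              /* clamp at the left edge */
                if (j >= src_width) j = src_width - 1;  /* and at the right edge */
                sum += src[j] * filter[t];
            }
            sum >>= FILTER_BITS;
            dst[i] = sum < 0 ? 0 : sum > 255 ? 255 : sum;
            pos += incr;
        }
    }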
}
}
emms_c();
- dummy = d1; /* avoid optimisation */
+ dummy = d1; /* avoid optimization */
ti = gettime() - ti;
printf(" %0.0f kop/s\n",
uint8_t permutated[64];
uint8_t raster_end[64];
#ifdef ARCH_POWERPC
- /** Used by dct_quantise_alitvec to find last-non-zero */
+ /** Used by dct_quantize_altivec to find last-non-zero */
DECLARE_ALIGNED_8(uint8_t, inverse[64]);
#endif
} ScanTable;
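The inverse table referred to there is just the inverse permutation of the scan order, so a raster position can be mapped back to its scan index in O(1). A minimal sketch of how such a table is built:

    #include <stdint.h>

    /* inverse[] undoes permutated[]: if a coefficient sits at raster
     * position permutated[i], then inverse[permutated[i]] recovers its scan
     * index i, which is what a last-nonzero search needs. */
    static void build_inverse_scan(uint8_t inverse[64],
                                   const uint8_t permutated[64])
    {
        for (int i = 0; i < 64; i++)
            inverse[permutated[i]] = i;
    }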
dstV = \
(vector signed short)vec_mergeh((vector signed char)vzero, \
(vector signed char)dstO); \
- /* substractions inside the first butterfly */ \
+ /* subtractions inside the first butterfly */ \
but0 = vec_sub(srcV, dstV); \
op1 = vec_perm(but0, but0, perm1); \
but1 = vec_mladd(but0, vprod1, op1); \
schedule for the 7450, and its code isn't much faster than
gcc-3.3 on the 7450 (but uses 25% less instructions...)
- On the 970, the hand-made RA is still a win (arount 690
+ On the 970, the hand-made RA is still a win (around 690
vs. around 780), but xlc goes to around 660 on the
regular C code...
*/
dstW = \
(vector signed short)vec_mergel((vector signed char)vzero, \
(vector signed char)dstO); \
- /* substractions inside the first butterfly */ \
+ /* subtractions inside the first butterfly */ \
but0 = vec_sub(srcV, dstV); \
but0S = vec_sub(srcW, dstW); \
op1 = vec_perm(but0, but0, perm1); \
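For reference, the butterfly these comments describe is the standard Hadamard building block: each pair is replaced by its sum and difference, and log2(n) such stages give the full transform. A scalar sketch (the AltiVec code forms both halves at once with vec_perm plus vec_mladd/vec_sub):

    #include <stdint.h>

    /* One in-place butterfly stage over n int16 values, pairing elements
     * "half" apart; calling with half = 1, 2, 4, ..., n/2 yields a Hadamard
     * transform of the block. */
    static void butterfly_stage(int16_t *v, int n, int half)
    {
        for (int i = 0; i < n; i += 2 * half)
            for (int j = i; j < i + half; j++) {
                int16_t a = v[j], b = v[j + half];
                v[j]        = a + b;
                v[j + half] = a - b;
            }
    }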
/*
altivec-enhanced gmc1. ATM this code assume stride is a multiple of 8,
- to preserve proper dst alignement.
+ to preserve proper dst alignment.
*/
#define GMC1_PERF_COND (h==8)
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int stride, int h, int x16, int y16, int rounder)
vector float row0, row1, row2, row3, row4, row5, row6, row7;
vector float alt0, alt1, alt2, alt3, alt4, alt5, alt6, alt7;
const vector float zero = (const vector float)FOUROF(0.);
- // used after quantise step
+ // used after quantize step
int oldBaseValue = 0;
// Load the data into the row/alt vectors
}
}
- // perform the quantise step, using the floating point data
+ // perform the quantize step, using the floating point data
// still in the row/alt registers
{
const int* biasAddr;
data[0] = (oldBaseValue + 4) >> 3;
}
- // We handled the tranpose permutation above and we don't
+ // We handled the transpose permutation above and we don't
// need to permute the "no" permutation case.
if ((lastNonZero > 0) &&
(s->dsp.idct_permutation_type != FF_TRANSPOSE_IDCT_PERM) &&
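A rough scalar equivalent of that quantize step, to show the shape of the work the float vectors are doing; the parameter names are illustrative, and the rounding rule is a plausible stand-in rather than the exact AltiVec arithmetic:

    #include <stdint.h>

    /* Scale each AC coefficient, add a sign-aware rounding bias, truncate,
     * and track the last nonzero index for the entropy coder. DC (index 0)
     * is handled separately, as in the snippet above. */
    static int quantize_block_sketch(int16_t data[64], const float qmat[64],
                                     float bias)
    {
        int last_nonzero = 0;
        for (int i = 1; i < 64; i++) {
            float v   = data[i] * qmat[i];
            int level = (int)(v + (v >= 0 ? bias : -bias));
            data[i] = level;
            if (level)
                last_nonzero = i;
        }
        return last_nonzero;
    }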
/* libavcodec initialization code */
void dsputil_init_vis(DSPContext* c, AVCodecContext *avctx)
{
- /* VIS specific optimisations */
+ /* VIS-specific optimizations */
int accel = vis_level ();
if (accel & ACCEL_SPARC_VIS) {
}
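The pattern here is the usual run-time dispatch: fill the function-pointer table with portable defaults, then override entries when the CPU reports the feature. A minimal sketch with hypothetical names (cf. vis_level() and ACCEL_SPARC_VIS above):

    #include <stdint.h>

    typedef struct {
        void (*put_pixels)(uint8_t *dst, const uint8_t *src,
                           int stride, int h);
    } DSPTableSketch;

    /* Hypothetical implementations and probe, stand-ins for the real ones. */
    void put_pixels_c(uint8_t *dst, const uint8_t *src, int stride, int h);
    void put_pixels_vis(uint8_t *dst, const uint8_t *src, int stride, int h);
    int  cpu_has_vis(void);

    static void dsp_init_sketch(DSPTableSketch *c)
    {
        c->put_pixels = put_pixels_c;         /* portable default first */
        if (cpu_has_vis())
            c->put_pixels = put_pixels_vis;   /* accelerated override */
    }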
/* Return the Picture timestamp as the frame number */
- /* we substract 1 because it is added on utils.c */
+ /* we subtract 1 because it is added in utils.c */
avctx->frame_number = s->picture_number - 1;
av_free(buf2);
if (ioctl(s->fd, SNDCTL_DSP_GETISPACE, &abufi) == 0) {
bdelay += abufi.bytes;
}
- /* substract time represented by the number of bytes in the audio fifo */
+ /* subtract time represented by the number of bytes in the audio fifo */
cur_time -= (bdelay * 1000000LL) / (s->sample_rate * s->channels);
/* convert to wanted units */
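The conversion being done there, as a standalone sketch: bytes still sitting in the capture fifo correspond to audio sampled in the past, so their duration is subtracted from the wall-clock timestamp. Note the snippet above divides by rate * channels only, implicitly counting one byte per sample; the bytes_per_sample parameter below is an added assumption for clarity:

    #include <stdint.h>

    /* Microseconds of audio represented by a byte count in the fifo. */
    static int64_t fifo_delay_us(int64_t bytes, int sample_rate, int channels,
                                 int bytes_per_sample)
    {
        return bytes * 1000000LL /
               ((int64_t)sample_rate * channels * bytes_per_sample);
    }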
uint64_t send_time; /**< time to send file, in 100-nanosecond units
* invalid if broadcasting (could be ignored) */
uint32_t preroll; /**< timestamp of the first packet, in milliseconds
- * if nonzero - substract from time */
+ * if nonzero - subtract from time */
uint32_t ignore; ///< preroll is 64bit - but let's just ignore it
uint32_t flags; /**< 0x01 - broadcast
* 0x02 - seekable
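How such a preroll is typically consumed, sketched with illustrative names: if the field is nonzero, it is subtracted from every packet timestamp so presentation starts at zero:

    #include <stdint.h>

    /* Shift a packet timestamp (milliseconds) back by the preroll, if any. */
    static int64_t apply_preroll_ms(int64_t pts_ms, uint32_t preroll_ms)
    {
        return preroll_ms ? pts_ms - (int64_t)preroll_ms : pts_ms;
    }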
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true. Quite a lot of load/stores
- can be removed by assuming proper alignement of
+ can be removed by assuming proper alignment of
src & stride :-(
*/
uint8_t *src2 = src;
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true. Quite a lot of load/stores
- can be removed by assuming proper alignement of
+ can be removed by assuming proper alignment of
src & stride :-(
*/
uint8_t *src2 = src;
const vector signed short dornotd = vec_sel((vector signed short)zero,
dclampedfinal,
vec_cmplt(absmE, vqp));
- /* add/substract to l4 and l5 */
+ /* add/subtract to l4 and l5 */
const vector signed short vb4minusd = vec_sub(vb4, dornotd);
const vector signed short vb5plusd = vec_add(vb5, dornotd);
/* finally, stores */
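What that hunk does is branch-free conditional filtering: the correction term d is first masked to zero wherever the filter must not trigger (vec_sel with a comparison mask), then applied with one subtract and one add, so no per-pixel branches are needed. A sketch of the pattern, not the file's actual code:

    #include <altivec.h>

    static void filter_pair_sketch(vector signed short *l4,
                                   vector signed short *l5,
                                   vector signed short d,
                                   vector bool short apply)
    {
        const vector signed short zero = vec_splat_s16(0);
        const vector signed short dsel = vec_sel(zero, d, apply);
        *l4 = vec_sub(*l4, dsel);   /* l4 - d where the filter applies */
        *l5 = vec_add(*l5, dsel);   /* l5 + d where the filter applies */
    }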
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true. Quite a lot of load/stores
- can be removed by assuming proper alignement of
+ can be removed by assuming proper alignment of
src & stride :-(
*/
uint8_t *srcCopy = src;
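What all three of these comments are pointing at is the AltiVec unaligned-load idiom: vector loads are inherently 16-byte aligned, so an unaligned vector must be assembled from the two neighbouring aligned quadwords using a permute vector from vec_lvsl. Guaranteed alignment of src and stride would make the second load and the permute vanish, hence the lament. A sketch:

    #include <altivec.h>

    /* Build one unaligned 16-byte vector from the two aligned loads that
     * straddle it; vec_lvsl(0, p) supplies the shuffle for vec_perm. */
    static vector unsigned char load_unaligned(const unsigned char *p)
    {
        const vector unsigned char lo   = vec_ld(0,  p);
        const vector unsigned char hi   = vec_ld(15, p);
        const vector unsigned char perm = vec_lvsl(0, p);
        return vec_perm(lo, hi, perm);
    }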
if (
( (c == '_') && (text == ci->text) ) || /* skip '_' (consider as space)
IF text was specified in cmd line
- (which doesn't like neasted quotes) */
+ (which doesn't like nested quotes) */
( c == '\n' ) /* Skip new line char, just go to new line */
)
continue;