/*
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "libavutil/internal.h"
#include "avcodec.h"
#include "dsputil.h"
#include "h261.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include <limits.h>

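/* Global motion compensation for the single-warping-point case (see
 * MPV_motion_internal(): real_sprite_warping_points == 1): the sprite warp
 * reduces to a pure translation, so the 16x16 luma block is predicted with
 * two 8-pixel-wide gmc1 calls (or a plain half-pel copy when the fractional
 * offset allows it) and each chroma plane with a single call. */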
static void gmc1_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        uint8_t **ref_picture)
{
    uint8_t *ptr;
    int src_x, src_y, motion_x, motion_y;
    ptrdiff_t offset, linesize, uvlinesize;
    int emu = 0;

    motion_x = s->sprite_offset[0][0];
    motion_y = s->sprite_offset[0][1];
    src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x <<= (3 - s->sprite_warping_accuracy);
    motion_y <<= (3 - s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x = 0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y = 0;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + src_y * linesize + src_x;

    if (s->flags & CODEC_FLAG_EMU_EDGE) {
        if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
            (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                     linesize, linesize,
                                     17, 17,
                                     src_x, src_y,
                                     s->h_edge_pos, s->v_edge_pos);
            ptr = s->edge_emu_buffer;
        }
    }

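    /* The fractional offset is kept in 1/16-pel units below (motion & 15).
     * If both fractions are multiples of 8, the prediction is an ordinary
     * half-pel copy, so the put_pixels (or no-rounding) functions can be
     * used instead of the slower gmc1 interpolation. */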
    if ((motion_x | motion_y) & 7) {
        s->dsp.gmc1(dest_y, ptr, linesize, 16,
                    motion_x & 15, motion_y & 15, 128 - s->no_rounding);
        s->dsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
                    motion_x & 15, motion_y & 15, 128 - s->no_rounding);
    } else {
        int dxy;

        dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
        if (s->no_rounding) {
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        } else {
            s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if (CONFIG_GRAY && s->flags & CODEC_FLAG_GRAY)
        return;

    motion_x = s->sprite_offset[1][0];
    motion_y = s->sprite_offset[1][1];
    src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x <<= (3 - s->sprite_warping_accuracy);
    motion_y <<= (3 - s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -8, s->width >> 1);
    if (src_x == s->width >> 1)
        motion_x = 0;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == s->height >> 1)
        motion_y = 0;

    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    if (s->flags & CODEC_FLAG_EMU_EDGE) {
        if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
            (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                     uvlinesize, uvlinesize,
                                     9, 9,
                                     src_x, src_y,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr = s->edge_emu_buffer;
            emu = 1;
        }
    }
    s->dsp.gmc1(dest_cb, ptr, uvlinesize, 8,
                motion_x & 15, motion_y & 15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
    }
    s->dsp.gmc1(dest_cr, ptr, uvlinesize, 8,
                motion_x & 15, motion_y & 15, 128 - s->no_rounding);
}

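/* Global motion compensation for the general (warped) case: per-macroblock
 * offsets ox/oy are derived from the sprite offset and the sprite_delta
 * matrix, and the dsp gmc() helper applies the warp; the luma block is
 * covered with two 8-pixel-wide gmc() calls and each chroma plane with a
 * single one. */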
static void gmc_motion(MpegEncContext *s,
                       uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                       uint8_t **ref_picture)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a = s->sprite_warping_accuracy;
    int ox, oy;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox = s->sprite_offset[0][0] + s->sprite_delta[0][0] * s->mb_x * 16 +
         s->sprite_delta[0][1] * s->mb_y * 16;
    oy = s->sprite_offset[0][1] + s->sprite_delta[1][0] * s->mb_x * 16 +
         s->sprite_delta[1][1] * s->mb_y * 16;

    s->dsp.gmc(dest_y, ptr, linesize, 16,
               ox, oy,
               s->sprite_delta[0][0], s->sprite_delta[0][1],
               s->sprite_delta[1][0], s->sprite_delta[1][1],
               a + 1, (1 << (2 * a + 1)) - s->no_rounding,
               s->h_edge_pos, s->v_edge_pos);
    s->dsp.gmc(dest_y + 8, ptr, linesize, 16,
               ox + s->sprite_delta[0][0] * 8,
               oy + s->sprite_delta[1][0] * 8,
               s->sprite_delta[0][0], s->sprite_delta[0][1],
               s->sprite_delta[1][0], s->sprite_delta[1][1],
               a + 1, (1 << (2 * a + 1)) - s->no_rounding,
               s->h_edge_pos, s->v_edge_pos);

    if (CONFIG_GRAY && s->flags & CODEC_FLAG_GRAY)
        return;

    ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 +
         s->sprite_delta[0][1] * s->mb_y * 8;
    oy = s->sprite_offset[1][1] + s->sprite_delta[1][0] * s->mb_x * 8 +
         s->sprite_delta[1][1] * s->mb_y * 8;

    ptr = ref_picture[1];
    s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
               ox, oy,
               s->sprite_delta[0][0], s->sprite_delta[0][1],
               s->sprite_delta[1][0], s->sprite_delta[1][1],
               a + 1, (1 << (2 * a + 1)) - s->no_rounding,
               s->h_edge_pos >> 1, s->v_edge_pos >> 1);

    ptr = ref_picture[2];
    s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
               ox, oy,
               s->sprite_delta[0][0], s->sprite_delta[0][1],
               s->sprite_delta[1][0], s->sprite_delta[1][1],
               a + 1, (1 << (2 * a + 1)) - s->no_rounding,
               s->h_edge_pos >> 1, s->v_edge_pos >> 1);
}

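/* Half-pel motion compensation of one 8x8 luma block.
 * Returns 1 if the edge-emulation buffer had to be used, 0 otherwise. */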
static inline int hpel_motion(MpegEncContext *s,
                              uint8_t *dest, uint8_t *src,
                              int src_x, int src_y,
                              op_pixels_func *pix_op,
                              int motion_x, int motion_y)
{
    int dxy = 0;
    int emu = 0;

    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do not forget half pels */
    src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
    if (src_x != s->width)
        dxy |= motion_x & 1;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y != s->height)
        dxy |= (motion_y & 1) << 1;
    src += src_y * s->linesize + src_x;

    if (s->unrestricted_mv && (s->flags & CODEC_FLAG_EMU_EDGE)) {
        if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 8, 0) ||
            (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 1) - 8, 0)) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                     s->linesize, s->linesize,
                                     9, 9,
                                     src_x, src_y, s->h_edge_pos,
                                     s->v_edge_pos);
            src = s->edge_emu_buffer;
            emu = 1;
        }
    }
    pix_op[dxy](dest, src, s->linesize, 8);
    return emu;
}

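/* Core of the MPEG-style motion compensation: applies a single half-pel
 * motion vector to the luma and both chroma planes. field_based,
 * bottom_field and field_select describe interlaced prediction; is_mpeg12
 * is passed as a constant by the !CONFIG_SMALL wrappers so the compiler can
 * drop the H.263/H.261 chroma paths from the MPEG-1/2 specialization. */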
static av_always_inline
void mpeg_motion_internal(MpegEncContext *s,
                          uint8_t *dest_y,
                          uint8_t *dest_cb,
                          uint8_t *dest_cr,
                          int field_based,
                          int bottom_field,
                          int field_select,
                          uint8_t **ref_picture,
                          op_pixels_func (*pix_op)[4],
                          int motion_x,
                          int motion_y,
                          int h,
                          int is_mpeg12,
                          int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y,
        uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t uvlinesize, linesize;

#if 0
    if (s->quarter_sample) {
        motion_x >>= 1;
        motion_y >>= 1;
    }
#endif

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = (mb_y << (4 - field_based)) + (motion_y >> 1);

    if (!is_mpeg12 && s->out_format == FMT_H263) {
        if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
            mx = (motion_x >> 1) | (motion_x & 1);
            my = motion_y >> 1;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x >> 1;
            uvsrc_y = src_y >> 1;
        }
    // Even chroma MVs are full-pel in H.261
    } else if (!is_mpeg12 && s->out_format == FMT_H261) {
        mx = motion_x / 4;
        my = motion_y / 4;
        uvdxy = 0;
        uvsrc_x = s->mb_x * 8 + mx;
        uvsrc_y = mb_y * 8 + my;
    } else {
        if (s->chroma_y_shift) {
            mx = motion_x / 2;
            my = motion_y / 2;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            if (s->chroma_x_shift) {
                // Chroma422
                mx = motion_x / 2;
                uvdxy = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x * 8 + (mx >> 1);
                uvsrc_y = src_y;
            } else {
                // Chroma444
                uvdxy = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 16, 0) ||
        (unsigned)src_y > FFMAX(v_edge_pos - (motion_y & 1) - h, 0)) {
        if (is_mpeg12 ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            av_log(s->avctx, AV_LOG_DEBUG,
                   "MPEG motion vector out of boundary (%d %d)\n", src_x,
                   src_y);
            return;
        }
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    /* FIXME use this for field pix too instead of the obnoxious hack which
     * changes picture.data */
    if (bottom_field) {
        dest_y += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
        s->out_format == FMT_H261) {
        ff_h261_loop_filter(s);
    }
}
/* apply one MPEG motion vector to the three components */
static void mpeg_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        int field_select, uint8_t **ref_picture,
                        op_pixels_func (*pix_op)[4],
                        int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

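/* Same as mpeg_motion(), but for one field of a field-based (interlaced)
 * prediction; bottom_field selects which half of the destination is written. */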
static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              int bottom_field, int field_select,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4],
                              int motion_x, int motion_y, int h, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}

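/* Blend the five overlapping 8x8 predictions (mid, top, left, right, bottom)
 * into dst. Each OBMC_FILTER invocation below produces one output pixel; the
 * five weights always sum to 8, so the result is a rounded weighted average. */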
// FIXME move to dsputil, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
{
    int x;
    uint8_t *const top = src[1];
    uint8_t *const left = src[2];
    uint8_t *const mid = src[0];
    uint8_t *const right = src[3];
    uint8_t *const bottom = src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x] = (t * top[x] + l * left[x] + m * mid[x] + r * right[x] + b * bottom[x] + 4) >> 3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x,              t, l, m, r, b);\
    OBMC_FILTER(x + 1,          t, l, m, r, b);\
    OBMC_FILTER(x + stride,     t, l, m, r, b);\
    OBMC_FILTER(x + 1 + stride, t, l, m, r, b);

    x = 0;
    OBMC_FILTER (x,     2, 2, 4, 0, 0);
    OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
    x += stride;
    OBMC_FILTER (x,     1, 2, 5, 0, 0);
    OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
    x += stride;
    OBMC_FILTER4(x,     1, 2, 5, 0, 0);
    OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
    x += 2 * stride;
    OBMC_FILTER4(x,     0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
    x += 2 * stride;
    OBMC_FILTER (x,     0, 2, 5, 0, 1);
    OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
    x += stride;
    OBMC_FILTER (x,     0, 2, 4, 0, 2);
    OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
}

/* OBMC for one 8x8 luma block */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
                               int16_t mv[5][2] /* mid top left right bottom */)
#define MID 0
{
    int i;
    uint8_t *ptr[5];

    assert(s->quarter_sample == 0);

    for (i = 0; i < 5; i++) {
        if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
            ptr[i] = ptr[MID];
        } else {
            ptr[i] = s->obmc_scratchpad + 8 * (i & 1) +
                     s->linesize * 8 * (i >> 1);
            hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}

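/* Quarter-pel motion compensation of a 16x16 (or 16x8 field) luma block.
 * Chroma is still predicted with half-pel accuracy; the FF_BUG_QPEL_CHROMA*
 * branches mimic the (incorrect) chroma rounding of certain old encoders so
 * their streams decode without drift. */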
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based, int bottom_field,
                               int field_select, uint8_t **ref_picture,
                               op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t linesize, uvlinesize;

    dxy = ((motion_y & 3) << 2) | (motion_x & 3);

    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;

    if (field_based) {
        mx = motion_x / 2;
        my = motion_y >> 1;
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
        static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
        mx = (motion_x >> 1) + rtab[motion_x & 7];
        my = (motion_y >> 1) + rtab[motion_y & 7];
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
        mx = (motion_x >> 1) | (motion_x & 1);
        my = (motion_y >> 1) | (motion_y & 1);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
    }
    mx = (mx >> 1) | (mx & 1);
    my = (my >> 1) | (my & 1);

    uvdxy = (mx & 1) | ((my & 1) << 1);
    mx >>= 1;
    my >>= 1;

    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 3) - 16, 0) ||
        (unsigned)src_y > FFMAX(v_edge_pos - (motion_y & 3) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    if (!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else {
        if (bottom_field) {
            dest_y += s->linesize;
            dest_cb += s->uvlinesize;
            dest_cr += s->uvlinesize;
        }

        if (field_select) {
            ptr_y += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        // damn interlaced mode
        // FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y, ptr_y, linesize);
        qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
    }
    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}

/**
 * H.263 chroma 4MV motion compensation.
 */
static void chroma_4mv_motion(MpegEncContext *s,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func *pix_op,
                              int mx, int my)
{
    uint8_t *ptr;
    int src_x, src_y, dxy, emu = 0;
    ptrdiff_t offset;

    /* In the 8x8 case we construct a single chroma motion vector
     * with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    dxy = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, (s->width >> 1));
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = av_clip(src_y, -8, (s->height >> 1));
    if (src_y == (s->height >> 1))
        dxy &= ~2;

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if (s->flags & CODEC_FLAG_EMU_EDGE) {
        if ((unsigned)src_x > FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 8, 0) ||
            (unsigned)src_y > FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 8, 0)) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9, src_x, src_y,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr = s->edge_emu_buffer;
            emu = 1;
        }
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}

static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
{
    /* fetch the pixels for the estimated MV 4 macroblocks ahead;
     * optimized for 64-byte cache lines */
    const int shift = s->quarter_sample ? 2 : 1;
    const int mx = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
    const int my = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
    int off = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;

    s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
    off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
    s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
}

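/* Overlapped block motion compensation (as used by H.263 advanced
 * prediction): gather the motion vectors of the current and neighbouring 8x8
 * blocks into mv_cache, replacing unavailable neighbours (picture border,
 * intra blocks) with the current vector, then blend the overlapping
 * predictions block by block. Chroma uses one vector derived from the four
 * luma vectors via the H.263 chroma rounding rule. */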
static inline void apply_obmc(MpegEncContext *s,
                              uint8_t *dest_y,
                              uint8_t *dest_cb,
                              uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4])
{
    LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
    Picture *cur_frame = &s->current_picture;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    const int xy = mb_x + mb_y * s->mb_stride;
    const int mot_stride = s->b8_stride;
    const int mot_xy = mb_x * 2 + mb_y * 2 * mot_stride;
    int mx, my, i;

    assert(!s->mb_skipped);

    AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
    AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);

    AV_COPY32(mv_cache[2][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[2][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    AV_COPY32(mv_cache[3][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[3][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
        AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
        AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
    } else {
        AV_COPY32(mv_cache[0][1],
                  cur_frame->motion_val[0][mot_xy - mot_stride]);
        AV_COPY32(mv_cache[0][2],
                  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
    }

    if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
        AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
        AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
    } else {
        AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
        AV_COPY32(mv_cache[2][0],
                  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
    }

    if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
        AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
        AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
    } else {
        AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
        AV_COPY32(mv_cache[2][3],
                  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
    }

    mx = 0;
    my = 0;
    for (i = 0; i < 4; i++) {
        const int x = (i & 1) + 1;
        const int y = (i >> 1) + 1;
        int16_t mv[5][2] = {
            { mv_cache[y][x][0],     mv_cache[y][x][1]     },
            { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
            { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
            { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
            { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
        };
        // FIXME cleanup
        obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                    ref_picture[0],
                    mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
                    pix_op[1],
                    mv);

        mx += mv[0][0];
        my += mv[0][1];
    }
    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1],
                          mx, my);
}

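/* 4MV mode: apply one motion vector per 8x8 luma block, using quarter-pel or
 * half-pel interpolation depending on s->quarter_sample. The chroma planes
 * use a single vector derived from the four luma vectors in
 * chroma_4mv_motion(). */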
static inline void apply_8x8(MpegEncContext *s,
                             uint8_t *dest_y,
                             uint8_t *dest_cb,
                             uint8_t *dest_cr,
                             int dir,
                             uint8_t **ref_picture,
                             qpel_mc_func (*qpix_op)[16],
                             op_pixels_func (*pix_op)[4])
{
    int dxy, mx, my, src_x, src_y;
    int i;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    uint8_t *ptr, *dest;

    mx = 0;
    my = 0;
    if (s->quarter_sample) {
        for (i = 0; i < 4; i++) {
            int motion_x = s->mv[dir][i][0];
            int motion_y = s->mv[dir][i][1];

            dxy = ((motion_y & 3) << 2) | (motion_x & 3);
            src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
            src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;

            /* WARNING: do not forget half pels */
            src_x = av_clip(src_x, -16, s->width);
            if (src_x == s->width)
                dxy &= ~3;
            src_y = av_clip(src_y, -16, s->height);
            if (src_y == s->height)
                dxy &= ~12;

            ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
            if (s->flags & CODEC_FLAG_EMU_EDGE) {
                if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 3) - 8, 0) ||
                    (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 3) - 8, 0)) {
                    s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                             s->linesize, s->linesize,
                                             9, 9,
                                             src_x, src_y,
                                             s->h_edge_pos,
                                             s->v_edge_pos);
                    ptr = s->edge_emu_buffer;
                }
            }
            dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
            qpix_op[1][dxy](dest, ptr, s->linesize);

            mx += s->mv[dir][i][0] / 2;
            my += s->mv[dir][i][1] / 2;
        }
    } else {
        for (i = 0; i < 4; i++) {
            hpel_motion(s,
                        dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8,
                        mb_y * 16 + (i >> 1) * 8,
                        pix_op[1],
                        s->mv[dir][i][0],
                        s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }
    }

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1], mx, my);
}

/**
 * Motion compensation of a single macroblock.
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * The motion vectors are taken from s->mv and the MV type from s->mv_type.
 */
static av_always_inline void MPV_motion_internal(MpegEncContext *s,
                                                 uint8_t *dest_y,
                                                 uint8_t *dest_cb,
                                                 uint8_t *dest_cr,
                                                 int dir,
                                                 uint8_t **ref_picture,
                                                 op_pixels_func (*pix_op)[4],
                                                 qpel_mc_func (*qpix_op)[16],
                                                 int is_mpeg12)
{
    int i;
    int mb_y = s->mb_y;

    prefetch_motion(s, ref_picture, dir);

    if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
        apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
        return;
    }

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        if (s->mcsel) {
            if (s->real_sprite_warping_points == 1) {
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            } else {
                gmc_motion(s, dest_y, dest_cb, dest_cr,
                           ref_picture);
            }
        } else if (!is_mpeg12 && s->quarter_sample) {
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
                   s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else {
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y);
        }
        break;
    case MV_TYPE_8X8:
        if (!is_mpeg12)
            apply_8x8(s, dest_y, dest_cb, dest_cr,
                      dir, ref_picture, qpix_op, pix_op);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            if (!is_mpeg12 && s->quarter_sample) {
                for (i = 0; i < 2; i++)
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
            } else {
                /* top field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  0, s->field_select[dir][0],
                                  ref_picture, pix_op,
                                  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
                /* bottom field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  1, s->field_select[dir][1],
                                  ref_picture, pix_op,
                                  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
            }
        } else {
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 16 * i,
                        8, mb_y >> 1);

            dest_y += 16 * s->linesize;
            dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++)
                    mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                      j, j ^ i, ref_picture, pix_op,
                                      s->mv[dir][2 * i + j][0],
                                      s->mv[dir][2 * i + j][1], 8, mb_y);
                pix_op = s->hdsp.avg_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            s->picture_structure != i + 1,
                            ref_picture, pix_op,
                            s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                            16, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->hdsp.avg_pixels_tab;

                /* the opposite parity is always in the same frame if this is
                 * the second field */
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default: assert(0);
    }
}

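/* Public entry point: in !CONFIG_SMALL builds MPV_motion_internal() is
 * instantiated twice, once specialized for MPEG-1/2 and once for the generic
 * case, so the per-codec branches can be resolved at compile time. */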
void ff_MPV_motion(MpegEncContext *s,
                   uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int dir,
                   uint8_t **ref_picture,
                   op_pixels_func (*pix_op)[4],
                   qpel_mc_func (*qpix_op)[16])
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 1);
    else
#endif
        MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 0);
}