mpevideo_enc: disallow multiple slices for h261 and flv
[libav.git] / libavcodec / mpegvideo_enc.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include <stdint.h>
31
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timer.h"
38 #include "avcodec.h"
39 #include "dct.h"
40 #include "idctdsp.h"
41 #include "mpeg12.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
44 #include "h261.h"
45 #include "h263.h"
46 #include "h263data.h"
47 #include "mjpegenc_common.h"
48 #include "mathops.h"
49 #include "mpegutils.h"
50 #include "mjpegenc.h"
51 #include "msmpeg4.h"
52 #include "pixblockdsp.h"
53 #include "qpeldsp.h"
54 #include "faandct.h"
55 #include "thread.h"
56 #include "aandcttab.h"
57 #include "flv.h"
58 #include "mpeg4video.h"
59 #include "internal.h"
60 #include "bytestream.h"
61 #include "wmv2.h"
62 #include "rv10.h"
63 #include <limits.h>
64
65 #define QUANT_BIAS_SHIFT 8
66
67 #define QMAT_SHIFT_MMX 16
68 #define QMAT_SHIFT 22
69
70 static int encode_picture(MpegEncContext *s, int picture_number);
71 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
72 static int sse_mb(MpegEncContext *s);
73 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
74 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
75
76 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
77 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
78
/* Generic AVOption table shared by all mpegvideo-based encoders; the
 * actual option entries are expanded from the FF_MPV_COMMON_OPTS macro
 * (defined in mpegvideo.h). */
const AVOption ff_mpv_generic_options[] = {
    FF_MPV_COMMON_OPTS
    { NULL },
};
83
/**
 * Precompute the fixed-point quantization tables for every qscale in
 * [qmin, qmax] from a reference quantization matrix.
 *
 * @param qmat         per-qscale 32-bit reciprocal table (used by the C /
 *                     trellis quantizers)
 * @param qmat16       per-qscale 16-bit reciprocal + bias table (used by the
 *                     SIMD quantizer); only filled for DCTs other than
 *                     islow/faan/ifast
 * @param quant_matrix reference matrix in natural (zigzag-source) order;
 *                     entries are remapped through idsp.idct_permutation
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param intra        1 to skip the DC coefficient in the overflow scan
 *
 * The choice of table layout depends on which forward DCT is in use,
 * because ff_fdct_ifast leaves the AAN scale factors in its output and
 * they must be folded into the reciprocals here.
 */
void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    FDCTDSPContext *fdsp = &s->fdsp;
    int qscale;
    int shift = 0;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
#if CONFIG_FAANDCT
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale * quant_matrix[j];
                /* NOTE(review): the comment below appears copied from the
                 * ifast branch; here den carries no ff_aanscales factor. */
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */

                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
            }
        } else if (fdsp->fdct == ff_fdct_ifast) {
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                /* fold the AAN post-scale into the reciprocal */
                int64_t den = ff_aanscales[i] * (int64_t) qscale * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */

                qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / den);
            }
        } else {
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale * quant_matrix[j];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                 * Assume x = qscale * quant_matrix[i]
                 * So 16 <= x <= 7905
                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
                 * so 32768 >= (1 << 19) / (x) >= 67 */
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
                //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
                //                    (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / den;

                /* clamp away 0 and 0x8000, which the 16-bit SIMD quantizer
                 * cannot represent safely */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
                                qmat16[qscale][0][i]);
            }
        }

        /* detect whether level * qmat can overflow 32 bits at this shift */
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    if (shift) {
        av_log(NULL, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
162
163 static inline void update_qscale(MpegEncContext *s)
164 {
165 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
166 (FF_LAMBDA_SHIFT + 7);
167 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
168
169 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
170 FF_LAMBDA_SHIFT;
171 }
172
173 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
174 {
175 int i;
176
177 if (matrix) {
178 put_bits(pb, 1, 1);
179 for (i = 0; i < 64; i++) {
180 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
181 }
182 } else
183 put_bits(pb, 1, 0);
184 }
185
186 /**
187 * init s->current_picture.qscale_table from s->lambda_table
188 */
189 void ff_init_qscale_tab(MpegEncContext *s)
190 {
191 int8_t * const qscale_table = s->current_picture.qscale_table;
192 int i;
193
194 for (i = 0; i < s->mb_num; i++) {
195 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
196 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
197 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
198 s->avctx->qmax);
199 }
200 }
201
/**
 * Propagate the per-picture coding state from the main context to a
 * slice-thread duplicate context after motion estimation, so the slice
 * encoders see the decisions made on the shared context.
 */
static void update_duplicate_context_after_me(MpegEncContext *dst,
                                              MpegEncContext *src)
{
#define COPY(a) dst->a= src->a
    COPY(pict_type);
    COPY(current_picture);
    COPY(f_code);
    COPY(b_code);
    COPY(qscale);
    COPY(lambda);
    COPY(lambda2);
    COPY(picture_in_gop_number);
    COPY(gop_picture_number);
    COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
    COPY(progressive_frame);    // FIXME don't set in encode_header
    COPY(partitioned_frame);    // FIXME don't set in encode_header
#undef COPY
}
220
221 /**
222 * Set the given MpegEncContext to defaults for encoding.
223 * the changed fields will not depend upon the prior state of the MpegEncContext.
224 */
225 static void mpv_encode_defaults(MpegEncContext *s)
226 {
227 int i;
228 ff_mpv_common_defaults(s);
229
230 for (i = -16; i < 16; i++) {
231 default_fcode_tab[i + MAX_MV] = 1;
232 }
233 s->me.mv_penalty = default_mv_penalty;
234 s->fcode_tab = default_fcode_tab;
235
236 s->input_picture_number = 0;
237 s->picture_in_gop_number = 0;
238 }
239
/**
 * Initialize the mpegvideo encoder: validate the user's options against
 * the selected codec, set up codec-specific output formats, allocate the
 * quantization / statistics tables and initialize the DSP, motion
 * estimation and rate-control subsystems.
 *
 * @return 0 on success, a negative value on any validation or
 *         allocation failure.
 */
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i, ret, format_supported;

    mpv_encode_defaults(s);

    /* ---- pixel-format validation per codec ---- */
    switch (avctx->codec_id) {
    case AV_CODEC_ID_MPEG2VIDEO:
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
            avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
            av_log(avctx, AV_LOG_ERROR,
                   "only YUV420 and YUV422 are supported\n");
            return -1;
        }
        break;
    case AV_CODEC_ID_MJPEG:
        format_supported = 0;
        /* JPEG color space */
        if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
            avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
            (avctx->color_range == AVCOL_RANGE_JPEG &&
             (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
              avctx->pix_fmt == AV_PIX_FMT_YUV422P)))
            format_supported = 1;
        /* MPEG color space */
        else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
                 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
                  avctx->pix_fmt == AV_PIX_FMT_YUV422P))
            format_supported = 1;

        if (!format_supported) {
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
            return -1;
        }
        break;
    default:
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
            return -1;
        }
    }

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
        break;
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUV420P:
    default:
        s->chroma_format = CHROMA_420;
        break;
    }

    /* ---- copy user settings into the private context ---- */
    s->bit_rate = avctx->bit_rate;
    s->width = avctx->width;
    s->height = avctx->height;
    if (avctx->gop_size > 600 &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_ERROR,
               "Warning keyframe interval too large! reducing it ...\n");
        avctx->gop_size = 600;
    }
    s->gop_size = avctx->gop_size;
    s->avctx = avctx;
    /* NOTE(review): this only warns; max_b_frames is NOT clamped below,
     * so a value above MAX_B_FRAMES is still stored — confirm callers
     * reject it or clamp here. */
    if (avctx->max_b_frames > MAX_B_FRAMES) {
        av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
               "is %d.\n", MAX_B_FRAMES);
    }
    s->max_b_frames = avctx->max_b_frames;
    s->codec_id = avctx->codec->id;
    s->strict_std_compliance = avctx->strict_std_compliance;
    s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
    s->mpeg_quant = avctx->mpeg_quant;
    s->rtp_mode = !!avctx->rtp_payload_size;
    s->intra_dc_precision = avctx->intra_dc_precision;
    s->user_specified_pts = AV_NOPTS_VALUE;

    if (s->gop_size <= 1) {
        s->intra_only = 1;
        s->gop_size = 12;
    } else {
        s->intra_only = 0;
    }

#if FF_API_MOTION_EST
    FF_DISABLE_DEPRECATION_WARNINGS
    s->me_method = avctx->me_method;
    FF_ENABLE_DEPRECATION_WARNINGS
#endif

    /* Fixed QSCALE */
    s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);

#if FF_API_MPV_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->border_masking != 0.0)
        s->border_masking = avctx->border_masking;
    FF_ENABLE_DEPRECATION_WARNINGS
#endif

    /* adaptive quantization is needed whenever any masking option or
     * QP-RD is active, and is meaningless with a fixed qscale */
    s->adaptive_quant = (s->avctx->lumi_masking ||
                         s->avctx->dark_masking ||
                         s->avctx->temporal_cplx_masking ||
                         s->avctx->spatial_cplx_masking ||
                         s->avctx->p_masking ||
                         s->border_masking ||
                         (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
                        !s->fixed_qscale;

    s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);

    /* ---- rate-control option validation ---- */
    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        av_log(avctx, AV_LOG_ERROR,
               "a vbv buffer size is needed, "
               "for encoding with a maximum bitrate\n");
        return -1;
    }

    if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
    }

    if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
        return -1;
    }

    /* NOTE(review): fatal error but logged at INFO level — the level looks
     * inconsistent with the ERROR case just above. */
    if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
        av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
        return -1;
    }

    if (avctx->rc_max_rate &&
        avctx->rc_max_rate == avctx->bit_rate &&
        avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "impossible bitrate constraints, this will fail\n");
    }

    if (avctx->rc_buffer_size &&
        avctx->bit_rate * (int64_t)avctx->time_base.num >
            avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
        return -1;
    }

    if (!s->fixed_qscale &&
        avctx->bit_rate * av_q2d(avctx->time_base) >
            avctx->bit_rate_tolerance) {
        av_log(avctx, AV_LOG_ERROR,
               "bitrate tolerance too small for bitrate\n");
        return -1;
    }

    /* MPEG-1/2 vbv_delay is a 16-bit 90 kHz field; warn when the buffer
     * cannot be expressed and VBR signaling (0xFFFF) will be used */
    if (s->avctx->rc_max_rate &&
        s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
        (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
        90000LL * (avctx->rc_buffer_size - 1) >
            s->avctx->rc_max_rate * 0xFFFFLL) {
        av_log(avctx, AV_LOG_INFO,
               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
               "specified vbv buffer is too large for the given bitrate!\n");
    }

    /* ---- feature/codec compatibility checks ---- */
    if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
        s->codec_id != AV_CODEC_ID_FLV1) {
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
        return -1;
    }

    if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
        av_log(avctx, AV_LOG_ERROR,
               "OBMC is only supported with simple mb decision\n");
        return -1;
    }

    if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
        return -1;
    }

    if (s->max_b_frames &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
        return -1;
    }

    if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
         s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->sample_aspect_ratio.num > 255 ||
         avctx->sample_aspect_ratio.den > 255)) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        return -1;
    }

    if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
        s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
        return -1;
    }

    // FIXME mpeg2 uses that too
    if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR,
               "mpeg2 style quantization not supported by codec\n");
        return -1;
    }

    if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
        return -1;
    }

    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        s->avctx->mb_decision != FF_MB_DECISION_RD) {
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
        return -1;
    }

    if (s->avctx->scenechange_threshold < 1000000000 &&
        (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
        av_log(avctx, AV_LOG_ERROR,
               "closed gop with scene change detection are not supported yet, "
               "set threshold to 1000000000\n");
        return -1;
    }

    if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
        if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
            av_log(avctx, AV_LOG_ERROR,
                   "low delay forcing is only available for mpeg2\n");
            return -1;
        }
        if (s->max_b_frames != 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "b frames cannot be used with low delay\n");
            return -1;
        }
    }

    if (s->q_scale_type == 1) {
        if (avctx->qmax > 12) {
            av_log(avctx, AV_LOG_ERROR,
                   "non linear quant only supports qmax <= 12 currently\n");
            return -1;
        }
    }

    /* H.261 and FLV bitstreams have no per-slice resync structure the
     * slice threads could target */
    if (avctx->slices > 1 &&
        (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
        av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
        return AVERROR(EINVAL);
    }

    if (s->avctx->thread_count > 1 &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
        (s->codec_id != AV_CODEC_ID_H263P)) {
        av_log(avctx, AV_LOG_ERROR,
               "multi threaded encoding not supported by codec\n");
        return -1;
    }

    if (s->avctx->thread_count < 1) {
        av_log(avctx, AV_LOG_ERROR,
               "automatic thread number detection not supported by codec,"
               "patch welcome\n");
        return -1;
    }

    /* slice threading requires resync markers, i.e. rtp_mode */
    if (s->avctx->thread_count > 1)
        s->rtp_mode = 1;

    if (!avctx->time_base.den || !avctx->time_base.num) {
        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
        return -1;
    }

    if (avctx->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
        av_log(avctx, AV_LOG_INFO,
               "notice: b_frame_strategy only affects the first pass\n");
        avctx->b_frame_strategy = 0;
    }

    i = av_gcd(avctx->time_base.den, avctx->time_base.num);
    if (i > 1) {
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;
        //return -1;
    }

    /* default quantizer rounding biases; MPEG-style codecs round up
     * intra coefficients, H.263-style codecs round inter ones down */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
        // (a + x * 3 / 8) / x
        s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
        s->inter_quant_bias = 0;
    } else {
        s->intra_quant_bias = 0;
        // (a - x / 4) / x
        s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
    }

#if FF_API_QUANT_BIAS
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias = avctx->intra_quant_bias;
    if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias = avctx->inter_quant_bias;
    FF_ENABLE_DEPRECATION_WARNINGS
#endif

    /* MPEG-4 stores the timebase denominator in a 16-bit field */
    if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
        s->avctx->time_base.den > (1 << 16) - 1) {
        av_log(avctx, AV_LOG_ERROR,
               "timebase %d/%d not supported by MPEG 4 standard, "
               "the maximum admitted value for the timebase denominator "
               "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
               (1 << 16) - 1);
        return -1;
    }
    s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;

    /* ---- codec-specific output format setup ---- */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case AV_CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
        s->rtp_mode = 1;
        break;
    case AV_CODEC_ID_MJPEG:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        if (!CONFIG_MJPEG_ENCODER ||
            ff_mjpeg_encode_init(s) < 0)
            return -1;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_H261:
        if (!CONFIG_H261_ENCODER)
            return -1;
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for the "
                   "H.261 codec.\nValid sizes are 176x144, 352x288\n",
                   s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H261;
        avctx->delay = 0;
        s->low_delay = 1;
        s->rtp_mode = 0; /* Sliced encoding not supported */
        break;
    case AV_CODEC_ID_H263:
        if (!CONFIG_H263_ENCODER)
            return -1;
        if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
                             s->width, s->height) == 8) {
            av_log(avctx, AV_LOG_INFO,
                   "The specified picture size of %dx%d is not valid for "
                   "the H.263 codec.\nValid sizes are 128x96, 176x144, "
                   "352x288, 704x576, and 1408x1152."
                   "Try H.263+.\n", s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H263;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->h263_plus = 1;
        /* Fx */
        s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
        s->modified_quant = s->h263_aic;
        s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
        s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;

        /* /Fx */
        /* These are just to be sure */
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_FLV1:
        s->out_format = FMT_H263;
        s->h263_flv = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode = 0; /* don't allow GOB */
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_RV10:
        s->out_format = FMT_H263;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_RV20:
        s->out_format = FMT_H263;
        avctx->delay = 0;
        s->low_delay = 1;
        s->modified_quant = 1;
        s->h263_aic = 1;
        s->h263_plus = 1;
        s->loop_filter = 1;
        s->unrestricted_mv = 0;
        break;
    case AV_CODEC_ID_MPEG4:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->low_delay = s->max_b_frames ? 0 : 1;
        avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case AV_CODEC_ID_MSMPEG4V2:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 2;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_MSMPEG4V3:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 3;
        s->flipflop_rounding = 1;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_WMV1:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 4;
        s->flipflop_rounding = 1;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_WMV2:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 5;
        s->flipflop_rounding = 1;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    default:
        return -1;
    }

    avctx->has_b_frames = !s->low_delay;

    s->encoding = 1;

    s->progressive_frame =
    s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
                                                AV_CODEC_FLAG_INTERLACED_ME) ||
                                s->alternate_scan);

    /* init */
    ff_mpv_idct_init(s);
    if (ff_mpv_common_init(s) < 0)
        return -1;

    if (ARCH_X86)
        ff_mpv_encode_init_x86(s);

    ff_fdctdsp_init(&s->fdsp, avctx);
    ff_me_cmp_init(&s->mecc, avctx);
    ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
    ff_pixblockdsp_init(&s->pdsp, avctx);
    ff_qpeldsp_init(&s->qdsp);

    /* ---- table allocations (jump to fail on OOM) ---- */
    if (s->msmpeg4_version) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                          2 * 2 * (MAX_LEVEL + 1) *
                          (MAX_RUN + 1) * 2 * sizeof(int), fail);
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);

    if (s->avctx->noise_reduction) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                          2 * 64 * sizeof(uint16_t), fail);
    }

    /* ---- quantizer function selection ---- */
    if (CONFIG_H263_ENCODER)
        ff_h263dsp_init(&s->h263dsp);
    if (!s->dct_quantize)
        s->dct_quantize = ff_dct_quantize_c;
    if (!s->denoise_dct)
        s->denoise_dct = denoise_dct_c;
    s->fast_dct_quantize = s->dct_quantize;
    if (avctx->trellis)
        s->dct_quantize = dct_quantize_trellis_c;

    if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
        s->chroma_qscale_table = ff_h263_chroma_qscale_table;

    s->quant_precision = 5;

    ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
    ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp);

    if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
        ff_h263_encode_init(s);
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
        if ((ret = ff_msmpeg4_encode_init(s)) < 0)
            return ret;
    if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
        && s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);

    /* init q matrix */
    for (i = 0; i < 64; i++) {
        int j = s->idsp.idct_permutation[i];
        if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
            s->mpeg_quant) {
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
            s->intra_matrix[j] =
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        } else {
            /* mpeg1/2 */
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        }
        if (s->avctx->intra_matrix)
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
        if (s->avctx->inter_matrix)
            s->inter_matrix[j] = s->avctx->inter_matrix[i];
    }

    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, avctx->qmin,
                          31, 1);
        ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
                          s->inter_matrix, s->inter_quant_bias, avctx->qmin,
                          31, 0);
    }

    if (ff_rate_control_init(s) < 0)
        return -1;

    /* ---- deprecated option migration ---- */
#if FF_API_ERROR_RATE
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->error_rate)
        s->error_rate = avctx->error_rate;
    FF_ENABLE_DEPRECATION_WARNINGS;
#endif

#if FF_API_NORMALIZE_AQP
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
        s->mpv_flags |= FF_MPV_FLAG_NAQ;
    FF_ENABLE_DEPRECATION_WARNINGS;
#endif

#if FF_API_MV0
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->flags & CODEC_FLAG_MV0)
        s->mpv_flags |= FF_MPV_FLAG_MV0;
    FF_ENABLE_DEPRECATION_WARNINGS
#endif

#if FF_API_MPV_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->rc_qsquish != 0.0)
        s->rc_qsquish = avctx->rc_qsquish;
    if (avctx->rc_qmod_amp != 0.0)
        s->rc_qmod_amp = avctx->rc_qmod_amp;
    if (avctx->rc_qmod_freq)
        s->rc_qmod_freq = avctx->rc_qmod_freq;
    if (avctx->rc_buffer_aggressivity != 1.0)
        s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
    if (avctx->rc_initial_cplx != 0.0)
        s->rc_initial_cplx = avctx->rc_initial_cplx;
    if (avctx->lmin)
        s->lmin = avctx->lmin;
    if (avctx->lmax)
        s->lmax = avctx->lmax;

    if (avctx->rc_eq) {
        av_freep(&s->rc_eq);
        s->rc_eq = av_strdup(avctx->rc_eq);
        if (!s->rc_eq)
            return AVERROR(ENOMEM);
    }
    FF_ENABLE_DEPRECATION_WARNINGS
#endif

    /* b_frame_strategy == 2 needs downscaled temporary frames for the
     * B-frame decision pass */
    if (avctx->b_frame_strategy == 2) {
        for (i = 0; i < s->max_b_frames + 2; i++) {
            s->tmp_frames[i] = av_frame_alloc();
            if (!s->tmp_frames[i])
                return AVERROR(ENOMEM);

            s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
            s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
            s->tmp_frames[i]->height = s->height >> avctx->brd_scale;

            ret = av_frame_get_buffer(s->tmp_frames[i], 32);
            if (ret < 0)
                return ret;
        }
    }

    return 0;
fail:
    ff_mpv_encode_end(avctx);
    return AVERROR_UNKNOWN;
}
886
/**
 * Free everything allocated by ff_mpv_encode_init(): rate control,
 * common mpegvideo state, codec-specific encoder state and all the
 * per-encoder tables. Safe to call from the init failure path.
 *
 * @return always 0
 */
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i;

    ff_rate_control_uninit(s);

    ff_mpv_common_end(s);
    if (CONFIG_MJPEG_ENCODER &&
        s->out_format == FMT_MJPEG)
        ff_mjpeg_encode_close(s);

    av_freep(&avctx->extradata);

    for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
        av_frame_free(&s->tmp_frames[i]);

    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);

    /* tables allocated in ff_mpv_encode_init() */
    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    return 0;
}
920
/**
 * Sum of absolute errors between a 16x16 pixel block and a constant
 * reference value.
 *
 * @param src    top-left pixel of the block
 * @param ref    value every pixel is compared against
 * @param stride distance in bytes between the starts of two rows
 * @return sum over all 256 pixels of |src[pixel] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int row, col;
    int sum = 0;

    for (row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (col = 0; col < 16; col++) {
            const int diff = line[col] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
934
935 static int get_intra_count(MpegEncContext *s, uint8_t *src,
936 uint8_t *ref, int stride)
937 {
938 int x, y, w, h;
939 int acc = 0;
940
941 w = s->width & ~15;
942 h = s->height & ~15;
943
944 for (y = 0; y < h; y += 16) {
945 for (x = 0; x < w; x += 16) {
946 int offset = x + y * stride;
947 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
948 stride, 16);
949 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
950 int sae = get_sae(src + offset, mean, stride);
951
952 acc += sae + 500 < sad;
953 }
954 }
955 return acc;
956 }
957
/* Thin wrapper around ff_alloc_picture() that fills in the encoder's
 * geometry (chroma shifts, strides, mb layout); `shared` selects reuse
 * of externally owned frame buffers. Returns ff_alloc_picture()'s
 * error code. */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
}
965
/**
 * Queue one input frame for encoding (or flush when pic_arg is NULL).
 *
 * Validates/guesses the frame's pts, stores the frame in an internal
 * Picture — by reference when the caller's buffers and strides match
 * ours ("direct"), otherwise by copying (with edge padding for
 * non-multiple-of-16 dimensions) — and appends it to s->input_picture
 * at the position given by the encoder delay.
 *
 * @param pic_arg input frame, or NULL to flush buffered frames
 * @return 0 on success, a negative error code on failure
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    Picture *pic = NULL;
    int64_t pts;
    int i, display_picture_number = 0, ret;
    /* number of frames the encoder output lags behind the input */
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t time = pts;
                int64_t last = s->user_specified_pts;

                /* pts must be strictly monotonic */
                if (time <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Error, Invalid timestamp=%"PRId64", "
                           "last=%"PRId64"\n", pts, s->user_specified_pts);
                    return -1;
                }

                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = time - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* no pts given: extrapolate from the previous one, or fall
             * back to the display picture number */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* the frame can only be used by reference if its buffers and
         * strides match ours and the size is macroblock aligned */
        if (!pic_arg->buf[0] ||
            pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i = ff_find_unused_picture(s->avctx, s->picture, direct);
        if (i < 0)
            return i;

        pic = &s->picture[i];
        pic->reference = 3;

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                return ret;
        }
        ret = alloc_picture(s, pic, direct);
        if (ret < 0)
            return ret;

        if (!direct) {
            /* if the caller wrote straight into our (offset) buffers,
             * no copy is needed */
            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                // empty
            } else {
                int h_chroma_shift, v_chroma_shift;
                av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                                 &h_chroma_shift,
                                                 &v_chroma_shift);

                /* copy each plane, then pad odd-sized frames out to the
                 * macroblock boundary */
                for (i = 0; i < 3; i++) {
                    int src_stride = pic_arg->linesize[i];
                    int dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = s->width >> h_shift;
                    int h = s->height >> v_shift;
                    uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->f->data[i];

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        memcpy(dst, src, src_stride * h);
                    else {
                        int h2 = h;
                        uint8_t *dst2 = dst;
                        while (h2--) {
                            memcpy(dst2, src, w);
                            dst2 += dst_stride;
                            src += src_stride;
                        }
                    }
                    if ((s->width & 15) || (s->height & 15)) {
                        s->mpvencdsp.draw_edges(dst, dst_stride,
                                                w, h,
                                                16 >> h_shift,
                                                16 >> v_shift,
                                                EDGE_BOTTOM);
                    }
                }
            }
        }
        ret = av_frame_copy_props(pic->f, pic_arg);
        if (ret < 0)
            return ret;

        pic->f->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
    } else {
        /* Flushing: When we have not received enough input frames,
         * ensure s->input_picture[0] contains the first picture */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        if (flush_offset <= 1)
            flush_offset = 1;
        else
            encoding_delay = encoding_delay - flush_offset + 1;
    }

    /* shift buffer entries */
    for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];

    s->input_picture[encoding_delay] = (Picture*) pic;

    return 0;
}
1106
1107 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1108 {
1109 int x, y, plane;
1110 int score = 0;
1111 int64_t score64 = 0;
1112
1113 for (plane = 0; plane < 3; plane++) {
1114 const int stride = p->f->linesize[plane];
1115 const int bw = plane ? 1 : 2;
1116 for (y = 0; y < s->mb_height * bw; y++) {
1117 for (x = 0; x < s->mb_width * bw; x++) {
1118 int off = p->shared ? 0 : 16;
1119 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1120 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1121 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1122
1123 switch (s->avctx->frame_skip_exp) {
1124 case 0: score = FFMAX(score, v); break;
1125 case 1: score += FFABS(v); break;
1126 case 2: score += v * v; break;
1127 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1128 case 4: score64 += v * v * (int64_t)(v * v); break;
1129 }
1130 }
1131 }
1132 }
1133
1134 if (score)
1135 score64 = score;
1136
1137 if (score64 < s->avctx->frame_skip_threshold)
1138 return 1;
1139 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1140 return 1;
1141 return 0;
1142 }
1143
1144 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1145 {
1146 AVPacket pkt = { 0 };
1147 int ret, got_output;
1148
1149 av_init_packet(&pkt);
1150 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1151 if (ret < 0)
1152 return ret;
1153
1154 ret = pkt.size;
1155 av_packet_unref(&pkt);
1156 return ret;
1157 }
1158
1159 static int estimate_best_b_count(MpegEncContext *s)
1160 {
1161 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1162 AVCodecContext *c = avcodec_alloc_context3(NULL);
1163 const int scale = s->avctx->brd_scale;
1164 int i, j, out_size, p_lambda, b_lambda, lambda2;
1165 int64_t best_rd = INT64_MAX;
1166 int best_b_count = -1;
1167
1168 if (!c)
1169 return AVERROR(ENOMEM);
1170 assert(scale >= 0 && scale <= 3);
1171
1172 //emms_c();
1173 //s->next_picture_ptr->quality;
1174 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1175 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1176 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1177 if (!b_lambda) // FIXME we should do this somewhere else
1178 b_lambda = p_lambda;
1179 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1180 FF_LAMBDA_SHIFT;
1181
1182 c->width = s->width >> scale;
1183 c->height = s->height >> scale;
1184 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1185 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1186 c->mb_decision = s->avctx->mb_decision;
1187 c->me_cmp = s->avctx->me_cmp;
1188 c->mb_cmp = s->avctx->mb_cmp;
1189 c->me_sub_cmp = s->avctx->me_sub_cmp;
1190 c->pix_fmt = AV_PIX_FMT_YUV420P;
1191 c->time_base = s->avctx->time_base;
1192 c->max_b_frames = s->max_b_frames;
1193
1194 if (avcodec_open2(c, codec, NULL) < 0)
1195 return -1;
1196
1197 for (i = 0; i < s->max_b_frames + 2; i++) {
1198 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1199 s->next_picture_ptr;
1200
1201 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1202 pre_input = *pre_input_ptr;
1203
1204 if (!pre_input.shared && i) {
1205 pre_input.f->data[0] += INPLACE_OFFSET;
1206 pre_input.f->data[1] += INPLACE_OFFSET;
1207 pre_input.f->data[2] += INPLACE_OFFSET;
1208 }
1209
1210 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1211 s->tmp_frames[i]->linesize[0],
1212 pre_input.f->data[0],
1213 pre_input.f->linesize[0],
1214 c->width, c->height);
1215 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1216 s->tmp_frames[i]->linesize[1],
1217 pre_input.f->data[1],
1218 pre_input.f->linesize[1],
1219 c->width >> 1, c->height >> 1);
1220 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1221 s->tmp_frames[i]->linesize[2],
1222 pre_input.f->data[2],
1223 pre_input.f->linesize[2],
1224 c->width >> 1, c->height >> 1);
1225 }
1226 }
1227
1228 for (j = 0; j < s->max_b_frames + 1; j++) {
1229 int64_t rd = 0;
1230
1231 if (!s->input_picture[j])
1232 break;
1233
1234 c->error[0] = c->error[1] = c->error[2] = 0;
1235
1236 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1237 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1238
1239 out_size = encode_frame(c, s->tmp_frames[0]);
1240
1241 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1242
1243 for (i = 0; i < s->max_b_frames + 1; i++) {
1244 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1245
1246 s->tmp_frames[i + 1]->pict_type = is_p ?
1247 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1248 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1249
1250 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1251
1252 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1253 }
1254
1255 /* get the delayed frames */
1256 while (out_size) {
1257 out_size = encode_frame(c, NULL);
1258 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1259 }
1260
1261 rd += c->error[0] + c->error[1] + c->error[2];
1262
1263 if (rd < best_rd) {
1264 best_rd = rd;
1265 best_b_count = j;
1266 }
1267 }
1268
1269 avcodec_close(c);
1270 av_freep(&c);
1271
1272 return best_b_count;
1273 }
1274
/**
 * Pick the next picture to encode from the input queue, decide its coded
 * type (I/P/B) and the number of B-frames preceding the next reference,
 * and set up s->new_picture / s->current_picture accordingly.
 *
 * @return 0 on success, a negative error code on failure
 */
static int select_input_picture(MpegEncContext *s)
{
    int i, ret;

    /* shift the reorder queue: slot 0 was consumed by the previous call */
    for (i = 1; i < MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;

    /* set next picture type & ordering */
    if (!s->reordered_input_picture[0] && s->input_picture[0]) {
        if (/*s->picture_in_gop_number >= s->gop_size ||*/
            !s->next_picture_ptr || s->intra_only) {
            /* no reference available (or intra-only coding): force an I-frame */
            s->reordered_input_picture[0] = s->input_picture[0];
            s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
            s->reordered_input_picture[0]->f->coded_picture_number =
                s->coded_picture_number++;
        } else {
            int b_frames;

            /* frame-skip decision: drop the frame if it barely differs from
             * the last reference */
            if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
                if (s->picture_in_gop_number < s->gop_size &&
                    skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
                    // FIXME check that te gop check above is +-1 correct
                    av_frame_unref(s->input_picture[0]->f);

                    emms_c();
                    ff_vbv_update(s, 0);

                    goto no_output_pic;
                }
            }

            /* two-pass mode: take the picture types recorded in the first
             * pass from the rate-control log */
            if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    int pict_num = s->input_picture[0]->f->display_picture_number + i;

                    if (pict_num >= s->rc_context.num_entries)
                        break;
                    if (!s->input_picture[i]) {
                        /* queue ran out: make the previous entry a P-frame */
                        s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                        break;
                    }

                    s->input_picture[i]->f->pict_type =
                        s->rc_context.entry[pict_num].new_pict_type;
                }
            }

            /* choose how many B-frames precede the next reference frame */
            if (s->avctx->b_frame_strategy == 0) {
                /* strategy 0: always use the maximum available */
                b_frames = s->max_b_frames;
                while (b_frames && !s->input_picture[b_frames])
                    b_frames--;
            } else if (s->avctx->b_frame_strategy == 1) {
                /* strategy 1: stop at the first frame with too much
                 * intra-like content (cheap scene-change heuristic) */
                for (i = 1; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] &&
                        s->input_picture[i]->b_frame_score == 0) {
                        s->input_picture[i]->b_frame_score =
                            get_intra_count(s,
                                            s->input_picture[i    ]->f->data[0],
                                            s->input_picture[i - 1]->f->data[0],
                                            s->linesize) + 1;
                    }
                }
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    if (!s->input_picture[i] ||
                        s->input_picture[i]->b_frame_score - 1 >
                            s->mb_num / s->avctx->b_sensitivity)
                        break;
                }

                b_frames = FFMAX(0, i - 1);

                /* reset scores */
                for (i = 0; i < b_frames + 1; i++) {
                    s->input_picture[i]->b_frame_score = 0;
                }
            } else if (s->avctx->b_frame_strategy == 2) {
                /* strategy 2: exhaustive low-resolution trial encodes */
                b_frames = estimate_best_b_count(s);
            } else {
                av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
                b_frames = 0;
            }

            emms_c();

            /* a pre-forced non-B type (e.g. from pass 2) cuts the B-run short */
            for (i = b_frames - 1; i >= 0; i--) {
                int type = s->input_picture[i]->f->pict_type;
                if (type && type != AV_PICTURE_TYPE_B)
                    b_frames = i;
            }
            if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
                b_frames == s->max_b_frames) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "warning, too many b frames in a row\n");
            }

            /* GOP boundary handling: either shorten the B-run (strict GOP)
             * or promote the reference to an I-frame */
            if (s->picture_in_gop_number + b_frames >= s->gop_size) {
                if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                    s->gop_size > s->picture_in_gop_number) {
                    b_frames = s->gop_size - s->picture_in_gop_number - 1;
                } else {
                    if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
                        b_frames = 0;
                    s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
                }
            }

            /* a closed GOP must not have B-frames referencing across the
             * leading I-frame */
            if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
                s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
                b_frames--;

            /* reference frame is coded first, the B-frames follow it */
            s->reordered_input_picture[0] = s->input_picture[b_frames];
            if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
                s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
            s->reordered_input_picture[0]->f->coded_picture_number =
                s->coded_picture_number++;
            for (i = 0; i < b_frames; i++) {
                s->reordered_input_picture[i + 1] = s->input_picture[i];
                s->reordered_input_picture[i + 1]->f->pict_type =
                    AV_PICTURE_TYPE_B;
                s->reordered_input_picture[i + 1]->f->coded_picture_number =
                    s->coded_picture_number++;
            }
        }
    }
no_output_pic:
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);

    if (s->reordered_input_picture[0]) {
        /* B-frames are never used as references */
        s->reordered_input_picture[0]->reference =
            s->reordered_input_picture[0]->f->pict_type !=
                AV_PICTURE_TYPE_B ? 3 : 0;

        if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
            return ret;

        if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
            // input is a shared pix, so we can't modifiy it -> alloc a new
            // one & ensure that the shared one is reuseable

            Picture *pic;
            int i = ff_find_unused_picture(s->avctx, s->picture, 0);
            if (i < 0)
                return i;
            pic = &s->picture[i];

            pic->reference = s->reordered_input_picture[0]->reference;
            if (alloc_picture(s, pic, 0) < 0) {
                return -1;
            }

            ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
            if (ret < 0)
                return ret;

            /* mark us unused / free shared pic */
            av_frame_unref(s->reordered_input_picture[0]->f);
            s->reordered_input_picture[0]->shared = 0;

            s->current_picture_ptr = pic;
        } else {
            // input is not a shared pix -> reuse buffer for current_pix
            s->current_picture_ptr = s->reordered_input_picture[0];
            for (i = 0; i < 4; i++) {
                /* undo the INPLACE offset applied when the frame was loaded */
                s->new_picture.f->data[i] += INPLACE_OFFSET;
            }
        }
        ff_mpeg_unref_picture(s->avctx, &s->current_picture);
        if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                       s->current_picture_ptr)) < 0)
            return ret;

        s->picture_number = s->new_picture.f->display_picture_number;
    }
    return 0;
}
1451
1452 static void frame_end(MpegEncContext *s)
1453 {
1454 int i;
1455
1456 if (s->unrestricted_mv &&
1457 s->current_picture.reference &&
1458 !s->intra_only) {
1459 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1460 int hshift = desc->log2_chroma_w;
1461 int vshift = desc->log2_chroma_h;
1462 s->mpvencdsp.draw_edges(s->current_picture.f->data[0], s->linesize,
1463 s->h_edge_pos, s->v_edge_pos,
1464 EDGE_WIDTH, EDGE_WIDTH,
1465 EDGE_TOP | EDGE_BOTTOM);
1466 s->mpvencdsp.draw_edges(s->current_picture.f->data[1], s->uvlinesize,
1467 s->h_edge_pos >> hshift,
1468 s->v_edge_pos >> vshift,
1469 EDGE_WIDTH >> hshift,
1470 EDGE_WIDTH >> vshift,
1471 EDGE_TOP | EDGE_BOTTOM);
1472 s->mpvencdsp.draw_edges(s->current_picture.f->data[2], s->uvlinesize,
1473 s->h_edge_pos >> hshift,
1474 s->v_edge_pos >> vshift,
1475 EDGE_WIDTH >> hshift,
1476 EDGE_WIDTH >> vshift,
1477 EDGE_TOP | EDGE_BOTTOM);
1478 }
1479
1480 emms_c();
1481
1482 s->last_pict_type = s->pict_type;
1483 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1484 if (s->pict_type!= AV_PICTURE_TYPE_B)
1485 s->last_non_b_pict_type = s->pict_type;
1486
1487 if (s->encoding) {
1488 /* release non-reference frames */
1489 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1490 if (!s->picture[i].reference)
1491 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1492 }
1493 }
1494
1495 #if FF_API_CODED_FRAME
1496 FF_DISABLE_DEPRECATION_WARNINGS
1497 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1498 FF_ENABLE_DEPRECATION_WARNINGS
1499 #endif
1500 #if FF_API_ERROR_FRAME
1501 FF_DISABLE_DEPRECATION_WARNINGS
1502 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1503 sizeof(s->current_picture.encoding_error));
1504 FF_ENABLE_DEPRECATION_WARNINGS
1505 #endif
1506 }
1507
1508 static void update_noise_reduction(MpegEncContext *s)
1509 {
1510 int intra, i;
1511
1512 for (intra = 0; intra < 2; intra++) {
1513 if (s->dct_count[intra] > (1 << 16)) {
1514 for (i = 0; i < 64; i++) {
1515 s->dct_error_sum[intra][i] >>= 1;
1516 }
1517 s->dct_count[intra] >>= 1;
1518 }
1519
1520 for (i = 0; i < 64; i++) {
1521 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1522 s->dct_count[intra] +
1523 s->dct_error_sum[intra][i] / 2) /
1524 (s->dct_error_sum[intra][i] + 1);
1525 }
1526 }
1527 }
1528
/**
 * Prepare encoder state for the picture about to be encoded: rotate the
 * last/next reference pointers, take references on the working pictures,
 * select the unquantizers, and refresh noise-reduction tables.
 *
 * @return 0 on success, a negative error code on failure
 */
static int frame_start(MpegEncContext *s)
{
    int ret;

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    s->current_picture_ptr->f->pict_type = s->pict_type;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    /* take a fresh reference on the picture being encoded */
    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* non-B frames become the new forward reference; droppable frames do
     * not advance the next-reference pointer */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }

    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->last_picture);
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->next_picture);
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    /* field pictures: address the selected field and double the strides */
    if (s->picture_structure != PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i]    *= 2;
            s->next_picture.f->linesize[i]    *= 2;
        }
    }

    /* select the matching unquantizer for the output format */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);
    }

    return 0;
}
1600
/**
 * Encode one video frame (main mpegvideo encode entry point).
 *
 * Loads and reorders the input picture, encodes it (retrying with a higher
 * quantizer on VBV overflow), applies stuffing, and patches the MPEG-1/2
 * vbv_delay field for CBR streams.
 *
 * @param avctx      codec context
 * @param pkt        output packet (allocated here if empty)
 * @param pic_arg    input frame, or NULL to flush delayed frames
 * @param got_packet set to 1 if a packet was produced
 * @return 0 on success, a negative error code on failure
 */
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                          const AVFrame *pic_arg, int *got_packet)
{
    MpegEncContext *s = avctx->priv_data;
    int i, stuffing_count, ret;
    int context_count = s->slice_context_count;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_picture.f->data[0]) {
        uint8_t *sd;
        if (!pkt->data &&
            (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
            return ret;
        if (s->mb_info) {
            /* side data carrying per-macroblock positions (H.263 RTP) */
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                                     AV_PKT_DATA_H263_MB_INFO,
                                                     s->mb_width*s->mb_height*12);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        /* give each slice thread a proportional chunk of the packet buffer */
        for (i = 0; i < context_count; i++) {
            int start_y = s->thread_context[i]->start_mb_y;
            int end_y = s->thread_context[i]-> end_mb_y;
            int h = s->mb_height;
            uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
            uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);

            init_put_bits(&s->thread_context[i]->pb, start, end - start);
        }

        s->pict_type = s->new_picture.f->pict_type;
        //emms_c();
        ret = frame_start(s);
        if (ret < 0)
            return ret;
vbv_retry:
        if (encode_picture(s, s->picture_number) < 0)
            return -1;

        avctx->header_bits = s->header_bits;
        avctx->mv_bits     = s->mv_bits;
        avctx->misc_bits   = s->misc_bits;
        avctx->i_tex_bits  = s->i_tex_bits;
        avctx->p_tex_bits  = s->p_tex_bits;
        avctx->i_count     = s->i_count;
        // FIXME f/b_count in avctx
        avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
        avctx->skip_count  = s->skip_count;

        frame_end(s);

        sd = av_packet_new_side_data(pkt, AV_PKT_DATA_QUALITY_FACTOR,
                                     sizeof(int));
        if (!sd)
            return AVERROR(ENOMEM);
        *(int *)sd = s->current_picture.f->quality;

        if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);

        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;

            /* frame too large for the VBV buffer: raise lambda and retry */
            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->lmax) {
                s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + 1,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0;        // done in frame_start()
                // done in encode_picture() so we must undo it
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding ||
                        s->codec_id == AV_CODEC_ID_H263P ||
                        s->codec_id == AV_CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base       = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                /* rewind all slice bit writers before re-encoding */
                for (i = 0; i < context_count; i++) {
                    PutBitContext *pb = &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                }
                goto vbv_retry;
            }

            assert(s->avctx->rc_max_rate);
        }

        if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
            ff_write_pass1_stats(s);

        for (i = 0; i < 4; i++) {
            s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
            avctx->error[i] += s->current_picture_ptr->encoding_error[i];
        }

        if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
            assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
                   avctx->i_tex_bits + avctx->p_tex_bits ==
                       put_bits_count(&s->pb));
        flush_put_bits(&s->pb);
        s->frame_bits  = put_bits_count(&s->pb);

        /* pad the frame with stuffing if the rate control demands it */
        stuffing_count = ff_vbv_update(s, s->frame_bits);
        if (stuffing_count) {
            if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
                    stuffing_count + 50) {
                av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
            case AV_CODEC_ID_MPEG1VIDEO:
            case AV_CODEC_ID_MPEG2VIDEO:
                /* MPEG-1/2 stuffing: plain zero bytes */
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case AV_CODEC_ID_MPEG4:
                /* MPEG-4 stuffing: start code 0x1C3 followed by 0xFF bytes */
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
            }
            flush_put_bits(&s->pb);
            s->frame_bits  = put_bits_count(&s->pb);
        }

        /* update mpeg1/2 vbv_delay for CBR */
        if (s->avctx->rc_max_rate                          &&
            s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
            s->out_format == FMT_MPEG1                     &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                s->avctx->rc_max_rate * 0xFFFFLL) {
            int vbv_delay, min_delay;
            double inbits  = s->avctx->rc_max_rate *
                             av_q2d(s->avctx->time_base);
            int    minbits = s->frame_bits - 8 *
                             (s->vbv_delay_ptr - s->pb.buf - 1);
            double bits    = s->rc_context.buffer_index + minbits - inbits;

            if (bits < 0)
                av_log(s->avctx, AV_LOG_ERROR,
                       "Internal error, negative bits\n");

            assert(s->repeat_first_field == 0);

            vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
                        s->avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            assert(vbv_delay < 0xFFFF);

            /* patch the 16-bit vbv_delay field directly in the bitstream
             * (it straddles byte boundaries at s->vbv_delay_ptr) */
            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay >> 13;
            s->vbv_delay_ptr[1]  = vbv_delay >> 5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay << 3;
            avctx->vbv_delay     = vbv_delay * 300;
        }
        s->total_bits     += s->frame_bits;
        avctx->frame_bits  = s->frame_bits;

        pkt->pts = s->current_picture.f->pts;
        /* with B-frames, dts of a reference lags one reference behind pts */
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->current_picture.f->coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;
        if (s->current_picture.f->key_frame)
            pkt->flags |= AV_PKT_FLAG_KEY;
        if (s->mb_info)
            av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
    } else {
        s->frame_bits = 0;
    }
    assert((s->frame_bits & 7) == 0);

    pkt->size   = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
1812
1813 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1814 int n, int threshold)
1815 {
1816 static const char tab[64] = {
1817 3, 2, 2, 1, 1, 1, 1, 1,
1818 1, 1, 1, 1, 1, 1, 1, 1,
1819 1, 1, 1, 1, 1, 1, 1, 1,
1820 0, 0, 0, 0, 0, 0, 0, 0,
1821 0, 0, 0, 0, 0, 0, 0, 0,
1822 0, 0, 0, 0, 0, 0, 0, 0,
1823 0, 0, 0, 0, 0, 0, 0, 0,
1824 0, 0, 0, 0, 0, 0, 0, 0
1825 };
1826 int score = 0;
1827 int run = 0;
1828 int i;
1829 int16_t *block = s->block[n];
1830 const int last_index = s->block_last_index[n];
1831 int skip_dc;
1832
1833 if (threshold < 0) {
1834 skip_dc = 0;
1835 threshold = -threshold;
1836 } else
1837 skip_dc = 1;
1838
1839 /* Are all we could set to zero already zero? */
1840 if (last_index <= skip_dc - 1)
1841 return;
1842
1843 for (i = 0; i <= last_index; i++) {
1844 const int j = s->intra_scantable.permutated[i];
1845 const int level = FFABS(block[j]);
1846 if (level == 1) {
1847 if (skip_dc && i == 0)
1848 continue;
1849 score += tab[run];
1850 run = 0;
1851 } else if (level > 1) {
1852 return;
1853 } else {
1854 run++;
1855 }
1856 }
1857 if (score >= threshold)
1858 return;
1859 for (i = skip_dc; i <= last_index; i++) {
1860 const int j = s->intra_scantable.permutated[i];
1861 block[j] = 0;
1862 }
1863 if (block[0])
1864 s->block_last_index[n] = 0;
1865 else
1866 s->block_last_index[n] = -1;
1867 }
1868
1869 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1870 int last_index)
1871 {
1872 int i;
1873 const int maxlevel = s->max_qcoeff;
1874 const int minlevel = s->min_qcoeff;
1875 int overflow = 0;
1876
1877 if (s->mb_intra) {
1878 i = 1; // skip clipping of intra dc
1879 } else
1880 i = 0;
1881
1882 for (; i <= last_index; i++) {
1883 const int j = s->intra_scantable.permutated[i];
1884 int level = block[j];
1885
1886 if (level > maxlevel) {
1887 level = maxlevel;
1888 overflow++;
1889 } else if (level < minlevel) {
1890 level = minlevel;
1891 overflow++;
1892 }
1893
1894 block[j] = level;
1895 }
1896
1897 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1898 av_log(s->avctx, AV_LOG_INFO,
1899 "warning, clipping %d dct coefficients to %d..%d\n",
1900 overflow, minlevel, maxlevel);
1901 }
1902
/**
 * Compute a perceptual weight for each pixel of an 8x8 block: 36 times the
 * (scaled) standard deviation of the 3x3 neighbourhood clipped to the
 * block, so flat areas get low weights and textured areas high ones.
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int x, y;
    // FIXME optimize
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int sum = 0, sqr = 0, count = 0;
            int yy, xx;

            /* 3x3 neighbourhood, clipped at the block borders */
            for (yy = FFMAX(y - 1, 0); yy < FFMIN(8, y + 2); yy++) {
                for (xx = FFMAX(x - 1, 0); xx < FFMIN(8, x + 2); xx++) {
                    const int v = ptr[xx + yy * stride];

                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
1926
1927 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1928 int motion_x, int motion_y,
1929 int mb_block_height,
1930 int mb_block_count)
1931 {
1932 int16_t weight[8][64];
1933 int16_t orig[8][64];
1934 const int mb_x = s->mb_x;
1935 const int mb_y = s->mb_y;
1936 int i;
1937 int skip_dct[8];
1938 int dct_offset = s->linesize * 8; // default for progressive frames
1939 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1940 ptrdiff_t wrap_y, wrap_c;
1941
1942 for (i = 0; i < mb_block_count; i++)
1943 skip_dct[i] = s->skipdct;
1944
1945 if (s->adaptive_quant) {
1946 const int last_qp = s->qscale;
1947 const int mb_xy = mb_x + mb_y * s->mb_stride;
1948
1949 s->lambda = s->lambda_table[mb_xy];
1950 update_qscale(s);
1951
1952 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1953 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1954 s->dquant = s->qscale - last_qp;
1955
1956 if (s->out_format == FMT_H263) {
1957 s->dquant = av_clip(s->dquant, -2, 2);
1958
1959 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1960 if (!s->mb_intra) {
1961 if (s->pict_type == AV_PICTURE_TYPE_B) {
1962 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1963 s->dquant = 0;
1964 }
1965 if (s->mv_type == MV_TYPE_8X8)
1966 s->dquant = 0;
1967 }
1968 }
1969 }
1970 }
1971 ff_set_qscale(s, last_qp + s->dquant);
1972 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1973 ff_set_qscale(s, s->qscale + s->dquant);
1974
1975 wrap_y = s->linesize;
1976 wrap_c = s->uvlinesize;
1977 ptr_y = s->new_picture.f->data[0] +
1978 (mb_y * 16 * wrap_y) + mb_x * 16;
1979 ptr_cb = s->new_picture.f->data[1] +
1980 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1981 ptr_cr = s->new_picture.f->data[2] +
1982 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1983
1984 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1985 uint8_t *ebuf = s->sc.edge_emu_buffer + 32;
1986 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
1987 wrap_y, wrap_y,
1988 16, 16, mb_x * 16, mb_y * 16,
1989 s->width, s->height);
1990 ptr_y = ebuf;
1991 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
1992 wrap_c, wrap_c,
1993 8, mb_block_height, mb_x * 8, mb_y * 8,
1994 s->width >> 1, s->height >> 1);
1995 ptr_cb = ebuf + 18 * wrap_y;
1996 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
1997 wrap_c, wrap_c,
1998 8, mb_block_height, mb_x * 8, mb_y * 8,
1999 s->width >> 1, s->height >> 1);
2000 ptr_cr = ebuf + 18 * wrap_y + 8;
2001 }
2002
2003 if (s->mb_intra) {
2004 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2005 int progressive_score, interlaced_score;
2006
2007 s->interlaced_dct = 0;
2008 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2009 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2010 NULL, wrap_y, 8) - 400;
2011
2012 if (progressive_score > 0) {
2013 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2014 NULL, wrap_y * 2, 8) +
2015 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2016 NULL, wrap_y * 2, 8);
2017 if (progressive_score > interlaced_score) {
2018 s->interlaced_dct = 1;
2019
2020 dct_offset = wrap_y;
2021 wrap_y <<= 1;
2022 if (s->chroma_format == CHROMA_422)
2023 wrap_c <<= 1;
2024 }
2025 }
2026 }
2027
2028 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2029 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2030 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2031 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2032
2033 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2034 skip_dct[4] = 1;
2035 skip_dct[5] = 1;
2036 } else {
2037 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2038 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2039 if (!s->chroma_y_shift) { /* 422 */
2040 s->pdsp.get_pixels(s->block[6],
2041 ptr_cb + (dct_offset >> 1), wrap_c);
2042 s->pdsp.get_pixels(s->block[7],
2043 ptr_cr + (dct_offset >> 1), wrap_c);
2044 }
2045 }
2046 } else {
2047 op_pixels_func (*op_pix)[4];
2048 qpel_mc_func (*op_qpix)[16];
2049 uint8_t *dest_y, *dest_cb, *dest_cr;
2050
2051 dest_y = s->dest[0];
2052 dest_cb = s->dest[1];
2053 dest_cr = s->dest[2];
2054
2055 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2056 op_pix = s->hdsp.put_pixels_tab;
2057 op_qpix = s->qdsp.put_qpel_pixels_tab;
2058 } else {
2059 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2060 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2061 }
2062
2063 if (s->mv_dir & MV_DIR_FORWARD) {
2064 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2065 s->last_picture.f->data,
2066 op_pix, op_qpix);
2067 op_pix = s->hdsp.avg_pixels_tab;
2068 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2069 }
2070 if (s->mv_dir & MV_DIR_BACKWARD) {
2071 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2072 s->next_picture.f->data,
2073 op_pix, op_qpix);
2074 }
2075
2076 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2077 int progressive_score, interlaced_score;
2078
2079 s->interlaced_dct = 0;
2080 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2081 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2082 ptr_y + wrap_y * 8,
2083 wrap_y, 8) - 400;
2084
2085 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2086 progressive_score -= 400;
2087
2088 if (progressive_score > 0) {
2089 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2090 wrap_y * 2, 8) +
2091 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2092 ptr_y + wrap_y,
2093 wrap_y * 2, 8);
2094
2095 if (progressive_score > interlaced_score) {
2096 s->interlaced_dct = 1;
2097
2098 dct_offset = wrap_y;
2099 wrap_y <<= 1;
2100 if (s->chroma_format == CHROMA_422)
2101 wrap_c <<= 1;
2102 }
2103 }
2104 }
2105
2106 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2107 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2108 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2109 dest_y + dct_offset, wrap_y);
2110 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2111 dest_y + dct_offset + 8, wrap_y);
2112
2113 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2114 skip_dct[4] = 1;
2115 skip_dct[5] = 1;
2116 } else {
2117 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2118 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2119 if (!s->chroma_y_shift) { /* 422 */
2120 s->pdsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
2121 dest_cb + (dct_offset >> 1), wrap_c);
2122 s->pdsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
2123 dest_cr + (dct_offset >> 1), wrap_c);
2124 }
2125 }
2126 /* pre quantization */
2127 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2128 2 * s->qscale * s->qscale) {
2129 // FIXME optimize
2130 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2131 skip_dct[0] = 1;
2132 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2133 skip_dct[1] = 1;
2134 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2135 wrap_y, 8) < 20 * s->qscale)
2136 skip_dct[2] = 1;
2137 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2138 wrap_y, 8) < 20 * s->qscale)
2139 skip_dct[3] = 1;
2140 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2141 skip_dct[4] = 1;
2142 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2143 skip_dct[5] = 1;
2144 if (!s->chroma_y_shift) { /* 422 */
2145 if (s->mecc.sad[1](NULL, ptr_cb + (dct_offset >> 1),
2146 dest_cb + (dct_offset >> 1),
2147 wrap_c, 8) < 20 * s->qscale)
2148 skip_dct[6] = 1;
2149 if (s->mecc.sad[1](NULL, ptr_cr + (dct_offset >> 1),
2150 dest_cr + (dct_offset >> 1),
2151 wrap_c, 8) < 20 * s->qscale)
2152 skip_dct[7] = 1;
2153 }
2154 }
2155 }
2156
2157 if (s->quantizer_noise_shaping) {
2158 if (!skip_dct[0])
2159 get_visual_weight(weight[0], ptr_y , wrap_y);
2160 if (!skip_dct[1])
2161 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2162 if (!skip_dct[2])
2163 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2164 if (!skip_dct[3])
2165 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2166 if (!skip_dct[4])
2167 get_visual_weight(weight[4], ptr_cb , wrap_c);
2168 if (!skip_dct[5])
2169 get_visual_weight(weight[5], ptr_cr , wrap_c);
2170 if (!s->chroma_y_shift) { /* 422 */
2171 if (!skip_dct[6])
2172 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
2173 wrap_c);
2174 if (!skip_dct[7])
2175 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
2176 wrap_c);
2177 }
2178 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2179 }
2180
2181 /* DCT & quantize */
2182 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
2183 {
2184 for (i = 0; i < mb_block_count; i++) {
2185 if (!skip_dct[i]) {
2186 int overflow;
2187 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2188 // FIXME we could decide to change to quantizer instead of
2189 // clipping
2190 // JS: I don't think that would be a good idea it could lower
2191 // quality instead of improve it. Just INTRADC clipping
2192 // deserves changes in quantizer
2193 if (overflow)
2194 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2195 } else
2196 s->block_last_index[i] = -1;
2197 }
2198 if (s->quantizer_noise_shaping) {
2199 for (i = 0; i < mb_block_count; i++) {
2200 if (!skip_dct[i]) {
2201 s->block_last_index[i] =
2202 dct_quantize_refine(s, s->block[i], weight[i],
2203 orig[i], i, s->qscale);
2204 }
2205 }
2206 }
2207
2208 if (s->luma_elim_threshold && !s->mb_intra)
2209 for (i = 0; i < 4; i++)
2210 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2211 if (s->chroma_elim_threshold && !s->mb_intra)
2212 for (i = 4; i < mb_block_count; i++)
2213 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2214
2215 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2216 for (i = 0; i < mb_block_count; i++) {
2217 if (s->block_last_index[i] == -1)
2218 s->coded_score[i] = INT_MAX / 256;
2219 }
2220 }
2221 }
2222
2223 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2224 s->block_last_index[4] =
2225 s->block_last_index[5] = 0;
2226 s->block[4][0] =
2227 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2228 }
2229
2230 // non c quantize code returns incorrect block_last_index FIXME
2231 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2232 for (i = 0; i < mb_block_count; i++) {
2233 int j;
2234 if (s->block_last_index[i] > 0) {
2235 for (j = 63; j > 0; j--) {
2236 if (s->block[i][s->intra_scantable.permutated[j]])
2237 break;
2238 }
2239 s->block_last_index[i] = j;
2240 }
2241 }
2242 }
2243
2244 /* huffman encode */
2245 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2246 case AV_CODEC_ID_MPEG1VIDEO:
2247 case AV_CODEC_ID_MPEG2VIDEO:
2248 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2249 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2250 break;
2251 case AV_CODEC_ID_MPEG4:
2252 if (CONFIG_MPEG4_ENCODER)
2253 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2254 break;
2255 case AV_CODEC_ID_MSMPEG4V2:
2256 case AV_CODEC_ID_MSMPEG4V3:
2257 case AV_CODEC_ID_WMV1:
2258 if (CONFIG_MSMPEG4_ENCODER)
2259 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2260 break;
2261 case AV_CODEC_ID_WMV2:
2262 if (CONFIG_WMV2_ENCODER)
2263 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2264 break;
2265 case AV_CODEC_ID_H261:
2266 if (CONFIG_H261_ENCODER)
2267 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2268 break;
2269 case AV_CODEC_ID_H263:
2270 case AV_CODEC_ID_H263P:
2271 case AV_CODEC_ID_FLV1:
2272 case AV_CODEC_ID_RV10:
2273 case AV_CODEC_ID_RV20:
2274 if (CONFIG_H263_ENCODER)
2275 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2276 break;
2277 case AV_CODEC_ID_MJPEG:
2278 if (CONFIG_MJPEG_ENCODER)
2279 ff_mjpeg_encode_mb(s, s->block);
2280 break;
2281 default:
2282 assert(0);
2283 }
2284 }
2285
2286 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2287 {
2288 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2289 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
2290 }
2291
2292 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2293 int i;
2294
2295 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2296
2297 /* mpeg1 */
2298 d->mb_skip_run= s->mb_skip_run;
2299 for(i=0; i<3; i++)
2300 d->last_dc[i] = s->last_dc[i];
2301
2302 /* statistics */
2303 d->mv_bits= s->mv_bits;
2304 d->i_tex_bits= s->i_tex_bits;
2305 d->p_tex_bits= s->p_tex_bits;
2306 d->i_count= s->i_count;
2307 d->f_count= s->f_count;
2308 d->b_count= s->b_count;
2309 d->skip_count= s->skip_count;
2310 d->misc_bits= s->misc_bits;
2311 d->last_bits= 0;
2312
2313 d->mb_skipped= 0;
2314 d->qscale= s->qscale;
2315 d->dquant= s->dquant;
2316
2317 d->esc3_level_length= s->esc3_level_length;
2318 }
2319
2320 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2321 int i;
2322
2323 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2324 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2325
2326 /* mpeg1 */
2327 d->mb_skip_run= s->mb_skip_run;
2328 for(i=0; i<3; i++)
2329 d->last_dc[i] = s->last_dc[i];
2330
2331 /* statistics */
2332 d->mv_bits= s->mv_bits;
2333 d->i_tex_bits= s->i_tex_bits;
2334 d->p_tex_bits= s->p_tex_bits;
2335 d->i_count= s->i_count;
2336 d->f_count= s->f_count;
2337 d->b_count= s->b_count;
2338 d->skip_count= s->skip_count;
2339 d->misc_bits= s->misc_bits;
2340
2341 d->mb_intra= s->mb_intra;
2342 d->mb_skipped= s->mb_skipped;
2343 d->mv_type= s->mv_type;
2344 d->mv_dir= s->mv_dir;
2345 d->pb= s->pb;
2346 if(s->data_partitioning){
2347 d->pb2= s->pb2;
2348 d->tex_pb= s->tex_pb;
2349 }
2350 d->block= s->block;
2351 for(i=0; i<8; i++)
2352 d->block_last_index[i]= s->block_last_index[i];
2353 d->interlaced_dct= s->interlaced_dct;
2354 d->qscale= s->qscale;
2355
2356 d->esc3_level_length= s->esc3_level_length;
2357 }
2358
/**
 * Trial-encode one macroblock with the given candidate type and keep it if
 * it beats the best score found so far.
 * Encoding goes into one of two ping-pong bit buffers / block arrays
 * selected by *next_block; when the trial wins, *next_block is flipped so
 * the losing buffer gets reused for the next trial, and the winning state
 * is copied into *best. The score is the bit count, or a full
 * rate-distortion cost when mb_decision == FF_MB_DECISION_RD.
 */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                           PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                           int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    /* restore the pre-macroblock state saved by the caller */
    copy_context_before_encode(s, backup, type);

    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2   = pb2   [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    /* buffer 1 is the trial buffer: redirect reconstruction into the
     * scratchpad so the current best reconstruction is not clobbered */
    if(*next_block){
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->sc.rd_scratchpad;
        s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
        assert(s->linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    /* rate: bits produced by this trial (all partitions) */
    score= put_bits_count(&s->pb);
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        /* reconstruct the macroblock to measure its distortion */
        ff_mpv_decode_mb(s, s->block);

        /* rate-distortion cost: lambda2 * bits + SSE */
        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;

        copy_context_after_encode(best, s, type);
    }
}
2409
2410 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2411 uint32_t *sq = ff_square_tab + 256;
2412 int acc=0;
2413 int x,y;
2414
2415 if(w==16 && h==16)
2416 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2417 else if(w==8 && h==8)
2418 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2419
2420 for(y=0; y<h; y++){
2421 for(x=0; x<w; x++){
2422 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2423 }
2424 }
2425
2426 assert(acc>=0);
2427
2428 return acc;
2429 }
2430
/* Sum of squared errors between the source macroblock and its
 * reconstruction in s->dest, over the luma plane and both chroma planes.
 * Full 16x16 macroblocks use the DSP sse/nsse routines; partial
 * macroblocks at the right/bottom picture edge use the generic sse(). */
static int sse_mb(MpegEncContext *s){
    int w= 16;
    int h= 16;

    /* clip the compared area to the picture for border macroblocks */
    if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
    if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;

    if(w==16 && h==16)
      if(s->avctx->mb_cmp == FF_CMP_NSSE){
        /* noise-preserving comparison selected by the user */
        return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
               s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
               s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
      }else{
        return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
               s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
               s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
      }
    else
        return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
               +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
               +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
}
2453
2454 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2455 MpegEncContext *s= *(void**)arg;
2456
2457
2458 s->me.pre_pass=1;
2459 s->me.dia_size= s->avctx->pre_dia_size;
2460 s->first_slice_line=1;
2461 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2462 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2463 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2464 }
2465 s->first_slice_line=0;
2466 }
2467
2468 s->me.pre_pass=0;
2469
2470 return 0;
2471 }
2472
2473 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2474 MpegEncContext *s= *(void**)arg;
2475
2476 s->me.dia_size= s->avctx->dia_size;
2477 s->first_slice_line=1;
2478 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2479 s->mb_x=0; //for block init below
2480 ff_init_block_index(s);
2481 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2482 s->block_index[0]+=2;
2483 s->block_index[1]+=2;
2484 s->block_index[2]+=2;
2485 s->block_index[3]+=2;
2486
2487 /* compute motion vector & mb_type and store in context */
2488 if(s->pict_type==AV_PICTURE_TYPE_B)
2489 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2490 else
2491 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2492 }
2493 s->first_slice_line=0;
2494 }
2495 return 0;
2496 }
2497
2498 static int mb_var_thread(AVCodecContext *c, void *arg){
2499 MpegEncContext *s= *(void**)arg;
2500 int mb_x, mb_y;
2501
2502 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2503 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2504 int xx = mb_x * 16;
2505 int yy = mb_y * 16;
2506 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2507 int varc;
2508 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2509
2510 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2511 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2512
2513 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2514 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2515 s->me.mb_var_sum_temp += varc;
2516 }
2517 }
2518 return 0;
2519 }
2520
/* Terminate the current slice/packet: merge MPEG-4 data partitions if
 * used, emit codec-specific stuffing bits, then byte-align and flush the
 * bitstream writer. */
static void write_slice_end(MpegEncContext *s){
    if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
        if(s->partitioned_frame){
            ff_mpeg4_merge_partitions(s);
        }

        ff_mpeg4_stuffing(&s->pb);
    }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
        ff_mjpeg_encode_stuffing(&s->pb);
    }

    avpriv_align_put_bits(&s->pb);
    flush_put_bits(&s->pb);

    /* for two-pass stats, count slice-termination overhead as misc bits */
    if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
}
2538
/* Fill the most recently reserved 12-byte macroblock info record in the
 * side-data buffer (data used for H.263 RTP payload headers). */
static void write_mb_info(MpegEncContext *s)
{
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12; /* last reserved slot */
    int offset = put_bits_count(&s->pb);                  /* bit position of this MB */
    int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index); /* MB address in its GOB */
    int gobn = s->mb_y / s->gob_index;                    /* GOB number */
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
}
2558
/**
 * Maintain the macroblock info side data: reserve a new 12-byte record
 * every s->mb_info bytes of output and fill it via write_mb_info().
 * @param startcode non-zero when called just before a resync/start code is
 *                  written; then only the byte-position bookkeeping is
 *                  updated and no record is filled.
 */
static void update_mb_info(MpegEncContext *s, int startcode)
{
    if (!s->mb_info)
        return;
    /* another mb_info bytes have been written: reserve a fresh record */
    if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }
    if (startcode) {
        s->prev_mb_info = put_bits_count(&s->pb)/8;
        /* This might have incremented mb_info_size above, and we return
         * without actually writing any info into that slot yet. But in
         * that case, this will be called again (with startcode == 0) right
         * after the start code has been written, and the mb info will be
         * written then. */
        return;
    }

    s->last_mb_info = put_bits_count(&s->pb)/8;
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
2581
2582 static int encode_thread(AVCodecContext *c, void *arg){
2583 MpegEncContext *s= *(void**)arg;
2584 int mb_x, mb_y, pdif = 0;
2585 int chr_h= 16>>s->chroma_y_shift;
2586 int i, j;
2587 MpegEncContext best_s = { 0 }, backup_s;
2588 uint8_t bit_buf[2][MAX_MB_BYTES];
2589 uint8_t bit_buf2[2][MAX_MB_BYTES];
2590 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2591 PutBitContext pb[2], pb2[2], tex_pb[2];
2592
2593 for(i=0; i<2; i++){
2594 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2595 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2596 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2597 }
2598
2599 s->last_bits= put_bits_count(&s->pb);
2600 s->mv_bits=0;
2601 s->misc_bits=0;
2602 s->i_tex_bits=0;
2603 s->p_tex_bits=0;
2604 s->i_count=0;
2605 s->f_count=0;
2606 s->b_count=0;
2607 s->skip_count=0;
2608
2609 for(i=0; i<3; i++){
2610 /* init last dc values */
2611 /* note: quant matrix value (8) is implied here */
2612 s->last_dc[i] = 128 << s->intra_dc_precision;
2613
2614 s->current_picture.encoding_error[i] = 0;
2615 }
2616 s->mb_skip_run = 0;
2617 memset(s->last_mv, 0, sizeof(s->last_mv));
2618
2619 s->last_mv_dir = 0;
2620
2621 switch(s->codec_id){
2622 case AV_CODEC_ID_H263:
2623 case AV_CODEC_ID_H263P:
2624 case AV_CODEC_ID_FLV1:
2625 if (CONFIG_H263_ENCODER)
2626 s->gob_index = H263_GOB_HEIGHT(s->height);
2627 break;
2628 case AV_CODEC_ID_MPEG4:
2629 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2630 ff_mpeg4_init_partitions(s);
2631 break;
2632 }
2633
2634 s->resync_mb_x=0;
2635 s->resync_mb_y=0;
2636 s->first_slice_line = 1;
2637 s->ptr_lastgob = s->pb.buf;
2638 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2639 s->mb_x=0;
2640 s->mb_y= mb_y;
2641
2642 ff_set_qscale(s, s->qscale);
2643 ff_init_block_index(s);
2644
2645 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2646 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2647 int mb_type= s->mb_type[xy];
2648 // int d;
2649 int dmin= INT_MAX;
2650 int dir;
2651
2652 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2653 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2654 return -1;
2655 }
2656 if(s->data_partitioning){
2657 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2658 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2659 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2660 return -1;
2661 }
2662 }
2663
2664 s->mb_x = mb_x;
2665 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2666 ff_update_block_index(s);
2667
2668 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2669 ff_h261_reorder_mb_index(s);
2670 xy= s->mb_y*s->mb_stride + s->mb_x;
2671 mb_type= s->mb_type[xy];
2672 }
2673
2674 /* write gob / video packet header */
2675 if(s->rtp_mode){
2676 int current_packet_size, is_gob_start;
2677
2678 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2679
2680 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2681
2682 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2683
2684 switch(s->codec_id){
2685 case AV_CODEC_ID_H263:
2686 case AV_CODEC_ID_H263P:
2687 if(!s->h263_slice_structured)
2688 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2689 break;
2690 case AV_CODEC_ID_MPEG2VIDEO:
2691 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2692 case AV_CODEC_ID_MPEG1VIDEO:
2693 if(s->mb_skip_run) is_gob_start=0;
2694 break;
2695 }
2696
2697 if(is_gob_start){
2698 if(s->start_mb_y != mb_y || mb_x!=0){
2699 write_slice_end(s);
2700
2701 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2702 ff_mpeg4_init_partitions(s);
2703 }
2704 }
2705
2706 assert((put_bits_count(&s->pb)&7) == 0);
2707 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2708
2709 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2710 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2711 int d = 100 / s->error_rate;
2712 if(r % d == 0){
2713 current_packet_size=0;
2714 s->pb.buf_ptr= s->ptr_lastgob;
2715 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2716 }
2717 }
2718
2719 if (s->avctx->rtp_callback){
2720 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2721 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2722 }
2723 update_mb_info(s, 1);
2724
2725 switch(s->codec_id){
2726 case AV_CODEC_ID_MPEG4:
2727 if (CONFIG_MPEG4_ENCODER) {
2728 ff_mpeg4_encode_video_packet_header(s);
2729 ff_mpeg4_clean_buffers(s);
2730 }
2731 break;
2732 case AV_CODEC_ID_MPEG1VIDEO:
2733 case AV_CODEC_ID_MPEG2VIDEO:
2734 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2735 ff_mpeg1_encode_slice_header(s);
2736 ff_mpeg1_clean_buffers(s);
2737 }
2738 break;
2739 case AV_CODEC_ID_H263:
2740 case AV_CODEC_ID_H263P:
2741 if (CONFIG_H263_ENCODER)
2742 ff_h263_encode_gob_header(s, mb_y);
2743 break;
2744 }
2745
2746 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
2747 int bits= put_bits_count(&s->pb);
2748 s->misc_bits+= bits - s->last_bits;
2749 s->last_bits= bits;
2750 }
2751
2752 s->ptr_lastgob += current_packet_size;
2753 s->first_slice_line=1;
2754 s->resync_mb_x=mb_x;
2755 s->resync_mb_y=mb_y;
2756 }
2757 }
2758
2759 if( (s->resync_mb_x == s->mb_x)
2760 && s->resync_mb_y+1 == s->mb_y){
2761 s->first_slice_line=0;
2762 }
2763
2764 s->mb_skipped=0;
2765 s->dquant=0; //only for QP_RD
2766
2767 update_mb_info(s, 0);
2768
2769 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2770 int next_block=0;
2771 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2772
2773 copy_context_before_encode(&backup_s, s, -1);
2774 backup_s.pb= s->pb;
2775 best_s.data_partitioning= s->data_partitioning;
2776 best_s.partitioned_frame= s->partitioned_frame;
2777 if(s->data_partitioning){
2778 backup_s.pb2= s->pb2;
2779 backup_s.tex_pb= s->tex_pb;
2780 }
2781
2782 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2783 s->mv_dir = MV_DIR_FORWARD;
2784 s->mv_type = MV_TYPE_16X16;
2785 s->mb_intra= 0;
2786 s->mv[0][0][0] = s->p_mv_table[xy][0];
2787 s->mv[0][0][1] = s->p_mv_table[xy][1];
2788 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2789 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2790 }
2791 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2792 s->mv_dir = MV_DIR_FORWARD;
2793 s->mv_type = MV_TYPE_FIELD;
2794 s->mb_intra= 0;
2795 for(i=0; i<2; i++){
2796 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2797 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2798 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2799 }
2800 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2801 &dmin, &next_block, 0, 0);
2802 }
2803 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2804 s->mv_dir = MV_DIR_FORWARD;
2805 s->mv_type = MV_TYPE_16X16;
2806 s->mb_intra= 0;
2807 s->mv[0][0][0] = 0;
2808 s->mv[0][0][1] = 0;
2809 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2810 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2811 }
2812 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2813 s->mv_dir = MV_DIR_FORWARD;
2814 s->mv_type = MV_TYPE_8X8;
2815 s->mb_intra= 0;
2816 for(i=0; i<4; i++){
2817 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2818 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2819 }
2820 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2821 &dmin, &next_block, 0, 0);
2822 }
2823 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2824 s->mv_dir = MV_DIR_FORWARD;
2825 s->mv_type = MV_TYPE_16X16;
2826 s->mb_intra= 0;
2827 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2828 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2829 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2830 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2831 }
2832 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2833 s->mv_dir = MV_DIR_BACKWARD;
2834 s->mv_type = MV_TYPE_16X16;
2835 s->mb_intra= 0;
2836 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2837 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2838 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2839 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2840 }
2841 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2842 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2843 s->mv_type = MV_TYPE_16X16;
2844 s->mb_intra= 0;
2845 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2846 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2847 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2848 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2849 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2850 &dmin, &next_block, 0, 0);
2851 }
2852 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2853 s->mv_dir = MV_DIR_FORWARD;
2854 s->mv_type = MV_TYPE_FIELD;
2855 s->mb_intra= 0;
2856 for(i=0; i<2; i++){
2857 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2858 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2859 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2860 }
2861 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2862 &dmin, &next_block, 0, 0);
2863 }
2864 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2865 s->mv_dir = MV_DIR_BACKWARD;
2866 s->mv_type = MV_TYPE_FIELD;
2867 s->mb_intra= 0;
2868 for(i=0; i<2; i++){
2869 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2870 s->mv[1][i][0