/*
 * mpegvideo_enc: enable rtp_mode when multiple slices are used
 * [libav.git] / libavcodec / mpegvideo_enc.c
 */
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include <stdint.h>
31
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timer.h"
38 #include "avcodec.h"
39 #include "dct.h"
40 #include "idctdsp.h"
41 #include "mpeg12.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
44 #include "h261.h"
45 #include "h263.h"
46 #include "h263data.h"
47 #include "mjpegenc_common.h"
48 #include "mathops.h"
49 #include "mpegutils.h"
50 #include "mjpegenc.h"
51 #include "msmpeg4.h"
52 #include "pixblockdsp.h"
53 #include "qpeldsp.h"
54 #include "faandct.h"
55 #include "thread.h"
56 #include "aandcttab.h"
57 #include "flv.h"
58 #include "mpeg4video.h"
59 #include "internal.h"
60 #include "bytestream.h"
61 #include "wmv2.h"
62 #include "rv10.h"
63 #include <limits.h>
64
/* Fixed-point precision of the quantizer bias values. */
#define QUANT_BIAS_SHIFT 8

/* Precision of the reciprocal quantizer tables built by ff_convert_matrix():
 * 16-bit variant for the MMX/fixed-point path, 22-bit for the generic path. */
#define QMAT_SHIFT_MMX 16
#define QMAT_SHIFT 22

/* Forward declarations for encoder internals defined later in this file. */
static int encode_picture(MpegEncContext *s, int picture_number);
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
static int sse_mb(MpegEncContext *s);
static void denoise_dct_c(MpegEncContext *s, int16_t *block);
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);

/* Default motion-estimation penalty / fcode tables, filled in by
 * mpv_encode_defaults() and shared by all encoder instances. */
static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
static uint8_t default_fcode_tab[MAX_MV * 2 + 1];

/* Generic AVOption table shared by the mpegvideo-family encoders. */
const AVOption ff_mpv_generic_options[] = {
    FF_MPV_COMMON_OPTS
    { NULL },
};
83
/**
 * Build the reciprocal quantizer multiplier tables for every qscale in
 * [qmin, qmax] from a linear quantization matrix, so the quantizers can
 * multiply instead of divide.
 *
 * qmat[qscale][i] receives (1 << QMAT_SHIFT) / den style reciprocals
 * (with an extra 14 bits of scale for the AAN "ifast" DCT, whose output
 * still carries the ff_aanscales factors).  qmat16[qscale][0/1][i]
 * receives the 16-bit reciprocal / bias pair used by the fixed-point
 * (MMX) quantizer.
 *
 * @param quant_matrix linear quantization matrix (natural order)
 * @param bias         quantizer rounding bias, QUANT_BIAS_SHIFT fixed point
 * @param intra        start index for the overflow scan (skip DC for intra)
 */
void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    FDCTDSPContext *fdsp = &s->fdsp;
    int qscale;
    int shift = 0; // extra right-shift needed to keep max * qmat within INT_MAX

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
#if CONFIG_FAANDCT
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[j] <= 7905, so
                 * (1 << 22) / 7905 = 530 <= qmat[qscale][i] <=
                 * (1 << 22) / 16 = 262144 — always fits in an int. */

                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
            }
        } else if (fdsp->fdct == ff_fdct_ifast) {
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */

                qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / den);
            }
        } else {
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale * quant_matrix[j];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                 * Assume x = qscale * quant_matrix[i]
                 * So 16 <= x <= 7905
                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
                 * so 32768 >= (1 << 19) / (x) >= 67 */
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
                //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
                //                   (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / den;

                /* avoid the degenerate 0 and 128*256 reciprocals in the
                 * 16-bit table; clamp to 128*256 - 1 */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
                                qmat16[qscale][0][i]);
            }
        }

        /* determine how much extra shifting would be needed so that the
         * largest possible coefficient times qmat stays within INT_MAX */
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    if (shift) {
        av_log(NULL, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
162
163 static inline void update_qscale(MpegEncContext *s)
164 {
165 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
166 (FF_LAMBDA_SHIFT + 7);
167 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
168
169 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
170 FF_LAMBDA_SHIFT;
171 }
172
173 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
174 {
175 int i;
176
177 if (matrix) {
178 put_bits(pb, 1, 1);
179 for (i = 0; i < 64; i++) {
180 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
181 }
182 } else
183 put_bits(pb, 1, 0);
184 }
185
186 /**
187 * init s->current_picture.qscale_table from s->lambda_table
188 */
189 void ff_init_qscale_tab(MpegEncContext *s)
190 {
191 int8_t * const qscale_table = s->current_picture.qscale_table;
192 int i;
193
194 for (i = 0; i < s->mb_num; i++) {
195 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
196 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
197 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
198 s->avctx->qmax);
199 }
200 }
201
202 static void update_duplicate_context_after_me(MpegEncContext *dst,
203 MpegEncContext *src)
204 {
205 #define COPY(a) dst->a= src->a
206 COPY(pict_type);
207 COPY(current_picture);
208 COPY(f_code);
209 COPY(b_code);
210 COPY(qscale);
211 COPY(lambda);
212 COPY(lambda2);
213 COPY(picture_in_gop_number);
214 COPY(gop_picture_number);
215 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
216 COPY(progressive_frame); // FIXME don't set in encode_header
217 COPY(partitioned_frame); // FIXME don't set in encode_header
218 #undef COPY
219 }
220
221 /**
222 * Set the given MpegEncContext to defaults for encoding.
223 * the changed fields will not depend upon the prior state of the MpegEncContext.
224 */
225 static void mpv_encode_defaults(MpegEncContext *s)
226 {
227 int i;
228 ff_mpv_common_defaults(s);
229
230 for (i = -16; i < 16; i++) {
231 default_fcode_tab[i + MAX_MV] = 1;
232 }
233 s->me.mv_penalty = default_mv_penalty;
234 s->fcode_tab = default_fcode_tab;
235
236 s->input_picture_number = 0;
237 s->picture_in_gop_number = 0;
238 }
239
/**
 * Initialize the MPEG-family video encoder.
 *
 * Validates the codec/pixel-format combination and the user-supplied
 * rate-control, B-frame, slice, threading and timebase settings, then
 * performs the per-codec output-format setup, allocates the encoder
 * scratch buffers, builds the quantization matrices and starts the rate
 * controller.
 *
 * @param avctx codec context; avctx->priv_data is the MpegEncContext
 * @return 0 on success, a negative value on error
 */
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i, ret, format_supported;

    mpv_encode_defaults(s);

    /* check that the requested pixel format is valid for this codec */
    switch (avctx->codec_id) {
    case AV_CODEC_ID_MPEG2VIDEO:
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
            avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
            av_log(avctx, AV_LOG_ERROR,
                   "only YUV420 and YUV422 are supported\n");
            return -1;
        }
        break;
    case AV_CODEC_ID_MJPEG:
        format_supported = 0;
        /* JPEG color space */
        if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
            avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
            (avctx->color_range == AVCOL_RANGE_JPEG &&
             (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
              avctx->pix_fmt == AV_PIX_FMT_YUV422P)))
            format_supported = 1;
        /* MPEG color space */
        else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
                 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
                  avctx->pix_fmt == AV_PIX_FMT_YUV422P))
            format_supported = 1;

        if (!format_supported) {
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
            return -1;
        }
        break;
    default:
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
            return -1;
        }
    }

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
        break;
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUV420P:
    default:
        s->chroma_format = CHROMA_420;
        break;
    }

    /* copy the basic encoding parameters from the AVCodecContext */
    s->bit_rate = avctx->bit_rate;
    s->width    = avctx->width;
    s->height   = avctx->height;
    if (avctx->gop_size > 600 &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_ERROR,
               "Warning keyframe interval too large! reducing it ...\n");
        avctx->gop_size = 600;
    }
    s->gop_size     = avctx->gop_size;
    s->avctx        = avctx;
    if (avctx->max_b_frames > MAX_B_FRAMES) {
        av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
               "is %d.\n", MAX_B_FRAMES);
    }
    s->max_b_frames = avctx->max_b_frames;
    s->codec_id     = avctx->codec->id;
    s->strict_std_compliance = avctx->strict_std_compliance;
    s->quarter_sample     = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
    s->mpeg_quant         = avctx->mpeg_quant;
    /* sliced mode is forced on whenever an RTP payload size is requested */
    s->rtp_mode           = !!avctx->rtp_payload_size;
    s->intra_dc_precision = avctx->intra_dc_precision;
    s->user_specified_pts = AV_NOPTS_VALUE;

    if (s->gop_size <= 1) {
        s->intra_only = 1;
        s->gop_size   = 12;
    } else {
        s->intra_only = 0;
    }

#if FF_API_MOTION_EST
    FF_DISABLE_DEPRECATION_WARNINGS
    s->me_method = avctx->me_method;
    FF_ENABLE_DEPRECATION_WARNINGS
#endif

    /* Fixed QSCALE */
    s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);

#if FF_API_MPV_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->border_masking != 0.0)
        s->border_masking = avctx->border_masking;
    FF_ENABLE_DEPRECATION_WARNINGS
#endif

    /* adaptive quantization is only meaningful when any masking is
     * requested and the quantizer is not fixed */
    s->adaptive_quant = (s->avctx->lumi_masking ||
                         s->avctx->dark_masking ||
                         s->avctx->temporal_cplx_masking ||
                         s->avctx->spatial_cplx_masking ||
                         s->avctx->p_masking ||
                         s->border_masking ||
                         (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
                        !s->fixed_qscale;

    s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);

    /* rate-control parameter sanity checks */
    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        av_log(avctx, AV_LOG_ERROR,
               "a vbv buffer size is needed, "
               "for encoding with a maximum bitrate\n");
        return -1;
    }

    if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
    }

    if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
        return -1;
    }

    if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
        av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
        return -1;
    }

    if (avctx->rc_max_rate &&
        avctx->rc_max_rate == avctx->bit_rate &&
        avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "impossible bitrate constraints, this will fail\n");
    }

    if (avctx->rc_buffer_size &&
        avctx->bit_rate * (int64_t)avctx->time_base.num >
            avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
        return -1;
    }

    if (!s->fixed_qscale &&
        avctx->bit_rate * av_q2d(avctx->time_base) >
            avctx->bit_rate_tolerance) {
        av_log(avctx, AV_LOG_ERROR,
               "bitrate tolerance too small for bitrate\n");
        return -1;
    }

    if (s->avctx->rc_max_rate &&
        s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
        (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
        90000LL * (avctx->rc_buffer_size - 1) >
            s->avctx->rc_max_rate * 0xFFFFLL) {
        av_log(avctx, AV_LOG_INFO,
               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
               "specified vbv buffer is too large for the given bitrate!\n");
    }

    /* feature / codec compatibility checks */
    if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
        s->codec_id != AV_CODEC_ID_FLV1) {
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
        return -1;
    }

    if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
        av_log(avctx, AV_LOG_ERROR,
               "OBMC is only supported with simple mb decision\n");
        return -1;
    }

    if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
        return -1;
    }

    if (s->max_b_frames &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
        return -1;
    }

    if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
         s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->sample_aspect_ratio.num > 255 ||
         avctx->sample_aspect_ratio.den > 255)) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        return -1;
    }

    if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
        s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
        return -1;
    }

    // FIXME mpeg2 uses that too
    if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR,
               "mpeg2 style quantization not supported by codec\n");
        return -1;
    }

    if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
        return -1;
    }

    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        s->avctx->mb_decision != FF_MB_DECISION_RD) {
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
        return -1;
    }

    if (s->avctx->scenechange_threshold < 1000000000 &&
        (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
        av_log(avctx, AV_LOG_ERROR,
               "closed gop with scene change detection are not supported yet, "
               "set threshold to 1000000000\n");
        return -1;
    }

    if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
        if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
            av_log(avctx, AV_LOG_ERROR,
                   "low delay forcing is only available for mpeg2\n");
            return -1;
        }
        if (s->max_b_frames != 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "b frames cannot be used with low delay\n");
            return -1;
        }
    }

    if (s->q_scale_type == 1) {
        if (avctx->qmax > 12) {
            av_log(avctx, AV_LOG_ERROR,
                   "non linear quant only supports qmax <= 12 currently\n");
            return -1;
        }
    }

    if (avctx->slices > 1 &&
        (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
        av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
        return AVERROR(EINVAL);
    }

    if (s->avctx->thread_count > 1 &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
        (s->codec_id != AV_CODEC_ID_H263P)) {
        av_log(avctx, AV_LOG_ERROR,
               "multi threaded encoding not supported by codec\n");
        return -1;
    }

    if (s->avctx->thread_count < 1) {
        av_log(avctx, AV_LOG_ERROR,
               "automatic thread number detection not supported by codec,"
               "patch welcome\n");
        return -1;
    }

    if (!avctx->time_base.den || !avctx->time_base.num) {
        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
        return -1;
    }

    if (avctx->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
        av_log(avctx, AV_LOG_INFO,
               "notice: b_frame_strategy only affects the first pass\n");
        avctx->b_frame_strategy = 0;
    }

    /* reduce the timebase to lowest terms */
    i = av_gcd(avctx->time_base.den, avctx->time_base.num);
    if (i > 1) {
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;
        //return -1;
    }

    /* pick the quantizer rounding bias matching the codec family */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
        // (a + x * 3 / 8) / x
        s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
        s->inter_quant_bias = 0;
    } else {
        s->intra_quant_bias = 0;
        // (a - x / 4) / x
        s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
    }

#if FF_API_QUANT_BIAS
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias = avctx->intra_quant_bias;
    if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias = avctx->inter_quant_bias;
    FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
        s->avctx->time_base.den > (1 << 16) - 1) {
        av_log(avctx, AV_LOG_ERROR,
               "timebase %d/%d not supported by MPEG 4 standard, "
               "the maximum admitted value for the timebase denominator "
               "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
               (1 << 16) - 1);
        return -1;
    }
    s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;

    /* per-codec output format and feature setup */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case AV_CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
        s->rtp_mode = 1;
        break;
    case AV_CODEC_ID_MJPEG:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        if (!CONFIG_MJPEG_ENCODER ||
            ff_mjpeg_encode_init(s) < 0)
            return -1;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_H261:
        if (!CONFIG_H261_ENCODER)
            return -1;
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for the "
                   "H.261 codec.\nValid sizes are 176x144, 352x288\n",
                   s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H261;
        avctx->delay = 0;
        s->low_delay = 1;
        s->rtp_mode = 0; /* Sliced encoding not supported */
        break;
    case AV_CODEC_ID_H263:
        if (!CONFIG_H263_ENCODER)
            return -1;
        if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
                             s->width, s->height) == 8) {
            av_log(avctx, AV_LOG_INFO,
                   "The specified picture size of %dx%d is not valid for "
                   "the H.263 codec.\nValid sizes are 128x96, 176x144, "
                   "352x288, 704x576, and 1408x1152."
                   "Try H.263+.\n", s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H263;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->h263_plus = 1;
        /* Fx */
        s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
        s->modified_quant = s->h263_aic;
        s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
        s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;

        /* /Fx */
        /* These are just to be sure */
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_FLV1:
        s->out_format = FMT_H263;
        s->h263_flv = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode = 0; /* don't allow GOB */
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_RV10:
        s->out_format = FMT_H263;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_RV20:
        s->out_format = FMT_H263;
        avctx->delay = 0;
        s->low_delay = 1;
        s->modified_quant = 1;
        s->h263_aic = 1;
        s->h263_plus = 1;
        s->loop_filter = 1;
        s->unrestricted_mv = 0;
        break;
    case AV_CODEC_ID_MPEG4:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->low_delay = s->max_b_frames ? 0 : 1;
        avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case AV_CODEC_ID_MSMPEG4V2:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 2;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_MSMPEG4V3:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 3;
        s->flipflop_rounding = 1;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_WMV1:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 4;
        s->flipflop_rounding = 1;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_WMV2:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 5;
        s->flipflop_rounding = 1;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    default:
        return -1;
    }

    avctx->has_b_frames = !s->low_delay;

    s->encoding = 1;

    s->progressive_frame =
    s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
                                                AV_CODEC_FLAG_INTERLACED_ME) ||
                                s->alternate_scan);

    /* init */
    ff_mpv_idct_init(s);
    if (ff_mpv_common_init(s) < 0)
        return -1;

    if (ARCH_X86)
        ff_mpv_encode_init_x86(s);

    ff_fdctdsp_init(&s->fdsp, avctx);
    ff_me_cmp_init(&s->mecc, avctx);
    ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
    ff_pixblockdsp_init(&s->pdsp, avctx);
    ff_qpeldsp_init(&s->qdsp);

    /* allocate the per-encoder statistics and matrix buffers
     * (on failure, jump to fail: which tears everything down) */
    if (s->msmpeg4_version) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                          2 * 2 * (MAX_LEVEL + 1) *
                          (MAX_RUN + 1) * 2 * sizeof(int), fail);
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);

    if (s->avctx->noise_reduction) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                          2 * 64 * sizeof(uint16_t), fail);
    }

    /* select the quantizer implementations */
    if (CONFIG_H263_ENCODER)
        ff_h263dsp_init(&s->h263dsp);
    if (!s->dct_quantize)
        s->dct_quantize = ff_dct_quantize_c;
    if (!s->denoise_dct)
        s->denoise_dct = denoise_dct_c;
    s->fast_dct_quantize = s->dct_quantize;
    if (avctx->trellis)
        s->dct_quantize = dct_quantize_trellis_c;

    if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
        s->chroma_qscale_table = ff_h263_chroma_qscale_table;

    /* multiple slice contexts imply sliced (rtp) mode */
    if (s->slice_context_count > 1) {
        s->rtp_mode = 1;

        if (avctx->codec_id == AV_CODEC_ID_H263 || avctx->codec_id == AV_CODEC_ID_H263P)
            s->h263_slice_structured = 1;
    }

    s->quant_precision = 5;

    ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
    ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp);

    /* codec-specific encoder init */
    if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
        ff_h263_encode_init(s);
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
        if ((ret = ff_msmpeg4_encode_init(s)) < 0)
            return ret;
    if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
        && s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);

    /* init q matrix */
    for (i = 0; i < 64; i++) {
        int j = s->idsp.idct_permutation[i];
        if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
            s->mpeg_quant) {
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
            s->intra_matrix[j] =
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        } else {
            /* mpeg1/2 */
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        }
        if (s->avctx->intra_matrix)
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
        if (s->avctx->inter_matrix)
            s->inter_matrix[j] = s->avctx->inter_matrix[i];
    }

    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, avctx->qmin,
                          31, 1);
        ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
                          s->inter_matrix, s->inter_quant_bias, avctx->qmin,
                          31, 0);
    }

    if (ff_rate_control_init(s) < 0)
        return -1;

#if FF_API_ERROR_RATE
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->error_rate)
        s->error_rate = avctx->error_rate;
    FF_ENABLE_DEPRECATION_WARNINGS;
#endif

#if FF_API_NORMALIZE_AQP
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
        s->mpv_flags |= FF_MPV_FLAG_NAQ;
    FF_ENABLE_DEPRECATION_WARNINGS;
#endif

#if FF_API_MV0
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->flags & CODEC_FLAG_MV0)
        s->mpv_flags |= FF_MPV_FLAG_MV0;
    FF_ENABLE_DEPRECATION_WARNINGS
#endif

#if FF_API_MPV_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->rc_qsquish != 0.0)
        s->rc_qsquish = avctx->rc_qsquish;
    if (avctx->rc_qmod_amp != 0.0)
        s->rc_qmod_amp = avctx->rc_qmod_amp;
    if (avctx->rc_qmod_freq)
        s->rc_qmod_freq = avctx->rc_qmod_freq;
    if (avctx->rc_buffer_aggressivity != 1.0)
        s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
    if (avctx->rc_initial_cplx != 0.0)
        s->rc_initial_cplx = avctx->rc_initial_cplx;
    if (avctx->lmin)
        s->lmin = avctx->lmin;
    if (avctx->lmax)
        s->lmax = avctx->lmax;

    if (avctx->rc_eq) {
        av_freep(&s->rc_eq);
        s->rc_eq = av_strdup(avctx->rc_eq);
        if (!s->rc_eq)
            return AVERROR(ENOMEM);
    }
    FF_ENABLE_DEPRECATION_WARNINGS
#endif

    /* allocate the scaled frames used by b_frame_strategy == 2 */
    if (avctx->b_frame_strategy == 2) {
        for (i = 0; i < s->max_b_frames + 2; i++) {
            s->tmp_frames[i] = av_frame_alloc();
            if (!s->tmp_frames[i])
                return AVERROR(ENOMEM);

            s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
            s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
            s->tmp_frames[i]->height = s->height >> avctx->brd_scale;

            ret = av_frame_get_buffer(s->tmp_frames[i], 32);
            if (ret < 0)
                return ret;
        }
    }

    return 0;
fail:
    ff_mpv_encode_end(avctx);
    return AVERROR_UNKNOWN;
}
890
891 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
892 {
893 MpegEncContext *s = avctx->priv_data;
894 int i;
895
896 ff_rate_control_uninit(s);
897
898 ff_mpv_common_end(s);
899 if (CONFIG_MJPEG_ENCODER &&
900 s->out_format == FMT_MJPEG)
901 ff_mjpeg_encode_close(s);
902
903 av_freep(&avctx->extradata);
904
905 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
906 av_frame_free(&s->tmp_frames[i]);
907
908 ff_free_picture_tables(&s->new_picture);
909 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
910
911 av_freep(&s->avctx->stats_out);
912 av_freep(&s->ac_stats);
913
914 av_freep(&s->q_intra_matrix);
915 av_freep(&s->q_inter_matrix);
916 av_freep(&s->q_intra_matrix16);
917 av_freep(&s->q_inter_matrix16);
918 av_freep(&s->input_picture);
919 av_freep(&s->reordered_input_picture);
920 av_freep(&s->dct_offset);
921
922 return 0;
923 }
924
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value. */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int row, col;
    int sum = 0;

    for (row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;

        for (col = 0; col < 16; col++) {
            const int diff = line[col] - ref;

            sum += diff >= 0 ? diff : -diff;
        }
    }

    return sum;
}
938
939 static int get_intra_count(MpegEncContext *s, uint8_t *src,
940 uint8_t *ref, int stride)
941 {
942 int x, y, w, h;
943 int acc = 0;
944
945 w = s->width & ~15;
946 h = s->height & ~15;
947
948 for (y = 0; y < h; y += 16) {
949 for (x = 0; x < w; x += 16) {
950 int offset = x + y * stride;
951 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
952 stride, 16);
953 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
954 int sae = get_sae(src + offset, mean, stride);
955
956 acc += sae + 500 < sad;
957 }
958 }
959 return acc;
960 }
961
/**
 * Allocate (or wrap, when @p shared is set) the buffers of an encoder
 * Picture; thin wrapper around ff_alloc_picture() supplying this
 * encoder's geometry and stride fields.
 *
 * @return 0 on success, a negative error code otherwise
 */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
}
969
/**
 * Queue one user-supplied frame (or a flush request when @p pic_arg is
 * NULL) into s->input_picture[].
 *
 * Validates/derives the pts, references the frame directly when its
 * strides match the encoder's (otherwise copies it, padding the edges),
 * and shifts the reorder buffer so that the new entry lands at index
 * encoding_delay.
 *
 * @param pic_arg input frame, or NULL to signal end of stream (flush)
 * @return 0 on success, a negative value on error
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    Picture *pic = NULL;
    int64_t pts;
    int i, display_picture_number = 0, ret;
    /* number of frames the output lags behind the input */
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;             // can we reference pic_arg without copying?

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t time = pts;
                int64_t last = s->user_specified_pts;

                /* timestamps must be strictly increasing */
                if (time <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Error, Invalid timestamp=%"PRId64", "
                           "last=%"PRId64"\n", pts, s->user_specified_pts);
                    return -1;
                }

                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = time - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* no pts supplied: extrapolate from the previous one, or fall
             * back to the display picture number */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* direct mode needs a refcounted buffer with matching strides and
         * dimensions that are a multiple of 16 */
        if (!pic_arg->buf[0] ||
            pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i = ff_find_unused_picture(s->avctx, s->picture, direct);
        if (i < 0)
            return i;

        pic = &s->picture[i];
        pic->reference = 3;

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                return ret;
        }
        ret = alloc_picture(s, pic, direct);
        if (ret < 0)
            return ret;

        if (!direct) {
            /* if the caller's planes already alias ours (offset by
             * INPLACE_OFFSET), no copy is needed */
            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                // empty
            } else {
                /* copy all three planes, then pad the bottom edge when the
                 * dimensions are not multiples of 16 */
                int h_chroma_shift, v_chroma_shift;
                av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                                 &h_chroma_shift,
                                                 &v_chroma_shift);

                for (i = 0; i < 3; i++) {
                    int src_stride = pic_arg->linesize[i];
                    int dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = s->width >> h_shift;
                    int h = s->height >> v_shift;
                    uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->f->data[i];

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        memcpy(dst, src, src_stride * h);
                    else {
                        int h2 = h;
                        uint8_t *dst2 = dst;
                        while (h2--) {
                            memcpy(dst2, src, w);
                            dst2 += dst_stride;
                            src += src_stride;
                        }
                    }
                    if ((s->width & 15) || (s->height & 15)) {
                        s->mpvencdsp.draw_edges(dst, dst_stride,
                                                w, h,
                                                16 >> h_shift,
                                                16 >> v_shift,
                                                EDGE_BOTTOM);
                    }
                }
            }
        }
        ret = av_frame_copy_props(pic->f, pic_arg);
        if (ret < 0)
            return ret;

        pic->f->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
    } else {
        /* Flushing: When we have not received enough input frames,
         * ensure s->input_picture[0] contains the first picture */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        if (flush_offset <= 1)
            flush_offset = 1;
        else
            encoding_delay = encoding_delay - flush_offset + 1;
    }

    /* shift buffer entries */
    for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];

    s->input_picture[encoding_delay] = (Picture*) pic;

    return 0;
}
1110
1111 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1112 {
1113 int x, y, plane;
1114 int score = 0;
1115 int64_t score64 = 0;
1116
1117 for (plane = 0; plane < 3; plane++) {
1118 const int stride = p->f->linesize[plane];
1119 const int bw = plane ? 1 : 2;
1120 for (y = 0; y < s->mb_height * bw; y++) {
1121 for (x = 0; x < s->mb_width * bw; x++) {
1122 int off = p->shared ? 0 : 16;
1123 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1124 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1125 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1126
1127 switch (s->avctx->frame_skip_exp) {
1128 case 0: score = FFMAX(score, v); break;
1129 case 1: score += FFABS(v); break;
1130 case 2: score += v * v; break;
1131 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1132 case 4: score64 += v * v * (int64_t)(v * v); break;
1133 }
1134 }
1135 }
1136 }
1137
1138 if (score)
1139 score64 = score;
1140
1141 if (score64 < s->avctx->frame_skip_threshold)
1142 return 1;
1143 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1144 return 1;
1145 return 0;
1146 }
1147
1148 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1149 {
1150 AVPacket pkt = { 0 };
1151 int ret, got_output;
1152
1153 av_init_packet(&pkt);
1154 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1155 if (ret < 0)
1156 return ret;
1157
1158 ret = pkt.size;
1159 av_packet_unref(&pkt);
1160 return ret;
1161 }
1162
1163 static int estimate_best_b_count(MpegEncContext *s)
1164 {
1165 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1166 AVCodecContext *c = avcodec_alloc_context3(NULL);
1167 const int scale = s->avctx->brd_scale;
1168 int i, j, out_size, p_lambda, b_lambda, lambda2;
1169 int64_t best_rd = INT64_MAX;
1170 int best_b_count = -1;
1171
1172 if (!c)
1173 return AVERROR(ENOMEM);
1174 assert(scale >= 0 && scale <= 3);
1175
1176 //emms_c();
1177 //s->next_picture_ptr->quality;
1178 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1179 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1180 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1181 if (!b_lambda) // FIXME we should do this somewhere else
1182 b_lambda = p_lambda;
1183 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1184 FF_LAMBDA_SHIFT;
1185
1186 c->width = s->width >> scale;
1187 c->height = s->height >> scale;
1188 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1189 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1190 c->mb_decision = s->avctx->mb_decision;
1191 c->me_cmp = s->avctx->me_cmp;
1192 c->mb_cmp = s->avctx->mb_cmp;
1193 c->me_sub_cmp = s->avctx->me_sub_cmp;
1194 c->pix_fmt = AV_PIX_FMT_YUV420P;
1195 c->time_base = s->avctx->time_base;
1196 c->max_b_frames = s->max_b_frames;
1197
1198 if (avcodec_open2(c, codec, NULL) < 0)
1199 return -1;
1200
1201 for (i = 0; i < s->max_b_frames + 2; i++) {
1202 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1203 s->next_picture_ptr;
1204
1205 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1206 pre_input = *pre_input_ptr;
1207
1208 if (!pre_input.shared && i) {
1209 pre_input.f->data[0] += INPLACE_OFFSET;
1210 pre_input.f->data[1] += INPLACE_OFFSET;
1211 pre_input.f->data[2] += INPLACE_OFFSET;
1212 }
1213
1214 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1215 s->tmp_frames[i]->linesize[0],
1216 pre_input.f->data[0],
1217 pre_input.f->linesize[0],
1218 c->width, c->height);
1219 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1220 s->tmp_frames[i]->linesize[1],
1221 pre_input.f->data[1],
1222 pre_input.f->linesize[1],
1223 c->width >> 1, c->height >> 1);
1224 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1225 s->tmp_frames[i]->linesize[2],
1226 pre_input.f->data[2],
1227 pre_input.f->linesize[2],
1228 c->width >> 1, c->height >> 1);
1229 }
1230 }
1231
1232 for (j = 0; j < s->max_b_frames + 1; j++) {
1233 int64_t rd = 0;
1234
1235 if (!s->input_picture[j])
1236 break;
1237
1238 c->error[0] = c->error[1] = c->error[2] = 0;
1239
1240 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1241 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1242
1243 out_size = encode_frame(c, s->tmp_frames[0]);
1244
1245 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1246
1247 for (i = 0; i < s->max_b_frames + 1; i++) {
1248 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1249
1250 s->tmp_frames[i + 1]->pict_type = is_p ?
1251 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1252 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1253
1254 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1255
1256 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1257 }
1258
1259 /* get the delayed frames */
1260 while (out_size) {
1261 out_size = encode_frame(c, NULL);
1262 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1263 }
1264
1265 rd += c->error[0] + c->error[1] + c->error[2];
1266
1267 if (rd < best_rd) {
1268 best_rd = rd;
1269 best_b_count = j;
1270 }
1271 }
1272
1273 avcodec_close(c);
1274 av_freep(&c);
1275
1276 return best_b_count;
1277 }
1278
/**
 * Pick the next picture to encode from the input queue, decide its type
 * (I/P/B) and reorder the queue into coded order, filling s->new_picture
 * and s->current_picture_ptr.
 *
 * @return 0 on success, a negative error code on failure
 */
static int select_input_picture(MpegEncContext *s)
{
    int i, ret;

    /* shift the reorder queue: slot 0 was consumed by the previous call */
    for (i = 1; i < MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;

    /* set next picture type & ordering */
    if (!s->reordered_input_picture[0] && s->input_picture[0]) {
        if (/*s->picture_in_gop_number >= s->gop_size ||*/
            !s->next_picture_ptr || s->intra_only) {
            /* no reference available (or intra-only mode): force an I-frame */
            s->reordered_input_picture[0] = s->input_picture[0];
            s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
            s->reordered_input_picture[0]->f->coded_picture_number =
                s->coded_picture_number++;
        } else {
            int b_frames;

            /* optional frame skipping: drop the frame entirely if it is
             * similar enough to the last reference */
            if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
                if (s->picture_in_gop_number < s->gop_size &&
                    skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
                    // FIXME check that te gop check above is +-1 correct
                    av_frame_unref(s->input_picture[0]->f);

                    emms_c();
                    ff_vbv_update(s, 0);

                    goto no_output_pic;
                }
            }

            /* two-pass mode: take picture types from the first-pass stats */
            if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    int pict_num = s->input_picture[0]->f->display_picture_number + i;

                    if (pict_num >= s->rc_context.num_entries)
                        break;
                    if (!s->input_picture[i]) {
                        s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                        break;
                    }

                    s->input_picture[i]->f->pict_type =
                        s->rc_context.entry[pict_num].new_pict_type;
                }
            }

            /* choose the number of B-frames before the next P-frame */
            if (s->avctx->b_frame_strategy == 0) {
                /* strategy 0: always use max_b_frames (bounded by availability) */
                b_frames = s->max_b_frames;
                while (b_frames && !s->input_picture[b_frames])
                    b_frames--;
            } else if (s->avctx->b_frame_strategy == 1) {
                /* strategy 1: stop at the first frame with too many intra MBs */
                for (i = 1; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] &&
                        s->input_picture[i]->b_frame_score == 0) {
                        s->input_picture[i]->b_frame_score =
                            get_intra_count(s,
                                            s->input_picture[i    ]->f->data[0],
                                            s->input_picture[i - 1]->f->data[0],
                                            s->linesize) + 1;
                    }
                }
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    if (!s->input_picture[i] ||
                        s->input_picture[i]->b_frame_score - 1 >
                            s->mb_num / s->avctx->b_sensitivity)
                        break;
                }

                b_frames = FFMAX(0, i - 1);

                /* reset scores */
                for (i = 0; i < b_frames + 1; i++) {
                    s->input_picture[i]->b_frame_score = 0;
                }
            } else if (s->avctx->b_frame_strategy == 2) {
                /* strategy 2: full rate-distortion search via trial encodes */
                b_frames = estimate_best_b_count(s);
            } else {
                av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
                b_frames = 0;
            }

            emms_c();

            /* a user/pass-forced non-B frame inside the run shortens it */
            for (i = b_frames - 1; i >= 0; i--) {
                int type = s->input_picture[i]->f->pict_type;
                if (type && type != AV_PICTURE_TYPE_B)
                    b_frames = i;
            }
            if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
                b_frames == s->max_b_frames) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "warning, too many b frames in a row\n");
            }

            /* GOP boundary handling: force an I-frame (or trim the B run in
             * strict-GOP mode) when the GOP size is reached */
            if (s->picture_in_gop_number + b_frames >= s->gop_size) {
                if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                    s->gop_size > s->picture_in_gop_number) {
                    b_frames = s->gop_size - s->picture_in_gop_number - 1;
                } else {
                    if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
                        b_frames = 0;
                    s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
                }
            }

            /* closed GOP: B-frames must not reference across an I-frame */
            if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
                s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
                b_frames--;

            /* coded order: the anchor (P/I) frame first, then its B-frames */
            s->reordered_input_picture[0] = s->input_picture[b_frames];
            if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
                s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
            s->reordered_input_picture[0]->f->coded_picture_number =
                s->coded_picture_number++;
            for (i = 0; i < b_frames; i++) {
                s->reordered_input_picture[i + 1] = s->input_picture[i];
                s->reordered_input_picture[i + 1]->f->pict_type =
                    AV_PICTURE_TYPE_B;
                s->reordered_input_picture[i + 1]->f->coded_picture_number =
                    s->coded_picture_number++;
            }
        }
    }
no_output_pic:
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);

    if (s->reordered_input_picture[0]) {
        /* B-frames are never used as references */
        s->reordered_input_picture[0]->reference =
           s->reordered_input_picture[0]->f->pict_type !=
               AV_PICTURE_TYPE_B ? 3 : 0;

        if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
            return ret;

        if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
            // input is a shared pix, so we can't modifiy it -> alloc a new
            // one & ensure that the shared one is reuseable

            Picture *pic;
            int i = ff_find_unused_picture(s->avctx, s->picture, 0);
            if (i < 0)
                return i;
            pic = &s->picture[i];

            pic->reference = s->reordered_input_picture[0]->reference;
            if (alloc_picture(s, pic, 0) < 0) {
                return -1;
            }

            ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
            if (ret < 0)
                return ret;

            /* mark us unused / free shared pic */
            av_frame_unref(s->reordered_input_picture[0]->f);
            s->reordered_input_picture[0]->shared = 0;

            s->current_picture_ptr = pic;
        } else {
            // input is not a shared pix -> reuse buffer for current_pix
            s->current_picture_ptr = s->reordered_input_picture[0];
            for (i = 0; i < 4; i++) {
                s->new_picture.f->data[i] += INPLACE_OFFSET;
            }
        }
        ff_mpeg_unref_picture(s->avctx, &s->current_picture);
        if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                       s->current_picture_ptr)) < 0)
            return ret;

        s->picture_number = s->new_picture.f->display_picture_number;
    }
    return 0;
}
1455
/**
 * Finish encoding of the current frame: pad reference-frame edges for
 * unrestricted motion vectors, update per-type lambda statistics and
 * release non-reference pictures.
 */
static void frame_end(MpegEncContext *s)
{
    int i;

    /* draw edge padding around reference frames so motion vectors may
     * point outside the picture (unrestricted MV) */
    if (s->unrestricted_mv &&
        s->current_picture.reference &&
        !s->intra_only) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        s->mpvencdsp.draw_edges(s->current_picture.f->data[0], s->linesize,
                                s->h_edge_pos, s->v_edge_pos,
                                EDGE_WIDTH, EDGE_WIDTH,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->current_picture.f->data[1], s->uvlinesize,
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->current_picture.f->data[2], s->uvlinesize,
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* remember lambda per picture type for future rate control decisions */
    s->last_pict_type                 = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B)
        s->last_non_b_pict_type = s->pict_type;

    if (s->encoding) {
        /* release non-reference frames */
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            if (!s->picture[i].reference)
                ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
        }
    }

    /* mirror state into deprecated public fields while the old API exists */
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
#if FF_API_ERROR_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
           sizeof(s->current_picture.encoding_error));
FF_ENABLE_DEPRECATION_WARNINGS
#endif
}
1511
1512 static void update_noise_reduction(MpegEncContext *s)
1513 {
1514 int intra, i;
1515
1516 for (intra = 0; intra < 2; intra++) {
1517 if (s->dct_count[intra] > (1 << 16)) {
1518 for (i = 0; i < 64; i++) {
1519 s->dct_error_sum[intra][i] >>= 1;
1520 }
1521 s->dct_count[intra] >>= 1;
1522 }
1523
1524 for (i = 0; i < 64; i++) {
1525 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1526 s->dct_count[intra] +
1527 s->dct_error_sum[intra][i] / 2) /
1528 (s->dct_error_sum[intra][i] + 1);
1529 }
1530 }
1531 }
1532
/**
 * Prepare encoder state for the next frame: rotate last/next/current
 * reference pictures, set up field-picture strides, select the matching
 * dequantizers and refresh noise-reduction tables.
 *
 * @return 0 on success, a negative error code on failure
 */
static int frame_start(MpegEncContext *s)
{
    int ret;

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    s->current_picture_ptr->f->pict_type = s->pict_type;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* non-B frames become the new "next" reference (unless droppable) */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }

    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->last_picture);
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->next_picture);
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    /* field pictures: address only every other line by doubling linesize
     * (bottom field additionally starts one line down) */
    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i] *= 2;
            s->next_picture.f->linesize[i] *= 2;
        }
    }

    /* pick the dequantizer matching the output format */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);
    }

    return 0;
}
1604
/**
 * Main encoding entry point: queue the input frame, select and encode the
 * next picture in coded order, and emit the resulting packet.
 *
 * @param avctx      codec context
 * @param pkt        output packet (allocated here if empty)
 * @param pic_arg    input frame, or NULL to flush delayed pictures
 * @param got_packet set to 1 if a packet was produced
 * @return 0 on success, a negative error code on failure
 */
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                          const AVFrame *pic_arg, int *got_packet)
{
    MpegEncContext *s = avctx->priv_data;
    int i, stuffing_count, ret;
    int context_count = s->slice_context_count;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_picture.f->data[0]) {
        uint8_t *sd;
        /* worst-case packet size: MAX_MB_BYTES per macroblock */
        if (!pkt->data &&
            (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
            return ret;
        if (s->mb_info) {
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                 AV_PKT_DATA_H263_MB_INFO,
                                 s->mb_width*s->mb_height*12);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        /* split the output buffer between slice threads, proportionally to
         * the number of MB rows each thread encodes */
        for (i = 0; i < context_count; i++) {
            int start_y = s->thread_context[i]->start_mb_y;
            int   end_y = s->thread_context[i]->  end_mb_y;
            int h       = s->mb_height;
            uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
            uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);

            init_put_bits(&s->thread_context[i]->pb, start, end - start);
        }

        s->pict_type = s->new_picture.f->pict_type;
        //emms_c();
        ret = frame_start(s);
        if (ret < 0)
            return ret;
vbv_retry:
        if (encode_picture(s, s->picture_number) < 0)
            return -1;

        /* export per-frame bit statistics */
        avctx->header_bits = s->header_bits;
        avctx->mv_bits     = s->mv_bits;
        avctx->misc_bits   = s->misc_bits;
        avctx->i_tex_bits  = s->i_tex_bits;
        avctx->p_tex_bits  = s->p_tex_bits;
        avctx->i_count     = s->i_count;
        // FIXME f/b_count in avctx
        avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
        avctx->skip_count  = s->skip_count;

        frame_end(s);

        sd = av_packet_new_side_data(pkt, AV_PKT_DATA_QUALITY_FACTOR,
                                     sizeof(int));
        if (!sd)
            return AVERROR(ENOMEM);
        *(int *)sd = s->current_picture.f->quality;

        if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);

        /* VBV compliance: if the frame is too large, raise lambda and
         * re-encode the whole picture */
        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;

            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->lmax) {
                s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + 1,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0;        // done in frame_start()
                /* undo side effects of encode_picture() before retrying */
                // done in encode_picture() so we must undo it
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding ||
                        s->codec_id == AV_CODEC_ID_H263P ||
                        s->codec_id == AV_CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base       = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                /* rewind all slice-thread bit writers */
                for (i = 0; i < context_count; i++) {
                    PutBitContext *pb = &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                }
                goto vbv_retry;
            }

            assert(s->avctx->rc_max_rate);
        }

        if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
            ff_write_pass1_stats(s);

        for (i = 0; i < 4; i++) {
            s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
            avctx->error[i] += s->current_picture_ptr->encoding_error[i];
        }

        if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
            assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
                   avctx->i_tex_bits + avctx->p_tex_bits ==
                       put_bits_count(&s->pb));
        flush_put_bits(&s->pb);
        s->frame_bits  = put_bits_count(&s->pb);

        /* if the VBV buffer would underflow, append stuffing bits/bytes */
        stuffing_count = ff_vbv_update(s, s->frame_bits);
        if (stuffing_count) {
            if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
                    stuffing_count + 50) {
                av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
            case AV_CODEC_ID_MPEG1VIDEO:
            case AV_CODEC_ID_MPEG2VIDEO:
                /* MPEG-1/2 stuffing: zero bytes */
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
            break;
            case AV_CODEC_ID_MPEG4:
                /* MPEG-4 stuffing: a stuffing startcode followed by 0xFF bytes */
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
            break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
            }
            flush_put_bits(&s->pb);
            s->frame_bits  = put_bits_count(&s->pb);
        }

        /* update mpeg1/2 vbv_delay for CBR */
        if (s->avctx->rc_max_rate                          &&
            s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
            s->out_format == FMT_MPEG1                     &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                s->avctx->rc_max_rate * 0xFFFFLL) {
            int vbv_delay, min_delay;
            double inbits  = s->avctx->rc_max_rate *
                             av_q2d(s->avctx->time_base);
            int    minbits = s->frame_bits - 8 *
                             (s->vbv_delay_ptr - s->pb.buf - 1);
            double bits    = s->rc_context.buffer_index + minbits - inbits;

            if (bits < 0)
                av_log(s->avctx, AV_LOG_ERROR,
                       "Internal error, negative bits\n");

            assert(s->repeat_first_field == 0);

            vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
                        s->avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            assert(vbv_delay < 0xFFFF);

            /* patch the 16-bit vbv_delay field already written into the
             * picture header (split across three bytes) */
            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay >> 13;
            s->vbv_delay_ptr[1]  = vbv_delay >> 5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay << 3;
            avctx->vbv_delay     = vbv_delay * 300;
        }
        s->total_bits     += s->frame_bits;
        avctx->frame_bits  = s->frame_bits;

        /* compute pts/dts: B-frame reordering delays dts by one frame */
        pkt->pts = s->current_picture.f->pts;
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->current_picture.f->coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;
        if (s->current_picture.f->key_frame)
            pkt->flags |= AV_PKT_FLAG_KEY;
        if (s->mb_info)
            av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
    } else {
        s->frame_bits = 0;
    }
    assert((s->frame_bits & 7) == 0);

    pkt->size = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
1816
1817 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1818 int n, int threshold)
1819 {
1820 static const char tab[64] = {
1821 3, 2, 2, 1, 1, 1, 1, 1,
1822 1, 1, 1, 1, 1, 1, 1, 1,
1823 1, 1, 1, 1, 1, 1, 1, 1,
1824 0, 0, 0, 0, 0, 0, 0, 0,
1825 0, 0, 0, 0, 0, 0, 0, 0,
1826 0, 0, 0, 0, 0, 0, 0, 0,
1827 0, 0, 0, 0, 0, 0, 0, 0,
1828 0, 0, 0, 0, 0, 0, 0, 0
1829 };
1830 int score = 0;
1831 int run = 0;
1832 int i;
1833 int16_t *block = s->block[n];
1834 const int last_index = s->block_last_index[n];
1835 int skip_dc;
1836
1837 if (threshold < 0) {
1838 skip_dc = 0;
1839 threshold = -threshold;
1840 } else
1841 skip_dc = 1;
1842
1843 /* Are all we could set to zero already zero? */
1844 if (last_index <= skip_dc - 1)
1845 return;
1846
1847 for (i = 0; i <= last_index; i++) {
1848 const int j = s->intra_scantable.permutated[i];
1849 const int level = FFABS(block[j]);
1850 if (level == 1) {
1851 if (skip_dc && i == 0)
1852 continue;
1853 score += tab[run];
1854 run = 0;
1855 } else if (level > 1) {
1856 return;
1857 } else {
1858 run++;
1859 }
1860 }
1861 if (score >= threshold)
1862 return;
1863 for (i = skip_dc; i <= last_index; i++) {
1864 const int j = s->intra_scantable.permutated[i];
1865 block[j] = 0;
1866 }
1867 if (block[0])
1868 s->block_last_index[n] = 0;
1869 else
1870 s->block_last_index[n] = -1;
1871 }
1872
1873 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1874 int last_index)
1875 {
1876 int i;
1877 const int maxlevel = s->max_qcoeff;
1878 const int minlevel = s->min_qcoeff;
1879 int overflow = 0;
1880
1881 if (s->mb_intra) {
1882 i = 1; // skip clipping of intra dc
1883 } else
1884 i = 0;
1885
1886 for (; i <= last_index; i++) {
1887 const int j = s->intra_scantable.permutated[i];
1888 int level = block[j];
1889
1890 if (level > maxlevel) {
1891 level = maxlevel;
1892 overflow++;
1893 } else if (level < minlevel) {
1894 level = minlevel;
1895 overflow++;
1896 }
1897
1898 block[j] = level;
1899 }
1900
1901 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1902 av_log(s->avctx, AV_LOG_INFO,
1903 "warning, clipping %d dct coefficients to %d..%d\n",
1904 overflow, minlevel, maxlevel);
1905 }
1906
/**
 * Compute a per-coefficient visual weight for an 8x8 pixel block: for each
 * pixel, a local-variance-like measure over its 3x3 neighbourhood (clipped
 * to the block) scaled by 36 and divided by the neighbourhood size.
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int row, col;

    // FIXME optimize
    for (row = 0; row < 8; row++) {
        for (col = 0; col < 8; col++) {
            int sum = 0, sqr = 0, count = 0;
            int r2, c2;

            /* 3x3 neighbourhood around (col,row), clipped to the 8x8 block */
            for (r2 = FFMAX(row - 1, 0); r2 < FFMIN(8, row + 2); r2++) {
                for (c2 = FFMAX(col - 1, 0); c2 < FFMIN(8, col + 2); c2++) {
                    const int v = ptr[c2 + r2 * stride];

                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }

            /* 36 * sqrt(count*E[v^2] - E[v]^2-style term) / count */
            weight[col + 8 * row] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
1930
1931 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1932 int motion_x, int motion_y,
1933 int mb_block_height,
1934 int mb_block_count)
1935 {
1936 int16_t weight[8][64];
1937 int16_t orig[8][64];
1938 const int mb_x = s->mb_x;
1939 const int mb_y = s->mb_y;
1940 int i;
1941 int skip_dct[8];
1942 int dct_offset = s->linesize * 8; // default for progressive frames
1943 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1944 ptrdiff_t wrap_y, wrap_c;
1945
1946 for (i = 0; i < mb_block_count; i++)
1947 skip_dct[i] = s->skipdct;
1948
1949 if (s->adaptive_quant) {
1950 const int last_qp = s->qscale;
1951 const int mb_xy = mb_x + mb_y * s->mb_stride;
1952
1953 s->lambda = s->lambda_table[mb_xy];
1954 update_qscale(s);
1955
1956 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1957 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1958 s->dquant = s->qscale - last_qp;
1959
1960 if (s->out_format == FMT_H263) {
1961 s->dquant = av_clip(s->dquant, -2, 2);
1962
1963 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1964 if (!s->mb_intra) {
1965 if (s->pict_type == AV_PICTURE_TYPE_B) {
1966 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1967 s->dquant = 0;
1968 }
1969 if (s->mv_type == MV_TYPE_8X8)
1970 s->dquant = 0;
1971 }
1972 }
1973 }
1974 }
1975 ff_set_qscale(s, last_qp + s->dquant);
1976 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1977 ff_set_qscale(s, s->qscale + s->dquant);
1978
1979 wrap_y = s->linesize;
1980 wrap_c = s->uvlinesize;
1981 ptr_y = s->new_picture.f->data[0] +
1982 (mb_y * 16 * wrap_y) + mb_x * 16;
1983 ptr_cb = s->new_picture.f->data[1] +
1984 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1985 ptr_cr = s->new_picture.f->data[2] +
1986 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1987
1988 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1989 uint8_t *ebuf = s->sc.edge_emu_buffer + 32;
1990 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
1991 wrap_y, wrap_y,
1992 16, 16, mb_x * 16, mb_y * 16,
1993 s->width, s->height);
1994 ptr_y = ebuf;
1995 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
1996 wrap_c, wrap_c,
1997 8, mb_block_height, mb_x * 8, mb_y * 8,
1998 s->width >> 1, s->height >> 1);
1999 ptr_cb = ebuf + 18 * wrap_y;
2000 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
2001 wrap_c, wrap_c,
2002 8, mb_block_height, mb_x * 8, mb_y * 8,
2003 s->width >> 1, s->height >> 1);
2004 ptr_cr = ebuf + 18 * wrap_y + 8;
2005 }
2006
2007 if (s->mb_intra) {
2008 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2009 int progressive_score, interlaced_score;
2010
2011 s->interlaced_dct = 0;
2012 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2013 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2014 NULL, wrap_y, 8) - 400;
2015
2016 if (progressive_score > 0) {
2017 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2018 NULL, wrap_y * 2, 8) +
2019 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2020 NULL, wrap_y * 2, 8);
2021 if (progressive_score > interlaced_score) {
2022 s->interlaced_dct = 1;
2023
2024 dct_offset = wrap_y;
2025 wrap_y <<= 1;
2026 if (s->chroma_format == CHROMA_422)
2027 wrap_c <<= 1;
2028 }
2029 }
2030 }
2031
2032 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2033 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2034 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2035 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2036
2037 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2038 skip_dct[4] = 1;
2039 skip_dct[5] = 1;
2040 } else {
2041 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2042 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2043 if (!s->chroma_y_shift) { /* 422 */
2044 s->pdsp.get_pixels(s->block[6],
2045 ptr_cb + (dct_offset >> 1), wrap_c);
2046 s->pdsp.get_pixels(s->block[7],
2047 ptr_cr + (dct_offset >> 1), wrap_c);
2048 }
2049 }
2050 } else {
2051 op_pixels_func (*op_pix)[4];
2052 qpel_mc_func (*op_qpix)[16];
2053 uint8_t *dest_y, *dest_cb, *dest_cr;
2054
2055 dest_y = s->dest[0];
2056 dest_cb = s->dest[1];
2057 dest_cr = s->dest[2];
2058
2059 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2060 op_pix = s->hdsp.put_pixels_tab;
2061 op_qpix = s->qdsp.put_qpel_pixels_tab;
2062 } else {
2063 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2064 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2065 }
2066
2067 if (s->mv_dir & MV_DIR_FORWARD) {
2068 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2069 s->last_picture.f->data,
2070 op_pix, op_qpix);
2071 op_pix = s->hdsp.avg_pixels_tab;
2072 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2073 }
2074 if (s->mv_dir & MV_DIR_BACKWARD) {
2075 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2076 s->next_picture.f->data,
2077 op_pix, op_qpix);
2078 }
2079
2080 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2081 int progressive_score, interlaced_score;
2082
2083 s->interlaced_dct = 0;
2084 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2085 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2086 ptr_y + wrap_y * 8,
2087 wrap_y, 8) - 400;
2088
2089 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2090 progressive_score -= 400;
2091
2092 if (progressive_score > 0) {
2093 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2094 wrap_y * 2, 8) +
2095 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2096 ptr_y + wrap_y,
2097 wrap_y * 2, 8);
2098
2099 if (progressive_score > interlaced_score) {
2100 s->interlaced_dct = 1;
2101
2102 dct_offset = wrap_y;
2103 wrap_y <<= 1;
2104 if (s->chroma_format == CHROMA_422)
2105 wrap_c <<= 1;
2106 }
2107 }
2108 }
2109
2110 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2111 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2112 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2113 dest_y + dct_offset, wrap_y);
2114 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2115 dest_y + dct_offset + 8, wrap_y);
2116
2117 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2118 skip_dct[4] = 1;
2119 skip_dct[5] = 1;
2120 } else {
2121 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2122 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2123 if (!s->chroma_y_shift) { /* 422 */
2124 s->pdsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
2125 dest_cb + (dct_offset >> 1), wrap_c);
2126 s->pdsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
2127 dest_cr + (dct_offset >> 1), wrap_c);
2128 }
2129 }
2130 /* pre quantization */
2131 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2132 2 * s->qscale * s->qscale) {
2133 // FIXME optimize
2134 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2135 skip_dct[0] = 1;
2136 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2137 skip_dct[1] = 1;
2138 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2139 wrap_y, 8) < 20 * s->qscale)
2140 skip_dct[2] = 1;
2141 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2142 wrap_y, 8) < 20 * s->qscale)
2143 skip_dct[3] = 1;
2144 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2145 skip_dct[4] = 1;
2146 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2147 skip_dct[5] = 1;
2148 if (!s->chroma_y_shift) { /* 422 */
2149 if (s->mecc.sad[1](NULL, ptr_cb + (dct_offset >> 1),
2150 dest_cb + (dct_offset >> 1),
2151 wrap_c, 8) < 20 * s->qscale)
2152 skip_dct[6] = 1;
2153 if (s->mecc.sad[1](NULL, ptr_cr + (dct_offset >> 1),
2154 dest_cr + (dct_offset >> 1),
2155 wrap_c, 8) < 20 * s->qscale)
2156 skip_dct[7] = 1;
2157 }
2158 }
2159 }
2160
2161 if (s->quantizer_noise_shaping) {
2162 if (!skip_dct[0])
2163 get_visual_weight(weight[0], ptr_y , wrap_y);
2164 if (!skip_dct[1])
2165 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2166 if (!skip_dct[2])
2167 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2168 if (!skip_dct[3])
2169 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2170 if (!skip_dct[4])
2171 get_visual_weight(weight[4], ptr_cb , wrap_c);
2172 if (!skip_dct[5])
2173 get_visual_weight(weight[5], ptr_cr , wrap_c);
2174 if (!s->chroma_y_shift) { /* 422 */
2175 if (!skip_dct[6])
2176 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
2177 wrap_c);
2178 if (!skip_dct[7])
2179 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
2180 wrap_c);
2181 }
2182 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2183 }
2184
2185 /* DCT & quantize */
2186 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
2187 {
2188 for (i = 0; i < mb_block_count; i++) {
2189 if (!skip_dct[i]) {
2190 int overflow;
2191 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2192 // FIXME we could decide to change to quantizer instead of
2193 // clipping
2194 // JS: I don't think that would be a good idea it could lower
2195 // quality instead of improve it. Just INTRADC clipping
2196 // deserves changes in quantizer
2197 if (overflow)
2198 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2199 } else
2200 s->block_last_index[i] = -1;
2201 }
2202 if (s->quantizer_noise_shaping) {
2203 for (i = 0; i < mb_block_count; i++) {
2204 if (!skip_dct[i]) {
2205 s->block_last_index[i] =
2206 dct_quantize_refine(s, s->block[i], weight[i],
2207 orig[i], i, s->qscale);
2208 }
2209 }
2210 }
2211
2212 if (s->luma_elim_threshold && !s->mb_intra)
2213 for (i = 0; i < 4; i++)
2214 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2215 if (s->chroma_elim_threshold && !s->mb_intra)
2216 for (i = 4; i < mb_block_count; i++)
2217 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2218
2219 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2220 for (i = 0; i < mb_block_count; i++) {
2221 if (s->block_last_index[i] == -1)
2222 s->coded_score[i] = INT_MAX / 256;
2223 }
2224 }
2225 }
2226
2227 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2228 s->block_last_index[4] =
2229 s->block_last_index[5] = 0;
2230 s->block[4][0] =
2231 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2232 }
2233
2234 // non c quantize code returns incorrect block_last_index FIXME
2235 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2236 for (i = 0; i < mb_block_count; i++) {
2237 int j;
2238 if (s->block_last_index[i] > 0) {
2239 for (j = 63; j > 0; j--) {
2240 if (s->block[i][s->intra_scantable.permutated[j]])
2241 break;
2242 }
2243 s->block_last_index[i] = j;
2244 }
2245 }
2246 }
2247
2248 /* huffman encode */
2249 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2250 case AV_CODEC_ID_MPEG1VIDEO:
2251 case AV_CODEC_ID_MPEG2VIDEO:
2252 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2253 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2254 break;
2255 case AV_CODEC_ID_MPEG4:
2256 if (CONFIG_MPEG4_ENCODER)
2257 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2258 break;
2259 case AV_CODEC_ID_MSMPEG4V2:
2260 case AV_CODEC_ID_MSMPEG4V3:
2261 case AV_CODEC_ID_WMV1:
2262 if (CONFIG_MSMPEG4_ENCODER)
2263 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2264 break;
2265 case AV_CODEC_ID_WMV2:
2266 if (CONFIG_WMV2_ENCODER)
2267 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2268 break;
2269 case AV_CODEC_ID_H261:
2270 if (CONFIG_H261_ENCODER)
2271 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2272 break;
2273 case AV_CODEC_ID_H263:
2274 case AV_CODEC_ID_H263P:
2275 case AV_CODEC_ID_FLV1:
2276 case AV_CODEC_ID_RV10:
2277 case AV_CODEC_ID_RV20:
2278 if (CONFIG_H263_ENCODER)
2279 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2280 break;
2281 case AV_CODEC_ID_MJPEG:
2282 if (CONFIG_MJPEG_ENCODER)
2283 ff_mjpeg_encode_mb(s, s->block);
2284 break;
2285 default:
2286 assert(0);
2287 }
2288 }
2289
2290 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2291 {
2292 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2293 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
2294 }
2295
2296 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2297 int i;
2298
2299 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2300
2301 /* mpeg1 */
2302 d->mb_skip_run= s->mb_skip_run;
2303 for(i=0; i<3; i++)
2304 d->last_dc[i] = s->last_dc[i];
2305
2306 /* statistics */
2307 d->mv_bits= s->mv_bits;
2308 d->i_tex_bits= s->i_tex_bits;
2309 d->p_tex_bits= s->p_tex_bits;
2310 d->i_count= s->i_count;
2311 d->f_count= s->f_count;
2312 d->b_count= s->b_count;
2313 d->skip_count= s->skip_count;
2314 d->misc_bits= s->misc_bits;
2315 d->last_bits= 0;
2316
2317 d->mb_skipped= 0;
2318 d->qscale= s->qscale;
2319 d->dquant= s->dquant;
2320
2321 d->esc3_level_length= s->esc3_level_length;
2322 }
2323
2324 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2325 int i;
2326
2327 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2328 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2329
2330 /* mpeg1 */
2331 d->mb_skip_run= s->mb_skip_run;
2332 for(i=0; i<3; i++)
2333 d->last_dc[i] = s->last_dc[i];
2334
2335 /* statistics */
2336 d->mv_bits= s->mv_bits;
2337 d->i_tex_bits= s->i_tex_bits;
2338 d->p_tex_bits= s->p_tex_bits;
2339 d->i_count= s->i_count;
2340 d->f_count= s->f_count;
2341 d->b_count= s->b_count;
2342 d->skip_count= s->skip_count;
2343 d->misc_bits= s->misc_bits;
2344
2345 d->mb_intra= s->mb_intra;
2346 d->mb_skipped= s->mb_skipped;
2347 d->mv_type= s->mv_type;
2348 d->mv_dir= s->mv_dir;
2349 d->pb= s->pb;
2350 if(s->data_partitioning){
2351 d->pb2= s->pb2;
2352 d->tex_pb= s->tex_pb;
2353 }
2354 d->block= s->block;
2355 for(i=0; i<8; i++)
2356 d->block_last_index[i]= s->block_last_index[i];
2357 d->interlaced_dct= s->interlaced_dct;
2358 d->qscale= s->qscale;
2359
2360 d->esc3_level_length= s->esc3_level_length;
2361 }
2362
2363 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2364 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2365 int *dmin, int *next_block, int motion_x, int motion_y)
2366 {
2367 int score;
2368 uint8_t *dest_backup[3];
2369
2370 copy_context_before_encode(s, backup, type);
2371
2372 s->block= s->blocks[*next_block];
2373 s->pb= pb[*next_block];
2374 if(s->data_partitioning){
2375 s->pb2 = pb2 [*next_block];
2376 s->tex_pb= tex_pb[*next_block];
2377 }
2378
2379 if(*next_block){
2380 memcpy(dest_backup, s->dest, sizeof(s->dest));
2381 s->dest[0] = s->sc.rd_scratchpad;
2382 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2383 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2384 assert(s->linesize >= 32); //FIXME
2385 }
2386
2387 encode_mb(s, motion_x, motion_y);
2388
2389 score= put_bits_count(&s->pb);
2390 if(s->data_partitioning){
2391 score+= put_bits_count(&s->pb2);
2392 score+= put_bits_count(&s->tex_pb);
2393 }
2394
2395 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2396 ff_mpv_decode_mb(s, s->block);
2397
2398 score *= s->lambda2;
2399 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2400 }
2401
2402 if(*next_block){
2403 memcpy(s->dest, dest_backup, sizeof(s->dest));
2404 }
2405
2406 if(score<*dmin){
2407 *dmin= score;
2408 *next_block^=1;
2409
2410 copy_context_after_encode(best, s, type);
2411 }
2412 }
2413
2414 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2415 uint32_t *sq = ff_square_tab + 256;
2416 int acc=0;
2417 int x,y;
2418
2419 if(w==16 && h==16)
2420 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2421 else if(w==8 && h==8)
2422 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2423
2424 for(y=0; y<h; y++){
2425 for(x=0; x<w; x++){
2426 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2427 }
2428 }
2429
2430 assert(acc>=0);
2431
2432 return acc;
2433 }
2434
2435 static int sse_mb(MpegEncContext *s){
2436 int w= 16;
2437 int h= 16;
2438
2439 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2440 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2441
2442 if(w==16 && h==16)
2443 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2444 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2445 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2446 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2447 }else{
2448 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2449 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2450 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2451 }
2452 else
2453 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2454 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2455 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2456 }
2457
2458 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2459 MpegEncContext *s= *(void**)arg;
2460
2461
2462 s->me.pre_pass=1;
2463 s->me.dia_size= s->avctx->pre_dia_size;
2464 s->first_slice_line=1;
2465 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2466 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2467 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2468 }
2469 s->first_slice_line=0;
2470 }
2471
2472 s->me.pre_pass=0;
2473
2474 return 0;
2475 }
2476
2477 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2478 MpegEncContext *s= *(void**)arg;
2479
2480 s->me.dia_size= s->avctx->dia_size;
2481 s->first_slice_line=1;
2482 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2483 s->mb_x=0; //for block init below
2484 ff_init_block_index(s);
2485 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2486 s->block_index[0]+=2;
2487 s->block_index[1]+=2;
2488 s->block_index[2]+=2;
2489 s->block_index[3]+=2;
2490
2491 /* compute motion vector & mb_type and store in context */
2492 if(s->pict_type==AV_PICTURE_TYPE_B)
2493 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2494 else
2495 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2496 }
2497 s->first_slice_line=0;
2498 }
2499 return 0;
2500 }
2501
2502 static int mb_var_thread(AVCodecContext *c, void *arg){
2503 MpegEncContext *s= *(void**)arg;
2504 int mb_x, mb_y;
2505
2506 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2507 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2508 int xx = mb_x * 16;
2509 int yy = mb_y * 16;
2510 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2511 int varc;
2512 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2513
2514 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2515 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2516
2517 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2518 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2519 s->me.mb_var_sum_temp += varc;
2520 }
2521 }
2522 return 0;
2523 }
2524
2525 static void write_slice_end(MpegEncContext *s){
2526 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2527 if(s->partitioned_frame){
2528 ff_mpeg4_merge_partitions(s);
2529 }
2530
2531 ff_mpeg4_stuffing(&s->pb);
2532 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2533 ff_mjpeg_encode_stuffing(&s->pb);
2534 }
2535
2536 avpriv_align_put_bits(&s->pb);
2537 flush_put_bits(&s->pb);
2538
2539 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2540 s->misc_bits+= get_bits_diff(s);
2541 }
2542
2543 static void write_mb_info(MpegEncContext *s)
2544 {
2545 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2546 int offset = put_bits_count(&s->pb);
2547 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2548 int gobn = s->mb_y / s->gob_index;
2549 int pred_x, pred_y;
2550 if (CONFIG_H263_ENCODER)
2551 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2552 bytestream_put_le32(&ptr, offset);
2553 bytestream_put_byte(&ptr, s->qscale);
2554 bytestream_put_byte(&ptr, gobn);
2555 bytestream_put_le16(&ptr, mba);
2556 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2557 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2558 /* 4MV not implemented */
2559 bytestream_put_byte(&ptr, 0); /* hmv2 */
2560 bytestream_put_byte(&ptr, 0); /* vmv2 */
2561 }
2562
2563 static void update_mb_info(MpegEncContext *s, int startcode)
2564 {
2565 if (!s->mb_info)
2566 return;
2567 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2568 s->mb_info_size += 12;
2569 s->prev_mb_info = s->last_mb_info;
2570 }
2571 if (startcode) {
2572 s->prev_mb_info = put_bits_count(&s->pb)/8;
2573 /* This might have incremented mb_info_size above, and we return without
2574 * actually writing any info into that slot yet. But in that case,
2575 * this will be called again at the start of the after writing the
2576 * start code, actually writing the mb info. */
2577 return;
2578 }
2579
2580 s->last_mb_info = put_bits_count(&s->pb)/8;
2581 if (!s->mb_info_size)
2582 s->mb_info_size += 12;
2583 write_mb_info(s);
2584 }
2585
2586 static int encode_thread(AVCodecContext *c, void *arg){
2587 MpegEncContext *s= *(void**)arg;
2588 int mb_x, mb_y, pdif = 0;
2589 int chr_h= 16>>s->chroma_y_shift;
2590 int i, j;
2591 MpegEncContext best_s = { 0 }, backup_s;
2592 uint8_t bit_buf[2][MAX_MB_BYTES];
2593 uint8_t bit_buf2[2][MAX_MB_BYTES];
2594 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2595 PutBitContext pb[2], pb2[2], tex_pb[2];
2596
2597 for(i=0; i<2; i++){
2598 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2599 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2600 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2601 }
2602
2603 s->last_bits= put_bits_count(&s->pb);
2604 s->mv_bits=0;
2605 s->misc_bits=0;
2606 s->i_tex_bits=0;
2607 s->p_tex_bits=0;
2608 s->i_count=0;
2609 s->f_count=0;
2610 s->b_count=0;
2611 s->skip_count=0;
2612
2613 for(i=0; i<3; i++){
2614 /* init last dc values */
2615 /* note: quant matrix value (8) is implied here */
2616 s->last_dc[i] = 128 << s->intra_dc_precision;
2617
2618 s->current_picture.encoding_error[i] = 0;
2619 }
2620 s->mb_skip_run = 0;
2621 memset(s->last_mv, 0, sizeof(s->last_mv));
2622
2623 s->last_mv_dir = 0;
2624
2625 switch(s->codec_id){
2626 case AV_CODEC_ID_H263:
2627 case AV_CODEC_ID_H263P:
2628 case AV_CODEC_ID_FLV1:
2629 if (CONFIG_H263_ENCODER)
2630 s->gob_index = H263_GOB_HEIGHT(s->height);
2631 break;
2632 case AV_CODEC_ID_MPEG4:
2633 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2634 ff_mpeg4_init_partitions(s);
2635 break;
2636 }
2637
2638 s->resync_mb_x=0;
2639 s->resync_mb_y=0;
2640 s->first_slice_line = 1;
2641 s->ptr_lastgob = s->pb.buf;
2642 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2643 s->mb_x=0;
2644 s->mb_y= mb_y;
2645
2646 ff_set_qscale(s, s->qscale);
2647 ff_init_block_index(s);
2648
2649 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2650 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2651 int mb_type= s->mb_type[xy];
2652 // int d;
2653 int dmin= INT_MAX;
2654 int dir;
2655
2656 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2657 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2658 return -1;
2659 }
2660 if(s->data_partitioning){
2661 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2662 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2663 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2664 return -1;
2665 }
2666 }
2667
2668 s->mb_x = mb_x;
2669 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2670 ff_update_block_index(s);
2671
2672 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2673 ff_h261_reorder_mb_index(s);
2674 xy= s->mb_y*s->mb_stride + s->mb_x;
2675 mb_type= s->mb_type[xy];
2676 }
2677
2678 /* write gob / video packet header */
2679 if(s->rtp_mode){
2680 int current_packet_size, is_gob_start;
2681
2682 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2683
2684 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2685
2686 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2687
2688 switch(s->codec_id){
2689 case AV_CODEC_ID_H263:
2690 case AV_CODEC_ID_H263P:
2691 if(!s->h263_slice_structured)
2692 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2693 break;
2694 case AV_CODEC_ID_MPEG2VIDEO:
2695 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2696 case AV_CODEC_ID_MPEG1VIDEO:
2697 if(s->mb_skip_run) is_gob_start=0;
2698 break;
2699 }
2700
2701 if(is_gob_start){
2702 if(s->start_mb_y != mb_y || mb_x!=0){
2703 write_slice_end(s);
2704
2705 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2706 ff_mpeg4_init_partitions(s);
2707 }
2708 }
2709
2710 assert((put_bits_count(&s->pb)&7) == 0);
2711 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2712
2713 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2714 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2715 int d = 100 / s->error_rate;
2716 if(r % d == 0){
2717 current_packet_size=0;
2718 s->pb.buf_ptr= s->ptr_lastgob;
2719 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2720 }
2721 }
2722
2723 if (s->avctx->rtp_callback){
2724 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2725 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2726 }
2727 update_mb_info(s, 1);
2728
2729 switch(s->codec_id){
2730 case AV_CODEC_ID_MPEG4:
2731 if (CONFIG_MPEG4_ENCODER) {
2732 ff_mpeg4_encode_video_packet_header(s);
2733 ff_mpeg4_clean_buffers(s);
2734 }
2735 break;
2736 case AV_CODEC_ID_MPEG1VIDEO:
2737 case AV_CODEC_ID_MPEG2VIDEO:
2738 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2739 ff_mpeg1_encode_slice_header(s);
2740 ff_mpeg1_clean_buffers(s);
2741 }
2742 break;
2743 case AV_CODEC_ID_H263:
2744 case AV_CODEC_ID_H263P:
2745 if (CONFIG_H263_ENCODER)
2746 ff_h263_encode_gob_header(s, mb_y);
2747 break;
2748 }
2749
2750 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
2751 int bits= put_bits_count(&s->pb);
2752 s->misc_bits+= bits - s->last_bits;
2753 s->last_bits= bits;
2754 }
2755
2756 s->ptr_lastgob += current_packet_size;
2757 s->first_slice_line=1;
2758 s->resync_mb_x=mb_x;
2759 s->resync_mb_y=mb_y;
2760 }
2761 }
2762
2763 if( (s->resync_mb_x == s->mb_x)
2764 && s->resync_mb_y+1 == s->mb_y){
2765 s->first_slice_line=0;
2766 }
2767
2768 s->mb_skipped=0;
2769 s->dquant=0; //only for QP_RD
2770
2771 update_mb_info(s, 0);
2772
2773 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2774 int next_block=0;
2775 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2776
2777 copy_context_before_encode(&backup_s, s, -1);
2778 backup_s.pb= s->pb;
2779 best_s.data_partitioning= s->data_partitioning;
2780 best_s.partitioned_frame= s->partitioned_frame;
2781 if(s->data_partitioning){
2782 backup_s.pb2= s->pb2;
2783 backup_s.tex_pb= s->tex_pb;
2784 }
2785
2786 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2787 s->mv_dir = MV_DIR_FORWARD;
2788 s->mv_type = MV_TYPE_16X16;
2789 s->mb_intra= 0;
2790 s->mv[0][0][0] = s->p_mv_table[xy][0];
2791 s->mv[0][0][1] = s->p_mv_table[xy][1];
2792 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2793 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2794 }
2795 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2796 s->mv_dir = MV_DIR_FORWARD;
2797 s->mv_type = MV_TYPE_FIELD;
2798 s->mb_intra= 0;
2799 for(i=0; i<2; i++){
2800 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2801 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2802 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2803 }
2804 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2805 &dmin, &next_block, 0, 0);
2806 }
2807 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2808 s->mv_dir = MV_DIR_FORWARD;
2809 s->mv_type = MV_TYPE_16X16;
2810 s->mb_intra= 0;
2811 s->mv[0][0][0] = 0;
2812 s->mv[0][0][1] = 0;
2813 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2814 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2815 }
2816 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2817 s->mv_dir = MV_DIR_FORWARD;
2818 s->mv_type = MV_TYPE_8X8;
2819 s->mb_intra= 0;
2820 for(i=0; i<4; i++){
2821 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2822 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2823 }
2824 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2825 &dmin, &next_block, 0, 0);
2826 }
2827 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2828 s->mv_dir = MV_DIR_FORWARD;
2829 s->mv_type = MV_TYPE_16X16;
2830 s->mb_intra= 0;
2831 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2832 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2833 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2834 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2835 }
2836 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2837 s->mv_dir = MV_DIR_BACKWARD;
2838 s->mv_type = MV_TYPE_16X16;
2839 s->mb_intra= 0;
2840 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2841 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2842 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2843 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2844 }
2845 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2846 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2847 s->mv_type = MV_TYPE_16X16;
2848 s->mb_intra= 0;
2849 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2850 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2851 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2852 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2853 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2854 &dmin, &next_block, 0, 0);
2855 }
2856 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2857 s->mv_dir = MV_DIR_FORWARD;
2858 s->mv_type = MV_TYPE_FIELD;
2859 s->mb_intra= 0;
2860 for(i=0; i<2; i++){
2861 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2862 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2863 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2864 }
2865 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2866 &dmin, &next_block, 0, 0);
2867 }
2868 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2869 s->mv_dir = MV_DIR_BACKWARD;
2870 s->mv_type = MV_TYPE_FIELD;
2871 s->mb_intra= 0;
2872