lavc: Move frame_skip_* to codec private options
[libav.git] / libavcodec / mpegvideo_enc.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include <stdint.h>
31
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timer.h"
38 #include "avcodec.h"
39 #include "dct.h"
40 #include "idctdsp.h"
41 #include "mpeg12.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
44 #include "h261.h"
45 #include "h263.h"
46 #include "h263data.h"
47 #include "mjpegenc_common.h"
48 #include "mathops.h"
49 #include "mpegutils.h"
50 #include "mjpegenc.h"
51 #include "msmpeg4.h"
52 #include "pixblockdsp.h"
53 #include "qpeldsp.h"
54 #include "faandct.h"
55 #include "thread.h"
56 #include "aandcttab.h"
57 #include "flv.h"
58 #include "mpeg4video.h"
59 #include "internal.h"
60 #include "bytestream.h"
61 #include "wmv2.h"
62 #include "rv10.h"
63 #include <limits.h>
64
65 #define QUANT_BIAS_SHIFT 8
66
67 #define QMAT_SHIFT_MMX 16
68 #define QMAT_SHIFT 22
69
70 static int encode_picture(MpegEncContext *s, int picture_number);
71 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
72 static int sse_mb(MpegEncContext *s);
73 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
74 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
75
76 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
77 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
78
79 const AVOption ff_mpv_generic_options[] = {
80 FF_MPV_COMMON_OPTS
81 { NULL },
82 };
83
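/* Build per-qscale fixed-point reciprocals of the quantization matrix so the
 * quantizer can replace divisions with multiply+shift. The scaling depends on
 * which forward DCT is in use (the AAN/ifast DCT folds ff_aanscales into the
 * factors); qmat16 is the 16-bit variant with its rounding bias stored
 * alongside. A warning is printed if the chosen shift could overflow. */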
84 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
85 uint16_t (*qmat16)[2][64],
86 const uint16_t *quant_matrix,
87 int bias, int qmin, int qmax, int intra)
88 {
89 FDCTDSPContext *fdsp = &s->fdsp;
90 int qscale;
91 int shift = 0;
92
93 for (qscale = qmin; qscale <= qmax; qscale++) {
94 int i;
95 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
96 #if CONFIG_FAANDCT
97 fdsp->fdct == ff_faandct ||
98 #endif /* CONFIG_FAANDCT */
99 fdsp->fdct == ff_jpeg_fdct_islow_10) {
100 for (i = 0; i < 64; i++) {
101 const int j = s->idsp.idct_permutation[i];
102 int64_t den = (int64_t) qscale * quant_matrix[j];
103 /* 16 <= qscale * quant_matrix[i] <= 7905, and this DCT applies no AAN
104 * scale factor, so with QMAT_SHIFT == 22:
105 * (1 << 22) / 16 >= (1 << 22) / (qscale * quant_matrix[i]) >= (1 << 22) / 7905
106 * i.e. 262144 >= qmat[qscale][i] >= 530,
107 * which safely fits in an int */
108
109 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
110 }
111 } else if (fdsp->fdct == ff_fdct_ifast) {
112 for (i = 0; i < 64; i++) {
113 const int j = s->idsp.idct_permutation[i];
114 int64_t den = ff_aanscales[i] * (int64_t) qscale * quant_matrix[j];
115 /* 16 <= qscale * quant_matrix[i] <= 7905
116 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
117 * 19952 <= x <= 249205026
118 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
119 * 3444240 >= (1 << 36) / (x) >= 275 */
120
121 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / den);
122 }
123 } else {
124 for (i = 0; i < 64; i++) {
125 const int j = s->idsp.idct_permutation[i];
126 int64_t den = (int64_t) qscale * quant_matrix[j];
127 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
128 * Assume x = qscale * quant_matrix[i]
129 * So 16 <= x <= 7905
130 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
131 * so 32768 >= (1 << 19) / (x) >= 67 */
132 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
133 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
134 // (qscale * quant_matrix[i]);
135 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / den;
136
137 if (qmat16[qscale][0][i] == 0 ||
138 qmat16[qscale][0][i] == 128 * 256)
139 qmat16[qscale][0][i] = 128 * 256 - 1;
140 qmat16[qscale][1][i] =
141 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
142 qmat16[qscale][0][i]);
143 }
144 }
145
146 for (i = intra; i < 64; i++) {
147 int64_t max = 8191;
148 if (fdsp->fdct == ff_fdct_ifast) {
149 max = (8191LL * ff_aanscales[i]) >> 14;
150 }
151 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
152 shift++;
153 }
154 }
155 }
156 if (shift) {
157 av_log(NULL, AV_LOG_INFO,
158 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
159 QMAT_SHIFT - shift);
160 }
161 }
162
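/* Derive qscale from the current lambda (139 / 2^14 is roughly
 * 1 / FF_QP2LAMBDA), clip it to the configured qmin..qmax range and
 * refresh lambda2 accordingly. */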
163 static inline void update_qscale(MpegEncContext *s)
164 {
165 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
166 (FF_LAMBDA_SHIFT + 7);
167 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
168
169 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
170 FF_LAMBDA_SHIFT;
171 }
172
173 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
174 {
175 int i;
176
177 if (matrix) {
178 put_bits(pb, 1, 1);
179 for (i = 0; i < 64; i++) {
180 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
181 }
182 } else
183 put_bits(pb, 1, 0);
184 }
185
186 /**
187 * init s->current_picture.qscale_table from s->lambda_table
188 */
189 void ff_init_qscale_tab(MpegEncContext *s)
190 {
191 int8_t * const qscale_table = s->current_picture.qscale_table;
192 int i;
193
194 for (i = 0; i < s->mb_num; i++) {
195 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
196 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
197 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
198 s->avctx->qmax);
199 }
200 }
201
202 static void update_duplicate_context_after_me(MpegEncContext *dst,
203 MpegEncContext *src)
204 {
205 #define COPY(a) dst->a= src->a
206 COPY(pict_type);
207 COPY(current_picture);
208 COPY(f_code);
209 COPY(b_code);
210 COPY(qscale);
211 COPY(lambda);
212 COPY(lambda2);
213 COPY(picture_in_gop_number);
214 COPY(gop_picture_number);
215 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
216 COPY(progressive_frame); // FIXME don't set in encode_header
217 COPY(partitioned_frame); // FIXME don't set in encode_header
218 #undef COPY
219 }
220
221 /**
222 * Set the given MpegEncContext to defaults for encoding.
223 * the changed fields will not depend upon the prior state of the MpegEncContext.
224 */
225 static void mpv_encode_defaults(MpegEncContext *s)
226 {
227 int i;
228 ff_mpv_common_defaults(s);
229
230 for (i = -16; i < 16; i++) {
231 default_fcode_tab[i + MAX_MV] = 1;
232 }
233 s->me.mv_penalty = default_mv_penalty;
234 s->fcode_tab = default_fcode_tab;
235
236 s->input_picture_number = 0;
237 s->picture_in_gop_number = 0;
238 }
239
240 /* init video encoder */
241 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
242 {
243 MpegEncContext *s = avctx->priv_data;
244 AVCPBProperties *cpb_props;
245 int i, ret, format_supported;
246
247 mpv_encode_defaults(s);
248
249 switch (avctx->codec_id) {
250 case AV_CODEC_ID_MPEG2VIDEO:
251 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
252 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
253 av_log(avctx, AV_LOG_ERROR,
254 "only YUV420 and YUV422 are supported\n");
255 return -1;
256 }
257 break;
258 case AV_CODEC_ID_MJPEG:
259 format_supported = 0;
260 /* JPEG color space */
261 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
262 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
263 (avctx->color_range == AVCOL_RANGE_JPEG &&
264 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
265 avctx->pix_fmt == AV_PIX_FMT_YUV422P)))
266 format_supported = 1;
267 /* MPEG color space */
268 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
269 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
270 avctx->pix_fmt == AV_PIX_FMT_YUV422P))
271 format_supported = 1;
272
273 if (!format_supported) {
274 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
275 return -1;
276 }
277 break;
278 default:
279 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
280 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
281 return -1;
282 }
283 }
284
285 switch (avctx->pix_fmt) {
286 case AV_PIX_FMT_YUVJ422P:
287 case AV_PIX_FMT_YUV422P:
288 s->chroma_format = CHROMA_422;
289 break;
290 case AV_PIX_FMT_YUVJ420P:
291 case AV_PIX_FMT_YUV420P:
292 default:
293 s->chroma_format = CHROMA_420;
294 break;
295 }
296
297 s->bit_rate = avctx->bit_rate;
298 s->width = avctx->width;
299 s->height = avctx->height;
300 if (avctx->gop_size > 600 &&
301 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
302 av_log(avctx, AV_LOG_ERROR,
303 "Warning keyframe interval too large! reducing it ...\n");
304 avctx->gop_size = 600;
305 }
306 s->gop_size = avctx->gop_size;
307 s->avctx = avctx;
308 if (avctx->max_b_frames > MAX_B_FRAMES) {
309 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
310 "is %d.\n", MAX_B_FRAMES);
311 }
312 s->max_b_frames = avctx->max_b_frames;
313 s->codec_id = avctx->codec->id;
314 s->strict_std_compliance = avctx->strict_std_compliance;
315 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
316 s->mpeg_quant = avctx->mpeg_quant;
317 s->rtp_mode = !!avctx->rtp_payload_size;
318 s->intra_dc_precision = avctx->intra_dc_precision;
319 s->user_specified_pts = AV_NOPTS_VALUE;
320
321 if (s->gop_size <= 1) {
322 s->intra_only = 1;
323 s->gop_size = 12;
324 } else {
325 s->intra_only = 0;
326 }
327
328 #if FF_API_MOTION_EST
329 FF_DISABLE_DEPRECATION_WARNINGS
330 s->me_method = avctx->me_method;
331 FF_ENABLE_DEPRECATION_WARNINGS
332 #endif
333
334 /* Fixed QSCALE */
335 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
336
337 #if FF_API_MPV_OPT
338 FF_DISABLE_DEPRECATION_WARNINGS
339 if (avctx->border_masking != 0.0)
340 s->border_masking = avctx->border_masking;
341 FF_ENABLE_DEPRECATION_WARNINGS
342 #endif
343
344 s->adaptive_quant = (s->avctx->lumi_masking ||
345 s->avctx->dark_masking ||
346 s->avctx->temporal_cplx_masking ||
347 s->avctx->spatial_cplx_masking ||
348 s->avctx->p_masking ||
349 s->border_masking ||
350 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
351 !s->fixed_qscale;
352
353 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
354
355 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
356 av_log(avctx, AV_LOG_ERROR,
357 "a vbv buffer size is needed, "
358 "for encoding with a maximum bitrate\n");
359 return -1;
360 }
361
362 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
363 av_log(avctx, AV_LOG_INFO,
364 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
365 }
366
367 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
368 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
369 return -1;
370 }
371
372 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
373 av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
374 return -1;
375 }
376
377 if (avctx->rc_max_rate &&
378 avctx->rc_max_rate == avctx->bit_rate &&
379 avctx->rc_max_rate != avctx->rc_min_rate) {
380 av_log(avctx, AV_LOG_INFO,
381 "impossible bitrate constraints, this will fail\n");
382 }
383
384 if (avctx->rc_buffer_size &&
385 avctx->bit_rate * (int64_t)avctx->time_base.num >
386 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
387 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
388 return -1;
389 }
390
391 if (!s->fixed_qscale &&
392 avctx->bit_rate * av_q2d(avctx->time_base) >
393 avctx->bit_rate_tolerance) {
394 av_log(avctx, AV_LOG_ERROR,
395 "bitrate tolerance too small for bitrate\n");
396 return -1;
397 }
398
399 if (s->avctx->rc_max_rate &&
400 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
401 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
402 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
403 90000LL * (avctx->rc_buffer_size - 1) >
404 s->avctx->rc_max_rate * 0xFFFFLL) {
405 av_log(avctx, AV_LOG_INFO,
406 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
407 "specified vbv buffer is too large for the given bitrate!\n");
408 }
409
410 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
411 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
412 s->codec_id != AV_CODEC_ID_FLV1) {
413 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
414 return -1;
415 }
416
417 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
418 av_log(avctx, AV_LOG_ERROR,
419 "OBMC is only supported with simple mb decision\n");
420 return -1;
421 }
422
423 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
424 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
425 return -1;
426 }
427
428 if (s->max_b_frames &&
429 s->codec_id != AV_CODEC_ID_MPEG4 &&
430 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
431 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
432 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
433 return -1;
434 }
435
436 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
437 s->codec_id == AV_CODEC_ID_H263 ||
438 s->codec_id == AV_CODEC_ID_H263P) &&
439 (avctx->sample_aspect_ratio.num > 255 ||
440 avctx->sample_aspect_ratio.den > 255)) {
441 av_log(avctx, AV_LOG_ERROR,
442 "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
443 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
444 return -1;
445 }
446
447 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
448 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
449 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
450 return -1;
451 }
452
453 // FIXME mpeg2 uses that too
454 if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
455 av_log(avctx, AV_LOG_ERROR,
456 "mpeg2 style quantization not supported by codec\n");
457 return -1;
458 }
459
460 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
461 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
462 return -1;
463 }
464
465 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
466 s->avctx->mb_decision != FF_MB_DECISION_RD) {
467 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
468 return -1;
469 }
470
471 if (s->avctx->scenechange_threshold < 1000000000 &&
472 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
473 av_log(avctx, AV_LOG_ERROR,
474 "closed gop with scene change detection are not supported yet, "
475 "set threshold to 1000000000\n");
476 return -1;
477 }
478
479 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
480 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
481 av_log(avctx, AV_LOG_ERROR,
482 "low delay forcing is only available for mpeg2\n");
483 return -1;
484 }
485 if (s->max_b_frames != 0) {
486 av_log(avctx, AV_LOG_ERROR,
487 "b frames cannot be used with low delay\n");
488 return -1;
489 }
490 }
491
492 if (s->q_scale_type == 1) {
493 if (avctx->qmax > 12) {
494 av_log(avctx, AV_LOG_ERROR,
495 "non linear quant only supports qmax <= 12 currently\n");
496 return -1;
497 }
498 }
499
500 if (avctx->slices > 1 &&
501 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
502 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
503 return AVERROR(EINVAL);
504 }
505
506 if (s->avctx->thread_count > 1 &&
507 s->codec_id != AV_CODEC_ID_MPEG4 &&
508 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
509 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
510 (s->codec_id != AV_CODEC_ID_H263P)) {
511 av_log(avctx, AV_LOG_ERROR,
512 "multi threaded encoding not supported by codec\n");
513 return -1;
514 }
515
516 if (s->avctx->thread_count < 1) {
517 av_log(avctx, AV_LOG_ERROR,
518 "automatic thread number detection not supported by codec,"
519 "patch welcome\n");
520 return -1;
521 }
522
523 if (!avctx->time_base.den || !avctx->time_base.num) {
524 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
525 return -1;
526 }
527
528 #if FF_API_PRIVATE_OPT
529 FF_DISABLE_DEPRECATION_WARNINGS
530 if (avctx->b_frame_strategy)
531 s->b_frame_strategy = avctx->b_frame_strategy;
532 if (avctx->b_sensitivity != 40)
533 s->b_sensitivity = avctx->b_sensitivity;
534 FF_ENABLE_DEPRECATION_WARNINGS
535 #endif
536
537 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
538 av_log(avctx, AV_LOG_INFO,
539 "notice: b_frame_strategy only affects the first pass\n");
540 s->b_frame_strategy = 0;
541 }
542
543 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
544 if (i > 1) {
545 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
546 avctx->time_base.den /= i;
547 avctx->time_base.num /= i;
548 //return -1;
549 }
550
551 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
552 s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
553 // (a + x * 3 / 8) / x
554 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
555 s->inter_quant_bias = 0;
556 } else {
557 s->intra_quant_bias = 0;
558 // (a - x / 4) / x
559 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
560 }
561
562 #if FF_API_QUANT_BIAS
563 FF_DISABLE_DEPRECATION_WARNINGS
564 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
565 s->intra_quant_bias = avctx->intra_quant_bias;
566 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
567 s->inter_quant_bias = avctx->inter_quant_bias;
568 FF_ENABLE_DEPRECATION_WARNINGS
569 #endif
570
571 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
572 s->avctx->time_base.den > (1 << 16) - 1) {
573 av_log(avctx, AV_LOG_ERROR,
574 "timebase %d/%d not supported by MPEG 4 standard, "
575 "the maximum admitted value for the timebase denominator "
576 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
577 (1 << 16) - 1);
578 return -1;
579 }
580 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
581
582 switch (avctx->codec->id) {
583 case AV_CODEC_ID_MPEG1VIDEO:
584 s->out_format = FMT_MPEG1;
585 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
586 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
587 break;
588 case AV_CODEC_ID_MPEG2VIDEO:
589 s->out_format = FMT_MPEG1;
590 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
591 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
592 s->rtp_mode = 1;
593 break;
594 case AV_CODEC_ID_MJPEG:
595 s->out_format = FMT_MJPEG;
596 s->intra_only = 1; /* force intra only for jpeg */
597 if (!CONFIG_MJPEG_ENCODER ||
598 ff_mjpeg_encode_init(s) < 0)
599 return -1;
600 avctx->delay = 0;
601 s->low_delay = 1;
602 break;
603 case AV_CODEC_ID_H261:
604 if (!CONFIG_H261_ENCODER)
605 return -1;
606 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
607 av_log(avctx, AV_LOG_ERROR,
608 "The specified picture size of %dx%d is not valid for the "
609 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
610 s->width, s->height);
611 return -1;
612 }
613 s->out_format = FMT_H261;
614 avctx->delay = 0;
615 s->low_delay = 1;
616 s->rtp_mode = 0; /* Sliced encoding not supported */
617 break;
618 case AV_CODEC_ID_H263:
619 if (!CONFIG_H263_ENCODER)
620 return -1;
621 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
622 s->width, s->height) == 8) {
623 av_log(avctx, AV_LOG_INFO,
624 "The specified picture size of %dx%d is not valid for "
625 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
626 "352x288, 704x576, and 1408x1152."
627 "Try H.263+.\n", s->width, s->height);
628 return -1;
629 }
630 s->out_format = FMT_H263;
631 avctx->delay = 0;
632 s->low_delay = 1;
633 break;
634 case AV_CODEC_ID_H263P:
635 s->out_format = FMT_H263;
636 s->h263_plus = 1;
637 /* Fx */
638 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
639 s->modified_quant = s->h263_aic;
640 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
641 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
642
643 /* /Fx */
644 /* These are just to be sure */
645 avctx->delay = 0;
646 s->low_delay = 1;
647 break;
648 case AV_CODEC_ID_FLV1:
649 s->out_format = FMT_H263;
650 s->h263_flv = 2; /* format = 1; 11-bit codes */
651 s->unrestricted_mv = 1;
652 s->rtp_mode = 0; /* don't allow GOB */
653 avctx->delay = 0;
654 s->low_delay = 1;
655 break;
656 case AV_CODEC_ID_RV10:
657 s->out_format = FMT_H263;
658 avctx->delay = 0;
659 s->low_delay = 1;
660 break;
661 case AV_CODEC_ID_RV20:
662 s->out_format = FMT_H263;
663 avctx->delay = 0;
664 s->low_delay = 1;
665 s->modified_quant = 1;
666 s->h263_aic = 1;
667 s->h263_plus = 1;
668 s->loop_filter = 1;
669 s->unrestricted_mv = 0;
670 break;
671 case AV_CODEC_ID_MPEG4:
672 s->out_format = FMT_H263;
673 s->h263_pred = 1;
674 s->unrestricted_mv = 1;
675 s->low_delay = s->max_b_frames ? 0 : 1;
676 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
677 break;
678 case AV_CODEC_ID_MSMPEG4V2:
679 s->out_format = FMT_H263;
680 s->h263_pred = 1;
681 s->unrestricted_mv = 1;
682 s->msmpeg4_version = 2;
683 avctx->delay = 0;
684 s->low_delay = 1;
685 break;
686 case AV_CODEC_ID_MSMPEG4V3:
687 s->out_format = FMT_H263;
688 s->h263_pred = 1;
689 s->unrestricted_mv = 1;
690 s->msmpeg4_version = 3;
691 s->flipflop_rounding = 1;
692 avctx->delay = 0;
693 s->low_delay = 1;
694 break;
695 case AV_CODEC_ID_WMV1:
696 s->out_format = FMT_H263;
697 s->h263_pred = 1;
698 s->unrestricted_mv = 1;
699 s->msmpeg4_version = 4;
700 s->flipflop_rounding = 1;
701 avctx->delay = 0;
702 s->low_delay = 1;
703 break;
704 case AV_CODEC_ID_WMV2:
705 s->out_format = FMT_H263;
706 s->h263_pred = 1;
707 s->unrestricted_mv = 1;
708 s->msmpeg4_version = 5;
709 s->flipflop_rounding = 1;
710 avctx->delay = 0;
711 s->low_delay = 1;
712 break;
713 default:
714 return -1;
715 }
716
717 avctx->has_b_frames = !s->low_delay;
718
719 s->encoding = 1;
720
721 s->progressive_frame =
722 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
723 AV_CODEC_FLAG_INTERLACED_ME) ||
724 s->alternate_scan);
725
726 /* init */
727 ff_mpv_idct_init(s);
728 if (ff_mpv_common_init(s) < 0)
729 return -1;
730
731 if (ARCH_X86)
732 ff_mpv_encode_init_x86(s);
733
734 ff_fdctdsp_init(&s->fdsp, avctx);
735 ff_me_cmp_init(&s->mecc, avctx);
736 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
737 ff_pixblockdsp_init(&s->pdsp, avctx);
738 ff_qpeldsp_init(&s->qdsp);
739
740 if (s->msmpeg4_version) {
741 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
742 2 * 2 * (MAX_LEVEL + 1) *
743 (MAX_RUN + 1) * 2 * sizeof(int), fail);
744 }
745 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
746
747 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
748 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
749 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
750 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
751 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
752 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
753 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
754 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
755
756 if (s->avctx->noise_reduction) {
757 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
758 2 * 64 * sizeof(uint16_t), fail);
759 }
760
761 if (CONFIG_H263_ENCODER)
762 ff_h263dsp_init(&s->h263dsp);
763 if (!s->dct_quantize)
764 s->dct_quantize = ff_dct_quantize_c;
765 if (!s->denoise_dct)
766 s->denoise_dct = denoise_dct_c;
767 s->fast_dct_quantize = s->dct_quantize;
768 if (avctx->trellis)
769 s->dct_quantize = dct_quantize_trellis_c;
770
771 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
772 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
773
774 if (s->slice_context_count > 1) {
775 s->rtp_mode = 1;
776
777 if (avctx->codec_id == AV_CODEC_ID_H263 || avctx->codec_id == AV_CODEC_ID_H263P)
778 s->h263_slice_structured = 1;
779 }
780
781 s->quant_precision = 5;
782
783 #if FF_API_PRIVATE_OPT
784 FF_DISABLE_DEPRECATION_WARNINGS
785 if (avctx->frame_skip_threshold)
786 s->frame_skip_threshold = avctx->frame_skip_threshold;
787 if (avctx->frame_skip_factor)
788 s->frame_skip_factor = avctx->frame_skip_factor;
789 if (avctx->frame_skip_exp)
790 s->frame_skip_exp = avctx->frame_skip_exp;
791 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
792 s->frame_skip_cmp = avctx->frame_skip_cmp;
793 FF_ENABLE_DEPRECATION_WARNINGS
794 #endif
795
796 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
797 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
798
799 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
800 ff_h261_encode_init(s);
801 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
802 ff_h263_encode_init(s);
803 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
804 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
805 return ret;
806 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
807 && s->out_format == FMT_MPEG1)
808 ff_mpeg1_encode_init(s);
809
810 /* init q matrix */
811 for (i = 0; i < 64; i++) {
812 int j = s->idsp.idct_permutation[i];
813 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
814 s->mpeg_quant) {
815 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
816 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
817 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
818 s->intra_matrix[j] =
819 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
820 } else {
821 /* mpeg1/2 */
822 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
823 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
824 }
825 if (s->avctx->intra_matrix)
826 s->intra_matrix[j] = s->avctx->intra_matrix[i];
827 if (s->avctx->inter_matrix)
828 s->inter_matrix[j] = s->avctx->inter_matrix[i];
829 }
830
831 /* precompute matrix */
832 /* for mjpeg, we do include qscale in the matrix */
833 if (s->out_format != FMT_MJPEG) {
834 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
835 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
836 31, 1);
837 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
838 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
839 31, 0);
840 }
841
842 if (ff_rate_control_init(s) < 0)
843 return -1;
844
845 #if FF_API_ERROR_RATE
846 FF_DISABLE_DEPRECATION_WARNINGS
847 if (avctx->error_rate)
848 s->error_rate = avctx->error_rate;
849 FF_ENABLE_DEPRECATION_WARNINGS;
850 #endif
851
852 #if FF_API_NORMALIZE_AQP
853 FF_DISABLE_DEPRECATION_WARNINGS
854 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
855 s->mpv_flags |= FF_MPV_FLAG_NAQ;
856 FF_ENABLE_DEPRECATION_WARNINGS;
857 #endif
858
859 #if FF_API_MV0
860 FF_DISABLE_DEPRECATION_WARNINGS
861 if (avctx->flags & CODEC_FLAG_MV0)
862 s->mpv_flags |= FF_MPV_FLAG_MV0;
863 FF_ENABLE_DEPRECATION_WARNINGS
864 #endif
865
866 #if FF_API_MPV_OPT
867 FF_DISABLE_DEPRECATION_WARNINGS
868 if (avctx->rc_qsquish != 0.0)
869 s->rc_qsquish = avctx->rc_qsquish;
870 if (avctx->rc_qmod_amp != 0.0)
871 s->rc_qmod_amp = avctx->rc_qmod_amp;
872 if (avctx->rc_qmod_freq)
873 s->rc_qmod_freq = avctx->rc_qmod_freq;
874 if (avctx->rc_buffer_aggressivity != 1.0)
875 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
876 if (avctx->rc_initial_cplx != 0.0)
877 s->rc_initial_cplx = avctx->rc_initial_cplx;
878 if (avctx->lmin)
879 s->lmin = avctx->lmin;
880 if (avctx->lmax)
881 s->lmax = avctx->lmax;
882
883 if (avctx->rc_eq) {
884 av_freep(&s->rc_eq);
885 s->rc_eq = av_strdup(avctx->rc_eq);
886 if (!s->rc_eq)
887 return AVERROR(ENOMEM);
888 }
889 FF_ENABLE_DEPRECATION_WARNINGS
890 #endif
891
892 #if FF_API_PRIVATE_OPT
893 FF_DISABLE_DEPRECATION_WARNINGS
894 if (avctx->brd_scale)
895 s->brd_scale = avctx->brd_scale;
896 FF_ENABLE_DEPRECATION_WARNINGS
897 #endif
898
899 if (s->b_frame_strategy == 2) {
900 for (i = 0; i < s->max_b_frames + 2; i++) {
901 s->tmp_frames[i] = av_frame_alloc();
902 if (!s->tmp_frames[i])
903 return AVERROR(ENOMEM);
904
905 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
906 s->tmp_frames[i]->width = s->width >> s->brd_scale;
907 s->tmp_frames[i]->height = s->height >> s->brd_scale;
908
909 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
910 if (ret < 0)
911 return ret;
912 }
913 }
914
915 cpb_props = ff_add_cpb_side_data(avctx);
916 if (!cpb_props)
917 return AVERROR(ENOMEM);
918 cpb_props->max_bitrate = avctx->rc_max_rate;
919 cpb_props->min_bitrate = avctx->rc_min_rate;
920 cpb_props->avg_bitrate = avctx->bit_rate;
921 cpb_props->buffer_size = avctx->rc_buffer_size;
922
923 return 0;
924 fail:
925 ff_mpv_encode_end(avctx);
926 return AVERROR_UNKNOWN;
927 }
928
929 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
930 {
931 MpegEncContext *s = avctx->priv_data;
932 int i;
933
934 ff_rate_control_uninit(s);
935
936 ff_mpv_common_end(s);
937 if (CONFIG_MJPEG_ENCODER &&
938 s->out_format == FMT_MJPEG)
939 ff_mjpeg_encode_close(s);
940
941 av_freep(&avctx->extradata);
942
943 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
944 av_frame_free(&s->tmp_frames[i]);
945
946 ff_free_picture_tables(&s->new_picture);
947 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
948
949 av_freep(&s->avctx->stats_out);
950 av_freep(&s->ac_stats);
951
952 av_freep(&s->q_intra_matrix);
953 av_freep(&s->q_inter_matrix);
954 av_freep(&s->q_intra_matrix16);
955 av_freep(&s->q_inter_matrix16);
956 av_freep(&s->input_picture);
957 av_freep(&s->reordered_input_picture);
958 av_freep(&s->dct_offset);
959
960 return 0;
961 }
962
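/* Sum of absolute differences of a 16x16 block against a constant reference
 * value (the block mean, as used by get_intra_count() below). */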
963 static int get_sae(uint8_t *src, int ref, int stride)
964 {
965 int x,y;
966 int acc = 0;
967
968 for (y = 0; y < 16; y++) {
969 for (x = 0; x < 16; x++) {
970 acc += FFABS(src[x + y * stride] - ref);
971 }
972 }
973
974 return acc;
975 }
976
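/* Count the 16x16 blocks for which coding against the reference frame (SAD)
 * would be clearly more expensive than coding the block against its own mean;
 * used by b_frame_strategy == 1 as a cheap intra/inter estimate. */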
977 static int get_intra_count(MpegEncContext *s, uint8_t *src,
978 uint8_t *ref, int stride)
979 {
980 int x, y, w, h;
981 int acc = 0;
982
983 w = s->width & ~15;
984 h = s->height & ~15;
985
986 for (y = 0; y < h; y += 16) {
987 for (x = 0; x < w; x += 16) {
988 int offset = x + y * stride;
989 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
990 stride, 16);
991 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
992 int sae = get_sae(src + offset, mean, stride);
993
994 acc += sae + 500 < sad;
995 }
996 }
997 return acc;
998 }
999
1000 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1001 {
1002 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1003 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1004 s->mb_stride, s->mb_height, s->b8_stride,
1005 &s->linesize, &s->uvlinesize);
1006 }
1007
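/* Take ownership of the user-supplied frame: reference it directly when the
 * strides match, otherwise copy it into an internal buffer (padding the edges
 * when the size is not a multiple of 16), fabricate a pts if none was given,
 * and append the picture to the input_picture FIFO. */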
1008 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1009 {
1010 Picture *pic = NULL;
1011 int64_t pts;
1012 int i, display_picture_number = 0, ret;
1013 int encoding_delay = s->max_b_frames ? s->max_b_frames
1014 : (s->low_delay ? 0 : 1);
1015 int flush_offset = 1;
1016 int direct = 1;
1017
1018 if (pic_arg) {
1019 pts = pic_arg->pts;
1020 display_picture_number = s->input_picture_number++;
1021
1022 if (pts != AV_NOPTS_VALUE) {
1023 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1024 int64_t time = pts;
1025 int64_t last = s->user_specified_pts;
1026
1027 if (time <= last) {
1028 av_log(s->avctx, AV_LOG_ERROR,
1029 "Error, Invalid timestamp=%"PRId64", "
1030 "last=%"PRId64"\n", pts, s->user_specified_pts);
1031 return -1;
1032 }
1033
1034 if (!s->low_delay && display_picture_number == 1)
1035 s->dts_delta = time - last;
1036 }
1037 s->user_specified_pts = pts;
1038 } else {
1039 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1040 s->user_specified_pts =
1041 pts = s->user_specified_pts + 1;
1042 av_log(s->avctx, AV_LOG_INFO,
1043 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1044 pts);
1045 } else {
1046 pts = display_picture_number;
1047 }
1048 }
1049
1050 if (!pic_arg->buf[0] ||
1051 pic_arg->linesize[0] != s->linesize ||
1052 pic_arg->linesize[1] != s->uvlinesize ||
1053 pic_arg->linesize[2] != s->uvlinesize)
1054 direct = 0;
1055 if ((s->width & 15) || (s->height & 15))
1056 direct = 0;
1057
1058 ff_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
1059 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1060
1061 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1062 if (i < 0)
1063 return i;
1064
1065 pic = &s->picture[i];
1066 pic->reference = 3;
1067
1068 if (direct) {
1069 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1070 return ret;
1071 }
1072 ret = alloc_picture(s, pic, direct);
1073 if (ret < 0)
1074 return ret;
1075
1076 if (!direct) {
1077 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1078 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1079 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1080 // empty
1081 } else {
1082 int h_chroma_shift, v_chroma_shift;
1083 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1084 &h_chroma_shift,
1085 &v_chroma_shift);
1086
1087 for (i = 0; i < 3; i++) {
1088 int src_stride = pic_arg->linesize[i];
1089 int dst_stride = i ? s->uvlinesize : s->linesize;
1090 int h_shift = i ? h_chroma_shift : 0;
1091 int v_shift = i ? v_chroma_shift : 0;
1092 int w = s->width >> h_shift;
1093 int h = s->height >> v_shift;
1094 uint8_t *src = pic_arg->data[i];
1095 uint8_t *dst = pic->f->data[i];
1096
1097 if (!s->avctx->rc_buffer_size)
1098 dst += INPLACE_OFFSET;
1099
1100 if (src_stride == dst_stride)
1101 memcpy(dst, src, src_stride * h);
1102 else {
1103 int h2 = h;
1104 uint8_t *dst2 = dst;
1105 while (h2--) {
1106 memcpy(dst2, src, w);
1107 dst2 += dst_stride;
1108 src += src_stride;
1109 }
1110 }
1111 if ((s->width & 15) || (s->height & 15)) {
1112 s->mpvencdsp.draw_edges(dst, dst_stride,
1113 w, h,
1114 16 >> h_shift,
1115 16 >> v_shift,
1116 EDGE_BOTTOM);
1117 }
1118 }
1119 }
1120 }
1121 ret = av_frame_copy_props(pic->f, pic_arg);
1122 if (ret < 0)
1123 return ret;
1124
1125 pic->f->display_picture_number = display_picture_number;
1126 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1127 } else {
1128 /* Flushing: When we have not received enough input frames,
1129 * ensure s->input_picture[0] contains the first picture */
1130 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1131 if (s->input_picture[flush_offset])
1132 break;
1133
1134 if (flush_offset <= 1)
1135 flush_offset = 1;
1136 else
1137 encoding_delay = encoding_delay - flush_offset + 1;
1138 }
1139
1140 /* shift buffer entries */
1141 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1142 s->input_picture[i - flush_offset] = s->input_picture[i];
1143
1144 s->input_picture[encoding_delay] = (Picture*) pic;
1145
1146 return 0;
1147 }
1148
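/* Decide whether the candidate frame may be skipped: compare it to the last
 * coded reference in 8x8 blocks with frame_skip_cmp, accumulate the scores
 * according to frame_skip_exp, and skip when the total stays below
 * frame_skip_threshold or below a lambda-scaled frame_skip_factor. */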
1149 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1150 {
1151 int x, y, plane;
1152 int score = 0;
1153 int64_t score64 = 0;
1154
1155 for (plane = 0; plane < 3; plane++) {
1156 const int stride = p->f->linesize[plane];
1157 const int bw = plane ? 1 : 2;
1158 for (y = 0; y < s->mb_height * bw; y++) {
1159 for (x = 0; x < s->mb_width * bw; x++) {
1160 int off = p->shared ? 0 : 16;
1161 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1162 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1163 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1164
1165 switch (s->frame_skip_exp) {
1166 case 0: score = FFMAX(score, v); break;
1167 case 1: score += FFABS(v); break;
1168 case 2: score += v * v; break;
1169 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1170 case 4: score64 += v * v * (int64_t)(v * v); break;
1171 }
1172 }
1173 }
1174 }
1175
1176 if (score)
1177 score64 = score;
1178
1179 if (score64 < s->frame_skip_threshold)
1180 return 1;
1181 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1182 return 1;
1183 return 0;
1184 }
1185
1186 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1187 {
1188 AVPacket pkt = { 0 };
1189 int ret, got_output;
1190
1191 av_init_packet(&pkt);
1192 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1193 if (ret < 0)
1194 return ret;
1195
1196 ret = pkt.size;
1197 av_packet_unref(&pkt);
1198 return ret;
1199 }
1200
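/* b_frame_strategy == 2: encode downscaled (by brd_scale) copies of the queued
 * input frames with every possible number of consecutive B-frames and return
 * the count that gives the lowest rate-distortion cost. */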
1201 static int estimate_best_b_count(MpegEncContext *s)
1202 {
1203 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1204 AVCodecContext *c = avcodec_alloc_context3(NULL);
1205 const int scale = s->brd_scale;
1206 int i, j, out_size, p_lambda, b_lambda, lambda2;
1207 int64_t best_rd = INT64_MAX;
1208 int best_b_count = -1;
1209
1210 if (!c)
1211 return AVERROR(ENOMEM);
1212 assert(scale >= 0 && scale <= 3);
1213
1214 //emms_c();
1215 //s->next_picture_ptr->quality;
1216 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1217 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1218 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1219 if (!b_lambda) // FIXME we should do this somewhere else
1220 b_lambda = p_lambda;
1221 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1222 FF_LAMBDA_SHIFT;
1223
1224 c->width = s->width >> scale;
1225 c->height = s->height >> scale;
1226 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1227 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1228 c->mb_decision = s->avctx->mb_decision;
1229 c->me_cmp = s->avctx->me_cmp;
1230 c->mb_cmp = s->avctx->mb_cmp;
1231 c->me_sub_cmp = s->avctx->me_sub_cmp;
1232 c->pix_fmt = AV_PIX_FMT_YUV420P;
1233 c->time_base = s->avctx->time_base;
1234 c->max_b_frames = s->max_b_frames;
1235
1236 if (avcodec_open2(c, codec, NULL) < 0)
1237 return -1;
1238
1239 for (i = 0; i < s->max_b_frames + 2; i++) {
1240 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1241 s->next_picture_ptr;
1242
1243 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1244 pre_input = *pre_input_ptr;
1245
1246 if (!pre_input.shared && i) {
1247 pre_input.f->data[0] += INPLACE_OFFSET;
1248 pre_input.f->data[1] += INPLACE_OFFSET;
1249 pre_input.f->data[2] += INPLACE_OFFSET;
1250 }
1251
1252 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1253 s->tmp_frames[i]->linesize[0],
1254 pre_input.f->data[0],
1255 pre_input.f->linesize[0],
1256 c->width, c->height);
1257 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1258 s->tmp_frames[i]->linesize[1],
1259 pre_input.f->data[1],
1260 pre_input.f->linesize[1],
1261 c->width >> 1, c->height >> 1);
1262 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1263 s->tmp_frames[i]->linesize[2],
1264 pre_input.f->data[2],
1265 pre_input.f->linesize[2],
1266 c->width >> 1, c->height >> 1);
1267 }
1268 }
1269
1270 for (j = 0; j < s->max_b_frames + 1; j++) {
1271 int64_t rd = 0;
1272
1273 if (!s->input_picture[j])
1274 break;
1275
1276 c->error[0] = c->error[1] = c->error[2] = 0;
1277
1278 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1279 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1280
1281 out_size = encode_frame(c, s->tmp_frames[0]);
1282
1283 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1284
1285 for (i = 0; i < s->max_b_frames + 1; i++) {
1286 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1287
1288 s->tmp_frames[i + 1]->pict_type = is_p ?
1289 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1290 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1291
1292 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1293
1294 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1295 }
1296
1297 /* get the delayed frames */
1298 while (out_size) {
1299 out_size = encode_frame(c, NULL);
1300 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1301 }
1302
1303 rd += c->error[0] + c->error[1] + c->error[2];
1304
1305 if (rd < best_rd) {
1306 best_rd = rd;
1307 best_b_count = j;
1308 }
1309 }
1310
1311 avcodec_close(c);
1312 av_freep(&c);
1313
1314 return best_b_count;
1315 }
1316
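/* Pick the next picture to code and its type: force an I-frame at GOP starts
 * (or when intra-only), optionally drop the frame via skip_check(), choose the
 * number of B-frames according to b_frame_strategy, then reorder the queue and
 * set up new_picture / current_picture_ptr (copying shared input frames so the
 * encoder may modify them). */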
1317 static int select_input_picture(MpegEncContext *s)
1318 {
1319 int i, ret;
1320
1321 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1322 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1323 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1324
1325 /* set next picture type & ordering */
1326 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1327 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1328 !s->next_picture_ptr || s->intra_only) {
1329 s->reordered_input_picture[0] = s->input_picture[0];
1330 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1331 s->reordered_input_picture[0]->f->coded_picture_number =
1332 s->coded_picture_number++;
1333 } else {
1334 int b_frames = 0;
1335
1336 if (s->frame_skip_threshold || s->frame_skip_factor) {
1337 if (s->picture_in_gop_number < s->gop_size &&
1338 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1339 // FIXME check that the gop check above is +-1 correct
1340 av_frame_unref(s->input_picture[0]->f);
1341
1342 emms_c();
1343 ff_vbv_update(s, 0);
1344
1345 goto no_output_pic;
1346 }
1347 }
1348
1349 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1350 for (i = 0; i < s->max_b_frames + 1; i++) {
1351 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1352
1353 if (pict_num >= s->rc_context.num_entries)
1354 break;
1355 if (!s->input_picture[i]) {
1356 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1357 break;
1358 }
1359
1360 s->input_picture[i]->f->pict_type =
1361 s->rc_context.entry[pict_num].new_pict_type;
1362 }
1363 }
1364
1365 if (s->b_frame_strategy == 0) {
1366 b_frames = s->max_b_frames;
1367 while (b_frames && !s->input_picture[b_frames])
1368 b_frames--;
1369 } else if (s->b_frame_strategy == 1) {
1370 for (i = 1; i < s->max_b_frames + 1; i++) {
1371 if (s->input_picture[i] &&
1372 s->input_picture[i]->b_frame_score == 0) {
1373 s->input_picture[i]->b_frame_score =
1374 get_intra_count(s,
1375 s->input_picture[i ]->f->data[0],
1376 s->input_picture[i - 1]->f->data[0],
1377 s->linesize) + 1;
1378 }
1379 }
1380 for (i = 0; i < s->max_b_frames + 1; i++) {
1381 if (!s->input_picture[i] ||
1382 s->input_picture[i]->b_frame_score - 1 >
1383 s->mb_num / s->b_sensitivity)
1384 break;
1385 }
1386
1387 b_frames = FFMAX(0, i - 1);
1388
1389 /* reset scores */
1390 for (i = 0; i < b_frames + 1; i++) {
1391 s->input_picture[i]->b_frame_score = 0;
1392 }
1393 } else if (s->b_frame_strategy == 2) {
1394 b_frames = estimate_best_b_count(s);
1395 }
1396
1397 emms_c();
1398
1399 for (i = b_frames - 1; i >= 0; i--) {
1400 int type = s->input_picture[i]->f->pict_type;
1401 if (type && type != AV_PICTURE_TYPE_B)
1402 b_frames = i;
1403 }
1404 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1405 b_frames == s->max_b_frames) {
1406 av_log(s->avctx, AV_LOG_ERROR,
1407 "warning, too many b frames in a row\n");
1408 }
1409
1410 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1411 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1412 s->gop_size > s->picture_in_gop_number) {
1413 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1414 } else {
1415 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1416 b_frames = 0;
1417 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1418 }
1419 }
1420
1421 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1422 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1423 b_frames--;
1424
1425 s->reordered_input_picture[0] = s->input_picture[b_frames];
1426 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1427 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1428 s->reordered_input_picture[0]->f->coded_picture_number =
1429 s->coded_picture_number++;
1430 for (i = 0; i < b_frames; i++) {
1431 s->reordered_input_picture[i + 1] = s->input_picture[i];
1432 s->reordered_input_picture[i + 1]->f->pict_type =
1433 AV_PICTURE_TYPE_B;
1434 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1435 s->coded_picture_number++;
1436 }
1437 }
1438 }
1439 no_output_pic:
1440 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1441
1442 if (s->reordered_input_picture[0]) {
1443 s->reordered_input_picture[0]->reference =
1444 s->reordered_input_picture[0]->f->pict_type !=
1445 AV_PICTURE_TYPE_B ? 3 : 0;
1446
1447 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1448 return ret;
1449
1450 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1451 // input is a shared pix, so we can't modify it -> alloc a new
1452 // one & ensure that the shared one is reusable
1453
1454 Picture *pic;
1455 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1456 if (i < 0)
1457 return i;
1458 pic = &s->picture[i];
1459
1460 pic->reference = s->reordered_input_picture[0]->reference;
1461 if (alloc_picture(s, pic, 0) < 0) {
1462 return -1;
1463 }
1464
1465 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1466 if (ret < 0)
1467 return ret;
1468
1469 /* mark us unused / free shared pic */
1470 av_frame_unref(s->reordered_input_picture[0]->f);
1471 s->reordered_input_picture[0]->shared = 0;
1472
1473 s->current_picture_ptr = pic;
1474 } else {
1475 // input is not a shared pix -> reuse buffer for current_pix
1476 s->current_picture_ptr = s->reordered_input_picture[0];
1477 for (i = 0; i < 4; i++) {
1478 s->new_picture.f->data[i] += INPLACE_OFFSET;
1479 }
1480 }
1481 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1482 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1483 s->current_picture_ptr)) < 0)
1484 return ret;
1485
1486 s->picture_number = s->new_picture.f->display_picture_number;
1487 }
1488 return 0;
1489 }
1490
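/* Per-frame cleanup after encoding: pad the borders of the reconstructed
 * reference picture (needed for unrestricted motion vectors), remember the
 * per-picture-type lambda and release pictures that are no longer referenced. */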
1491 static void frame_end(MpegEncContext *s)
1492 {
1493 int i;
1494
1495 if (s->unrestricted_mv &&
1496 s->current_picture.reference &&
1497 !s->intra_only) {
1498 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1499 int hshift = desc->log2_chroma_w;
1500 int vshift = desc->log2_chroma_h;
1501 s->mpvencdsp.draw_edges(s->current_picture.f->data[0], s->linesize,
1502 s->h_edge_pos, s->v_edge_pos,
1503 EDGE_WIDTH, EDGE_WIDTH,
1504 EDGE_TOP | EDGE_BOTTOM);
1505 s->mpvencdsp.draw_edges(s->current_picture.f->data[1], s->uvlinesize,
1506 s->h_edge_pos >> hshift,
1507 s->v_edge_pos >> vshift,
1508 EDGE_WIDTH >> hshift,
1509 EDGE_WIDTH >> vshift,
1510 EDGE_TOP | EDGE_BOTTOM);
1511 s->mpvencdsp.draw_edges(s->current_picture.f->data[2], s->uvlinesize,
1512 s->h_edge_pos >> hshift,
1513 s->v_edge_pos >> vshift,
1514 EDGE_WIDTH >> hshift,
1515 EDGE_WIDTH >> vshift,
1516 EDGE_TOP | EDGE_BOTTOM);
1517 }
1518
1519 emms_c();
1520
1521 s->last_pict_type = s->pict_type;
1522 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1523 if (s->pict_type!= AV_PICTURE_TYPE_B)
1524 s->last_non_b_pict_type = s->pict_type;
1525
1526 if (s->encoding) {
1527 /* release non-reference frames */
1528 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1529 if (!s->picture[i].reference)
1530 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1531 }
1532 }
1533
1534 #if FF_API_CODED_FRAME
1535 FF_DISABLE_DEPRECATION_WARNINGS
1536 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1537 FF_ENABLE_DEPRECATION_WARNINGS
1538 #endif
1539 #if FF_API_ERROR_FRAME
1540 FF_DISABLE_DEPRECATION_WARNINGS
1541 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1542 sizeof(s->current_picture.encoding_error));
1543 FF_ENABLE_DEPRECATION_WARNINGS
1544 #endif
1545 }
1546
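/* Recompute the per-coefficient DCT offsets used for noise reduction from the
 * accumulated error sums; the accumulators are halved once the block count
 * exceeds 2^16 so they behave like a running average. */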
1547 static void update_noise_reduction(MpegEncContext *s)
1548 {
1549 int intra, i;
1550
1551 for (intra = 0; intra < 2; intra++) {
1552 if (s->dct_count[intra] > (1 << 16)) {
1553 for (i = 0; i < 64; i++) {
1554 s->dct_error_sum[intra][i] >>= 1;
1555 }
1556 s->dct_count[intra] >>= 1;
1557 }
1558
1559 for (i = 0; i < 64; i++) {
1560 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1561 s->dct_count[intra] +
1562 s->dct_error_sum[intra][i] / 2) /
1563 (s->dct_error_sum[intra][i] + 1);
1564 }
1565 }
1566 }
1567
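/* Per-frame setup: rotate the last/next/current picture references, double the
 * line sizes for field pictures, select the dct_unquantize functions matching
 * the output format, and refresh the noise-reduction tables if enabled. */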
1568 static int frame_start(MpegEncContext *s)
1569 {
1570 int ret;
1571
1572 /* mark & release old frames */
1573 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1574 s->last_picture_ptr != s->next_picture_ptr &&
1575 s->last_picture_ptr->f->buf[0]) {
1576 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1577 }
1578
1579 s->current_picture_ptr->f->pict_type = s->pict_type;
1580 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1581
1582 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1583 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1584 s->current_picture_ptr)) < 0)
1585 return ret;
1586
1587 if (s->pict_type != AV_PICTURE_TYPE_B) {
1588 s->last_picture_ptr = s->next_picture_ptr;
1589 if (!s->droppable)
1590 s->next_picture_ptr = s->current_picture_ptr;
1591 }
1592
1593 if (s->last_picture_ptr) {
1594 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1595 if (s->last_picture_ptr->f->buf[0] &&
1596 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1597 s->last_picture_ptr)) < 0)
1598 return ret;
1599 }
1600 if (s->next_picture_ptr) {
1601 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1602 if (s->next_picture_ptr->f->buf[0] &&
1603 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1604 s->next_picture_ptr)) < 0)
1605 return ret;
1606 }
1607
1608 if (s->picture_structure!= PICT_FRAME) {
1609 int i;
1610 for (i = 0; i < 4; i++) {
1611 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1612 s->current_picture.f->data[i] +=
1613 s->current_picture.f->linesize[i];
1614 }
1615 s->current_picture.f->linesize[i] *= 2;
1616 s->last_picture.f->linesize[i] *= 2;
1617 s->next_picture.f->linesize[i] *= 2;
1618 }
1619 }
1620
1621 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1622 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1623 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1624 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1625 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1626 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1627 } else {
1628 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1629 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1630 }
1631
1632 if (s->dct_error_sum) {
1633 assert(s->avctx->noise_reduction && s->encoding);
1634 update_noise_reduction(s);
1635 }
1636
1637 return 0;
1638 }
1639
1640 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1641 const AVFrame *pic_arg, int *got_packet)
1642 {
1643 MpegEncContext *s = avctx->priv_data;
1644 int i, stuffing_count, ret;
1645 int context_count = s->slice_context_count;
1646
1647 s->picture_in_gop_number++;
1648
1649 if (load_input_picture(s, pic_arg) < 0)
1650 return -1;
1651
1652 if (select_input_picture(s) < 0) {
1653 return -1;
1654 }
1655
1656 /* output? */
1657 if (s->new_picture.f->data[0]) {
1658 uint8_t *sd;
1659 if (!pkt->data &&
1660 (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
1661 return ret;
1662 if (s->mb_info) {
1663 s->mb_info_ptr = av_packet_new_side_data(pkt,
1664 AV_PKT_DATA_H263_MB_INFO,
1665 s->mb_width*s->mb_height*12);
1666 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1667 }
1668
1669 for (i = 0; i < context_count; i++) {
1670 int start_y = s->thread_context[i]->start_mb_y;
1671 int end_y = s->thread_context[i]-> end_mb_y;
1672 int h = s->mb_height;
1673 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1674 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1675
1676 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1677 }
1678
1679 s->pict_type = s->new_picture.f->pict_type;
1680 //emms_c();
1681 ret = frame_start(s);
1682 if (ret < 0)
1683 return ret;
1684 vbv_retry:
1685 if (encode_picture(s, s->picture_number) < 0)
1686 return -1;
1687
1688 #if FF_API_STAT_BITS
1689 FF_DISABLE_DEPRECATION_WARNINGS
1690 avctx->header_bits = s->header_bits;
1691 avctx->mv_bits = s->mv_bits;
1692 avctx->misc_bits = s->misc_bits;
1693 avctx->i_tex_bits = s->i_tex_bits;
1694 avctx->p_tex_bits = s->p_tex_bits;
1695 avctx->i_count = s->i_count;
1696 // FIXME f/b_count in avctx
1697 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1698 avctx->skip_count = s->skip_count;
1699 FF_ENABLE_DEPRECATION_WARNINGS
1700 #endif
1701
1702 frame_end(s);
1703
1704 sd = av_packet_new_side_data(pkt, AV_PKT_DATA_QUALITY_FACTOR,
1705 sizeof(int));
1706 if (!sd)
1707 return AVERROR(ENOMEM);
1708 *(int *)sd = s->current_picture.f->quality;
1709
1710 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1711 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1712
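/* If rate control uses a VBV buffer and this frame overshot the allowed size,
 * raise lambda (and the per-MB lambda table), undo the per-frame state that
 * encode_picture() already changed and re-encode via vbv_retry. */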
1713 if (avctx->rc_buffer_size) {
1714 RateControlContext *rcc = &s->rc_context;
1715 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1716
1717 if (put_bits_count(&s->pb) > max_size &&
1718 s->lambda < s->lmax) {
1719 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1720 (s->qscale + 1) / s->qscale);
1721 if (s->adaptive_quant) {
1722 int i;
1723 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1724 s->lambda_table[i] =
1725 FFMAX(s->lambda_table[i] + 1,
1726 s->lambda_table[i] * (s->qscale + 1) /
1727 s->qscale);
1728 }
1729 s->mb_skipped = 0; // done in frame_start()
1730 // done in encode_picture() so we must undo it
1731 if (s->pict_type == AV_PICTURE_TYPE_P) {
1732 if (s->flipflop_rounding ||
1733 s->codec_id == AV_CODEC_ID_H263P ||
1734 s->codec_id == AV_CODEC_ID_MPEG4)
1735 s->no_rounding ^= 1;
1736 }
1737 if (s->pict_type != AV_PICTURE_TYPE_B) {
1738 s->time_base = s->last_time_base;
1739 s->last_non_b_time = s->time - s->pp_time;
1740 }
1741 for (i = 0; i < context_count; i++) {
1742 PutBitContext *pb = &s->thread_context[i]->pb;
1743 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1744 }
1745 goto vbv_retry;
1746 }
1747
1748 assert(s->avctx->rc_max_rate);
1749 }
1750
1751 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1752 ff_write_pass1_stats(s);
1753
1754 for (i = 0; i < 4; i++) {
1755 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1756 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1757 }
1758
1759 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1760 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1761 s->misc_bits + s->i_tex_bits +
1762 s->p_tex_bits);
1763 flush_put_bits(&s->pb);
1764 s->frame_bits = put_bits_count(&s->pb);
1765
1766 stuffing_count = ff_vbv_update(s, s->frame_bits);
1767 if (stuffing_count) {
1768 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1769 stuffing_count + 50) {
1770 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1771 return -1;
1772 }
1773
1774 switch (s->codec_id) {
1775 case AV_CODEC_ID_MPEG1VIDEO:
1776 case AV_CODEC_ID_MPEG2VIDEO:
1777 while (stuffing_count--) {
1778 put_bits(&s->pb, 8, 0);
1779 }
1780 break;
1781 case AV_CODEC_ID_MPEG4:
1782 put_bits(&s->pb, 16, 0);
1783 put_bits(&s->pb, 16, 0x1C3);
1784 stuffing_count -= 4;
1785 while (stuffing_count--) {
1786 put_bits(&s->pb, 8, 0xFF);
1787 }
1788 break;
1789 default:
1790 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1791 }
1792 flush_put_bits(&s->pb);
1793 s->frame_bits = put_bits_count(&s->pb);
1794 }
1795
1796 /* update mpeg1/2 vbv_delay for CBR */
1797 if (s->avctx->rc_max_rate &&
1798 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1799 s->out_format == FMT_MPEG1 &&
1800 90000LL * (avctx->rc_buffer_size - 1) <=
1801 s->avctx->rc_max_rate * 0xFFFFLL) {
1802 AVCPBProperties *props;
1803 size_t props_size;
1804
1805 int vbv_delay, min_delay;
1806 double inbits = s->avctx->rc_max_rate *
1807 av_q2d(s->avctx->time_base);
1808 int minbits = s->frame_bits - 8 *
1809 (s->vbv_delay_ptr - s->pb.buf - 1);
1810 double bits = s->rc_context.buffer_index + minbits - inbits;
1811
1812 if (bits < 0)
1813 av_log(s->avctx, AV_LOG_ERROR,
1814 "Internal error, negative bits\n");
1815
1816 assert(s->repeat_first_field == 0);
1817
1818 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1819 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1820 s->avctx->rc_max_rate;
1821
1822 vbv_delay = FFMAX(vbv_delay, min_delay);
1823
1824 assert(vbv_delay < 0xFFFF);
1825
1826 s->vbv_delay_ptr[0] &= 0xF8;
1827 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1828 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1829 s->vbv_delay_ptr[2] &= 0x07;
1830 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1831
1832 props = av_cpb_properties_alloc(&props_size);
1833 if (!props)
1834 return AVERROR(ENOMEM);
1835 props->vbv_delay = vbv_delay * 300;
1836
1837 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
1838 (uint8_t*)props, props_size);
1839 if (ret < 0) {
1840 av_freep(&props);
1841 return ret;
1842 }
1843
1844 #if FF_API_VBV_DELAY
1845 FF_DISABLE_DEPRECATION_WARNINGS
1846 avctx->vbv_delay = vbv_delay * 300;
1847 FF_ENABLE_DEPRECATION_WARNINGS
1848 #endif
1849 }
1850 s->total_bits += s->frame_bits;
1851 #if FF_API_STAT_BITS
1852 FF_DISABLE_DEPRECATION_WARNINGS
1853 avctx->frame_bits = s->frame_bits;
1854 FF_ENABLE_DEPRECATION_WARNINGS
1855 #endif
1856
1857
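/* With B-frames (non-low-delay), output order differs from coded order:
 * each reference frame's dts is the pts of the previously output reference
 * frame, and the very first packet is shifted back by dts_delta. */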
1858 pkt->pts = s->current_picture.f->pts;
1859 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1860 if (!s->current_picture.f->coded_picture_number)
1861 pkt->dts = pkt->pts - s->dts_delta;
1862 else
1863 pkt->dts = s->reordered_pts;
1864 s->reordered_pts = pkt->pts;
1865 } else
1866 pkt->dts = pkt->pts;
1867 if (s->current_picture.f->key_frame)
1868 pkt->flags |= AV_PKT_FLAG_KEY;
1869 if (s->mb_info)
1870 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1871 } else {
1872 s->frame_bits = 0;
1873 }
1874 assert((s->frame_bits & 7) == 0);
1875
1876 pkt->size = s->frame_bits / 8;
1877 *got_packet = !!pkt->size;
1878 return 0;
1879 }
1880
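/* Heuristic: if a block contains only a few isolated +-1 coefficients
 * (each scored by the zero run preceding it), zero the whole block, since
 * coding it would likely cost more bits than the quality it adds. */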
1881 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1882 int n, int threshold)
1883 {
1884 static const char tab[64] = {
1885 3, 2, 2, 1, 1, 1, 1, 1,
1886 1, 1, 1, 1, 1, 1, 1, 1,
1887 1, 1, 1, 1, 1, 1, 1, 1,
1888 0, 0, 0, 0, 0, 0, 0, 0,
1889 0, 0, 0, 0, 0, 0, 0, 0,
1890 0, 0, 0, 0, 0, 0, 0, 0,
1891 0, 0, 0, 0, 0, 0, 0, 0,
1892 0, 0, 0, 0, 0, 0, 0, 0
1893 };
1894 int score = 0;
1895 int run = 0;
1896 int i;
1897 int16_t *block = s->block[n];
1898 const int last_index = s->block_last_index[n];
1899 int skip_dc;
1900
1901 if (threshold < 0) {
1902 skip_dc = 0;
1903 threshold = -threshold;
1904 } else
1905 skip_dc = 1;
1906
1907 /* Are all the coefficients we could set to zero already zero? */
1908 if (last_index <= skip_dc - 1)
1909 return;
1910
1911 for (i = 0; i <= last_index; i++) {
1912 const int j = s->intra_scantable.permutated[i];
1913 const int level = FFABS(block[j]);
1914 if (level == 1) {
1915 if (skip_dc && i == 0)
1916 continue;
1917 score += tab[run];
1918 run = 0;
1919 } else if (level > 1) {
1920 return;
1921 } else {
1922 run++;
1923 }
1924 }
1925 if (score >= threshold)
1926 return;
1927 for (i = skip_dc; i <= last_index; i++) {
1928 const int j = s->intra_scantable.permutated[i];
1929 block[j] = 0;
1930 }
1931 if (block[0])
1932 s->block_last_index[n] = 0;
1933 else
1934 s->block_last_index[n] = -1;
1935 }
1936
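/* Clamp quantized coefficients into the codec's representable range
 * [min_qcoeff, max_qcoeff]; the intra DC coefficient is left untouched. */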
1937 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1938 int last_index)
1939 {
1940 int i;
1941 const int maxlevel = s->max_qcoeff;
1942 const int minlevel = s->min_qcoeff;
1943 int overflow = 0;
1944
1945 if (s->mb_intra) {
1946 i = 1; // skip clipping of intra dc
1947 } else
1948 i = 0;
1949
1950 for (; i <= last_index; i++) {
1951 const int j = s->intra_scantable.permutated[i];
1952 int level = block[j];
1953
1954 if (level > maxlevel) {
1955 level = maxlevel;
1956 overflow++;
1957 } else if (level < minlevel) {
1958 level = minlevel;
1959 overflow++;
1960 }
1961
1962 block[j] = level;
1963 }
1964
1965 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1966 av_log(s->avctx, AV_LOG_INFO,
1967 "warning, clipping %d dct coefficients to %d..%d\n",
1968 overflow, minlevel, maxlevel);
1969 }
1970
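/* Per-coefficient noise-shaping weight: roughly 36 times the standard
 * deviation of the pixel values in the (edge-clipped) 3x3 neighbourhood. */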
1971 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1972 {
1973 int x, y;
1974 // FIXME optimize
1975 for (y = 0; y < 8; y++) {
1976 for (x = 0; x < 8; x++) {
1977 int x2, y2;
1978 int sum = 0;
1979 int sqr = 0;
1980 int count = 0;
1981
1982 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1983 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1984 int v = ptr[x2 + y2 * stride];
1985 sum += v;
1986 sqr += v * v;
1987 count++;
1988 }
1989 }
1990 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1991 }
1992 }
1993 }
1994
1995 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1996 int motion_x, int motion_y,
1997 int mb_block_height,
1998 int mb_block_count)
1999 {
2000 int16_t weight[8][64];
2001 int16_t orig[8][64];
2002 const int mb_x = s->mb_x;
2003 const int mb_y = s->mb_y;
2004 int i;
2005 int skip_dct[8];
2006 int dct_offset = s->linesize * 8; // default for progressive frames
2007 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2008 ptrdiff_t wrap_y, wrap_c;
2009
2010 for (i = 0; i < mb_block_count; i++)
2011 skip_dct[i] = s->skipdct;
2012
2013 if (s->adaptive_quant) {
2014 const int last_qp = s->qscale;
2015 const int mb_xy = mb_x + mb_y * s->mb_stride;
2016
2017 s->lambda = s->lambda_table[mb_xy];
2018 update_qscale(s);
2019
2020 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2021 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2022 s->dquant = s->qscale - last_qp;
2023
2024 if (s->out_format == FMT_H263) {
2025 s->dquant = av_clip(s->dquant, -2, 2);
2026
2027 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2028 if (!s->mb_intra) {
2029 if (s->pict_type == AV_PICTURE_TYPE_B) {
2030 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2031 s->dquant = 0;
2032 }
2033 if (s->mv_type == MV_TYPE_8X8)
2034 s->dquant = 0;
2035 }
2036 }
2037 }
2038 }
2039 ff_set_qscale(s, last_qp + s->dquant);
2040 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2041 ff_set_qscale(s, s->qscale + s->dquant);
2042
2043 wrap_y = s->linesize;
2044 wrap_c = s->uvlinesize;
2045 ptr_y = s->new_picture.f->data[0] +
2046 (mb_y * 16 * wrap_y) + mb_x * 16;
2047 ptr_cb = s->new_picture.f->data[1] +
2048 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
2049 ptr_cr = s->new_picture.f->data[2] +
2050 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
2051
2052 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
2053 uint8_t *ebuf = s->sc.edge_emu_buffer + 32;
2054 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2055 wrap_y, wrap_y,
2056 16, 16, mb_x * 16, mb_y * 16,
2057 s->width, s->height);
2058 ptr_y = ebuf;
2059 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
2060 wrap_c, wrap_c,
2061 8, mb_block_height, mb_x * 8, mb_y * 8,
2062 s->width >> 1, s->height >> 1);
2063 ptr_cb = ebuf + 18 * wrap_y;
2064 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
2065 wrap_c, wrap_c,
2066 8, mb_block_height, mb_x * 8, mb_y * 8,
2067 s->width >> 1, s->height >> 1);
2068 ptr_cr = ebuf + 18 * wrap_y + 8;
2069 }
2070
2071 if (s->mb_intra) {
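/* Decide between frame (progressive) and field (interlaced) DCT by comparing
 * ildct_cmp scores of the two line orderings; field DCT doubles the luma
 * stride so that blocks 2/3 are taken from the second field. */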
2072 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2073 int progressive_score, interlaced_score;
2074
2075 s->interlaced_dct = 0;
2076 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2077 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2078 NULL, wrap_y, 8) - 400;
2079
2080 if (progressive_score > 0) {
2081 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2082 NULL, wrap_y * 2, 8) +
2083 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2084 NULL, wrap_y * 2, 8);
2085 if (progressive_score > interlaced_score) {
2086 s->interlaced_dct = 1;
2087
2088 dct_offset = wrap_y;
2089 wrap_y <<= 1;
2090 if (s->chroma_format == CHROMA_422)
2091 wrap_c <<= 1;
2092 }
2093 }
2094 }
2095
2096 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2097 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2098 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2099 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2100
2101 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2102 skip_dct[4] = 1;
2103 skip_dct[5] = 1;
2104 } else {
2105 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2106 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2107 if (!s->chroma_y_shift) { /* 422 */
2108 s->pdsp.get_pixels(s->block[6],
2109 ptr_cb + (dct_offset >> 1), wrap_c);
2110 s->pdsp.get_pixels(s->block[7],
2111 ptr_cr + (dct_offset >> 1), wrap_c);
2112 }
2113 }
2114 } else {
2115 op_pixels_func (*op_pix)[4];
2116 qpel_mc_func (*op_qpix)[16];
2117 uint8_t *dest_y, *dest_cb, *dest_cr;
2118
2119 dest_y = s->dest[0];
2120 dest_cb = s->dest[1];
2121 dest_cr = s->dest[2];
2122
2123 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2124 op_pix = s->hdsp.put_pixels_tab;
2125 op_qpix = s->qdsp.put_qpel_pixels_tab;
2126 } else {
2127 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2128 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2129 }
2130
2131 if (s->mv_dir & MV_DIR_FORWARD) {
2132 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2133 s->last_picture.f->data,
2134 op_pix, op_qpix);
2135 op_pix = s->hdsp.avg_pixels_tab;
2136 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2137 }
2138 if (s->mv_dir & MV_DIR_BACKWARD) {
2139 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2140 s->next_picture.f->data,
2141 op_pix, op_qpix);
2142 }
2143
2144 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2145 int progressive_score, interlaced_score;
2146
2147 s->interlaced_dct = 0;
2148 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2149 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2150 ptr_y + wrap_y * 8,
2151 wrap_y, 8) - 400;
2152
2153 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2154 progressive_score -= 400;
2155
2156 if (progressive_score > 0) {
2157 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2158 wrap_y * 2, 8) +
2159 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2160 ptr_y + wrap_y,
2161 wrap_y * 2, 8);
2162
2163 if (progressive_score > interlaced_score) {
2164 s->interlaced_dct = 1;
2165
2166 dct_offset = wrap_y;
2167 wrap_y <<= 1;
2168 if (s->chroma_format == CHROMA_422)
2169 wrap_c <<= 1;
2170 }
2171 }
2172 }
2173
2174 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2175 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2176 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2177 dest_y + dct_offset, wrap_y);
2178 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2179 dest_y + dct_offset + 8, wrap_y);
2180
2181 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2182 skip_dct[4] = 1;
2183 skip_dct[5] = 1;
2184 } else {
2185 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2186 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2187 if (!s->chroma_y_shift) { /* 422 */
2188 s->pdsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
2189 dest_cb + (dct_offset >> 1), wrap_c);
2190 s->pdsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
2191 dest_cr + (dct_offset >> 1), wrap_c);
2192 }
2193 }
2194 /* pre quantization */
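// If the motion-compensated variance of the MB is small, skip the DCT for
// blocks whose SAD against the prediction is below 20 * qscale: their
// residual would quantize to (nearly) all zeros anyway.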
2195 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2196 2 * s->qscale * s->qscale) {
2197 // FIXME optimize
2198 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2199 skip_dct[0] = 1;
2200 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2201 skip_dct[1] = 1;
2202 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2203 wrap_y, 8) < 20 * s->qscale)
2204 skip_dct[2] = 1;
2205 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2206 wrap_y, 8) < 20 * s->qscale)
2207 skip_dct[3] = 1;
2208 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2209 skip_dct[4] = 1;
2210 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2211 skip_dct[5] = 1;
2212 if (!s->chroma_y_shift) { /* 422 */
2213 if (s->mecc.sad[1](NULL, ptr_cb + (dct_offset >> 1),
2214 dest_cb + (dct_offset >> 1),
2215 wrap_c, 8) < 20 * s->qscale)
2216 skip_dct[6] = 1;
2217 if (s->mecc.sad[1](NULL, ptr_cr + (dct_offset >> 1),
2218 dest_cr + (dct_offset >> 1),
2219 wrap_c, 8) < 20 * s->qscale)
2220 skip_dct[7] = 1;
2221 }
2222 }
2223 }
2224
2225 if (s->quantizer_noise_shaping) {
2226 if (!skip_dct[0])
2227 get_visual_weight(weight[0], ptr_y , wrap_y);
2228 if (!skip_dct[1])
2229 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2230 if (!skip_dct[2])
2231 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2232 if (!skip_dct[3])
2233 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2234 if (!skip_dct[4])
2235 get_visual_weight(weight[4], ptr_cb , wrap_c);
2236 if (!skip_dct[5])
2237 get_visual_weight(weight[5], ptr_cr , wrap_c);
2238 if (!s->chroma_y_shift) { /* 422 */
2239 if (!skip_dct[6])
2240 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
2241 wrap_c);
2242 if (!skip_dct[7])
2243 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
2244 wrap_c);
2245 }
2246 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2247 }
2248
2249 /* DCT & quantize */
2250 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
2251 {
2252 for (i = 0; i < mb_block_count; i++) {
2253 if (!skip_dct[i]) {
2254 int overflow;
2255 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2256 // FIXME we could decide to change the quantizer instead of
2257 // clipping
2258 // JS: I don't think that would be a good idea; it could lower
2259 // quality instead of improving it. Only INTRADC clipping
2260 // deserves changes in the quantizer.
2261 if (overflow)
2262 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2263 } else
2264 s->block_last_index[i] = -1;
2265 }
2266 if (s->quantizer_noise_shaping) {
2267 for (i = 0; i < mb_block_count; i++) {
2268 if (!skip_dct[i]) {
2269 s->block_last_index[i] =
2270 dct_quantize_refine(s, s->block[i], weight[i],
2271 orig[i], i, s->qscale);
2272 }
2273 }
2274 }
2275
2276 if (s->luma_elim_threshold && !s->mb_intra)
2277 for (i = 0; i < 4; i++)
2278 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2279 if (s->chroma_elim_threshold && !s->mb_intra)
2280 for (i = 4; i < mb_block_count; i++)
2281 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2282
2283 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2284 for (i = 0; i < mb_block_count; i++) {
2285 if (s->block_last_index[i] == -1)
2286 s->coded_score[i] = INT_MAX / 256;
2287 }
2288 }
2289 }
2290
2291 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2292 s->block_last_index[4] =
2293 s->block_last_index[5] = 0;
2294 s->block[4][0] =
2295 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2296 }
2297
2298 // FIXME: the non-C quantize code returns an incorrect block_last_index
2299 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2300 for (i = 0; i < mb_block_count; i++) {
2301 int j;
2302 if (s->block_last_index[i] > 0) {
2303 for (j = 63; j > 0; j--) {
2304 if (s->block[i][s->intra_scantable.permutated[j]])
2305 break;
2306 }
2307 s->block_last_index[i] = j;
2308 }
2309 }
2310 }
2311
2312 /* huffman encode */
2313 switch(s->codec_id){ //FIXME a function pointer could be slightly faster
2314 case AV_CODEC_ID_MPEG1VIDEO:
2315 case AV_CODEC_ID_MPEG2VIDEO:
2316 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2317 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2318 break;
2319 case AV_CODEC_ID_MPEG4:
2320 if (CONFIG_MPEG4_ENCODER)
2321 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2322 break;
2323 case AV_CODEC_ID_MSMPEG4V2:
2324 case AV_CODEC_ID_MSMPEG4V3:
2325 case AV_CODEC_ID_WMV1:
2326 if (CONFIG_MSMPEG4_ENCODER)
2327 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2328 break;
2329 case AV_CODEC_ID_WMV2:
2330 if (CONFIG_WMV2_ENCODER)
2331 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2332 break;
2333 case AV_CODEC_ID_H261:
2334 if (CONFIG_H261_ENCODER)
2335 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2336 break;
2337 case AV_CODEC_ID_H263:
2338 case AV_CODEC_ID_H263P:
2339 case AV_CODEC_ID_FLV1:
2340 case AV_CODEC_ID_RV10:
2341 case AV_CODEC_ID_RV20:
2342 if (CONFIG_H263_ENCODER)
2343 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2344 break;
2345 case AV_CODEC_ID_MJPEG:
2346 if (CONFIG_MJPEG_ENCODER)
2347 ff_mjpeg_encode_mb(s, s->block);
2348 break;
2349 default:
2350 assert(0);
2351 }
2352 }
2353
2354 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2355 {
2356 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2357 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
2358 }
2359
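/* The two helpers below snapshot and restore the per-macroblock encoder
 * state around trial encodings, so several candidate MB types can be coded
 * and compared against each other. */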
2360 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2361 int i;
2362
2363 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2364
2365 /* mpeg1 */
2366 d->mb_skip_run= s->mb_skip_run;
2367 for(i=0; i<3; i++)
2368 d->last_dc[i] = s->last_dc[i];
2369
2370 /* statistics */
2371 d->mv_bits= s->mv_bits;
2372 d->i_tex_bits= s->i_tex_bits;
2373 d->p_tex_bits= s->p_tex_bits;
2374 d->i_count= s->i_count;
2375 d->f_count= s->f_count;
2376 d->b_count= s->b_count;
2377 d->skip_count= s->skip_count;
2378 d->misc_bits= s->misc_bits;
2379 d->last_bits= 0;
2380
2381 d->mb_skipped= 0;
2382 d->qscale= s->qscale;
2383 d->dquant= s->dquant;
2384
2385 d->esc3_level_length= s->esc3_level_length;
2386 }
2387
2388 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2389 int i;
2390
2391 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2392 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2393
2394 /* mpeg1 */
2395 d->mb_skip_run= s->mb_skip_run;
2396 for(i=0; i<3; i++)
2397 d->last_dc[i] = s->last_dc[i];
2398
2399 /* statistics */
2400 d->mv_bits= s->mv_bits;
2401 d->i_tex_bits= s->i_tex_bits;
2402 d->p_tex_bits= s->p_tex_bits;
2403 d->i_count= s->i_count;
2404 d->f_count= s->f_count;
2405 d->b_count= s->b_count;
2406 d->skip_count= s->skip_count;
2407 d->misc_bits= s->misc_bits;
2408
2409 d->mb_intra= s->mb_intra;
2410 d->mb_skipped= s->mb_skipped;
2411 d->mv_type= s->mv_type;
2412 d->mv_dir= s->mv_dir;
2413 d->pb= s->pb;
2414 if(s->data_partitioning){
2415 d->pb2= s->pb2;
2416 d->tex_pb= s->tex_pb;
2417 }
2418 d->block= s->block;
2419 for(i=0; i<8; i++)
2420 d->block_last_index[i]= s->block_last_index[i];
2421 d->interlaced_dct= s->interlaced_dct;
2422 d->qscale= s->qscale;
2423
2424 d->esc3_level_length= s->esc3_level_length;
2425 }
2426
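/* Trial-encode one macroblock candidate into one of two scratch bit buffers;
 * in RD mode the score is bits * lambda2 plus the SSE of the reconstruction,
 * and the cheapest candidate seen so far is kept in *best. */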
2427 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2428 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2429 int *dmin, int *next_block, int motion_x, int motion_y)
2430 {
2431 int score;
2432 uint8_t *dest_backup[3];
2433
2434 copy_context_before_encode(s, backup, type);
2435
2436 s->block= s->blocks[*next_block];
2437 s->pb= pb[*next_block];
2438 if(s->data_partitioning){
2439 s->pb2 = pb2 [*next_block];
2440 s->tex_pb= tex_pb[*next_block];
2441 }
2442
2443 if(*next_block){
2444 memcpy(dest_backup, s->dest, sizeof(s->dest));
2445 s->dest[0] = s->sc.rd_scratchpad;
2446 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2447 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2448 assert(s->linesize >= 32); //FIXME
2449 }
2450
2451 encode_mb(s, motion_x, motion_y);
2452
2453 score= put_bits_count(&s->pb);
2454 if(s->data_partitioning){
2455 score+= put_bits_count(&s->pb2);
2456 score+= put_bits_count(&s->tex_pb);
2457 }
2458
2459 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2460 ff_mpv_decode_mb(s, s->block);
2461
2462 score *= s->lambda2;
2463 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2464 }
2465
2466 if(*next_block){
2467 memcpy(s->dest, dest_backup, sizeof(s->dest));
2468 }
2469
2470 if(score<*dmin){
2471 *dmin= score;
2472 *next_block^=1;
2473
2474 copy_context_after_encode(best, s, type);
2475 }
2476 }
2477
2478 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2479 uint32_t *sq = ff_square_tab + 256;
2480 int acc=0;
2481 int x,y;
2482
2483 if(w==16 && h==16)
2484 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2485 else if(w==8 && h==8)
2486 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2487
2488 for(y=0; y<h; y++){
2489 for(x=0; x<w; x++){
2490 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2491 }
2492 }
2493
2494 assert(acc>=0);
2495
2496 return acc;
2497 }
2498
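/* Sum of squared errors between the source macroblock and its reconstruction,
 * handling partial macroblocks at the right/bottom edge and using the NSSE
 * comparison function when it is selected. */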
2499 static int sse_mb(MpegEncContext *s){
2500 int w= 16;
2501 int h= 16;
2502
2503 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2504 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2505
2506 if(w==16 && h==16)
2507 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2508 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2509 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2510 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2511 }else{
2512 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2513 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2514 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2515 }
2516 else
2517 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2518 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2519 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2520 }
2521
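/* Motion-estimation pre-pass: scans the slice from bottom-right to top-left
 * using the pre_dia_size motion search diamond. */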
2522 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2523 MpegEncContext *s= *(void**)arg;
2524
2525
2526 s->me.pre_pass=1;
2527 s->me.dia_size= s->avctx->pre_dia_size;
2528 s->first_slice_line=1;
2529 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2530 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2531 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2532 }
2533 s->first_slice_line=0;
2534 }
2535
2536 s->me.pre_pass=0;
2537
2538 return 0;
2539 }
2540
2541 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2542 MpegEncContext *s= *(void**)arg;
2543
2544 s->me.dia_size= s->avctx->dia_size;
2545 s->first_slice_line=1;
2546 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2547 s->mb_x=0; //for block init below
2548 ff_init_block_index(s);
2549 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2550 s->block_index[0]+=2;
2551 s->block_index[1]+=2;
2552 s->block_index[2]+=2;
2553 s->block_index[3]+=2;
2554
2555 /* compute motion vector & mb_type and store in context */
2556 if(s->pict_type==AV_PICTURE_TYPE_B)
2557 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2558 else
2559 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2560 }
2561 s->first_slice_line=0;
2562 }
2563 return 0;
2564 }
2565
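/* Compute the spatial variance and mean of each 16x16 luma block; the
 * accumulated variance sum feeds rate control / adaptive quantization. */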
2566 static int mb_var_thread(AVCodecContext *c, void *arg){
2567 MpegEncContext *s= *(void**)arg;
2568 int mb_x, mb_y;
2569
2570 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2571 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2572 int xx = mb_x * 16;
2573 int yy = mb_y * 16;
2574 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2575 int varc;
2576 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2577
2578 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2579 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2580
2581 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2582 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2583 s->me.mb_var_sum_temp += varc;
2584 }
2585 }
2586 return 0;
2587 }
2588
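/* Terminate the current slice: merge MPEG-4 data partitions, write any codec
 * stuffing, then byte-align and flush the bitstream. */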
2589 static void write_slice_end(MpegEncContext *s){
2590 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2591 if(s->partitioned_frame){
2592 ff_mpeg4_merge_partitions(s);
2593 }
2594
2595 ff_mpeg4_stuffing(&s->pb);
2596 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2597 ff_mjpeg_encode_stuffing(&s->pb);
2598 }
2599
2600 avpriv_align_put_bits(&s->pb);
2601 flush_put_bits(&s->pb);
2602
2603 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2604 s->misc_bits+= get_bits_diff(s);
2605 }
2606
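/* Append one 12-byte H.263 macroblock-info record (bit offset, quant, GOB
 * number, MB address, motion-vector predictors) to the side-data buffer. */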
2607 static void write_mb_info(MpegEncContext *s)
2608 {
2609 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2610 int offset = put_bits_count(&s->pb);
2611 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2612 int gobn = s->mb_y / s->gob_index;
2613 int pred_x, pred_y;
2614 if (CONFIG_H263_ENCODER)
2615 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2616 bytestream_put_le32(&ptr, offset);
2617 bytestream_put_byte(&ptr, s->qscale);
2618 bytestream_put_byte(&ptr, gobn);
2619 bytestream_put_le16(&ptr, mba);
2620 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2621 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2622 /* 4MV not implemented */
2623 bytestream_put_byte(&ptr, 0); /* hmv2 */
2624 bytestream_put_byte(&ptr, 0); /* vmv2 */
2625 }
2626
2627 static void update_mb_info(MpegEncContext *s, int startcode)
2628 {
2629 if (!s->mb_info)
2630 return;
2631 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2632 s->mb_info_size += 12;
2633 s->prev_mb_info = s->last_mb_info;
2634 }
2635 if (startcode) {
2636 s->prev_mb_info = put_bits_count(&s->pb)/8;
2637 /* This might have incremented mb_info_size above, and we return without
2638 * actually writing any info into that slot yet. But in that case,
2639 * this will be called again after the start code has been written,
2640 * and the mb info will actually be written then. */
2641 return;
2642 }
2643
2644 s->last_mb_info = put_bits_count(&s->pb)/8;
2645 if (!s->mb_info_size)
2646 s->mb_info_size += 12;
2647 write_mb_info(s);
2648 }
2649
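/* Main per-slice encoding loop: runs once per slice context, doing mode
 * decision, rate-distortion trials and bitstream writing macroblock by
 * macroblock. */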
2650 static int encode_thread(AVCodecContext *c, void *arg){
2651 MpegEncContext *s= *(void**)arg;
2652 int mb_x, mb_y, pdif = 0;
2653 int chr_h= 16>>s->chroma_y_shift;
2654 int i, j;
2655 MpegEncContext best_s = { 0 }, backup_s;
2656 uint8_t bit_buf[2][MAX_MB_BYTES];
2657 uint8_t bit_buf2[2][MAX_MB_BYTES];
2658 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2659 PutBitContext pb[2], pb2[2], tex_pb[2];
2660
2661 for(i=0; i<2; i++){
2662 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2663 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2664 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2665 }
2666
2667 s->last_bits= put_bits_count(&s->pb);
2668 s->mv_bits=0;
2669 s->misc_bits=0;
2670 s->i_tex_bits=0;
2671 s->p_tex_bits=0;
2672 s->i_count=0;
2673 s->f_count=0;
2674 s->b_count=0;
2675 s->skip_count=0;
2676
2677 for(i=0; i<3; i++){
2678 /* init last dc values */
2679 /* note: quant matrix value (8) is implied here */
2680 s->last_dc[i] = 128 << s->intra_dc_precision;
2681
2682 s->current_picture.encoding_error[i] = 0;
2683 }
2684 s->mb_skip_run = 0;
2685 memset(s->last_mv, 0, sizeof(s->last_mv));
2686
2687 s->last_mv_dir = 0;
2688
2689 switch(s->codec_id){
2690 case AV_CODEC_ID_H263:
2691 case AV_CODEC_ID_H263P:
2692 case AV_CODEC_ID_FLV1:
2693 if (CONFIG_H263_ENCODER)
2694 s->gob_index = H263_GOB_HEIGHT(s->height);
2695 break;
2696 case AV_CODEC_ID_MPEG4:
2697 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2698 ff_mpeg4_init_partitions(s);
2699 break;
2700 }
2701
2702 s->resync_mb_x=0;
2703 s->resync_mb_y=0;
2704 s->first_slice_line = 1;
2705 s->ptr_lastgob = s->pb.buf;
2706 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2707 s->mb_x=0;
2708 s->mb_y= mb_y;
2709
2710 ff_set_qscale(s, s->qscale);
2711 ff_init_block_index(s);
2712
2713 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2714 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2715 int mb_type= s->mb_type[xy];
2716 // int d;
2717 int dmin= INT_MAX;
2718 int dir;
2719
2720 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2721 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2722 return -1;
2723 }
2724 if(s->data_partitioning){
2725 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2726 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2727 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2728 return -1;
2729 }
2730 }
2731
2732 s->mb_x = mb_x;
2733 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2734 ff_update_block_index(s);
2735
2736 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2737 ff_h261_reorder_mb_index(s);
2738 xy= s->mb_y*s->mb_stride + s->mb_x;
2739 mb_type= s->mb_type[xy];
2740 }
2741
2742 /* write gob / video packet header */
2743 if(s->rtp_mode){
2744 int current_packet_size, is_gob_start;
2745
2746 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2747
2748 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2749
2750 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2751
2752 switch(s->codec_id){
2753 case AV_CODEC_ID_H263:
2754 case AV_CODEC_ID_H263P:
2755 if(!s->h263_slice_structured)
2756 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2757 break;
2758 case AV_CODEC_ID_MPEG2VIDEO:
2759 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2760 case AV_CODEC_ID_MPEG1VIDEO:
2761 if(s->mb_skip_run) is_gob_start=0;
2762 break;
2763 }
2764
2765 if(is_gob_start){
2766 if(s->start_mb_y != mb_y || mb_x!=0){
2767 write_slice_end(s);
2768
2769 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2770 ff_mpeg4_init_partitions(s);
2771 }
2772 }
2773
2774 assert((put_bits_count(&s->pb)&7) == 0);
2775 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2776
2777 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2778 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2779 int d = 100 / s->error_rate;
2780 if(r % d == 0){
2781 current_packet_size=0;
2782 s->pb.buf_ptr= s->ptr_lastgob;
2783 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2784 }
2785 }
2786
2787 #if FF_API_RTP_CALLBACK
2788 FF_DISABLE_DEPRECATION_WARNINGS
2789 if (s->avctx->rtp_callback){
2790 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2791 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2792 }
2793 FF_ENABLE_DEPRECATION_WARNINGS
2794 #endif
2795 update_mb_info(s, 1);
2796
2797 switch(s->codec_id){
2798 case AV_CODEC_ID_MPEG4:
2799 if (CONFIG_MPEG4_ENCODER) {
2800 ff_mpeg4_encode_video_packet_header(s);
2801 ff_mpeg4_clean_buffers(s);
2802 }
2803 break;
2804 case AV_CODEC_ID_MPEG1VIDEO:
2805 case AV_CODEC_ID_MPEG2VIDEO:
2806 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2807 ff_mpeg1_encode_slice_header(s);
2808 ff_mpeg1_clean_buffers(s);
2809 }
2810 break;
2811 case AV_CODEC_ID_H263:
2812 case AV_CODEC_ID_H263P:
2813 if (CONFIG_H263_ENCODER)
2814 ff_h263_encode_gob_header(s, mb_y);
2815 break;
2816 }
2817
2818 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
2819 int bits= put_bits_count(&s->pb);
2820 s->misc_bits+= bits - s->last_bits;
2821 s->last_bits= bits;
2822 }
2823
2824 s->ptr_lastgob += current_packet_size;
2825 s->first_slice_line=1;
2826 s->resync_mb_x=mb_x;
2827 s->resync_mb_y=mb_y;
2828 }
2829 }
2830
2831 if( (s->resync_mb_x == s->mb_x)
2832 && s->resync_mb_y+1 == s->mb_y){
2833 s->first_slice_line=0;
2834 }
2835
2836 s->mb_skipped=0;
2837 s->dquant=0; //only for QP_RD
2838
2839 update_mb_info(s, 0);
2840
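/* Several candidate macroblock types (or QP values with QP_RD) are possible
 * here, so trial-encode each of them with encode_mb_hq() and keep the
 * cheapest one in rate-distortion terms. */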
2841 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2842 int next_block=0;
2843 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2844
2845 copy_context_before_encode(&backup_s, s, -1);
2846 backup_s.pb= s->pb;
2847 best_s.data_partitioning= s->data_partitioning;
2848 best_s.partitioned_frame= s->partitioned_frame;
2849 if(s->data_partitioning){
2850 backup_s.pb2= s->pb2;
2851 backup_s.tex_pb= s->tex_pb;
2852 }
2853
2854 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2855 s->mv_dir = MV_DIR_FORWARD;
2856 s->mv_type = MV_TYPE_16X16;
2857 s->mb_intra= 0;
2858 s->mv[0][0][0] = s->p_mv_table[xy][0];
2859 s->mv[0][0][1] = s->p_mv_table[xy][1];
2860 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2861 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2862 }
2863 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2864 s->mv_dir = MV_DIR_FORWARD;
2865 s->mv_type = MV_TYPE_FIELD;
2866 s->mb_intra= 0;
2867 for(i=0; i<2; i++){
2868 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2869 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2870 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2871 }
2872 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2873 &dmin, &next_block, 0, 0);
2874 }
2875 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2876 s->mv_dir = MV_DIR_FORWARD;
2877 s->mv_type = MV_TYPE_16X16;
2878 s->mb_intra= 0;
2879 s->mv[0][0][0] = 0;
2880 s->mv[0][0][1] = 0;
2881 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2882 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2883 }
2884 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2885 s->mv_dir = MV_DIR_FORWARD;
2886 s->mv_type = MV_TYPE_8X8;
2887 s->mb_intra= 0;
2888 for(i=0; i<4; i++){
2889 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2890 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2891 }
2892 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2893 &dmin, &next_block, 0, 0);
2894 }
2895 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2896 s->mv_dir = MV_DIR_FORWARD;
2897 s->mv_type = MV_TYPE_16X16;
2898 s->mb_intra= 0;
2899 s->mv[0][0][0