Drop libxvid rate control support for mpegvideo encoding
[libav.git] / libavcodec / mpegvideo_enc.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include <stdint.h>
31
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timer.h"
38 #include "avcodec.h"
39 #include "dct.h"
40 #include "idctdsp.h"
41 #include "mpeg12.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
44 #include "h261.h"
45 #include "h263.h"
46 #include "h263data.h"
47 #include "mjpegenc_common.h"
48 #include "mathops.h"
49 #include "mpegutils.h"
50 #include "mjpegenc.h"
51 #include "msmpeg4.h"
52 #include "pixblockdsp.h"
53 #include "qpeldsp.h"
54 #include "faandct.h"
55 #include "thread.h"
56 #include "aandcttab.h"
57 #include "flv.h"
58 #include "mpeg4video.h"
59 #include "internal.h"
60 #include "bytestream.h"
61 #include "wmv2.h"
62 #include "rv10.h"
63 #include <limits.h>
64
65 #define QUANT_BIAS_SHIFT 8
66
67 #define QMAT_SHIFT_MMX 16
68 #define QMAT_SHIFT 22
69
70 static int encode_picture(MpegEncContext *s, int picture_number);
71 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
72 static int sse_mb(MpegEncContext *s);
73 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
74 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
75
76 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
77 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
78
79 const AVOption ff_mpv_generic_options[] = {
80 FF_MPV_COMMON_OPTS
81 { NULL },
82 };
83
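/* In outline, ff_convert_matrix() precomputes reciprocal tables so that the
 * quantizer can replace a division by (qscale * quant_matrix[i]) with a
 * multiply and a shift, roughly:
 *
 *     level = (coef * qmat[qscale][i]) >> QMAT_SHIFT;   // ~ coef / (qscale * quant_matrix[i])
 *
 * e.g. qscale = 2, quant_matrix[i] = 16 gives den = 32 and
 * qmat = (1 << 22) / 32 = 131072, so (coef * 131072) >> 22 == coef / 32.
 * qmat16 holds the same reciprocals at the lower QMAT_SHIFT_MMX precision for
 * the 16-bit (SIMD) quantizer, together with a per-entry rounding bias. */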
84 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
85 uint16_t (*qmat16)[2][64],
86 const uint16_t *quant_matrix,
87 int bias, int qmin, int qmax, int intra)
88 {
89 FDCTDSPContext *fdsp = &s->fdsp;
90 int qscale;
91 int shift = 0;
92
93 for (qscale = qmin; qscale <= qmax; qscale++) {
94 int i;
95 if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
96 #if CONFIG_FAANDCT
97 fdsp->fdct == ff_faandct ||
98 #endif /* CONFIG_FAANDCT */
99 fdsp->fdct == ff_jpeg_fdct_islow_10) {
100 for (i = 0; i < 64; i++) {
101 const int j = s->idsp.idct_permutation[i];
102 int64_t den = (int64_t) qscale * quant_matrix[j];
103 /* 16 <= qscale * quant_matrix[i] <= 7905
104 * Assume x = qscale * quant_matrix[i]
105 * so 16 <= x <= 7905
106 * so (1 << 22) / 16 >= (1 << 22) / (x) >= (1 << 22) / 7905
107 * so 262144 >= (1 << 22) / (x) >= 530 */
108
109 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
110 }
111 } else if (fdsp->fdct == ff_fdct_ifast) {
112 for (i = 0; i < 64; i++) {
113 const int j = s->idsp.idct_permutation[i];
114 int64_t den = ff_aanscales[i] * (int64_t) qscale * quant_matrix[j];
115 /* 16 <= qscale * quant_matrix[i] <= 7905
116 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
117 * 19952 <= x <= 249205026
118 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
119 * 3444240 >= (1 << 36) / (x) >= 275 */
120
121 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / den);
122 }
123 } else {
124 for (i = 0; i < 64; i++) {
125 const int j = s->idsp.idct_permutation[i];
126 int64_t den = (int64_t) qscale * quant_matrix[j];
127 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
128 * Assume x = qscale * quant_matrix[i]
129 * So 16 <= x <= 7905
130 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
131 * so 32768 >= (1 << 19) / (x) >= 67 */
132 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
133 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
134 // (qscale * quant_matrix[i]);
135 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / den;
136
137 if (qmat16[qscale][0][i] == 0 ||
138 qmat16[qscale][0][i] == 128 * 256)
139 qmat16[qscale][0][i] = 128 * 256 - 1;
140 qmat16[qscale][1][i] =
141 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
142 qmat16[qscale][0][i]);
143 }
144 }
145
146 for (i = intra; i < 64; i++) {
147 int64_t max = 8191;
148 if (fdsp->fdct == ff_fdct_ifast) {
149 max = (8191LL * ff_aanscales[i]) >> 14;
150 }
151 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
152 shift++;
153 }
154 }
155 }
156 if (shift) {
157 av_log(NULL, AV_LOG_INFO,
158 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
159 QMAT_SHIFT - shift);
160 }
161 }
162
163 static inline void update_qscale(MpegEncContext *s)
164 {
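/* With FF_LAMBDA_SHIFT == 7 this is, in effect, qscale ~= lambda / FF_QP2LAMBDA:
 * 139 / (1 << 14) is roughly 1 / 118, and FF_LAMBDA_SCALE * 64 == 8192 is half
 * the divisor, i.e. a rounding term.  For example, lambda = 4 * FF_QP2LAMBDA =
 * 472 yields (472 * 139 + 8192) >> 14 = 4. */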
165 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
166 (FF_LAMBDA_SHIFT + 7);
167 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
168
169 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
170 FF_LAMBDA_SHIFT;
171 }
172
173 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
174 {
175 int i;
176
177 if (matrix) {
178 put_bits(pb, 1, 1);
179 for (i = 0; i < 64; i++) {
180 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
181 }
182 } else
183 put_bits(pb, 1, 0);
184 }
185
186 /**
187 * init s->current_picture.qscale_table from s->lambda_table
188 */
189 void ff_init_qscale_tab(MpegEncContext *s)
190 {
191 int8_t * const qscale_table = s->current_picture.qscale_table;
192 int i;
193
194 for (i = 0; i < s->mb_num; i++) {
195 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
196 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
197 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
198 s->avctx->qmax);
199 }
200 }
201
202 static void update_duplicate_context_after_me(MpegEncContext *dst,
203 MpegEncContext *src)
204 {
205 #define COPY(a) dst->a= src->a
206 COPY(pict_type);
207 COPY(current_picture);
208 COPY(f_code);
209 COPY(b_code);
210 COPY(qscale);
211 COPY(lambda);
212 COPY(lambda2);
213 COPY(picture_in_gop_number);
214 COPY(gop_picture_number);
215 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
216 COPY(progressive_frame); // FIXME don't set in encode_header
217 COPY(partitioned_frame); // FIXME don't set in encode_header
218 #undef COPY
219 }
220
221 /**
222 * Set the given MpegEncContext to defaults for encoding.
223 * The changed fields will not depend upon the prior state of the MpegEncContext.
224 */
225 static void mpv_encode_defaults(MpegEncContext *s)
226 {
227 int i;
228 ff_mpv_common_defaults(s);
229
230 for (i = -16; i < 16; i++) {
231 default_fcode_tab[i + MAX_MV] = 1;
232 }
233 s->me.mv_penalty = default_mv_penalty;
234 s->fcode_tab = default_fcode_tab;
235
236 s->input_picture_number = 0;
237 s->picture_in_gop_number = 0;
238 }
239
240 /* init video encoder */
241 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
242 {
243 MpegEncContext *s = avctx->priv_data;
244 AVCPBProperties *cpb_props;
245 int i, ret, format_supported;
246
247 mpv_encode_defaults(s);
248
249 switch (avctx->codec_id) {
250 case AV_CODEC_ID_MPEG2VIDEO:
251 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
252 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
253 av_log(avctx, AV_LOG_ERROR,
254 "only YUV420 and YUV422 are supported\n");
255 return -1;
256 }
257 break;
258 case AV_CODEC_ID_MJPEG:
259 format_supported = 0;
260 /* JPEG color space */
261 if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
262 avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
263 (avctx->color_range == AVCOL_RANGE_JPEG &&
264 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
265 avctx->pix_fmt == AV_PIX_FMT_YUV422P)))
266 format_supported = 1;
267 /* MPEG color space */
268 else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
269 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
270 avctx->pix_fmt == AV_PIX_FMT_YUV422P))
271 format_supported = 1;
272
273 if (!format_supported) {
274 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
275 return -1;
276 }
277 break;
278 default:
279 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
280 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
281 return -1;
282 }
283 }
284
285 switch (avctx->pix_fmt) {
286 case AV_PIX_FMT_YUVJ422P:
287 case AV_PIX_FMT_YUV422P:
288 s->chroma_format = CHROMA_422;
289 break;
290 case AV_PIX_FMT_YUVJ420P:
291 case AV_PIX_FMT_YUV420P:
292 default:
293 s->chroma_format = CHROMA_420;
294 break;
295 }
296
297 #if FF_API_PRIVATE_OPT
298 FF_DISABLE_DEPRECATION_WARNINGS
299 if (avctx->rtp_payload_size)
300 s->rtp_payload_size = avctx->rtp_payload_size;
301 if (avctx->me_penalty_compensation)
302 s->me_penalty_compensation = avctx->me_penalty_compensation;
303 if (avctx->pre_me)
304 s->me_pre = avctx->pre_me;
305 FF_ENABLE_DEPRECATION_WARNINGS
306 #endif
307
308 s->bit_rate = avctx->bit_rate;
309 s->width = avctx->width;
310 s->height = avctx->height;
311 if (avctx->gop_size > 600 &&
312 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
313 av_log(avctx, AV_LOG_ERROR,
314 "Warning keyframe interval too large! reducing it ...\n");
315 avctx->gop_size = 600;
316 }
317 s->gop_size = avctx->gop_size;
318 s->avctx = avctx;
319 if (avctx->max_b_frames > MAX_B_FRAMES) {
320 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
321 "is %d.\n", MAX_B_FRAMES);
322 }
323 s->max_b_frames = avctx->max_b_frames;
324 s->codec_id = avctx->codec->id;
325 s->strict_std_compliance = avctx->strict_std_compliance;
326 s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
327 s->rtp_mode = !!s->rtp_payload_size;
328 s->intra_dc_precision = avctx->intra_dc_precision;
329 s->user_specified_pts = AV_NOPTS_VALUE;
330
331 if (s->gop_size <= 1) {
332 s->intra_only = 1;
333 s->gop_size = 12;
334 } else {
335 s->intra_only = 0;
336 }
337
338 #if FF_API_MOTION_EST
339 FF_DISABLE_DEPRECATION_WARNINGS
340 s->me_method = avctx->me_method;
341 FF_ENABLE_DEPRECATION_WARNINGS
342 #endif
343
344 /* Fixed QSCALE */
345 s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
346
347 #if FF_API_MPV_OPT
348 FF_DISABLE_DEPRECATION_WARNINGS
349 if (avctx->border_masking != 0.0)
350 s->border_masking = avctx->border_masking;
351 FF_ENABLE_DEPRECATION_WARNINGS
352 #endif
353
354 s->adaptive_quant = (s->avctx->lumi_masking ||
355 s->avctx->dark_masking ||
356 s->avctx->temporal_cplx_masking ||
357 s->avctx->spatial_cplx_masking ||
358 s->avctx->p_masking ||
359 s->border_masking ||
360 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
361 !s->fixed_qscale;
362
363 s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
364
365 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
366 av_log(avctx, AV_LOG_ERROR,
367 "a vbv buffer size is needed, "
368 "for encoding with a maximum bitrate\n");
369 return -1;
370 }
371
372 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
373 av_log(avctx, AV_LOG_INFO,
374 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
375 }
376
377 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
378 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
379 return -1;
380 }
381
382 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
383 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
384 return -1;
385 }
386
387 if (avctx->rc_max_rate &&
388 avctx->rc_max_rate == avctx->bit_rate &&
389 avctx->rc_max_rate != avctx->rc_min_rate) {
390 av_log(avctx, AV_LOG_INFO,
391 "impossible bitrate constraints, this will fail\n");
392 }
393
394 if (avctx->rc_buffer_size &&
395 avctx->bit_rate * (int64_t)avctx->time_base.num >
396 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
397 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
398 return -1;
399 }
400
401 if (!s->fixed_qscale &&
402 avctx->bit_rate * av_q2d(avctx->time_base) >
403 avctx->bit_rate_tolerance) {
404 av_log(avctx, AV_LOG_ERROR,
405 "bitrate tolerance too small for bitrate\n");
406 return -1;
407 }
408
409 if (s->avctx->rc_max_rate &&
410 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
411 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
412 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
413 90000LL * (avctx->rc_buffer_size - 1) >
414 s->avctx->rc_max_rate * 0xFFFFLL) {
415 av_log(avctx, AV_LOG_INFO,
416 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
417 "specified vbv buffer is too large for the given bitrate!\n");
418 }
419
420 if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
421 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
422 s->codec_id != AV_CODEC_ID_FLV1) {
423 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
424 return -1;
425 }
426
427 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
428 av_log(avctx, AV_LOG_ERROR,
429 "OBMC is only supported with simple mb decision\n");
430 return -1;
431 }
432
433 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
434 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
435 return -1;
436 }
437
438 if (s->max_b_frames &&
439 s->codec_id != AV_CODEC_ID_MPEG4 &&
440 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
441 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
442 av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
443 return -1;
444 }
445
446 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
447 s->codec_id == AV_CODEC_ID_H263 ||
448 s->codec_id == AV_CODEC_ID_H263P) &&
449 (avctx->sample_aspect_ratio.num > 255 ||
450 avctx->sample_aspect_ratio.den > 255)) {
451 av_log(avctx, AV_LOG_ERROR,
452 "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
453 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
454 return -1;
455 }
456
457 if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
458 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
459 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
460 return -1;
461 }
462
463 #if FF_API_PRIVATE_OPT
464 FF_DISABLE_DEPRECATION_WARNINGS
465 if (avctx->mpeg_quant)
466 s->mpeg_quant = avctx->mpeg_quant;
467 FF_ENABLE_DEPRECATION_WARNINGS
468 #endif
469
470 // FIXME mpeg2 uses that too
471 if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
472 av_log(avctx, AV_LOG_ERROR,
473 "mpeg2 style quantization not supported by codec\n");
474 return -1;
475 }
476
477 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
478 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
479 return -1;
480 }
481
482 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
483 s->avctx->mb_decision != FF_MB_DECISION_RD) {
484 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
485 return -1;
486 }
487
488 #if FF_API_PRIVATE_OPT
489 FF_DISABLE_DEPRECATION_WARNINGS
490 if (avctx->scenechange_threshold)
491 s->scenechange_threshold = avctx->scenechange_threshold;
492 FF_ENABLE_DEPRECATION_WARNINGS
493 #endif
494
495 if (s->scenechange_threshold < 1000000000 &&
496 (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
497 av_log(avctx, AV_LOG_ERROR,
498 "closed gop with scene change detection are not supported yet, "
499 "set threshold to 1000000000\n");
500 return -1;
501 }
502
503 if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
504 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
505 av_log(avctx, AV_LOG_ERROR,
506 "low delay forcing is only available for mpeg2\n");
507 return -1;
508 }
509 if (s->max_b_frames != 0) {
510 av_log(avctx, AV_LOG_ERROR,
511 "B-frames cannot be used with low delay\n");
512 return -1;
513 }
514 }
515
516 if (s->q_scale_type == 1) {
517 if (avctx->qmax > 12) {
518 av_log(avctx, AV_LOG_ERROR,
519 "non linear quant only supports qmax <= 12 currently\n");
520 return -1;
521 }
522 }
523
524 if (avctx->slices > 1 &&
525 (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
526 av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
527 return AVERROR(EINVAL);
528 }
529
530 if (s->avctx->thread_count > 1 &&
531 s->codec_id != AV_CODEC_ID_MPEG4 &&
532 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
533 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
534 (s->codec_id != AV_CODEC_ID_H263P)) {
535 av_log(avctx, AV_LOG_ERROR,
536 "multi threaded encoding not supported by codec\n");
537 return -1;
538 }
539
540 if (s->avctx->thread_count < 1) {
541 av_log(avctx, AV_LOG_ERROR,
542 "automatic thread number detection not supported by codec,"
543 "patch welcome\n");
544 return -1;
545 }
546
547 if (!avctx->time_base.den || !avctx->time_base.num) {
548 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
549 return -1;
550 }
551
552 #if FF_API_PRIVATE_OPT
553 FF_DISABLE_DEPRECATION_WARNINGS
554 if (avctx->b_frame_strategy)
555 s->b_frame_strategy = avctx->b_frame_strategy;
556 if (avctx->b_sensitivity != 40)
557 s->b_sensitivity = avctx->b_sensitivity;
558 FF_ENABLE_DEPRECATION_WARNINGS
559 #endif
560
561 if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
562 av_log(avctx, AV_LOG_INFO,
563 "notice: b_frame_strategy only affects the first pass\n");
564 s->b_frame_strategy = 0;
565 }
566
567 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
568 if (i > 1) {
569 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
570 avctx->time_base.den /= i;
571 avctx->time_base.num /= i;
572 //return -1;
573 }
574
575 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
576 s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
577 // (a + x * 3 / 8) / x
578 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
579 s->inter_quant_bias = 0;
580 } else {
581 s->intra_quant_bias = 0;
582 // (a - x / 4) / x
583 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
584 }
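/* The biases above are fractions in QUANT_BIAS_SHIFT (Q8) fixed point, i.e. in
 * units of 1/256 of a quantizer step: 3 << (8 - 3) = 96 corresponds to +3/8
 * (the usual rounding offset for intra coefficients), while -(1 << (8 - 2)) =
 * -64 corresponds to -1/4, widening the dead zone for inter blocks so that
 * near-zero coefficients are more likely to quantize to zero. */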
585
586 #if FF_API_QUANT_BIAS
587 FF_DISABLE_DEPRECATION_WARNINGS
588 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
589 s->intra_quant_bias = avctx->intra_quant_bias;
590 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
591 s->inter_quant_bias = avctx->inter_quant_bias;
592 FF_ENABLE_DEPRECATION_WARNINGS
593 #endif
594
595 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
596 s->avctx->time_base.den > (1 << 16) - 1) {
597 av_log(avctx, AV_LOG_ERROR,
598 "timebase %d/%d not supported by MPEG 4 standard, "
599 "the maximum admitted value for the timebase denominator "
600 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
601 (1 << 16) - 1);
602 return -1;
603 }
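/* The MPEG-4 time increment is coded with just enough bits to represent
 * time_base.den - 1, hence the av_log2() below; e.g. den = 25 gives
 * av_log2(24) + 1 = 5 bits. */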
604 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
605
606 switch (avctx->codec->id) {
607 case AV_CODEC_ID_MPEG1VIDEO:
608 s->out_format = FMT_MPEG1;
609 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
610 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
611 break;
612 case AV_CODEC_ID_MPEG2VIDEO:
613 s->out_format = FMT_MPEG1;
614 s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
615 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
616 s->rtp_mode = 1;
617 break;
618 case AV_CODEC_ID_MJPEG:
619 s->out_format = FMT_MJPEG;
620 s->intra_only = 1; /* force intra only for jpeg */
621 if (!CONFIG_MJPEG_ENCODER ||
622 ff_mjpeg_encode_init(s) < 0)
623 return -1;
624 avctx->delay = 0;
625 s->low_delay = 1;
626 break;
627 case AV_CODEC_ID_H261:
628 if (!CONFIG_H261_ENCODER)
629 return -1;
630 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
631 av_log(avctx, AV_LOG_ERROR,
632 "The specified picture size of %dx%d is not valid for the "
633 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
634 s->width, s->height);
635 return -1;
636 }
637 s->out_format = FMT_H261;
638 avctx->delay = 0;
639 s->low_delay = 1;
640 s->rtp_mode = 0; /* Sliced encoding not supported */
641 break;
642 case AV_CODEC_ID_H263:
643 if (!CONFIG_H263_ENCODER)
644 return -1;
645 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
646 s->width, s->height) == 8) {
647 av_log(avctx, AV_LOG_INFO,
648 "The specified picture size of %dx%d is not valid for "
649 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
650 "352x288, 704x576, and 1408x1152."
651 "Try H.263+.\n", s->width, s->height);
652 return -1;
653 }
654 s->out_format = FMT_H263;
655 avctx->delay = 0;
656 s->low_delay = 1;
657 break;
658 case AV_CODEC_ID_H263P:
659 s->out_format = FMT_H263;
660 s->h263_plus = 1;
661 /* Fx */
662 s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
663 s->modified_quant = s->h263_aic;
664 s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
665 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
666
667 /* /Fx */
668 /* These are just to be sure */
669 avctx->delay = 0;
670 s->low_delay = 1;
671 break;
672 case AV_CODEC_ID_FLV1:
673 s->out_format = FMT_H263;
674 s->h263_flv = 2; /* format = 1; 11-bit codes */
675 s->unrestricted_mv = 1;
676 s->rtp_mode = 0; /* don't allow GOB */
677 avctx->delay = 0;
678 s->low_delay = 1;
679 break;
680 case AV_CODEC_ID_RV10:
681 s->out_format = FMT_H263;
682 avctx->delay = 0;
683 s->low_delay = 1;
684 break;
685 case AV_CODEC_ID_RV20:
686 s->out_format = FMT_H263;
687 avctx->delay = 0;
688 s->low_delay = 1;
689 s->modified_quant = 1;
690 s->h263_aic = 1;
691 s->h263_plus = 1;
692 s->loop_filter = 1;
693 s->unrestricted_mv = 0;
694 break;
695 case AV_CODEC_ID_MPEG4:
696 s->out_format = FMT_H263;
697 s->h263_pred = 1;
698 s->unrestricted_mv = 1;
699 s->low_delay = s->max_b_frames ? 0 : 1;
700 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
701 break;
702 case AV_CODEC_ID_MSMPEG4V2:
703 s->out_format = FMT_H263;
704 s->h263_pred = 1;
705 s->unrestricted_mv = 1;
706 s->msmpeg4_version = 2;
707 avctx->delay = 0;
708 s->low_delay = 1;
709 break;
710 case AV_CODEC_ID_MSMPEG4V3:
711 s->out_format = FMT_H263;
712 s->h263_pred = 1;
713 s->unrestricted_mv = 1;
714 s->msmpeg4_version = 3;
715 s->flipflop_rounding = 1;
716 avctx->delay = 0;
717 s->low_delay = 1;
718 break;
719 case AV_CODEC_ID_WMV1:
720 s->out_format = FMT_H263;
721 s->h263_pred = 1;
722 s->unrestricted_mv = 1;
723 s->msmpeg4_version = 4;
724 s->flipflop_rounding = 1;
725 avctx->delay = 0;
726 s->low_delay = 1;
727 break;
728 case AV_CODEC_ID_WMV2:
729 s->out_format = FMT_H263;
730 s->h263_pred = 1;
731 s->unrestricted_mv = 1;
732 s->msmpeg4_version = 5;
733 s->flipflop_rounding = 1;
734 avctx->delay = 0;
735 s->low_delay = 1;
736 break;
737 default:
738 return -1;
739 }
740
741 #if FF_API_PRIVATE_OPT
742 FF_DISABLE_DEPRECATION_WARNINGS
743 if (avctx->noise_reduction)
744 s->noise_reduction = avctx->noise_reduction;
745 FF_ENABLE_DEPRECATION_WARNINGS
746 #endif
747
748 avctx->has_b_frames = !s->low_delay;
749
750 s->encoding = 1;
751
752 s->progressive_frame =
753 s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
754 AV_CODEC_FLAG_INTERLACED_ME) ||
755 s->alternate_scan);
756
757 /* init */
758 ff_mpv_idct_init(s);
759 if (ff_mpv_common_init(s) < 0)
760 return -1;
761
762 if (ARCH_X86)
763 ff_mpv_encode_init_x86(s);
764
765 ff_fdctdsp_init(&s->fdsp, avctx);
766 ff_me_cmp_init(&s->mecc, avctx);
767 ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
768 ff_pixblockdsp_init(&s->pdsp, avctx);
769 ff_qpeldsp_init(&s->qdsp);
770
771 if (s->msmpeg4_version) {
772 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
773 2 * 2 * (MAX_LEVEL + 1) *
774 (MAX_RUN + 1) * 2 * sizeof(int), fail);
775 }
776 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
777
778 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
779 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
780 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
781 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
782 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
783 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
784 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
785 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
786
787
788 if (s->noise_reduction) {
789 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
790 2 * 64 * sizeof(uint16_t), fail);
791 }
792
793 if (CONFIG_H263_ENCODER)
794 ff_h263dsp_init(&s->h263dsp);
795 if (!s->dct_quantize)
796 s->dct_quantize = ff_dct_quantize_c;
797 if (!s->denoise_dct)
798 s->denoise_dct = denoise_dct_c;
799 s->fast_dct_quantize = s->dct_quantize;
800 if (avctx->trellis)
801 s->dct_quantize = dct_quantize_trellis_c;
802
803 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
804 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
805
806 if (s->slice_context_count > 1) {
807 s->rtp_mode = 1;
808
809 if (avctx->codec_id == AV_CODEC_ID_H263 || avctx->codec_id == AV_CODEC_ID_H263P)
810 s->h263_slice_structured = 1;
811 }
812
813 s->quant_precision = 5;
814
815 #if FF_API_PRIVATE_OPT
816 FF_DISABLE_DEPRECATION_WARNINGS
817 if (avctx->frame_skip_threshold)
818 s->frame_skip_threshold = avctx->frame_skip_threshold;
819 if (avctx->frame_skip_factor)
820 s->frame_skip_factor = avctx->frame_skip_factor;
821 if (avctx->frame_skip_exp)
822 s->frame_skip_exp = avctx->frame_skip_exp;
823 if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
824 s->frame_skip_cmp = avctx->frame_skip_cmp;
825 FF_ENABLE_DEPRECATION_WARNINGS
826 #endif
827
828 ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
829 ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
830
831 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
832 ff_h261_encode_init(s);
833 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
834 ff_h263_encode_init(s);
835 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
836 if ((ret = ff_msmpeg4_encode_init(s)) < 0)
837 return ret;
838 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
839 && s->out_format == FMT_MPEG1)
840 ff_mpeg1_encode_init(s);
841
842 /* init q matrix */
843 for (i = 0; i < 64; i++) {
844 int j = s->idsp.idct_permutation[i];
845 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
846 s->mpeg_quant) {
847 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
848 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
849 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
850 s->intra_matrix[j] =
851 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
852 } else {
853 /* MPEG-1/2 */
854 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
855 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
856 }
857 if (s->avctx->intra_matrix)
858 s->intra_matrix[j] = s->avctx->intra_matrix[i];
859 if (s->avctx->inter_matrix)
860 s->inter_matrix[j] = s->avctx->inter_matrix[i];
861 }
862
863 /* precompute matrix */
864 /* for mjpeg, we do include qscale in the matrix */
865 if (s->out_format != FMT_MJPEG) {
866 ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
867 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
868 31, 1);
869 ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
870 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
871 31, 0);
872 }
873
874 if (ff_rate_control_init(s) < 0)
875 return -1;
876
877 #if FF_API_ERROR_RATE
878 FF_DISABLE_DEPRECATION_WARNINGS
879 if (avctx->error_rate)
880 s->error_rate = avctx->error_rate;
881 FF_ENABLE_DEPRECATION_WARNINGS;
882 #endif
883
884 #if FF_API_NORMALIZE_AQP
885 FF_DISABLE_DEPRECATION_WARNINGS
886 if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
887 s->mpv_flags |= FF_MPV_FLAG_NAQ;
888 FF_ENABLE_DEPRECATION_WARNINGS;
889 #endif
890
891 #if FF_API_MV0
892 FF_DISABLE_DEPRECATION_WARNINGS
893 if (avctx->flags & CODEC_FLAG_MV0)
894 s->mpv_flags |= FF_MPV_FLAG_MV0;
895 FF_ENABLE_DEPRECATION_WARNINGS
896 #endif
897
898 #if FF_API_MPV_OPT
899 FF_DISABLE_DEPRECATION_WARNINGS
900 if (avctx->rc_qsquish != 0.0)
901 s->rc_qsquish = avctx->rc_qsquish;
902 if (avctx->rc_qmod_amp != 0.0)
903 s->rc_qmod_amp = avctx->rc_qmod_amp;
904 if (avctx->rc_qmod_freq)
905 s->rc_qmod_freq = avctx->rc_qmod_freq;
906 if (avctx->rc_buffer_aggressivity != 1.0)
907 s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
908 if (avctx->rc_initial_cplx != 0.0)
909 s->rc_initial_cplx = avctx->rc_initial_cplx;
910 if (avctx->lmin)
911 s->lmin = avctx->lmin;
912 if (avctx->lmax)
913 s->lmax = avctx->lmax;
914
915 if (avctx->rc_eq) {
916 av_freep(&s->rc_eq);
917 s->rc_eq = av_strdup(avctx->rc_eq);
918 if (!s->rc_eq)
919 return AVERROR(ENOMEM);
920 }
921 FF_ENABLE_DEPRECATION_WARNINGS
922 #endif
923
924 #if FF_API_PRIVATE_OPT
925 FF_DISABLE_DEPRECATION_WARNINGS
926 if (avctx->brd_scale)
927 s->brd_scale = avctx->brd_scale;
928
929 if (avctx->prediction_method)
930 s->pred = avctx->prediction_method + 1;
931 FF_ENABLE_DEPRECATION_WARNINGS
932 #endif
933
934 if (s->b_frame_strategy == 2) {
935 for (i = 0; i < s->max_b_frames + 2; i++) {
936 s->tmp_frames[i] = av_frame_alloc();
937 if (!s->tmp_frames[i])
938 return AVERROR(ENOMEM);
939
940 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
941 s->tmp_frames[i]->width = s->width >> s->brd_scale;
942 s->tmp_frames[i]->height = s->height >> s->brd_scale;
943
944 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
945 if (ret < 0)
946 return ret;
947 }
948 }
949
950 cpb_props = ff_add_cpb_side_data(avctx);
951 if (!cpb_props)
952 return AVERROR(ENOMEM);
953 cpb_props->max_bitrate = avctx->rc_max_rate;
954 cpb_props->min_bitrate = avctx->rc_min_rate;
955 cpb_props->avg_bitrate = avctx->bit_rate;
956 cpb_props->buffer_size = avctx->rc_buffer_size;
957
958 return 0;
959 fail:
960 ff_mpv_encode_end(avctx);
961 return AVERROR_UNKNOWN;
962 }
963
964 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
965 {
966 MpegEncContext *s = avctx->priv_data;
967 int i;
968
969 ff_rate_control_uninit(s);
970 ff_mpv_common_end(s);
971 if (CONFIG_MJPEG_ENCODER &&
972 s->out_format == FMT_MJPEG)
973 ff_mjpeg_encode_close(s);
974
975 av_freep(&avctx->extradata);
976
977 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
978 av_frame_free(&s->tmp_frames[i]);
979
980 ff_free_picture_tables(&s->new_picture);
981 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
982
983 av_freep(&s->avctx->stats_out);
984 av_freep(&s->ac_stats);
985
986 av_freep(&s->q_intra_matrix);
987 av_freep(&s->q_inter_matrix);
988 av_freep(&s->q_intra_matrix16);
989 av_freep(&s->q_inter_matrix16);
990 av_freep(&s->input_picture);
991 av_freep(&s->reordered_input_picture);
992 av_freep(&s->dct_offset);
993
994 return 0;
995 }
996
997 static int get_sae(uint8_t *src, int ref, int stride)
998 {
999 int x,y;
1000 int acc = 0;
1001
1002 for (y = 0; y < 16; y++) {
1003 for (x = 0; x < 16; x++) {
1004 acc += FFABS(src[x + y * stride] - ref);
1005 }
1006 }
1007
1008 return acc;
1009 }
1010
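/* Rough scene/intra heuristic used by b_frame_strategy == 1: for each 16x16
 * block, compare the inter cost (SAD against the co-located block of the
 * previous input frame) with an intra-like cost (sum of absolute differences
 * around the block mean, via get_sae()) and count the blocks where intra looks
 * cheaper by a margin of 500.  A high count suggests the frame is a poor
 * B-frame candidate. */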
1011 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1012 uint8_t *ref, int stride)
1013 {
1014 int x, y, w, h;
1015 int acc = 0;
1016
1017 w = s->width & ~15;
1018 h = s->height & ~15;
1019
1020 for (y = 0; y < h; y += 16) {
1021 for (x = 0; x < w; x += 16) {
1022 int offset = x + y * stride;
1023 int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1024 stride, 16);
1025 int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1026 int sae = get_sae(src + offset, mean, stride);
1027
1028 acc += sae + 500 < sad;
1029 }
1030 }
1031 return acc;
1032 }
1033
1034 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1035 {
1036 return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1037 s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1038 s->mb_stride, s->mb_height, s->b8_stride,
1039 &s->linesize, &s->uvlinesize);
1040 }
1041
1042 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1043 {
1044 Picture *pic = NULL;
1045 int64_t pts;
1046 int i, display_picture_number = 0, ret;
1047 int encoding_delay = s->max_b_frames ? s->max_b_frames
1048 : (s->low_delay ? 0 : 1);
1049 int flush_offset = 1;
1050 int direct = 1;
1051
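/* "direct" roughly means the encoder can keep a reference to the caller's
 * frame instead of copying it; this is only done when the input strides match
 * the encoder's internal linesizes and the dimensions are multiples of 16.
 * Otherwise the data is copied below (shifted by INPLACE_OFFSET unless
 * rc_buffer_size is set) and the bottom edge is padded. */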
1052 if (pic_arg) {
1053 pts = pic_arg->pts;
1054 display_picture_number = s->input_picture_number++;
1055
1056 if (pts != AV_NOPTS_VALUE) {
1057 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1058 int64_t time = pts;
1059 int64_t last = s->user_specified_pts;
1060
1061 if (time <= last) {
1062 av_log(s->avctx, AV_LOG_ERROR,
1063 "Error, Invalid timestamp=%"PRId64", "
1064 "last=%"PRId64"\n", pts, s->user_specified_pts);
1065 return -1;
1066 }
1067
1068 if (!s->low_delay && display_picture_number == 1)
1069 s->dts_delta = time - last;
1070 }
1071 s->user_specified_pts = pts;
1072 } else {
1073 if (s->user_specified_pts != AV_NOPTS_VALUE) {
1074 s->user_specified_pts =
1075 pts = s->user_specified_pts + 1;
1076 av_log(s->avctx, AV_LOG_INFO,
1077 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1078 pts);
1079 } else {
1080 pts = display_picture_number;
1081 }
1082 }
1083
1084 if (!pic_arg->buf[0] ||
1085 pic_arg->linesize[0] != s->linesize ||
1086 pic_arg->linesize[1] != s->uvlinesize ||
1087 pic_arg->linesize[2] != s->uvlinesize)
1088 direct = 0;
1089 if ((s->width & 15) || (s->height & 15))
1090 direct = 0;
1091
1092 ff_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
1093 pic_arg->linesize[1], s->linesize, s->uvlinesize);
1094
1095 i = ff_find_unused_picture(s->avctx, s->picture, direct);
1096 if (i < 0)
1097 return i;
1098
1099 pic = &s->picture[i];
1100 pic->reference = 3;
1101
1102 if (direct) {
1103 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1104 return ret;
1105 }
1106 ret = alloc_picture(s, pic, direct);
1107 if (ret < 0)
1108 return ret;
1109
1110 if (!direct) {
1111 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1112 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1113 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1114 // empty
1115 } else {
1116 int h_chroma_shift, v_chroma_shift;
1117 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1118 &h_chroma_shift,
1119 &v_chroma_shift);
1120
1121 for (i = 0; i < 3; i++) {
1122 int src_stride = pic_arg->linesize[i];
1123 int dst_stride = i ? s->uvlinesize : s->linesize;
1124 int h_shift = i ? h_chroma_shift : 0;
1125 int v_shift = i ? v_chroma_shift : 0;
1126 int w = s->width >> h_shift;
1127 int h = s->height >> v_shift;
1128 uint8_t *src = pic_arg->data[i];
1129 uint8_t *dst = pic->f->data[i];
1130
1131 if (!s->avctx->rc_buffer_size)
1132 dst += INPLACE_OFFSET;
1133
1134 if (src_stride == dst_stride)
1135 memcpy(dst, src, src_stride * h);
1136 else {
1137 int h2 = h;
1138 uint8_t *dst2 = dst;
1139 while (h2--) {
1140 memcpy(dst2, src, w);
1141 dst2 += dst_stride;
1142 src += src_stride;
1143 }
1144 }
1145 if ((s->width & 15) || (s->height & 15)) {
1146 s->mpvencdsp.draw_edges(dst, dst_stride,
1147 w, h,
1148 16 >> h_shift,
1149 16 >> v_shift,
1150 EDGE_BOTTOM);
1151 }
1152 }
1153 }
1154 }
1155 ret = av_frame_copy_props(pic->f, pic_arg);
1156 if (ret < 0)
1157 return ret;
1158
1159 pic->f->display_picture_number = display_picture_number;
1160 pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1161 } else {
1162 /* Flushing: When we have not received enough input frames,
1163 * ensure s->input_picture[0] contains the first picture */
1164 for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1165 if (s->input_picture[flush_offset])
1166 break;
1167
1168 if (flush_offset <= 1)
1169 flush_offset = 1;
1170 else
1171 encoding_delay = encoding_delay - flush_offset + 1;
1172 }
1173
1174 /* shift buffer entries */
1175 for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1176 s->input_picture[i - flush_offset] = s->input_picture[i];
1177
1178 s->input_picture[encoding_delay] = (Picture*) pic;
1179
1180 return 0;
1181 }
1182
1183 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1184 {
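/* In outline: compare every 8x8 block of the candidate frame against the last
 * coded reference with frame_skip_cmp, and accumulate the per-block scores
 * using the norm selected by frame_skip_exp (0 = maximum, 1 = sum of |v|,
 * 2 = sum of v^2, 3/4 = higher powers).  The frame is reported as skippable if
 * the total stays below frame_skip_threshold or below a lambda-scaled
 * frame_skip_factor. */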
1185 int x, y, plane;
1186 int score = 0;
1187 int64_t score64 = 0;
1188
1189 for (plane = 0; plane < 3; plane++) {
1190 const int stride = p->f->linesize[plane];
1191 const int bw = plane ? 1 : 2;
1192 for (y = 0; y < s->mb_height * bw; y++) {
1193 for (x = 0; x < s->mb_width * bw; x++) {
1194 int off = p->shared ? 0 : 16;
1195 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1196 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1197 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1198
1199 switch (s->frame_skip_exp) {
1200 case 0: score = FFMAX(score, v); break;
1201 case 1: score += FFABS(v); break;
1202 case 2: score += v * v; break;
1203 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1204 case 4: score64 += v * v * (int64_t)(v * v); break;
1205 }
1206 }
1207 }
1208 }
1209
1210 if (score)
1211 score64 = score;
1212
1213 if (score64 < s->frame_skip_threshold)
1214 return 1;
1215 if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1216 return 1;
1217 return 0;
1218 }
1219
1220 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1221 {
1222 AVPacket pkt = { 0 };
1223 int ret;
1224 int size = 0;
1225
1226 av_init_packet(&pkt);
1227
1228 ret = avcodec_send_frame(c, frame);
1229 if (ret < 0)
1230 return ret;
1231
1232 do {
1233 ret = avcodec_receive_packet(c, &pkt);
1234 if (ret >= 0) {
1235 size += pkt.size;
1236 av_packet_unref(&pkt);
1237 } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1238 return ret;
1239 } while (ret >= 0);
1240
1241 return size;
1242 }
1243
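/* Brute-force B-frame decision used by b_frame_strategy == 2: the buffered
 * input frames are downscaled by 2^brd_scale into tmp_frames, and each
 * candidate B-frame count j is tried by actually encoding the short sequence
 * (an I-frame followed by the corresponding P/B pattern) with a temporary
 * encoder instance.  The candidate with the lowest rate-distortion cost,
 * bits * lambda2 plus the reconstruction error reported by the encoder, wins. */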
1244 static int estimate_best_b_count(MpegEncContext *s)
1245 {
1246 const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1247 const int scale = s->brd_scale;
1248 int width = s->width >> scale;
1249 int height = s->height >> scale;
1250 int i, j, out_size, p_lambda, b_lambda, lambda2;
1251 int64_t best_rd = INT64_MAX;
1252 int best_b_count = -1;
1253 int ret = 0;
1254
1255 assert(scale >= 0 && scale <= 3);
1256
1257 //emms_c();
1258 //s->next_picture_ptr->quality;
1259 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1260 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1261 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1262 if (!b_lambda) // FIXME we should do this somewhere else
1263 b_lambda = p_lambda;
1264 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1265 FF_LAMBDA_SHIFT;
1266
1267 for (i = 0; i < s->max_b_frames + 2; i++) {
1268 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1269 s->next_picture_ptr;
1270
1271 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1272 pre_input = *pre_input_ptr;
1273
1274 if (!pre_input.shared && i) {
1275 pre_input.f->data[0] += INPLACE_OFFSET;
1276 pre_input.f->data[1] += INPLACE_OFFSET;
1277 pre_input.f->data[2] += INPLACE_OFFSET;
1278 }
1279
1280 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1281 s->tmp_frames[i]->linesize[0],
1282 pre_input.f->data[0],
1283 pre_input.f->linesize[0],
1284 width, height);
1285 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1286 s->tmp_frames[i]->linesize[1],
1287 pre_input.f->data[1],
1288 pre_input.f->linesize[1],
1289 width >> 1, height >> 1);
1290 s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1291 s->tmp_frames[i]->linesize[2],
1292 pre_input.f->data[2],
1293 pre_input.f->linesize[2],
1294 width >> 1, height >> 1);
1295 }
1296 }
1297
1298 for (j = 0; j < s->max_b_frames + 1; j++) {
1299 AVCodecContext *c;
1300 int64_t rd = 0;
1301
1302 if (!s->input_picture[j])
1303 break;
1304
1305 c = avcodec_alloc_context3(NULL);
1306 if (!c)
1307 return AVERROR(ENOMEM);
1308
1309 c->width = width;
1310 c->height = height;
1311 c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
1312 c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1313 c->mb_decision = s->avctx->mb_decision;
1314 c->me_cmp = s->avctx->me_cmp;
1315 c->mb_cmp = s->avctx->mb_cmp;
1316 c->me_sub_cmp = s->avctx->me_sub_cmp;
1317 c->pix_fmt = AV_PIX_FMT_YUV420P;
1318 c->time_base = s->avctx->time_base;
1319 c->max_b_frames = s->max_b_frames;
1320
1321 ret = avcodec_open2(c, codec, NULL);
1322 if (ret < 0)
1323 goto fail;
1324
1325 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1326 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1327
1328 out_size = encode_frame(c, s->tmp_frames[0]);
1329 if (out_size < 0) {
1330 ret = out_size;
1331 goto fail;
1332 }
1333
1334 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1335
1336 for (i = 0; i < s->max_b_frames + 1; i++) {
1337 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1338
1339 s->tmp_frames[i + 1]->pict_type = is_p ?
1340 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1341 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1342
1343 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1344 if (out_size < 0) {
1345 ret = out_size;
1346 goto fail;
1347 }
1348
1349 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1350 }
1351
1352 /* get the delayed frames */
1353 out_size = encode_frame(c, NULL);
1354 if (out_size < 0) {
1355 ret = out_size;
1356 goto fail;
1357 }
1358 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1359
1360 rd += c->error[0] + c->error[1] + c->error[2];
1361
1362 if (rd < best_rd) {
1363 best_rd = rd;
1364 best_b_count = j;
1365 }
1366
1367 fail:
1368 avcodec_free_context(&c);
1369 if (ret < 0)
1370 return ret;
1371 }
1372
1373 return best_b_count;
1374 }
1375
1376 static int select_input_picture(MpegEncContext *s)
1377 {
1378 int i, ret;
1379
1380 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1381 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1382 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1383
1384 /* set next picture type & ordering */
1385 if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1386 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1387 !s->next_picture_ptr || s->intra_only) {
1388 s->reordered_input_picture[0] = s->input_picture[0];
1389 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1390 s->reordered_input_picture[0]->f->coded_picture_number =
1391 s->coded_picture_number++;
1392 } else {
1393 int b_frames = 0;
1394
1395 if (s->frame_skip_threshold || s->frame_skip_factor) {
1396 if (s->picture_in_gop_number < s->gop_size &&
1397 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1398 // FIXME check that the gop check above is +-1 correct
1399 av_frame_unref(s->input_picture[0]->f);
1400
1401 emms_c();
1402 ff_vbv_update(s, 0);
1403
1404 goto no_output_pic;
1405 }
1406 }
1407
1408 if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1409 for (i = 0; i < s->max_b_frames + 1; i++) {
1410 int pict_num = s->input_picture[0]->f->display_picture_number + i;
1411
1412 if (pict_num >= s->rc_context.num_entries)
1413 break;
1414 if (!s->input_picture[i]) {
1415 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1416 break;
1417 }
1418
1419 s->input_picture[i]->f->pict_type =
1420 s->rc_context.entry[pict_num].new_pict_type;
1421 }
1422 }
1423
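/* b_frame_strategy selects how many of the buffered frames become B-frames:
 * 0 - always use up to max_b_frames (limited by the available input),
 * 1 - stop at the first frame whose intra-block count (get_intra_count())
 * exceeds mb_num / b_sensitivity,
 * 2 - exhaustive search via estimate_best_b_count(). */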
1424 if (s->b_frame_strategy == 0) {
1425 b_frames = s->max_b_frames;
1426 while (b_frames && !s->input_picture[b_frames])
1427 b_frames--;
1428 } else if (s->b_frame_strategy == 1) {
1429 for (i = 1; i < s->max_b_frames + 1; i++) {
1430 if (s->input_picture[i] &&
1431 s->input_picture[i]->b_frame_score == 0) {
1432 s->input_picture[i]->b_frame_score =
1433 get_intra_count(s,
1434 s->input_picture[i ]->f->data[0],
1435 s->input_picture[i - 1]->f->data[0],
1436 s->linesize) + 1;
1437 }
1438 }
1439 for (i = 0; i < s->max_b_frames + 1; i++) {
1440 if (!s->input_picture[i] ||
1441 s->input_picture[i]->b_frame_score - 1 >
1442 s->mb_num / s->b_sensitivity)
1443 break;
1444 }
1445
1446 b_frames = FFMAX(0, i - 1);
1447
1448 /* reset scores */
1449 for (i = 0; i < b_frames + 1; i++) {
1450 s->input_picture[i]->b_frame_score = 0;
1451 }
1452 } else if (s->b_frame_strategy == 2) {
1453 b_frames = estimate_best_b_count(s);
1454 if (b_frames < 0)
1455 return b_frames;
1456 }
1457
1458 emms_c();
1459
1460 for (i = b_frames - 1; i >= 0; i--) {
1461 int type = s->input_picture[i]->f->pict_type;
1462 if (type && type != AV_PICTURE_TYPE_B)
1463 b_frames = i;
1464 }
1465 if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1466 b_frames == s->max_b_frames) {
1467 av_log(s->avctx, AV_LOG_ERROR,
1468 "warning, too many B-frames in a row\n");
1469 }
1470
1471 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1472 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1473 s->gop_size > s->picture_in_gop_number) {
1474 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1475 } else {
1476 if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1477 b_frames = 0;
1478 s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1479 }
1480 }
1481
1482 if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1483 s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1484 b_frames--;
1485
1486 s->reordered_input_picture[0] = s->input_picture[b_frames];
1487 if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1488 s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1489 s->reordered_input_picture[0]->f->coded_picture_number =
1490 s->coded_picture_number++;
1491 for (i = 0; i < b_frames; i++) {
1492 s->reordered_input_picture[i + 1] = s->input_picture[i];
1493 s->reordered_input_picture[i + 1]->f->pict_type =
1494 AV_PICTURE_TYPE_B;
1495 s->reordered_input_picture[i + 1]->f->coded_picture_number =
1496 s->coded_picture_number++;
1497 }
1498 }
1499 }
1500 no_output_pic:
1501 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1502
1503 if (s->reordered_input_picture[0]) {
1504 s->reordered_input_picture[0]->reference =
1505 s->reordered_input_picture[0]->f->pict_type !=
1506 AV_PICTURE_TYPE_B ? 3 : 0;
1507
1508 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1509 return ret;
1510
1511 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1512 // input is a shared pix, so we can't modify it -> allocate a new
1513 // one & ensure that the shared one is reusable
1514
1515 Picture *pic;
1516 int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1517 if (i < 0)
1518 return i;
1519 pic = &s->picture[i];
1520
1521 pic->reference = s->reordered_input_picture[0]->reference;
1522 if (alloc_picture(s, pic, 0) < 0) {
1523 return -1;
1524 }
1525
1526 ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1527 if (ret < 0)
1528 return ret;
1529
1530 /* mark us unused / free shared pic */
1531 av_frame_unref(s->reordered_input_picture[0]->f);
1532 s->reordered_input_picture[0]->shared = 0;
1533
1534 s->current_picture_ptr = pic;
1535 } else {
1536 // input is not a shared pix -> reuse buffer for current_pix
1537 s->current_picture_ptr = s->reordered_input_picture[0];
1538 for (i = 0; i < 4; i++) {
1539 s->new_picture.f->data[i] += INPLACE_OFFSET;
1540 }
1541 }
1542 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1543 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1544 s->current_picture_ptr)) < 0)
1545 return ret;
1546
1547 s->picture_number = s->new_picture.f->display_picture_number;
1548 }
1549 return 0;
1550 }
1551
1552 static void frame_end(MpegEncContext *s)
1553 {
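/* When unrestricted motion vectors are in use, the reconstructed reference
 * frame is padded by replicating its border pixels (draw_edges below), so that
 * later motion estimation/compensation may address samples outside the visible
 * picture area. */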
1554 int i;
1555
1556 if (s->unrestricted_mv &&
1557 s->current_picture.reference &&
1558 !s->intra_only) {
1559 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1560 int hshift = desc->log2_chroma_w;
1561 int vshift = desc->log2_chroma_h;
1562 s->mpvencdsp.draw_edges(s->current_picture.f->data[0], s->linesize,
1563 s->h_edge_pos, s->v_edge_pos,
1564 EDGE_WIDTH, EDGE_WIDTH,
1565 EDGE_TOP | EDGE_BOTTOM);
1566 s->mpvencdsp.draw_edges(s->current_picture.f->data[1], s->uvlinesize,
1567 s->h_edge_pos >> hshift,
1568 s->v_edge_pos >> vshift,
1569 EDGE_WIDTH >> hshift,
1570 EDGE_WIDTH >> vshift,
1571 EDGE_TOP | EDGE_BOTTOM);
1572 s->mpvencdsp.draw_edges(s->current_picture.f->data[2], s->uvlinesize,
1573 s->h_edge_pos >> hshift,
1574 s->v_edge_pos >> vshift,
1575 EDGE_WIDTH >> hshift,
1576 EDGE_WIDTH >> vshift,
1577 EDGE_TOP | EDGE_BOTTOM);
1578 }
1579
1580 emms_c();
1581
1582 s->last_pict_type = s->pict_type;
1583 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1584 if (s->pict_type!= AV_PICTURE_TYPE_B)
1585 s->last_non_b_pict_type = s->pict_type;
1586
1587 if (s->encoding) {
1588 /* release non-reference frames */
1589 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1590 if (!s->picture[i].reference)
1591 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1592 }
1593 }
1594
1595 #if FF_API_CODED_FRAME
1596 FF_DISABLE_DEPRECATION_WARNINGS
1597 av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1598 FF_ENABLE_DEPRECATION_WARNINGS
1599 #endif
1600 #if FF_API_ERROR_FRAME
1601 FF_DISABLE_DEPRECATION_WARNINGS
1602 memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1603 sizeof(s->current_picture.encoding_error));
1604 FF_ENABLE_DEPRECATION_WARNINGS
1605 #endif
1606 }
1607
1608 static void update_noise_reduction(MpegEncContext *s)
1609 {
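/* The per-coefficient offsets are, in effect, recomputed as roughly
 * noise_reduction * dct_count / dct_error_sum: coefficients with a large
 * accumulated error sum get a smaller offset.  Halving both counters once
 * dct_count exceeds 1 << 16 keeps the statistics weighted towards recent
 * frames. */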
1610 int intra, i;
1611
1612 for (intra = 0; intra < 2; intra++) {
1613 if (s->dct_count[intra] > (1 << 16)) {
1614 for (i = 0; i < 64; i++) {
1615 s->dct_error_sum[intra][i] >>= 1;
1616 }
1617 s->dct_count[intra] >>= 1;
1618 }
1619
1620 for (i = 0; i < 64; i++) {
1621 s->dct_offset[intra][i] = (s->noise_reduction *
1622 s->dct_count[intra] +
1623 s->dct_error_sum[intra][i] / 2) /
1624 (s->dct_error_sum[intra][i] + 1);
1625 }
1626 }
1627 }
1628
1629 static int frame_start(MpegEncContext *s)
1630 {
1631 int ret;
1632
1633 /* mark & release old frames */
1634 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1635 s->last_picture_ptr != s->next_picture_ptr &&
1636 s->last_picture_ptr->f->buf[0]) {
1637 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1638 }
1639
1640 s->current_picture_ptr->f->pict_type = s->pict_type;
1641 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1642
1643 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1644 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1645 s->current_picture_ptr)) < 0)
1646 return ret;
1647
1648 if (s->pict_type != AV_PICTURE_TYPE_B) {
1649 s->last_picture_ptr = s->next_picture_ptr;
1650 if (!s->droppable)
1651 s->next_picture_ptr = s->current_picture_ptr;
1652 }
1653
1654 if (s->last_picture_ptr) {
1655 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1656 if (s->last_picture_ptr->f->buf[0] &&
1657 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1658 s->last_picture_ptr)) < 0)
1659 return ret;
1660 }
1661 if (s->next_picture_ptr) {
1662 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1663 if (s->next_picture_ptr->f->buf[0] &&
1664 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1665 s->next_picture_ptr)) < 0)
1666 return ret;
1667 }
1668
1669 if (s->picture_structure!= PICT_FRAME) {
1670 int i;
1671 for (i = 0; i < 4; i++) {
1672 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1673 s->current_picture.f->data[i] +=
1674 s->current_picture.f->linesize[i];
1675 }
1676 s->current_picture.f->linesize[i] *= 2;
1677 s->last_picture.f->linesize[i] *= 2;
1678 s->next_picture.f->linesize[i] *= 2;
1679 }
1680 }
1681
1682 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1683 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1684 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1685 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1686 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1687 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1688 } else {
1689 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1690 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1691 }
1692
1693 if (s->dct_error_sum) {
1694 assert(s->noise_reduction && s->encoding);
1695 update_noise_reduction(s);
1696 }
1697
1698 return 0;
1699 }
1700
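/* In first-pass mode, every coded picture appends one text line of statistics
 * (picture numbers, type, quality, bit counts per category, motion/variance
 * sums) to avctx->stats_out; the second-pass rate control parses these lines
 * to plan its bit allocation. */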
1701 static void write_pass1_stats(MpegEncContext *s)
1702 {
1703 snprintf(s->avctx->stats_out, 256,
1704 "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d "
1705 "fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d "
1706 "hbits:%d;\n",
1707 s->current_picture_ptr->f->display_picture_number,
1708 s->current_picture_ptr->f->coded_picture_number,
1709 s->pict_type,
1710 s->current_picture.f->quality,
1711 s->i_tex_bits,
1712 s->p_tex_bits,
1713 s->mv_bits,
1714 s->misc_bits,
1715 s->f_code,
1716 s->b_code,
1717 s->current_picture.mc_mb_var_sum,
1718 s->current_picture.mb_var_sum,
1719 s->i_count, s->skip_count,
1720 s->header_bits);
1721 }
1722
1723 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1724 const AVFrame *pic_arg, int *got_packet)
1725 {
1726 MpegEncContext *s = avctx->priv_data;
1727 int i, stuffing_count, ret;
1728 int context_count = s->slice_context_count;
1729
1730 s->picture_in_gop_number++;
1731
1732 if (load_input_picture(s, pic_arg) < 0)
1733 return -1;
1734
1735 if (select_input_picture(s) < 0) {
1736 return -1;
1737 }
1738
1739 /* output? */
1740 if (s->new_picture.f->data[0]) {
1741 uint8_t *sd;
1742 if (!pkt->data &&
1743 (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
1744 return ret;
1745 if (s->mb_info) {
1746 s->mb_info_ptr = av_packet_new_side_data(pkt,
1747 AV_PKT_DATA_H263_MB_INFO,
1748 s->mb_width*s->mb_height*12);
1749 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1750 }
1751
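/* Each slice thread gets a slice of the output packet proportional to its
 * share of macroblock rows and writes into it through its own PutBitContext;
 * the partial bitstreams are merged back into one stream once encoding of the
 * frame finishes. */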
1752 for (i = 0; i < context_count; i++) {
1753 int start_y = s->thread_context[i]->start_mb_y;
1754 int end_y = s->thread_context[i]-> end_mb_y;
1755 int h = s->mb_height;
1756 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1757 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1758
1759 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1760 }
1761
1762 s->pict_type = s->new_picture.f->pict_type;
1763 //emms_c();
1764 ret = frame_start(s);
1765 if (ret < 0)
1766 return ret;
1767 vbv_retry:
1768 if (encode_picture(s, s->picture_number) < 0)
1769 return -1;
1770
1771 #if FF_API_STAT_BITS
1772 FF_DISABLE_DEPRECATION_WARNINGS
1773 avctx->header_bits = s->header_bits;
1774 avctx->mv_bits = s->mv_bits;
1775 avctx->misc_bits = s->misc_bits;
1776 avctx->i_tex_bits = s->i_tex_bits;
1777 avctx->p_tex_bits = s->p_tex_bits;
1778 avctx->i_count = s->i_count;
1779 // FIXME f/b_count in avctx
1780 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1781 avctx->skip_count = s->skip_count;
1782 FF_ENABLE_DEPRECATION_WARNINGS
1783 #endif
1784
1785 frame_end(s);
1786
1787 sd = av_packet_new_side_data(pkt, AV_PKT_DATA_QUALITY_FACTOR,
1788 sizeof(int));
1789 if (!sd)
1790 return AVERROR(ENOMEM);
1791 *(int *)sd = s->current_picture.f->quality;
1792
1793 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1794 ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1795
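/* VBV overflow handling: if the coded frame exceeds the share of the VBV
 * buffer the rate control allows (buffer_index * rc_max_available_vbv_use),
 * all lambdas are raised by at least a factor of (qscale + 1) / qscale, the
 * per-frame state that encode_picture() already modified (rounding mode, time
 * base) is undone, and the frame is re-encoded via the vbv_retry label above. */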
1796 if (avctx->rc_buffer_size) {
1797 RateControlContext *rcc = &s->rc_context;
1798 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1799
1800 if (put_bits_count(&s->pb) > max_size &&
1801 s->lambda < s->lmax) {
1802 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1803 (s->qscale + 1) / s->qscale);
1804 if (s->adaptive_quant) {
1805 int i;
1806 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1807 s->lambda_table[i] =
1808 FFMAX(s->lambda_table[i] + 1,
1809 s->lambda_table[i] * (s->qscale + 1) /
1810 s->qscale);
1811 }
1812 s->mb_skipped = 0; // done in frame_start()
1813 // done in encode_picture() so we must undo it
1814 if (s->pict_type == AV_PICTURE_TYPE_P) {
1815 if (s->flipflop_rounding ||
1816 s->codec_id == AV_CODEC_ID_H263P ||
1817 s->codec_id == AV_CODEC_ID_MPEG4)
1818 s->no_rounding ^= 1;
1819 }
1820 if (s->pict_type != AV_PICTURE_TYPE_B) {
1821 s->time_base = s->last_time_base;
1822 s->last_non_b_time = s->time - s->pp_time;
1823 }
1824 for (i = 0; i < context_count; i++) {
1825 PutBitContext *pb = &s->thread_context[i]->pb;
1826 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1827 }
1828 goto vbv_retry;
1829 }
1830
1831 assert(s->avctx->rc_max_rate);
1832 }
1833
1834 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1835 write_pass1_stats(s);
1836
1837 for (i = 0; i < 4; i++) {
1838 s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1839 avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1840 }
1841
1842 if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1843 assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1844 s->misc_bits + s->i_tex_bits +
1845 s->p_tex_bits);
1846 flush_put_bits(&s->pb);
1847 s->frame_bits = put_bits_count(&s->pb);
1848
1849 stuffing_count = ff_vbv_update(s, s->frame_bits);
1850 if (stuffing_count) {
1851 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1852 stuffing_count + 50) {
1853 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1854 return -1;
1855 }
1856
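/* Write the stuffing in a codec-specific way: MPEG-1/2 pad with zero bytes,
 * MPEG-4 writes the 32-bit code 0x000001C3 followed by 0xFF bytes. */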
1857 switch (s->codec_id) {
1858 case AV_CODEC_ID_MPEG1VIDEO:
1859 case AV_CODEC_ID_MPEG2VIDEO:
1860 while (stuffing_count--) {
1861 put_bits(&s->pb, 8, 0);
1862 }
1863 break;
1864 case AV_CODEC_ID_MPEG4:
1865 put_bits(&s->pb, 16, 0);
1866 put_bits(&s->pb, 16, 0x1C3);
1867 stuffing_count -= 4;
1868 while (stuffing_count--) {
1869 put_bits(&s->pb, 8, 0xFF);
1870 }
1871 break;
1872 default:
1873 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1874 }
1875 flush_put_bits(&s->pb);
1876 s->frame_bits = put_bits_count(&s->pb);
1877 }
1878
1879 /* update MPEG-1/2 vbv_delay for CBR */
1880 if (s->avctx->rc_max_rate &&
1881 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1882 s->out_format == FMT_MPEG1 &&
1883 90000LL * (avctx->rc_buffer_size - 1) <=
1884 s->avctx->rc_max_rate * 0xFFFFLL) {
1885 AVCPBProperties *props;
1886 size_t props_size;
1887
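/* Approximate the decoder buffer occupancy seen at the vbv_delay field:
 * the current rate-control buffer fullness, plus the remaining bits of this
 * frame after the vbv_delay position, minus the bits that arrive during one
 * frame interval; the result is converted to 90 kHz ticks and clamped below
 * by the time needed to transmit those remaining bits. */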
1888 int vbv_delay, min_delay;
1889 double inbits = s->avctx->rc_max_rate *
1890 av_q2d(s->avctx->time_base);
1891 int minbits = s->frame_bits - 8 *
1892 (s->vbv_delay_ptr - s->pb.buf - 1);
1893 double bits = s->rc_context.buffer_index + minbits - inbits;
1894
1895 if (bits < 0)
1896 av_log(s->avctx, AV_LOG_ERROR,
1897 "Internal error, negative bits\n");
1898
1899 assert(s->repeat_first_field == 0);
1900
1901 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1902 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1903 s->avctx->rc_max_rate;
1904
1905 vbv_delay = FFMAX(vbv_delay, min_delay);
1906
1907 assert(vbv_delay < 0xFFFF);
1908
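/* Patch the 16-bit vbv_delay value back into the already-written picture
 * header: its top 3 bits go into the low bits of vbv_delay_ptr[0], the next
 * 8 bits into vbv_delay_ptr[1], and the low 5 bits into the high bits of
 * vbv_delay_ptr[2]. */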
1909 s->vbv_delay_ptr[0] &= 0xF8;
1910 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1911 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1912 s->vbv_delay_ptr[2] &= 0x07;
1913 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1914
1915 props = av_cpb_properties_alloc(&props_size);
1916 if (!props)
1917 return AVERROR(ENOMEM);
1918 props->vbv_delay = vbv_delay * 300;
1919
1920 ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
1921 (uint8_t*)props, props_size);
1922 if (ret < 0) {
1923 av_freep(&props);
1924 return ret;
1925 }
1926
1927 #if FF_API_VBV_DELAY
1928 FF_DISABLE_DEPRECATION_WARNINGS
1929 avctx->vbv_delay = vbv_delay * 300;
1930 FF_ENABLE_DEPRECATION_WARNINGS
1931 #endif
1932 }
1933 s->total_bits += s->frame_bits;
1934 #if FF_API_STAT_BITS
1935 FF_DISABLE_DEPRECATION_WARNINGS
1936 avctx->frame_bits = s->frame_bits;
1937 FF_ENABLE_DEPRECATION_WARNINGS
1938 #endif
1939
1940
1941 pkt->pts = s->current_picture.f->pts;
1942 if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1943 if (!s->current_picture.f->coded_picture_number)
1944 pkt->dts = pkt->pts - s->dts_delta;
1945 else
1946 pkt->dts = s->reordered_pts;
1947 s->reordered_pts = pkt->pts;
1948 } else
1949 pkt->dts = pkt->pts;
1950 if (s->current_picture.f->key_frame)
1951 pkt->flags |= AV_PKT_FLAG_KEY;
1952 if (s->mb_info)
1953 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1954 } else {
1955 s->frame_bits = 0;
1956 }
1957 assert((s->frame_bits & 7) == 0);
1958
1959 pkt->size = s->frame_bits / 8;
1960 *got_packet = !!pkt->size;
1961 return 0;
1962 }
1963
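/* Heuristic coefficient elimination: if a block contains only scattered
 * +-1 coefficients and their weighted score (tab[] weights coefficients
 * preceded by short zero runs more heavily) stays below the threshold,
 * zero the whole block. A negative threshold means the DC coefficient may
 * be eliminated as well. */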
1964 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1965 int n, int threshold)
1966 {
1967 static const char tab[64] = {
1968 3, 2, 2, 1, 1, 1, 1, 1,
1969 1, 1, 1, 1, 1, 1, 1, 1,
1970 1, 1, 1, 1, 1, 1, 1, 1,
1971 0, 0, 0, 0, 0, 0, 0, 0,
1972 0, 0, 0, 0, 0, 0, 0, 0,
1973 0, 0, 0, 0, 0, 0, 0, 0,
1974 0, 0, 0, 0, 0, 0, 0, 0,
1975 0, 0, 0, 0, 0, 0, 0, 0
1976 };
1977 int score = 0;
1978 int run = 0;
1979 int i;
1980 int16_t *block = s->block[n];
1981 const int last_index = s->block_last_index[n];
1982 int skip_dc;
1983
1984 if (threshold < 0) {
1985 skip_dc = 0;
1986 threshold = -threshold;
1987 } else
1988 skip_dc = 1;
1989
1990 /* Are all the coefficients we could set to zero already zero? */
1991 if (last_index <= skip_dc - 1)
1992 return;
1993
1994 for (i = 0; i <= last_index; i++) {
1995 const int j = s->intra_scantable.permutated[i];
1996 const int level = FFABS(block[j]);
1997 if (level == 1) {
1998 if (skip_dc && i == 0)
1999 continue;
2000 score += tab[run];
2001 run = 0;
2002 } else if (level > 1) {
2003 return;
2004 } else {
2005 run++;
2006 }
2007 }
2008 if (score >= threshold)
2009 return;
2010 for (i = skip_dc; i <= last_index; i++) {
2011 const int j = s->intra_scantable.permutated[i];
2012 block[j] = 0;
2013 }
2014 if (block[0])
2015 s->block_last_index[n] = 0;
2016 else
2017 s->block_last_index[n] = -1;
2018 }
2019
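/* Clip quantized coefficients to the [min_qcoeff, max_qcoeff] range the
 * target syntax can represent; the intra DC coefficient is left alone. */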
2020 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2021 int last_index)
2022 {
2023 int i;
2024 const int maxlevel = s->max_qcoeff;
2025 const int minlevel = s->min_qcoeff;
2026 int overflow = 0;
2027
2028 if (s->mb_intra) {
2029 i = 1; // skip clipping of intra dc
2030 } else
2031 i = 0;
2032
2033 for (; i <= last_index; i++) {
2034 const int j = s->intra_scantable.permutated[i];
2035 int level = block[j];
2036
2037 if (level > maxlevel) {
2038 level = maxlevel;
2039 overflow++;
2040 } else if (level < minlevel) {
2041 level = minlevel;
2042 overflow++;
2043 }
2044
2045 block[j] = level;
2046 }
2047
2048 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2049 av_log(s->avctx, AV_LOG_INFO,
2050 "warning, clipping %d dct coefficients to %d..%d\n",
2051 overflow, minlevel, maxlevel);
2052 }
2053
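/* Per-pixel perceptual weights for the noise-shaping quantizer: roughly
 * 36 times the standard deviation of the pixel's 3x3 neighbourhood
 * (clipped at the block edges). */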
2054 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2055 {
2056 int x, y;
2057 // FIXME optimize
2058 for (y = 0; y < 8; y++) {
2059 for (x = 0; x < 8; x++) {
2060 int x2, y2;
2061 int sum = 0;
2062 int sqr = 0;
2063 int count = 0;
2064
2065 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2066 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2067 int v = ptr[x2 + y2 * stride];
2068 sum += v;
2069 sqr += v * v;
2070 count++;
2071 }
2072 }
2073 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2074 }
2075 }
2076 }
2077
2078 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2079 int motion_x, int motion_y,
2080 int mb_block_height,
2081 int mb_block_count)
2082 {
2083 int16_t weight[8][64];
2084 int16_t orig[8][64];
2085 const int mb_x = s->mb_x;
2086 const int mb_y = s->mb_y;
2087 int i;
2088 int skip_dct[8];
2089 int dct_offset = s->linesize * 8; // default for progressive frames
2090 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2091 ptrdiff_t wrap_y, wrap_c;
2092
2093 for (i = 0; i < mb_block_count; i++)
2094 skip_dct[i] = s->skipdct;
2095
2096 if (s->adaptive_quant) {
2097 const int last_qp = s->qscale;
2098 const int mb_xy = mb_x + mb_y * s->mb_stride;
2099
2100 s->lambda = s->lambda_table[mb_xy];
2101 update_qscale(s);
2102
2103 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2104 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2105 s->dquant = s->qscale - last_qp;
2106
2107 if (s->out_format == FMT_H263) {
2108 s->dquant = av_clip(s->dquant, -2, 2);
2109
2110 if (s->codec_id == AV_CODEC_ID_MPEG4) {
2111 if (!s->mb_intra) {
2112 if (s->pict_type == AV_PICTURE_TYPE_B) {
2113 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2114 s->dquant = 0;
2115 }
2116 if (s->mv_type == MV_TYPE_8X8)
2117 s->dquant = 0;
2118 }
2119 }
2120 }
2121 }
2122 ff_set_qscale(s, last_qp + s->dquant);
2123 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2124 ff_set_qscale(s, s->qscale + s->dquant);
2125
2126 wrap_y = s->linesize;
2127 wrap_c = s->uvlinesize;
2128 ptr_y = s->new_picture.f->data[0] +
2129 (mb_y * 16 * wrap_y) + mb_x * 16;
2130 ptr_cb = s->new_picture.f->data[1] +
2131 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
2132 ptr_cr = s->new_picture.f->data[2] +
2133 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
2134
2135 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
2136 uint8_t *ebuf = s->sc.edge_emu_buffer + 32;
2137 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2138 wrap_y, wrap_y,
2139 16, 16, mb_x * 16, mb_y * 16,
2140 s->width, s->height);
2141 ptr_y = ebuf;
2142 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
2143 wrap_c, wrap_c,
2144 8, mb_block_height, mb_x * 8, mb_y * 8,
2145 s->width >> 1, s->height >> 1);
2146 ptr_cb = ebuf + 18 * wrap_y;
2147 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
2148 wrap_c, wrap_c,
2149 8, mb_block_height, mb_x * 8, mb_y * 8,
2150 s->width >> 1, s->height >> 1);
2151 ptr_cr = ebuf + 18 * wrap_y + 8;
2152 }
2153
2154 if (s->mb_intra) {
2155 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2156 int progressive_score, interlaced_score;
2157
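/* Frame vs. field DCT decision: compare an interlace metric computed on
 * frame-ordered lines (with a small bias towards frame DCT) against the
 * same metric on field-ordered lines; if field DCT wins, re-point the
 * luma (and 4:2:2 chroma) strides at the individual fields. */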
2158 s->interlaced_dct = 0;
2159 progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2160 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2161 NULL, wrap_y, 8) - 400;
2162
2163 if (progressive_score > 0) {
2164 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2165 NULL, wrap_y * 2, 8) +
2166 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2167 NULL, wrap_y * 2, 8);
2168 if (progressive_score > interlaced_score) {
2169 s->interlaced_dct = 1;
2170
2171 dct_offset = wrap_y;
2172 wrap_y <<= 1;
2173 if (s->chroma_format == CHROMA_422)
2174 wrap_c <<= 1;
2175 }
2176 }
2177 }
2178
2179 s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2180 s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2181 s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2182 s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2183
2184 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2185 skip_dct[4] = 1;
2186 skip_dct[5] = 1;
2187 } else {
2188 s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2189 s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2190 if (!s->chroma_y_shift) { /* 422 */
2191 s->pdsp.get_pixels(s->block[6],
2192 ptr_cb + (dct_offset >> 1), wrap_c);
2193 s->pdsp.get_pixels(s->block[7],
2194 ptr_cr + (dct_offset >> 1), wrap_c);
2195 }
2196 }
2197 } else {
2198 op_pixels_func (*op_pix)[4];
2199 qpel_mc_func (*op_qpix)[16];
2200 uint8_t *dest_y, *dest_cb, *dest_cr;
2201
2202 dest_y = s->dest[0];
2203 dest_cb = s->dest[1];
2204 dest_cr = s->dest[2];
2205
2206 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2207 op_pix = s->hdsp.put_pixels_tab;
2208 op_qpix = s->qdsp.put_qpel_pixels_tab;
2209 } else {
2210 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2211 op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2212 }
2213
2214 if (s->mv_dir & MV_DIR_FORWARD) {
2215 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2216 s->last_picture.f->data,
2217 op_pix, op_qpix);
2218 op_pix = s->hdsp.avg_pixels_tab;
2219 op_qpix = s->qdsp.avg_qpel_pixels_tab;
2220 }
2221 if (s->mv_dir & MV_DIR_BACKWARD) {
2222 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2223 s->next_picture.f->data,
2224 op_pix, op_qpix);
2225 }
2226
2227 if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2228 int progressive_score, interlaced_score;
2229
2230 s->interlaced_dct = 0;
2231 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2232 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2233 ptr_y + wrap_y * 8,
2234 wrap_y, 8) - 400;
2235
2236 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2237 progressive_score -= 400;
2238
2239 if (progressive_score > 0) {
2240 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2241 wrap_y * 2, 8) +
2242 s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2243 ptr_y + wrap_y,
2244 wrap_y * 2, 8);
2245
2246 if (progressive_score > interlaced_score) {
2247 s->interlaced_dct = 1;
2248
2249 dct_offset = wrap_y;
2250 wrap_y <<= 1;
2251 if (s->chroma_format == CHROMA_422)
2252 wrap_c <<= 1;
2253 }
2254 }
2255 }
2256
2257 s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2258 s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2259 s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2260 dest_y + dct_offset, wrap_y);
2261 s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2262 dest_y + dct_offset + 8, wrap_y);
2263
2264 if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2265 skip_dct[4] = 1;
2266 skip_dct[5] = 1;
2267 } else {
2268 s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2269 s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2270 if (!s->chroma_y_shift) { /* 422 */
2271 s->pdsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
2272 dest_cb + (dct_offset >> 1), wrap_c);
2273 s->pdsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
2274 dest_cr + (dct_offset >> 1), wrap_c);
2275 }
2276 }
2277 /* pre quantization */
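/* If the motion-compensated residual of the whole MB is small, skip the
 * DCT of any 8x8 block whose SAD against the prediction is below about
 * 20*qscale; such blocks are simply coded as all-zero. */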
2278 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2279 2 * s->qscale * s->qscale) {
2280 // FIXME optimize
2281 if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2282 skip_dct[0] = 1;
2283 if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2284 skip_dct[1] = 1;
2285 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2286 wrap_y, 8) < 20 * s->qscale)
2287 skip_dct[2] = 1;
2288 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2289 wrap_y, 8) < 20 * s->qscale)
2290 skip_dct[3] = 1;
2291 if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2292 skip_dct[4] = 1;
2293 if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2294 skip_dct[5] = 1;
2295 if (!s->chroma_y_shift) { /* 422 */
2296 if (s->mecc.sad[1](NULL, ptr_cb + (dct_offset >> 1),
2297 dest_cb + (dct_offset >> 1),
2298 wrap_c, 8) < 20 * s->qscale)
2299 skip_dct[6] = 1;
2300 if (s->mecc.sad[1](NULL, ptr_cr + (dct_offset >> 1),
2301 dest_cr + (dct_offset >> 1),
2302 wrap_c, 8) < 20 * s->qscale)
2303 skip_dct[7] = 1;
2304 }
2305 }
2306 }
2307
2308 if (s->quantizer_noise_shaping) {
2309 if (!skip_dct[0])
2310 get_visual_weight(weight[0], ptr_y , wrap_y);
2311 if (!skip_dct[1])
2312 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2313 if (!skip_dct[2])
2314 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2315 if (!skip_dct[3])
2316 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2317 if (!skip_dct[4])
2318 get_visual_weight(weight[4], ptr_cb , wrap_c);
2319 if (!skip_dct[5])
2320 get_visual_weight(weight[5], ptr_cr , wrap_c);
2321 if (!s->chroma_y_shift) { /* 422 */
2322 if (!skip_dct[6])
2323 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
2324 wrap_c);
2325 if (!skip_dct[7])
2326 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
2327 wrap_c);
2328 }
2329 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2330 }
2331
2332 /* DCT & quantize */
2333 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
2334 {
2335 for (i = 0; i < mb_block_count; i++) {
2336 if (!skip_dct[i]) {
2337 int overflow;
2338 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2339 // FIXME: we could decide to change the quantizer instead of
2340 // clipping
2341 // JS: I don't think that would be a good idea; it could lower
2342 // quality instead of improving it. Only INTRADC clipping
2343 // deserves changes in the quantizer
2344 if (overflow)
2345 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2346 } else
2347 s->block_last_index[i] = -1;
2348 }
2349 if (s->quantizer_noise_shaping) {
2350 for (i = 0; i < mb_block_count; i++) {
2351 if (!skip_dct[i]) {
2352 s->block_last_index[i] =
2353 dct_quantize_refine(s, s->block[i], weight[i],
2354 orig[i], i, s->qscale);
2355 }
2356 }
2357 }
2358
2359 if (s->luma_elim_threshold && !s->mb_intra)
2360 for (i = 0; i < 4; i++)
2361 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2362 if (s->chroma_elim_threshold && !s->mb_intra)
2363 for (i = 4; i < mb_block_count; i++)
2364 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2365
2366 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2367 for (i = 0; i < mb_block_count; i++) {
2368 if (s->block_last_index[i] == -1)
2369 s->coded_score[i] = INT_MAX / 256;
2370 }
2371 }
2372 }
2373
2374 if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2375 s->block_last_index[4] =
2376 s->block_last_index[5] = 0;
2377 s->block[4][0] =
2378 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2379 }
2380
2381 // FIXME: the non-C quantize code returns an incorrect block_last_index
2382 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2383 for (i = 0; i < mb_block_count; i++) {
2384 int j;
2385 if (s->block_last_index[i] > 0) {
2386 for (j = 63; j > 0; j--) {
2387 if (s->block[i][s->intra_scantable.permutated[j]])
2388 break;
2389 }
2390 s->block_last_index[i] = j;
2391 }
2392 }
2393 }
2394
2395 /* huffman encode */
2396 switch(s->codec_id){ // FIXME: a function pointer could be slightly faster
2397 case AV_CODEC_ID_MPEG1VIDEO:
2398 case AV_CODEC_ID_MPEG2VIDEO:
2399 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2400 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2401 break;
2402 case AV_CODEC_ID_MPEG4:
2403 if (CONFIG_MPEG4_ENCODER)
2404 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2405 break;
2406 case AV_CODEC_ID_MSMPEG4V2:
2407 case AV_CODEC_ID_MSMPEG4V3:
2408 case AV_CODEC_ID_WMV1:
2409 if (CONFIG_MSMPEG4_ENCODER)
2410 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2411 break;
2412 case AV_CODEC_ID_WMV2:
2413 if (CONFIG_WMV2_ENCODER)
2414 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2415 break;
2416 case AV_CODEC_ID_H261:
2417 if (CONFIG_H261_ENCODER)
2418 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2419 break;
2420 case AV_CODEC_ID_H263:
2421 case AV_CODEC_ID_H263P:
2422 case AV_CODEC_ID_FLV1:
2423 case AV_CODEC_ID_RV10:
2424 case AV_CODEC_ID_RV20:
2425 if (CONFIG_H263_ENCODER)
2426 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2427 break;
2428 case AV_CODEC_ID_MJPEG:
2429 if (CONFIG_MJPEG_ENCODER)
2430 ff_mjpeg_encode_mb(s, s->block);
2431 break;
2432 default:
2433 assert(0);
2434 }
2435 }
2436
2437 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2438 {
2439 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2440 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
2441 }
2442
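/* copy_context_before_encode()/copy_context_after_encode() save and restore
 * the encoder state that coding one candidate macroblock mode can modify,
 * so several modes can be tried from the same starting point during RD
 * macroblock decision. */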
2443 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2444 int i;
2445
2446 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2447
2448 /* MPEG-1 */
2449 d->mb_skip_run= s->mb_skip_run;
2450 for(i=0; i<3; i++)
2451 d->last_dc[i] = s->last_dc[i];
2452
2453 /* statistics */
2454 d->mv_bits= s->mv_bits;
2455 d->i_tex_bits= s->i_tex_bits;
2456 d->p_tex_bits= s->p_tex_bits;
2457 d->i_count= s->i_count;
2458 d->f_count= s->f_count;
2459 d->b_count= s->b_count;
2460 d->skip_count= s->skip_count;
2461 d->misc_bits= s->misc_bits;
2462 d->last_bits= 0;
2463
2464 d->mb_skipped= 0;
2465 d->qscale= s->qscale;
2466 d->dquant= s->dquant;
2467
2468 d->esc3_level_length= s->esc3_level_length;
2469 }
2470
2471 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2472 int i;
2473
2474 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2475 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2476
2477 /* MPEG-1 */
2478 d->mb_skip_run= s->mb_skip_run;
2479 for(i=0; i<3; i++)
2480 d->last_dc[i] = s->last_dc[i];
2481
2482 /* statistics */
2483 d->mv_bits= s->mv_bits;
2484 d->i_tex_bits= s->i_tex_bits;
2485 d->p_tex_bits= s->p_tex_bits;
2486 d->i_count= s->i_count;
2487 d->f_count= s->f_count;
2488 d->b_count= s->b_count;
2489 d->skip_count= s->skip_count;
2490 d->misc_bits= s->misc_bits;
2491
2492 d->mb_intra= s->mb_intra;
2493 d->mb_skipped= s->mb_skipped;
2494 d->mv_type= s->mv_type;
2495 d->mv_dir= s->mv_dir;
2496 d->pb= s->pb;
2497 if(s->data_partitioning){
2498 d->pb2= s->pb2;
2499 d->tex_pb= s->tex_pb;
2500 }
2501 d->block= s->block;
2502 for(i=0; i<8; i++)
2503 d->block_last_index[i]= s->block_last_index[i];
2504 d->interlaced_dct= s->interlaced_dct;
2505 d->qscale= s->qscale;
2506
2507 d->esc3_level_length= s->esc3_level_length;
2508 }
2509
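/* Encode one candidate macroblock mode for the RD decision: code it into a
 * scratch bitstream (and, for full RD, reconstruct it into a scratch picture
 * buffer), compute its cost in bits, optionally adding lambda-weighted SSE,
 * and keep it if it beats the best score so far. */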
2510 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2511 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2512 int *dmin, int *next_block, int motion_x, int motion_y)
2513 {
2514 int score;
2515 uint8_t *dest_backup[3];
2516
2517 copy_context_before_encode(s, backup, type);
2518
2519 s->block= s->blocks[*next_block];
2520 s->pb= pb[*next_block];
2521 if(s->data_partitioning){
2522 s->pb2 = pb2 [*next_block];
2523 s->tex_pb= tex_pb[*next_block];
2524 }
2525
2526 if(*next_block){
2527 memcpy(dest_backup, s->dest, sizeof(s->dest));
2528 s->dest[0] = s->sc.rd_scratchpad;
2529 s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2530 s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2531 assert(s->linesize >= 32); //FIXME
2532 }
2533
2534 encode_mb(s, motion_x, motion_y);
2535
2536 score= put_bits_count(&s->pb);
2537 if(s->data_partitioning){
2538 score+= put_bits_count(&s->pb2);
2539 score+= put_bits_count(&s->tex_pb);
2540 }
2541
2542 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2543 ff_mpv_decode_mb(s, s->block);
2544
2545 score *= s->lambda2;
2546 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2547 }
2548
2549 if(*next_block){
2550 memcpy(s->dest, dest_backup, sizeof(s->dest));
2551 }
2552
2553 if(score<*dmin){
2554 *dmin= score;
2555 *next_block^=1;
2556
2557 copy_context_after_encode(best, s, type);
2558 }
2559 }
2560
2561 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2562 uint32_t *sq = ff_square_tab + 256;
2563 int acc=0;
2564 int x,y;
2565
2566 if(w==16 && h==16)
2567 return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2568 else if(w==8 && h==8)
2569 return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2570
2571 for(y=0; y<h; y++){
2572 for(x=0; x<w; x++){
2573 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2574 }
2575 }
2576
2577 assert(acc>=0);
2578
2579 return acc;
2580 }
2581
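/* Sum of squared errors (or NSSE) between the source macroblock and its
 * reconstruction, with width/height clipped at the picture border. */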
2582 static int sse_mb(MpegEncContext *s){
2583 int w= 16;
2584 int h= 16;
2585
2586 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2587 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2588
2589 if(w==16 && h==16)
2590 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2591 return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2592 s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2593 s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2594 }else{
2595 return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2596 s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2597 s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2598 }
2599 else
2600 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2601 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2602 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2603 }
2604
2605 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2606 MpegEncContext *s= *(void**)arg;
2607
2608
2609 s->me.pre_pass=1;
2610 s->me.dia_size= s->avctx->pre_dia_size;
2611 s->first_slice_line=1;
2612 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2613 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2614 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2615 }
2616 s->first_slice_line=0;
2617 }
2618
2619 s->me.pre_pass=0;
2620
2621 return 0;
2622 }
2623
2624 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2625 MpegEncContext *s= *(void**)arg;
2626
2627 s->me.dia_size= s->avctx->dia_size;
2628 s->first_slice_line=1;
2629 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2630 s->mb_x=0; //for block init below
2631 ff_init_block_index(s);
2632 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2633 s->block_index[0]+=2;
2634 s->block_index[1]+=2;
2635 s->block_index[2]+=2;
2636 s->block_index[3]+=2;
2637
2638 /* compute motion vector & mb_type and store in context */
2639 if(s->pict_type==AV_PICTURE_TYPE_B)
2640 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2641 else
2642 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2643 }
2644 s->first_slice_line=0;
2645 }
2646 return 0;
2647 }
2648
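/* Per-slice pass over the source frame computing the spatial variance and
 * mean of every macroblock; these values are used later, e.g. by rate
 * control and adaptive quantization. */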
2649 static int mb_var_thread(AVCodecContext *c, void *arg){
2650 MpegEncContext *s= *(void**)arg;
2651 int mb_x, mb_y;
2652
2653 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2654 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2655 int xx = mb_x * 16;
2656 int yy = mb_y * 16;
2657 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2658 int varc;
2659 int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2660
2661 varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2662 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2663
2664 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2665 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2666 s->me.mb_var_sum_temp += varc;
2667 }
2668 }
2669 return 0;
2670 }
2671
2672 static void write_slice_end(MpegEncContext *s){
2673 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2674 if(s->partitioned_frame){
2675 ff_mpeg4_merge_partitions(s);
2676 }
2677
2678 ff_mpeg4_stuffing(&s->pb);
2679 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2680 ff_mjpeg_encode_stuffing(&s->pb);
2681 }
2682
2683 avpriv_align_put_bits(&s->pb);
2684 flush_put_bits(&s->pb);
2685
2686 if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2687 s->misc_bits+= get_bits_diff(s);
2688 }
2689
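/* Append one 12-byte record to the AV_PKT_DATA_H263_MB_INFO side data:
 * bit offset of the macroblock in the packet, qscale, GOB number, the
 * macroblock address within the GOB and the motion vector predictor
 * (the second vector of 4MV is not implemented and written as zero). */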
2690 static void write_mb_info(MpegEncContext *s)
2691 {
2692 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2693 int offset = put_bits_count(&s->pb);
2694 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2695 int gobn = s->mb_y / s->gob_index;
2696 int pred_x, pred_y;
2697 if (CONFIG_H263_ENCODER)
2698 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2699 bytestream_put_le32(&ptr, offset);
2700 bytestream_put_byte(&ptr, s->qscale);
2701 bytestream_put_byte(&ptr, gobn);
2702 bytestream_put_le16(&ptr, mba);
2703 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2704 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2705 /* 4MV not implemented */
2706 bytestream_put_byte(&ptr, 0); /* hmv2 */
2707 bytestream_put_byte(&ptr, 0); /* vmv2 */
2708 }
2709
2710 static void update_mb_info(MpegEncContext *s, int startcode)
2711 {
2712 if (!s->mb_info)
2713 return;
2714 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2715 s->mb_info_size += 12;
2716 s->prev_mb_info = s->last_mb_info;
2717 }
2718 if (startcode) {
2719 s->prev_mb_info = put_bits_count(&s->pb)/8;
2720 /* This might have incremented mb_info_size above, and we return without
2721 * actually writing any info into that slot yet. But in that case,
2722 * this function will be called again after the start code has been
2723 * written, and the mb info will be written then. */
2724 return;
2725 }
2726
2727 s->last_mb_info = put_bits_count(&s->pb)/8;
2728 if (!s->mb_info_size)
2729 s->mb_info_size += 12;
2730 write_mb_info(s);
2731 }
2732
2733 static int encode_thread(AVCodecContext *c, void *arg){
2734 MpegEncContext *s= *(void**)arg;
2735 int mb_x, mb_y;
2736 int chr_h= 16>>s->chroma_y_shift;
2737 int i, j;
2738 MpegEncContext best_s = { 0 }, backup_s;
2739 uint8_t bit_buf[2][MAX_MB_BYTES];
2740 uint8_t bit_buf2[2][MAX_MB_BYTES];
2741 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2742 PutBitContext pb[2], pb2[2], tex_pb[2];
2743
2744 for(i=0; i<2; i++){
2745 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2746 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2747 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2748 }
2749
2750 s->last_bits= put_bits_count(&s->pb);
2751 s->mv_bits=0;
2752 s->misc_bits=0;
2753 s->i_tex_bits=0;
2754 s->p_tex_bits=0;
2755 s->i_count=0;
2756 s->f_count=0;
2757 s->b_count=0;
2758 s->skip_count=0;
2759
2760 for(i=0; i<3; i++){
2761 /* init last dc values */
2762 /* note: quant matrix value (8) is implied here */
2763 s->last_dc[i] = 128 << s->intra_dc_precision;
2764
2765 s->current_picture.encoding_error[i] = 0;
2766 }
2767 s->mb_skip_run = 0;
2768 memset(s->last_mv, 0, sizeof(s->last_mv));
2769
2770 s->last_mv_dir = 0;
2771
2772 switch(s->codec_id){
2773 case AV_CODEC_ID_H263:
2774 case AV_CODEC_ID_H263P:
2775 case AV_CODEC_ID_FLV1:
2776 if (CONFIG_H263_ENCODER)
2777 s->gob_index = H263_GOB_HEIGHT(s->height);
2778 break;
2779 case AV_CODEC_ID_MPEG4:
2780 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2781 ff_mpeg4_init_partitions(s);
2782 break;
2783 }
2784
2785 s->resync_mb_x=0;
2786 s->resync_mb_y=0;
2787 s->first_slice_line = 1;
2788 s->ptr_lastgob = s->pb.buf;
2789 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2790 s->mb_x=0;
2791 s->mb_y= mb_y;
2792
2793 ff_set_qscale(s, s->qscale);
2794 ff_init_block_index(s);
2795
2796 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2797 int xy= mb_y*s->mb_stride + mb_x; // not const: H.261 may adjust this below
2798 int mb_type= s->mb_type[xy];
2799 // int d;
2800 int dmin= INT_MAX;
2801 int dir;
2802
2803 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2804 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2805 return -1;
2806 }
2807 if(s->data_partitioning){
2808 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2809 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2810 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2811 return -1;
2812 }
2813 }
2814
2815 s->mb_x = mb_x;
2816 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2817 ff_update_block_index(s);
2818
2819 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2820 ff_h261_reorder_mb_index(s);
2821 xy= s->mb_y*s->mb_stride + s->mb_x;
2822 mb_type= s->mb_type[xy];
2823 }
2824
2825 /* write gob / video packet header */
2826 if(s->rtp_mode){
2827 int current_packet_size, is_gob_start;
2828
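/* A new GOB / slice / video packet is started once the current packet
 * reaches rtp_payload_size bytes, and at the first row of each slice
 * context; the codec-specific rules below can veto or force this. */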
2829 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2830
2831 is_gob_start = s->rtp_payload_size &&
2832 current_packet_size >= s->rtp_payload_size &&
2833 mb_y + mb_x > 0;
2834
2835 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2836
2837 switch(s->codec_id){
2838 case AV_CODEC_ID_H263:
2839 case AV_CODEC_ID_H263P:
2840 if(!s->h263_slice_structured)
2841 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2842 break;
2843 case AV_CODEC_ID_MPEG2VIDEO:
2844 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
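/* fall through */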
2845 case AV_CODEC_ID_MPEG1VIDEO:
2846 if(s->mb_skip_run) is_gob_start=0;
2847 break;
2848 }
2849
2850 if(is_gob_start){
2851 if(s->start_mb_y != mb_y || mb_x!=0){
2852 write_slice_end(s);
2853
2854 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2855 ff_mpeg4_init_partitions(s);
2856 }
2857 }
2858
2859 assert((put_bits_count(&s->pb)&7) == 0);
2860 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2861
2862 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2863 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2864 int d = 100 / s->error_rate;
2865 if(r % d == 0){
2866 current_packet_size=0;
2867 s->pb.buf_ptr= s->ptr_lastgob;
2868 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2869 }
2870 }
2871
2872 #if FF_API_RTP_CALLBACK
2873 FF_DISABLE_DEPRECATION_WARNINGS
2874 if (s->avctx->rtp_callback){
2875 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2876 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2877 }
2878 FF_ENABLE_DEPRECATION_WARNINGS
2879 #endif
2880 update_mb_info(s, 1);
2881
2882 switch(s->codec_id){
2883 case AV_CODEC_ID_MPEG4:
2884 if (CONFIG_MPEG4_ENCODER) {
2885 ff_mpeg4_encode_video_packet_header(s);
2886 ff_mpeg4_clean_buffers(s);
2887 }
2888 break;
2889 case AV_CODEC_ID_MPEG1VIDEO:
2890 case AV_CODEC_ID_MPEG2VIDEO:
2891 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2892 ff_mpeg1_encode_slice_header(s);
2893 ff_mpeg1_clean_buffers(s);
2894 }
2895 break;
2896 case AV_CODEC_ID_H263:
2897 case AV_CODEC_ID_H263P:
2898 if (CONFIG_H263_ENCODER)
2899 ff_h263_encode_gob_header(s, mb_y);
2900 break;
2901 }
2902
2903 if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
2904 int bits= put_bits_count(&s->pb);
2905 s->misc_bits+= bits - s->last_bits;
2906 s->last_bits= bits;
2907 }
2908
2909 s->ptr_lastgob += current_packet_size;
2910 s->first_slice_line=1;
2911 s->resync_mb_x=mb_x;
2912 s->resync_mb_y=mb_y;
2913 }
2914 }
2915
2916 if( (s->resync_mb_x == s->mb_x)
2917 && s->resync_mb_y+1 == s->mb_y){
2918 s->first_slice_line=0;
2919 }
2920
2921 s->mb_skipped=0;
2922 s->dquant=0; //only for QP_RD
2923
2924 update_mb_info(s, 0);
2925
2926 if (mb_type & (mb_type-1