lavc: deprecate CODEC_FLAG_INPUT_PRESERVED
[libav.git] / libavcodec / mpegvideo_enc.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of Libav.
9 *
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30 #include <stdint.h>
31
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timer.h"
38 #include "avcodec.h"
39 #include "dct.h"
40 #include "dsputil.h"
41 #include "mpeg12.h"
42 #include "mpegvideo.h"
43 #include "h261.h"
44 #include "h263.h"
45 #include "mathops.h"
46 #include "mpegutils.h"
47 #include "mjpegenc.h"
48 #include "msmpeg4.h"
49 #include "faandct.h"
50 #include "thread.h"
51 #include "aandcttab.h"
52 #include "flv.h"
53 #include "mpeg4video.h"
54 #include "internal.h"
55 #include "bytestream.h"
56 #include <limits.h>
57
58 static int encode_picture(MpegEncContext *s, int picture_number);
59 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
60 static int sse_mb(MpegEncContext *s);
61 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
62 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
63
64 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
65 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
66
67 const AVOption ff_mpv_generic_options[] = {
68 FF_MPV_COMMON_OPTS
69 { NULL },
70 };
71
/**
 * Precompute quantization multiplier tables for every qscale in [qmin, qmax]
 * from a reference quantization matrix.
 *
 * The table layout depends on which forward DCT is in use, because each DCT
 * has a different output scaling.  qmat16 (the 16-bit tables used by the MMX
 * quantizer) is only filled in the generic (non-islow/ifast) branch.
 *
 * @param dsp          DSP context; fdct/idct_permutation select the layout
 * @param qmat         out: 32-bit multipliers, one 64-entry row per qscale
 * @param qmat16       out: 16-bit multiplier/bias pairs (generic DCT only)
 * @param quant_matrix reference quant matrix, unpermuted order
 * @param bias         rounding bias in QUANT_BIAS_SHIFT units
 * @param qmin         first qscale to fill
 * @param qmax         last qscale to fill
 * @param intra        1 to skip the DC coefficient in the overflow check
 */
void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    int qscale;
    int shift = 0;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
            dsp->fdct == ff_jpeg_fdct_islow_10 ||
            dsp->fdct == ff_faandct) {
            for (i = 0; i < 64; i++) {
                const int j = dsp->idct_permutation[i];
                /* 16 <= qscale * quant_matrix[i] <= 7905, so
                 * (1 << QMAT_SHIFT) / 16 >= qmat[qscale][i]
                 *                        >= (1 << QMAT_SHIFT) / 7905 */

                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
                                        (qscale * quant_matrix[j]));
            }
        } else if (dsp->fdct == ff_fdct_ifast) {
            for (i = 0; i < 64; i++) {
                const int j = dsp->idct_permutation[i];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */

                qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
                                        (ff_aanscales[i] * qscale *
                                         quant_matrix[j]));
            }
        } else {
            for (i = 0; i < 64; i++) {
                const int j = dsp->idct_permutation[i];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                 * Assume x = qscale * quant_matrix[i]
                 * So 16 <= x <= 7905
                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
                 * so 32768 >= (1 << 19) / (x) >= 67 */
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
                                        (qscale * quant_matrix[j]));
                //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
                //                    (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
                                       (qscale * quant_matrix[j]);

                /* Clamp away 0 and the sign-flipping 128*256 so the MMX
                 * quantizer's 16-bit multiply stays valid. */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
                                qmat16[qscale][0][i]);
            }
        }

        /* Find how far the largest DCT coefficient times the multiplier can
         * overflow 32 bits; only warn, larger shift means possible overflow. */
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (dsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    if (shift) {
        av_log(NULL, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
149
150 static inline void update_qscale(MpegEncContext *s)
151 {
152 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
153 (FF_LAMBDA_SHIFT + 7);
154 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
155
156 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
157 FF_LAMBDA_SHIFT;
158 }
159
160 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
161 {
162 int i;
163
164 if (matrix) {
165 put_bits(pb, 1, 1);
166 for (i = 0; i < 64; i++) {
167 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
168 }
169 } else
170 put_bits(pb, 1, 0);
171 }
172
173 /**
174 * init s->current_picture.qscale_table from s->lambda_table
175 */
176 void ff_init_qscale_tab(MpegEncContext *s)
177 {
178 int8_t * const qscale_table = s->current_picture.qscale_table;
179 int i;
180
181 for (i = 0; i < s->mb_num; i++) {
182 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
183 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
184 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
185 s->avctx->qmax);
186 }
187 }
188
189 static void update_duplicate_context_after_me(MpegEncContext *dst,
190 MpegEncContext *src)
191 {
192 #define COPY(a) dst->a= src->a
193 COPY(pict_type);
194 COPY(current_picture);
195 COPY(f_code);
196 COPY(b_code);
197 COPY(qscale);
198 COPY(lambda);
199 COPY(lambda2);
200 COPY(picture_in_gop_number);
201 COPY(gop_picture_number);
202 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
203 COPY(progressive_frame); // FIXME don't set in encode_header
204 COPY(partitioned_frame); // FIXME don't set in encode_header
205 #undef COPY
206 }
207
208 /**
209 * Set the given MpegEncContext to defaults for encoding.
210 * the changed fields will not depend upon the prior state of the MpegEncContext.
211 */
212 static void MPV_encode_defaults(MpegEncContext *s)
213 {
214 int i;
215 ff_MPV_common_defaults(s);
216
217 for (i = -16; i < 16; i++) {
218 default_fcode_tab[i + MAX_MV] = 1;
219 }
220 s->me.mv_penalty = default_mv_penalty;
221 s->fcode_tab = default_fcode_tab;
222
223 s->input_picture_number = 0;
224 s->picture_in_gop_number = 0;
225 }
226
/**
 * Initialize the video encoder: validate the user-supplied AVCodecContext
 * settings, derive the internal encoder state from them, perform
 * codec-specific setup and allocate the encoder's working buffers.
 *
 * @return 0 on success, a negative value on failure
 */
av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i, ret;

    MPV_encode_defaults(s);

    /* Validate the pixel format against what each codec can encode. */
    switch (avctx->codec_id) {
    case AV_CODEC_ID_MPEG2VIDEO:
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
            avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
            av_log(avctx, AV_LOG_ERROR,
                   "only YUV420 and YUV422 are supported\n");
            return -1;
        }
        break;
    case AV_CODEC_ID_MJPEG:
        /* Non-J 4:2:0/4:2:2 formats are accepted only with relaxed
         * standard compliance. */
        if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
            avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
            ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
              avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
             avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
            return -1;
        }
        break;
    default:
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
            return -1;
        }
    }

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
        break;
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUV420P:
    default:
        s->chroma_format = CHROMA_420;
        break;
    }

    /* Copy basic user settings into the encoder context. */
    s->bit_rate = avctx->bit_rate;
    s->width    = avctx->width;
    s->height   = avctx->height;
    if (avctx->gop_size > 600 &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_ERROR,
               "Warning keyframe interval too large! reducing it ...\n");
        avctx->gop_size = 600;
    }
    s->gop_size     = avctx->gop_size;
    s->avctx        = avctx;
    s->flags        = avctx->flags;
    s->flags2       = avctx->flags2;
    if (avctx->max_b_frames > MAX_B_FRAMES) {
        av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
               "is %d.\n", MAX_B_FRAMES);
    }
    s->max_b_frames = avctx->max_b_frames;
    s->codec_id     = avctx->codec->id;
    s->strict_std_compliance = avctx->strict_std_compliance;
    s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
    s->mpeg_quant         = avctx->mpeg_quant;
    s->rtp_mode           = !!avctx->rtp_payload_size;
    s->intra_dc_precision = avctx->intra_dc_precision;
    s->user_specified_pts = AV_NOPTS_VALUE;

    /* gop_size <= 1 means intra-only; keep a sane internal gop_size anyway. */
    if (s->gop_size <= 1) {
        s->intra_only = 1;
        s->gop_size   = 12;
    } else {
        s->intra_only = 0;
    }

    s->me_method = avctx->me_method;

    /* Fixed QSCALE */
    s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);

    /* Adaptive quantization is enabled by any masking option or QP RD,
     * but is meaningless with a fixed qscale. */
    s->adaptive_quant = (s->avctx->lumi_masking ||
                         s->avctx->dark_masking ||
                         s->avctx->temporal_cplx_masking ||
                         s->avctx->spatial_cplx_masking  ||
                         s->avctx->p_masking      ||
                         s->avctx->border_masking ||
                         (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
                        !s->fixed_qscale;

    s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);

    /* Rate-control parameter sanity checks. */
    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        av_log(avctx, AV_LOG_ERROR,
               "a vbv buffer size is needed, "
               "for encoding with a maximum bitrate\n");
        return -1;
    }

    if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
    }

    if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
        return -1;
    }

    if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
        av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
        return -1;
    }

    if (avctx->rc_max_rate &&
        avctx->rc_max_rate == avctx->bit_rate &&
        avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "impossible bitrate constraints, this will fail\n");
    }

    /* The VBV buffer must hold at least one frame's worth of bits. */
    if (avctx->rc_buffer_size &&
        avctx->bit_rate * (int64_t)avctx->time_base.num >
            avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
        return -1;
    }

    if (!s->fixed_qscale &&
        avctx->bit_rate * av_q2d(avctx->time_base) >
            avctx->bit_rate_tolerance) {
        av_log(avctx, AV_LOG_ERROR,
               "bitrate tolerance too small for bitrate\n");
        return -1;
    }

    /* CBR MPEG-1/2: vbv_delay is a 16-bit field in 90 kHz units; warn when
     * the buffer is too large for it to be representable. */
    if (s->avctx->rc_max_rate &&
        s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
        (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
        90000LL * (avctx->rc_buffer_size - 1) >
            s->avctx->rc_max_rate * 0xFFFFLL) {
        av_log(avctx, AV_LOG_INFO,
               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
               "specified vbv buffer is too large for the given bitrate!\n");
    }

    /* Feature/codec compatibility checks. */
    if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
        s->codec_id != AV_CODEC_ID_FLV1) {
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
        return -1;
    }

    if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
        av_log(avctx, AV_LOG_ERROR,
               "OBMC is only supported with simple mb decision\n");
        return -1;
    }

    if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
        return -1;
    }

    if (s->max_b_frames                    &&
        s->codec_id != AV_CODEC_ID_MPEG4      &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
        return -1;
    }

    /* These codecs store the pixel aspect ratio in 8-bit fields. */
    if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
         s->codec_id == AV_CODEC_ID_H263  ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->sample_aspect_ratio.num > 255 ||
         avctx->sample_aspect_ratio.den > 255)) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        return -1;
    }

    if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
        s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
        return -1;
    }

    // FIXME mpeg2 uses that too
    if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR,
               "mpeg2 style quantization not supported by codec\n");
        return -1;
    }

    if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
        return -1;
    }

    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        s->avctx->mb_decision != FF_MB_DECISION_RD) {
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
        return -1;
    }

    if (s->avctx->scenechange_threshold < 1000000000 &&
        (s->flags & CODEC_FLAG_CLOSED_GOP)) {
        av_log(avctx, AV_LOG_ERROR,
               "closed gop with scene change detection are not supported yet, "
               "set threshold to 1000000000\n");
        return -1;
    }

    if (s->flags & CODEC_FLAG_LOW_DELAY) {
        if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
            av_log(avctx, AV_LOG_ERROR,
                   "low delay forcing is only available for mpeg2\n");
            return -1;
        }
        if (s->max_b_frames != 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "b frames cannot be used with low delay\n");
            return -1;
        }
    }

    if (s->q_scale_type == 1) {
        if (avctx->qmax > 12) {
            av_log(avctx, AV_LOG_ERROR,
                   "non linear quant only supports qmax <= 12 currently\n");
            return -1;
        }
    }

    /* Slice threading is only implemented for a few codecs. */
    if (s->avctx->thread_count > 1         &&
        s->codec_id != AV_CODEC_ID_MPEG4      &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
        (s->codec_id != AV_CODEC_ID_H263P)) {
        av_log(avctx, AV_LOG_ERROR,
               "multi threaded encoding not supported by codec\n");
        return -1;
    }

    if (s->avctx->thread_count < 1) {
        av_log(avctx, AV_LOG_ERROR,
               "automatic thread number detection not supported by codec,"
               "patch welcome\n");
        return -1;
    }

    /* Threaded encoding needs independently decodable slices. */
    if (s->avctx->thread_count > 1)
        s->rtp_mode = 1;

    if (!avctx->time_base.den || !avctx->time_base.num) {
        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
        return -1;
    }

    i = (INT_MAX / 2 + 128) >> 8;
    if (avctx->mb_threshold >= i) {
        av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
               i - 1);
        return -1;
    }

    if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
        av_log(avctx, AV_LOG_INFO,
               "notice: b_frame_strategy only affects the first pass\n");
        avctx->b_frame_strategy = 0;
    }

    /* Reduce the time base so codec-level fields stay in range. */
    i = av_gcd(avctx->time_base.den, avctx->time_base.num);
    if (i > 1) {
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;
        //return -1;
    }

    /* Default quantizer rounding bias per codec family. */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
        // (a + x * 3 / 8) / x
        s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
        s->inter_quant_bias = 0;
    } else {
        s->intra_quant_bias = 0;
        // (a - x / 4) / x
        s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
    }

    if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias = avctx->intra_quant_bias;
    if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias = avctx->inter_quant_bias;

    /* MPEG-4 stores the time base denominator in a 16-bit field. */
    if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
        s->avctx->time_base.den > (1 << 16) - 1) {
        av_log(avctx, AV_LOG_ERROR,
               "timebase %d/%d not supported by MPEG 4 standard, "
               "the maximum admitted value for the timebase denominator "
               "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
               (1 << 16) - 1);
        return -1;
    }
    s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;

    /* Codec-specific setup: output format, delay, low_delay and variant
     * flags per codec. */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case AV_CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
        s->rtp_mode   = 1;
        break;
    case AV_CODEC_ID_MJPEG:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        if (!CONFIG_MJPEG_ENCODER ||
            ff_mjpeg_encode_init(s) < 0)
            return -1;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_H261:
        if (!CONFIG_H261_ENCODER)
            return -1;
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for the "
                   "H.261 codec.\nValid sizes are 176x144, 352x288\n",
                   s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H261;
        avctx->delay  = 0;
        s->low_delay  = 1;
        break;
    case AV_CODEC_ID_H263:
        if (!CONFIG_H263_ENCODER)
            return -1;
        if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
                             s->width, s->height) == 8) {
            av_log(avctx, AV_LOG_INFO,
                   "The specified picture size of %dx%d is not valid for "
                   "the H.263 codec.\nValid sizes are 128x96, 176x144, "
                   "352x288, 704x576, and 1408x1152."
                   "Try H.263+.\n", s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H263;
        avctx->delay  = 0;
        s->low_delay  = 1;
        break;
    case AV_CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->h263_plus  = 1;
        /* Fx */
        s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
        s->modified_quant  = s->h263_aic;
        s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
        s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;

        /* /Fx */
        /* These are just to be sure */
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_FLV1:
        s->out_format      = FMT_H263;
        s->h263_flv        = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode        = 0; /* don't allow GOB */
        avctx->delay       = 0;
        s->low_delay       = 1;
        break;
    case AV_CODEC_ID_RV10:
        s->out_format = FMT_H263;
        avctx->delay  = 0;
        s->low_delay  = 1;
        break;
    case AV_CODEC_ID_RV20:
        s->out_format      = FMT_H263;
        avctx->delay       = 0;
        s->low_delay       = 1;
        s->modified_quant  = 1;
        s->h263_aic        = 1;
        s->h263_plus       = 1;
        s->loop_filter     = 1;
        s->unrestricted_mv = 0;
        break;
    case AV_CODEC_ID_MPEG4:
        s->out_format      = FMT_H263;
        s->h263_pred       = 1;
        s->unrestricted_mv = 1;
        s->low_delay       = s->max_b_frames ? 0 : 1;
        avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case AV_CODEC_ID_MSMPEG4V2:
        s->out_format      = FMT_H263;
        s->h263_pred       = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 2;
        avctx->delay       = 0;
        s->low_delay       = 1;
        break;
    case AV_CODEC_ID_MSMPEG4V3:
        s->out_format        = FMT_H263;
        s->h263_pred         = 1;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 3;
        s->flipflop_rounding = 1;
        avctx->delay         = 0;
        s->low_delay         = 1;
        break;
    case AV_CODEC_ID_WMV1:
        s->out_format        = FMT_H263;
        s->h263_pred         = 1;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 4;
        s->flipflop_rounding = 1;
        avctx->delay         = 0;
        s->low_delay         = 1;
        break;
    case AV_CODEC_ID_WMV2:
        s->out_format        = FMT_H263;
        s->h263_pred         = 1;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 5;
        s->flipflop_rounding = 1;
        avctx->delay         = 0;
        s->low_delay         = 1;
        break;
    default:
        return -1;
    }

    avctx->has_b_frames = !s->low_delay;

    s->encoding = 1;

    s->progressive_frame    =
    s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
                                                CODEC_FLAG_INTERLACED_ME) ||
                                s->alternate_scan);

    /* init */
    if (ff_MPV_common_init(s) < 0)
        return -1;

    if (ARCH_X86)
        ff_MPV_encode_init_x86(s);

    s->avctx->coded_frame = s->current_picture.f;

    /* Allocate encoder working buffers; any failure unwinds via fail:. */
    if (s->msmpeg4_version) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                          2 * 2 * (MAX_LEVEL + 1) *
                          (MAX_RUN + 1) * 2 * sizeof(int), fail);
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,   64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,   64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);

    if (s->avctx->noise_reduction) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                          2 * 64 * sizeof(uint16_t), fail);
    }

    if (CONFIG_H263_ENCODER)
        ff_h263dsp_init(&s->h263dsp);
    if (!s->dct_quantize)
        s->dct_quantize = ff_dct_quantize_c;
    if (!s->denoise_dct)
        s->denoise_dct = denoise_dct_c;
    s->fast_dct_quantize = s->dct_quantize;
    if (avctx->trellis)
        s->dct_quantize = dct_quantize_trellis_c;

    if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
        s->chroma_qscale_table = ff_h263_chroma_qscale_table;

    s->quant_precision = 5;

    ff_set_cmp(&s->dsp, s->dsp.ildct_cmp,      s->avctx->ildct_cmp);
    ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);

    if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
        ff_h263_encode_init(s);
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
        ff_msmpeg4_encode_init(s);
    if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
        && s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);

    /* init q matrix */
    for (i = 0; i < 64; i++) {
        int j = s->dsp.idct_permutation[i];
        if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
            s->mpeg_quant) {
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
            s->intra_matrix[j] =
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        } else {
            /* mpeg1/2 */
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        }
        if (s->avctx->intra_matrix)
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
        if (s->avctx->inter_matrix)
            s->inter_matrix[j] = s->avctx->inter_matrix[i];
    }

    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, avctx->qmin,
                          31, 1);
        ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
                          s->inter_matrix, s->inter_quant_bias, avctx->qmin,
                          31, 0);
    }

    if (ff_rate_control_init(s) < 0)
        return -1;

#if FF_API_ERROR_RATE
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->error_rate)
        s->error_rate = avctx->error_rate;
    FF_ENABLE_DEPRECATION_WARNINGS;
#endif

    /* b_frame_strategy 2 pre-encodes downscaled frames to pick the best
     * B-frame count; allocate the scratch frames for that here. */
    if (avctx->b_frame_strategy == 2) {
        for (i = 0; i < s->max_b_frames + 2; i++) {
            s->tmp_frames[i] = av_frame_alloc();
            if (!s->tmp_frames[i])
                return AVERROR(ENOMEM);

            s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
            s->tmp_frames[i]->width  = s->width  >> avctx->brd_scale;
            s->tmp_frames[i]->height = s->height >> avctx->brd_scale;

            ret = av_frame_get_buffer(s->tmp_frames[i], 32);
            if (ret < 0)
                return ret;
        }
    }

    return 0;
fail:
    ff_MPV_encode_end(avctx);
    return AVERROR_UNKNOWN;
}
804
805 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
806 {
807 MpegEncContext *s = avctx->priv_data;
808 int i;
809
810 ff_rate_control_uninit(s);
811
812 ff_MPV_common_end(s);
813 if (CONFIG_MJPEG_ENCODER &&
814 s->out_format == FMT_MJPEG)
815 ff_mjpeg_encode_close(s);
816
817 av_freep(&avctx->extradata);
818
819 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
820 av_frame_free(&s->tmp_frames[i]);
821
822 ff_free_picture_tables(&s->new_picture);
823 ff_mpeg_unref_picture(s, &s->new_picture);
824
825 av_freep(&s->avctx->stats_out);
826 av_freep(&s->ac_stats);
827
828 av_freep(&s->q_intra_matrix);
829 av_freep(&s->q_inter_matrix);
830 av_freep(&s->q_intra_matrix16);
831 av_freep(&s->q_inter_matrix16);
832 av_freep(&s->input_picture);
833 av_freep(&s->reordered_input_picture);
834 av_freep(&s->dct_offset);
835
836 return 0;
837 }
838
/**
 * Sum of absolute errors of a 16x16 pixel block against a constant
 * reference value.
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int row, col;
    int sum = 0;

    for (row = 0; row < 16; row++) {
        for (col = 0; col < 16; col++) {
            int diff = src[col + row * stride] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
852
853 static int get_intra_count(MpegEncContext *s, uint8_t *src,
854 uint8_t *ref, int stride)
855 {
856 int x, y, w, h;
857 int acc = 0;
858
859 w = s->width & ~15;
860 h = s->height & ~15;
861
862 for (y = 0; y < h; y += 16) {
863 for (x = 0; x < w; x += 16) {
864 int offset = x + y * stride;
865 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
866 16);
867 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
868 int sae = get_sae(src + offset, mean, stride);
869
870 acc += sae + 500 < sad;
871 }
872 }
873 return acc;
874 }
875
876
877 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
878 {
879 Picture *pic = NULL;
880 int64_t pts;
881 int i, display_picture_number = 0, ret;
882 const int encoding_delay = s->max_b_frames ? s->max_b_frames :
883 (s->low_delay ? 0 : 1);
884 int direct = 1;
885
886 if (pic_arg) {
887 pts = pic_arg->pts;
888 display_picture_number = s->input_picture_number++;
889
890 if (pts != AV_NOPTS_VALUE) {
891 if (s->user_specified_pts != AV_NOPTS_VALUE) {
892 int64_t time = pts;
893 int64_t last = s->user_specified_pts;
894
895 if (time <= last) {
896 av_log(s->avctx, AV_LOG_ERROR,
897 "Error, Invalid timestamp=%"PRId64", "
898 "last=%"PRId64"\n", pts, s->user_specified_pts);
899 return -1;
900 }
901
902 if (!s->low_delay && display_picture_number == 1)
903 s->dts_delta = time - last;
904 }
905 s->user_specified_pts = pts;
906 } else {
907 if (s->user_specified_pts != AV_NOPTS_VALUE) {
908 s->user_specified_pts =
909 pts = s->user_specified_pts + 1;
910 av_log(s->avctx, AV_LOG_INFO,
911 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
912 pts);
913 } else {
914 pts = display_picture_number;
915 }
916 }
917 }
918
919 if (pic_arg) {
920 if (!pic_arg->buf[0]);
921 direct = 0;
922 if (pic_arg->linesize[0] != s->linesize)
923 direct = 0;
924 if (pic_arg->linesize[1] != s->uvlinesize)
925 direct = 0;
926 if (pic_arg->linesize[2] != s->uvlinesize)
927 direct = 0;
928
929 av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
930 pic_arg->linesize[1], s->linesize, s->uvlinesize);
931
932 if (direct) {
933 i = ff_find_unused_picture(s, 1);
934 if (i < 0)
935 return i;
936
937 pic = &s->picture[i];
938 pic->reference = 3;
939
940 if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
941 return ret;
942 if (ff_alloc_picture(s, pic, 1) < 0) {
943 return -1;
944 }
945 } else {
946 i = ff_find_unused_picture(s, 0);
947 if (i < 0)
948 return i;
949
950 pic = &s->picture[i];
951 pic->reference = 3;
952
953 if (ff_alloc_picture(s, pic, 0) < 0) {
954 return -1;
955 }
956
957 if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
958 pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
959 pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
960 // empty
961 } else {
962 int h_chroma_shift, v_chroma_shift;
963 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
964 &h_chroma_shift,
965 &v_chroma_shift);
966
967 for (i = 0; i < 3; i++) {
968 int src_stride = pic_arg->linesize[i];
969 int dst_stride = i ? s->uvlinesize : s->linesize;
970 int h_shift = i ? h_chroma_shift : 0;
971 int v_shift = i ? v_chroma_shift : 0;
972 int w = s->width >> h_shift;
973 int h = s->height >> v_shift;
974 uint8_t *src = pic_arg->data[i];
975 uint8_t *dst = pic->f->data[i];
976
977 if (!s->avctx->rc_buffer_size)
978 dst += INPLACE_OFFSET;
979
980 if (src_stride == dst_stride)
981 memcpy(dst, src, src_stride * h);
982 else {
983 while (h--) {
984 memcpy(dst, src, w);
985 dst += dst_stride;
986 src += src_stride;
987 }
988 }
989 }
990 }
991 }
992 ret = av_frame_copy_props(pic->f, pic_arg);
993 if (ret < 0)
994 return ret;
995
996 pic->f->display_picture_number = display_picture_number;
997 pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
998 }
999
1000 /* shift buffer entries */
1001 for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1002 s->input_picture[i - 1] = s->input_picture[i];
1003
1004 s->input_picture[encoding_delay] = (Picture*) pic;
1005
1006 return 0;
1007 }
1008
1009 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1010 {
1011 int x, y, plane;
1012 int score = 0;
1013 int64_t score64 = 0;
1014
1015 for (plane = 0; plane < 3; plane++) {
1016 const int stride = p->f->linesize[plane];
1017 const int bw = plane ? 1 : 2;
1018 for (y = 0; y < s->mb_height * bw; y++) {
1019 for (x = 0; x < s->mb_width * bw; x++) {
1020 int off = p->shared ? 0 : 16;
1021 uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1022 uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1023 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1024
1025 switch (s->avctx->frame_skip_exp) {
1026 case 0: score = FFMAX(score, v); break;
1027 case 1: score += FFABS(v); break;
1028 case 2: score += v * v; break;
1029 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1030 case 4: score64 += v * v * (int64_t)(v * v); break;
1031 }
1032 }
1033 }
1034 }
1035
1036 if (score)
1037 score64 = score;
1038
1039 if (score64 < s->avctx->frame_skip_threshold)
1040 return 1;
1041 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1042 return 1;
1043 return 0;
1044 }
1045
1046 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1047 {
1048 AVPacket pkt = { 0 };
1049 int ret, got_output;
1050
1051 av_init_packet(&pkt);
1052 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1053 if (ret < 0)
1054 return ret;
1055
1056 ret = pkt.size;
1057 av_free_packet(&pkt);
1058 return ret;
1059 }
1060
1061 static int estimate_best_b_count(MpegEncContext *s)
1062 {
1063 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1064 AVCodecContext *c = avcodec_alloc_context3(NULL);
1065 const int scale = s->avctx->brd_scale;
1066 int i, j, out_size, p_lambda, b_lambda, lambda2;
1067 int64_t best_rd = INT64_MAX;
1068 int best_b_count = -1;
1069
1070 assert(scale >= 0 && scale <= 3);
1071
1072 //emms_c();
1073 //s->next_picture_ptr->quality;
1074 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1075 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1076 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1077 if (!b_lambda) // FIXME we should do this somewhere else
1078 b_lambda = p_lambda;
1079 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1080 FF_LAMBDA_SHIFT;
1081
1082 c->width = s->width >> scale;
1083 c->height = s->height >> scale;
1084 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR;
1085 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1086 c->mb_decision = s->avctx->mb_decision;
1087 c->me_cmp = s->avctx->me_cmp;
1088 c->mb_cmp = s->avctx->mb_cmp;
1089 c->me_sub_cmp = s->avctx->me_sub_cmp;
1090 c->pix_fmt = AV_PIX_FMT_YUV420P;
1091 c->time_base = s->avctx->time_base;
1092 c->max_b_frames = s->max_b_frames;
1093
1094 if (avcodec_open2(c, codec, NULL) < 0)
1095 return -1;
1096
1097 for (i = 0; i < s->max_b_frames + 2; i++) {
1098 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1099 s->next_picture_ptr;
1100
1101 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1102 pre_input = *pre_input_ptr;
1103
1104 if (!pre_input.shared && i) {
1105 pre_input.f->data[0] += INPLACE_OFFSET;
1106 pre_input.f->data[1] += INPLACE_OFFSET;
1107 pre_input.f->data[2] += INPLACE_OFFSET;
1108 }
1109
1110 s->dsp.shrink[scale](s->tmp_frames[i]->data[0], s->tmp_frames[i]->linesize[0],
1111 pre_input.f->data[0], pre_input.f->linesize[0],
1112 c->width, c->height);
1113 s->dsp.shrink[scale](s->tmp_frames[i]->data[1], s->tmp_frames[i]->linesize[1],
1114 pre_input.f->data[1], pre_input.f->linesize[1],
1115 c->width >> 1, c->height >> 1);
1116 s->dsp.shrink[scale](s->tmp_frames[i]->data[2], s->tmp_frames[i]->linesize[2],
1117 pre_input.f->data[2], pre_input.f->linesize[2],
1118 c->width >> 1, c->height >> 1);
1119 }
1120 }
1121
1122 for (j = 0; j < s->max_b_frames + 1; j++) {
1123 int64_t rd = 0;
1124
1125 if (!s->input_picture[j])
1126 break;
1127
1128 c->error[0] = c->error[1] = c->error[2] = 0;
1129
1130 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1131 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1132
1133 out_size = encode_frame(c, s->tmp_frames[0]);
1134
1135 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1136
1137 for (i = 0; i < s->max_b_frames + 1; i++) {
1138 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1139
1140 s->tmp_frames[i + 1]->pict_type = is_p ?
1141 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1142 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1143
1144 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1145
1146 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1147 }
1148
1149 /* get the delayed frames */
1150 while (out_size) {
1151 out_size = encode_frame(c, NULL);
1152 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1153 }
1154
1155 rd += c->error[0] + c->error[1] + c->error[2];
1156
1157 if (rd < best_rd) {
1158 best_rd = rd;
1159 best_b_count = j;
1160 }
1161 }
1162
1163 avcodec_close(c);
1164 av_freep(&c);
1165
1166 return best_b_count;
1167 }
1168
/**
 * Pick the next picture to encode from the queued input pictures.
 *
 * Shifts the reordering queue, decides the picture type of the next coded
 * frame (I/P and how many B-frames precede it, according to
 * avctx->b_frame_strategy and the GOP settings), performs frame skipping
 * if enabled, and finally sets up s->new_picture / s->current_picture for
 * the encoder core.
 *
 * @return 0 on success, a negative error code on failure
 */
static int select_input_picture(MpegEncContext *s)
{
    int i, ret;

    /* shift the reordering queue by one */
    for (i = 1; i < MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;

    /* set next picture type & ordering */
    if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
        if (/*s->picture_in_gop_number >= s->gop_size ||*/
            s->next_picture_ptr == NULL || s->intra_only) {
            /* no reference available (or intra-only mode): code an I-frame */
            s->reordered_input_picture[0] = s->input_picture[0];
            s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
            s->reordered_input_picture[0]->f->coded_picture_number =
                s->coded_picture_number++;
        } else {
            int b_frames;

            /* frame skipping: drop the input if it is close enough to the
             * last reference and we are not forced to start a new GOP */
            if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
                if (s->picture_in_gop_number < s->gop_size &&
                    skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
                    // FIXME check that the gop check above is +-1 correct
                    av_frame_unref(s->input_picture[0]->f);

                    emms_c();
                    ff_vbv_update(s, 0);

                    goto no_output_pic;
                }
            }

            /* two-pass mode: take the picture types recorded in the first
             * pass from the rate-control log */
            if (s->flags & CODEC_FLAG_PASS2) {
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    int pict_num = s->input_picture[0]->f->display_picture_number + i;

                    if (pict_num >= s->rc_context.num_entries)
                        break;
                    if (!s->input_picture[i]) {
                        /* queue ran dry: force the previous entry to P */
                        s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                        break;
                    }

                    s->input_picture[i]->f->pict_type =
                        s->rc_context.entry[pict_num].new_pict_type;
                }
            }

            if (s->avctx->b_frame_strategy == 0) {
                /* strategy 0: always use the maximum available run */
                b_frames = s->max_b_frames;
                while (b_frames && !s->input_picture[b_frames])
                    b_frames--;
            } else if (s->avctx->b_frame_strategy == 1) {
                /* strategy 1: stop the B-run where the intra-block count
                 * (scene change indicator) gets too high */
                for (i = 1; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] &&
                        s->input_picture[i]->b_frame_score == 0) {
                        s->input_picture[i]->b_frame_score =
                            get_intra_count(s,
                                            s->input_picture[i ]->f->data[0],
                                            s->input_picture[i - 1]->f->data[0],
                                            s->linesize) + 1;
                    }
                }
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] == NULL ||
                        s->input_picture[i]->b_frame_score - 1 >
                            s->mb_num / s->avctx->b_sensitivity)
                        break;
                }

                b_frames = FFMAX(0, i - 1);

                /* reset scores */
                for (i = 0; i < b_frames + 1; i++) {
                    s->input_picture[i]->b_frame_score = 0;
                }
            } else if (s->avctx->b_frame_strategy == 2) {
                /* strategy 2: brute-force rate-distortion search */
                b_frames = estimate_best_b_count(s);
            } else {
                av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
                b_frames = 0;
            }

            emms_c();

            /* honor picture types forced by the user/pass-2 log: cut the
             * B-run short of any forced non-B picture */
            for (i = b_frames - 1; i >= 0; i--) {
                int type = s->input_picture[i]->f->pict_type;
                if (type && type != AV_PICTURE_TYPE_B)
                    b_frames = i;
            }
            if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
                b_frames == s->max_b_frames) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "warning, too many b frames in a row\n");
            }

            /* GOP boundary handling: either trim the B-run (strict GOP) or
             * promote the anchor to an I-frame */
            if (s->picture_in_gop_number + b_frames >= s->gop_size) {
                if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                    s->gop_size > s->picture_in_gop_number) {
                    b_frames = s->gop_size - s->picture_in_gop_number - 1;
                } else {
                    if (s->flags & CODEC_FLAG_CLOSED_GOP)
                        b_frames = 0;
                    s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
                }
            }

            /* in a closed GOP no B-frame may reference across an I-frame */
            if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
                s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
                b_frames--;

            /* emit the anchor first, then the B-frames in display order */
            s->reordered_input_picture[0] = s->input_picture[b_frames];
            if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
                s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
            s->reordered_input_picture[0]->f->coded_picture_number =
                s->coded_picture_number++;
            for (i = 0; i < b_frames; i++) {
                s->reordered_input_picture[i + 1] = s->input_picture[i];
                s->reordered_input_picture[i + 1]->f->pict_type =
                    AV_PICTURE_TYPE_B;
                s->reordered_input_picture[i + 1]->f->coded_picture_number =
                    s->coded_picture_number++;
            }
        }
    }
no_output_pic:
    if (s->reordered_input_picture[0]) {
        /* non-B pictures are references (both fields -> 3) */
        s->reordered_input_picture[0]->reference =
            s->reordered_input_picture[0]->f->pict_type !=
                AV_PICTURE_TYPE_B ? 3 : 0;

        ff_mpeg_unref_picture(s, &s->new_picture);
        if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
            return ret;

        if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
            // input is a shared pix, so we can't modify it -> allocate a new
            // one and ensure that the shared one is reusable

            Picture *pic;
            int i = ff_find_unused_picture(s, 0);
            if (i < 0)
                return i;
            pic = &s->picture[i];

            pic->reference = s->reordered_input_picture[0]->reference;
            if (ff_alloc_picture(s, pic, 0) < 0) {
                return -1;
            }

            ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
            if (ret < 0)
                return ret;

            /* mark us unused / free shared pic */
            av_frame_unref(s->reordered_input_picture[0]->f);
            s->reordered_input_picture[0]->shared = 0;

            s->current_picture_ptr = pic;
        } else {
            // input is not a shared pix -> reuse buffer for current_pix
            s->current_picture_ptr = s->reordered_input_picture[0];
            for (i = 0; i < 4; i++) {
                /* point past the edge padding of the reused input buffer */
                s->new_picture.f->data[i] += INPLACE_OFFSET;
            }
        }
        ff_mpeg_unref_picture(s, &s->current_picture);
        if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
                                       s->current_picture_ptr)) < 0)
            return ret;

        s->picture_number = s->new_picture.f->display_picture_number;
    } else {
        /* nothing to encode this round (e.g. frame was skipped) */
        ff_mpeg_unref_picture(s, &s->new_picture);
    }
    return 0;
}
1346
1347 static void frame_end(MpegEncContext *s)
1348 {
1349 int i;
1350
1351 if (s->unrestricted_mv &&
1352 s->current_picture.reference &&
1353 !s->intra_only) {
1354 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1355 int hshift = desc->log2_chroma_w;
1356 int vshift = desc->log2_chroma_h;
1357 s->dsp.draw_edges(s->current_picture.f->data[0], s->linesize,
1358 s->h_edge_pos, s->v_edge_pos,
1359 EDGE_WIDTH, EDGE_WIDTH,
1360 EDGE_TOP | EDGE_BOTTOM);
1361 s->dsp.draw_edges(s->current_picture.f->data[1], s->uvlinesize,
1362 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1363 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1364 EDGE_TOP | EDGE_BOTTOM);
1365 s->dsp.draw_edges(s->current_picture.f->data[2], s->uvlinesize,
1366 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1367 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1368 EDGE_TOP | EDGE_BOTTOM);
1369 }
1370
1371 emms_c();
1372
1373 s->last_pict_type = s->pict_type;
1374 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1375 if (s->pict_type!= AV_PICTURE_TYPE_B)
1376 s->last_non_b_pict_type = s->pict_type;
1377
1378 if (s->encoding) {
1379 /* release non-reference frames */
1380 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1381 if (!s->picture[i].reference)
1382 ff_mpeg_unref_picture(s, &s->picture[i]);
1383 }
1384 }
1385
1386 s->avctx->coded_frame = s->current_picture_ptr->f;
1387
1388 }
1389
1390 static void update_noise_reduction(MpegEncContext *s)
1391 {
1392 int intra, i;
1393
1394 for (intra = 0; intra < 2; intra++) {
1395 if (s->dct_count[intra] > (1 << 16)) {
1396 for (i = 0; i < 64; i++) {
1397 s->dct_error_sum[intra][i] >>= 1;
1398 }
1399 s->dct_count[intra] >>= 1;
1400 }
1401
1402 for (i = 0; i < 64; i++) {
1403 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1404 s->dct_count[intra] +
1405 s->dct_error_sum[intra][i] / 2) /
1406 (s->dct_error_sum[intra][i] + 1);
1407 }
1408 }
1409 }
1410
/**
 * Prepare encoder state for coding the current picture: rotate and
 * re-reference the last/next reference pictures, adjust plane pointers
 * and strides for field pictures, select the dequantizers matching the
 * output format, and update the noise-reduction tables.
 *
 * @return 0 on success, a negative error code on failure
 */
static int frame_start(MpegEncContext *s)
{
    int ret;

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s, s->last_picture_ptr);
    }

    s->current_picture_ptr->f->pict_type = s->pict_type;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_mpeg_unref_picture(s, &s->current_picture);
    if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* a non-B frame becomes the new backward reference; unless droppable
     * it is also the forward reference for the following frames */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }

    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->last_picture);
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->next_picture);
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    /* field pictures: address only the selected field and double the
     * strides so one field appears as a contiguous picture */
    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i] *= 2;
            s->next_picture.f->linesize[i] *= 2;
        }
    }

    /* select the dequantizer set matching the bitstream syntax */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);
    }

    return 0;
}
1482
/**
 * Encode one video frame.
 *
 * Queues the input picture, selects the next picture to code (possibly a
 * reordered B-frame), runs the encoder core with VBV-overflow retries,
 * applies bitstream stuffing and vbv_delay patching for CBR MPEG-1/2 and
 * fills in the output packet with pts/dts and flags.
 *
 * @param avctx      codec context (priv_data is the MpegEncContext)
 * @param pkt        output packet; allocated here if it has no buffer yet
 * @param pic_arg    input frame, or NULL to flush delayed frames
 * @param got_packet set to 1 when a packet was produced
 * @return 0 on success, a negative error code on failure
 */
int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                          const AVFrame *pic_arg, int *got_packet)
{
    MpegEncContext *s = avctx->priv_data;
    int i, stuffing_count, ret;
    int context_count = s->slice_context_count;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_picture.f->data[0]) {
        if (!pkt->data &&
            (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
            return ret;
        if (s->mb_info) {
            /* side data receiving per-macroblock info (H.263 RTP payload) */
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                                     AV_PKT_DATA_H263_MB_INFO,
                                                     s->mb_width*s->mb_height*12);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        /* give each slice thread its proportional share of the packet buffer */
        for (i = 0; i < context_count; i++) {
            int start_y = s->thread_context[i]->start_mb_y;
            int end_y = s->thread_context[i]-> end_mb_y;
            int h = s->mb_height;
            uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
            uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);

            init_put_bits(&s->thread_context[i]->pb, start, end - start);
        }

        s->pict_type = s->new_picture.f->pict_type;
        //emms_c();
        ret = frame_start(s);
        if (ret < 0)
            return ret;
vbv_retry:
        if (encode_picture(s, s->picture_number) < 0)
            return -1;

        /* export per-frame bit statistics */
        avctx->header_bits = s->header_bits;
        avctx->mv_bits = s->mv_bits;
        avctx->misc_bits = s->misc_bits;
        avctx->i_tex_bits = s->i_tex_bits;
        avctx->p_tex_bits = s->p_tex_bits;
        avctx->i_count = s->i_count;
        // FIXME f/b_count in avctx
        avctx->p_count = s->mb_num - s->i_count - s->skip_count;
        avctx->skip_count = s->skip_count;

        frame_end(s);

        if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);

        /* VBV check: if the frame overflows the rate-control buffer, raise
         * lambda, undo side effects of the failed attempt and re-encode */
        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;

            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->avctx->lmax) {
                s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + 1,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0; // done in frame_start()
                // done in encode_picture() so we must undo it
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding ||
                        s->codec_id == AV_CODEC_ID_H263P ||
                        s->codec_id == AV_CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                /* rewind the bitstream writers of all slice threads */
                for (i = 0; i < context_count; i++) {
                    PutBitContext *pb = &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                }
                goto vbv_retry;
            }

            assert(s->avctx->rc_max_rate);
        }

        if (s->flags & CODEC_FLAG_PASS1)
            ff_write_pass1_stats(s);

        for (i = 0; i < 4; i++) {
            s->current_picture_ptr->f->error[i] = s->current_picture.f->error[i];
            avctx->error[i] += s->current_picture_ptr->f->error[i];
        }

        if (s->flags & CODEC_FLAG_PASS1)
            assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
                   avctx->i_tex_bits + avctx->p_tex_bits ==
                   put_bits_count(&s->pb));
        flush_put_bits(&s->pb);
        s->frame_bits = put_bits_count(&s->pb);

        /* pad the frame with stuffing bytes if rate control demands it */
        stuffing_count = ff_vbv_update(s, s->frame_bits);
        if (stuffing_count) {
            if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
                stuffing_count + 50) {
                av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
            case AV_CODEC_ID_MPEG1VIDEO:
            case AV_CODEC_ID_MPEG2VIDEO:
                /* MPEG-1/2: zero stuffing bytes */
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case AV_CODEC_ID_MPEG4:
                /* MPEG-4: stuffing start code followed by 0xFF bytes */
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
            }
            flush_put_bits(&s->pb);
            s->frame_bits = put_bits_count(&s->pb);
        }

        /* update mpeg1/2 vbv_delay for CBR */
        if (s->avctx->rc_max_rate &&
            s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
            s->out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
            s->avctx->rc_max_rate * 0xFFFFLL) {
            int vbv_delay, min_delay;
            double inbits = s->avctx->rc_max_rate *
                            av_q2d(s->avctx->time_base);
            int minbits = s->frame_bits - 8 *
                          (s->vbv_delay_ptr - s->pb.buf - 1);
            double bits = s->rc_context.buffer_index + minbits - inbits;

            if (bits < 0)
                av_log(s->avctx, AV_LOG_ERROR,
                       "Internal error, negative bits\n");

            assert(s->repeat_first_field == 0);

            /* vbv_delay is expressed in 90 kHz clock ticks */
            vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
                        s->avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            assert(vbv_delay < 0xFFFF);

            /* patch the 16-bit vbv_delay field into the already-written
             * picture header (it straddles byte boundaries) */
            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay >> 13;
            s->vbv_delay_ptr[1] = vbv_delay >> 5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay << 3;
            avctx->vbv_delay = vbv_delay * 300;
        }
        s->total_bits += s->frame_bits;
        avctx->frame_bits = s->frame_bits;

        /* timestamps: B-frame reordering delays the dts of references */
        pkt->pts = s->current_picture.f->pts;
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->current_picture.f->coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;
        if (s->current_picture.f->key_frame)
            pkt->flags |= AV_PKT_FLAG_KEY;
        if (s->mb_info)
            av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
    } else {
        s->frame_bits = 0;
    }
    assert((s->frame_bits & 7) == 0);

    pkt->size = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
1687
1688 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1689 int n, int threshold)
1690 {
1691 static const char tab[64] = {
1692 3, 2, 2, 1, 1, 1, 1, 1,
1693 1, 1, 1, 1, 1, 1, 1, 1,
1694 1, 1, 1, 1, 1, 1, 1, 1,
1695 0, 0, 0, 0, 0, 0, 0, 0,
1696 0, 0, 0, 0, 0, 0, 0, 0,
1697 0, 0, 0, 0, 0, 0, 0, 0,
1698 0, 0, 0, 0, 0, 0, 0, 0,
1699 0, 0, 0, 0, 0, 0, 0, 0
1700 };
1701 int score = 0;
1702 int run = 0;
1703 int i;
1704 int16_t *block = s->block[n];
1705 const int last_index = s->block_last_index[n];
1706 int skip_dc;
1707
1708 if (threshold < 0) {
1709 skip_dc = 0;
1710 threshold = -threshold;
1711 } else
1712 skip_dc = 1;
1713
1714 /* Are all we could set to zero already zero? */
1715 if (last_index <= skip_dc - 1)
1716 return;
1717
1718 for (i = 0; i <= last_index; i++) {
1719 const int j = s->intra_scantable.permutated[i];
1720 const int level = FFABS(block[j]);
1721 if (level == 1) {
1722 if (skip_dc && i == 0)
1723 continue;
1724 score += tab[run];
1725 run = 0;
1726 } else if (level > 1) {
1727 return;
1728 } else {
1729 run++;
1730 }
1731 }
1732 if (score >= threshold)
1733 return;
1734 for (i = skip_dc; i <= last_index; i++) {
1735 const int j = s->intra_scantable.permutated[i];
1736 block[j] = 0;
1737 }
1738 if (block[0])
1739 s->block_last_index[n] = 0;
1740 else
1741 s->block_last_index[n] = -1;
1742 }
1743
1744 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1745 int last_index)
1746 {
1747 int i;
1748 const int maxlevel = s->max_qcoeff;
1749 const int minlevel = s->min_qcoeff;
1750 int overflow = 0;
1751
1752 if (s->mb_intra) {
1753 i = 1; // skip clipping of intra dc
1754 } else
1755 i = 0;
1756
1757 for (; i <= last_index; i++) {
1758 const int j = s->intra_scantable.permutated[i];
1759 int level = block[j];
1760
1761 if (level > maxlevel) {
1762 level = maxlevel;
1763 overflow++;
1764 } else if (level < minlevel) {
1765 level = minlevel;
1766 overflow++;
1767 }
1768
1769 block[j] = level;
1770 }
1771
1772 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1773 av_log(s->avctx, AV_LOG_INFO,
1774 "warning, clipping %d dct coefficients to %d..%d\n",
1775 overflow, minlevel, maxlevel);
1776 }
1777
/**
 * Compute a perceptual weight for every pixel of an 8x8 block from the
 * local variance of its 3x3 neighbourhood (clipped to the block): flat
 * areas get low weights, textured areas high ones.
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int x, y;
    // FIXME optimize
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int sum = 0, sqr = 0, count = 0;
            int nx, ny;

            /* 3x3 neighbourhood, clipped to the 8x8 block */
            for (ny = FFMAX(y - 1, 0); ny < FFMIN(8, y + 2); ny++) {
                for (nx = FFMAX(x - 1, 0); nx < FFMIN(8, x + 2); nx++) {
                    const int v = ptr[nx + ny * stride];

                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            /* 36 * sqrt(count * E[v^2] - E[v]^2-ish) / count */
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
1801
1802 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1803 int motion_x, int motion_y,
1804 int mb_block_height,
1805 int mb_block_count)
1806 {
1807 int16_t weight[8][64];
1808 int16_t orig[8][64];
1809 const int mb_x = s->mb_x;
1810 const int mb_y = s->mb_y;
1811 int i;
1812 int skip_dct[8];
1813 int dct_offset = s->linesize * 8; // default for progressive frames
1814 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1815 ptrdiff_t wrap_y, wrap_c;
1816
1817 for (i = 0; i < mb_block_count; i++)
1818 skip_dct[i] = s->skipdct;
1819
1820 if (s->adaptive_quant) {
1821 const int last_qp = s->qscale;
1822 const int mb_xy = mb_x + mb_y * s->mb_stride;
1823
1824 s->lambda = s->lambda_table[mb_xy];
1825 update_qscale(s);
1826
1827 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1828 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1829 s->dquant = s->qscale - last_qp;
1830
1831 if (s->out_format == FMT_H263) {
1832 s->dquant = av_clip(s->dquant, -2, 2);
1833
1834 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1835 if (!s->mb_intra) {
1836 if (s->pict_type == AV_PICTURE_TYPE_B) {
1837 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1838 s->dquant = 0;
1839 }
1840 if (s->mv_type == MV_TYPE_8X8)
1841 s->dquant = 0;
1842 }
1843 }
1844 }
1845 }
1846 ff_set_qscale(s, last_qp + s->dquant);
1847 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1848 ff_set_qscale(s, s->qscale + s->dquant);
1849
1850 wrap_y = s->linesize;
1851 wrap_c = s->uvlinesize;
1852 ptr_y = s->new_picture.f->data[0] +
1853 (mb_y * 16 * wrap_y) + mb_x * 16;
1854 ptr_cb = s->new_picture.f->data[1] +
1855 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1856 ptr_cr = s->new_picture.f->data[2] +
1857 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1858
1859 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1860 uint8_t *ebuf = s->edge_emu_buffer + 32;
1861 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
1862 wrap_y, wrap_y,
1863 16, 16, mb_x * 16, mb_y * 16,
1864 s->width, s->height);
1865 ptr_y = ebuf;
1866 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
1867 wrap_c, wrap_c,
1868 8, mb_block_height, mb_x * 8, mb_y * 8,
1869 s->width >> 1, s->height >> 1);
1870 ptr_cb = ebuf + 18 * wrap_y;
1871 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
1872 wrap_c, wrap_c,
1873 8, mb_block_height, mb_x * 8, mb_y * 8,
1874 s->width >> 1, s->height >> 1);
1875 ptr_cr = ebuf + 18 * wrap_y + 8;
1876 }
1877
1878 if (s->mb_intra) {
1879 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1880 int progressive_score, interlaced_score;
1881
1882 s->interlaced_dct = 0;
1883 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1884 NULL, wrap_y, 8) +
1885 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1886 NULL, wrap_y, 8) - 400;
1887
1888 if (progressive_score > 0) {
1889 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1890 NULL, wrap_y * 2, 8) +
1891 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1892 NULL, wrap_y * 2, 8);
1893 if (progressive_score > interlaced_score) {
1894 s->interlaced_dct = 1;
1895
1896 dct_offset = wrap_y;
1897 wrap_y <<= 1;
1898 if (s->chroma_format == CHROMA_422)
1899 wrap_c <<= 1;
1900 }
1901 }
1902 }
1903
1904 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1905 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1906 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1907 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1908
1909 if (s->flags & CODEC_FLAG_GRAY) {
1910 skip_dct[4] = 1;
1911 skip_dct[5] = 1;
1912 } else {
1913 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1914 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1915 if (!s->chroma_y_shift) { /* 422 */
1916 s->dsp.get_pixels(s->block[6],
1917 ptr_cb + (dct_offset >> 1), wrap_c);
1918 s->dsp.get_pixels(s->block[7],
1919 ptr_cr + (dct_offset >> 1), wrap_c);
1920 }
1921 }
1922 } else {
1923 op_pixels_func (*op_pix)[4];
1924 qpel_mc_func (*op_qpix)[16];
1925 uint8_t *dest_y, *dest_cb, *dest_cr;
1926
1927 dest_y = s->dest[0];
1928 dest_cb = s->dest[1];
1929 dest_cr = s->dest[2];
1930
1931 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1932 op_pix = s->hdsp.put_pixels_tab;
1933 op_qpix = s->dsp.put_qpel_pixels_tab;
1934 } else {
1935 op_pix = s->hdsp.put_no_rnd_pixels_tab;
1936 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1937 }
1938
1939 if (s->mv_dir & MV_DIR_FORWARD) {
1940 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1941 s->last_picture.f->data,
1942 op_pix, op_qpix);
1943 op_pix = s->hdsp.avg_pixels_tab;
1944 op_qpix = s->dsp.avg_qpel_pixels_tab;
1945 }
1946 if (s->mv_dir & MV_DIR_BACKWARD) {
1947 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1948 s->next_picture.f->data,
1949 op_pix, op_qpix);
1950 }
1951
1952 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1953 int progressive_score, interlaced_score;
1954
1955 s->interlaced_dct = 0;
1956 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1957 ptr_y, wrap_y,
1958 8) +
1959 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1960 ptr_y + wrap_y * 8, wrap_y,
1961 8) - 400;
1962
1963 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1964 progressive_score -= 400;
1965
1966 if (progressive_score > 0) {
1967 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1968 ptr_y,
1969 wrap_y * 2, 8) +
1970 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1971 ptr_y + wrap_y,
1972 wrap_y * 2, 8);
1973
1974 if (progressive_score > interlaced_score) {
1975 s->interlaced_dct = 1;
1976
1977 dct_offset = wrap_y;
1978 wrap_y <<= 1;
1979 if (s->chroma_format == CHROMA_422)
1980 wrap_c <<= 1;
1981 }
1982 }
1983 }
1984
1985 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1986 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1987 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1988 dest_y + dct_offset, wrap_y);
1989 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1990 dest_y + dct_offset + 8, wrap_y);
1991
1992 if (s->flags & CODEC_FLAG_GRAY) {
1993 skip_dct[4] = 1;
1994 skip_dct[5] = 1;
1995 } else {
1996 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1997 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1998 if (!s->chroma_y_shift) { /* 422 */
1999 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
2000 dest_cb + (dct_offset >> 1), wrap_c);
2001 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
2002 dest_cr + (dct_offset >> 1), wrap_c);
2003 }
2004 }
2005 /* pre quantization */
2006 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2007 2 * s->qscale * s->qscale) {
2008 // FIXME optimize
2009 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
2010 wrap_y, 8) < 20 * s->qscale)
2011 skip_dct[0] = 1;
2012 if (s->dsp.sad[1](NULL, ptr_y + 8,
2013 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2014 skip_dct[1] = 1;
2015 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
2016 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
2017 skip_dct[2] = 1;
2018 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
2019 dest_y + dct_offset + 8,
2020 wrap_y, 8) < 20 * s->qscale)
2021 skip_dct[3] = 1;
2022 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
2023 wrap_c, 8) < 20 * s->qscale)
2024 skip_dct[4] = 1;
2025 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
2026 wrap_c, 8) < 20 * s->qscale)
2027 skip_dct[5] = 1;
2028 if (!s->chroma_y_shift) { /* 422 */
2029 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
2030 dest_cb + (dct_offset >> 1),
2031 wrap_c, 8) < 20 * s->qscale)
2032 skip_dct[6] = 1;
2033 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
2034 dest_cr + (dct_offset >> 1),
2035 wrap_c, 8) < 20 * s->qscale)
2036 skip_dct[7] = 1;
2037 }
2038 }
2039 }
2040
2041 if (s->quantizer_noise_shaping) {
2042 if (!skip_dct[0])
2043 get_visual_weight(weight[0], ptr_y , wrap_y);
2044 if (!skip_dct[1])
2045 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2046 if (!skip_dct[2])
2047 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2048 if (!skip_dct[3])
2049 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2050 if (!skip_dct[4])
2051 get_visual_weight(weight[4], ptr_cb , wrap_c);
2052 if (!skip_dct[5])
2053 get_visual_weight(weight[5], ptr_cr , wrap_c);
2054 if (!s->chroma_y_shift) { /* 422 */
2055 if (!skip_dct[6])
2056 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
2057 wrap_c);
2058 if (!skip_dct[7])
2059 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
2060 wrap_c);
2061 }
2062 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2063 }
2064
2065 /* DCT & quantize */
2066 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
2067 {
2068 for (i = 0; i < mb_block_count; i++) {
2069 if (!skip_dct[i]) {
2070 int overflow;
2071 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2072 // FIXME we could decide to change to quantizer instead of
2073 // clipping
2074 // JS: I don't think that would be a good idea it could lower
2075 // quality instead of improve it. Just INTRADC clipping
2076 // deserves changes in quantizer
2077 if (overflow)
2078 clip_coeffs(s, s->block[i], s->block_last_index[i]);
2079 } else
2080 s->block_last_index[i] = -1;
2081 }
2082 if (s->quantizer_noise_shaping) {
2083 for (i = 0; i < mb_block_count; i++) {
2084 if (!skip_dct[i]) {
2085 s->block_last_index[i] =
2086 dct_quantize_refine(s, s->block[i], weight[i],
2087 orig[i], i, s->qscale);
2088 }
2089 }
2090 }
2091
2092 if (s->luma_elim_threshold && !s->mb_intra)
2093 for (i = 0; i < 4; i++)
2094 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2095 if (s->chroma_elim_threshold && !s->mb_intra)
2096 for (i = 4; i < mb_block_count; i++)
2097 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2098
2099 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2100 for (i = 0; i < mb_block_count; i++) {
2101 if (s->block_last_index[i] == -1)
2102 s->coded_score[i] = INT_MAX / 256;
2103 }
2104 }
2105 }
2106
2107 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2108 s->block_last_index[4] =
2109 s->block_last_index[5] = 0;
2110 s->block[4][0] =
2111 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2112 }
2113
2114 // non c quantize code returns incorrect block_last_index FIXME
2115 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2116 for (i = 0; i < mb_block_count; i++) {
2117 int j;
2118 if (s->block_last_index[i] > 0) {
2119 for (j = 63; j > 0; j--) {
2120 if (s->block[i][s->intra_scantable.permutated[j]])
2121 break;
2122 }
2123 s->block_last_index[i] = j;
2124 }
2125 }
2126 }
2127
2128 /* huffman encode */
2129 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2130 case AV_CODEC_ID_MPEG1VIDEO:
2131 case AV_CODEC_ID_MPEG2VIDEO:
2132 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2133 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2134 break;
2135 case AV_CODEC_ID_MPEG4:
2136 if (CONFIG_MPEG4_ENCODER)
2137 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2138 break;
2139 case AV_CODEC_ID_MSMPEG4V2:
2140 case AV_CODEC_ID_MSMPEG4V3:
2141 case AV_CODEC_ID_WMV1:
2142 if (CONFIG_MSMPEG4_ENCODER)
2143 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2144 break;
2145 case AV_CODEC_ID_WMV2:
2146 if (CONFIG_WMV2_ENCODER)
2147 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2148 break;
2149 case AV_CODEC_ID_H261:
2150 if (CONFIG_H261_ENCODER)
2151 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2152 break;
2153 case AV_CODEC_ID_H263:
2154 case AV_CODEC_ID_H263P:
2155 case AV_CODEC_ID_FLV1:
2156 case AV_CODEC_ID_RV10:
2157 case AV_CODEC_ID_RV20:
2158 if (CONFIG_H263_ENCODER)
2159 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2160 break;
2161 case AV_CODEC_ID_MJPEG:
2162 if (CONFIG_MJPEG_ENCODER)
2163 ff_mjpeg_encode_mb(s, s->block);
2164 break;
2165 default:
2166 assert(0);
2167 }
2168 }
2169
2170 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2171 {
2172 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2173 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
2174 }
2175
2176 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2177 int i;
2178
2179 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2180
2181 /* mpeg1 */
2182 d->mb_skip_run= s->mb_skip_run;
2183 for(i=0; i<3; i++)
2184 d->last_dc[i] = s->last_dc[i];
2185
2186 /* statistics */
2187 d->mv_bits= s->mv_bits;
2188 d->i_tex_bits= s->i_tex_bits;
2189 d->p_tex_bits= s->p_tex_bits;
2190 d->i_count= s->i_count;
2191 d->f_count= s->f_count;
2192 d->b_count= s->b_count;
2193 d->skip_count= s->skip_count;
2194 d->misc_bits= s->misc_bits;
2195 d->last_bits= 0;
2196
2197 d->mb_skipped= 0;
2198 d->qscale= s->qscale;
2199 d->dquant= s->dquant;
2200
2201 d->esc3_level_length= s->esc3_level_length;
2202 }
2203
2204 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2205 int i;
2206
2207 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2208 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2209
2210 /* mpeg1 */
2211 d->mb_skip_run= s->mb_skip_run;
2212 for(i=0; i<3; i++)
2213 d->last_dc[i] = s->last_dc[i];
2214
2215 /* statistics */
2216 d->mv_bits= s->mv_bits;
2217 d->i_tex_bits= s->i_tex_bits;
2218 d->p_tex_bits= s->p_tex_bits;
2219 d->i_count= s->i_count;
2220 d->f_count= s->f_count;
2221 d->b_count= s->b_count;
2222 d->skip_count= s->skip_count;
2223 d->misc_bits= s->misc_bits;
2224
2225 d->mb_intra= s->mb_intra;
2226 d->mb_skipped= s->mb_skipped;
2227 d->mv_type= s->mv_type;
2228 d->mv_dir= s->mv_dir;
2229 d->pb= s->pb;
2230 if(s->data_partitioning){
2231 d->pb2= s->pb2;
2232 d->tex_pb= s->tex_pb;
2233 }
2234 d->block= s->block;
2235 for(i=0; i<8; i++)
2236 d->block_last_index[i]= s->block_last_index[i];
2237 d->interlaced_dct= s->interlaced_dct;
2238 d->qscale= s->qscale;
2239
2240 d->esc3_level_length= s->esc3_level_length;
2241 }
2242
/**
 * Trial-encode one macroblock and keep the result if it scores better than
 * the current best candidate.
 *
 * The caller supplies two alternating scratch bit buffers per stream
 * (pb/pb2/tex_pb); *next_block selects which one this trial writes into.
 * When the trial wins, *next_block is flipped so the losing buffer is
 * reused for the next trial, and the winning state is copied into *best.
 *
 * @param s          scratch encoder context the trial runs in
 * @param backup     context state to restore before encoding (saved earlier)
 * @param best       receives the encoder state of the best candidate so far
 * @param type       candidate macroblock type (passed through to the
 *                   context-copy helpers, which currently ignore it)
 * @param dmin       in/out: best (lowest) score seen so far
 * @param next_block in/out: index (0/1) of the scratch buffer to use
 */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                           PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                           int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    /* roll the context back to the pre-encode snapshot */
    copy_context_before_encode(s, backup, type);

    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2 = pb2 [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    /* For the second candidate, reconstruct into the RD scratchpad instead
     * of the real picture so a losing trial leaves no visible trace. */
    if(*next_block){
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->rd_scratchpad;
        s->dest[1] = s->rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
        assert(s->linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    /* base score: number of bits spent on this macroblock */
    score= put_bits_count(&s->pb);
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    /* full rate-distortion mode: decode the MB back and add the weighted
     * reconstruction error (score = lambda2*bits + SSE<<FF_LAMBDA_SHIFT) */
    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        ff_MPV_decode_mb(s, s->block);

        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    /* restore the real reconstruction pointers */
    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;

        copy_context_after_encode(best, s, type);
    }
}
2293
2294 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2295 uint32_t *sq = ff_square_tab + 256;
2296 int acc=0;
2297 int x,y;
2298
2299 if(w==16 && h==16)
2300 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2301 else if(w==8 && h==8)
2302 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2303
2304 for(y=0; y<h; y++){
2305 for(x=0; x<w; x++){
2306 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2307 }
2308 }
2309
2310 assert(acc>=0);
2311
2312 return acc;
2313 }
2314
2315 static int sse_mb(MpegEncContext *s){
2316 int w= 16;
2317 int h= 16;
2318
2319 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2320 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2321
2322 if(w==16 && h==16)
2323 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2324 return s->dsp.nsse[0](s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2325 +s->dsp.nsse[1](s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2326 +s->dsp.nsse[1](s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2327 }else{
2328 return s->dsp.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2329 +s->dsp.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2330 +s->dsp.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2331 }
2332 else
2333 return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2334 +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2335 +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2336 }
2337
/**
 * Slice worker for the motion-estimation pre-pass.
 * Scans this thread's macroblock rows bottom-up and right-to-left; the
 * loop variables are the context fields s->mb_y / s->mb_x because the
 * estimation callee reads the current position from the context.
 * Always returns 0 (avpriv_exec thread-function signature).
 */
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;


    s->me.pre_pass=1;
    s->me.dia_size= s->avctx->pre_dia_size;   // pre-pass uses its own diamond size
    s->first_slice_line=1;
    for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
        for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
            ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line=0;   // only the first processed row counts as a slice border
    }

    s->me.pre_pass=0;

    return 0;
}
2356
/**
 * Slice worker for the main motion-estimation pass.
 * Runs P- or B-frame motion estimation for every macroblock in the row
 * range [start_mb_y, end_mb_y) of this thread's context.  The loop
 * variables are the context fields s->mb_y / s->mb_x because the
 * estimation callees read the current position from the context.
 * Always returns 0 (avpriv_exec thread-function signature).
 */
static int estimate_motion_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;

    s->me.dia_size= s->avctx->dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x=0; //for block init below
        ff_init_block_index(s);
        for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
            /* advance the per-plane block indices to the current MB column */
            s->block_index[0]+=2;
            s->block_index[1]+=2;
            s->block_index[2]+=2;
            s->block_index[3]+=2;

            /* compute motion vector & mb_type and store in context */
            if(s->pict_type==AV_PICTURE_TYPE_B)
                ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
            else
                ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line=0;
    }
    return 0;
}
2381
2382 static int mb_var_thread(AVCodecContext *c, void *arg){
2383 MpegEncContext *s= *(void**)arg;
2384 int mb_x, mb_y;
2385
2386 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2387 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2388 int xx = mb_x * 16;
2389 int yy = mb_y * 16;
2390 uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2391 int varc;
2392 int sum = s->dsp.pix_sum(pix, s->linesize);
2393
2394 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2395
2396 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2397 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2398 s->me.mb_var_sum_temp += varc;
2399 }
2400 }
2401 return 0;
2402 }
2403
/**
 * Terminate the current slice / packet: merge MPEG-4 data partitions and
 * emit stuffing bits (MPEG-4), or emit MJPEG stuffing, then byte-align and
 * flush the bitstream writer.  The call order matters: partitions must be
 * merged into s->pb before stuffing and alignment.
 */
static void write_slice_end(MpegEncContext *s){
    if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
        if(s->partitioned_frame){
            ff_mpeg4_merge_partitions(s);
        }

        ff_mpeg4_stuffing(&s->pb);
    }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
        ff_mjpeg_encode_stuffing(&s->pb);
    }

    avpriv_align_put_bits(&s->pb);
    flush_put_bits(&s->pb);

    /* first-pass rate control: account the stuffing/alignment bits as misc */
    if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
}
2421
2422 static void write_mb_info(MpegEncContext *s)
2423 {
2424 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2425 int offset = put_bits_count(&s->pb);
2426 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2427 int gobn = s->mb_y / s->gob_index;
2428 int pred_x, pred_y;
2429 if (CONFIG_H263_ENCODER)
2430 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2431 bytestream_put_le32(&ptr, offset);
2432 bytestream_put_byte(&ptr, s->qscale);
2433 bytestream_put_byte(&ptr, gobn);
2434 bytestream_put_le16(&ptr, mba);
2435 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2436 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2437 /* 4MV not implemented */
2438 bytestream_put_byte(&ptr, 0); /* hmv2 */
2439 bytestream_put_byte(&ptr, 0); /* vmv2 */
2440 }
2441
/**
 * Maintain the table of per-macroblock info records: reserve a new 12-byte
 * slot roughly every s->mb_info bytes of coded bitstream, and fill the
 * current slot via write_mb_info().  No-op when mb_info export is disabled.
 *
 * @param startcode non-zero when called right before a startcode is
 *                  written; then only the byte position is recorded and
 *                  the slot is filled on the following call.
 */
static void update_mb_info(MpegEncContext *s, int startcode)
{
    if (!s->mb_info)
        return;
    /* enough coded bytes since the last record: reserve the next slot */
    if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }
    if (startcode) {
        s->prev_mb_info = put_bits_count(&s->pb)/8;
        /* This might have incremented mb_info_size above, and we return
         * without actually writing any info into that slot yet. But in that
         * case, this function is called again after the start code has been
         * written, and the mb info is written then. */
        return;
    }

    s->last_mb_info = put_bits_count(&s->pb)/8;
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
2464
2465 static int encode_thread(AVCodecContext *c, void *arg){
2466 MpegEncContext *s= *(void**)arg;
2467 int mb_x, mb_y, pdif = 0;
2468 int chr_h= 16>>s->chroma_y_shift;
2469 int i, j;
2470 MpegEncContext best_s, backup_s;
2471 uint8_t bit_buf[2][MAX_MB_BYTES];
2472 uint8_t bit_buf2[2][MAX_MB_BYTES];
2473 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2474 PutBitContext pb[2], pb2[2], tex_pb[2];
2475
2476 for(i=0; i<2; i++){
2477 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2478 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2479 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2480 }
2481
2482 s->last_bits= put_bits_count(&s->pb);
2483 s->mv_bits=0;
2484 s->misc_bits=0;
2485 s->i_tex_bits=0;
2486 s->p_tex_bits=0;
2487 s->i_count=0;
2488 s->f_count=0;
2489 s->b_count=0;
2490 s->skip_count=0;
2491
2492 for(i=0; i<3; i++){
2493 /* init last dc values */
2494 /* note: quant matrix value (8) is implied here */
2495 s->last_dc[i] = 128 << s->intra_dc_precision;
2496
2497 s->current_picture.f->error[i] = 0;
2498 }
2499 s->mb_skip_run = 0;
2500 memset(s->last_mv, 0, sizeof(s->last_mv));
2501
2502 s->last_mv_dir = 0;
2503
2504 switch(s->codec_id){
2505 case AV_CODEC_ID_H263:
2506 case AV_CODEC_ID_H263P:
2507 case AV_CODEC_ID_FLV1:
2508 if (CONFIG_H263_ENCODER)
2509 s->gob_index = ff_h263_get_gob_height(s);
2510 break;
2511 case AV_CODEC_ID_MPEG4:
2512 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2513 ff_mpeg4_init_partitions(s);
2514 break;
2515 }
2516
2517 s->resync_mb_x=0;
2518 s->resync_mb_y=0;
2519 s->first_slice_line = 1;
2520 s->ptr_lastgob = s->pb.buf;
2521 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2522 s->mb_x=0;
2523 s->mb_y= mb_y;
2524
2525 ff_set_qscale(s, s->qscale);
2526 ff_init_block_index(s);
2527
2528 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2529 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2530 int mb_type= s->mb_type[xy];
2531 // int d;
2532 int dmin= INT_MAX;
2533 int dir;
2534
2535 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2536 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2537 return -1;
2538 }
2539 if(s->data_partitioning){
2540 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2541 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2542 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2543 return -1;
2544 }
2545 }
2546
2547 s->mb_x = mb_x;
2548 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2549 ff_update_block_index(s);
2550
2551 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2552 ff_h261_reorder_mb_index(s);
2553 xy= s->mb_y*s->mb_stride + s->mb_x;
2554 mb_type= s->mb_type[xy];
2555 }
2556
2557 /* write gob / video packet header */
2558 if(s->rtp_mode){
2559 int current_packet_size, is_gob_start;
2560
2561 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2562
2563 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2564
2565 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2566
2567 switch(s->codec_id){
2568 case AV_CODEC_ID_H263:
2569 case AV_CODEC_ID_H263P:
2570 if(!s->h263_slice_structured)
2571 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2572 break;
2573 case AV_CODEC_ID_MPEG2VIDEO:
2574 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2575 case AV_CODEC_ID_MPEG1VIDEO:
2576 if(s->mb_skip_run) is_gob_start=0;
2577 break;
2578 }
2579
2580 if(is_gob_start){
2581 if(s->start_mb_y != mb_y || mb_x!=0){
2582 write_slice_end(s);
2583
2584 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2585 ff_mpeg4_init_partitions(s);
2586 }
2587 }
2588
2589 assert((put_bits_count(&s->pb)&7) == 0);
2590 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2591
2592 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2593 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2594 int d = 100 / s->error_rate;
2595 if(r % d == 0){
2596 current_packet_size=0;
2597 s->pb.buf_ptr= s->ptr_lastgob;
2598 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2599 }
2600 }
2601
2602 if (s->avctx->rtp_callback){
2603 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2604 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2605 }
2606 update_mb_info(s, 1);
2607
2608 switch(s->codec_id){
2609 case AV_CODEC_ID_MPEG4:
2610 if (CONFIG_MPEG4_ENCODER) {
2611 ff_mpeg4_encode_video_packet_header(s);
2612 ff_mpeg4_clean_buffers(s);
2613 }
2614 break;
2615 case AV_CODEC_ID_MPEG1VIDEO:
2616 case AV_CODEC_ID_MPEG2VIDEO:
2617 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2618 ff_mpeg1_encode_slice_header(s);
2619 ff_mpeg1_clean_buffers(s);
2620 }
2621 break;
2622 case AV_CODEC_ID_H263:
2623 case AV_CODEC_ID_H263P:
2624 if (CONFIG_H263_ENCODER)
2625 ff_h263_encode_gob_header(s, mb_y);
2626 break;
2627 }
2628
2629 if(s->flags&CODEC_FLAG_PASS1){
2630 int bits= put_bits_count(&s->pb);
2631 s->misc_bits+= bits - s->last_bits;
2632 s->last_bits= bits;
2633 }
2634
2635 s->ptr_lastgob += current_packet_size;
2636 s->first_slice_line=1;
2637 s->resync_mb_x=mb_x;
2638 s->resync_mb_y=mb_y;
2639 }
2640 }
2641
2642 if( (s->resync_mb_x == s->mb_x)
2643 && s->resync_mb_y+1 == s->mb_y){
2644 s->first_slice_line=0;
2645 }
2646
2647 s->mb_skipped=0;
2648 s->dquant=0; //only for QP_RD
2649
2650 update_mb_info(s, 0);
2651
2652 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2653 int next_block=0;
2654 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2655
2656 copy_context_before_encode(&backup_s, s, -1);
2657 backup_s.pb= s->pb;
2658 best_s.data_partitioning= s->data_partitioning;
2659 best_s.partitioned_frame= s->partitioned_frame;
2660 if(s->data_partitioning){
2661 backup_s.pb2= s->pb2;
2662 backup_s.tex_pb= s->tex_pb;
2663 }
2664
2665 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2666 s->mv_dir = MV_DIR_FORWARD;
2667 s->mv_type = MV_TYPE_16X16;
2668 s->mb_intra= 0;
2669 s->mv[0][0][0] = s->p_mv_table[xy][0];
2670 s->mv[0][0][1] = s->p_mv_table[xy][1];
2671 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2672 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2673 }
2674 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2675 s->mv_dir = MV_DIR_FORWARD;
2676 s->mv_type = MV_TYPE_FIELD;
2677 s->mb_intra= 0;
2678 for(i=0; i<2; i++){
2679 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2680 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2681 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2682 }
2683 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2684 &dmin, &next_block, 0, 0);
2685 }
2686 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2687 s->mv_dir = MV_DIR_FORWARD;
2688 s->mv_type = MV_TYPE_16X16;
2689 s->mb_intra= 0;
2690 s->mv[0][0][0] = 0;
2691 s->mv[0][0][1] = 0;
2692 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2693 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2694 }
2695 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2696 s->mv_dir = MV_DIR_FORWARD;
2697 s->mv_type = MV_TYPE_8X8;
2698 s->mb_intra= 0;
2699 for(i=0; i<4; i++){
2700 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2701 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2702 }
2703 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2704 &dmin, &next_block, 0, 0);
2705 }
2706 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2707 s->mv_dir = MV_DIR_FORWARD;
2708 s->mv_type = MV_TYPE_16X16;
2709 s->mb_intra= 0;
2710 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2711 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2712 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2713 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2714 }
2715 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2716 s->mv_dir = MV_DIR_BACKWARD;
2717 s->mv_type = MV_TYPE_16X16;
2718 s->mb_intra= 0;
2719 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2720 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2721 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2722 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2723 }
2724 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2725 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2726 s->mv_type = MV_TYPE_16X16;
2727 s->mb_intra= 0;
2728 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2729 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2730 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2731 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2732 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2733 &dmin, &next_block, 0, 0);
2734 }
2735 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2736 s->mv_dir = MV_DIR_FORWARD;
2737 s->mv_type = MV_TYPE_FIELD;
2738 s->mb_intra= 0;
2739 for(i=0; i<2; i++){
2740 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2741 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2742 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2743 }
2744 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2745 &dmin, &next_block, 0, 0);
2746 }
2747 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2748 s->mv_dir = MV_DIR_BACKWARD;
2749 s->mv_type = MV_TYPE_FIELD;
2750 s->mb_intra= 0;
2751 for(i=0; i<2; i++){
2752 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2753 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2754 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2755 }
2756 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2757 &dmin, &next_block, 0, 0);
2758 }
2759 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2760 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2761 s->mv_type = MV_TYPE_FIELD;
2762 s->mb_intra= 0;
2763 for(dir=0; dir<2; dir++){
2764 for(i=0; i<2; i++){
2765 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2766 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2767 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2768 }
2769 }
2770 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2771 &dmin, &next_block, 0, 0);
2772 }
2773 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2774 s->mv_dir = 0;
2775 s->mv_type = MV_TYPE_16X16;
2776 s->mb_intra= 1;
2777 s->mv[0][0][0] = 0;
2778 s->mv[0][0][1] = 0;
2779 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2780 &dmin, &next_block, 0, 0);
2781 if(s->h263_pred || s->h263_aic){
2782 if(best_s.mb_intra)
2783 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2784 else
2785 ff_clean_intra_table_entries(s); //old mode?
2786 }
2787 }
2788
2789 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2790 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2791 const int last_qp= backup_s.qscale;
2792 int qpi, qp, dc[6];
2793 int16_t ac[6][16];
2794 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2795 static const int dquant_tab[4]={-1,1,-2,2};
2796
2797 assert(backup_s.dquant == 0);
2798
2799 //FIXME intra
2800 s->mv_dir= best_s.mv_dir;
2801 s->mv_type = MV_TYPE_16X16;
2802 s->mb_intra= best_s.mb_intra;
2803 s->mv[0][0][0] = best_s.mv[0][0][0];
2804 s->mv[0][0][1] = best_s.mv[0][0][1];
2805 s->mv[1][0][0] = best_s.mv[1][0][0];
2806 s->mv[1][0][1] = best_s.mv[1][0][1];
2807
2808 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2809 for(; qpi<4; qpi++){
2810 int dquant= dquant_tab[qpi];
2811 qp= last_qp + dquant;
2812 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2813 continue;
2814 backup_s.dquant= dquant;
2815 if(s->mb_intra && s->dc_val[0]){
2816 for(i=0; i<6; i++){
2817 dc[i]= s->dc_val[0][ s->block_index[i] ];
2818 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2819 }
2820 }
2821
2822 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2823 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2824 if(best_s.qscale != qp){
2825 if(s->mb_intra && s->dc_val[0]){
2826 for(i=0; i<6; i++){
2827 s->dc_val[0][ s->block_index[i] ]= dc[i];
2828 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2829 }
2830 }
2831 }
2832 }
2833 }
2834 }
2835 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2836 int mx= s->b_direct_mv_table[xy][0];
2837 int my= s->b_direct_mv_table[xy][1];
2838
2839 backup_s.dquant = 0;
2840 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2841 s->mb_intra= 0;
2842 ff_mpeg4_set_direct_mv(s, mx, my);
2843 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2844 &dmin, &next_block, mx, my);