2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timer.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
47 #include "mjpegenc_common.h"
49 #include "mpegutils.h"
52 #include "pixblockdsp.h"
56 #include "aandcttab.h"
58 #include "mpeg4video.h"
60 #include "bytestream.h"
65 #define QUANT_BIAS_SHIFT 8
67 #define QMAT_SHIFT_MMX 16
70 static int encode_picture(MpegEncContext
*s
, int picture_number
);
71 static int dct_quantize_refine(MpegEncContext
*s
, int16_t *block
, int16_t *weight
, int16_t *orig
, int n
, int qscale
);
72 static int sse_mb(MpegEncContext
*s
);
73 static void denoise_dct_c(MpegEncContext
*s
, int16_t *block
);
74 static int dct_quantize_trellis_c(MpegEncContext
*s
, int16_t *block
, int n
, int qscale
, int *overflow
);
76 static uint8_t default_mv_penalty
[MAX_FCODE
+ 1][MAX_MV
* 2 + 1];
77 static uint8_t default_fcode_tab
[MAX_MV
* 2 + 1];
79 const AVOption ff_mpv_generic_options
[] = {
84 void ff_convert_matrix(MpegEncContext
*s
, int (*qmat
)[64],
85 uint16_t (*qmat16
)[2][64],
86 const uint16_t *quant_matrix
,
87 int bias
, int qmin
, int qmax
, int intra
)
89 FDCTDSPContext
*fdsp
= &s
->fdsp
;
93 for (qscale
= qmin
; qscale
<= qmax
; qscale
++) {
95 if (fdsp
->fdct
== ff_jpeg_fdct_islow_8
||
97 fdsp
->fdct
== ff_faandct
||
98 #endif /* CONFIG_FAANDCT */
99 fdsp
->fdct
== ff_jpeg_fdct_islow_10
) {
100 for (i
= 0; i
< 64; i
++) {
101 const int j
= s
->idsp
.idct_permutation
[i
];
102 int64_t den
= (int64_t) qscale
* quant_matrix
[j
];
103 /* 16 <= qscale * quant_matrix[i] <= 7905
104 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
105 * 19952 <= x <= 249205026
106 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
107 * 3444240 >= (1 << 36) / (x) >= 275 */
109 qmat
[qscale
][i
] = (int)((UINT64_C(1) << QMAT_SHIFT
) / den
);
111 } else if (fdsp
->fdct
== ff_fdct_ifast
) {
112 for (i
= 0; i
< 64; i
++) {
113 const int j
= s
->idsp
.idct_permutation
[i
];
114 int64_t den
= ff_aanscales
[i
] * (int64_t) qscale
* quant_matrix
[j
];
115 /* 16 <= qscale * quant_matrix[i] <= 7905
116 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
117 * 19952 <= x <= 249205026
118 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
119 * 3444240 >= (1 << 36) / (x) >= 275 */
121 qmat
[qscale
][i
] = (int)((UINT64_C(1) << (QMAT_SHIFT
+ 14)) / den
);
124 for (i
= 0; i
< 64; i
++) {
125 const int j
= s
->idsp
.idct_permutation
[i
];
126 int64_t den
= (int64_t) qscale
* quant_matrix
[j
];
127 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
128 * Assume x = qscale * quant_matrix[i]
130 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
131 * so 32768 >= (1 << 19) / (x) >= 67 */
132 qmat
[qscale
][i
] = (int)((UINT64_C(1) << QMAT_SHIFT
) / den
);
133 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
134 // (qscale * quant_matrix[i]);
135 qmat16
[qscale
][0][i
] = (1 << QMAT_SHIFT_MMX
) / den
;
137 if (qmat16
[qscale
][0][i
] == 0 ||
138 qmat16
[qscale
][0][i
] == 128 * 256)
139 qmat16
[qscale
][0][i
] = 128 * 256 - 1;
140 qmat16
[qscale
][1][i
] =
141 ROUNDED_DIV(bias
<< (16 - QUANT_BIAS_SHIFT
),
142 qmat16
[qscale
][0][i
]);
146 for (i
= intra
; i
< 64; i
++) {
148 if (fdsp
->fdct
== ff_fdct_ifast
) {
149 max
= (8191LL * ff_aanscales
[i
]) >> 14;
151 while (((max
* qmat
[qscale
][i
]) >> shift
) > INT_MAX
) {
157 av_log(NULL
, AV_LOG_INFO
,
158 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
163 static inline void update_qscale(MpegEncContext
*s
)
165 s
->qscale
= (s
->lambda
* 139 + FF_LAMBDA_SCALE
* 64) >>
166 (FF_LAMBDA_SHIFT
+ 7);
167 s
->qscale
= av_clip(s
->qscale
, s
->avctx
->qmin
, s
->avctx
->qmax
);
169 s
->lambda2
= (s
->lambda
* s
->lambda
+ FF_LAMBDA_SCALE
/ 2) >>
173 void ff_write_quant_matrix(PutBitContext
*pb
, uint16_t *matrix
)
179 for (i
= 0; i
< 64; i
++) {
180 put_bits(pb
, 8, matrix
[ff_zigzag_direct
[i
]]);
187 * init s->current_picture.qscale_table from s->lambda_table
189 void ff_init_qscale_tab(MpegEncContext
*s
)
191 int8_t * const qscale_table
= s
->current_picture
.qscale_table
;
194 for (i
= 0; i
< s
->mb_num
; i
++) {
195 unsigned int lam
= s
->lambda_table
[s
->mb_index2xy
[i
]];
196 int qp
= (lam
* 139 + FF_LAMBDA_SCALE
* 64) >> (FF_LAMBDA_SHIFT
+ 7);
197 qscale_table
[s
->mb_index2xy
[i
]] = av_clip(qp
, s
->avctx
->qmin
,
202 static void update_duplicate_context_after_me(MpegEncContext
*dst
,
205 #define COPY(a) dst->a= src->a
207 COPY(current_picture
);
213 COPY(picture_in_gop_number
);
214 COPY(gop_picture_number
);
215 COPY(frame_pred_frame_dct
); // FIXME don't set in encode_header
216 COPY(progressive_frame
); // FIXME don't set in encode_header
217 COPY(partitioned_frame
); // FIXME don't set in encode_header
222 * Set the given MpegEncContext to defaults for encoding.
223 * the changed fields will not depend upon the prior state of the MpegEncContext.
225 static void mpv_encode_defaults(MpegEncContext
*s
)
228 ff_mpv_common_defaults(s
);
230 for (i
= -16; i
< 16; i
++) {
231 default_fcode_tab
[i
+ MAX_MV
] = 1;
233 s
->me
.mv_penalty
= default_mv_penalty
;
234 s
->fcode_tab
= default_fcode_tab
;
236 s
->input_picture_number
= 0;
237 s
->picture_in_gop_number
= 0;
240 /* init video encoder */
241 av_cold
int ff_mpv_encode_init(AVCodecContext
*avctx
)
243 MpegEncContext
*s
= avctx
->priv_data
;
244 int i
, ret
, format_supported
;
246 mpv_encode_defaults(s
);
248 switch (avctx
->codec_id
) {
249 case AV_CODEC_ID_MPEG2VIDEO
:
250 if (avctx
->pix_fmt
!= AV_PIX_FMT_YUV420P
&&
251 avctx
->pix_fmt
!= AV_PIX_FMT_YUV422P
) {
252 av_log(avctx
, AV_LOG_ERROR
,
253 "only YUV420 and YUV422 are supported\n");
257 case AV_CODEC_ID_MJPEG
:
258 format_supported
= 0;
259 /* JPEG color space */
260 if (avctx
->pix_fmt
== AV_PIX_FMT_YUVJ420P
||
261 avctx
->pix_fmt
== AV_PIX_FMT_YUVJ422P
||
262 (avctx
->color_range
== AVCOL_RANGE_JPEG
&&
263 (avctx
->pix_fmt
== AV_PIX_FMT_YUV420P
||
264 avctx
->pix_fmt
== AV_PIX_FMT_YUV422P
)))
265 format_supported
= 1;
266 /* MPEG color space */
267 else if (avctx
->strict_std_compliance
<= FF_COMPLIANCE_UNOFFICIAL
&&
268 (avctx
->pix_fmt
== AV_PIX_FMT_YUV420P
||
269 avctx
->pix_fmt
== AV_PIX_FMT_YUV422P
))
270 format_supported
= 1;
272 if (!format_supported
) {
273 av_log(avctx
, AV_LOG_ERROR
, "colorspace not supported in jpeg\n");
278 if (avctx
->pix_fmt
!= AV_PIX_FMT_YUV420P
) {
279 av_log(avctx
, AV_LOG_ERROR
, "only YUV420 is supported\n");
284 switch (avctx
->pix_fmt
) {
285 case AV_PIX_FMT_YUVJ422P
:
286 case AV_PIX_FMT_YUV422P
:
287 s
->chroma_format
= CHROMA_422
;
289 case AV_PIX_FMT_YUVJ420P
:
290 case AV_PIX_FMT_YUV420P
:
292 s
->chroma_format
= CHROMA_420
;
296 s
->bit_rate
= avctx
->bit_rate
;
297 s
->width
= avctx
->width
;
298 s
->height
= avctx
->height
;
299 if (avctx
->gop_size
> 600 &&
300 avctx
->strict_std_compliance
> FF_COMPLIANCE_EXPERIMENTAL
) {
301 av_log(avctx
, AV_LOG_ERROR
,
302 "Warning keyframe interval too large! reducing it ...\n");
303 avctx
->gop_size
= 600;
305 s
->gop_size
= avctx
->gop_size
;
307 if (avctx
->max_b_frames
> MAX_B_FRAMES
) {
308 av_log(avctx
, AV_LOG_ERROR
, "Too many B-frames requested, maximum "
309 "is %d.\n", MAX_B_FRAMES
);
311 s
->max_b_frames
= avctx
->max_b_frames
;
312 s
->codec_id
= avctx
->codec
->id
;
313 s
->strict_std_compliance
= avctx
->strict_std_compliance
;
314 s
->quarter_sample
= (avctx
->flags
& AV_CODEC_FLAG_QPEL
) != 0;
315 s
->mpeg_quant
= avctx
->mpeg_quant
;
316 s
->rtp_mode
= !!avctx
->rtp_payload_size
;
317 s
->intra_dc_precision
= avctx
->intra_dc_precision
;
318 s
->user_specified_pts
= AV_NOPTS_VALUE
;
320 if (s
->gop_size
<= 1) {
327 #if FF_API_MOTION_EST
328 FF_DISABLE_DEPRECATION_WARNINGS
329 s
->me_method
= avctx
->me_method
;
330 FF_ENABLE_DEPRECATION_WARNINGS
334 s
->fixed_qscale
= !!(avctx
->flags
& AV_CODEC_FLAG_QSCALE
);
337 FF_DISABLE_DEPRECATION_WARNINGS
338 if (avctx
->border_masking
!= 0.0)
339 s
->border_masking
= avctx
->border_masking
;
340 FF_ENABLE_DEPRECATION_WARNINGS
343 s
->adaptive_quant
= (s
->avctx
->lumi_masking
||
344 s
->avctx
->dark_masking
||
345 s
->avctx
->temporal_cplx_masking
||
346 s
->avctx
->spatial_cplx_masking
||
347 s
->avctx
->p_masking
||
349 (s
->mpv_flags
& FF_MPV_FLAG_QP_RD
)) &&
352 s
->loop_filter
= !!(s
->avctx
->flags
& AV_CODEC_FLAG_LOOP_FILTER
);
354 if (avctx
->rc_max_rate
&& !avctx
->rc_buffer_size
) {
355 av_log(avctx
, AV_LOG_ERROR
,
356 "a vbv buffer size is needed, "
357 "for encoding with a maximum bitrate\n");
361 if (avctx
->rc_min_rate
&& avctx
->rc_max_rate
!= avctx
->rc_min_rate
) {
362 av_log(avctx
, AV_LOG_INFO
,
363 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
366 if (avctx
->rc_min_rate
&& avctx
->rc_min_rate
> avctx
->bit_rate
) {
367 av_log(avctx
, AV_LOG_ERROR
, "bitrate below min bitrate\n");
371 if (avctx
->rc_max_rate
&& avctx
->rc_max_rate
< avctx
->bit_rate
) {
372 av_log(avctx
, AV_LOG_INFO
, "bitrate above max bitrate\n");
376 if (avctx
->rc_max_rate
&&
377 avctx
->rc_max_rate
== avctx
->bit_rate
&&
378 avctx
->rc_max_rate
!= avctx
->rc_min_rate
) {
379 av_log(avctx
, AV_LOG_INFO
,
380 "impossible bitrate constraints, this will fail\n");
383 if (avctx
->rc_buffer_size
&&
384 avctx
->bit_rate
* (int64_t)avctx
->time_base
.num
>
385 avctx
->rc_buffer_size
* (int64_t)avctx
->time_base
.den
) {
386 av_log(avctx
, AV_LOG_ERROR
, "VBV buffer too small for bitrate\n");
390 if (!s
->fixed_qscale
&&
391 avctx
->bit_rate
* av_q2d(avctx
->time_base
) >
392 avctx
->bit_rate_tolerance
) {
393 av_log(avctx
, AV_LOG_ERROR
,
394 "bitrate tolerance too small for bitrate\n");
398 if (s
->avctx
->rc_max_rate
&&
399 s
->avctx
->rc_min_rate
== s
->avctx
->rc_max_rate
&&
400 (s
->codec_id
== AV_CODEC_ID_MPEG1VIDEO
||
401 s
->codec_id
== AV_CODEC_ID_MPEG2VIDEO
) &&
402 90000LL * (avctx
->rc_buffer_size
- 1) >
403 s
->avctx
->rc_max_rate
* 0xFFFFLL
) {
404 av_log(avctx
, AV_LOG_INFO
,
405 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
406 "specified vbv buffer is too large for the given bitrate!\n");
409 if ((s
->avctx
->flags
& AV_CODEC_FLAG_4MV
) && s
->codec_id
!= AV_CODEC_ID_MPEG4
&&
410 s
->codec_id
!= AV_CODEC_ID_H263
&& s
->codec_id
!= AV_CODEC_ID_H263P
&&
411 s
->codec_id
!= AV_CODEC_ID_FLV1
) {
412 av_log(avctx
, AV_LOG_ERROR
, "4MV not supported by codec\n");
416 if (s
->obmc
&& s
->avctx
->mb_decision
!= FF_MB_DECISION_SIMPLE
) {
417 av_log(avctx
, AV_LOG_ERROR
,
418 "OBMC is only supported with simple mb decision\n");
422 if (s
->quarter_sample
&& s
->codec_id
!= AV_CODEC_ID_MPEG4
) {
423 av_log(avctx
, AV_LOG_ERROR
, "qpel not supported by codec\n");
427 if (s
->max_b_frames
&&
428 s
->codec_id
!= AV_CODEC_ID_MPEG4
&&
429 s
->codec_id
!= AV_CODEC_ID_MPEG1VIDEO
&&
430 s
->codec_id
!= AV_CODEC_ID_MPEG2VIDEO
) {
431 av_log(avctx
, AV_LOG_ERROR
, "b frames not supported by codec\n");
435 if ((s
->codec_id
== AV_CODEC_ID_MPEG4
||
436 s
->codec_id
== AV_CODEC_ID_H263
||
437 s
->codec_id
== AV_CODEC_ID_H263P
) &&
438 (avctx
->sample_aspect_ratio
.num
> 255 ||
439 avctx
->sample_aspect_ratio
.den
> 255)) {
440 av_log(avctx
, AV_LOG_ERROR
,
441 "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
442 avctx
->sample_aspect_ratio
.num
, avctx
->sample_aspect_ratio
.den
);
446 if ((s
->avctx
->flags
& (AV_CODEC_FLAG_INTERLACED_DCT
| AV_CODEC_FLAG_INTERLACED_ME
)) &&
447 s
->codec_id
!= AV_CODEC_ID_MPEG4
&& s
->codec_id
!= AV_CODEC_ID_MPEG2VIDEO
) {
448 av_log(avctx
, AV_LOG_ERROR
, "interlacing not supported by codec\n");
452 // FIXME mpeg2 uses that too
453 if (s
->mpeg_quant
&& s
->codec_id
!= AV_CODEC_ID_MPEG4
) {
454 av_log(avctx
, AV_LOG_ERROR
,
455 "mpeg2 style quantization not supported by codec\n");
459 if ((s
->mpv_flags
& FF_MPV_FLAG_CBP_RD
) && !avctx
->trellis
) {
460 av_log(avctx
, AV_LOG_ERROR
, "CBP RD needs trellis quant\n");
464 if ((s
->mpv_flags
& FF_MPV_FLAG_QP_RD
) &&
465 s
->avctx
->mb_decision
!= FF_MB_DECISION_RD
) {
466 av_log(avctx
, AV_LOG_ERROR
, "QP RD needs mbd=2\n");
470 if (s
->avctx
->scenechange_threshold
< 1000000000 &&
471 (s
->avctx
->flags
& AV_CODEC_FLAG_CLOSED_GOP
)) {
472 av_log(avctx
, AV_LOG_ERROR
,
473 "closed gop with scene change detection are not supported yet, "
474 "set threshold to 1000000000\n");
478 if (s
->avctx
->flags
& AV_CODEC_FLAG_LOW_DELAY
) {
479 if (s
->codec_id
!= AV_CODEC_ID_MPEG2VIDEO
) {
480 av_log(avctx
, AV_LOG_ERROR
,
481 "low delay forcing is only available for mpeg2\n");
484 if (s
->max_b_frames
!= 0) {
485 av_log(avctx
, AV_LOG_ERROR
,
486 "b frames cannot be used with low delay\n");
491 if (s
->q_scale_type
== 1) {
492 if (avctx
->qmax
> 12) {
493 av_log(avctx
, AV_LOG_ERROR
,
494 "non linear quant only supports qmax <= 12 currently\n");
499 if (avctx
->slices
> 1 &&
500 (avctx
->codec_id
== AV_CODEC_ID_FLV1
|| avctx
->codec_id
== AV_CODEC_ID_H261
)) {
501 av_log(avctx
, AV_LOG_ERROR
, "Multiple slices are not supported by this codec\n");
502 return AVERROR(EINVAL
);
505 if (s
->avctx
->thread_count
> 1 &&
506 s
->codec_id
!= AV_CODEC_ID_MPEG4
&&
507 s
->codec_id
!= AV_CODEC_ID_MPEG1VIDEO
&&
508 s
->codec_id
!= AV_CODEC_ID_MPEG2VIDEO
&&
509 (s
->codec_id
!= AV_CODEC_ID_H263P
)) {
510 av_log(avctx
, AV_LOG_ERROR
,
511 "multi threaded encoding not supported by codec\n");
515 if (s
->avctx
->thread_count
< 1) {
516 av_log(avctx
, AV_LOG_ERROR
,
517 "automatic thread number detection not supported by codec,"
522 if (s
->avctx
->thread_count
> 1)
525 if (!avctx
->time_base
.den
|| !avctx
->time_base
.num
) {
526 av_log(avctx
, AV_LOG_ERROR
, "framerate not set\n");
530 if (avctx
->b_frame_strategy
&& (avctx
->flags
& AV_CODEC_FLAG_PASS2
)) {
531 av_log(avctx
, AV_LOG_INFO
,
532 "notice: b_frame_strategy only affects the first pass\n");
533 avctx
->b_frame_strategy
= 0;
536 i
= av_gcd(avctx
->time_base
.den
, avctx
->time_base
.num
);
538 av_log(avctx
, AV_LOG_INFO
, "removing common factors from framerate\n");
539 avctx
->time_base
.den
/= i
;
540 avctx
->time_base
.num
/= i
;
544 if (s
->mpeg_quant
|| s
->codec_id
== AV_CODEC_ID_MPEG1VIDEO
||
545 s
->codec_id
== AV_CODEC_ID_MPEG2VIDEO
|| s
->codec_id
== AV_CODEC_ID_MJPEG
) {
546 // (a + x * 3 / 8) / x
547 s
->intra_quant_bias
= 3 << (QUANT_BIAS_SHIFT
- 3);
548 s
->inter_quant_bias
= 0;
550 s
->intra_quant_bias
= 0;
552 s
->inter_quant_bias
= -(1 << (QUANT_BIAS_SHIFT
- 2));
555 #if FF_API_QUANT_BIAS
556 FF_DISABLE_DEPRECATION_WARNINGS
557 if (avctx
->intra_quant_bias
!= FF_DEFAULT_QUANT_BIAS
)
558 s
->intra_quant_bias
= avctx
->intra_quant_bias
;
559 if (avctx
->inter_quant_bias
!= FF_DEFAULT_QUANT_BIAS
)
560 s
->inter_quant_bias
= avctx
->inter_quant_bias
;
561 FF_ENABLE_DEPRECATION_WARNINGS
564 if (avctx
->codec_id
== AV_CODEC_ID_MPEG4
&&
565 s
->avctx
->time_base
.den
> (1 << 16) - 1) {
566 av_log(avctx
, AV_LOG_ERROR
,
567 "timebase %d/%d not supported by MPEG 4 standard, "
568 "the maximum admitted value for the timebase denominator "
569 "is %d\n", s
->avctx
->time_base
.num
, s
->avctx
->time_base
.den
,
573 s
->time_increment_bits
= av_log2(s
->avctx
->time_base
.den
- 1) + 1;
575 switch (avctx
->codec
->id
) {
576 case AV_CODEC_ID_MPEG1VIDEO
:
577 s
->out_format
= FMT_MPEG1
;
578 s
->low_delay
= !!(s
->avctx
->flags
& AV_CODEC_FLAG_LOW_DELAY
);
579 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
581 case AV_CODEC_ID_MPEG2VIDEO
:
582 s
->out_format
= FMT_MPEG1
;
583 s
->low_delay
= !!(s
->avctx
->flags
& AV_CODEC_FLAG_LOW_DELAY
);
584 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
587 case AV_CODEC_ID_MJPEG
:
588 s
->out_format
= FMT_MJPEG
;
589 s
->intra_only
= 1; /* force intra only for jpeg */
590 if (!CONFIG_MJPEG_ENCODER
||
591 ff_mjpeg_encode_init(s
) < 0)
596 case AV_CODEC_ID_H261
:
597 if (!CONFIG_H261_ENCODER
)
599 if (ff_h261_get_picture_format(s
->width
, s
->height
) < 0) {
600 av_log(avctx
, AV_LOG_ERROR
,
601 "The specified picture size of %dx%d is not valid for the "
602 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
603 s
->width
, s
->height
);
606 s
->out_format
= FMT_H261
;
609 s
->rtp_mode
= 0; /* Sliced encoding not supported */
611 case AV_CODEC_ID_H263
:
612 if (!CONFIG_H263_ENCODER
)
614 if (ff_match_2uint16(ff_h263_format
, FF_ARRAY_ELEMS(ff_h263_format
),
615 s
->width
, s
->height
) == 8) {
616 av_log(avctx
, AV_LOG_INFO
,
617 "The specified picture size of %dx%d is not valid for "
618 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
619 "352x288, 704x576, and 1408x1152."
620 "Try H.263+.\n", s
->width
, s
->height
);
623 s
->out_format
= FMT_H263
;
627 case AV_CODEC_ID_H263P
:
628 s
->out_format
= FMT_H263
;
631 s
->h263_aic
= (avctx
->flags
& AV_CODEC_FLAG_AC_PRED
) ?
1 : 0;
632 s
->modified_quant
= s
->h263_aic
;
633 s
->loop_filter
= (avctx
->flags
& AV_CODEC_FLAG_LOOP_FILTER
) ?
1 : 0;
634 s
->unrestricted_mv
= s
->obmc
|| s
->loop_filter
|| s
->umvplus
;
637 /* These are just to be sure */
641 case AV_CODEC_ID_FLV1
:
642 s
->out_format
= FMT_H263
;
643 s
->h263_flv
= 2; /* format = 1; 11-bit codes */
644 s
->unrestricted_mv
= 1;
645 s
->rtp_mode
= 0; /* don't allow GOB */
649 case AV_CODEC_ID_RV10
:
650 s
->out_format
= FMT_H263
;
654 case AV_CODEC_ID_RV20
:
655 s
->out_format
= FMT_H263
;
658 s
->modified_quant
= 1;
662 s
->unrestricted_mv
= 0;
664 case AV_CODEC_ID_MPEG4
:
665 s
->out_format
= FMT_H263
;
667 s
->unrestricted_mv
= 1;
668 s
->low_delay
= s
->max_b_frames ?
0 : 1;
669 avctx
->delay
= s
->low_delay ?
0 : (s
->max_b_frames
+ 1);
671 case AV_CODEC_ID_MSMPEG4V2
:
672 s
->out_format
= FMT_H263
;
674 s
->unrestricted_mv
= 1;
675 s
->msmpeg4_version
= 2;
679 case AV_CODEC_ID_MSMPEG4V3
:
680 s
->out_format
= FMT_H263
;
682 s
->unrestricted_mv
= 1;
683 s
->msmpeg4_version
= 3;
684 s
->flipflop_rounding
= 1;
688 case AV_CODEC_ID_WMV1
:
689 s
->out_format
= FMT_H263
;
691 s
->unrestricted_mv
= 1;
692 s
->msmpeg4_version
= 4;
693 s
->flipflop_rounding
= 1;
697 case AV_CODEC_ID_WMV2
:
698 s
->out_format
= FMT_H263
;
700 s
->unrestricted_mv
= 1;
701 s
->msmpeg4_version
= 5;
702 s
->flipflop_rounding
= 1;
710 avctx
->has_b_frames
= !s
->low_delay
;
714 s
->progressive_frame
=
715 s
->progressive_sequence
= !(avctx
->flags
& (AV_CODEC_FLAG_INTERLACED_DCT
|
716 AV_CODEC_FLAG_INTERLACED_ME
) ||
721 if (ff_mpv_common_init(s
) < 0)
725 ff_mpv_encode_init_x86(s
);
727 ff_fdctdsp_init(&s
->fdsp
, avctx
);
728 ff_me_cmp_init(&s
->mecc
, avctx
);
729 ff_mpegvideoencdsp_init(&s
->mpvencdsp
, avctx
);
730 ff_pixblockdsp_init(&s
->pdsp
, avctx
);
731 ff_qpeldsp_init(&s
->qdsp
);
733 if (s
->msmpeg4_version
) {
734 FF_ALLOCZ_OR_GOTO(s
->avctx
, s
->ac_stats
,
735 2 * 2 * (MAX_LEVEL
+ 1) *
736 (MAX_RUN
+ 1) * 2 * sizeof(int), fail
);
738 FF_ALLOCZ_OR_GOTO(s
->avctx
, s
->avctx
->stats_out
, 256, fail
);
740 FF_ALLOCZ_OR_GOTO(s
->avctx
, s
->q_intra_matrix
, 64 * 32 * sizeof(int), fail
);
741 FF_ALLOCZ_OR_GOTO(s
->avctx
, s
->q_inter_matrix
, 64 * 32 * sizeof(int), fail
);
742 FF_ALLOCZ_OR_GOTO(s
->avctx
, s
->q_intra_matrix16
, 64 * 32 * 2 * sizeof(uint16_t), fail
);
743 FF_ALLOCZ_OR_GOTO(s
->avctx
, s
->q_inter_matrix16
, 64 * 32 * 2 * sizeof(uint16_t), fail
);
744 FF_ALLOCZ_OR_GOTO(s
->avctx
, s
->input_picture
,
745 MAX_PICTURE_COUNT
* sizeof(Picture
*), fail
);
746 FF_ALLOCZ_OR_GOTO(s
->avctx
, s
->reordered_input_picture
,
747 MAX_PICTURE_COUNT
* sizeof(Picture
*), fail
);
749 if (s
->avctx
->noise_reduction
) {
750 FF_ALLOCZ_OR_GOTO(s
->avctx
, s
->dct_offset
,
751 2 * 64 * sizeof(uint16_t), fail
);
754 if (CONFIG_H263_ENCODER
)
755 ff_h263dsp_init(&s
->h263dsp
);
756 if (!s
->dct_quantize
)
757 s
->dct_quantize
= ff_dct_quantize_c
;
759 s
->denoise_dct
= denoise_dct_c
;
760 s
->fast_dct_quantize
= s
->dct_quantize
;
762 s
->dct_quantize
= dct_quantize_trellis_c
;
764 if ((CONFIG_H263P_ENCODER
|| CONFIG_RV20_ENCODER
) && s
->modified_quant
)
765 s
->chroma_qscale_table
= ff_h263_chroma_qscale_table
;
767 s
->quant_precision
= 5;
769 ff_set_cmp(&s
->mecc
, s
->mecc
.ildct_cmp
, s
->avctx
->ildct_cmp
);
770 ff_set_cmp(&s
->mecc
, s
->mecc
.frame_skip_cmp
, s
->avctx
->frame_skip_cmp
);
772 if (CONFIG_H261_ENCODER
&& s
->out_format
== FMT_H261
)
773 ff_h261_encode_init(s
);
774 if (CONFIG_H263_ENCODER
&& s
->out_format
== FMT_H263
)
775 ff_h263_encode_init(s
);
776 if (CONFIG_MSMPEG4_ENCODER
&& s
->msmpeg4_version
)
777 if ((ret
= ff_msmpeg4_encode_init(s
)) < 0)
779 if ((CONFIG_MPEG1VIDEO_ENCODER
|| CONFIG_MPEG2VIDEO_ENCODER
)
780 && s
->out_format
== FMT_MPEG1
)
781 ff_mpeg1_encode_init(s
);
784 for (i
= 0; i
< 64; i
++) {
785 int j
= s
->idsp
.idct_permutation
[i
];
786 if (CONFIG_MPEG4_ENCODER
&& s
->codec_id
== AV_CODEC_ID_MPEG4
&&
788 s
->intra_matrix
[j
] = ff_mpeg4_default_intra_matrix
[i
];
789 s
->inter_matrix
[j
] = ff_mpeg4_default_non_intra_matrix
[i
];
790 } else if (s
->out_format
== FMT_H263
|| s
->out_format
== FMT_H261
) {
792 s
->inter_matrix
[j
] = ff_mpeg1_default_non_intra_matrix
[i
];
795 s
->intra_matrix
[j
] = ff_mpeg1_default_intra_matrix
[i
];
796 s
->inter_matrix
[j
] = ff_mpeg1_default_non_intra_matrix
[i
];
798 if (s
->avctx
->intra_matrix
)
799 s
->intra_matrix
[j
] = s
->avctx
->intra_matrix
[i
];
800 if (s
->avctx
->inter_matrix
)
801 s
->inter_matrix
[j
] = s
->avctx
->inter_matrix
[i
];
804 /* precompute matrix */
805 /* for mjpeg, we do include qscale in the matrix */
806 if (s
->out_format
!= FMT_MJPEG
) {
807 ff_convert_matrix(s
, s
->q_intra_matrix
, s
->q_intra_matrix16
,
808 s
->intra_matrix
, s
->intra_quant_bias
, avctx
->qmin
,
810 ff_convert_matrix(s
, s
->q_inter_matrix
, s
->q_inter_matrix16
,
811 s
->inter_matrix
, s
->inter_quant_bias
, avctx
->qmin
,
815 if (ff_rate_control_init(s
) < 0)
818 #if FF_API_ERROR_RATE
819 FF_DISABLE_DEPRECATION_WARNINGS
820 if (avctx
->error_rate
)
821 s
->error_rate
= avctx
->error_rate
;
822 FF_ENABLE_DEPRECATION_WARNINGS
;
825 #if FF_API_NORMALIZE_AQP
826 FF_DISABLE_DEPRECATION_WARNINGS
827 if (avctx
->flags
& CODEC_FLAG_NORMALIZE_AQP
)
828 s
->mpv_flags
|= FF_MPV_FLAG_NAQ
;
829 FF_ENABLE_DEPRECATION_WARNINGS
;
833 FF_DISABLE_DEPRECATION_WARNINGS
834 if (avctx
->flags
& CODEC_FLAG_MV0
)
835 s
->mpv_flags
|= FF_MPV_FLAG_MV0
;
836 FF_ENABLE_DEPRECATION_WARNINGS
840 FF_DISABLE_DEPRECATION_WARNINGS
841 if (avctx
->rc_qsquish
!= 0.0)
842 s
->rc_qsquish
= avctx
->rc_qsquish
;
843 if (avctx
->rc_qmod_amp
!= 0.0)
844 s
->rc_qmod_amp
= avctx
->rc_qmod_amp
;
845 if (avctx
->rc_qmod_freq
)
846 s
->rc_qmod_freq
= avctx
->rc_qmod_freq
;
847 if (avctx
->rc_buffer_aggressivity
!= 1.0)
848 s
->rc_buffer_aggressivity
= avctx
->rc_buffer_aggressivity
;
849 if (avctx
->rc_initial_cplx
!= 0.0)
850 s
->rc_initial_cplx
= avctx
->rc_initial_cplx
;
852 s
->lmin
= avctx
->lmin
;
854 s
->lmax
= avctx
->lmax
;
858 s
->rc_eq
= av_strdup(avctx
->rc_eq
);
860 return AVERROR(ENOMEM
);
862 FF_ENABLE_DEPRECATION_WARNINGS
865 if (avctx
->b_frame_strategy
== 2) {
866 for (i
= 0; i
< s
->max_b_frames
+ 2; i
++) {
867 s
->tmp_frames
[i
] = av_frame_alloc();
868 if (!s
->tmp_frames
[i
])
869 return AVERROR(ENOMEM
);
871 s
->tmp_frames
[i
]->format
= AV_PIX_FMT_YUV420P
;
872 s
->tmp_frames
[i
]->width
= s
->width
>> avctx
->brd_scale
;
873 s
->tmp_frames
[i
]->height
= s
->height
>> avctx
->brd_scale
;
875 ret
= av_frame_get_buffer(s
->tmp_frames
[i
], 32);
883 ff_mpv_encode_end(avctx
);
884 return AVERROR_UNKNOWN
;
887 av_cold
int ff_mpv_encode_end(AVCodecContext
*avctx
)
889 MpegEncContext
*s
= avctx
->priv_data
;
892 ff_rate_control_uninit(s
);
894 ff_mpv_common_end(s
);
895 if (CONFIG_MJPEG_ENCODER
&&
896 s
->out_format
== FMT_MJPEG
)
897 ff_mjpeg_encode_close(s
);
899 av_freep(&avctx
->extradata
);
901 for (i
= 0; i
< FF_ARRAY_ELEMS(s
->tmp_frames
); i
++)
902 av_frame_free(&s
->tmp_frames
[i
]);
904 ff_free_picture_tables(&s
->new_picture
);
905 ff_mpeg_unref_picture(s
->avctx
, &s
->new_picture
);
907 av_freep(&s
->avctx
->stats_out
);
908 av_freep(&s
->ac_stats
);
910 av_freep(&s
->q_intra_matrix
);
911 av_freep(&s
->q_inter_matrix
);
912 av_freep(&s
->q_intra_matrix16
);
913 av_freep(&s
->q_inter_matrix16
);
914 av_freep(&s
->input_picture
);
915 av_freep(&s
->reordered_input_picture
);
916 av_freep(&s
->dct_offset
);
921 static int get_sae(uint8_t *src
, int ref
, int stride
)
926 for (y
= 0; y
< 16; y
++) {
927 for (x
= 0; x
< 16; x
++) {
928 acc
+= FFABS(src
[x
+ y
* stride
] - ref
);
935 static int get_intra_count(MpegEncContext
*s
, uint8_t *src
,
936 uint8_t *ref
, int stride
)
944 for (y
= 0; y
< h
; y
+= 16) {
945 for (x
= 0; x
< w
; x
+= 16) {
946 int offset
= x
+ y
* stride
;
947 int sad
= s
->mecc
.sad
[0](NULL
, src
+ offset
, ref
+ offset
,
949 int mean
= (s
->mpvencdsp
.pix_sum(src
+ offset
, stride
) + 128) >> 8;
950 int sae
= get_sae(src
+ offset
, mean
, stride
);
952 acc
+= sae
+ 500 < sad
;
958 static int alloc_picture(MpegEncContext
*s
, Picture
*pic
, int shared
)
960 return ff_alloc_picture(s
->avctx
, pic
, &s
->me
, &s
->sc
, shared
, 1,
961 s
->chroma_x_shift
, s
->chroma_y_shift
, s
->out_format
,
962 s
->mb_stride
, s
->mb_height
, s
->b8_stride
,
963 &s
->linesize
, &s
->uvlinesize
);
966 static int load_input_picture(MpegEncContext
*s
, const AVFrame
*pic_arg
)
970 int i
, display_picture_number
= 0, ret
;
971 int encoding_delay
= s
->max_b_frames ? s
->max_b_frames
972 : (s
->low_delay ?
0 : 1);
973 int flush_offset
= 1;
978 display_picture_number
= s
->input_picture_number
++;
980 if (pts
!= AV_NOPTS_VALUE
) {
981 if (s
->user_specified_pts
!= AV_NOPTS_VALUE
) {
983 int64_t last
= s
->user_specified_pts
;
986 av_log(s
->avctx
, AV_LOG_ERROR
,
987 "Error, Invalid timestamp=%"PRId64
", "
988 "last=%"PRId64
"\n", pts
, s
->user_specified_pts
);
992 if (!s
->low_delay
&& display_picture_number
== 1)
993 s
->dts_delta
= time
- last
;
995 s
->user_specified_pts
= pts
;
997 if (s
->user_specified_pts
!= AV_NOPTS_VALUE
) {
998 s
->user_specified_pts
=
999 pts
= s
->user_specified_pts
+ 1;
1000 av_log(s
->avctx
, AV_LOG_INFO
,
1001 "Warning: AVFrame.pts=? trying to guess (%"PRId64
")\n",
1004 pts
= display_picture_number
;
1008 if (!pic_arg
->buf
[0] ||
1009 pic_arg
->linesize
[0] != s
->linesize
||
1010 pic_arg
->linesize
[1] != s
->uvlinesize
||
1011 pic_arg
->linesize
[2] != s
->uvlinesize
)
1013 if ((s
->width
& 15) || (s
->height
& 15))
1016 ff_dlog(s
->avctx
, "%d %d %td %td\n", pic_arg
->linesize
[0],
1017 pic_arg
->linesize
[1], s
->linesize
, s
->uvlinesize
);
1019 i
= ff_find_unused_picture(s
->avctx
, s
->picture
, direct
);
1023 pic
= &s
->picture
[i
];
1027 if ((ret
= av_frame_ref(pic
->f
, pic_arg
)) < 0)
1030 ret
= alloc_picture(s
, pic
, direct
);
1035 if (pic
->f
->data
[0] + INPLACE_OFFSET
== pic_arg
->data
[0] &&
1036 pic
->f
->data
[1] + INPLACE_OFFSET
== pic_arg
->data
[1] &&
1037 pic
->f
->data
[2] + INPLACE_OFFSET
== pic_arg
->data
[2]) {
1040 int h_chroma_shift
, v_chroma_shift
;
1041 av_pix_fmt_get_chroma_sub_sample(s
->avctx
->pix_fmt
,
1045 for (i
= 0; i
< 3; i
++) {
1046 int src_stride
= pic_arg
->linesize
[i
];
1047 int dst_stride
= i ? s
->uvlinesize
: s
->linesize
;
1048 int h_shift
= i ? h_chroma_shift
: 0;
1049 int v_shift
= i ? v_chroma_shift
: 0;
1050 int w
= s
->width
>> h_shift
;
1051 int h
= s
->height
>> v_shift
;
1052 uint8_t *src
= pic_arg
->data
[i
];
1053 uint8_t *dst
= pic
->f
->data
[i
];
1055 if (!s
->avctx
->rc_buffer_size
)
1056 dst
+= INPLACE_OFFSET
;
1058 if (src_stride
== dst_stride
)
1059 memcpy(dst
, src
, src_stride
* h
);
1062 uint8_t *dst2
= dst
;
1064 memcpy(dst2
, src
, w
);
1069 if ((s
->width
& 15) || (s
->height
& 15)) {
1070 s
->mpvencdsp
.draw_edges(dst
, dst_stride
,
1079 ret
= av_frame_copy_props(pic
->f
, pic_arg
);
1083 pic
->f
->display_picture_number
= display_picture_number
;
1084 pic
->f
->pts
= pts
; // we set this here to avoid modifiying pic_arg
1086 /* Flushing: When we have not received enough input frames,
1087 * ensure s->input_picture[0] contains the first picture */
1088 for (flush_offset
= 0; flush_offset
< encoding_delay
+ 1; flush_offset
++)
1089 if (s
->input_picture
[flush_offset
])
1092 if (flush_offset
<= 1)
1095 encoding_delay
= encoding_delay
- flush_offset
+ 1;
1098 /* shift buffer entries */
1099 for (i
= flush_offset
; i
< MAX_PICTURE_COUNT
/*s->encoding_delay + 1*/; i
++)
1100 s
->input_picture
[i
- flush_offset
] = s
->input_picture
[i
];
1102 s
->input_picture
[encoding_delay
] = (Picture
*) pic
;
1107 static int skip_check(MpegEncContext
*s
, Picture
*p
, Picture
*ref
)
1111 int64_t score64
= 0;
1113 for (plane
= 0; plane
< 3; plane
++) {
1114 const int stride
= p
->f
->linesize
[plane
];
1115 const int bw
= plane ?
1 : 2;
1116 for (y
= 0; y
< s
->mb_height
* bw
; y
++) {
1117 for (x
= 0; x
< s
->mb_width
* bw
; x
++) {
1118 int off
= p
->shared ?
0 : 16;
1119 uint8_t *dptr
= p
->f
->data
[plane
] + 8 * (x
+ y
* stride
) + off
;
1120 uint8_t *rptr
= ref
->f
->data
[plane
] + 8 * (x
+ y
* stride
);
1121 int v
= s
->mecc
.frame_skip_cmp
[1](s
, dptr
, rptr
, stride
, 8);
1123 switch (s
->avctx
->frame_skip_exp
) {
1124 case 0: score
= FFMAX(score
, v
); break;
1125 case 1: score
+= FFABS(v
); break;
1126 case 2: score
+= v
* v
; break;
1127 case 3: score64
+= FFABS(v
* v
* (int64_t)v
); break;
1128 case 4: score64
+= v
* v
* (int64_t)(v
* v
); break;
1137 if (score64
< s
->avctx
->frame_skip_threshold
)
1139 if (score64
< ((s
->avctx
->frame_skip_factor
* (int64_t)s
->lambda
) >> 8))
1144 static int encode_frame(AVCodecContext
*c
, AVFrame
*frame
)
1146 AVPacket pkt
= { 0 };
1147 int ret
, got_output
;
1149 av_init_packet(&pkt
);
1150 ret
= avcodec_encode_video2(c
, &pkt
, frame
, &got_output
);
1155 av_packet_unref(&pkt
);
1159 static int estimate_best_b_count(MpegEncContext
*s
)
1161 AVCodec
*codec
= avcodec_find_encoder(s
->avctx
->codec_id
);
1162 AVCodecContext
*c
= avcodec_alloc_context3(NULL
);
1163 const int scale
= s
->avctx
->brd_scale
;
1164 int i
, j
, out_size
, p_lambda
, b_lambda
, lambda2
;
1165 int64_t best_rd
= INT64_MAX
;
1166 int best_b_count
= -1;
1169 return AVERROR(ENOMEM
);
1170 assert(scale
>= 0 && scale
<= 3);
1173 //s->next_picture_ptr->quality;
1174 p_lambda
= s
->last_lambda_for
[AV_PICTURE_TYPE_P
];
1175 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1176 b_lambda
= s
->last_lambda_for
[AV_PICTURE_TYPE_B
];
1177 if (!b_lambda
) // FIXME we should do this somewhere else
1178 b_lambda
= p_lambda
;
1179 lambda2
= (b_lambda
* b_lambda
+ (1 << FF_LAMBDA_SHIFT
) / 2) >>
1182 c
->width
= s
->width
>> scale
;
1183 c
->height
= s
->height
>> scale
;
1184 c
->flags
= AV_CODEC_FLAG_QSCALE
| AV_CODEC_FLAG_PSNR
;
1185 c
->flags
|= s
->avctx
->flags
& AV_CODEC_FLAG_QPEL
;
1186 c
->mb_decision
= s
->avctx
->mb_decision
;
1187 c
->me_cmp
= s
->avctx
->me_cmp
;
1188 c
->mb_cmp
= s
->avctx
->mb_cmp
;
1189 c
->me_sub_cmp
= s
->avctx
->me_sub_cmp
;
1190 c
->pix_fmt
= AV_PIX_FMT_YUV420P
;
1191 c
->time_base
= s
->avctx
->time_base
;
1192 c
->max_b_frames
= s
->max_b_frames
;
1194 if (avcodec_open2(c
, codec
, NULL
) < 0)
1197 for (i
= 0; i
< s
->max_b_frames
+ 2; i
++) {
1198 Picture pre_input
, *pre_input_ptr
= i ? s
->input_picture
[i
- 1] :
1199 s
->next_picture_ptr
;
1201 if (pre_input_ptr
&& (!i
|| s
->input_picture
[i
- 1])) {
1202 pre_input
= *pre_input_ptr
;
1204 if (!pre_input
.shared
&& i
) {
1205 pre_input
.f
->data
[0] += INPLACE_OFFSET
;
1206 pre_input
.f
->data
[1] += INPLACE_OFFSET
;
1207 pre_input
.f
->data
[2] += INPLACE_OFFSET
;
1210 s
->mpvencdsp
.shrink
[scale
](s
->tmp_frames
[i
]->data
[0],
1211 s
->tmp_frames
[i
]->linesize
[0],
1212 pre_input
.f
->data
[0],
1213 pre_input
.f
->linesize
[0],
1214 c
->width
, c
->height
);
1215 s
->mpvencdsp
.shrink
[scale
](s
->tmp_frames
[i
]->data
[1],
1216 s
->tmp_frames
[i
]->linesize
[1],
1217 pre_input
.f
->data
[1],
1218 pre_input
.f
->linesize
[1],
1219 c
->width
>> 1, c
->height
>> 1);
1220 s
->mpvencdsp
.shrink
[scale
](s
->tmp_frames
[i
]->data
[2],
1221 s
->tmp_frames
[i
]->linesize
[2],
1222 pre_input
.f
->data
[2],
1223 pre_input
.f
->linesize
[2],
1224 c
->width
>> 1, c
->height
>> 1);
1228 for (j
= 0; j
< s
->max_b_frames
+ 1; j
++) {
1231 if (!s
->input_picture
[j
])
1234 c
->error
[0] = c
->error
[1] = c
->error
[2] = 0;
1236 s
->tmp_frames
[0]->pict_type
= AV_PICTURE_TYPE_I
;
1237 s
->tmp_frames
[0]->quality
= 1 * FF_QP2LAMBDA
;
1239 out_size
= encode_frame(c
, s
->tmp_frames
[0]);
1241 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1243 for (i
= 0; i
< s
->max_b_frames
+ 1; i
++) {
1244 int is_p
= i
% (j
+ 1) == j
|| i
== s
->max_b_frames
;
1246 s
->tmp_frames
[i
+ 1]->pict_type
= is_p ?
1247 AV_PICTURE_TYPE_P
: AV_PICTURE_TYPE_B
;
1248 s
->tmp_frames
[i
+ 1]->quality
= is_p ? p_lambda
: b_lambda
;
1250 out_size
= encode_frame(c
, s
->tmp_frames
[i
+ 1]);
1252 rd
+= (out_size
* lambda2
) >> (FF_LAMBDA_SHIFT
- 3);
1255 /* get the delayed frames */
1257 out_size
= encode_frame(c
, NULL
);
1258 rd
+= (out_size
* lambda2
) >> (FF_LAMBDA_SHIFT
- 3);
1261 rd
+= c
->error
[0] + c
->error
[1] + c
->error
[2];
1272 return best_b_count
;
1275 static int select_input_picture(MpegEncContext
*s
)
1279 for (i
= 1; i
< MAX_PICTURE_COUNT
; i
++)
1280 s
->reordered_input_picture
[i
- 1] = s
->reordered_input_picture
[i
];
1281 s
->reordered_input_picture
[MAX_PICTURE_COUNT
- 1] = NULL
;
1283 /* set next picture type & ordering */
1284 if (!s
->reordered_input_picture
[0] && s
->input_picture
[0]) {
1285 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1286 !s
->next_picture_ptr
|| s
->intra_only
) {
1287 s
->reordered_input_picture
[0] = s
->input_picture
[0];
1288 s
->reordered_input_picture
[0]->f
->pict_type
= AV_PICTURE_TYPE_I
;
1289 s
->reordered_input_picture
[0]->f
->coded_picture_number
=
1290 s
->coded_picture_number
++;
1294 if (s
->avctx
->frame_skip_threshold
|| s
->avctx
->frame_skip_factor
) {
1295 if (s
->picture_in_gop_number
< s
->gop_size
&&
1296 skip_check(s
, s
->input_picture
[0], s
->next_picture_ptr
)) {
1297 // FIXME check that te gop check above is +-1 correct
1298 av_frame_unref(s
->input_picture
[0]->f
);
1301 ff_vbv_update(s
, 0);
1307 if (s
->avctx
->flags
& AV_CODEC_FLAG_PASS2
) {
1308 for (i
= 0; i
< s
->max_b_frames
+ 1; i
++) {
1309 int pict_num
= s
->input_picture
[0]->f
->display_picture_number
+ i
;
1311 if (pict_num
>= s
->rc_context
.num_entries
)
1313 if (!s
->input_picture
[i
]) {
1314 s
->rc_context
.entry
[pict_num
- 1].new_pict_type
= AV_PICTURE_TYPE_P
;
1318 s
->input_picture
[i
]->f
->pict_type
=
1319 s
->rc_context
.entry
[pict_num
].new_pict_type
;
1323 if (s
->avctx
->b_frame_strategy
== 0) {
1324 b_frames
= s
->max_b_frames
;
1325 while (b_frames
&& !s
->input_picture
[b_frames
])
1327 } else if (s
->avctx
->b_frame_strategy
== 1) {
1328 for (i
= 1; i
< s
->max_b_frames
+ 1; i
++) {
1329 if (s
->input_picture
[i
] &&
1330 s
->input_picture
[i
]->b_frame_score
== 0) {
1331 s
->input_picture
[i
]->b_frame_score
=
1333 s
->input_picture
[i
]->f
->data
[0],
1334 s
->input_picture
[i
- 1]->f
->data
[0],
1338 for (i
= 0; i
< s
->max_b_frames
+ 1; i
++) {
1339 if (!s
->input_picture
[i
] ||
1340 s
->input_picture
[i
]->b_frame_score
- 1 >
1341 s
->mb_num
/ s
->avctx
->b_sensitivity
)
1345 b_frames
= FFMAX(0, i
- 1);
1348 for (i
= 0; i
< b_frames
+ 1; i
++) {
1349 s
->input_picture
[i
]->b_frame_score
= 0;
1351 } else if (s
->avctx
->b_frame_strategy
== 2) {
1352 b_frames
= estimate_best_b_count(s
);
1354 av_log(s
->avctx
, AV_LOG_ERROR
, "illegal b frame strategy\n");
1360 for (i
= b_frames
- 1; i
>= 0; i
--) {
1361 int type
= s
->input_picture
[i
]->f
->pict_type
;
1362 if (type
&& type
!= AV_PICTURE_TYPE_B
)
1365 if (s
->input_picture
[b_frames
]->f
->pict_type
== AV_PICTURE_TYPE_B
&&
1366 b_frames
== s
->max_b_frames
) {
1367 av_log(s
->avctx
, AV_LOG_ERROR
,
1368 "warning, too many b frames in a row\n");
1371 if (s
->picture_in_gop_number
+ b_frames
>= s
->gop_size
) {
1372 if ((s
->mpv_flags
& FF_MPV_FLAG_STRICT_GOP
) &&
1373 s
->gop_size
> s
->picture_in_gop_number
) {
1374 b_frames
= s
->gop_size
- s
->picture_in_gop_number
- 1;
1376 if (s
->avctx
->flags
& AV_CODEC_FLAG_CLOSED_GOP
)
1378 s
->input_picture
[b_frames
]->f
->pict_type
= AV_PICTURE_TYPE_I
;
1382 if ((s
->avctx
->flags
& AV_CODEC_FLAG_CLOSED_GOP
) && b_frames
&&
1383 s
->input_picture
[b_frames
]->f
->pict_type
== AV_PICTURE_TYPE_I
)
1386 s
->reordered_input_picture
[0] = s
->input_picture
[b_frames
];
1387 if (s
->reordered_input_picture
[0]->f
->pict_type
!= AV_PICTURE_TYPE_I
)
1388 s
->reordered_input_picture
[0]->f
->pict_type
= AV_PICTURE_TYPE_P
;
1389 s
->reordered_input_picture
[0]->f
->coded_picture_number
=
1390 s
->coded_picture_number
++;
1391 for (i
= 0; i
< b_frames
; i
++) {
1392 s
->reordered_input_picture
[i
+ 1] = s
->input_picture
[i
];
1393 s
->reordered_input_picture
[i
+ 1]->f
->pict_type
=
1395 s
->reordered_input_picture
[i
+ 1]->f
->coded_picture_number
=
1396 s
->coded_picture_number
++;
1401 ff_mpeg_unref_picture(s
->avctx
, &s
->new_picture
);
1403 if (s
->reordered_input_picture
[0]) {
1404 s
->reordered_input_picture
[0]->reference
=
1405 s
->reordered_input_picture
[0]->f
->pict_type
!=
1406 AV_PICTURE_TYPE_B ?
3 : 0;
1408 if ((ret
= ff_mpeg_ref_picture(s
->avctx
, &s
->new_picture
, s
->reordered_input_picture
[0])))
1411 if (s
->reordered_input_picture
[0]->shared
|| s
->avctx
->rc_buffer_size
) {
1412 // input is a shared pix, so we can't modifiy it -> alloc a new
1413 // one & ensure that the shared one is reuseable
1416 int i
= ff_find_unused_picture(s
->avctx
, s
->picture
, 0);
1419 pic
= &s
->picture
[i
];
1421 pic
->reference
= s
->reordered_input_picture
[0]->reference
;
1422 if (alloc_picture(s
, pic
, 0) < 0) {
1426 ret
= av_frame_copy_props(pic
->f
, s
->reordered_input_picture
[0]->f
);
1430 /* mark us unused / free shared pic */
1431 av_frame_unref(s
->reordered_input_picture
[0]->f
);
1432 s
->reordered_input_picture
[0]->shared
= 0;
1434 s
->current_picture_ptr
= pic
;
1436 // input is not a shared pix -> reuse buffer for current_pix
1437 s
->current_picture_ptr
= s
->reordered_input_picture
[0];
1438 for (i
= 0; i
< 4; i
++) {
1439 s
->new_picture
.f
->data
[i
] += INPLACE_OFFSET
;
1442 ff_mpeg_unref_picture(s
->avctx
, &s
->current_picture
);
1443 if ((ret
= ff_mpeg_ref_picture(s
->avctx
, &s
->current_picture
,
1444 s
->current_picture_ptr
)) < 0)
1447 s
->picture_number
= s
->new_picture
.f
->display_picture_number
;
1452 static void frame_end(MpegEncContext
*s
)
1456 if (s
->unrestricted_mv
&&
1457 s
->current_picture
.reference
&&
1459 const AVPixFmtDescriptor
*desc
= av_pix_fmt_desc_get(s
->avctx
->pix_fmt
);
1460 int hshift
= desc
->log2_chroma_w
;
1461 int vshift
= desc
->log2_chroma_h
;
1462 s
->mpvencdsp
.draw_edges(s
->current_picture
.f
->data
[0], s
->linesize
,
1463 s
->h_edge_pos
, s
->v_edge_pos
,
1464 EDGE_WIDTH
, EDGE_WIDTH
,
1465 EDGE_TOP
| EDGE_BOTTOM
);
1466 s
->mpvencdsp
.draw_edges(s
->current_picture
.f
->data
[1], s
->uvlinesize
,
1467 s
->h_edge_pos
>> hshift
,
1468 s
->v_edge_pos
>> vshift
,
1469 EDGE_WIDTH
>> hshift
,
1470 EDGE_WIDTH
>> vshift
,
1471 EDGE_TOP
| EDGE_BOTTOM
);
1472 s
->mpvencdsp
.draw_edges(s
->current_picture
.f
->data
[2], s
->uvlinesize
,
1473 s
->h_edge_pos
>> hshift
,
1474 s
->v_edge_pos
>> vshift
,
1475 EDGE_WIDTH
>> hshift
,
1476 EDGE_WIDTH
>> vshift
,
1477 EDGE_TOP
| EDGE_BOTTOM
);
1482 s
->last_pict_type
= s
->pict_type
;
1483 s
->last_lambda_for
[s
->pict_type
] = s
->current_picture_ptr
->f
->quality
;
1484 if (s
->pict_type
!= AV_PICTURE_TYPE_B
)
1485 s
->last_non_b_pict_type
= s
->pict_type
;
1488 /* release non-reference frames */
1489 for (i
= 0; i
< MAX_PICTURE_COUNT
; i
++) {
1490 if (!s
->picture
[i
].reference
)
1491 ff_mpeg_unref_picture(s
->avctx
, &s
->picture
[i
]);
1495 #if FF_API_CODED_FRAME
1496 FF_DISABLE_DEPRECATION_WARNINGS
1497 av_frame_copy_props(s
->avctx
->coded_frame
, s
->current_picture
.f
);
1498 FF_ENABLE_DEPRECATION_WARNINGS
1500 #if FF_API_ERROR_FRAME
1501 FF_DISABLE_DEPRECATION_WARNINGS
1502 memcpy(s
->current_picture
.f
->error
, s
->current_picture
.encoding_error
,
1503 sizeof(s
->current_picture
.encoding_error
));
1504 FF_ENABLE_DEPRECATION_WARNINGS
1508 static void update_noise_reduction(MpegEncContext
*s
)
1512 for (intra
= 0; intra
< 2; intra
++) {
1513 if (s
->dct_count
[intra
] > (1 << 16)) {
1514 for (i
= 0; i
< 64; i
++) {
1515 s
->dct_error_sum
[intra
][i
] >>= 1;
1517 s
->dct_count
[intra
] >>= 1;
1520 for (i
= 0; i
< 64; i
++) {
1521 s
->dct_offset
[intra
][i
] = (s
->avctx
->noise_reduction
*
1522 s
->dct_count
[intra
] +
1523 s
->dct_error_sum
[intra
][i
] / 2) /
1524 (s
->dct_error_sum
[intra
][i
] + 1);
1529 static int frame_start(MpegEncContext
*s
)
1533 /* mark & release old frames */
1534 if (s
->pict_type
!= AV_PICTURE_TYPE_B
&& s
->last_picture_ptr
&&
1535 s
->last_picture_ptr
!= s
->next_picture_ptr
&&
1536 s
->last_picture_ptr
->f
->buf
[0]) {
1537 ff_mpeg_unref_picture(s
->avctx
, s
->last_picture_ptr
);
1540 s
->current_picture_ptr
->f
->pict_type
= s
->pict_type
;
1541 s
->current_picture_ptr
->f
->key_frame
= s
->pict_type
== AV_PICTURE_TYPE_I
;
1543 ff_mpeg_unref_picture(s
->avctx
, &s
->current_picture
);
1544 if ((ret
= ff_mpeg_ref_picture(s
->avctx
, &s
->current_picture
,
1545 s
->current_picture_ptr
)) < 0)
1548 if (s
->pict_type
!= AV_PICTURE_TYPE_B
) {
1549 s
->last_picture_ptr
= s
->next_picture_ptr
;
1551 s
->next_picture_ptr
= s
->current_picture_ptr
;
1554 if (s
->last_picture_ptr
) {
1555 ff_mpeg_unref_picture(s
->avctx
, &s
->last_picture
);
1556 if (s
->last_picture_ptr
->f
->buf
[0] &&
1557 (ret
= ff_mpeg_ref_picture(s
->avctx
, &s
->last_picture
,
1558 s
->last_picture_ptr
)) < 0)
1561 if (s
->next_picture_ptr
) {
1562 ff_mpeg_unref_picture(s
->avctx
, &s
->next_picture
);
1563 if (s
->next_picture_ptr
->f
->buf
[0] &&
1564 (ret
= ff_mpeg_ref_picture(s
->avctx
, &s
->next_picture
,
1565 s
->next_picture_ptr
)) < 0)
1569 if (s
->picture_structure
!= PICT_FRAME
) {
1571 for (i
= 0; i
< 4; i
++) {
1572 if (s
->picture_structure
== PICT_BOTTOM_FIELD
) {
1573 s
->current_picture
.f
->data
[i
] +=
1574 s
->current_picture
.f
->linesize
[i
];
1576 s
->current_picture
.f
->linesize
[i
] *= 2;
1577 s
->last_picture
.f
->linesize
[i
] *= 2;
1578 s
->next_picture
.f
->linesize
[i
] *= 2;
1582 if (s
->mpeg_quant
|| s
->codec_id
== AV_CODEC_ID_MPEG2VIDEO
) {
1583 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg2_intra
;
1584 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg2_inter
;
1585 } else if (s
->out_format
== FMT_H263
|| s
->out_format
== FMT_H261
) {
1586 s
->dct_unquantize_intra
= s
->dct_unquantize_h263_intra
;
1587 s
->dct_unquantize_inter
= s
->dct_unquantize_h263_inter
;
1589 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg1_intra
;
1590 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg1_inter
;
1593 if (s
->dct_error_sum
) {
1594 assert(s
->avctx
->noise_reduction
&& s
->encoding
);
1595 update_noise_reduction(s
);
1601 int ff_mpv_encode_picture(AVCodecContext
*avctx
, AVPacket
*pkt
,
1602 const AVFrame
*pic_arg
, int *got_packet
)
1604 MpegEncContext
*s
= avctx
->priv_data
;
1605 int i
, stuffing_count
, ret
;
1606 int context_count
= s
->slice_context_count
;
1608 s
->picture_in_gop_number
++;
1610 if (load_input_picture(s
, pic_arg
) < 0)
1613 if (select_input_picture(s
) < 0) {
1618 if (s
->new_picture
.f
->data
[0]) {
1621 (ret
= ff_alloc_packet(pkt
, s
->mb_width
*s
->mb_height
*MAX_MB_BYTES
)) < 0)
1624 s
->mb_info_ptr
= av_packet_new_side_data(pkt
,
1625 AV_PKT_DATA_H263_MB_INFO
,
1626 s
->mb_width
*s
->mb_height
*12);
1627 s
->prev_mb_info
= s
->last_mb_info
= s
->mb_info_size
= 0;
1630 for (i
= 0; i
< context_count
; i
++) {
1631 int start_y
= s
->thread_context
[i
]->start_mb_y
;
1632 int end_y
= s
->thread_context
[i
]-> end_mb_y
;
1633 int h
= s
->mb_height
;
1634 uint8_t *start
= pkt
->data
+ (size_t)(((int64_t) pkt
->size
) * start_y
/ h
);
1635 uint8_t *end
= pkt
->data
+ (size_t)(((int64_t) pkt
->size
) * end_y
/ h
);
1637 init_put_bits(&s
->thread_context
[i
]->pb
, start
, end
- start
);
1640 s
->pict_type
= s
->new_picture
.f
->pict_type
;
1642 ret
= frame_start(s
);
1646 if (encode_picture(s
, s
->picture_number
) < 0)
1649 avctx
->header_bits
= s
->header_bits
;
1650 avctx
->mv_bits
= s
->mv_bits
;
1651 avctx
->misc_bits
= s
->misc_bits
;
1652 avctx
->i_tex_bits
= s
->i_tex_bits
;
1653 avctx
->p_tex_bits
= s
->p_tex_bits
;
1654 avctx
->i_count
= s
->i_count
;
1655 // FIXME f/b_count in avctx
1656 avctx
->p_count
= s
->mb_num
- s
->i_count
- s
->skip_count
;
1657 avctx
->skip_count
= s
->skip_count
;
1661 sd
= av_packet_new_side_data(pkt
, AV_PKT_DATA_QUALITY_FACTOR
,
1664 return AVERROR(ENOMEM
);
1665 *(int *)sd
= s
->current_picture
.f
->quality
;
1667 if (CONFIG_MJPEG_ENCODER
&& s
->out_format
== FMT_MJPEG
)
1668 ff_mjpeg_encode_picture_trailer(&s
->pb
, s
->header_bits
);
1670 if (avctx
->rc_buffer_size
) {
1671 RateControlContext
*rcc
= &s
->rc_context
;
1672 int max_size
= rcc
->buffer_index
* avctx
->rc_max_available_vbv_use
;
1674 if (put_bits_count(&s
->pb
) > max_size
&&
1675 s
->lambda
< s
->lmax
) {
1676 s
->next_lambda
= FFMAX(s
->lambda
+ 1, s
->lambda
*
1677 (s
->qscale
+ 1) / s
->qscale
);
1678 if (s
->adaptive_quant
) {
1680 for (i
= 0; i
< s
->mb_height
* s
->mb_stride
; i
++)
1681 s
->lambda_table
[i
] =
1682 FFMAX(s
->lambda_table
[i
] + 1,
1683 s
->lambda_table
[i
] * (s
->qscale
+ 1) /
1686 s
->mb_skipped
= 0; // done in frame_start()
1687 // done in encode_picture() so we must undo it
1688 if (s
->pict_type
== AV_PICTURE_TYPE_P
) {
1689 if (s
->flipflop_rounding
||
1690 s
->codec_id
== AV_CODEC_ID_H263P
||
1691 s
->codec_id
== AV_CODEC_ID_MPEG4
)
1692 s
->no_rounding
^= 1;
1694 if (s
->pict_type
!= AV_PICTURE_TYPE_B
) {
1695 s
->time_base
= s
->last_time_base
;
1696 s
->last_non_b_time
= s
->time
- s
->pp_time
;
1698 for (i
= 0; i
< context_count
; i
++) {
1699 PutBitContext
*pb
= &s
->thread_context
[i
]->pb
;
1700 init_put_bits(pb
, pb
->buf
, pb
->buf_end
- pb
->buf
);
1705 assert(s
->avctx
->rc_max_rate
);
1708 if (s
->avctx
->flags
& AV_CODEC_FLAG_PASS1
)
1709 ff_write_pass1_stats(s
);
1711 for (i
= 0; i
< 4; i
++) {
1712 s
->current_picture_ptr
->encoding_error
[i
] = s
->current_picture
.encoding_error
[i
];
1713 avctx
->error
[i
] += s
->current_picture_ptr
->encoding_error
[i
];
1716 if (s
->avctx
->flags
& AV_CODEC_FLAG_PASS1
)
1717 assert(avctx
->header_bits
+ avctx
->mv_bits
+ avctx
->misc_bits
+
1718 avctx
->i_tex_bits
+ avctx
->p_tex_bits
==
1719 put_bits_count(&s
->pb
));
1720 flush_put_bits(&s
->pb
);
1721 s
->frame_bits
= put_bits_count(&s
->pb
);
1723 stuffing_count
= ff_vbv_update(s
, s
->frame_bits
);
1724 if (stuffing_count
) {
1725 if (s
->pb
.buf_end
- s
->pb
.buf
- (put_bits_count(&s
->pb
) >> 3) <
1726 stuffing_count
+ 50) {
1727 av_log(s
->avctx
, AV_LOG_ERROR
, "stuffing too large\n");
1731 switch (s
->codec_id
) {
1732 case AV_CODEC_ID_MPEG1VIDEO
:
1733 case AV_CODEC_ID_MPEG2VIDEO
:
1734 while (stuffing_count
--) {
1735 put_bits(&s
->pb
, 8, 0);
1738 case AV_CODEC_ID_MPEG4
:
1739 put_bits(&s
->pb
, 16, 0);
1740 put_bits(&s
->pb
, 16, 0x1C3);
1741 stuffing_count
-= 4;
1742 while (stuffing_count
--) {
1743 put_bits(&s
->pb
, 8, 0xFF);
1747 av_log(s
->avctx
, AV_LOG_ERROR
, "vbv buffer overflow\n");
1749 flush_put_bits(&s
->pb
);
1750 s
->frame_bits
= put_bits_count(&s
->pb
);
1753 /* update mpeg1/2 vbv_delay for CBR */
1754 if (s
->avctx
->rc_max_rate
&&
1755 s
->avctx
->rc_min_rate
== s
->avctx
->rc_max_rate
&&
1756 s
->out_format
== FMT_MPEG1
&&
1757 90000LL * (avctx
->rc_buffer_size
- 1) <=
1758 s
->avctx
->rc_max_rate
* 0xFFFFLL
) {
1759 int vbv_delay
, min_delay
;
1760 double inbits
= s
->avctx
->rc_max_rate
*
1761 av_q2d(s
->avctx
->time_base
);
1762 int minbits
= s
->frame_bits
- 8 *
1763 (s
->vbv_delay_ptr
- s
->pb
.buf
- 1);
1764 double bits
= s
->rc_context
.buffer_index
+ minbits
- inbits
;
1767 av_log(s
->avctx
, AV_LOG_ERROR
,
1768 "Internal error, negative bits\n");
1770 assert(s
->repeat_first_field
== 0);
1772 vbv_delay
= bits
* 90000 / s
->avctx
->rc_max_rate
;
1773 min_delay
= (minbits
* 90000LL + s
->avctx
->rc_max_rate
- 1) /
1774 s
->avctx
->rc_max_rate
;
1776 vbv_delay
= FFMAX(vbv_delay
, min_delay
);
1778 assert(vbv_delay
< 0xFFFF);
1780 s
->vbv_delay_ptr
[0] &= 0xF8;
1781 s
->vbv_delay_ptr
[0] |= vbv_delay
>> 13;
1782 s
->vbv_delay_ptr
[1] = vbv_delay
>> 5;
1783 s
->vbv_delay_ptr
[2] &= 0x07;
1784 s
->vbv_delay_ptr
[2] |= vbv_delay
<< 3;
1785 avctx
->vbv_delay
= vbv_delay
* 300;
1787 s
->total_bits
+= s
->frame_bits
;
1788 avctx
->frame_bits
= s
->frame_bits
;
1790 pkt
->pts
= s
->current_picture
.f
->pts
;
1791 if (!s
->low_delay
&& s
->pict_type
!= AV_PICTURE_TYPE_B
) {
1792 if (!s
->current_picture
.f
->coded_picture_number
)
1793 pkt
->dts
= pkt
->pts
- s
->dts_delta
;
1795 pkt
->dts
= s
->reordered_pts
;
1796 s
->reordered_pts
= pkt
->pts
;
1798 pkt
->dts
= pkt
->pts
;
1799 if (s
->current_picture
.f
->key_frame
)
1800 pkt
->flags
|= AV_PKT_FLAG_KEY
;
1802 av_packet_shrink_side_data(pkt
, AV_PKT_DATA_H263_MB_INFO
, s
->mb_info_size
);
1806 assert((s
->frame_bits
& 7) == 0);
1808 pkt
->size
= s
->frame_bits
/ 8;
1809 *got_packet
= !!pkt
->size
;
1813 static inline void dct_single_coeff_elimination(MpegEncContext
*s
,
1814 int n
, int threshold
)
1816 static const char tab
[64] = {
1817 3, 2, 2, 1, 1, 1, 1, 1,
1818 1, 1, 1, 1, 1, 1, 1, 1,
1819 1, 1, 1, 1, 1, 1, 1, 1,
1820 0, 0, 0, 0, 0, 0, 0, 0,
1821 0, 0, 0, 0, 0, 0, 0, 0,
1822 0, 0, 0, 0, 0, 0, 0, 0,
1823 0, 0, 0, 0, 0, 0, 0, 0,
1824 0, 0, 0, 0, 0, 0, 0, 0
1829 int16_t *block
= s
->block
[n
];
1830 const int last_index
= s
->block_last_index
[n
];
1833 if (threshold
< 0) {
1835 threshold
= -threshold
;
1839 /* Are all we could set to zero already zero? */
1840 if (last_index
<= skip_dc
- 1)
1843 for (i
= 0; i
<= last_index
; i
++) {
1844 const int j
= s
->intra_scantable
.permutated
[i
];
1845 const int level
= FFABS(block
[j
]);
1847 if (skip_dc
&& i
== 0)
1851 } else if (level
> 1) {
1857 if (score
>= threshold
)
1859 for (i
= skip_dc
; i
<= last_index
; i
++) {
1860 const int j
= s
->intra_scantable
.permutated
[i
];
1864 s
->block_last_index
[n
] = 0;
1866 s
->block_last_index
[n
] = -1;
1869 static inline void clip_coeffs(MpegEncContext
*s
, int16_t *block
,
1873 const int maxlevel
= s
->max_qcoeff
;
1874 const int minlevel
= s
->min_qcoeff
;
1878 i
= 1; // skip clipping of intra dc
1882 for (; i
<= last_index
; i
++) {
1883 const int j
= s
->intra_scantable
.permutated
[i
];
1884 int level
= block
[j
];
1886 if (level
> maxlevel
) {
1889 } else if (level
< minlevel
) {
1897 if (overflow
&& s
->avctx
->mb_decision
== FF_MB_DECISION_SIMPLE
)
1898 av_log(s
->avctx
, AV_LOG_INFO
,
1899 "warning, clipping %d dct coefficients to %d..%d\n",
1900 overflow
, minlevel
, maxlevel
);
1903 static void get_visual_weight(int16_t *weight
, uint8_t *ptr
, int stride
)
1907 for (y
= 0; y
< 8; y
++) {
1908 for (x
= 0; x
< 8; x
++) {
1914 for (y2
= FFMAX(y
- 1, 0); y2
< FFMIN(8, y
+ 2); y2
++) {
1915 for (x2
= FFMAX(x
- 1, 0); x2
< FFMIN(8, x
+ 2); x2
++) {
1916 int v
= ptr
[x2
+ y2
* stride
];
1922 weight
[x
+ 8 * y
]= (36 * ff_sqrt(count
* sqr
- sum
* sum
)) / count
;
1927 static av_always_inline
void encode_mb_internal(MpegEncContext
*s
,
1928 int motion_x
, int motion_y
,
1929 int mb_block_height
,
1932 int16_t weight
[8][64];
1933 int16_t orig
[8][64];
1934 const int mb_x
= s
->mb_x
;
1935 const int mb_y
= s
->mb_y
;
1938 int dct_offset
= s
->linesize
* 8; // default for progressive frames
1939 uint8_t *ptr_y
, *ptr_cb
, *ptr_cr
;
1940 ptrdiff_t wrap_y
, wrap_c
;
1942 for (i
= 0; i
< mb_block_count
; i
++)
1943 skip_dct
[i
] = s
->skipdct
;
1945 if (s
->adaptive_quant
) {
1946 const int last_qp
= s
->qscale
;
1947 const int mb_xy
= mb_x
+ mb_y
* s
->mb_stride
;
1949 s
->lambda
= s
->lambda_table
[mb_xy
];
1952 if (!(s
->mpv_flags
& FF_MPV_FLAG_QP_RD
)) {
1953 s
->qscale
= s
->current_picture_ptr
->qscale_table
[mb_xy
];
1954 s
->dquant
= s
->qscale
- last_qp
;
1956 if (s
->out_format
== FMT_H263
) {
1957 s
->dquant
= av_clip(s
->dquant
, -2, 2);
1959 if (s
->codec_id
== AV_CODEC_ID_MPEG4
) {
1961 if (s
->pict_type
== AV_PICTURE_TYPE_B
) {
1962 if (s
->dquant
& 1 || s
->mv_dir
& MV_DIRECT
)
1965 if (s
->mv_type
== MV_TYPE_8X8
)
1971 ff_set_qscale(s
, last_qp
+ s
->dquant
);
1972 } else if (s
->mpv_flags
& FF_MPV_FLAG_QP_RD
)
        ff_set_qscale(s, s->qscale + s->dquant);

    wrap_y = s->linesize;
    wrap_c = s->uvlinesize;
    ptr_y  = s->new_picture.f->data[0] +
             (mb_y * 16 * wrap_y)              + mb_x * 16;
    ptr_cb = s->new_picture.f->data[1] +
             (mb_y * mb_block_height * wrap_c) + mb_x * 8;
    ptr_cr = s->new_picture.f->data[2] +
             (mb_y * mb_block_height * wrap_c) + mb_x * 8;

    if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
        uint8_t *ebuf = s->sc.edge_emu_buffer + 32;
        s->vdsp.emulated_edge_mc(ebuf, ptr_y,
                                 wrap_y, wrap_y,
                                 16, 16, mb_x * 16, mb_y * 16,
                                 s->width, s->height);
        ptr_y = ebuf;
        s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
                                 wrap_c, wrap_c,
                                 8, mb_block_height, mb_x * 8, mb_y * 8,
                                 s->width >> 1, s->height >> 1);
        ptr_cb = ebuf + 18 * wrap_y;
        s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
                                 wrap_c, wrap_c,
                                 8, mb_block_height, mb_x * 8, mb_y * 8,
                                 s->width >> 1, s->height >> 1);
        ptr_cr = ebuf + 18 * wrap_y + 8;
    }

    if (s->mb_intra) {
        if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
            int progressive_score, interlaced_score;

            s->interlaced_dct = 0;
            progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
                                s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
                                                     NULL, wrap_y, 8) - 400;

            if (progressive_score > 0) {
                interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
                                                        NULL, wrap_y * 2, 8) +
                                   s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
                                                        NULL, wrap_y * 2, 8);
                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    dct_offset = wrap_y;
                    wrap_y <<= 1;
                    if (s->chroma_format == CHROMA_422)
                        wrap_c <<= 1;
                }
            }
        }

        s->pdsp.get_pixels(s->block[0], ptr_y,                  wrap_y);
        s->pdsp.get_pixels(s->block[1], ptr_y + 8,              wrap_y);
        s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset,     wrap_y);
        s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);

        if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
            s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
            if (!s->chroma_y_shift) { /* 422 */
                s->pdsp.get_pixels(s->block[6],
                                   ptr_cb + (dct_offset >> 1), wrap_c);
                s->pdsp.get_pixels(s->block[7],
                                   ptr_cr + (dct_offset >> 1), wrap_c);
            }
        }
    } else {
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        uint8_t *dest_y, *dest_cb, *dest_cr;

        dest_y  = s->dest[0];
        dest_cb = s->dest[1];
        dest_cr = s->dest[2];

        if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
            op_pix  = s->hdsp.put_pixels_tab;
            op_qpix = s->qdsp.put_qpel_pixels_tab;
        } else {
            op_pix  = s->hdsp.put_no_rnd_pixels_tab;
            op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
        }

        if (s->mv_dir & MV_DIR_FORWARD) {
            ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
                          s->last_picture.f->data,
                          op_pix, op_qpix);
            op_pix  = s->hdsp.avg_pixels_tab;
            op_qpix = s->qdsp.avg_qpel_pixels_tab;
        }
        if (s->mv_dir & MV_DIR_BACKWARD) {
            ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
                          s->next_picture.f->data,
                          op_pix, op_qpix);
        }

        if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
            int progressive_score, interlaced_score;

            s->interlaced_dct = 0;
            progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
                                s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
                                                     ptr_y + wrap_y * 8,
                                                     wrap_y, 8) - 400;

            if (s->avctx->ildct_cmp == FF_CMP_VSSE)
                progressive_score -= 400;

            if (progressive_score > 0) {
                interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
                                                        wrap_y * 2, 8) +
                                   s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
                                                        ptr_y + wrap_y,
                                                        wrap_y * 2, 8);

                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    dct_offset = wrap_y;
                    wrap_y <<= 1;
                    if (s->chroma_format == CHROMA_422)
                        wrap_c <<= 1;
                }
            }
        }

        s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
        s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
        s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
                            dest_y + dct_offset, wrap_y);
        s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
                            dest_y + dct_offset + 8, wrap_y);

        if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
            s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
            if (!s->chroma_y_shift) { /* 422 */
                s->pdsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
                                    dest_cb + (dct_offset >> 1), wrap_c);
                s->pdsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
                                    dest_cr + (dct_offset >> 1), wrap_c);
            }
        }
        /* pre quantization */
        if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
                2 * s->qscale * s->qscale) {
            if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
                skip_dct[0] = 1;
            if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
                skip_dct[1] = 1;
            if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
                               wrap_y, 8) < 20 * s->qscale)
                skip_dct[2] = 1;
            if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
                               wrap_y, 8) < 20 * s->qscale)
                skip_dct[3] = 1;
            if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
                skip_dct[4] = 1;
            if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
                skip_dct[5] = 1;
            if (!s->chroma_y_shift) { /* 422 */
                if (s->mecc.sad[1](NULL, ptr_cb + (dct_offset >> 1),
                                   dest_cb + (dct_offset >> 1),
                                   wrap_c, 8) < 20 * s->qscale)
                    skip_dct[6] = 1;
                if (s->mecc.sad[1](NULL, ptr_cr + (dct_offset >> 1),
                                   dest_cr + (dct_offset >> 1),
                                   wrap_c, 8) < 20 * s->qscale)
                    skip_dct[7] = 1;
            }
        }
    }

    if (s->quantizer_noise_shaping) {
        if (!skip_dct[0])
            get_visual_weight(weight[0], ptr_y,                  wrap_y);
        if (!skip_dct[1])
            get_visual_weight(weight[1], ptr_y + 8,              wrap_y);
        if (!skip_dct[2])
            get_visual_weight(weight[2], ptr_y + dct_offset,     wrap_y);
        if (!skip_dct[3])
            get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
        if (!skip_dct[4])
            get_visual_weight(weight[4], ptr_cb,                 wrap_c);
        if (!skip_dct[5])
            get_visual_weight(weight[5], ptr_cr,                 wrap_c);
        if (!s->chroma_y_shift) { /* 422 */
            if (!skip_dct[6])
                get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
                                  wrap_c);
            if (!skip_dct[7])
                get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
                                  wrap_c);
        }
        memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
    }

    /* DCT & quantize */
    assert(s->out_format != FMT_MJPEG || s->qscale == 8);

    for (i = 0; i < mb_block_count; i++) {
        if (!skip_dct[i]) {
            int overflow;
            s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
            // FIXME we could decide to change to quantizer instead of
            // clipping
            // JS: I don't think that would be a good idea it could lower
            //     quality instead of improve it. Just INTRADC clipping
            //     deserves changes in quantizer
            if (overflow)
                clip_coeffs(s, s->block[i], s->block_last_index[i]);
        } else
            s->block_last_index[i] = -1;
    }
    if (s->quantizer_noise_shaping) {
        for (i = 0; i < mb_block_count; i++) {
            if (!skip_dct[i]) {
                s->block_last_index[i] =
                    dct_quantize_refine(s, s->block[i], weight[i],
                                        orig[i], i, s->qscale);
            }
        }
    }

    if (s->luma_elim_threshold && !s->mb_intra)
        for (i = 0; i < 4; i++)
            dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
    if (s->chroma_elim_threshold && !s->mb_intra)
        for (i = 4; i < mb_block_count; i++)
            dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);

    if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
        for (i = 0; i < mb_block_count; i++) {
            if (s->block_last_index[i] == -1)
                s->coded_score[i] = INT_MAX / 256;
        }
    }

    if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
        s->block_last_index[4] =
        s->block_last_index[5] = 0;
        s->block[4][0] =
        s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
    }

    // non c quantize code returns incorrect block_last_index FIXME
    if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
        for (i = 0; i < mb_block_count; i++) {
            int j;
            if (s->block_last_index[i] > 0) {
                for (j = 63; j > 0; j--) {
                    if (s->block[i][s->intra_scantable.permutated[j]])
                        break;
                }
                s->block_last_index[i] = j;
            }
        }
    }

    /* huffman encode */
    switch(s->codec_id){ //FIXME funct ptr could be slightly faster
    case AV_CODEC_ID_MPEG1VIDEO:
    case AV_CODEC_ID_MPEG2VIDEO:
        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
            ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MPEG4:
        if (CONFIG_MPEG4_ENCODER)
            ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MSMPEG4V2:
    case AV_CODEC_ID_MSMPEG4V3:
    case AV_CODEC_ID_WMV1:
        if (CONFIG_MSMPEG4_ENCODER)
            ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_WMV2:
        if (CONFIG_WMV2_ENCODER)
            ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_H261:
        if (CONFIG_H261_ENCODER)
            ff_h261_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_FLV1:
    case AV_CODEC_ID_RV10:
    case AV_CODEC_ID_RV20:
        if (CONFIG_H263_ENCODER)
            ff_h263_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MJPEG:
        if (CONFIG_MJPEG_ENCODER)
            ff_mjpeg_encode_mb(s, s->block);
        break;
    default:
        assert(0);
    }
}
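
/* encode_mb(): thin wrapper that picks the block layout for the current
 * chroma format: 4:2:0 uses 8-pixel-high chroma and 6 blocks per macroblock,
 * anything else (4:2:2 here) uses 16-pixel-high chroma and 8 blocks. */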
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
{
    if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y,  8, 6);
    else                                encode_mb_internal(s, motion_x, motion_y, 16, 8);
}
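
/* The two copy_context_*_encode() helpers below snapshot and restore the part
 * of MpegEncContext that the macroblock-level mode decision touches
 * (DC predictors, bit-count statistics, motion state, PutBitContexts),
 * so several candidate macroblock types can be encoded from the same
 * starting state and the best one kept. */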
static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
    int i;

    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    d->mb_skip_run = s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits    = s->mv_bits;
    d->i_tex_bits = s->i_tex_bits;
    d->p_tex_bits = s->p_tex_bits;
    d->i_count    = s->i_count;
    d->f_count    = s->f_count;
    d->b_count    = s->b_count;
    d->skip_count = s->skip_count;
    d->misc_bits  = s->misc_bits;
    d->last_bits  = 0;

    d->mb_skipped = 0;
    d->qscale = s->qscale;
    d->dquant = s->dquant;

    d->esc3_level_length = s->esc3_level_length;
}
static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
    int i;

    memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    d->mb_skip_run = s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits    = s->mv_bits;
    d->i_tex_bits = s->i_tex_bits;
    d->p_tex_bits = s->p_tex_bits;
    d->i_count    = s->i_count;
    d->f_count    = s->f_count;
    d->b_count    = s->b_count;
    d->skip_count = s->skip_count;
    d->misc_bits  = s->misc_bits;

    d->mb_intra   = s->mb_intra;
    d->mb_skipped = s->mb_skipped;
    d->mv_type    = s->mv_type;
    d->mv_dir     = s->mv_dir;
    d->pb = s->pb;
    if(s->data_partitioning){
        d->pb2    = s->pb2;
        d->tex_pb = s->tex_pb;
    }
    d->block = s->block;
    for(i=0; i<8; i++)
        d->block_last_index[i] = s->block_last_index[i];
    d->interlaced_dct = s->interlaced_dct;
    d->qscale = s->qscale;

    d->esc3_level_length = s->esc3_level_length;
}
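
/* encode_mb_hq(): encode one candidate macroblock type into a scratch
 * PutBitContext and keep it only if it is cheaper than the best so far.
 * The score is the bit count, or bits * lambda2 plus the SSE distortion
 * when mb_decision == FF_MB_DECISION_RD. */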
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                                PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                                int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    copy_context_before_encode(s, backup, type);

    s->block = s->blocks[*next_block];
    s->pb    = pb[*next_block];
    if(s->data_partitioning){
        s->pb2    = pb2   [*next_block];
        s->tex_pb = tex_pb[*next_block];
    }

    if(*next_block){
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->sc.rd_scratchpad;
        s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
        assert(s->linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    score = put_bits_count(&s->pb);
    if(s->data_partitioning){
        score += put_bits_count(&s->pb2);
        score += put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        ff_mpv_decode_mb(s, s->block);

        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score < *dmin){
        *dmin = score;
        *next_block ^= 1;

        copy_context_after_encode(best, s, type);
    }
}
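
/* sse(): sum of squared differences between two blocks; uses the optimized
 * mecc.sse[] functions for the common 16x16 and 8x8 sizes and a scalar
 * fallback (via the squares table) for partial blocks at frame borders. */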
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
    uint32_t *sq = ff_square_tab + 256;
    int acc = 0;
    int x, y;

    if(w==16 && h==16)
        return s->mecc.sse[0](NULL, src1, src2, stride, 16);
    else if(w==8 && h==8)
        return s->mecc.sse[1](NULL, src1, src2, stride, 8);

    for(y=0; y<h; y++){
        for(x=0; x<w; x++){
            acc += sq[src1[x + y*stride] - src2[x + y*stride]];
        }
    }

    assert(acc >= 0);

    return acc;
}
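
/* sse_mb(): distortion of the current macroblock against the source frame,
 * using NSSE or SSE on full 16x16 macroblocks and the scalar sse() helper
 * for clipped macroblocks at the right/bottom edge. */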
static int sse_mb(MpegEncContext *s){
    int w = 16;
    int h = 16;

    if(s->mb_x*16 + 16 > s->width ) w = s->width  - s->mb_x*16;
    if(s->mb_y*16 + 16 > s->height) h = s->height - s->mb_y*16;

    if(w==16 && h==16)
        if(s->avctx->mb_cmp == FF_CMP_NSSE){
            return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
                   s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
                   s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
        }else{
            return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
                   s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
                   s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
        }
    else
        return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,   s->dest[0], w,    h,    s->linesize)
               +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8, s->dest[1], w>>1, h>>1, s->uvlinesize)
               +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8, s->dest[2], w>>1, h>>1, s->uvlinesize);
}
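
/* Slice-threaded motion pre-pass: runs ff_pre_estimate_p_frame_motion()
 * over the macroblocks of this slice in reverse scan order. */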
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s = *(void**)arg;

    s->me.pre_pass = 1;
    s->me.dia_size = s->avctx->pre_dia_size;
    s->first_slice_line = 1;
    for(s->mb_y = s->end_mb_y - 1; s->mb_y >= s->start_mb_y; s->mb_y--) {
        for(s->mb_x = s->mb_width - 1; s->mb_x >= 0; s->mb_x--) {
            ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line = 0;
    }

    s->me.pre_pass = 0;

    return 0;
}
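
/* Slice-threaded motion estimation: B-frames use ff_estimate_b_frame_motion(),
 * everything else ff_estimate_p_frame_motion(); results are stored in the
 * context for the later encoding pass. */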
static int estimate_motion_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s = *(void**)arg;

    s->me.dia_size = s->avctx->dia_size;
    s->first_slice_line = 1;
    for(s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0; //for block init below
        ff_init_block_index(s);
        for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
            s->block_index[0] += 2;
            s->block_index[1] += 2;
            s->block_index[2] += 2;
            s->block_index[3] += 2;

            /* compute motion vector & mb_type and store in context */
            if(s->pict_type == AV_PICTURE_TYPE_B)
                ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
            else
                ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line = 0;
    }
    return 0;
}
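
/* Slice-threaded variance pass: computes per-macroblock luma variance and
 * mean (used by adaptive quantization and rate control) from pix_sum and
 * pix_norm1 of each 16x16 block. */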
static int mb_var_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s = *(void**)arg;
    int mb_x, mb_y;

    for(mb_y = s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int xx = mb_x * 16;
            int yy = mb_y * 16;
            uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
            int varc;
            int sum = s->mpvencdsp.pix_sum(pix, s->linesize);

            varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
                    (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;

            s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
            s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum + 128) >> 8;
            s->me.mb_var_sum_temp += varc;
        }
    }
    return 0;
}
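
/* write_slice_end(): finish the current slice: merge MPEG-4 partitions and
 * add stuffing (or MJPEG stuffing), byte-align and flush the bitstream, and
 * account the padding as misc_bits for two-pass rate control. */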
static void write_slice_end(MpegEncContext *s){
    if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4){
        if(s->partitioned_frame){
            ff_mpeg4_merge_partitions(s);
        }

        ff_mpeg4_stuffing(&s->pb);
    }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
        ff_mjpeg_encode_stuffing(&s->pb);
    }

    avpriv_align_put_bits(&s->pb);
    flush_put_bits(&s->pb);

    if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits += get_bits_diff(s);
}
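
/* write_mb_info(): fill one 12-byte entry of the macroblock-info side data
 * slot: bit offset of the macroblock, quantizer, GOB number, macroblock
 * address and the motion vector predictors. */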
static void write_mb_info(MpegEncContext *s)
{
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb);
    int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
    int gobn = s->mb_y / s->gob_index;
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
}
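
/* update_mb_info(): bookkeeping for the macroblock-info side data; grows the
 * reserved slot every s->mb_info bytes of bitstream and writes an entry via
 * write_mb_info() once a macroblock actually starts there. */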
static void update_mb_info(MpegEncContext *s, int startcode)
{
    if (!s->mb_info)
        return;
    if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }
    if (startcode) {
        s->prev_mb_info = put_bits_count(&s->pb)/8;
        /* This might have incremented mb_info_size above, and we return without
         * actually writing any info into that slot yet. But in that case,
         * this will be called again after the start code has been written, and
         * the mb info will be written then. */
        return;
    }

    s->last_mb_info = put_bits_count(&s->pb)/8;
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
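
/* encode_thread(): per-slice main encoding loop. It walks the macroblocks of
 * the slice, handles resync/GOB headers and packet callbacks, and either
 * encodes the single chosen macroblock type directly or, when several
 * candidate types are possible (or QP/CBP RD is enabled), tries each one via
 * encode_mb_hq() and keeps the cheapest. */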
static int encode_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s = *(void**)arg;
    int mb_x, mb_y, pdif = 0;
    int chr_h = 16 >> s->chroma_y_shift;
    int i, j;
    MpegEncContext best_s = { 0 }, backup_s;
    uint8_t bit_buf[2][MAX_MB_BYTES];
    uint8_t bit_buf2[2][MAX_MB_BYTES];
    uint8_t bit_buf_tex[2][MAX_MB_BYTES];
    PutBitContext pb[2], pb2[2], tex_pb[2];

    for(i=0; i<2; i++){
        init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
        init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
        init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
    }

    s->last_bits = put_bits_count(&s->pb);
    s->mv_bits = 0;
    s->misc_bits = 0;
    s->i_tex_bits = 0;
    s->p_tex_bits = 0;
    s->i_count = 0;
    s->f_count = 0;
    s->b_count = 0;
    s->skip_count = 0;

    for(i=0; i<3; i++){
        /* init last dc values */
        /* note: quant matrix value (8) is implied here */
        s->last_dc[i] = 128 << s->intra_dc_precision;

        s->current_picture.encoding_error[i] = 0;
    }
    s->mb_skip_run = 0;
    memset(s->last_mv, 0, sizeof(s->last_mv));
    switch(s->codec_id){
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_FLV1:
        if (CONFIG_H263_ENCODER)
            s->gob_index = H263_GOB_HEIGHT(s->height);
        break;
    case AV_CODEC_ID_MPEG4:
        if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
            ff_mpeg4_init_partitions(s);
        break;
    }

    s->resync_mb_x = 0;
    s->resync_mb_y = 0;
    s->first_slice_line = 1;
    s->ptr_lastgob = s->pb.buf;
    for(mb_y = s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        s->mb_x = 0;
        s->mb_y = mb_y;

        ff_set_qscale(s, s->qscale);
        ff_init_block_index(s);
        for(mb_x=0; mb_x < s->mb_width; mb_x++) {
            int xy = mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
            int mb_type = s->mb_type[xy];
            int dmin = INT_MAX;
            int dir;

            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }
            if(s->data_partitioning){
                if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->pb2   )>>3) < MAX_MB_BYTES
                   || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb)>>3) < MAX_MB_BYTES){
                    av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                    return -1;
                }
            }

            s->mb_x = mb_x;
            s->mb_y = mb_y;  // moved into loop, can get changed by H.261
            ff_update_block_index(s);

            if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
                ff_h261_reorder_mb_index(s);
                xy = s->mb_y*s->mb_stride + s->mb_x;
                mb_type = s->mb_type[xy];
            }
            /* write gob / video packet header  */
            if(s->rtp_mode){
                int current_packet_size, is_gob_start;

                current_packet_size = ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);

                is_gob_start = s->avctx->rtp_payload_size &&
                               current_packet_size >= s->avctx->rtp_payload_size &&
                               mb_y + mb_x > 0;

                if(s->start_mb_y == mb_y && mb_y > 0 && mb_x == 0) is_gob_start = 1;

                switch(s->codec_id){
                case AV_CODEC_ID_H263:
                case AV_CODEC_ID_H263P:
                    if(!s->h263_slice_structured)
                        if(s->mb_x || s->mb_y%s->gob_index) is_gob_start = 0;
                    break;
                case AV_CODEC_ID_MPEG2VIDEO:
                    if(s->mb_x==0 && s->mb_y!=0) is_gob_start = 1;
                case AV_CODEC_ID_MPEG1VIDEO:
                    if(s->mb_skip_run) is_gob_start = 0;
                    break;
                }

                if(is_gob_start){
                    if(s->start_mb_y != mb_y || mb_x != 0){
                        write_slice_end(s);

                        if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
                            ff_mpeg4_init_partitions(s);
                        }
                    }

                    assert((put_bits_count(&s->pb)&7) == 0);
                    current_packet_size = put_bits_ptr(&s->pb) - s->ptr_lastgob;

                    if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
                        int r = put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
                        int d = 100 / s->error_rate;
                        if(r % d == 0){
                            current_packet_size = 0;
                            s->pb.buf_ptr = s->ptr_lastgob;
                            assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
                        }
                    }

                    if (s->avctx->rtp_callback){
                        int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
                        s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
                    }
                    update_mb_info(s, 1);

                    switch(s->codec_id){
                    case AV_CODEC_ID_MPEG4:
                        if (CONFIG_MPEG4_ENCODER) {
                            ff_mpeg4_encode_video_packet_header(s);
                            ff_mpeg4_clean_buffers(s);
                        }
                        break;
                    case AV_CODEC_ID_MPEG1VIDEO:
                    case AV_CODEC_ID_MPEG2VIDEO:
                        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
                            ff_mpeg1_encode_slice_header(s);
                            ff_mpeg1_clean_buffers(s);
                        }
                        break;
                    case AV_CODEC_ID_H263:
                    case AV_CODEC_ID_H263P:
                        if (CONFIG_H263_ENCODER)
                            ff_h263_encode_gob_header(s, mb_y);
                        break;
                    }

                    if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
                        int bits = put_bits_count(&s->pb);
                        s->misc_bits += bits - s->last_bits;
                        s->last_bits = bits;
                    }

                    s->ptr_lastgob += current_packet_size;
                    s->first_slice_line = 1;
                    s->resync_mb_x = mb_x;
                    s->resync_mb_y = mb_y;
                }
            }
            if(  (s->resync_mb_x   == s->mb_x)
               && s->resync_mb_y+1 == s->mb_y){
                s->first_slice_line = 0;
            }

            s->mb_skipped = 0;
            s->dquant = 0; //only for QP_RD

            update_mb_info(s, 0);

            if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
                int next_block = 0;
                int pb_bits_count, pb2_bits_count, tex_pb_bits_count;

                copy_context_before_encode(&backup_s, s, -1);
                backup_s.pb = s->pb;
                best_s.data_partitioning = s->data_partitioning;
                best_s.partitioned_frame = s->partitioned_frame;
                if(s->data_partitioning){
                    backup_s.pb2    = s->pb2;
                    backup_s.tex_pb = s->tex_pb;
                }
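
                /* Try each candidate prediction mode flagged in mb_type; every
                 * encode_mb_hq() call encodes the macroblock with that mode and
                 * keeps it in best_s only if its rate(-distortion) score beats
                 * dmin. */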
                if(mb_type&CANDIDATE_MB_TYPE_INTER){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra = 0;
                    s->mv[0][0][0] = s->p_mv_table[xy][0];
                    s->mv[0][0][1] = s->p_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra = 0;
                    for(i=0; i<2; i++){
                        j = s->field_select[0][i] = s->p_field_select_table[i][xy];
                        s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
                        s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra = 0;
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_8X8;
                    s->mb_intra = 0;
                    for(i=0; i<4; i++){
                        s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
                        s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra = 0;
                    s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
                    s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra = 0;
                    s->mv[1][0][0] = s->b_back_mv_table[xy][0];
                    s->mv[1][0][1] = s->b_back_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra = 0;
                    s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
                    s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
                    s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
                    s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra = 0;
                    for(i=0; i<2; i++){
                        j = s->field_select[0][i] = s->b_field_select_table[0][i][xy];
                        s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
                        s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra = 0;
                    for(i=0; i<2; i++){
                        j = s->field_select[1][i] = s->b_field_select_table[1][i][xy