 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * The simplest mpeg encoder (well, it was the simplest!).
#include "libavutil/internal.h"
#include "libavutil/intmath.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "libavutil/timer.h"
#include "mpegvideo.h"
#include "mpegvideodata.h"
#include "mjpegenc_common.h"
#include "mpegutils.h"
#include "pixblockdsp.h"
#include "aandcttab.h"
#include "mpeg4video.h"
#include "bytestream.h"

#define QUANT_BIAS_SHIFT 8

#define QMAT_SHIFT_MMX 16
static int encode_picture(MpegEncContext *s, int picture_number);
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight,
                               int16_t *orig, int n, int qscale);
static int sse_mb(MpegEncContext *s);
static void denoise_dct_c(MpegEncContext *s, int16_t *block);
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n,
                                  int qscale, int *overflow);

static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
static uint8_t default_fcode_tab[MAX_MV * 2 + 1];

const AVOption ff_mpv_generic_options[] = {
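/* Precompute the per-qscale quantization multiplier tables (qmat, qmat16)
 * from a quantization matrix, for the forward DCT selected in s->fdsp. */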
void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64], const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
    FDCTDSPContext *fdsp = &s->fdsp;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
            fdsp->fdct == ff_faandct          ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
        } else if (fdsp->fdct == ff_fdct_ifast) {
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale * quant_matrix[j];
                /* 16 <= qscale * quant_matrix[i] <= 7905
                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
                 * 19952 <= x <= 249205026
                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
                 * 3444240 >= (1 << 36) / (x) >= 275 */
                qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / den);
            for (i = 0; i < 64; i++) {
                const int j = s->idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale * quant_matrix[j];
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
                 * Assume x = qscale * quant_matrix[i]
                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
                 * so 32768 >= (1 << 19) / (x) >= 67 */
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
                //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
                //                    (qscale * quant_matrix[i]);
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / den;

                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
                                qmat16[qscale][0][i]);

    for (i = intra; i < 64; i++) {
        if (fdsp->fdct == ff_fdct_ifast) {
            max = (8191LL * ff_aanscales[i]) >> 14;
        while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
            av_log(NULL, AV_LOG_INFO,
                   "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
static inline void update_qscale(MpegEncContext *s)
    s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
                (FF_LAMBDA_SHIFT + 7);
    s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);

    s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>

void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
    for (i = 0; i < 64; i++) {
        put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
 * init s->current_picture.qscale_table from s->lambda_table
void ff_init_qscale_tab(MpegEncContext *s)
    int8_t * const qscale_table = s->current_picture.qscale_table;

    for (i = 0; i < s->mb_num; i++) {
        unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
        int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
        qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
static void update_duplicate_context_after_me(MpegEncContext *dst,
#define COPY(a) dst->a = src->a
    COPY(current_picture);
    COPY(picture_in_gop_number);
    COPY(gop_picture_number);
    COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
    COPY(progressive_frame);    // FIXME don't set in encode_header
    COPY(partitioned_frame);    // FIXME don't set in encode_header
 * Set the given MpegEncContext to defaults for encoding.
 * The changed fields will not depend upon the prior state of the MpegEncContext.
static void mpv_encode_defaults(MpegEncContext *s)
    ff_mpv_common_defaults(s);

    for (i = -16; i < 16; i++) {
        default_fcode_tab[i + MAX_MV] = 1;
    s->me.mv_penalty = default_mv_penalty;
    s->fcode_tab     = default_fcode_tab;

    s->input_picture_number  = 0;
    s->picture_in_gop_number = 0;
/* init video encoder */
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;
    int i, ret, format_supported;

    mpv_encode_defaults(s);

    switch (avctx->codec_id) {
    case AV_CODEC_ID_MPEG2VIDEO:
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
            avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
            av_log(avctx, AV_LOG_ERROR,
                   "only YUV420 and YUV422 are supported\n");
    case AV_CODEC_ID_MJPEG:
        format_supported = 0;
        /* JPEG color space */
        if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
            avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
            (avctx->color_range == AVCOL_RANGE_JPEG &&
             (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
              avctx->pix_fmt == AV_PIX_FMT_YUV422P)))
            format_supported = 1;
        /* MPEG color space */
        else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
                 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
                  avctx->pix_fmt == AV_PIX_FMT_YUV422P))
            format_supported = 1;

        if (!format_supported) {
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUV420P:
        s->chroma_format = CHROMA_420;

    s->bit_rate = avctx->bit_rate;
    s->width    = avctx->width;
    s->height   = avctx->height;
    if (avctx->gop_size > 600 &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_ERROR,
               "Warning keyframe interval too large! reducing it ...\n");
        avctx->gop_size = 600;
    s->gop_size = avctx->gop_size;

    if (avctx->max_b_frames > MAX_B_FRAMES) {
        av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
               "is %d.\n", MAX_B_FRAMES);
    s->max_b_frames = avctx->max_b_frames;
    s->codec_id     = avctx->codec->id;
    s->strict_std_compliance = avctx->strict_std_compliance;
    s->quarter_sample     = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
    s->mpeg_quant         = avctx->mpeg_quant;
    s->rtp_mode           = !!avctx->rtp_payload_size;
    s->intra_dc_precision = avctx->intra_dc_precision;
    s->user_specified_pts = AV_NOPTS_VALUE;

    if (s->gop_size <= 1) {
#if FF_API_MOTION_EST
FF_DISABLE_DEPRECATION_WARNINGS
    s->me_method = avctx->me_method;
FF_ENABLE_DEPRECATION_WARNINGS

    s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);

FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->border_masking != 0.0)
        s->border_masking = avctx->border_masking;
FF_ENABLE_DEPRECATION_WARNINGS
    s->adaptive_quant = (s->avctx->lumi_masking ||
                         s->avctx->dark_masking ||
                         s->avctx->temporal_cplx_masking ||
                         s->avctx->spatial_cplx_masking  ||
                         s->avctx->p_masking             ||
                         (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&

    s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);

    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        av_log(avctx, AV_LOG_ERROR,
               "a vbv buffer size is needed, "
               "for encoding with a maximum bitrate\n");
    if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
    if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
    if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
        av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
    if (avctx->rc_max_rate &&
        avctx->rc_max_rate == avctx->bit_rate &&
        avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "impossible bitrate constraints, this will fail\n");
    if (avctx->rc_buffer_size &&
        avctx->bit_rate * (int64_t)avctx->time_base.num >
            avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
    if (!s->fixed_qscale &&
        avctx->bit_rate * av_q2d(avctx->time_base) >
            avctx->bit_rate_tolerance) {
        av_log(avctx, AV_LOG_ERROR,
               "bitrate tolerance too small for bitrate\n");
    if (s->avctx->rc_max_rate &&
        s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
        (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
        90000LL * (avctx->rc_buffer_size - 1) >
            s->avctx->rc_max_rate * 0xFFFFLL) {
        av_log(avctx, AV_LOG_INFO,
               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
               "specified vbv buffer is too large for the given bitrate!\n");
    if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
        s->codec_id != AV_CODEC_ID_FLV1) {
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
    if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
        av_log(avctx, AV_LOG_ERROR,
               "OBMC is only supported with simple mb decision\n");
    if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
    if (s->max_b_frames &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
    if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
         s->codec_id == AV_CODEC_ID_H263  ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->sample_aspect_ratio.num > 255 ||
         avctx->sample_aspect_ratio.den > 255)) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
    if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
        s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
    // FIXME mpeg2 uses that too
    if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR,
               "mpeg2 style quantization not supported by codec\n");
    if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        s->avctx->mb_decision != FF_MB_DECISION_RD) {
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
    if (s->avctx->scenechange_threshold < 1000000000 &&
        (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
        av_log(avctx, AV_LOG_ERROR,
               "closed gop with scene change detection are not supported yet, "
               "set threshold to 1000000000\n");
    if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
        if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
            av_log(avctx, AV_LOG_ERROR,
                   "low delay forcing is only available for mpeg2\n");
        if (s->max_b_frames != 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "b frames cannot be used with low delay\n");
    if (s->q_scale_type == 1) {
        if (avctx->qmax > 12) {
            av_log(avctx, AV_LOG_ERROR,
                   "non linear quant only supports qmax <= 12 currently\n");
    if (avctx->slices > 1 &&
        (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
        av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
        return AVERROR(EINVAL);
    if (s->avctx->thread_count > 1 &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
        (s->codec_id != AV_CODEC_ID_H263P)) {
        av_log(avctx, AV_LOG_ERROR,
               "multi threaded encoding not supported by codec\n");
    if (s->avctx->thread_count < 1) {
        av_log(avctx, AV_LOG_ERROR,
               "automatic thread number detection not supported by codec,"
    if (!avctx->time_base.den || !avctx->time_base.num) {
        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
    if (avctx->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
        av_log(avctx, AV_LOG_INFO,
               "notice: b_frame_strategy only affects the first pass\n");
        avctx->b_frame_strategy = 0;
    i = av_gcd(avctx->time_base.den, avctx->time_base.num);
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;

    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
        // (a + x * 3 / 8) / x
        s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
        s->inter_quant_bias = 0;
        s->intra_quant_bias = 0;
        s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));

#if FF_API_QUANT_BIAS
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias = avctx->intra_quant_bias;
    if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias = avctx->inter_quant_bias;
FF_ENABLE_DEPRECATION_WARNINGS

    if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
        s->avctx->time_base.den > (1 << 16) - 1) {
        av_log(avctx, AV_LOG_ERROR,
               "timebase %d/%d not supported by MPEG 4 standard, "
               "the maximum admitted value for the timebase denominator "
               "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
    s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
    switch (avctx->codec->id) {
    case AV_CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MJPEG:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        if (!CONFIG_MJPEG_ENCODER ||
            ff_mjpeg_encode_init(s) < 0)
    case AV_CODEC_ID_H261:
        if (!CONFIG_H261_ENCODER)
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for the "
                   "H.261 codec.\nValid sizes are 176x144, 352x288\n",
                   s->width, s->height);
        s->out_format = FMT_H261;
        s->rtp_mode   = 0; /* Sliced encoding not supported */
    case AV_CODEC_ID_H263:
        if (!CONFIG_H263_ENCODER)
        if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
                             s->width, s->height) == 8) {
            av_log(avctx, AV_LOG_INFO,
                   "The specified picture size of %dx%d is not valid for "
                   "the H.263 codec.\nValid sizes are 128x96, 176x144, "
                   "352x288, 704x576, and 1408x1152. "
                   "Try H.263+.\n", s->width, s->height);
        s->out_format = FMT_H263;
    case AV_CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->h263_aic        = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
        s->modified_quant  = s->h263_aic;
        s->loop_filter     = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
        s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
        /* These are just to be sure */
    case AV_CODEC_ID_FLV1:
        s->out_format      = FMT_H263;
        s->h263_flv        = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode        = 0; /* don't allow GOB */
    case AV_CODEC_ID_RV10:
        s->out_format = FMT_H263;
    case AV_CODEC_ID_RV20:
        s->out_format      = FMT_H263;
        s->modified_quant  = 1;
        s->unrestricted_mv = 0;
    case AV_CODEC_ID_MPEG4:
        s->out_format      = FMT_H263;
        s->unrestricted_mv = 1;
        s->low_delay       = s->max_b_frames ? 0 : 1;
        avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
    case AV_CODEC_ID_MSMPEG4V2:
        s->out_format      = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 2;
    case AV_CODEC_ID_MSMPEG4V3:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 3;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV1:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 4;
        s->flipflop_rounding = 1;
    case AV_CODEC_ID_WMV2:
        s->out_format        = FMT_H263;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 5;
        s->flipflop_rounding = 1;

    avctx->has_b_frames = !s->low_delay;
    s->progressive_frame    =
    s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
                                                AV_CODEC_FLAG_INTERLACED_ME) ||

    if (ff_mpv_common_init(s) < 0)
        ff_mpv_encode_init_x86(s);

    ff_fdctdsp_init(&s->fdsp, avctx);
    ff_me_cmp_init(&s->mecc, avctx);
    ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
    ff_pixblockdsp_init(&s->pdsp, avctx);
    ff_qpeldsp_init(&s->qdsp);

    if (s->msmpeg4_version) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                          2 * 2 * (MAX_LEVEL + 1) *
                          (MAX_RUN + 1) * 2 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,   64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,   64 * 32 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                      MAX_PICTURE_COUNT * sizeof(Picture *), fail);

    if (s->avctx->noise_reduction) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                          2 * 64 * sizeof(uint16_t), fail);
    if (CONFIG_H263_ENCODER)
        ff_h263dsp_init(&s->h263dsp);
    if (!s->dct_quantize)
        s->dct_quantize = ff_dct_quantize_c;
    s->denoise_dct       = denoise_dct_c;
    s->fast_dct_quantize = s->dct_quantize;
        s->dct_quantize = dct_quantize_trellis_c;

    if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
        s->chroma_qscale_table = ff_h263_chroma_qscale_table;

    if (s->slice_context_count > 1) {
        if (avctx->codec_id == AV_CODEC_ID_H263 || avctx->codec_id == AV_CODEC_ID_H263P)
            s->h263_slice_structured = 1;

    s->quant_precision = 5;

    ff_set_cmp(&s->mecc, s->mecc.ildct_cmp,      s->avctx->ildct_cmp);
    ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->avctx->frame_skip_cmp);

    if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
        ff_h263_encode_init(s);
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
        if ((ret = ff_msmpeg4_encode_init(s)) < 0)
    if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
        && s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);
    for (i = 0; i < 64; i++) {
        int j = s->idsp.idct_permutation[i];
        if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        if (s->avctx->intra_matrix)
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
        if (s->avctx->inter_matrix)
            s->inter_matrix[j] = s->avctx->inter_matrix[i];

    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, avctx->qmin,
        ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
                          s->inter_matrix, s->inter_quant_bias, avctx->qmin,
    if (ff_rate_control_init(s) < 0)

#if FF_API_ERROR_RATE
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->error_rate)
        s->error_rate = avctx->error_rate;
FF_ENABLE_DEPRECATION_WARNINGS;

#if FF_API_NORMALIZE_AQP
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
        s->mpv_flags |= FF_MPV_FLAG_NAQ;
FF_ENABLE_DEPRECATION_WARNINGS;

FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->flags & CODEC_FLAG_MV0)
        s->mpv_flags |= FF_MPV_FLAG_MV0;
FF_ENABLE_DEPRECATION_WARNINGS

FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->rc_qsquish != 0.0)
        s->rc_qsquish = avctx->rc_qsquish;
    if (avctx->rc_qmod_amp != 0.0)
        s->rc_qmod_amp = avctx->rc_qmod_amp;
    if (avctx->rc_qmod_freq)
        s->rc_qmod_freq = avctx->rc_qmod_freq;
    if (avctx->rc_buffer_aggressivity != 1.0)
        s->rc_buffer_aggressivity = avctx->rc_buffer_aggressivity;
    if (avctx->rc_initial_cplx != 0.0)
        s->rc_initial_cplx = avctx->rc_initial_cplx;
        s->lmin = avctx->lmin;
        s->lmax = avctx->lmax;
        s->rc_eq = av_strdup(avctx->rc_eq);
            return AVERROR(ENOMEM);
FF_ENABLE_DEPRECATION_WARNINGS
    if (avctx->b_frame_strategy == 2) {
        for (i = 0; i < s->max_b_frames + 2; i++) {
            s->tmp_frames[i] = av_frame_alloc();
            if (!s->tmp_frames[i])
                return AVERROR(ENOMEM);

            s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
            s->tmp_frames[i]->width  = s->width  >> avctx->brd_scale;
            s->tmp_frames[i]->height = s->height >> avctx->brd_scale;

            ret = av_frame_get_buffer(s->tmp_frames[i], 32);

    ff_mpv_encode_end(avctx);
    return AVERROR_UNKNOWN;
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;

    ff_rate_control_uninit(s);

    ff_mpv_common_end(s);
    if (CONFIG_MJPEG_ENCODER &&
        s->out_format == FMT_MJPEG)
        ff_mjpeg_encode_close(s);

    av_freep(&avctx->extradata);

    for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
        av_frame_free(&s->tmp_frames[i]);

    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);
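/* Sum of absolute differences between a 16x16 block and a constant reference value. */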
static int get_sae(uint8_t *src, int ref, int stride)
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++) {
            acc += FFABS(src[x + y * stride] - ref);
static int get_intra_count(MpegEncContext *s, uint8_t *src,
                           uint8_t *ref, int stride)
    for (y = 0; y < h; y += 16) {
        for (x = 0; x < w; x += 16) {
            int offset = x + y * stride;
            int sad  = s->mecc.sad[0](NULL, src + offset, ref + offset,
            int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
            int sae  = get_sae(src + offset, mean, stride);

            acc += sae + 500 < sad;
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
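/* Copy or reference the user-supplied frame into the encoder's input picture queue
 * and assign it a display picture number and pts. */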
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
    int i, display_picture_number = 0, ret;
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;

        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                    av_log(s->avctx, AV_LOG_ERROR,
                           "Error, Invalid timestamp=%"PRId64", "
                           "last=%"PRId64"\n", pts, s->user_specified_pts);
                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = time - last;
            s->user_specified_pts = pts;
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                pts = display_picture_number;
        if (!pic_arg->buf[0] ||
            pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
        if ((s->width & 15) || (s->height & 15))

        ff_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i = ff_find_unused_picture(s->avctx, s->picture, direct);
        pic = &s->picture[i];

            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
        ret = alloc_picture(s, pic, direct);

            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                int h_chroma_shift, v_chroma_shift;
                av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,

                for (i = 0; i < 3; i++) {
                    int src_stride = pic_arg->linesize[i];
                    int dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift    = i ? h_chroma_shift : 0;
                    int v_shift    = i ? v_chroma_shift : 0;
                    int w          = s->width  >> h_shift;
                    int h          = s->height >> v_shift;
                    uint8_t *src   = pic_arg->data[i];
                    uint8_t *dst   = pic->f->data[i];

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        memcpy(dst, src, src_stride * h);
                        uint8_t *dst2 = dst;
                            memcpy(dst2, src, w);
                    if ((s->width & 15) || (s->height & 15)) {
                        s->mpvencdsp.draw_edges(dst, dst_stride,

        ret = av_frame_copy_props(pic->f, pic_arg);

        pic->f->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg

    /* Flushing: When we have not received enough input frames,
     * ensure s->input_picture[0] contains the first picture */
    for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
        if (s->input_picture[flush_offset])

    if (flush_offset <= 1)
        encoding_delay = encoding_delay - flush_offset + 1;

    /* shift buffer entries */
    for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];

    s->input_picture[encoding_delay] = (Picture *) pic;
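/* Decide whether the current picture is similar enough to the reference
 * that it may be skipped, using the configured frame_skip comparison. */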
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
    int64_t score64 = 0;

    for (plane = 0; plane < 3; plane++) {
        const int stride = p->f->linesize[plane];
        const int bw     = plane ? 1 : 2;
        for (y = 0; y < s->mb_height * bw; y++) {
            for (x = 0; x < s->mb_width * bw; x++) {
                int off = p->shared ? 0 : 16;
                uint8_t *dptr = p->f->data[plane]   + 8 * (x + y * stride) + off;
                uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
                int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);

                switch (s->avctx->frame_skip_exp) {
                case 0: score    = FFMAX(score, v);           break;
                case 1: score   += FFABS(v);                  break;
                case 2: score   += v * v;                     break;
                case 3: score64 += FFABS(v * v * (int64_t)v); break;
                case 4: score64 += v * v * (int64_t)(v * v);  break;

    if (score64 < s->avctx->frame_skip_threshold)
    if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
static int encode_frame(AVCodecContext *c, AVFrame *frame)
    AVPacket pkt = { 0 };
    int ret, got_output;

    av_init_packet(&pkt);
    ret = avcodec_encode_video2(c, &pkt, frame, &got_output);

    av_packet_unref(&pkt);
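/* Estimate the best number of consecutive B-frames by encoding downscaled
 * copies of the queued input pictures with different B-frame patterns and
 * comparing the resulting rate-distortion cost. */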
static int estimate_best_b_count(MpegEncContext *s)
    AVCodec *codec     = avcodec_find_encoder(s->avctx->codec_id);
    AVCodecContext *c  = avcodec_alloc_context3(NULL);
    const int scale    = s->avctx->brd_scale;
    int i, j, out_size, p_lambda, b_lambda, lambda2;
    int64_t best_rd    = INT64_MAX;
    int best_b_count   = -1;

        return AVERROR(ENOMEM);
    assert(scale >= 0 && scale <= 3);

    //s->next_picture_ptr->quality;
    p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
    //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
    b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
    if (!b_lambda) // FIXME we should do this somewhere else
        b_lambda = p_lambda;
    lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>

    c->width        = s->width  >> scale;
    c->height       = s->height >> scale;
    c->flags        = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR;
    c->flags       |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
    c->mb_decision  = s->avctx->mb_decision;
    c->me_cmp       = s->avctx->me_cmp;
    c->mb_cmp       = s->avctx->mb_cmp;
    c->me_sub_cmp   = s->avctx->me_sub_cmp;
    c->pix_fmt      = AV_PIX_FMT_YUV420P;
    c->time_base    = s->avctx->time_base;
    c->max_b_frames = s->max_b_frames;

    if (avcodec_open2(c, codec, NULL) < 0)
    for (i = 0; i < s->max_b_frames + 2; i++) {
        Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
                                                s->next_picture_ptr;

        if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
            pre_input = *pre_input_ptr;

            if (!pre_input.shared && i) {
                pre_input.f->data[0] += INPLACE_OFFSET;
                pre_input.f->data[1] += INPLACE_OFFSET;
                pre_input.f->data[2] += INPLACE_OFFSET;

            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
                                       s->tmp_frames[i]->linesize[0],
                                       pre_input.f->data[0],
                                       pre_input.f->linesize[0],
                                       c->width, c->height);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
                                       s->tmp_frames[i]->linesize[1],
                                       pre_input.f->data[1],
                                       pre_input.f->linesize[1],
                                       c->width >> 1, c->height >> 1);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
                                       s->tmp_frames[i]->linesize[2],
                                       pre_input.f->data[2],
                                       pre_input.f->linesize[2],
                                       c->width >> 1, c->height >> 1);

    for (j = 0; j < s->max_b_frames + 1; j++) {
        if (!s->input_picture[j])

        c->error[0] = c->error[1] = c->error[2] = 0;

        s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
        s->tmp_frames[0]->quality   = 1 * FF_QP2LAMBDA;

        out_size = encode_frame(c, s->tmp_frames[0]);

        //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;

        for (i = 0; i < s->max_b_frames + 1; i++) {
            int is_p = i % (j + 1) == j || i == s->max_b_frames;

            s->tmp_frames[i + 1]->pict_type = is_p ?
                                              AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
            s->tmp_frames[i + 1]->quality   = is_p ? p_lambda : b_lambda;

            out_size = encode_frame(c, s->tmp_frames[i + 1]);

            rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);

        /* get the delayed frames */
            out_size = encode_frame(c, NULL);
            rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);

        rd += c->error[0] + c->error[1] + c->error[2];

    return best_b_count;
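/* Pick the next picture to encode from the input queue and decide picture
 * types and coding order (I/P/B reordering, frame skipping, GOP handling). */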
static int select_input_picture(MpegEncContext *s)
    for (i = 1; i < MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;

    /* set next picture type & ordering */
    if (!s->reordered_input_picture[0] && s->input_picture[0]) {
        if (/*s->picture_in_gop_number >= s->gop_size ||*/
            !s->next_picture_ptr || s->intra_only) {
            s->reordered_input_picture[0] = s->input_picture[0];
            s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
            s->reordered_input_picture[0]->f->coded_picture_number =
                s->coded_picture_number++;
            if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
                if (s->picture_in_gop_number < s->gop_size &&
                    skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
                    // FIXME check that the gop check above is +-1 correct
                    av_frame_unref(s->input_picture[0]->f);

                        ff_vbv_update(s, 0);
            if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    int pict_num = s->input_picture[0]->f->display_picture_number + i;

                    if (pict_num >= s->rc_context.num_entries)
                    if (!s->input_picture[i]) {
                        s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;

                    s->input_picture[i]->f->pict_type =
                        s->rc_context.entry[pict_num].new_pict_type;

            if (s->avctx->b_frame_strategy == 0) {
                b_frames = s->max_b_frames;
                while (b_frames && !s->input_picture[b_frames])
            } else if (s->avctx->b_frame_strategy == 1) {
                for (i = 1; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] &&
                        s->input_picture[i]->b_frame_score == 0) {
                        s->input_picture[i]->b_frame_score =
                                s->input_picture[i]->f->data[0],
                                s->input_picture[i - 1]->f->data[0],
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    if (!s->input_picture[i] ||
                        s->input_picture[i]->b_frame_score - 1 >
                            s->mb_num / s->avctx->b_sensitivity)
                b_frames = FFMAX(0, i - 1);

                for (i = 0; i < b_frames + 1; i++) {
                    s->input_picture[i]->b_frame_score = 0;
            } else if (s->avctx->b_frame_strategy == 2) {
                b_frames = estimate_best_b_count(s);
                av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");

            for (i = b_frames - 1; i >= 0; i--) {
                int type = s->input_picture[i]->f->pict_type;
                if (type && type != AV_PICTURE_TYPE_B)
            if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
                b_frames == s->max_b_frames) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "warning, too many b frames in a row\n");

            if (s->picture_in_gop_number + b_frames >= s->gop_size) {
                if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                    s->gop_size > s->picture_in_gop_number) {
                    b_frames = s->gop_size - s->picture_in_gop_number - 1;
                    if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
                    s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;

            if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
                s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)

            s->reordered_input_picture[0] = s->input_picture[b_frames];
            if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
                s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
            s->reordered_input_picture[0]->f->coded_picture_number =
                s->coded_picture_number++;
            for (i = 0; i < b_frames; i++) {
                s->reordered_input_picture[i + 1] = s->input_picture[i];
                s->reordered_input_picture[i + 1]->f->pict_type =
                s->reordered_input_picture[i + 1]->f->coded_picture_number =
                    s->coded_picture_number++;
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);

    if (s->reordered_input_picture[0]) {
        s->reordered_input_picture[0]->reference =
            s->reordered_input_picture[0]->f->pict_type !=
                AV_PICTURE_TYPE_B ? 3 : 0;

        if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))

        if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
            // input is a shared pix, so we can't modify it -> allocate a new
            // one & ensure that the shared one is reusable
            int i = ff_find_unused_picture(s->avctx, s->picture, 0);
            pic = &s->picture[i];

            pic->reference = s->reordered_input_picture[0]->reference;
            if (alloc_picture(s, pic, 0) < 0) {

            ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);

            /* mark us unused / free shared pic */
            av_frame_unref(s->reordered_input_picture[0]->f);
            s->reordered_input_picture[0]->shared = 0;

            s->current_picture_ptr = pic;
            // input is not a shared pix -> reuse buffer for current_pix
            s->current_picture_ptr = s->reordered_input_picture[0];
            for (i = 0; i < 4; i++) {
                s->new_picture.f->data[i] += INPLACE_OFFSET;

        ff_mpeg_unref_picture(s->avctx, &s->current_picture);
        if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                       s->current_picture_ptr)) < 0)

        s->picture_number = s->new_picture.f->display_picture_number;
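/* Finish the current frame: pad reference picture edges and update per-frame
 * encoder state (last picture type, last lambda, released pictures). */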
static void frame_end(MpegEncContext *s)
    if (s->unrestricted_mv &&
        s->current_picture.reference &&
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        s->mpvencdsp.draw_edges(s->current_picture.f->data[0], s->linesize,
                                s->h_edge_pos, s->v_edge_pos,
                                EDGE_WIDTH, EDGE_WIDTH,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->current_picture.f->data[1], s->uvlinesize,
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->current_picture.f->data[2], s->uvlinesize,
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);

    s->last_pict_type                = s->pict_type;
    s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f->quality;
    if (s->pict_type != AV_PICTURE_TYPE_B)
        s->last_non_b_pict_type = s->pict_type;

    /* release non-reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
FF_ENABLE_DEPRECATION_WARNINGS
#if FF_API_ERROR_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
           sizeof(s->current_picture.encoding_error));
FF_ENABLE_DEPRECATION_WARNINGS
static void update_noise_reduction(MpegEncContext *s)
    for (intra = 0; intra < 2; intra++) {
        if (s->dct_count[intra] > (1 << 16)) {
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            s->dct_count[intra] >>= 1;

        for (i = 0; i < 64; i++) {
            s->dct_offset[intra][i] = (s->avctx->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
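/* Prepare reference pictures and select the (un)quantization functions
 * before encoding the next frame. */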
static int frame_start(MpegEncContext *s)
    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);

    s->current_picture_ptr->f->pict_type = s->pict_type;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
            s->next_picture_ptr = s->current_picture_ptr;

    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->last_picture);
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->next_picture);
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)

    if (s->picture_structure != PICT_FRAME) {
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i]    *= 2;
            s->next_picture.f->linesize[i]    *= 2;

    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);
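/* Main encoding entry point: encode one input frame into a packet. */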
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                          const AVFrame *pic_arg, int *got_packet)
    MpegEncContext *s = avctx->priv_data;
    int i, stuffing_count, ret;
    int context_count = s->slice_context_count;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)

    if (select_input_picture(s) < 0) {

    if (s->new_picture.f->data[0]) {
            (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)

            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                 AV_PKT_DATA_H263_MB_INFO,
                                 s->mb_width*s->mb_height*12);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;

        for (i = 0; i < context_count; i++) {
            int start_y = s->thread_context[i]->start_mb_y;
            int   end_y = s->thread_context[i]->  end_mb_y;
            int h       = s->mb_height;
            uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
            uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);

            init_put_bits(&s->thread_context[i]->pb, start, end - start);

        s->pict_type = s->new_picture.f->pict_type;

        ret = frame_start(s);

        if (encode_picture(s, s->picture_number) < 0)

        avctx->header_bits = s->header_bits;
        avctx->mv_bits     = s->mv_bits;
        avctx->misc_bits   = s->misc_bits;
        avctx->i_tex_bits  = s->i_tex_bits;
        avctx->p_tex_bits  = s->p_tex_bits;
        avctx->i_count     = s->i_count;
        // FIXME f/b_count in avctx
        avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
        avctx->skip_count  = s->skip_count;
        sd = av_packet_new_side_data(pkt, AV_PKT_DATA_QUALITY_FACTOR,
            return AVERROR(ENOMEM);
        *(int *)sd = s->current_picture.f->quality;

        if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);

        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;

            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->lmax) {
                s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + 1,
                                  s->lambda_table[i] * (s->qscale + 1) /

                s->mb_skipped = 0;        // done in frame_start()
                // done in encode_picture() so we must undo it
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding             ||
                        s->codec_id == AV_CODEC_ID_H263P ||
                        s->codec_id == AV_CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base       = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                for (i = 0; i < context_count; i++) {
                    PutBitContext *pb = &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);

        assert(s->avctx->rc_max_rate);

        if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
            ff_write_pass1_stats(s);

        for (i = 0; i < 4; i++) {
            s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
            avctx->error[i] += s->current_picture_ptr->encoding_error[i];

        if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
            assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
                   avctx->i_tex_bits + avctx->p_tex_bits ==
                   put_bits_count(&s->pb));
        flush_put_bits(&s->pb);
        s->frame_bits = put_bits_count(&s->pb);
        stuffing_count = ff_vbv_update(s, s->frame_bits);
        if (stuffing_count) {
            if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
                    stuffing_count + 50) {
                av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");

            switch (s->codec_id) {
            case AV_CODEC_ID_MPEG1VIDEO:
            case AV_CODEC_ID_MPEG2VIDEO:
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
            case AV_CODEC_ID_MPEG4:
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
            flush_put_bits(&s->pb);
            s->frame_bits = put_bits_count(&s->pb);

        /* update mpeg1/2 vbv_delay for CBR */
        if (s->avctx->rc_max_rate                          &&
            s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
            s->out_format == FMT_MPEG1                     &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                s->avctx->rc_max_rate * 0xFFFFLL) {
            int vbv_delay, min_delay;
            double inbits  = s->avctx->rc_max_rate *
                             av_q2d(s->avctx->time_base);
            int    minbits = s->frame_bits - 8 *
                             (s->vbv_delay_ptr - s->pb.buf - 1);
            double bits    = s->rc_context.buffer_index + minbits - inbits;

                av_log(s->avctx, AV_LOG_ERROR,
                       "Internal error, negative bits\n");

            assert(s->repeat_first_field == 0);

            vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
                        s->avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            assert(vbv_delay < 0xFFFF);

            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay >> 13;
            s->vbv_delay_ptr[1]  = vbv_delay >> 5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay << 3;
            avctx->vbv_delay     = vbv_delay * 300;
        s->total_bits     += s->frame_bits;
        avctx->frame_bits  = s->frame_bits;

        pkt->pts = s->current_picture.f->pts;
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->current_picture.f->coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
            pkt->dts = pkt->pts;
        if (s->current_picture.f->key_frame)
            pkt->flags |= AV_PKT_FLAG_KEY;
            av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);

        assert((s->frame_bits & 7) == 0);

    pkt->size   = s->frame_bits / 8;
    *got_packet = !!pkt->size;
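/* Zero out a block whose few small high-frequency coefficients are not worth
 * the bits needed to code them. */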
static inline void dct_single_coeff_elimination(MpegEncContext *s,
                                                int n, int threshold)
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];

    if (threshold < 0) {
        threshold = -threshold;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)

    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (skip_dc && i == 0)
        } else if (level > 1) {

    if (score >= threshold)
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
    s->block_last_index[n] = 0;
    s->block_last_index[n] = -1;
static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
    const int maxlevel = s->max_qcoeff;
    const int minlevel = s->min_qcoeff;

        i = 1; // skip clipping of intra dc
    for (; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        int level = block[j];

        if (level > maxlevel) {
        } else if (level < minlevel) {

    if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
        av_log(s->avctx, AV_LOG_INFO,
               "warning, clipping %d dct coefficients to %d..%d\n",
               overflow, minlevel, maxlevel);
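/* Estimate local texture activity around each pixel of an 8x8 block;
 * the weights are used by quantizer noise shaping. */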
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
                for (x2 = FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
                    int v = ptr[x2 + y2 * stride];
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
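/* Encode one macroblock: fetch or motion-compensate the source pixels,
 * forward-DCT and quantize all blocks of the macroblock. */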
static av_always_inline void encode_mb_internal(MpegEncContext *s,
                                                int motion_x, int motion_y,
                                                int mb_block_height,
    int16_t weight[8][64];
    int16_t orig[8][64];
    const int mb_x = s->mb_x;
    const int mb_y = s->mb_y;
    int dct_offset = s->linesize * 8; // default for progressive frames
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    ptrdiff_t wrap_y, wrap_c;

    for (i = 0; i < mb_block_count; i++)
        skip_dct[i] = s->skipdct;

    if (s->adaptive_quant) {
        const int last_qp = s->qscale;
        const int mb_xy   = mb_x + mb_y * s->mb_stride;

        s->lambda = s->lambda_table[mb_xy];

        if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
            s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
            s->dquant = s->qscale - last_qp;

            if (s->out_format == FMT_H263) {
                s->dquant = av_clip(s->dquant, -2, 2);

                if (s->codec_id == AV_CODEC_ID_MPEG4) {
                    if (s->pict_type == AV_PICTURE_TYPE_B) {
                        if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
                    if (s->mv_type == MV_TYPE_8X8)

        ff_set_qscale(s, last_qp + s->dquant);
    } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
        ff_set_qscale(s, s->qscale + s->dquant);

    wrap_y = s->linesize;
    wrap_c = s->uvlinesize;
    ptr_y  = s->new_picture.f->data[0] +
             (mb_y * 16 * wrap_y)              + mb_x * 16;
    ptr_cb = s->new_picture.f->data[1] +
             (mb_y * mb_block_height * wrap_c) + mb_x * 8;
    ptr_cr = s->new_picture.f->data[2] +
             (mb_y * mb_block_height * wrap_c) + mb_x * 8;
    if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
        uint8_t *ebuf = s->sc.edge_emu_buffer + 32;
        s->vdsp.emulated_edge_mc(ebuf, ptr_y,
                                 16, 16, mb_x * 16, mb_y * 16,
                                 s->width, s->height);
        s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
                                 8, mb_block_height, mb_x * 8, mb_y * 8,
                                 s->width >> 1, s->height >> 1);
        ptr_cb = ebuf + 18 * wrap_y;
        s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
                                 8, mb_block_height, mb_x * 8, mb_y * 8,
                                 s->width >> 1, s->height >> 1);
        ptr_cr = ebuf + 18 * wrap_y + 8;

        if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
            int progressive_score, interlaced_score;

            s->interlaced_dct = 0;
            progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
                                s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
                                                     NULL, wrap_y, 8) - 400;

            if (progressive_score > 0) {
                interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
                                                        NULL, wrap_y * 2, 8) +
                                   s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
                                                        NULL, wrap_y * 2, 8);
                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    dct_offset = wrap_y;
                    if (s->chroma_format == CHROMA_422)

        s->pdsp.get_pixels(s->block[0], ptr_y,                  wrap_y);
        s->pdsp.get_pixels(s->block[1], ptr_y + 8,              wrap_y);
        s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset,     wrap_y);
        s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);

        if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
            s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
            s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
            if (!s->chroma_y_shift) { /* 422 */
                s->pdsp.get_pixels(s->block[6],
                                   ptr_cb + (dct_offset >> 1), wrap_c);
                s->pdsp.get_pixels(s->block[7],
                                   ptr_cr + (dct_offset >> 1), wrap_c);
2051 op_pixels_func (*op_pix
)[4];
2052 qpel_mc_func (*op_qpix
)[16];
2053 uint8_t *dest_y
, *dest_cb
, *dest_cr
;
2055 dest_y
= s
->dest
[0];
2056 dest_cb
= s
->dest
[1];
2057 dest_cr
= s
->dest
[2];
2059 if ((!s
->no_rounding
) || s
->pict_type
== AV_PICTURE_TYPE_B
) {
2060 op_pix
= s
->hdsp
.put_pixels_tab
;
2061 op_qpix
= s
->qdsp
.put_qpel_pixels_tab
;
2063 op_pix
= s
->hdsp
.put_no_rnd_pixels_tab
;
2064 op_qpix
= s
->qdsp
.put_no_rnd_qpel_pixels_tab
;
2067 if (s
->mv_dir
& MV_DIR_FORWARD
) {
2068 ff_mpv_motion(s
, dest_y
, dest_cb
, dest_cr
, 0,
2069 s
->last_picture
.f
->data
,
2071 op_pix
= s
->hdsp
.avg_pixels_tab
;
2072 op_qpix
= s
->qdsp
.avg_qpel_pixels_tab
;
2074 if (s
->mv_dir
& MV_DIR_BACKWARD
) {
2075 ff_mpv_motion(s
, dest_y
, dest_cb
, dest_cr
, 1,
2076 s
->next_picture
.f
->data
,
        if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
            int progressive_score, interlaced_score;

            s->interlaced_dct = 0;
            progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
                                s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
                                                     ptr_y + wrap_y * 8,
                                                     wrap_y, 8) - 400;

            if (s->avctx->ildct_cmp == FF_CMP_VSSE)
                progressive_score -= 400;

            if (progressive_score > 0) {
                interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
                                                        wrap_y * 2, 8) +
                                   s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
                                                        ptr_y + wrap_y,
                                                        wrap_y * 2, 8);

                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    dct_offset = wrap_y;
                    wrap_y <<= 1;
                    if (s->chroma_format == CHROMA_422)
                        wrap_c <<= 1;
                }
            }
        }
        s->pdsp.diff_pixels(s->block[0], ptr_y,     dest_y,     wrap_y);
        s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
        s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
                            dest_y + dct_offset, wrap_y);
        s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
                            dest_y + dct_offset + 8, wrap_y);

        if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
            s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
            if (!s->chroma_y_shift) { /* 422 */
                s->pdsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
                                    dest_cb + (dct_offset >> 1), wrap_c);
                s->pdsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
                                    dest_cr + (dct_offset >> 1), wrap_c);
            }
        }
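
        /* Skip heuristic: if the motion-compensated variance of this MB is
         * already small compared to the quantizer, any 8x8 block whose SAD
         * against its prediction stays below 20 * qscale is flagged in
         * skip_dct[] and neither transformed nor coded. */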
        /* pre quantization */
        if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
                2 * s->qscale * s->qscale) {
            if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
                skip_dct[0] = 1;
            if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
                skip_dct[1] = 1;
            if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
                               wrap_y, 8) < 20 * s->qscale)
                skip_dct[2] = 1;
            if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
                               wrap_y, 8) < 20 * s->qscale)
                skip_dct[3] = 1;
            if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
                skip_dct[4] = 1;
            if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
                skip_dct[5] = 1;
            if (!s->chroma_y_shift) { /* 422 */
                if (s->mecc.sad[1](NULL, ptr_cb + (dct_offset >> 1),
                                   dest_cb + (dct_offset >> 1),
                                   wrap_c, 8) < 20 * s->qscale)
                    skip_dct[6] = 1;
                if (s->mecc.sad[1](NULL, ptr_cr + (dct_offset >> 1),
                                   dest_cr + (dct_offset >> 1),
                                   wrap_c, 8) < 20 * s->qscale)
                    skip_dct[7] = 1;
            }
        }
    }
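
    /* Optional quantizer noise shaping: derive per-block visual weights from
     * the source pixels and keep an unquantized copy of the coefficients in
     * orig[]; dct_quantize_refine() below uses both to re-optimize the
     * quantized levels. */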
    if (s->quantizer_noise_shaping) {
        if (!skip_dct[0])
            get_visual_weight(weight[0], ptr_y,                  wrap_y);
        if (!skip_dct[1])
            get_visual_weight(weight[1], ptr_y + 8,              wrap_y);
        if (!skip_dct[2])
            get_visual_weight(weight[2], ptr_y + dct_offset,     wrap_y);
        if (!skip_dct[3])
            get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
        if (!skip_dct[4])
            get_visual_weight(weight[4], ptr_cb,                 wrap_c);
        if (!skip_dct[5])
            get_visual_weight(weight[5], ptr_cr,                 wrap_c);
        if (!s->chroma_y_shift) { /* 422 */
            if (!skip_dct[6])
                get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
                                  wrap_c);
            if (!skip_dct[7])
                get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
                                  wrap_c);
        }
        memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
    }
    /* DCT & quantize */
    assert(s->out_format != FMT_MJPEG || s->qscale == 8);
    for (i = 0; i < mb_block_count; i++) {
        if (!skip_dct[i]) {
            int overflow;
            s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
            // FIXME we could decide to change to quantizer instead of
            // clipping
            // JS: I don't think that would be a good idea it could lower
            //     quality instead of improve it. Just INTRADC clipping
            //     deserves changes in quantizer
            if (overflow)
                clip_coeffs(s, s->block[i], s->block_last_index[i]);
        } else
            s->block_last_index[i] = -1;
    }
    if (s->quantizer_noise_shaping) {
        for (i = 0; i < mb_block_count; i++) {
            if (s->block_last_index[i] > 0)
                s->block_last_index[i] =
                    dct_quantize_refine(s, s->block[i], weight[i],
                                        orig[i], i, s->qscale);
        }
    }
    if (s->luma_elim_threshold && !s->mb_intra)
        for (i = 0; i < 4; i++)
            dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
    if (s->chroma_elim_threshold && !s->mb_intra)
        for (i = 4; i < mb_block_count; i++)
            dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);

    if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
        for (i = 0; i < mb_block_count; i++) {
            if (s->block_last_index[i] == -1)
                s->coded_score[i] = INT_MAX / 256;
        }
    }
    if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
        s->block_last_index[4] =
        s->block_last_index[5] = 0;
        s->block[4][0] =
        s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
    }
    // non c quantize code returns incorrect block_last_index FIXME
    if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
        for (i = 0; i < mb_block_count; i++) {
            int j;
            if (s->block_last_index[i] > 0) {
                for (j = 63; j > 0; j--) {
                    if (s->block[i][s->intra_scantable.permutated[j]])
                        break;
                }
                s->block_last_index[i] = j;
            }
        }
    }
    /* huffman encode */
    switch(s->codec_id){ //FIXME funct ptr could be slightly faster
    case AV_CODEC_ID_MPEG1VIDEO:
    case AV_CODEC_ID_MPEG2VIDEO:
        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
            ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MPEG4:
        if (CONFIG_MPEG4_ENCODER)
            ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MSMPEG4V2:
    case AV_CODEC_ID_MSMPEG4V3:
    case AV_CODEC_ID_WMV1:
        if (CONFIG_MSMPEG4_ENCODER)
            ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_WMV2:
        if (CONFIG_WMV2_ENCODER)
            ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_H261:
        if (CONFIG_H261_ENCODER)
            ff_h261_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_FLV1:
    case AV_CODEC_ID_RV10:
    case AV_CODEC_ID_RV20:
        if (CONFIG_H263_ENCODER)
            ff_h263_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MJPEG:
        if (CONFIG_MJPEG_ENCODER)
            ff_mjpeg_encode_mb(s, s->block);
        break;
    default:
        assert(0);
    }
}
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
{
    if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y,  8, 6);
    else                                encode_mb_internal(s, motion_x, motion_y, 16, 8);
}
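
/* 4:2:0 macroblocks carry 6 blocks (4 luma + 2 chroma) with 8-line chroma;
 * everything else here is treated as 4:2:2 with 8 blocks and 16-line chroma. */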
static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
    int i;

    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    d->qscale= s->qscale;
    d->dquant= s->dquant;

    d->esc3_level_length= s->esc3_level_length;
}
static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
    int i;

    memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    d->mb_intra= s->mb_intra;
    d->mb_skipped= s->mb_skipped;
    d->mv_type= s->mv_type;
    d->mv_dir= s->mv_dir;
    if(s->data_partitioning){
        d->pb2= s->pb2;
        d->tex_pb= s->tex_pb;
    }
    for(i=0; i<8; i++)
        d->block_last_index[i]= s->block_last_index[i];
    d->interlaced_dct= s->interlaced_dct;
    d->qscale= s->qscale;

    d->esc3_level_length= s->esc3_level_length;
}
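
/* Trial-encode one candidate macroblock type for the RD decision: the MB is
 * encoded into one of two scratch PutBitContexts, its cost is the bit count
 * (scaled by lambda2 and combined with the SSE distortion when mb_decision is
 * FF_MB_DECISION_RD), and if it beats *dmin the state is kept in *best via
 * copy_context_after_encode(). */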
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                                PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                                int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    copy_context_before_encode(s, backup, type);

    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2   = pb2   [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->sc.rd_scratchpad;
        s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
        assert(s->linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    score= put_bits_count(&s->pb);
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        ff_mpv_decode_mb(s, s->block);

        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;

        copy_context_after_encode(best, s, type);
    }
}
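
/* Sum of squared errors between source and reconstruction, used as the
 * distortion term above; the DSP routines cover the full 16x16/8x8 cases and
 * the scalar loop handles partial blocks at the picture border. */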
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
    uint32_t *sq = ff_square_tab + 256;
    int acc=0;
    int x,y;

    if(w==16 && h==16)
        return s->mecc.sse[0](NULL, src1, src2, stride, 16);
    else if(w==8 && h==8)
        return s->mecc.sse[1](NULL, src1, src2, stride, 8);

    for(y=0; y<h; y++){
        for(x=0; x<w; x++){
            acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
        }
    }

    return acc;
}
static int sse_mb(MpegEncContext *s){
    int w= 16;
    int h= 16;

    if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
    if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;

    if(w==16 && h==16)
      if(s->avctx->mb_cmp == FF_CMP_NSSE){
        return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
               s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
               s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
      }else{
        return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize   * 16, s->dest[0], s->linesize,   16) +
               s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[1], s->uvlinesize,  8) +
               s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x *  8 + s->mb_y * s->uvlinesize *  8, s->dest[2], s->uvlinesize,  8);
      }
    else
        return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
               +sse(s, s->new_picture.f->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
               +sse(s, s->new_picture.f->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
}
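
/* The following workers each run on one slice context, covering the
 * macroblock rows [start_mb_y, end_mb_y) assigned to that context. */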
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;

    s->me.pre_pass=1;
    s->me.dia_size= s->avctx->pre_dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
        for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
            ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line=0;
    }

    s->me.pre_pass=0;

    return 0;
}
static int estimate_motion_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;

    s->me.dia_size= s->avctx->dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x=0; //for block init below
        ff_init_block_index(s);
        for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
            s->block_index[0]+=2;
            s->block_index[1]+=2;
            s->block_index[2]+=2;
            s->block_index[3]+=2;

            /* compute motion vector & mb_type and store in context */
            if(s->pict_type==AV_PICTURE_TYPE_B)
                ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
            else
                ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line=0;
    }

    return 0;
}
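
/* Per-macroblock luma mean and variance from pix_sum()/pix_norm1(); the
 * accumulated variance sum presumably feeds adaptive quantization and rate
 * control later on. */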
static int mb_var_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;
    int mb_x, mb_y;

    for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        for(mb_x=0; mb_x < s->mb_width; mb_x++) {
            int xx = mb_x * 16;
            int yy = mb_y * 16;
            uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
            int varc;
            int sum = s->mpvencdsp.pix_sum(pix, s->linesize);

            varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
                    (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;

            s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
            s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
            s->me.mb_var_sum_temp    += varc;
        }
    }
    return 0;
}
static void write_slice_end(MpegEncContext *s){
    if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
        if(s->partitioned_frame){
            ff_mpeg4_merge_partitions(s);
        }

        ff_mpeg4_stuffing(&s->pb);
    }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
        ff_mjpeg_encode_stuffing(&s->pb);
    }

    avpriv_align_put_bits(&s->pb);
    flush_put_bits(&s->pb);

    if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
}
static void write_mb_info(MpegEncContext *s)
{
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb);
    int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
    int gobn = s->mb_y / s->gob_index;
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
}
static void update_mb_info(MpegEncContext *s, int startcode)
{
    if (!s->mb_info)
        return;
    if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }
    if (startcode) {
        s->prev_mb_info = put_bits_count(&s->pb)/8;
        /* This might have incremented mb_info_size above, and we return without
         * actually writing any info into that slot yet. But in that case,
         * this will be called again after the start code has been written, and
         * the mb info is written then. */
        return;
    }

    s->last_mb_info = put_bits_count(&s->pb)/8;
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
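
/* Main per-slice encoding loop: iterates over the macroblocks of one slice
 * context, emits resync/GOB/video-packet headers where needed, performs the
 * rate-distortion macroblock mode decision and writes the bitstream. */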
static int encode_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;
    int mb_x, mb_y, pdif = 0;
    int chr_h= 16>>s->chroma_y_shift;
    int i, j;
    MpegEncContext best_s = { 0 }, backup_s;
    uint8_t bit_buf[2][MAX_MB_BYTES];
    uint8_t bit_buf2[2][MAX_MB_BYTES];
    uint8_t bit_buf_tex[2][MAX_MB_BYTES];
    PutBitContext pb[2], pb2[2], tex_pb[2];

    for(i=0; i<2; i++){
        init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
        init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
        init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
    }

    s->last_bits= put_bits_count(&s->pb);
    for(i=0; i<3; i++){
        /* init last dc values */
        /* note: quant matrix value (8) is implied here */
        s->last_dc[i] = 128 << s->intra_dc_precision;

        s->current_picture.encoding_error[i] = 0;
    }
    s->mb_skip_run = 0;
    memset(s->last_mv, 0, sizeof(s->last_mv));

    switch(s->codec_id){
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_FLV1:
        if (CONFIG_H263_ENCODER)
            s->gob_index = H263_GOB_HEIGHT(s->height);
        break;
    case AV_CODEC_ID_MPEG4:
        if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
            ff_mpeg4_init_partitions(s);
        break;
    }

    s->first_slice_line = 1;
    s->ptr_lastgob = s->pb.buf;
    for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        s->mb_x=0;
        s->mb_y= mb_y;

        ff_set_qscale(s, s->qscale);
        ff_init_block_index(s);

        for(mb_x=0; mb_x < s->mb_width; mb_x++) {
            int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
            int mb_type= s->mb_type[xy];
            int dmin= INT_MAX;

            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }
            if(s->data_partitioning){
                if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->pb2   )>>3) < MAX_MB_BYTES
                   || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb)>>3) < MAX_MB_BYTES){
                    av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                    return -1;
                }
            }

            s->mb_x = mb_x;
            s->mb_y = mb_y;  // moved into loop, can get changed by H.261
            ff_update_block_index(s);

            if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
                ff_h261_reorder_mb_index(s);
                xy= s->mb_y*s->mb_stride + s->mb_x;
                mb_type= s->mb_type[xy];
            }
            /* write gob / video packet header */
            if(s->rtp_mode){
                int current_packet_size, is_gob_start;

                current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);

                is_gob_start = s->avctx->rtp_payload_size &&
                               current_packet_size >= s->avctx->rtp_payload_size &&
                               mb_y + mb_x > 0;

                if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;

                switch(s->codec_id){
                case AV_CODEC_ID_H263:
                case AV_CODEC_ID_H263P:
                    if(!s->h263_slice_structured)
                        if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
                    break;
                case AV_CODEC_ID_MPEG2VIDEO:
                    if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
                case AV_CODEC_ID_MPEG1VIDEO:
                    if(s->mb_skip_run) is_gob_start=0;
                    break;
                }

                if(is_gob_start){
                    if(s->start_mb_y != mb_y || mb_x!=0){
                        write_slice_end(s);

                        if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
                            ff_mpeg4_init_partitions(s);
                        }
                    }

                    assert((put_bits_count(&s->pb)&7) == 0);
                    current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;

                    if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
                        int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
                        int d= 100 / s->error_rate;
                        if(r % d == 0){
                            current_packet_size=0;
                            s->pb.buf_ptr= s->ptr_lastgob;
                            assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
                        }
                    }

                    if (s->avctx->rtp_callback){
                        int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
                        s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
                    }
                    update_mb_info(s, 1);

                    switch(s->codec_id){
                    case AV_CODEC_ID_MPEG4:
                        if (CONFIG_MPEG4_ENCODER) {
                            ff_mpeg4_encode_video_packet_header(s);
                            ff_mpeg4_clean_buffers(s);
                        }
                        break;
                    case AV_CODEC_ID_MPEG1VIDEO:
                    case AV_CODEC_ID_MPEG2VIDEO:
                        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
                            ff_mpeg1_encode_slice_header(s);
                            ff_mpeg1_clean_buffers(s);
                        }
                        break;
                    case AV_CODEC_ID_H263:
                    case AV_CODEC_ID_H263P:
                        if (CONFIG_H263_ENCODER)
                            ff_h263_encode_gob_header(s, mb_y);
                        break;
                    }

                    if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
                        int bits= put_bits_count(&s->pb);
                        s->misc_bits+= bits - s->last_bits;
                        s->last_bits= bits;
                    }

                    s->ptr_lastgob += current_packet_size;
                    s->first_slice_line=1;
                    s->resync_mb_x=mb_x;
                    s->resync_mb_y=mb_y;
                }
            }
            if(  (s->resync_mb_x   == s->mb_x)
               && s->resync_mb_y+1 == s->mb_y){
                s->first_slice_line=0;
            }

            s->mb_skipped=0;
            s->dquant=0; //only for QP_RD

            update_mb_info(s, 0);

            if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
                int next_block=0;
                int pb_bits_count, pb2_bits_count, tex_pb_bits_count;

                copy_context_before_encode(&backup_s, s, -1);
                backup_s.pb= s->pb;
                best_s.data_partitioning= s->data_partitioning;
                best_s.partitioned_frame= s->partitioned_frame;
                if(s->data_partitioning){
                    backup_s.pb2= s->pb2;
                    backup_s.tex_pb= s->tex_pb;
                }
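
                /* Several candidate types (or QP_RD): trial-encode every
                 * candidate flagged in mb_type with encode_mb_hq() and keep
                 * the cheapest one in best_s / dmin. */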
                if(mb_type&CANDIDATE_MB_TYPE_INTER){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = s->p_mv_table[xy][0];
                    s->mv[0][0][1] = s->p_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[0][i] = s->p_field_select_table[i][xy];
                        s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
                        s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_8X8;
                    s->mb_intra= 0;
                    for(i=0; i<4; i++){
                        s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
                        s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
                    s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[1][0][0] = s->b_back_mv_table[xy][0];
                    s->mv[1][0][1] = s->b_back_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
                    s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
                    s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
                    s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
                        s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
                        s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD