Sync with mplayer's stuff
[libav.git] / libavcodec / mpegvideo.c
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Gerard Lantau.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <math.h>
22 #include <string.h>
23 #include "avcodec.h"
24 #include "dsputil.h"
25 #include "mpegvideo.h"
26
27 #include "../config.h"
28
29 #ifdef ARCH_X86
30 #include "i386/mpegvideo.c"
31 #endif
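/* the x86 include above may provide an optimized DCT_UNQUANTIZE (and define
   HAVE_DCT_UNQUANTIZE); otherwise we fall back to the C dct_unquantize()
   defined near the end of this file */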
32 #ifndef DCT_UNQUANTIZE
33 #define DCT_UNQUANTIZE(a,b,c,d) dct_unquantize(a,b,c,d)
34 #endif
35
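/* width of the replicated border kept around each reference plane (halved
   for chroma), so that motion vectors pointing slightly outside the picture
   still address allocated, meaningful pixels; see draw_edges() below */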
36 #define EDGE_WIDTH 16
37
38 /* enable all paranoid tests for rounding, overflows, etc... */
39 //#define PARANOID
40
41 //#define DEBUG
42
43 /* for jpeg fast DCT */
44 #define CONST_BITS 14
45
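/* post-scaling factors of the AAN fast DCT used by jpeg_fdct_ifast:
   aanscales[8*u+v] = round(2^14 * c(u) * c(v)) with c(0) = 1 and
   c(k) = sqrt(2) * cos(k*pi/16); convert_matrix() folds them into the
   quantization tables */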
46 static const unsigned short aanscales[64] = {
47 /* precomputed values scaled up by 14 bits */
48 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
49 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270,
50 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906,
51 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315,
52 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
53 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552,
54 8867, 12299, 11585, 10426, 8867, 6967, 4799, 2446,
55 4520, 6270, 5906, 5315, 4520, 3552, 2446, 1247
56 };
57
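/* H.263-style chroma vector rounding: in 8x8 MV mode the four luma vectors
   (half-pel units) are summed and the chroma vector is roughly sum/8; this
   table gives the specially rounded low bits for sum & 0xf (see the
   MV_TYPE_8X8 case of MPV_motion() below) */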
58 static UINT8 h263_chroma_roundtab[16] = {
59 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
60 };
61
62 static void encode_picture(MpegEncContext *s, int picture_number);
63 static void rate_control_init(MpegEncContext *s);
64 static int rate_estimate_qscale(MpegEncContext *s);
65
66 /* default motion estimation */
67 int motion_estimation_method = ME_LOG;
68
69 /* XXX: should use variable shift ? */
70 #define QMAT_SHIFT_MMX 19
71 #define QMAT_SHIFT 25
72
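/* build a fixed point reciprocal of the quantization matrix so that
   dct_quantize()/dct_quantize_mmx() can use a multiply and a shift instead
   of a division per coefficient, roughly:

     qmat[i] ~= (1 << SHIFT) / (qscale * quant_matrix[i])
     (level * qmat[i]) >> (SHIFT - 3)  ~=  8 * level / (qscale * quant_matrix[i])

   For jpeg_fdct_ifast the DCT output still carries the AAN scale factors,
   so aanscales[] is folded into qmat[] as well (and SHIFT is QMAT_SHIFT
   instead of QMAT_SHIFT_MMX). */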
73 static void convert_matrix(int *qmat, const UINT16 *quant_matrix, int qscale)
74 {
75 int i;
76
77 if (av_fdct == jpeg_fdct_ifast) {
78 for(i=0;i<64;i++) {
79 /* 16 <= qscale * quant_matrix[i] <= 7905 */
80 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
81
82 qmat[i] = (int)((1ULL << (QMAT_SHIFT + 11)) / (aanscales[i] * qscale * quant_matrix[i]));
83 }
84 } else {
85 for(i=0;i<64;i++) {
86 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
87 So 16 <= qscale * quant_matrix[i] <= 7905
88            so (1 << QMAT_SHIFT_MMX) / 16 >= qmat[i] >= (1 << QMAT_SHIFT_MMX) / 7905
89 */
90 qmat[i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
91 }
92 }
93 }
94
95 /* init common structure for both encoder and decoder */
96 int MPV_common_init(MpegEncContext *s)
97 {
98 int c_size, i;
99 UINT8 *pict;
100
101 #if defined ( HAVE_MMX ) && defined ( BIN_PORTABILITY )
102 MPV_common_init_mmx();
103 #endif
104 s->mb_width = (s->width + 15) / 16;
105 s->mb_height = (s->height + 15) / 16;
106 s->linesize = s->mb_width * 16 + 2 * EDGE_WIDTH;
107
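    /* allocate the Y, Cb and Cr planes of the last/next (and, when B frames
       are used, aux) reference pictures; each plane carries an EDGE_WIDTH
       border (halved for chroma): *_picture_base points to the start of the
       allocation, *_picture to the first pixel inside the border */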
108 for(i=0;i<3;i++) {
109 int w, h, shift, pict_start;
110
111 w = s->linesize;
112 h = s->mb_height * 16 + 2 * EDGE_WIDTH;
113 shift = (i == 0) ? 0 : 1;
114 c_size = (w >> shift) * (h >> shift);
115 pict_start = (w >> shift) * (EDGE_WIDTH >> shift) + (EDGE_WIDTH >> shift);
116
117 pict = av_mallocz(c_size);
118 if (pict == NULL)
119 goto fail;
120 s->last_picture_base[i] = pict;
121 s->last_picture[i] = pict + pict_start;
122
123 pict = av_mallocz(c_size);
124 if (pict == NULL)
125 goto fail;
126 s->next_picture_base[i] = pict;
127 s->next_picture[i] = pict + pict_start;
128
129 if (s->has_b_frames) {
130 pict = av_mallocz(c_size);
131 if (pict == NULL)
132 goto fail;
133 s->aux_picture_base[i] = pict;
134 s->aux_picture[i] = pict + pict_start;
135 }
136 }
137
138 if (s->out_format == FMT_H263) {
139 int size;
140 /* MV prediction */
141 size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
142 s->motion_val = malloc(size * 2 * sizeof(INT16));
143 if (s->motion_val == NULL)
144 goto fail;
145 memset(s->motion_val, 0, size * 2 * sizeof(INT16));
146 }
147
148 if (s->h263_pred) {
149 int y_size, c_size, i, size;
150
151 /* dc values */
152
153 y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
154 c_size = (s->mb_width + 2) * (s->mb_height + 2);
155 size = y_size + 2 * c_size;
156 s->dc_val[0] = malloc(size * sizeof(INT16));
157 if (s->dc_val[0] == NULL)
158 goto fail;
159 s->dc_val[1] = s->dc_val[0] + y_size;
160 s->dc_val[2] = s->dc_val[1] + c_size;
161 for(i=0;i<size;i++)
162 s->dc_val[0][i] = 1024;
163
164 /* ac values */
165 s->ac_val[0] = av_mallocz(size * sizeof(INT16) * 16);
166 if (s->ac_val[0] == NULL)
167 goto fail;
168 s->ac_val[1] = s->ac_val[0] + y_size;
169 s->ac_val[2] = s->ac_val[1] + c_size;
170
171 /* cbp values */
172 s->coded_block = av_mallocz(y_size);
173 if (!s->coded_block)
174 goto fail;
175 }
176 /* default structure is frame */
177 s->picture_structure = PICT_FRAME;
178
179 /* init default q matrix (only for mpeg and mjpeg) */
180 for(i=0;i<64;i++) {
181 s->intra_matrix[i] = default_intra_matrix[i];
182 s->chroma_intra_matrix[i] = default_intra_matrix[i];
183 s->non_intra_matrix[i] = default_non_intra_matrix[i];
184 s->chroma_non_intra_matrix[i] = default_non_intra_matrix[i];
185 }
186 /* init macroblock skip table */
187 if (!s->encoding) {
188 s->mbskip_table = av_mallocz(s->mb_width * s->mb_height);
189 if (!s->mbskip_table)
190 goto fail;
191 }
192
193 s->context_initialized = 1;
194 return 0;
195 fail:
196 if (s->motion_val)
197 free(s->motion_val);
198 if (s->dc_val[0])
199 free(s->dc_val[0]);
200 if (s->ac_val[0])
201 free(s->ac_val[0]);
202 if (s->coded_block)
203 free(s->coded_block);
204 if (s->mbskip_table)
205 free(s->mbskip_table);
206 for(i=0;i<3;i++) {
207 if (s->last_picture_base[i])
208 free(s->last_picture_base[i]);
209 if (s->next_picture_base[i])
210 free(s->next_picture_base[i]);
211 if (s->aux_picture_base[i])
212 free(s->aux_picture_base[i]);
213 }
214 return -1;
215 }
216
217 /* free the common structures allocated by MPV_common_init() */
218 void MPV_common_end(MpegEncContext *s)
219 {
220 int i;
221
222 if (s->motion_val)
223 free(s->motion_val);
224 if (s->h263_pred) {
225 free(s->dc_val[0]);
226 free(s->ac_val[0]);
227 free(s->coded_block);
228 }
229 if (s->mbskip_table)
230 free(s->mbskip_table);
231 for(i=0;i<3;i++) {
232 free(s->last_picture_base[i]);
233 free(s->next_picture_base[i]);
234 if (s->has_b_frames)
235 free(s->aux_picture_base[i]);
236 }
237 s->context_initialized = 0;
238 }
239
240 /* init video encoder */
241 int MPV_encode_init(AVCodecContext *avctx)
242 {
243 MpegEncContext *s = avctx->priv_data;
244
245 s->bit_rate = avctx->bit_rate;
246 s->frame_rate = avctx->frame_rate;
247 s->width = avctx->width;
248 s->height = avctx->height;
249 s->gop_size = avctx->gop_size;
250 if (s->gop_size <= 1) {
251 s->intra_only = 1;
252 s->gop_size = 12;
253 } else {
254 s->intra_only = 0;
255 }
256 s->full_search = motion_estimation_method;
257
258 s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);
259
260 switch(avctx->codec->id) {
261 case CODEC_ID_MPEG1VIDEO:
262 s->out_format = FMT_MPEG1;
263 break;
264 case CODEC_ID_MJPEG:
265 s->out_format = FMT_MJPEG;
266 s->intra_only = 1; /* force intra only for jpeg */
267 if (mjpeg_init(s) < 0)
268 return -1;
269 break;
270 case CODEC_ID_H263:
271 if (h263_get_picture_format(s->width, s->height) == 7)
272 return -1;
273 s->out_format = FMT_H263;
274 break;
275 case CODEC_ID_H263P:
276 s->out_format = FMT_H263;
277 s->h263_plus = 1;
278         /* XXX: not unrestricted mv yet */
279 break;
280 case CODEC_ID_RV10:
281 s->out_format = FMT_H263;
282 s->h263_rv10 = 1;
283 break;
284 case CODEC_ID_OPENDIVX:
285 s->out_format = FMT_H263;
286 s->h263_pred = 1;
287 s->unrestricted_mv = 1;
288 break;
289 case CODEC_ID_MSMPEG4:
290 s->out_format = FMT_H263;
291 s->h263_msmpeg4 = 1;
292 s->h263_pred = 1;
293 s->unrestricted_mv = 1;
294 break;
295 default:
296 return -1;
297 }
298
299 if (s->out_format == FMT_H263)
300 h263_encode_init_vlc(s);
301
302 s->encoding = 1;
303
304 /* init */
305 if (MPV_common_init(s) < 0)
306 return -1;
307
308 /* rate control init */
309 rate_control_init(s);
310
311 s->picture_number = 0;
312 s->fake_picture_number = 0;
313     /* motion estimation init */
314 s->f_code = 1;
315
316 return 0;
317 }
318
319 int MPV_encode_end(AVCodecContext *avctx)
320 {
321 MpegEncContext *s = avctx->priv_data;
322
323 #ifdef STATS
324 print_stats();
325 #endif
326 MPV_common_end(s);
327 if (s->out_format == FMT_MJPEG)
328 mjpeg_close(s);
329 return 0;
330 }
331
332 /* draw the edges of width 'w' of an image of size width, height */
333 static void draw_edges(UINT8 *buf, int wrap, int width, int height, int w)
334 {
335 UINT8 *ptr, *last_line;
336 int i;
337
338 last_line = buf + (height - 1) * wrap;
339 for(i=0;i<w;i++) {
340 /* top and bottom */
341 memcpy(buf - (i + 1) * wrap, buf, width);
342 memcpy(last_line + (i + 1) * wrap, last_line, width);
343 }
344 /* left and right */
345 ptr = buf;
346 for(i=0;i<height;i++) {
347 memset(ptr - w, ptr[0], w);
348 memset(ptr + width, ptr[width-1], w);
349 ptr += wrap;
350 }
351 /* corners */
352 for(i=0;i<w;i++) {
353 memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
354 memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
355         memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
356         memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
357 }
358 }
359
360 /* generic function for encode/decode called before a frame is coded/decoded */
361 #ifndef ARCH_X86
362 void MPV_frame_start(MpegEncContext *s)
363 {
364 int i;
365 UINT8 *tmp;
366
367 if (s->pict_type == B_TYPE) {
368 for(i=0;i<3;i++) {
369 s->current_picture[i] = s->aux_picture[i];
370 }
371 } else {
372 for(i=0;i<3;i++) {
373 /* swap next and last */
374 tmp = s->last_picture[i];
375 s->last_picture[i] = s->next_picture[i];
376 s->next_picture[i] = tmp;
377 s->current_picture[i] = tmp;
378 }
379 }
380 }
381 #endif
382 /* generic function for encode/decode called after a frame has been coded/decoded */
383 void MPV_frame_end(MpegEncContext *s)
384 {
385     /* replicate the picture edges so that motion compensation may reference pixels outside the picture */
386 if (s->pict_type != B_TYPE) {
387 draw_edges(s->current_picture[0], s->linesize, s->width, s->height, EDGE_WIDTH);
388 draw_edges(s->current_picture[1], s->linesize/2, s->width/2, s->height/2, EDGE_WIDTH/2);
389 draw_edges(s->current_picture[2], s->linesize/2, s->width/2, s->height/2, EDGE_WIDTH/2);
390 }
391 }
392
393 int MPV_encode_picture(AVCodecContext *avctx,
394 unsigned char *buf, int buf_size, void *data)
395 {
396 MpegEncContext *s = avctx->priv_data;
397 AVPicture *pict = data;
398 int i, j;
399
400 if (s->fixed_qscale)
401 s->qscale = avctx->quality;
402
403 init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
404
405 if (!s->intra_only) {
406 /* first picture of GOP is intra */
407 if ((s->picture_number % s->gop_size) == 0)
408 s->pict_type = I_TYPE;
409 else
410 s->pict_type = P_TYPE;
411 } else {
412 s->pict_type = I_TYPE;
413 }
414 avctx->key_frame = (s->pict_type == I_TYPE);
415
416 MPV_frame_start(s);
417
418 for(i=0;i<3;i++) {
419 UINT8 *src = pict->data[i];
420 UINT8 *dest = s->current_picture[i];
421 int src_wrap = pict->linesize[i];
422 int dest_wrap = s->linesize;
423 int w = s->width;
424 int h = s->height;
425
426 if (i >= 1) {
427 dest_wrap >>= 1;
428 w >>= 1;
429 h >>= 1;
430 }
431
432 for(j=0;j<h;j++) {
433 memcpy(dest, src, w);
434 dest += dest_wrap;
435 src += src_wrap;
436 }
437 s->new_picture[i] = s->current_picture[i];
438 }
439
440 encode_picture(s, s->picture_number);
441
442 MPV_frame_end(s);
443 s->picture_number++;
444
445 if (s->out_format == FMT_MJPEG)
446 mjpeg_picture_trailer(s);
447
448 flush_put_bits(&s->pb);
449 s->total_bits += (s->pb.buf_ptr - s->pb.buf) * 8;
450 avctx->quality = s->qscale;
451 return s->pb.buf_ptr - s->pb.buf;
452 }
453
454 static inline int clip(int a, int amin, int amax)
455 {
456 if (a < amin)
457 return amin;
458 else if (a > amax)
459 return amax;
460 else
461 return a;
462 }
463
464 static int dct_quantize(MpegEncContext *s, DCTELEM *block, int n, int qscale);
465 static int dct_quantize_mmx(MpegEncContext *s,
466 DCTELEM *block, int n,
467 int qscale);
468 static void dct_unquantize(MpegEncContext *s, DCTELEM *block, int n, int qscale);
469
470 /* apply one mpeg motion vector to the three components */
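/* motion_x/motion_y are in half-pel units; dxy packs the two half-pel flags
   (bit 0 = horizontal, bit 1 = vertical) and selects the full-pel, horizontal,
   vertical or diagonal half-pel variant from the pix_op[] table */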
471 static inline void mpeg_motion(MpegEncContext *s,
472 UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
473 int dest_offset,
474 UINT8 **ref_picture, int src_offset,
475 int field_based, op_pixels_func *pix_op,
476 int motion_x, int motion_y, int h)
477 {
478 UINT8 *ptr;
479 int dxy, offset, mx, my, src_x, src_y, height, linesize;
480
481 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
482 src_x = s->mb_x * 16 + (motion_x >> 1);
483 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
484
485     /* WARNING: do not forget half pels */
486 height = s->height >> field_based;
487 src_x = clip(src_x, -16, s->width);
488 if (src_x == s->width)
489 dxy &= ~1;
490 src_y = clip(src_y, -16, height);
491 if (src_y == height)
492 dxy &= ~2;
493 linesize = s->linesize << field_based;
494 ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
495 dest_y += dest_offset;
496 pix_op[dxy](dest_y, ptr, linesize, h);
497 pix_op[dxy](dest_y + 8, ptr + 8, linesize, h);
498
499 if (s->out_format == FMT_H263) {
500 dxy = 0;
501 if ((motion_x & 3) != 0)
502 dxy |= 1;
503 if ((motion_y & 3) != 0)
504 dxy |= 2;
505 mx = motion_x >> 2;
506 my = motion_y >> 2;
507 } else {
508 mx = motion_x / 2;
509 my = motion_y / 2;
510 dxy = ((my & 1) << 1) | (mx & 1);
511 mx >>= 1;
512 my >>= 1;
513 }
514
515 src_x = s->mb_x * 8 + mx;
516 src_y = s->mb_y * (8 >> field_based) + my;
517 src_x = clip(src_x, -8, s->width >> 1);
518 if (src_x == (s->width >> 1))
519 dxy &= ~1;
520 src_y = clip(src_y, -8, height >> 1);
521 if (src_y == (height >> 1))
522 dxy &= ~2;
523
524 offset = (src_y * (linesize >> 1)) + src_x + (src_offset >> 1);
525 ptr = ref_picture[1] + offset;
526 pix_op[dxy](dest_cb + (dest_offset >> 1), ptr, linesize >> 1, h >> 1);
527 ptr = ref_picture[2] + offset;
528 pix_op[dxy](dest_cr + (dest_offset >> 1), ptr, linesize >> 1, h >> 1);
529 }
530
531 static inline void MPV_motion(MpegEncContext *s,
532 UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
533 int dir, UINT8 **ref_picture,
534 op_pixels_func *pix_op)
535 {
536 int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
537 int mb_x, mb_y, i;
538 UINT8 *ptr, *dest;
539
540 mb_x = s->mb_x;
541 mb_y = s->mb_y;
542
543 switch(s->mv_type) {
544 case MV_TYPE_16X16:
545 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
546 ref_picture, 0,
547 0, pix_op,
548 s->mv[dir][0][0], s->mv[dir][0][1], 16);
549 break;
550 case MV_TYPE_8X8:
551 for(i=0;i<4;i++) {
552 motion_x = s->mv[dir][i][0];
553 motion_y = s->mv[dir][i][1];
554
555 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
556 src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
557 src_y = mb_y * 16 + (motion_y >> 1) + ((i >> 1) & 1) * 8;
558
559             /* WARNING: do not forget half pels */
560 src_x = clip(src_x, -16, s->width);
561 if (src_x == s->width)
562 dxy &= ~1;
563 src_y = clip(src_y, -16, s->height);
564 if (src_y == s->height)
565 dxy &= ~2;
566
567 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
568 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
569 pix_op[dxy](dest, ptr, s->linesize, 8);
570 }
571 /* In case of 8X8, we construct a single chroma motion vector
572 with a special rounding */
573 mx = 0;
574 my = 0;
575 for(i=0;i<4;i++) {
576 mx += s->mv[dir][i][0];
577 my += s->mv[dir][i][1];
578 }
579 if (mx >= 0)
580 mx = (h263_chroma_roundtab[mx & 0xf] + ((mx >> 3) & ~1));
581 else {
582 mx = -mx;
583 mx = -(h263_chroma_roundtab[mx & 0xf] + ((mx >> 3) & ~1));
584 }
585 if (my >= 0)
586 my = (h263_chroma_roundtab[my & 0xf] + ((my >> 3) & ~1));
587 else {
588 my = -my;
589 my = -(h263_chroma_roundtab[my & 0xf] + ((my >> 3) & ~1));
590 }
591 dxy = ((my & 1) << 1) | (mx & 1);
592 mx >>= 1;
593 my >>= 1;
594
595 src_x = mb_x * 8 + mx;
596 src_y = mb_y * 8 + my;
597 src_x = clip(src_x, -8, s->width/2);
598 if (src_x == s->width/2)
599 dxy &= ~1;
600 src_y = clip(src_y, -8, s->height/2);
601 if (src_y == s->height/2)
602 dxy &= ~2;
603
604 offset = (src_y * (s->linesize >> 1)) + src_x;
605 ptr = ref_picture[1] + offset;
606 pix_op[dxy](dest_cb, ptr, s->linesize >> 1, 8);
607 ptr = ref_picture[2] + offset;
608 pix_op[dxy](dest_cr, ptr, s->linesize >> 1, 8);
609 break;
610 case MV_TYPE_FIELD:
611 if (s->picture_structure == PICT_FRAME) {
612 /* top field */
613 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
614 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
615 1, pix_op,
616 s->mv[dir][0][0], s->mv[dir][0][1], 8);
617 /* bottom field */
618 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
619 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
620 1, pix_op,
621 s->mv[dir][1][0], s->mv[dir][1][1], 8);
622 } else {
623             /* XXX: motion compensation for field pictures is not implemented */
624
625 }
626 break;
627 }
628 }
629
630
631 /* put block[] to dest[] */
632 static inline void put_dct(MpegEncContext *s,
633 DCTELEM *block, int i, UINT8 *dest, int line_size)
634 {
635 if (!s->mpeg2)
636 DCT_UNQUANTIZE(s, block, i, s->qscale);
637 j_rev_dct (block);
638 put_pixels_clamped(block, dest, line_size);
639 }
640
641 /* add block[] to dest[] */
642 static inline void add_dct(MpegEncContext *s,
643 DCTELEM *block, int i, UINT8 *dest, int line_size)
644 {
645 if (s->block_last_index[i] >= 0) {
646 if (!s->mpeg2)
647 DCT_UNQUANTIZE(s, block, i, s->qscale);
648 j_rev_dct (block);
649 add_pixels_clamped(block, dest, line_size);
650 }
651 }
652
653 /* generic function called after a macroblock has been parsed by the
654 decoder or after it has been encoded by the encoder.
655
656 Important variables used:
657 s->mb_intra : true if intra macroblock
658 s->mv_dir : motion vector direction
659 s->mv_type : motion vector type
660 s->mv : motion vector
661 s->interlaced_dct : true if interlaced dct used (mpeg2)
662 */
663 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
664 {
665 int mb_x, mb_y, motion_x, motion_y;
666 int dct_linesize, dct_offset;
667 op_pixels_func *op_pix;
668
669 mb_x = s->mb_x;
670 mb_y = s->mb_y;
671
672 /* update DC predictors for P macroblocks */
673 if (!s->mb_intra) {
674 if (s->h263_pred) {
675 int wrap, x, y, v;
676 wrap = 2 * s->mb_width + 2;
677 v = 1024;
678 x = 2 * mb_x + 1;
679 y = 2 * mb_y + 1;
680 s->dc_val[0][(x) + (y) * wrap] = v;
681 s->dc_val[0][(x + 1) + (y) * wrap] = v;
682 s->dc_val[0][(x) + (y + 1) * wrap] = v;
683 s->dc_val[0][(x + 1) + (y + 1) * wrap] = v;
684 /* ac pred */
685 memset(s->ac_val[0][(x) + (y) * wrap], 0, 16 * sizeof(INT16));
686 memset(s->ac_val[0][(x + 1) + (y) * wrap], 0, 16 * sizeof(INT16));
687 memset(s->ac_val[0][(x) + (y + 1) * wrap], 0, 16 * sizeof(INT16));
688 memset(s->ac_val[0][(x + 1) + (y + 1) * wrap], 0, 16 * sizeof(INT16));
689 if (s->h263_msmpeg4) {
690 s->coded_block[(x) + (y) * wrap] = 0;
691 s->coded_block[(x + 1) + (y) * wrap] = 0;
692 s->coded_block[(x) + (y + 1) * wrap] = 0;
693 s->coded_block[(x + 1) + (y + 1) * wrap] = 0;
694 }
695 /* chroma */
696 wrap = s->mb_width + 2;
697 x = mb_x + 1;
698 y = mb_y + 1;
699 s->dc_val[1][(x) + (y) * wrap] = v;
700 s->dc_val[2][(x) + (y) * wrap] = v;
701 /* ac pred */
702 memset(s->ac_val[1][(x) + (y) * wrap], 0, 16 * sizeof(INT16));
703 memset(s->ac_val[2][(x) + (y) * wrap], 0, 16 * sizeof(INT16));
704 } else {
705 s->last_dc[0] = 128 << s->intra_dc_precision;
706 s->last_dc[1] = 128 << s->intra_dc_precision;
707 s->last_dc[2] = 128 << s->intra_dc_precision;
708 }
709 }
710
711 /* update motion predictor */
712 if (s->out_format == FMT_H263) {
713 int x, y, wrap;
714
715 x = 2 * mb_x + 1;
716 y = 2 * mb_y + 1;
717 wrap = 2 * s->mb_width + 2;
718 if (s->mb_intra) {
719 motion_x = 0;
720 motion_y = 0;
721 goto motion_init;
722 } else if (s->mv_type == MV_TYPE_16X16) {
723 motion_x = s->mv[0][0][0];
724 motion_y = s->mv[0][0][1];
725 motion_init:
726 /* no update if 8X8 because it has been done during parsing */
727 s->motion_val[(x) + (y) * wrap][0] = motion_x;
728 s->motion_val[(x) + (y) * wrap][1] = motion_y;
729 s->motion_val[(x + 1) + (y) * wrap][0] = motion_x;
730 s->motion_val[(x + 1) + (y) * wrap][1] = motion_y;
731 s->motion_val[(x) + (y + 1) * wrap][0] = motion_x;
732 s->motion_val[(x) + (y + 1) * wrap][1] = motion_y;
733 s->motion_val[(x + 1) + (y + 1) * wrap][0] = motion_x;
734 s->motion_val[(x + 1) + (y + 1) * wrap][1] = motion_y;
735 }
736 }
737
738 if (!s->intra_only) {
739 UINT8 *dest_y, *dest_cb, *dest_cr;
740 UINT8 *mbskip_ptr;
741
742 /* avoid copy if macroblock skipped in last frame too */
743 if (!s->encoding) {
744 mbskip_ptr = &s->mbskip_table[s->mb_y * s->mb_width + s->mb_x];
745 if (s->mb_skiped) {
746 s->mb_skiped = 0;
747 /* if previous was skipped too, then nothing to do ! */
748 if (*mbskip_ptr != 0)
749 goto the_end;
750                 *mbskip_ptr = 1; /* indicate that this time we skipped it */
751 } else {
752 *mbskip_ptr = 0; /* not skipped */
753 }
754 }
755
756 dest_y = s->current_picture[0] + (mb_y * 16 * s->linesize) + mb_x * 16;
757 dest_cb = s->current_picture[1] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;
758 dest_cr = s->current_picture[2] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;
759
760 if (s->interlaced_dct) {
761 dct_linesize = s->linesize * 2;
762 dct_offset = s->linesize;
763 } else {
764 dct_linesize = s->linesize;
765 dct_offset = s->linesize * 8;
766 }
767
768 if (!s->mb_intra) {
769 /* motion handling */
770 if (!s->no_rounding)
771 op_pix = put_pixels_tab;
772 else
773 op_pix = put_no_rnd_pixels_tab;
774
775 if (s->mv_dir & MV_DIR_FORWARD) {
776 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture, op_pix);
777 if (!s->no_rounding)
778 op_pix = avg_pixels_tab;
779 else
780 op_pix = avg_no_rnd_pixels_tab;
781 }
782 if (s->mv_dir & MV_DIR_BACKWARD) {
783 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture, op_pix);
784 }
785
786 /* add dct residue */
787 add_dct(s, block[0], 0, dest_y, dct_linesize);
788 add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
789 add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
790 add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
791
792 add_dct(s, block[4], 4, dest_cb, dct_linesize >> 1);
793 add_dct(s, block[5], 5, dest_cr, dct_linesize >> 1);
794 } else {
795 /* dct only in intra block */
796 put_dct(s, block[0], 0, dest_y, dct_linesize);
797 put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
798 put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
799 put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
800
801 put_dct(s, block[4], 4, dest_cb, dct_linesize >> 1);
802 put_dct(s, block[5], 5, dest_cr, dct_linesize >> 1);
803 }
804 }
805 the_end:
806 emms_c();
807 }
808
809 static void encode_picture(MpegEncContext *s, int picture_number)
810 {
811 int mb_x, mb_y, wrap;
812 UINT8 *ptr;
813 DCTELEM block[6][64];
814 int i, motion_x, motion_y;
815
816 s->picture_number = picture_number;
817 if (!s->fixed_qscale)
818 s->qscale = rate_estimate_qscale(s);
819
820 /* precompute matrix */
821 if (s->out_format == FMT_MJPEG) {
822         /* for mjpeg, qscale is folded directly into the matrix (scaled by qscale/8), so convert_matrix() is called with a fixed qscale of 8 */
823 s->intra_matrix[0] = default_intra_matrix[0];
824 for(i=1;i<64;i++)
825 s->intra_matrix[i] = (default_intra_matrix[i] * s->qscale) >> 3;
826 convert_matrix(s->q_intra_matrix, s->intra_matrix, 8);
827 } else {
828 convert_matrix(s->q_intra_matrix, s->intra_matrix, s->qscale);
829 convert_matrix(s->q_non_intra_matrix, s->non_intra_matrix, s->qscale);
830 }
831
832 switch(s->out_format) {
833 case FMT_MJPEG:
834 mjpeg_picture_header(s);
835 break;
836 case FMT_H263:
837 if (s->h263_msmpeg4)
838 msmpeg4_encode_picture_header(s, picture_number);
839 else if (s->h263_pred)
840 mpeg4_encode_picture_header(s, picture_number);
841 else if (s->h263_rv10)
842 rv10_encode_picture_header(s, picture_number);
843 else
844 h263_encode_picture_header(s, picture_number);
845 break;
846 case FMT_MPEG1:
847 mpeg1_encode_picture_header(s, picture_number);
848 break;
849 }
850
851 /* init last dc values */
852 /* note: quant matrix value (8) is implied here */
853 s->last_dc[0] = 128;
854 s->last_dc[1] = 128;
855 s->last_dc[2] = 128;
856 s->mb_incr = 1;
857 s->last_mv[0][0][0] = 0;
858 s->last_mv[0][0][1] = 0;
859 s->mv_type = MV_TYPE_16X16;
860 s->mv_dir = MV_DIR_FORWARD;
861
862 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
863 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
864
865 s->mb_x = mb_x;
866 s->mb_y = mb_y;
867
868             /* compute motion vector and macroblock type (intra or non-intra) */
869 motion_x = 0;
870 motion_y = 0;
871 if (s->pict_type == P_TYPE) {
872 s->mb_intra = estimate_motion(s, mb_x, mb_y,
873 &motion_x,
874 &motion_y);
875 } else {
876 s->mb_intra = 1;
877 }
878
879 /* get the pixels */
880 wrap = s->linesize;
881 ptr = s->new_picture[0] + (mb_y * 16 * wrap) + mb_x * 16;
882 get_pixels(block[0], ptr, wrap);
883 get_pixels(block[1], ptr + 8, wrap);
884 get_pixels(block[2], ptr + 8 * wrap, wrap);
885 get_pixels(block[3], ptr + 8 * wrap + 8, wrap);
886 wrap = s->linesize >> 1;
887 ptr = s->new_picture[1] + (mb_y * 8 * wrap) + mb_x * 8;
888 get_pixels(block[4], ptr, wrap);
889
890 wrap = s->linesize >> 1;
891 ptr = s->new_picture[2] + (mb_y * 8 * wrap) + mb_x * 8;
892 get_pixels(block[5], ptr, wrap);
893
894 /* subtract previous frame if non intra */
895 if (!s->mb_intra) {
896 int dxy, offset, mx, my;
897
898 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
899 ptr = s->last_picture[0] +
900 ((mb_y * 16 + (motion_y >> 1)) * s->linesize) +
901 (mb_x * 16 + (motion_x >> 1));
902
903 sub_pixels_2(block[0], ptr, s->linesize, dxy);
904 sub_pixels_2(block[1], ptr + 8, s->linesize, dxy);
905 sub_pixels_2(block[2], ptr + s->linesize * 8, s->linesize, dxy);
906 sub_pixels_2(block[3], ptr + 8 + s->linesize * 8, s->linesize ,dxy);
907
908 if (s->out_format == FMT_H263) {
909 /* special rounding for h263 */
910 dxy = 0;
911 if ((motion_x & 3) != 0)
912 dxy |= 1;
913 if ((motion_y & 3) != 0)
914 dxy |= 2;
915 mx = motion_x >> 2;
916 my = motion_y >> 2;
917 } else {
918 mx = motion_x / 2;
919 my = motion_y / 2;
920 dxy = ((my & 1) << 1) | (mx & 1);
921 mx >>= 1;
922 my >>= 1;
923 }
924 offset = ((mb_y * 8 + my) * (s->linesize >> 1)) + (mb_x * 8 + mx);
925 ptr = s->last_picture[1] + offset;
926 sub_pixels_2(block[4], ptr, s->linesize >> 1, dxy);
927 ptr = s->last_picture[2] + offset;
928 sub_pixels_2(block[5], ptr, s->linesize >> 1, dxy);
929 }
930 emms_c();
931
932 /* DCT & quantize */
933 if (s->h263_msmpeg4) {
934 msmpeg4_dc_scale(s);
935 } else if (s->h263_pred) {
936 h263_dc_scale(s);
937 } else {
938 /* default quantization values */
939 s->y_dc_scale = 8;
940 s->c_dc_scale = 8;
941 }
942
943 for(i=0;i<6;i++) {
944 int last_index;
945 if (av_fdct == jpeg_fdct_ifast)
946 last_index = dct_quantize(s, block[i], i, s->qscale);
947 else
948 last_index = dct_quantize_mmx(s, block[i], i, s->qscale);
949 s->block_last_index[i] = last_index;
950 }
951
952 /* huffman encode */
953 switch(s->out_format) {
954 case FMT_MPEG1:
955 mpeg1_encode_mb(s, block, motion_x, motion_y);
956 break;
957 case FMT_H263:
958 if (s->h263_msmpeg4)
959 msmpeg4_encode_mb(s, block, motion_x, motion_y);
960 else
961 h263_encode_mb(s, block, motion_x, motion_y);
962 break;
963 case FMT_MJPEG:
964 mjpeg_encode_mb(s, block);
965 break;
966 }
967
968             /* reconstruct the macroblock so that the encoder keeps the same reference picture state as the decoder */
969 s->mv[0][0][0] = motion_x;
970 s->mv[0][0][1] = motion_y;
971
972 MPV_decode_mb(s, block);
973 }
974 }
975 }
976
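/* quantize one 8x8 block in place, using the reciprocal tables built by
   convert_matrix(); coefficients are scanned in zigzag order and the index
   of the last non-zero coefficient is returned (-1 for an all-zero inter
   block, 0 when an intra block only keeps its DC term) */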
977 static int dct_quantize(MpegEncContext *s,
978 DCTELEM *block, int n,
979 int qscale)
980 {
981 int i, j, level, last_non_zero, q;
982 const int *qmat;
983
984 av_fdct (block);
985
986 if (s->mb_intra) {
987 if (n < 4)
988 q = s->y_dc_scale;
989 else
990 q = s->c_dc_scale;
991 q = q << 3;
992
993 /* note: block[0] is assumed to be positive */
994 block[0] = (block[0] + (q >> 1)) / q;
995 i = 1;
996 last_non_zero = 0;
997 if (s->out_format == FMT_H263) {
998 qmat = s->q_non_intra_matrix;
999 } else {
1000 qmat = s->q_intra_matrix;
1001 }
1002 } else {
1003 i = 0;
1004 last_non_zero = -1;
1005 qmat = s->q_non_intra_matrix;
1006 }
1007
1008 for(;i<64;i++) {
1009 j = zigzag_direct[i];
1010 level = block[j];
1011 level = level * qmat[j];
1012 #ifdef PARANOID
1013 {
1014 static int count = 0;
1015 int level1, level2, qmat1;
1016 double val;
1017 if (qmat == s->q_non_intra_matrix) {
1018 qmat1 = default_non_intra_matrix[j] * s->qscale;
1019 } else {
1020 qmat1 = default_intra_matrix[j] * s->qscale;
1021 }
1022 if (av_fdct != jpeg_fdct_ifast)
1023 val = ((double)block[j] * 8.0) / (double)qmat1;
1024 else
1025 val = ((double)block[j] * 8.0 * 2048.0) /
1026 ((double)qmat1 * aanscales[j]);
1027 level1 = (int)val;
1028 level2 = level / (1 << (QMAT_SHIFT - 3));
1029 if (level1 != level2) {
1030 fprintf(stderr, "%d: quant error qlevel=%d wanted=%d level=%d qmat1=%d qmat=%d wantedf=%0.6f\n",
1031 count, level2, level1, block[j], qmat1, qmat[j],
1032 val);
1033 count++;
1034 }
1035
1036 }
1037 #endif
1038 /* XXX: slight error for the low range. Test should be equivalent to
1039 (level <= -(1 << (QMAT_SHIFT - 3)) || level >= (1 <<
1040 (QMAT_SHIFT - 3)))
1041 */
1042 if (((level << (31 - (QMAT_SHIFT - 3))) >> (31 - (QMAT_SHIFT - 3))) !=
1043 level) {
1044 level = level / (1 << (QMAT_SHIFT - 3));
1045 /* XXX: currently, this code is not optimal. the range should be:
1046 mpeg1: -255..255
1047 mpeg2: -2048..2047
1048 h263: -128..127
1049 mpeg4: -2048..2047
1050 */
1051 if (level > 127)
1052 level = 127;
1053 else if (level < -128)
1054 level = -128;
1055 block[j] = level;
1056 last_non_zero = i;
1057 } else {
1058 block[j] = 0;
1059 }
1060 }
1061 return last_non_zero;
1062 }
1063
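/* same algorithm as dct_quantize() but with the smaller QMAT_SHIFT_MMX
   shift; despite the name this is plain C and matches the qmat[] built by
   convert_matrix() when the accurate (non-AAN) DCT is selected */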
1064 static int dct_quantize_mmx(MpegEncContext *s,
1065 DCTELEM *block, int n,
1066 int qscale)
1067 {
1068 int i, j, level, last_non_zero, q;
1069 const int *qmat;
1070
1071 av_fdct (block);
1072
1073 if (s->mb_intra) {
1074 if (n < 4)
1075 q = s->y_dc_scale;
1076 else
1077 q = s->c_dc_scale;
1078
1079 /* note: block[0] is assumed to be positive */
1080 block[0] = (block[0] + (q >> 1)) / q;
1081 i = 1;
1082 last_non_zero = 0;
1083 if (s->out_format == FMT_H263) {
1084 qmat = s->q_non_intra_matrix;
1085 } else {
1086 qmat = s->q_intra_matrix;
1087 }
1088 } else {
1089 i = 0;
1090 last_non_zero = -1;
1091 qmat = s->q_non_intra_matrix;
1092 }
1093
1094 for(;i<64;i++) {
1095 j = zigzag_direct[i];
1096 level = block[j];
1097 level = level * qmat[j];
1098 /* XXX: slight error for the low range. Test should be equivalent to
1099 (level <= -(1 << (QMAT_SHIFT_MMX - 3)) || level >= (1 <<
1100 (QMAT_SHIFT_MMX - 3)))
1101 */
1102 if (((level << (31 - (QMAT_SHIFT_MMX - 3))) >> (31 - (QMAT_SHIFT_MMX - 3))) !=
1103 level) {
1104 level = level / (1 << (QMAT_SHIFT_MMX - 3));
1105 /* XXX: currently, this code is not optimal. the range should be:
1106 mpeg1: -255..255
1107 mpeg2: -2048..2047
1108 h263: -128..127
1109 mpeg4: -2048..2047
1110 */
1111 if (level > 127)
1112 level = 127;
1113 else if (level < -128)
1114 level = -128;
1115 block[j] = level;
1116 last_non_zero = i;
1117 } else {
1118 block[j] = 0;
1119 }
1120 }
1121 return last_non_zero;
1122 }
1123
1124 #ifndef HAVE_DCT_UNQUANTIZE
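/* reference C inverse quantization (an arch specific version may be plugged
   in through DCT_UNQUANTIZE); in the mpeg1 paths the "(level - 1) | 1"
   forces each reconstructed level to be odd, which is the oddification step
   of MPEG-1 mismatch control */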
1125 static void dct_unquantize(MpegEncContext *s,
1126 DCTELEM *block, int n, int qscale)
1127 {
1128 int i, level;
1129 const UINT16 *quant_matrix;
1130
1131 if (s->mb_intra) {
1132 if (n < 4)
1133 block[0] = block[0] * s->y_dc_scale;
1134 else
1135 block[0] = block[0] * s->c_dc_scale;
1136 if (s->out_format == FMT_H263) {
1137 i = 1;
1138 goto unquant_even;
1139 }
1140 /* XXX: only mpeg1 */
1141 quant_matrix = s->intra_matrix;
1142 for(i=1;i<64;i++) {
1143 level = block[i];
1144 if (level) {
1145 if (level < 0) {
1146 level = -level;
1147 level = (int)(level * qscale * quant_matrix[i]) >> 3;
1148 level = (level - 1) | 1;
1149 level = -level;
1150 } else {
1151 level = (int)(level * qscale * quant_matrix[i]) >> 3;
1152 level = (level - 1) | 1;
1153 }
1154 #ifdef PARANOID
1155 if (level < -2048 || level > 2047)
1156 fprintf(stderr, "unquant error %d %d\n", i, level);
1157 #endif
1158 block[i] = level;
1159 }
1160 }
1161 } else {
1162 i = 0;
1163 unquant_even:
1164 quant_matrix = s->non_intra_matrix;
1165 for(;i<64;i++) {
1166 level = block[i];
1167 if (level) {
1168 if (level < 0) {
1169 level = -level;
1170 level = (((level << 1) + 1) * qscale *
1171 ((int) (quant_matrix[i]))) >> 4;
1172 level = (level - 1) | 1;
1173 level = -level;
1174 } else {
1175 level = (((level << 1) + 1) * qscale *
1176 ((int) (quant_matrix[i]))) >> 4;
1177 level = (level - 1) | 1;
1178 }
1179 #ifdef PARANOID
1180 if (level < -2048 || level > 2047)
1181 fprintf(stderr, "unquant error %d %d\n", i, level);
1182 #endif
1183 block[i] = level;
1184 }
1185 }
1186 }
1187 }
1188 #endif
1189
1190 /* rate control */
1191
1192 /* an I frame is assumed to be I_FRAME_SIZE_RATIO times bigger than a P frame */
1193 #define I_FRAME_SIZE_RATIO 3.0
1194 #define QSCALE_K 20
1195
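/* very simple rate control: with fps = frame_rate / FRAME_RATE_BASE, one GOP
   of gop_size frames gets gop_size * bit_rate / fps bits, split so that the
   I frame is budgeted I_FRAME_SIZE_RATIO times the bits of a P frame:

     P_frame_bits = gop_size * bit_rate / (fps * (I_FRAME_SIZE_RATIO + gop_size - 1))
     I_frame_bits = I_FRAME_SIZE_RATIO * P_frame_bits
*/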
1196 static void rate_control_init(MpegEncContext *s)
1197 {
1198 s->wanted_bits = 0;
1199
1200 if (s->intra_only) {
1201 s->I_frame_bits = ((INT64)s->bit_rate * FRAME_RATE_BASE) / s->frame_rate;
1202 s->P_frame_bits = s->I_frame_bits;
1203 } else {
1204 s->P_frame_bits = (int) ((float)(s->gop_size * s->bit_rate) /
1205 (float)((float)s->frame_rate / FRAME_RATE_BASE * (I_FRAME_SIZE_RATIO + s->gop_size - 1)));
1206 s->I_frame_bits = (int)(s->P_frame_bits * I_FRAME_SIZE_RATIO);
1207 }
1208
1209 #if defined(DEBUG)
1210 printf("I_frame_size=%d P_frame_size=%d\n",
1211 s->I_frame_bits, s->P_frame_bits);
1212 #endif
1213 }
1214
1215
1216 /*
1217 * This heuristic is rather poor, but at least we do not have to
1218 * change the qscale at every macroblock.
1219 */
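/* diff = wanted_bits - total_bits measures how far the encoder is below
   (positive) or above (negative) its bit budget so far; the qscale is a
   linear function of that error, weighted per macroblock by QSCALE_K:

     q = 31 - diff / (QSCALE_K * mb_width * mb_height)

   then clipped to [qmin, 31] */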
1220 static int rate_estimate_qscale(MpegEncContext *s)
1221 {
1222 long long total_bits = s->total_bits;
1223 float q;
1224 int qscale, diff, qmin;
1225
1226 if (s->pict_type == I_TYPE) {
1227 s->wanted_bits += s->I_frame_bits;
1228 } else {
1229 s->wanted_bits += s->P_frame_bits;
1230 }
1231 diff = s->wanted_bits - total_bits;
1232 q = 31.0 - (float)diff / (QSCALE_K * s->mb_height * s->mb_width);
1233 /* adjust for I frame */
1234 if (s->pict_type == I_TYPE && !s->intra_only) {
1235 q /= I_FRAME_SIZE_RATIO;
1236 }
1237
1238     /* using too small a Q scale leads to problems in mpeg1 and h263
1239 because AC coefficients are clamped to 255 or 127 */
1240 qmin = 3;
1241 if (q < qmin)
1242 q = qmin;
1243 else if (q > 31)
1244 q = 31;
1245 qscale = (int)(q + 0.5);
1246 #if defined(DEBUG)
1247 printf("%d: total=%Ld br=%0.1f diff=%d qest=%0.1f\n",
1248 s->picture_number,
1249 total_bits,
1250 (float)s->frame_rate / FRAME_RATE_BASE *
1251 total_bits / s->picture_number,
1252 diff, q);
1253 #endif
1254 return qscale;
1255 }
1256
1257 AVCodec mpeg1video_encoder = {
1258 "mpeg1video",
1259 CODEC_TYPE_VIDEO,
1260 CODEC_ID_MPEG1VIDEO,
1261 sizeof(MpegEncContext),
1262 MPV_encode_init,
1263 MPV_encode_picture,
1264 MPV_encode_end,
1265 };
1266
1267 AVCodec h263_encoder = {
1268 "h263",
1269 CODEC_TYPE_VIDEO,
1270 CODEC_ID_H263,
1271 sizeof(MpegEncContext),
1272 MPV_encode_init,
1273 MPV_encode_picture,
1274 MPV_encode_end,
1275 };
1276
1277 AVCodec h263p_encoder = {
1278 "h263p",
1279 CODEC_TYPE_VIDEO,
1280 CODEC_ID_H263P,
1281 sizeof(MpegEncContext),
1282 MPV_encode_init,
1283 MPV_encode_picture,
1284 MPV_encode_end,
1285 };
1286
1287 AVCodec rv10_encoder = {
1288 "rv10",
1289 CODEC_TYPE_VIDEO,
1290 CODEC_ID_RV10,
1291 sizeof(MpegEncContext),
1292 MPV_encode_init,
1293 MPV_encode_picture,
1294 MPV_encode_end,
1295 };
1296
1297 AVCodec mjpeg_encoder = {
1298 "mjpeg",
1299 CODEC_TYPE_VIDEO,
1300 CODEC_ID_MJPEG,
1301 sizeof(MpegEncContext),
1302 MPV_encode_init,
1303 MPV_encode_picture,
1304 MPV_encode_end,
1305 };
1306
1307 AVCodec opendivx_encoder = {
1308 "opendivx",
1309 CODEC_TYPE_VIDEO,
1310 CODEC_ID_OPENDIVX,
1311 sizeof(MpegEncContext),
1312 MPV_encode_init,
1313 MPV_encode_picture,
1314 MPV_encode_end,
1315 };
1316
1317 AVCodec msmpeg4_encoder = {
1318 "msmpeg4",
1319 CODEC_TYPE_VIDEO,
1320 CODEC_ID_MSMPEG4,
1321 sizeof(MpegEncContext),
1322 MPV_encode_init,
1323 MPV_encode_picture,
1324 MPV_encode_end,
1325 };
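
/*
 * Illustrative sketch (not part of the library): how the entry points above
 * fit together for one encoding run.  In practice they are reached through
 * the function pointers registered in the AVCodec tables; the field values
 * below are examples only.
 *
 *     AVCodecContext ctx;              // ctx.priv_data must point to a
 *                                      // zeroed MpegEncContext
 *     ctx.codec      = &h263_encoder;  // selects the syntax in MPV_encode_init()
 *     ctx.bit_rate   = 800000;
 *     ctx.width      = 352;
 *     ctx.height     = 288;
 *     ctx.frame_rate = 25 * FRAME_RATE_BASE;
 *     ctx.gop_size   = 12;
 *
 *     MPV_encode_init(&ctx);                 // per codec setup + MPV_common_init()
 *     for each input AVPicture pict:
 *         n = MPV_encode_picture(&ctx, outbuf, outbuf_size, &pict);
 *         // write the first n bytes of outbuf to the output stream
 *     MPV_encode_end(&ctx);                  // free everything
 */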