libavcodec: Apply parameter change side data when decoding audio
[libav.git] / libavcodec / utils.c
1 /*
2 * utils for libavcodec
3 * Copyright (c) 2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * This file is part of Libav.
7 *
8 * Libav is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * Libav is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with Libav; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 /**
24 * @file
25 * Common utility functions shared across libavcodec.
26 */
27
28 #include "libavutil/avstring.h"
29 #include "libavutil/crc.h"
30 #include "libavutil/mathematics.h"
31 #include "libavutil/pixdesc.h"
32 #include "libavutil/audioconvert.h"
33 #include "libavutil/imgutils.h"
34 #include "libavutil/samplefmt.h"
35 #include "libavutil/dict.h"
36 #include "avcodec.h"
37 #include "dsputil.h"
38 #include "libavutil/opt.h"
39 #include "imgconvert.h"
40 #include "thread.h"
41 #include "audioconvert.h"
42 #include "internal.h"
43 #include "bytestream.h"
44 #include <stdlib.h>
45 #include <stdarg.h>
46 #include <limits.h>
47 #include <float.h>
48
49 static int volatile entangled_thread_counter=0;
50 static int (*ff_lockmgr_cb)(void **mutex, enum AVLockOp op);
51 static void *codec_mutex;
52 static void *avformat_mutex;
53
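/*
 * Growth policy shared by av_fast_realloc() and av_fast_malloc(): when the
 * requested min_size exceeds the current size, the allocation is bumped to
 * FFMAX(17*min_size/16 + 32, min_size) so that a sequence of slightly larger
 * requests does not cause a reallocation on every call. For example, a
 * request of 1000 bytes actually allocates 17*1000/16 + 32 = 1094 bytes.
 */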
54 void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
55 {
56 if(min_size < *size)
57 return ptr;
58
59 min_size= FFMAX(17*min_size/16 + 32, min_size);
60
61 ptr= av_realloc(ptr, min_size);
62     if(!ptr) //we could store the unmodified min_size here, but 0 is safer in case the caller lost the old ptr and now passes NULL
63 min_size= 0;
64
65 *size= min_size;
66
67 return ptr;
68 }
69
70 void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
71 {
72 void **p = ptr;
73 if (min_size < *size)
74 return;
75 min_size= FFMAX(17*min_size/16 + 32, min_size);
76 av_free(*p);
77 *p = av_malloc(min_size);
78 if (!*p) min_size = 0;
79 *size= min_size;
80 }
81
82 /* encoder management */
83 static AVCodec *first_avcodec = NULL;
84
85 AVCodec *av_codec_next(AVCodec *c){
86 if(c) return c->next;
87 else return first_avcodec;
88 }
89
90 #if !FF_API_AVCODEC_INIT
91 static
92 #endif
93 void avcodec_init(void)
94 {
95 static int initialized = 0;
96
97 if (initialized != 0)
98 return;
99 initialized = 1;
100
101 dsputil_static_init();
102 }
103
104 void avcodec_register(AVCodec *codec)
105 {
106 AVCodec **p;
107 avcodec_init();
108 p = &first_avcodec;
109 while (*p != NULL) p = &(*p)->next;
110 *p = codec;
111 codec->next = NULL;
112
113 if (codec->init_static_data)
114 codec->init_static_data(codec);
115 }
116
117 unsigned avcodec_get_edge_width(void)
118 {
119 return EDGE_WIDTH;
120 }
121
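/*
 * Note: width/height are derived from the coded dimensions with a rounding-up
 * shift; -((-x) >> lowres) equals ceil(x / 2^lowres) for non-negative x, so
 * e.g. a coded width of 101 with lowres == 1 yields a width of 51.
 */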
122 void avcodec_set_dimensions(AVCodecContext *s, int width, int height){
123 s->coded_width = width;
124 s->coded_height= height;
125 s->width = -((-width )>>s->lowres);
126 s->height= -((-height)>>s->lowres);
127 }
128
129 #define INTERNAL_BUFFER_SIZE (32+1)
130
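/*
 * avcodec_align_dimensions2() pads the given dimensions up to the macroblock
 * and SIMD requirements of the current codec and pixel format and reports the
 * required linesize alignment per plane. For example, 1918x1080 YUV420P is
 * padded to 1920x1088 (16-pixel wide, 32-pixel high alignment), before the
 * extra two lines added below for codecs whose chroma MC over-reads.
 */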
131 void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
132 int linesize_align[AV_NUM_DATA_POINTERS])
133 {
134 int i;
135 int w_align= 1;
136 int h_align= 1;
137
138 switch(s->pix_fmt){
139 case PIX_FMT_YUV420P:
140 case PIX_FMT_YUYV422:
141 case PIX_FMT_UYVY422:
142 case PIX_FMT_YUV422P:
143 case PIX_FMT_YUV440P:
144 case PIX_FMT_YUV444P:
145 case PIX_FMT_GBRP:
146 case PIX_FMT_GRAY8:
147 case PIX_FMT_GRAY16BE:
148 case PIX_FMT_GRAY16LE:
149 case PIX_FMT_YUVJ420P:
150 case PIX_FMT_YUVJ422P:
151 case PIX_FMT_YUVJ440P:
152 case PIX_FMT_YUVJ444P:
153 case PIX_FMT_YUVA420P:
154 case PIX_FMT_YUV420P9LE:
155 case PIX_FMT_YUV420P9BE:
156 case PIX_FMT_YUV420P10LE:
157 case PIX_FMT_YUV420P10BE:
158 case PIX_FMT_YUV422P9LE:
159 case PIX_FMT_YUV422P9BE:
160 case PIX_FMT_YUV422P10LE:
161 case PIX_FMT_YUV422P10BE:
162 case PIX_FMT_YUV444P9LE:
163 case PIX_FMT_YUV444P9BE:
164 case PIX_FMT_YUV444P10LE:
165 case PIX_FMT_YUV444P10BE:
166 case PIX_FMT_GBRP9LE:
167 case PIX_FMT_GBRP9BE:
168 case PIX_FMT_GBRP10LE:
169 case PIX_FMT_GBRP10BE:
170         w_align = 16; //FIXME assume 16 pixels per macroblock
171 h_align = 16 * 2; // interlaced needs 2 macroblocks height
172 break;
173 case PIX_FMT_YUV411P:
174 case PIX_FMT_UYYVYY411:
175 w_align=32;
176 h_align=8;
177 break;
178 case PIX_FMT_YUV410P:
179 if(s->codec_id == CODEC_ID_SVQ1){
180 w_align=64;
181 h_align=64;
182 }
183 case PIX_FMT_RGB555:
184 if(s->codec_id == CODEC_ID_RPZA){
185 w_align=4;
186 h_align=4;
187 }
188 case PIX_FMT_PAL8:
189 case PIX_FMT_BGR8:
190 case PIX_FMT_RGB8:
191 if(s->codec_id == CODEC_ID_SMC){
192 w_align=4;
193 h_align=4;
194 }
195 break;
196 case PIX_FMT_BGR24:
197 if((s->codec_id == CODEC_ID_MSZH) || (s->codec_id == CODEC_ID_ZLIB)){
198 w_align=4;
199 h_align=4;
200 }
201 break;
202 default:
203 w_align= 1;
204 h_align= 1;
205 break;
206 }
207
208 *width = FFALIGN(*width , w_align);
209 *height= FFALIGN(*height, h_align);
210 if(s->codec_id == CODEC_ID_H264 || s->lowres)
211 *height+=2; // some of the optimized chroma MC reads one line too much
212 // which is also done in mpeg decoders with lowres > 0
213
214 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
215 linesize_align[i] = STRIDE_ALIGN;
216 //STRIDE_ALIGN is 8 for SSE* but this does not work for SVQ1 chroma planes
217 //we could change STRIDE_ALIGN to 16 for x86/sse but it would increase the
218     //picture size unnecessarily in some cases. The solution here is not
219 //pretty and better ideas are welcome!
220 #if HAVE_MMX
221 if(s->codec_id == CODEC_ID_SVQ1 || s->codec_id == CODEC_ID_VP5 ||
222 s->codec_id == CODEC_ID_VP6 || s->codec_id == CODEC_ID_VP6F ||
223 s->codec_id == CODEC_ID_VP6A) {
224 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
225 linesize_align[i] = 16;
226 }
227 #endif
228 }
229
230 void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){
231 int chroma_shift = av_pix_fmt_descriptors[s->pix_fmt].log2_chroma_w;
232 int linesize_align[AV_NUM_DATA_POINTERS];
233 int align;
234 avcodec_align_dimensions2(s, width, height, linesize_align);
235 align = FFMAX(linesize_align[0], linesize_align[3]);
236 linesize_align[1] <<= chroma_shift;
237 linesize_align[2] <<= chroma_shift;
238 align = FFMAX3(align, linesize_align[1], linesize_align[2]);
239 *width=FFALIGN(*width, align);
240 }
241
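/*
 * Default get_buffer() for audio. A single InternalBuffer is kept in
 * avctx->internal and reused across calls as long as the required size and
 * channel count do not grow. extended_data either aliases buf->data[] or,
 * for planar formats with more than AV_NUM_DATA_POINTERS channels, points to
 * a separately allocated array of per-channel pointers.
 */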
242 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
243 {
244 AVCodecInternal *avci = avctx->internal;
245 InternalBuffer *buf;
246 int buf_size, ret, i, needs_extended_data;
247
248 buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
249 frame->nb_samples, avctx->sample_fmt,
250 32);
251 if (buf_size < 0)
252 return AVERROR(EINVAL);
253
254 needs_extended_data = av_sample_fmt_is_planar(avctx->sample_fmt) &&
255 avctx->channels > AV_NUM_DATA_POINTERS;
256
257 /* allocate InternalBuffer if needed */
258 if (!avci->buffer) {
259 avci->buffer = av_mallocz(sizeof(InternalBuffer));
260 if (!avci->buffer)
261 return AVERROR(ENOMEM);
262 }
263 buf = avci->buffer;
264
265 /* if there is a previously-used internal buffer, check its size and
266 channel count to see if we can reuse it */
267 if (buf->extended_data) {
268 /* if current buffer is too small, free it */
269 if (buf->extended_data[0] && buf_size > buf->audio_data_size) {
270 av_free(buf->extended_data[0]);
271 if (buf->extended_data != buf->data)
272             av_free(buf->extended_data);
273 buf->extended_data = NULL;
274 buf->data[0] = NULL;
275 }
276 /* if number of channels has changed, reset and/or free extended data
277 pointers but leave data buffer in buf->data[0] for reuse */
278 if (buf->nb_channels != avctx->channels) {
279 if (buf->extended_data != buf->data)
280 av_free(buf->extended_data);
281 buf->extended_data = NULL;
282 }
283 }
284
285 /* if there is no previous buffer or the previous buffer cannot be used
286 as-is, allocate a new buffer and/or rearrange the channel pointers */
287 if (!buf->extended_data) {
288 /* if the channel pointers will fit, just set extended_data to data,
289 otherwise allocate the extended_data channel pointers */
290 if (needs_extended_data) {
291 buf->extended_data = av_mallocz(avctx->channels *
292 sizeof(*buf->extended_data));
293 if (!buf->extended_data)
294 return AVERROR(ENOMEM);
295 } else {
296 buf->extended_data = buf->data;
297 }
298
299 /* if there is a previous buffer and it is large enough, reuse it and
300 just fill-in new channel pointers and linesize, otherwise allocate
301 a new buffer */
302 if (buf->extended_data[0]) {
303 ret = av_samples_fill_arrays(buf->extended_data, &buf->linesize[0],
304 buf->extended_data[0], avctx->channels,
305 frame->nb_samples, avctx->sample_fmt,
306 32);
307 } else {
308 ret = av_samples_alloc(buf->extended_data, &buf->linesize[0],
309 avctx->channels, frame->nb_samples,
310 avctx->sample_fmt, 32);
311 }
312 if (ret)
313 return ret;
314
315 /* if data was not used for extended_data, we need to copy as many of
316 the extended_data channel pointers as will fit */
317 if (needs_extended_data) {
318 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
319 buf->data[i] = buf->extended_data[i];
320 }
321 buf->audio_data_size = buf_size;
322 buf->nb_channels = avctx->channels;
323 }
324
325 /* copy InternalBuffer info to the AVFrame */
326 frame->type = FF_BUFFER_TYPE_INTERNAL;
327 frame->extended_data = buf->extended_data;
328 frame->linesize[0] = buf->linesize[0];
329 memcpy(frame->data, buf->data, sizeof(frame->data));
330
331 if (avctx->pkt) frame->pkt_pts = avctx->pkt->pts;
332 else frame->pkt_pts = AV_NOPTS_VALUE;
333 frame->reordered_opaque = avctx->reordered_opaque;
334
335 if (avctx->debug & FF_DEBUG_BUFFERS)
336 av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p, "
337 "internal audio buffer used\n", frame);
338
339 return 0;
340 }
341
342 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
343 {
344 int i;
345 int w= s->width;
346 int h= s->height;
347 InternalBuffer *buf;
348 AVCodecInternal *avci = s->internal;
349
350 if(pic->data[0]!=NULL) {
351 av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n");
352 return -1;
353 }
354 if(avci->buffer_count >= INTERNAL_BUFFER_SIZE) {
355 av_log(s, AV_LOG_ERROR, "buffer_count overflow (missing release_buffer?)\n");
356 return -1;
357 }
358
359 if(av_image_check_size(w, h, 0, s))
360 return -1;
361
362 if (!avci->buffer) {
363 avci->buffer = av_mallocz((INTERNAL_BUFFER_SIZE+1) *
364 sizeof(InternalBuffer));
365 }
366
367 buf = &avci->buffer[avci->buffer_count];
368
369 if(buf->base[0] && (buf->width != w || buf->height != h || buf->pix_fmt != s->pix_fmt)){
370 if(s->active_thread_type&FF_THREAD_FRAME) {
371 av_log_missing_feature(s, "Width/height changing with frame threads is", 0);
372 return -1;
373 }
374
375 for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
376 av_freep(&buf->base[i]);
377 buf->data[i]= NULL;
378 }
379 }
380
381 if (!buf->base[0]) {
382 int h_chroma_shift, v_chroma_shift;
383 int size[4] = {0};
384 int tmpsize;
385 int unaligned;
386 AVPicture picture;
387 int stride_align[AV_NUM_DATA_POINTERS];
388 const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
389
390 avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
391
392 avcodec_align_dimensions2(s, &w, &h, stride_align);
393
394 if(!(s->flags&CODEC_FLAG_EMU_EDGE)){
395 w+= EDGE_WIDTH*2;
396 h+= EDGE_WIDTH*2;
397 }
398
399 do {
400 // NOTE: do not align linesizes individually, this breaks e.g. assumptions
401 // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
402 av_image_fill_linesizes(picture.linesize, s->pix_fmt, w);
403 // increase alignment of w for next try (rhs gives the lowest bit set in w)
404 w += w & ~(w-1);
405
406 unaligned = 0;
407 for (i=0; i<4; i++){
408 unaligned |= picture.linesize[i] % stride_align[i];
409 }
410 } while (unaligned);
411
412 tmpsize = av_image_fill_pointers(picture.data, s->pix_fmt, h, NULL, picture.linesize);
413 if (tmpsize < 0)
414 return -1;
415
416 for (i=0; i<3 && picture.data[i+1]; i++)
417 size[i] = picture.data[i+1] - picture.data[i];
418 size[i] = tmpsize - (picture.data[i] - picture.data[0]);
419
420 memset(buf->base, 0, sizeof(buf->base));
421 memset(buf->data, 0, sizeof(buf->data));
422
423 for(i=0; i<4 && size[i]; i++){
424 const int h_shift= i==0 ? 0 : h_chroma_shift;
425 const int v_shift= i==0 ? 0 : v_chroma_shift;
426
427 buf->linesize[i]= picture.linesize[i];
428
429 buf->base[i]= av_malloc(size[i]+16); //FIXME 16
430 if(buf->base[i]==NULL) return -1;
431 memset(buf->base[i], 128, size[i]);
432
433 // no edge if EDGE EMU or not planar YUV
434 if((s->flags&CODEC_FLAG_EMU_EDGE) || !size[2])
435 buf->data[i] = buf->base[i];
436 else
437 buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i]*EDGE_WIDTH>>v_shift) + (pixel_size*EDGE_WIDTH>>h_shift), stride_align[i]);
438 }
439 for (; i < AV_NUM_DATA_POINTERS; i++) {
440 buf->base[i] = buf->data[i] = NULL;
441 buf->linesize[i] = 0;
442 }
443 if(size[1] && !size[2])
444 ff_set_systematic_pal2((uint32_t*)buf->data[1], s->pix_fmt);
445 buf->width = s->width;
446 buf->height = s->height;
447 buf->pix_fmt= s->pix_fmt;
448 }
449 pic->type= FF_BUFFER_TYPE_INTERNAL;
450
451 for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
452 pic->base[i]= buf->base[i];
453 pic->data[i]= buf->data[i];
454 pic->linesize[i]= buf->linesize[i];
455 }
456 pic->extended_data = pic->data;
457 avci->buffer_count++;
458
459 if(s->pkt) pic->pkt_pts= s->pkt->pts;
460 else pic->pkt_pts= AV_NOPTS_VALUE;
461 pic->reordered_opaque= s->reordered_opaque;
462
463 if(s->debug&FF_DEBUG_BUFFERS)
464 av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p, %d "
465 "buffers used\n", pic, avci->buffer_count);
466
467 return 0;
468 }
469
470 int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame)
471 {
472 switch (avctx->codec_type) {
473 case AVMEDIA_TYPE_VIDEO:
474 return video_get_buffer(avctx, frame);
475 case AVMEDIA_TYPE_AUDIO:
476 return audio_get_buffer(avctx, frame);
477 default:
478 return -1;
479 }
480 }
481
482 void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){
483 int i;
484 InternalBuffer *buf, *last;
485 AVCodecInternal *avci = s->internal;
486
487 assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
488
489 assert(pic->type==FF_BUFFER_TYPE_INTERNAL);
490 assert(avci->buffer_count);
491
492 if (avci->buffer) {
493 buf = NULL; /* avoids warning */
494     for (i = 0; i < avci->buffer_count; i++) { //just 3-5 checks, so not worth optimizing
495 buf = &avci->buffer[i];
496 if (buf->data[0] == pic->data[0])
497 break;
498 }
499 assert(i < avci->buffer_count);
500 avci->buffer_count--;
501 last = &avci->buffer[avci->buffer_count];
502
503 if (buf != last)
504 FFSWAP(InternalBuffer, *buf, *last);
505 }
506
507 for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
508 pic->data[i]=NULL;
509 // pic->base[i]=NULL;
510 }
511 //printf("R%X\n", pic->opaque);
512
513 if(s->debug&FF_DEBUG_BUFFERS)
514 av_log(s, AV_LOG_DEBUG, "default_release_buffer called on pic %p, %d "
515 "buffers used\n", pic, avci->buffer_count);
516 }
517
518 int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){
519 AVFrame temp_pic;
520 int i;
521
522 assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
523
524 /* If no picture return a new buffer */
525 if(pic->data[0] == NULL) {
526 /* We will copy from buffer, so must be readable */
527 pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
528 return s->get_buffer(s, pic);
529 }
530
531 /* If internal buffer type return the same buffer */
532 if(pic->type == FF_BUFFER_TYPE_INTERNAL) {
533 if(s->pkt) pic->pkt_pts= s->pkt->pts;
534 else pic->pkt_pts= AV_NOPTS_VALUE;
535 pic->reordered_opaque= s->reordered_opaque;
536 return 0;
537 }
538
539 /*
540  * Not an internal buffer type and reget_buffer() is not overridden: emulate it by allocating a new buffer and copying the old data over
541 */
542 temp_pic = *pic;
543 for(i = 0; i < AV_NUM_DATA_POINTERS; i++)
544 pic->data[i] = pic->base[i] = NULL;
545 pic->opaque = NULL;
546 /* Allocate new frame */
547 if (s->get_buffer(s, pic))
548 return -1;
549 /* Copy image data from old buffer to new buffer */
550 av_picture_copy((AVPicture*)pic, (AVPicture*)&temp_pic, s->pix_fmt, s->width,
551 s->height);
552 s->release_buffer(s, &temp_pic); // Release old frame
553 return 0;
554 }
555
556 int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size){
557 int i;
558
559 for(i=0; i<count; i++){
560 int r= func(c, (char*)arg + i*size);
561 if(ret) ret[i]= r;
562 }
563 return 0;
564 }
565
566 int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr),void *arg, int *ret, int count){
567 int i;
568
569 for(i=0; i<count; i++){
570 int r= func(c, arg, i, 0);
571 if(ret) ret[i]= r;
572 }
573 return 0;
574 }
575
576 enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat *fmt){
577 while (*fmt != PIX_FMT_NONE && ff_is_hwaccel_pix_fmt(*fmt))
578 ++fmt;
579 return fmt[0];
580 }
581
582 void avcodec_get_frame_defaults(AVFrame *pic){
583 memset(pic, 0, sizeof(AVFrame));
584
585 pic->pts= AV_NOPTS_VALUE;
586 pic->key_frame= 1;
587 }
588
589 AVFrame *avcodec_alloc_frame(void){
590 AVFrame *pic= av_malloc(sizeof(AVFrame));
591
592 if(pic==NULL) return NULL;
593
594 avcodec_get_frame_defaults(pic);
595
596 return pic;
597 }
598
599 #if FF_API_AVCODEC_OPEN
600 int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
601 {
602 return avcodec_open2(avctx, codec, NULL);
603 }
604 #endif
605
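/*
 * Typical call sequence (illustrative sketch, error handling omitted):
 *
 *     AVCodec *codec = avcodec_find_decoder(CODEC_ID_H264);
 *     AVCodecContext *ctx = avcodec_alloc_context3(codec);
 *     AVDictionary *opts = NULL;      // codec-private options, may stay NULL
 *     if (avcodec_open2(ctx, codec, &opts) < 0)
 *         return;                     // failed to open
 *     // on return, opts holds only the options that were not consumed
 *
 * Note that opening/closing is not thread-safe by itself; see
 * av_lockmgr_register() further down.
 */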
606 int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options)
607 {
608 int ret = 0;
609 AVDictionary *tmp = NULL;
610
611 if (options)
612 av_dict_copy(&tmp, *options, 0);
613
614 /* If there is a user-supplied mutex locking routine, call it. */
615 if (ff_lockmgr_cb) {
616 if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN))
617 return -1;
618 }
619
620 entangled_thread_counter++;
621 if(entangled_thread_counter != 1){
622 av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n");
623 ret = -1;
624 goto end;
625 }
626
627 if(avctx->codec || !codec) {
628 ret = AVERROR(EINVAL);
629 goto end;
630 }
631
632 avctx->internal = av_mallocz(sizeof(AVCodecInternal));
633 if (!avctx->internal) {
634 ret = AVERROR(ENOMEM);
635 goto end;
636 }
637
638 if (codec->priv_data_size > 0) {
639 if(!avctx->priv_data){
640 avctx->priv_data = av_mallocz(codec->priv_data_size);
641 if (!avctx->priv_data) {
642 ret = AVERROR(ENOMEM);
643 goto end;
644 }
645 if (codec->priv_class) {
646 *(AVClass**)avctx->priv_data= codec->priv_class;
647 av_opt_set_defaults(avctx->priv_data);
648 }
649 }
650 if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0)
651 goto free_and_end;
652 } else {
653 avctx->priv_data = NULL;
654 }
655 if ((ret = av_opt_set_dict(avctx, &tmp)) < 0)
656 goto free_and_end;
657
658 if(avctx->coded_width && avctx->coded_height)
659 avcodec_set_dimensions(avctx, avctx->coded_width, avctx->coded_height);
660 else if(avctx->width && avctx->height)
661 avcodec_set_dimensions(avctx, avctx->width, avctx->height);
662
663 if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height)
664 && ( av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx) < 0
665 || av_image_check_size(avctx->width, avctx->height, 0, avctx) < 0)) {
666 av_log(avctx, AV_LOG_WARNING, "ignoring invalid width/height values\n");
667 avcodec_set_dimensions(avctx, 0, 0);
668 }
669
670 /* if the decoder init function was already called previously,
671 free the already allocated subtitle_header before overwriting it */
672 if (codec->decode)
673 av_freep(&avctx->subtitle_header);
674
675 #define SANE_NB_CHANNELS 128U
676 if (avctx->channels > SANE_NB_CHANNELS) {
677 ret = AVERROR(EINVAL);
678 goto free_and_end;
679 }
680
681 avctx->codec = codec;
682 if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) &&
683 avctx->codec_id == CODEC_ID_NONE) {
684 avctx->codec_type = codec->type;
685 avctx->codec_id = codec->id;
686 }
687 if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type
688 && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) {
689 av_log(avctx, AV_LOG_ERROR, "codec type or id mismatches\n");
690 ret = AVERROR(EINVAL);
691 goto free_and_end;
692 }
693 avctx->frame_number = 0;
694 #if FF_API_ER
695
696 av_log(avctx, AV_LOG_DEBUG, "err{or,}_recognition separate: %d; %d\n",
697 avctx->error_recognition, avctx->err_recognition);
698 /* FF_ER_CAREFUL (==1) implies AV_EF_CRCCHECK (== 1<<1 - 1),
699        FF_ER_COMPLIANT (==2) implies AV_EF_{CRCCHECK,BITSTREAM} (== 1<<2 - 1), et cetera. */
700 avctx->err_recognition |= (1<<(avctx->error_recognition-(avctx->error_recognition>=FF_ER_VERY_AGGRESSIVE))) - 1;
701 av_log(avctx, AV_LOG_DEBUG, "err{or,}_recognition combined: %d; %d\n",
702 avctx->error_recognition, avctx->err_recognition);
703 #endif
704
705 if (HAVE_THREADS && !avctx->thread_opaque) {
706 ret = ff_thread_init(avctx);
707 if (ret < 0) {
708 goto free_and_end;
709 }
710 }
711
712 if (avctx->codec->max_lowres < avctx->lowres) {
713 av_log(avctx, AV_LOG_ERROR, "The maximum value for lowres supported by the decoder is %d\n",
714 avctx->codec->max_lowres);
715 ret = AVERROR(EINVAL);
716 goto free_and_end;
717 }
718 if (avctx->codec->encode) {
719 int i;
720 if (avctx->codec->sample_fmts) {
721 for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++)
722 if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
723 break;
724 if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
725 av_log(avctx, AV_LOG_ERROR, "Specified sample_fmt is not supported.\n");
726 ret = AVERROR(EINVAL);
727 goto free_and_end;
728 }
729 }
730 if (avctx->codec->supported_samplerates) {
731 for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
732 if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
733 break;
734 if (avctx->codec->supported_samplerates[i] == 0) {
735 av_log(avctx, AV_LOG_ERROR, "Specified sample_rate is not supported\n");
736 ret = AVERROR(EINVAL);
737 goto free_and_end;
738 }
739 }
740 if (avctx->codec->channel_layouts) {
741 if (!avctx->channel_layout) {
742 av_log(avctx, AV_LOG_WARNING, "channel_layout not specified\n");
743 } else {
744 for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
745 if (avctx->channel_layout == avctx->codec->channel_layouts[i])
746 break;
747 if (avctx->codec->channel_layouts[i] == 0) {
748 av_log(avctx, AV_LOG_ERROR, "Specified channel_layout is not supported\n");
749 ret = AVERROR(EINVAL);
750 goto free_and_end;
751 }
752 }
753 }
754 if (avctx->channel_layout && avctx->channels) {
755 if (av_get_channel_layout_nb_channels(avctx->channel_layout) != avctx->channels) {
756 av_log(avctx, AV_LOG_ERROR, "channel layout does not match number of channels\n");
757 ret = AVERROR(EINVAL);
758 goto free_and_end;
759 }
760 } else if (avctx->channel_layout) {
761 avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
762 }
763 }
764
765 if(avctx->codec->init && !(avctx->active_thread_type&FF_THREAD_FRAME)){
766 ret = avctx->codec->init(avctx);
767 if (ret < 0) {
768 goto free_and_end;
769 }
770 }
771 end:
772 entangled_thread_counter--;
773
774 /* Release any user-supplied mutex. */
775 if (ff_lockmgr_cb) {
776 (*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE);
777 }
778 if (options) {
779 av_dict_free(options);
780 *options = tmp;
781 }
782
783 return ret;
784 free_and_end:
785 av_dict_free(&tmp);
786 av_freep(&avctx->priv_data);
787 av_freep(&avctx->internal);
788 avctx->codec= NULL;
789 goto end;
790 }
791
792 int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size,
793 const short *samples)
794 {
795     if(buf_size < FF_MIN_BUFFER_SIZE && 0){ /* size check intentionally disabled by the "&& 0" */
796 av_log(avctx, AV_LOG_ERROR, "buffer smaller than minimum size\n");
797 return -1;
798 }
799 if((avctx->codec->capabilities & CODEC_CAP_DELAY) || samples){
800 int ret = avctx->codec->encode(avctx, buf, buf_size, samples);
801 avctx->frame_number++;
802 return ret;
803 }else
804 return 0;
805 }
806
807 int attribute_align_arg avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
808 const AVFrame *pict)
809 {
810 if(buf_size < FF_MIN_BUFFER_SIZE){
811 av_log(avctx, AV_LOG_ERROR, "buffer smaller than minimum size\n");
812 return -1;
813 }
814 if(av_image_check_size(avctx->width, avctx->height, 0, avctx))
815 return -1;
816 if((avctx->codec->capabilities & CODEC_CAP_DELAY) || pict){
817 int ret = avctx->codec->encode(avctx, buf, buf_size, pict);
818 avctx->frame_number++;
819 emms_c(); //needed to avoid an emms_c() call before every return;
820
821 return ret;
822 }else
823 return 0;
824 }
825
826 int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
827 const AVSubtitle *sub)
828 {
829 int ret;
830 if(sub->start_display_time) {
831 av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
832 return -1;
833 }
834 if(sub->num_rects == 0 || !sub->rects)
835 return -1;
836 ret = avctx->codec->encode(avctx, buf, buf_size, sub);
837 avctx->frame_number++;
838 return ret;
839 }
840
841 int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
842 int *got_picture_ptr,
843 AVPacket *avpkt)
844 {
845 int ret;
846
847 *got_picture_ptr= 0;
848 if((avctx->coded_width||avctx->coded_height) && av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx))
849 return -1;
850
851 avctx->pkt = avpkt;
852
853 if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type&FF_THREAD_FRAME)){
854 if (HAVE_THREADS && avctx->active_thread_type&FF_THREAD_FRAME)
855 ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr,
856 avpkt);
857 else {
858 ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
859 avpkt);
860 picture->pkt_dts= avpkt->dts;
861 }
862
863 emms_c(); //needed to avoid an emms_c() call before every return;
864
865 if (*got_picture_ptr)
866 avctx->frame_number++;
867 }else
868 ret= 0;
869
870 return ret;
871 }
872
873 #if FF_API_OLD_DECODE_AUDIO
874 int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
875 int *frame_size_ptr,
876 AVPacket *avpkt)
877 {
878 AVFrame frame;
879 int ret, got_frame = 0;
880
881 if (avctx->get_buffer != avcodec_default_get_buffer) {
882 av_log(avctx, AV_LOG_ERROR, "A custom get_buffer() cannot be used with "
883 "avcodec_decode_audio3()\n");
884 return AVERROR(EINVAL);
885 }
886
887 ret = avcodec_decode_audio4(avctx, &frame, &got_frame, avpkt);
888
889 if (ret >= 0 && got_frame) {
890 int ch, plane_size;
891 int planar = av_sample_fmt_is_planar(avctx->sample_fmt);
892 int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels,
893 frame.nb_samples,
894 avctx->sample_fmt, 1);
895 if (*frame_size_ptr < data_size) {
896 av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for "
897 "the current frame (%d < %d)\n", *frame_size_ptr, data_size);
898 return AVERROR(EINVAL);
899 }
900
901 memcpy(samples, frame.extended_data[0], plane_size);
902
903 if (planar && avctx->channels > 1) {
904 uint8_t *out = ((uint8_t *)samples) + plane_size;
905 for (ch = 1; ch < avctx->channels; ch++) {
906 memcpy(out, frame.extended_data[ch], plane_size);
907 out += plane_size;
908 }
909 }
910 *frame_size_ptr = data_size;
911 } else {
912 *frame_size_ptr = 0;
913 }
914 return ret;
915 }
916 #endif
917
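/*
 * Layout of AV_PKT_DATA_PARAM_CHANGE side data as parsed below: a little-
 * endian 32-bit flags field followed, in this order and only when the
 * corresponding flag is set, by the channel count (le32), the channel layout
 * (le64), the sample rate (le32) and the width/height pair (le32 each).
 * The changes are only applied for decoders that advertise
 * CODEC_CAP_PARAM_CHANGE.
 */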
918 static void apply_param_change(AVCodecContext *avctx, AVPacket *avpkt)
919 {
920 int size = 0;
921 const uint8_t *data;
922 uint32_t flags;
923
924 if (!(avctx->codec->capabilities & CODEC_CAP_PARAM_CHANGE))
925 return;
926
927 data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
928 if (!data || size < 4)
929 return;
930 flags = bytestream_get_le32(&data);
931 size -= 4;
932 if (size < 4) /* Required for any of the changes */
933 return;
934 if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
935 avctx->channels = bytestream_get_le32(&data);
936 size -= 4;
937 }
938 if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
939 if (size < 8)
940 return;
941 avctx->channel_layout = bytestream_get_le64(&data);
942 size -= 8;
943 }
944 if (size < 4)
945 return;
946 if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
947 avctx->sample_rate = bytestream_get_le32(&data);
948 size -= 4;
949 }
950 if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
951 if (size < 8)
952 return;
953 avctx->width = bytestream_get_le32(&data);
954 avctx->height = bytestream_get_le32(&data);
955 size -= 8;
956 }
957 }
958
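/*
 * Typical use (illustrative sketch, error handling omitted); the caller owns
 * the AVFrame and the decoder's get_buffer() fills it:
 *
 *     AVFrame frame;
 *     int got_frame = 0;
 *     AVPacket pkt;                   // filled by the demuxer
 *     int len = avcodec_decode_audio4(avctx, &frame, &got_frame, &pkt);
 *     if (len >= 0 && got_frame) {
 *         // frame.nb_samples samples per channel are now available through
 *         // frame.extended_data[], in the avctx->sample_fmt sample format
 *     }
 */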
959 int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
960 AVFrame *frame,
961 int *got_frame_ptr,
962 AVPacket *avpkt)
963 {
964 int ret = 0;
965
966 *got_frame_ptr = 0;
967
968 avctx->pkt = avpkt;
969
970 if (!avpkt->data && avpkt->size) {
971 av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
972 return AVERROR(EINVAL);
973 }
974
975 apply_param_change(avctx, avpkt);
976
977 if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) {
978 ret = avctx->codec->decode(avctx, frame, got_frame_ptr, avpkt);
979 if (ret >= 0 && *got_frame_ptr) {
980 avctx->frame_number++;
981 frame->pkt_dts = avpkt->dts;
982 }
983 }
984 return ret;
985 }
986
987 int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
988 int *got_sub_ptr,
989 AVPacket *avpkt)
990 {
991 int ret;
992
993 avctx->pkt = avpkt;
994 *got_sub_ptr = 0;
995 ret = avctx->codec->decode(avctx, sub, got_sub_ptr, avpkt);
996 if (*got_sub_ptr)
997 avctx->frame_number++;
998 return ret;
999 }
1000
1001 void avsubtitle_free(AVSubtitle *sub)
1002 {
1003 int i;
1004
1005 for (i = 0; i < sub->num_rects; i++)
1006 {
1007 av_freep(&sub->rects[i]->pict.data[0]);
1008 av_freep(&sub->rects[i]->pict.data[1]);
1009 av_freep(&sub->rects[i]->pict.data[2]);
1010 av_freep(&sub->rects[i]->pict.data[3]);
1011 av_freep(&sub->rects[i]->text);
1012 av_freep(&sub->rects[i]->ass);
1013 av_freep(&sub->rects[i]);
1014 }
1015
1016 av_freep(&sub->rects);
1017
1018 memset(sub, 0, sizeof(AVSubtitle));
1019 }
1020
1021 av_cold int avcodec_close(AVCodecContext *avctx)
1022 {
1023 /* If there is a user-supplied mutex locking routine, call it. */
1024 if (ff_lockmgr_cb) {
1025 if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN))
1026 return -1;
1027 }
1028
1029 entangled_thread_counter++;
1030 if(entangled_thread_counter != 1){
1031 av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n");
1032 entangled_thread_counter--;
1033 return -1;
1034 }
1035
1036 if (HAVE_THREADS && avctx->thread_opaque)
1037 ff_thread_free(avctx);
1038 if (avctx->codec && avctx->codec->close)
1039 avctx->codec->close(avctx);
1040 avcodec_default_free_buffers(avctx);
1041 avctx->coded_frame = NULL;
1042 av_freep(&avctx->internal);
1043 if (avctx->codec && avctx->codec->priv_class)
1044 av_opt_free(avctx->priv_data);
1045 av_opt_free(avctx);
1046 av_freep(&avctx->priv_data);
1047 if(avctx->codec && avctx->codec->encode)
1048 av_freep(&avctx->extradata);
1049 avctx->codec = NULL;
1050 avctx->active_thread_type = 0;
1051 entangled_thread_counter--;
1052
1053 /* Release any user-supplied mutex. */
1054 if (ff_lockmgr_cb) {
1055 (*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE);
1056 }
1057 return 0;
1058 }
1059
1060 AVCodec *avcodec_find_encoder(enum CodecID id)
1061 {
1062 AVCodec *p, *experimental=NULL;
1063 p = first_avcodec;
1064 while (p) {
1065 if (p->encode != NULL && p->id == id) {
1066 if (p->capabilities & CODEC_CAP_EXPERIMENTAL && !experimental) {
1067 experimental = p;
1068 } else
1069 return p;
1070 }
1071 p = p->next;
1072 }
1073 return experimental;
1074 }
1075
1076 AVCodec *avcodec_find_encoder_by_name(const char *name)
1077 {
1078 AVCodec *p;
1079 if (!name)
1080 return NULL;
1081 p = first_avcodec;
1082 while (p) {
1083 if (p->encode != NULL && strcmp(name,p->name) == 0)
1084 return p;
1085 p = p->next;
1086 }
1087 return NULL;
1088 }
1089
1090 AVCodec *avcodec_find_decoder(enum CodecID id)
1091 {
1092 AVCodec *p;
1093 p = first_avcodec;
1094 while (p) {
1095 if (p->decode != NULL && p->id == id)
1096 return p;
1097 p = p->next;
1098 }
1099 return NULL;
1100 }
1101
1102 AVCodec *avcodec_find_decoder_by_name(const char *name)
1103 {
1104 AVCodec *p;
1105 if (!name)
1106 return NULL;
1107 p = first_avcodec;
1108 while (p) {
1109 if (p->decode != NULL && strcmp(name,p->name) == 0)
1110 return p;
1111 p = p->next;
1112 }
1113 return NULL;
1114 }
1115
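/*
 * For audio codecs with a fixed number of bits per coded sample (PCM, ADPCM,
 * ...), the bit rate is computed from the stream parameters instead of being
 * taken from the context, e.g. 44100 Hz stereo 16-bit PCM gives
 * 44100 * 2 * 16 = 1411200 b/s.
 */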
1116 static int get_bit_rate(AVCodecContext *ctx)
1117 {
1118 int bit_rate;
1119 int bits_per_sample;
1120
1121 switch(ctx->codec_type) {
1122 case AVMEDIA_TYPE_VIDEO:
1123 case AVMEDIA_TYPE_DATA:
1124 case AVMEDIA_TYPE_SUBTITLE:
1125 case AVMEDIA_TYPE_ATTACHMENT:
1126 bit_rate = ctx->bit_rate;
1127 break;
1128 case AVMEDIA_TYPE_AUDIO:
1129 bits_per_sample = av_get_bits_per_sample(ctx->codec_id);
1130 bit_rate = bits_per_sample ? ctx->sample_rate * ctx->channels * bits_per_sample : ctx->bit_rate;
1131 break;
1132 default:
1133 bit_rate = 0;
1134 break;
1135 }
1136 return bit_rate;
1137 }
1138
1139 size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag)
1140 {
1141 int i, len, ret = 0;
1142
1143 for (i = 0; i < 4; i++) {
1144 len = snprintf(buf, buf_size,
1145 isprint(codec_tag&0xFF) ? "%c" : "[%d]", codec_tag&0xFF);
1146 buf += len;
1147 buf_size = buf_size > len ? buf_size - len : 0;
1148 ret += len;
1149 codec_tag>>=8;
1150 }
1151 return ret;
1152 }
1153
1154 void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
1155 {
1156 const char *codec_name;
1157 const char *profile = NULL;
1158 AVCodec *p;
1159 char buf1[32];
1160 int bitrate;
1161 AVRational display_aspect_ratio;
1162
1163 if (encode)
1164 p = avcodec_find_encoder(enc->codec_id);
1165 else
1166 p = avcodec_find_decoder(enc->codec_id);
1167
1168 if (p) {
1169 codec_name = p->name;
1170 profile = av_get_profile_name(p, enc->profile);
1171 } else if (enc->codec_id == CODEC_ID_MPEG2TS) {
1172 /* fake mpeg2 transport stream codec (currently not
1173 registered) */
1174 codec_name = "mpeg2ts";
1175 } else if (enc->codec_name[0] != '\0') {
1176 codec_name = enc->codec_name;
1177 } else {
1178 /* output avi tags */
1179 char tag_buf[32];
1180 av_get_codec_tag_string(tag_buf, sizeof(tag_buf), enc->codec_tag);
1181 snprintf(buf1, sizeof(buf1), "%s / 0x%04X", tag_buf, enc->codec_tag);
1182 codec_name = buf1;
1183 }
1184
1185 switch(enc->codec_type) {
1186 case AVMEDIA_TYPE_VIDEO:
1187 snprintf(buf, buf_size,
1188 "Video: %s%s",
1189 codec_name, enc->mb_decision ? " (hq)" : "");
1190 if (profile)
1191 snprintf(buf + strlen(buf), buf_size - strlen(buf),
1192 " (%s)", profile);
1193 if (enc->pix_fmt != PIX_FMT_NONE) {
1194 snprintf(buf + strlen(buf), buf_size - strlen(buf),
1195 ", %s",
1196 av_get_pix_fmt_name(enc->pix_fmt));
1197 }
1198 if (enc->width) {
1199 snprintf(buf + strlen(buf), buf_size - strlen(buf),
1200 ", %dx%d",
1201 enc->width, enc->height);
1202 if (enc->sample_aspect_ratio.num) {
1203 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
1204 enc->width*enc->sample_aspect_ratio.num,
1205 enc->height*enc->sample_aspect_ratio.den,
1206 1024*1024);
1207 snprintf(buf + strlen(buf), buf_size - strlen(buf),
1208 " [PAR %d:%d DAR %d:%d]",
1209 enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den,
1210 display_aspect_ratio.num, display_aspect_ratio.den);
1211 }
1212 if(av_log_get_level() >= AV_LOG_DEBUG){
1213 int g= av_gcd(enc->time_base.num, enc->time_base.den);
1214 snprintf(buf + strlen(buf), buf_size - strlen(buf),
1215 ", %d/%d",
1216 enc->time_base.num/g, enc->time_base.den/g);
1217 }
1218 }
1219 if (encode) {
1220 snprintf(buf + strlen(buf), buf_size - strlen(buf),
1221 ", q=%d-%d", enc->qmin, enc->qmax);
1222 }
1223 break;
1224 case AVMEDIA_TYPE_AUDIO:
1225 snprintf(buf, buf_size,
1226 "Audio: %s",
1227 codec_name);
1228 if (profile)
1229 snprintf(buf + strlen(buf), buf_size - strlen(buf),
1230 " (%s)", profile);
1231 if (enc->sample_rate) {
1232 snprintf(buf + strlen(buf), buf_size - strlen(buf),
1233 ", %d Hz", enc->sample_rate);
1234 }
1235 av_strlcat(buf, ", ", buf_size);
1236 av_get_channel_layout_string(buf + strlen(buf), buf_size - strlen(buf), enc->channels, enc->channel_layout);
1237 if (enc->sample_fmt != AV_SAMPLE_FMT_NONE) {
1238 snprintf(buf + strlen(buf), buf_size - strlen(buf),
1239 ", %s", av_get_sample_fmt_name(enc->sample_fmt));
1240 }
1241 break;
1242 case AVMEDIA_TYPE_DATA:
1243 snprintf(buf, buf_size, "Data: %s", codec_name);
1244 break;
1245 case AVMEDIA_TYPE_SUBTITLE:
1246 snprintf(buf, buf_size, "Subtitle: %s", codec_name);
1247 break;
1248 case AVMEDIA_TYPE_ATTACHMENT:
1249 snprintf(buf, buf_size, "Attachment: %s", codec_name);
1250 break;
1251 default:
1252 snprintf(buf, buf_size, "Invalid Codec type %d", enc->codec_type);
1253 return;
1254 }
1255 if (encode) {
1256 if (enc->flags & CODEC_FLAG_PASS1)
1257 snprintf(buf + strlen(buf), buf_size - strlen(buf),
1258 ", pass 1");
1259 if (enc->flags & CODEC_FLAG_PASS2)
1260 snprintf(buf + strlen(buf), buf_size - strlen(buf),
1261 ", pass 2");
1262 }
1263 bitrate = get_bit_rate(enc);
1264 if (bitrate != 0) {
1265 snprintf(buf + strlen(buf), buf_size - strlen(buf),
1266 ", %d kb/s", bitrate / 1000);
1267 }
1268 }
1269
1270 const char *av_get_profile_name(const AVCodec *codec, int profile)
1271 {
1272 const AVProfile *p;
1273 if (profile == FF_PROFILE_UNKNOWN || !codec->profiles)
1274 return NULL;
1275
1276 for (p = codec->profiles; p->profile != FF_PROFILE_UNKNOWN; p++)
1277 if (p->profile == profile)
1278 return p->name;
1279
1280 return NULL;
1281 }
1282
1283 unsigned avcodec_version( void )
1284 {
1285 return LIBAVCODEC_VERSION_INT;
1286 }
1287
1288 const char *avcodec_configuration(void)
1289 {
1290 return LIBAV_CONFIGURATION;
1291 }
1292
1293 const char *avcodec_license(void)
1294 {
1295 #define LICENSE_PREFIX "libavcodec license: "
1296 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
1297 }
1298
1299 void avcodec_flush_buffers(AVCodecContext *avctx)
1300 {
1301 if(HAVE_THREADS && avctx->active_thread_type&FF_THREAD_FRAME)
1302 ff_thread_flush(avctx);
1303 else if(avctx->codec->flush)
1304 avctx->codec->flush(avctx);
1305 }
1306
1307 static void video_free_buffers(AVCodecContext *s)
1308 {
1309 AVCodecInternal *avci = s->internal;
1310 int i, j;
1311
1312 if (!avci->buffer)
1313 return;
1314
1315 if (avci->buffer_count)
1316 av_log(s, AV_LOG_WARNING, "Found %i unreleased buffers!\n",
1317 avci->buffer_count);
1318 for(i=0; i<INTERNAL_BUFFER_SIZE; i++){
1319 InternalBuffer *buf = &avci->buffer[i];
1320 for(j=0; j<4; j++){
1321 av_freep(&buf->base[j]);
1322 buf->data[j]= NULL;
1323 }
1324 }
1325 av_freep(&avci->buffer);
1326
1327 avci->buffer_count=0;
1328 }
1329
1330 static void audio_free_buffers(AVCodecContext *avctx)
1331 {
1332 AVCodecInternal *avci = avctx->internal;
1333 InternalBuffer *buf;
1334
1335 if (!avci->buffer)
1336 return;
1337 buf = avci->buffer;
1338
1339 if (buf->extended_data) {
1340 av_free(buf->extended_data[0]);
1341 if (buf->extended_data != buf->data)
1342 av_free(buf->extended_data);
1343 }
1344 av_freep(&avci->buffer);
1345 }
1346
1347 void avcodec_default_free_buffers(AVCodecContext *avctx)
1348 {
1349 switch (avctx->codec_type) {
1350 case AVMEDIA_TYPE_VIDEO:
1351 video_free_buffers(avctx);
1352 break;
1353 case AVMEDIA_TYPE_AUDIO:
1354 audio_free_buffers(avctx);
1355 break;
1356 default:
1357 break;
1358 }
1359 }
1360
1361 #if FF_API_OLD_FF_PICT_TYPES
1362 char av_get_pict_type_char(int pict_type){
1363 return av_get_picture_type_char(pict_type);
1364 }
1365 #endif
1366
1367 int av_get_bits_per_sample(enum CodecID codec_id){
1368 switch(codec_id){
1369 case CODEC_ID_ADPCM_SBPRO_2:
1370 return 2;
1371 case CODEC_ID_ADPCM_SBPRO_3:
1372 return 3;
1373 case CODEC_ID_ADPCM_SBPRO_4:
1374 case CODEC_ID_ADPCM_CT:
1375 case CODEC_ID_ADPCM_IMA_WAV:
1376 case CODEC_ID_ADPCM_IMA_QT:
1377 case CODEC_ID_ADPCM_SWF:
1378 case CODEC_ID_ADPCM_MS:
1379 case CODEC_ID_ADPCM_YAMAHA:
1380 case CODEC_ID_ADPCM_G722:
1381 return 4;
1382 case CODEC_ID_PCM_ALAW:
1383 case CODEC_ID_PCM_MULAW:
1384 case CODEC_ID_PCM_S8:
1385 case CODEC_ID_PCM_U8:
1386 case CODEC_ID_PCM_ZORK:
1387 return 8;
1388 case CODEC_ID_PCM_S16BE:
1389 case CODEC_ID_PCM_S16LE:
1390 case CODEC_ID_PCM_S16LE_PLANAR:
1391 case CODEC_ID_PCM_U16BE:
1392 case CODEC_ID_PCM_U16LE:
1393 return 16;
1394 case CODEC_ID_PCM_S24DAUD:
1395 case CODEC_ID_PCM_S24BE:
1396 case CODEC_ID_PCM_S24LE:
1397 case CODEC_ID_PCM_U24BE:
1398 case CODEC_ID_PCM_U24LE:
1399 return 24;
1400 case CODEC_ID_PCM_S32BE:
1401 case CODEC_ID_PCM_S32LE:
1402 case CODEC_ID_PCM_U32BE:
1403 case CODEC_ID_PCM_U32LE:
1404 case CODEC_ID_PCM_F32BE:
1405 case CODEC_ID_PCM_F32LE:
1406 return 32;
1407 case CODEC_ID_PCM_F64BE:
1408 case CODEC_ID_PCM_F64LE:
1409 return 64;
1410 default:
1411 return 0;
1412 }
1413 }
1414
1415 #if FF_API_OLD_SAMPLE_FMT
1416 int av_get_bits_per_sample_format(enum AVSampleFormat sample_fmt) {
1417 return av_get_bytes_per_sample(sample_fmt) << 3;
1418 }
1419 #endif
1420
1421 #if !HAVE_THREADS
1422 int ff_thread_init(AVCodecContext *s){
1423 return -1;
1424 }
1425 #endif
1426
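/*
 * Xiph-style lacing: a value v is written as v/255 bytes of 255 followed by
 * one byte holding the remainder, e.g. v = 600 is stored as 255, 255, 90 and
 * the function returns 3 (the number of bytes written).
 */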
1427 unsigned int av_xiphlacing(unsigned char *s, unsigned int v)
1428 {
1429 unsigned int n = 0;
1430
1431 while(v >= 0xff) {
1432 *s++ = 0xff;
1433 v -= 0xff;
1434 n++;
1435 }
1436 *s = v;
1437 n++;
1438 return n;
1439 }
1440
1441 int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b){
1442 int i;
1443 for(i=0; i<size && !(tab[i][0]==a && tab[i][1]==b); i++);
1444 return i;
1445 }
1446
1447 void av_log_missing_feature(void *avc, const char *feature, int want_sample)
1448 {
1449 av_log(avc, AV_LOG_WARNING, "%s not implemented. Update your Libav "
1450 "version to the newest one from Git. If the problem still "
1451 "occurs, it means that your file has a feature which has not "
1452 "been implemented.\n", feature);
1453 if(want_sample)
1454 av_log_ask_for_sample(avc, NULL);
1455 }
1456
1457 void av_log_ask_for_sample(void *avc, const char *msg, ...)
1458 {
1459 va_list argument_list;
1460
1461 va_start(argument_list, msg);
1462
1463 if (msg)
1464 av_vlog(avc, AV_LOG_WARNING, msg, argument_list);
1465 av_log(avc, AV_LOG_WARNING, "If you want to help, upload a sample "
1466 "of this file to ftp://upload.libav.org/incoming/ "
1467 "and contact the libav-devel mailing list.\n");
1468
1469 va_end(argument_list);
1470 }
1471
1472 static AVHWAccel *first_hwaccel = NULL;
1473
1474 void av_register_hwaccel(AVHWAccel *hwaccel)
1475 {
1476 AVHWAccel **p = &first_hwaccel;
1477 while (*p)
1478 p = &(*p)->next;
1479 *p = hwaccel;
1480 hwaccel->next = NULL;
1481 }
1482
1483 AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel)
1484 {
1485 return hwaccel ? hwaccel->next : first_hwaccel;
1486 }
1487
1488 AVHWAccel *ff_find_hwaccel(enum CodecID codec_id, enum PixelFormat pix_fmt)
1489 {
1490 AVHWAccel *hwaccel=NULL;
1491
1492 while((hwaccel= av_hwaccel_next(hwaccel))){
1493 if ( hwaccel->id == codec_id
1494 && hwaccel->pix_fmt == pix_fmt)
1495 return hwaccel;
1496 }
1497 return NULL;
1498 }
1499
1500 int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op))
1501 {
1502 if (ff_lockmgr_cb) {
1503 if (ff_lockmgr_cb(&codec_mutex, AV_LOCK_DESTROY))
1504 return -1;
1505 if (ff_lockmgr_cb(&avformat_mutex, AV_LOCK_DESTROY))
1506 return -1;
1507 }
1508
1509 ff_lockmgr_cb = cb;
1510
1511 if (ff_lockmgr_cb) {
1512 if (ff_lockmgr_cb(&codec_mutex, AV_LOCK_CREATE))
1513 return -1;
1514 if (ff_lockmgr_cb(&avformat_mutex, AV_LOCK_CREATE))
1515 return -1;
1516 }
1517 return 0;
1518 }
1519
1520 int avpriv_lock_avformat(void)
1521 {
1522 if (ff_lockmgr_cb) {
1523 if ((*ff_lockmgr_cb)(&avformat_mutex, AV_LOCK_OBTAIN))
1524 return -1;
1525 }
1526 return 0;
1527 }
1528
1529 int avpriv_unlock_avformat(void)
1530 {
1531 if (ff_lockmgr_cb) {
1532 if ((*ff_lockmgr_cb)(&avformat_mutex, AV_LOCK_RELEASE))
1533 return -1;
1534 }
1535 return 0;
1536 }
1537
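/*
 * Upper-cases each of the four bytes of a packed tag/fourcc, e.g. the tag
 * MKTAG('a','v','c','1') maps to the value of MKTAG('A','V','C','1').
 */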
1538 unsigned int avpriv_toupper4(unsigned int x)
1539 {
1540 return toupper( x &0xFF)
1541 + (toupper((x>>8 )&0xFF)<<8 )
1542 + (toupper((x>>16)&0xFF)<<16)
1543 + (toupper((x>>24)&0xFF)<<24);
1544 }
1545
1546 #if !HAVE_THREADS
1547
1548 int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f)
1549 {
1550 f->owner = avctx;
1551 return avctx->get_buffer(avctx, f);
1552 }
1553
1554 void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
1555 {
1556 f->owner->release_buffer(f->owner, f);
1557 }
1558
1559 void ff_thread_finish_setup(AVCodecContext *avctx)
1560 {
1561 }
1562
1563 void ff_thread_report_progress(AVFrame *f, int progress, int field)
1564 {
1565 }
1566
1567 void ff_thread_await_progress(AVFrame *f, int progress, int field)
1568 {
1569 }
1570
1571 #endif
1572
1573 #if FF_API_THREAD_INIT
1574 int avcodec_thread_init(AVCodecContext *s, int thread_count)
1575 {
1576 s->thread_count = thread_count;
1577 return ff_thread_init(s);
1578 }
1579 #endif
1580
1581 enum AVMediaType avcodec_get_type(enum CodecID codec_id)
1582 {
1583 if (codec_id <= CODEC_ID_NONE)
1584 return AVMEDIA_TYPE_UNKNOWN;
1585 else if (codec_id < CODEC_ID_FIRST_AUDIO)
1586 return AVMEDIA_TYPE_VIDEO;
1587 else if (codec_id < CODEC_ID_FIRST_SUBTITLE)
1588 return AVMEDIA_TYPE_AUDIO;
1589 else if (codec_id < CODEC_ID_FIRST_UNKNOWN)
1590 return AVMEDIA_TYPE_SUBTITLE;
1591
1592 return AVMEDIA_TYPE_UNKNOWN;
1593 }