1 /*
2 * avplay : Simple Media Player based on the Libav libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include <stdint.h>
27
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/display.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/imgutils.h"
34 #include "libavutil/dict.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/samplefmt.h"
37 #include "libavutil/time.h"
38 #include "libavformat/avformat.h"
39 #include "libavdevice/avdevice.h"
40 #include "libavresample/avresample.h"
41 #include "libavutil/opt.h"
42 #include "libavcodec/avfft.h"
43
44 #include "libavfilter/avfilter.h"
45 #include "libavfilter/buffersink.h"
46 #include "libavfilter/buffersrc.h"
47
48 #include "cmdutils.h"
49
50 #include <SDL.h>
51 #include <SDL_thread.h>
52
53 #ifdef __MINGW32__
54 #undef main /* We don't want SDL to override our main() */
55 #endif
56
57 #include <assert.h>
58
59 const char program_name[] = "avplay";
60 const int program_birth_year = 2003;
61
62 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
63 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
64 #define MIN_FRAMES 5
65
66 /* SDL audio buffer size, in samples. Should be small to have precise
67 A/V sync as SDL does not have hardware buffer fullness info. */
68 #define SDL_AUDIO_BUFFER_SIZE 1024
69
70 /* no AV sync correction is done if below the AV sync threshold */
71 #define AV_SYNC_THRESHOLD 0.01
72 /* no AV correction is done if too big error */
73 #define AV_NOSYNC_THRESHOLD 10.0
74
75 #define FRAME_SKIP_FACTOR 0.05
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
81 #define AUDIO_DIFF_AVG_NB 20
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2 * 65536)
85
86 static int64_t sws_flags = SWS_BICUBIC;
87
88 typedef struct PacketQueue {
89 AVPacketList *first_pkt, *last_pkt;
90 int nb_packets;
91 int size;
92 int abort_request;
93 SDL_mutex *mutex;
94 SDL_cond *cond;
95 } PacketQueue;
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101 double pts; // presentation timestamp for this picture
102 double target_clock; // av_gettime_relative() time at which this should be displayed ideally
103 int64_t pos; // byte position in file
104 SDL_Overlay *bmp;
105 int width, height; /* source height & width */
106 int allocated;
107 int reallocate;
108 enum AVPixelFormat pix_fmt;
109
110 AVRational sar;
111 } VideoPicture;
112
113 typedef struct SubPicture {
114 double pts; /* presentation time stamp for this picture */
115 AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119 AV_SYNC_AUDIO_MASTER, /* default choice */
120 AV_SYNC_VIDEO_MASTER,
121 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
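/* All state for one playback session: demuxing/decoding threads, per-stream
   packet queues, picture and subtitle queues, and the clocks used for A/V sync. */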
124 typedef struct PlayerState {
125 SDL_Thread *parse_tid;
126 SDL_Thread *video_tid;
127 SDL_Thread *refresh_tid;
128 AVInputFormat *iformat;
129 int no_background;
130 int abort_request;
131 int paused;
132 int last_paused;
133 int seek_req;
134 int seek_flags;
135 int64_t seek_pos;
136 int64_t seek_rel;
137 int read_pause_return;
138 AVFormatContext *ic;
139
140 int audio_stream;
141
142 int av_sync_type;
143 double external_clock; /* external clock base */
144 int64_t external_clock_time;
145
146 double audio_clock;
147 double audio_diff_cum; /* used for AV difference average computation */
148 double audio_diff_avg_coef;
149 double audio_diff_threshold;
150 int audio_diff_avg_count;
151 AVStream *audio_st;
152 AVCodecContext *audio_dec;
153 PacketQueue audioq;
154 int audio_hw_buf_size;
155 uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
156 uint8_t *audio_buf;
157 uint8_t *audio_buf1;
158 unsigned int audio_buf_size; /* in bytes */
159 int audio_buf_index; /* in bytes */
160 AVPacket audio_pkt_temp;
161 AVPacket audio_pkt;
162 enum AVSampleFormat sdl_sample_fmt;
163 uint64_t sdl_channel_layout;
164 int sdl_channels;
165 int sdl_sample_rate;
166 enum AVSampleFormat resample_sample_fmt;
167 uint64_t resample_channel_layout;
168 int resample_sample_rate;
169 AVAudioResampleContext *avr;
170 AVFrame *frame;
171
172 int show_audio; /* if true, display audio samples */
173 int16_t sample_array[SAMPLE_ARRAY_SIZE];
174 int sample_array_index;
175 int last_i_start;
176 RDFTContext *rdft;
177 int rdft_bits;
178 FFTSample *rdft_data;
179 int xpos;
180
181 SDL_Thread *subtitle_tid;
182 int subtitle_stream;
183 int subtitle_stream_changed;
184 AVStream *subtitle_st;
185 AVCodecContext *subtitle_dec;
186 PacketQueue subtitleq;
187 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
188 int subpq_size, subpq_rindex, subpq_windex;
189 SDL_mutex *subpq_mutex;
190 SDL_cond *subpq_cond;
191
192 double frame_timer;
193 double frame_last_pts;
194 double frame_last_delay;
195 double video_clock; // pts of last decoded frame / predicted pts of next decoded frame
196 int video_stream;
197 AVStream *video_st;
198 AVCodecContext *video_dec;
199 PacketQueue videoq;
200 double video_current_pts; // current displayed pts (different from video_clock if frame fifos are used)
201 double video_current_pts_drift; // video_current_pts - time (av_gettime_relative) at which we updated video_current_pts - used to have running video pts
202 int64_t video_current_pos; // current displayed file pos
203 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
204 int pictq_size, pictq_rindex, pictq_windex;
205 SDL_mutex *pictq_mutex;
206 SDL_cond *pictq_cond;
207
208 // QETimer *video_timer;
209 char filename[1024];
210 int width, height, xleft, ytop;
211
212 PtsCorrectionContext pts_ctx;
213
214 AVFilterContext *in_video_filter; // the first filter in the video chain
215 AVFilterContext *out_video_filter; // the last filter in the video chain
216
217 float skip_frames;
218 float skip_frames_index;
219 int refresh;
220
221 SpecifierOpt *codec_names;
222 int nb_codec_names;
223 } PlayerState;
224
225 /* options specified by the user */
226 static AVInputFormat *file_iformat;
227 static const char *input_filename;
228 static const char *window_title;
229 static int fs_screen_width;
230 static int fs_screen_height;
231 static int screen_width = 0;
232 static int screen_height = 0;
233 static int audio_disable;
234 static int video_disable;
235 static int wanted_stream[AVMEDIA_TYPE_NB] = {
236 [AVMEDIA_TYPE_AUDIO] = -1,
237 [AVMEDIA_TYPE_VIDEO] = -1,
238 [AVMEDIA_TYPE_SUBTITLE] = -1,
239 };
240 static int seek_by_bytes = -1;
241 static int display_disable;
242 static int show_status = 1;
243 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
244 static int64_t start_time = AV_NOPTS_VALUE;
245 static int64_t duration = AV_NOPTS_VALUE;
246 static int step = 0;
247 static int workaround_bugs = 1;
248 static int fast = 0;
249 static int genpts = 0;
250 static int idct = FF_IDCT_AUTO;
251 static enum AVDiscard skip_frame = AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_idct = AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
254 static int error_concealment = 3;
255 static int decoder_reorder_pts = -1;
256 static int noautoexit;
257 static int exit_on_keydown;
258 static int exit_on_mousedown;
259 static int loop = 1;
260 static int framedrop = 1;
261 static int infinite_buffer = 0;
262
263 static int rdftspeed = 20;
264 static char *vfilters = NULL;
265 static int autorotate = 1;
266
267 /* current context */
268 static int is_full_screen;
269 static PlayerState player_state;
270 static PlayerState *player = &player_state;
271 static int64_t audio_callback_time;
272
273 static AVPacket flush_pkt;
274
275 #define FF_ALLOC_EVENT (SDL_USEREVENT)
276 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
277 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
278
279 static SDL_Surface *screen;
280
281 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
282
283 /* packet queue handling */
284 static void packet_queue_init(PacketQueue *q)
285 {
286 memset(q, 0, sizeof(PacketQueue));
287 q->mutex = SDL_CreateMutex();
288 q->cond = SDL_CreateCond();
289 packet_queue_put(q, &flush_pkt);
290 }
291
292 static void packet_queue_flush(PacketQueue *q)
293 {
294 AVPacketList *pkt, *pkt1;
295
296 SDL_LockMutex(q->mutex);
297 for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
298 pkt1 = pkt->next;
299 av_packet_unref(&pkt->pkt);
300 av_freep(&pkt);
301 }
302 q->last_pkt = NULL;
303 q->first_pkt = NULL;
304 q->nb_packets = 0;
305 q->size = 0;
306 SDL_UnlockMutex(q->mutex);
307 }
308
309 static void packet_queue_end(PacketQueue *q)
310 {
311 packet_queue_flush(q);
312 SDL_DestroyMutex(q->mutex);
313 SDL_DestroyCond(q->cond);
314 }
315
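/* Append a packet to the queue. Returns 0 on success, -1 if the list node
   cannot be allocated. */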
316 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
317 {
318 AVPacketList *pkt1;
319
320 pkt1 = av_malloc(sizeof(AVPacketList));
321 if (!pkt1)
322 return -1;
323 pkt1->pkt = *pkt;
324 pkt1->next = NULL;
325
326
327 SDL_LockMutex(q->mutex);
328
329 if (!q->last_pkt)
330
331 q->first_pkt = pkt1;
332 else
333 q->last_pkt->next = pkt1;
334 q->last_pkt = pkt1;
335 q->nb_packets++;
336 q->size += pkt1->pkt.size + sizeof(*pkt1);
337 /* XXX: should duplicate packet data in DV case */
338 SDL_CondSignal(q->cond);
339
340 SDL_UnlockMutex(q->mutex);
341 return 0;
342 }
343
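/* Mark the queue as aborted and wake up any thread blocked in packet_queue_get(). */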
344 static void packet_queue_abort(PacketQueue *q)
345 {
346 SDL_LockMutex(q->mutex);
347
348 q->abort_request = 1;
349
350 SDL_CondSignal(q->cond);
351
352 SDL_UnlockMutex(q->mutex);
353 }
354
355 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
356 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
357 {
358 AVPacketList *pkt1;
359 int ret;
360
361 SDL_LockMutex(q->mutex);
362
363 for (;;) {
364 if (q->abort_request) {
365 ret = -1;
366 break;
367 }
368
369 pkt1 = q->first_pkt;
370 if (pkt1) {
371 q->first_pkt = pkt1->next;
372 if (!q->first_pkt)
373 q->last_pkt = NULL;
374 q->nb_packets--;
375 q->size -= pkt1->pkt.size + sizeof(*pkt1);
376 *pkt = pkt1->pkt;
377 av_free(pkt1);
378 ret = 1;
379 break;
380 } else if (!block) {
381 ret = 0;
382 break;
383 } else {
384 SDL_CondWait(q->cond, q->mutex);
385 }
386 }
387 SDL_UnlockMutex(q->mutex);
388 return ret;
389 }
390
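/* Fill a w x h rectangle at (x, y) of the SDL surface with a solid color. */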
391 static inline void fill_rectangle(SDL_Surface *screen,
392 int x, int y, int w, int h, int color)
393 {
394 SDL_Rect rect;
395 rect.x = x;
396 rect.y = y;
397 rect.w = w;
398 rect.h = h;
399 SDL_FillRect(screen, &rect, color);
400 }
401
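/* Pixel helpers for subtitle blending: RGBA_IN unpacks a 32-bit RGBA value,
   YUVA_IN looks up a palette entry as YUVA, YUVA_OUT packs one back, and
   ALPHA_BLEND mixes a new component into an existing one using the given alpha. */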
402 #define ALPHA_BLEND(a, oldp, newp, s)\
403 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
404
405 #define RGBA_IN(r, g, b, a, s)\
406 {\
407 unsigned int v = ((const uint32_t *)(s))[0];\
408 a = (v >> 24) & 0xff;\
409 r = (v >> 16) & 0xff;\
410 g = (v >> 8) & 0xff;\
411 b = v & 0xff;\
412 }
413
414 #define YUVA_IN(y, u, v, a, s, pal)\
415 {\
416 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
417 a = (val >> 24) & 0xff;\
418 y = (val >> 16) & 0xff;\
419 u = (val >> 8) & 0xff;\
420 v = val & 0xff;\
421 }
422
423 #define YUVA_OUT(d, y, u, v, a)\
424 {\
425 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
426 }
427
428
429 #define BPP 1
430
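/* Alpha-blend a palettized subtitle rectangle onto a YUV420 destination image.
   The chroma planes are subsampled, so odd start rows/columns and odd sizes
   are handled separately. */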
431 static void blend_subrect(uint8_t *dst[4], uint16_t dst_linesize[4],
432 const AVSubtitleRect *rect, int imgw, int imgh)
433 {
434 int wrap, wrap3, width2, skip2;
435 int y, u, v, a, u1, v1, a1, w, h;
436 uint8_t *lum, *cb, *cr;
437 const uint8_t *p;
438 const uint32_t *pal;
439 int dstx, dsty, dstw, dsth;
440
441 dstw = av_clip(rect->w, 0, imgw);
442 dsth = av_clip(rect->h, 0, imgh);
443 dstx = av_clip(rect->x, 0, imgw - dstw);
444 dsty = av_clip(rect->y, 0, imgh - dsth);
445 /* sdl has U and V inverted */
446 lum = dst[0] + dsty * dst_linesize[0];
447 cb = dst[2] + (dsty >> 1) * dst_linesize[2];
448 cr = dst[1] + (dsty >> 1) * dst_linesize[1];
449
450 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
451 skip2 = dstx >> 1;
452 wrap = dst_linesize[0];
453 wrap3 = rect->linesize[0];
454 p = rect->data[0];
455 pal = (const uint32_t *)rect->data[1]; /* Now in YCrCb! */
456
457 if (dsty & 1) {
458 lum += dstx;
459 cb += skip2;
460 cr += skip2;
461
462 if (dstx & 1) {
463 YUVA_IN(y, u, v, a, p, pal);
464 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
465 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
466 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
467 cb++;
468 cr++;
469 lum++;
470 p += BPP;
471 }
472 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
473 YUVA_IN(y, u, v, a, p, pal);
474 u1 = u;
475 v1 = v;
476 a1 = a;
477 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
478
479 YUVA_IN(y, u, v, a, p + BPP, pal);
480 u1 += u;
481 v1 += v;
482 a1 += a;
483 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
484 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
485 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
486 cb++;
487 cr++;
488 p += 2 * BPP;
489 lum += 2;
490 }
491 if (w) {
492 YUVA_IN(y, u, v, a, p, pal);
493 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
494 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
495 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
496 p++;
497 lum++;
498 }
499 p += wrap3 - dstw * BPP;
500 lum += wrap - dstw - dstx;
501 cb += dst_linesize[2] - width2 - skip2;
502 cr += dst_linesize[1] - width2 - skip2;
503 }
504 for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
505 lum += dstx;
506 cb += skip2;
507 cr += skip2;
508
509 if (dstx & 1) {
510 YUVA_IN(y, u, v, a, p, pal);
511 u1 = u;
512 v1 = v;
513 a1 = a;
514 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
515 p += wrap3;
516 lum += wrap;
517 YUVA_IN(y, u, v, a, p, pal);
518 u1 += u;
519 v1 += v;
520 a1 += a;
521 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
523 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
524 cb++;
525 cr++;
526 p += -wrap3 + BPP;
527 lum += -wrap + 1;
528 }
529 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
530 YUVA_IN(y, u, v, a, p, pal);
531 u1 = u;
532 v1 = v;
533 a1 = a;
534 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535
536 YUVA_IN(y, u, v, a, p + BPP, pal);
537 u1 += u;
538 v1 += v;
539 a1 += a;
540 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
541 p += wrap3;
542 lum += wrap;
543
544 YUVA_IN(y, u, v, a, p, pal);
545 u1 += u;
546 v1 += v;
547 a1 += a;
548 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
549
550 YUVA_IN(y, u, v, a, p + BPP, pal);
551 u1 += u;
552 v1 += v;
553 a1 += a;
554 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
555
556 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
557 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
558
559 cb++;
560 cr++;
561 p += -wrap3 + 2 * BPP;
562 lum += -wrap + 2;
563 }
564 if (w) {
565 YUVA_IN(y, u, v, a, p, pal);
566 u1 = u;
567 v1 = v;
568 a1 = a;
569 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
570 p += wrap3;
571 lum += wrap;
572 YUVA_IN(y, u, v, a, p, pal);
573 u1 += u;
574 v1 += v;
575 a1 += a;
576 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
578 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
579 cb++;
580 cr++;
581 p += -wrap3 + BPP;
582 lum += -wrap + 1;
583 }
584 p += wrap3 + (wrap3 - dstw * BPP);
585 lum += wrap + (wrap - dstw - dstx);
586 cb += dst_linesize[2] - width2 - skip2;
587 cr += dst_linesize[1] - width2 - skip2;
588 }
589 /* handle odd height */
590 if (h) {
591 lum += dstx;
592 cb += skip2;
593 cr += skip2;
594
595 if (dstx & 1) {
596 YUVA_IN(y, u, v, a, p, pal);
597 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
598 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
599 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
600 cb++;
601 cr++;
602 lum++;
603 p += BPP;
604 }
605 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
606 YUVA_IN(y, u, v, a, p, pal);
607 u1 = u;
608 v1 = v;
609 a1 = a;
610 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
611
612 YUVA_IN(y, u, v, a, p + BPP, pal);
613 u1 += u;
614 v1 += v;
615 a1 += a;
616 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
617 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
618 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
619 cb++;
620 cr++;
621 p += 2 * BPP;
622 lum += 2;
623 }
624 if (w) {
625 YUVA_IN(y, u, v, a, p, pal);
626 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
627 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
628 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
629 }
630 }
631 }
632
633 static void free_subpicture(SubPicture *sp)
634 {
635 avsubtitle_free(&sp->sub);
636 }
637
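/* Display the current picture from the queue, blending any active subtitle
   rectangles and scaling the display rectangle to fit the window while keeping
   the source aspect ratio. */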
638 static void video_image_display(PlayerState *is)
639 {
640 VideoPicture *vp;
641 SubPicture *sp;
642 float aspect_ratio;
643 int width, height, x, y;
644 SDL_Rect rect;
645 int i;
646
647 vp = &is->pictq[is->pictq_rindex];
648 if (vp->bmp) {
649 if (!vp->sar.num)
650 aspect_ratio = 0;
651 else
652 aspect_ratio = av_q2d(vp->sar);
653 if (aspect_ratio <= 0.0)
654 aspect_ratio = 1.0;
655 aspect_ratio *= (float)vp->width / (float)vp->height;
656
657 if (is->subtitle_st)
658 {
659 if (is->subpq_size > 0)
660 {
661 sp = &is->subpq[is->subpq_rindex];
662
663 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
664 {
665 SDL_LockYUVOverlay (vp->bmp);
666
667 for (i = 0; i < sp->sub.num_rects; i++)
668 blend_subrect(vp->bmp->pixels, vp->bmp->pitches,
669 sp->sub.rects[i], vp->bmp->w, vp->bmp->h);
670
671 SDL_UnlockYUVOverlay (vp->bmp);
672 }
673 }
674 }
675
676
677 /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
678 height = is->height;
679 width = ((int)rint(height * aspect_ratio)) & ~1;
680 if (width > is->width) {
681 width = is->width;
682 height = ((int)rint(width / aspect_ratio)) & ~1;
683 }
684 x = (is->width - width) / 2;
685 y = (is->height - height) / 2;
686 is->no_background = 0;
687 rect.x = is->xleft + x;
688 rect.y = is->ytop + y;
689 rect.w = width;
690 rect.h = height;
691 SDL_DisplayYUVOverlay(vp->bmp, &rect);
692 }
693 }
694
695 /* get the current audio output buffer size, in bytes. With SDL, we
696 cannot get precise information about the hardware buffer fullness. */
697 static int audio_write_get_buf_size(PlayerState *is)
698 {
699 return is->audio_buf_size - is->audio_buf_index;
700 }
701
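/* Positive modulo: wrap a into the range [0, b) even when a is negative. */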
702 static inline int compute_mod(int a, int b)
703 {
704 a = a % b;
705 if (a >= 0)
706 return a;
707 else
708 return a + b;
709 }
710
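/* Audio visualization: draw either a per-channel waveform (show_audio == 1) or
   an RDFT-based spectrogram of the most recently played samples. */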
711 static void video_audio_display(PlayerState *s)
712 {
713 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
714 int ch, channels, h, h2, bgcolor, fgcolor;
715 int64_t time_diff;
716 int rdft_bits, nb_freq;
717
718 for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
719 ;
720 nb_freq = 1 << (rdft_bits - 1);
721
722 /* compute display index: center on currently output samples */
723 channels = s->sdl_channels;
724 nb_display_channels = channels;
725 if (!s->paused) {
726 int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
727 n = 2 * channels;
728 delay = audio_write_get_buf_size(s);
729 delay /= n;
730
731 /* to be more precise, we take into account the time spent since
732 the last buffer computation */
733 if (audio_callback_time) {
734 time_diff = av_gettime_relative() - audio_callback_time;
735 delay -= (time_diff * s->sdl_sample_rate) / 1000000;
736 }
737
738 delay += 2 * data_used;
739 if (delay < data_used)
740 delay = data_used;
741
742 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
743 if (s->show_audio == 1) {
744 h = INT_MIN;
745 for (i = 0; i < 1000; i += channels) {
746 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
747 int a = s->sample_array[idx];
748 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
749 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
750 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
751 int score = a - d;
752 if (h < score && (b ^ c) < 0) {
753 h = score;
754 i_start = idx;
755 }
756 }
757 }
758
759 s->last_i_start = i_start;
760 } else {
761 i_start = s->last_i_start;
762 }
763
764 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
765 if (s->show_audio == 1) {
766 fill_rectangle(screen,
767 s->xleft, s->ytop, s->width, s->height,
768 bgcolor);
769
770 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
771
772 /* total height for one channel */
773 h = s->height / nb_display_channels;
774 /* graph height / 2 */
775 h2 = (h * 9) / 20;
776 for (ch = 0; ch < nb_display_channels; ch++) {
777 i = i_start + ch;
778 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
779 for (x = 0; x < s->width; x++) {
780 y = (s->sample_array[i] * h2) >> 15;
781 if (y < 0) {
782 y = -y;
783 ys = y1 - y;
784 } else {
785 ys = y1;
786 }
787 fill_rectangle(screen,
788 s->xleft + x, ys, 1, y,
789 fgcolor);
790 i += channels;
791 if (i >= SAMPLE_ARRAY_SIZE)
792 i -= SAMPLE_ARRAY_SIZE;
793 }
794 }
795
796 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
797
798 for (ch = 1; ch < nb_display_channels; ch++) {
799 y = s->ytop + ch * h;
800 fill_rectangle(screen,
801 s->xleft, y, s->width, 1,
802 fgcolor);
803 }
804 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
805 } else {
806 nb_display_channels= FFMIN(nb_display_channels, 2);
807 if (rdft_bits != s->rdft_bits) {
808 av_rdft_end(s->rdft);
809 av_free(s->rdft_data);
810 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
811 s->rdft_bits = rdft_bits;
812 s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
813 }
814 {
815 FFTSample *data[2];
816 for (ch = 0; ch < nb_display_channels; ch++) {
817 data[ch] = s->rdft_data + 2 * nb_freq * ch;
818 i = i_start + ch;
819 for (x = 0; x < 2 * nb_freq; x++) {
820 double w = (x-nb_freq) * (1.0 / nb_freq);
821 data[ch][x] = s->sample_array[i] * (1.0 - w * w);
822 i += channels;
823 if (i >= SAMPLE_ARRAY_SIZE)
824 i -= SAMPLE_ARRAY_SIZE;
825 }
826 av_rdft_calc(s->rdft, data[ch]);
827 }
828 /* This is the least efficient way to do this; we could of course
829 * access the data directly, but it is more than fast enough. */
830 for (y = 0; y < s->height; y++) {
831 double w = 1 / sqrt(nb_freq);
832 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
833 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
834 + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
835 a = FFMIN(a, 255);
836 b = FFMIN(b, 255);
837 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
838
839 fill_rectangle(screen,
840 s->xpos, s->height-y, 1, 1,
841 fgcolor);
842 }
843 }
844 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
845 s->xpos++;
846 if (s->xpos >= s->width)
847 s->xpos= s->xleft;
848 }
849 }
850
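/* (Re)open the SDL video surface, choosing the size from fullscreen mode, the
   user-specified dimensions, or the output of the video filter chain. */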
851 static int video_open(PlayerState *is)
852 {
853 int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
854 int w,h;
855
856 if (is_full_screen) flags |= SDL_FULLSCREEN;
857 else flags |= SDL_RESIZABLE;
858
859 if (is_full_screen && fs_screen_width) {
860 w = fs_screen_width;
861 h = fs_screen_height;
862 } else if (!is_full_screen && screen_width) {
863 w = screen_width;
864 h = screen_height;
865 } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
866 w = is->out_video_filter->inputs[0]->w;
867 h = is->out_video_filter->inputs[0]->h;
868 } else {
869 w = 640;
870 h = 480;
871 }
872 if (screen && is->width == screen->w && screen->w == w
873 && is->height== screen->h && screen->h == h)
874 return 0;
875
876 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
877 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
878 screen = SDL_SetVideoMode(w, h, 24, flags);
879 #else
880 screen = SDL_SetVideoMode(w, h, 0, flags);
881 #endif
882 if (!screen) {
883 fprintf(stderr, "SDL: could not set video mode - exiting\n");
884 return -1;
885 }
886 if (!window_title)
887 window_title = input_filename;
888 SDL_WM_SetCaption(window_title, window_title);
889
890 is->width = screen->w;
891 is->height = screen->h;
892
893 return 0;
894 }
895
896 /* display the current picture, if any */
897 static void video_display(PlayerState *is)
898 {
899 if (!screen)
900 video_open(player);
901 if (is->audio_st && is->show_audio)
902 video_audio_display(is);
903 else if (is->video_st)
904 video_image_display(is);
905 }
906
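/* Periodically push an FF_REFRESH_EVENT so the main loop redraws the display;
   the refresh interval depends on whether the audio visualization is shown. */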
907 static int refresh_thread(void *opaque)
908 {
909 PlayerState *is= opaque;
910 while (!is->abort_request) {
911 SDL_Event event;
912 event.type = FF_REFRESH_EVENT;
913 event.user.data1 = opaque;
914 if (!is->refresh) {
915 is->refresh = 1;
916 SDL_PushEvent(&event);
917 }
918 av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be pointless
919 }
920 return 0;
921 }
922
923 /* get the current audio clock value */
924 static double get_audio_clock(PlayerState *is)
925 {
926 double pts;
927 int hw_buf_size, bytes_per_sec;
928 pts = is->audio_clock;
929 hw_buf_size = audio_write_get_buf_size(is);
930 bytes_per_sec = 0;
931 if (is->audio_st) {
932 bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
933 av_get_bytes_per_sample(is->sdl_sample_fmt);
934 }
935 if (bytes_per_sec)
936 pts -= (double)hw_buf_size / bytes_per_sec;
937 return pts;
938 }
939
940 /* get the current video clock value */
941 static double get_video_clock(PlayerState *is)
942 {
943 if (is->paused) {
944 return is->video_current_pts;
945 } else {
946 return is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
947 }
948 }
949
950 /* get the current external clock value */
951 static double get_external_clock(PlayerState *is)
952 {
953 int64_t ti;
954 ti = av_gettime_relative();
955 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
956 }
957
958 /* get the current master clock value */
959 static double get_master_clock(PlayerState *is)
960 {
961 double val;
962
963 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
964 if (is->video_st)
965 val = get_video_clock(is);
966 else
967 val = get_audio_clock(is);
968 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
969 if (is->audio_st)
970 val = get_audio_clock(is);
971 else
972 val = get_video_clock(is);
973 } else {
974 val = get_external_clock(is);
975 }
976 return val;
977 }
978
979 /* seek in the stream */
980 static void stream_seek(PlayerState *is, int64_t pos, int64_t rel, int seek_by_bytes)
981 {
982 if (!is->seek_req) {
983 is->seek_pos = pos;
984 is->seek_rel = rel;
985 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
986 if (seek_by_bytes)
987 is->seek_flags |= AVSEEK_FLAG_BYTE;
988 is->seek_req = 1;
989 }
990 }
991
992 /* pause or resume the video */
993 static void stream_pause(PlayerState *is)
994 {
995 if (is->paused) {
996 is->frame_timer += av_gettime_relative() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
997 if (is->read_pause_return != AVERROR(ENOSYS)) {
998 is->video_current_pts = is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
999 }
1000 is->video_current_pts_drift = is->video_current_pts - av_gettime_relative() / 1000000.0;
1001 }
1002 is->paused = !is->paused;
1003 }
1004
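/* Compute the absolute time at which the frame with the given pts should be
   displayed; when video is not the master clock, the delay is shortened or
   doubled to correct drift against the master. */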
1005 static double compute_target_time(double frame_current_pts, PlayerState *is)
1006 {
1007 double delay, sync_threshold, diff = 0;
1008
1009 /* compute nominal delay */
1010 delay = frame_current_pts - is->frame_last_pts;
1011 if (delay <= 0 || delay >= 10.0) {
1012 /* if incorrect delay, use previous one */
1013 delay = is->frame_last_delay;
1014 } else {
1015 is->frame_last_delay = delay;
1016 }
1017 is->frame_last_pts = frame_current_pts;
1018
1019 /* update delay to follow master synchronisation source */
1020 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1021 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1022 /* if video is slave, we try to correct big delays by
1023 duplicating or deleting a frame */
1024 diff = get_video_clock(is) - get_master_clock(is);
1025
1026 /* skip or repeat frame. We take into account the
1027 delay to compute the threshold. I still don't know
1028 if it is the best guess */
1029 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1030 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1031 if (diff <= -sync_threshold)
1032 delay = 0;
1033 else if (diff >= sync_threshold)
1034 delay = 2 * delay;
1035 }
1036 }
1037 is->frame_timer += delay;
1038
1039 av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1040 delay, frame_current_pts, -diff);
1041
1042 return is->frame_timer;
1043 }
1044
1045 /* called to display each frame */
1046 static void video_refresh_timer(void *opaque)
1047 {
1048 PlayerState *is = opaque;
1049 VideoPicture *vp;
1050
1051 SubPicture *sp, *sp2;
1052
1053 if (is->video_st) {
1054 retry:
1055 if (is->pictq_size == 0) {
1056 // nothing to do, no picture to display in the queue
1057 } else {
1058 double time = av_gettime_relative() / 1000000.0;
1059 double next_target;
1060 /* dequeue the picture */
1061 vp = &is->pictq[is->pictq_rindex];
1062
1063 if (time < vp->target_clock)
1064 return;
1065 /* update current video pts */
1066 is->video_current_pts = vp->pts;
1067 is->video_current_pts_drift = is->video_current_pts - time;
1068 is->video_current_pos = vp->pos;
1069 if (is->pictq_size > 1) {
1070 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1071 assert(nextvp->target_clock >= vp->target_clock);
1072 next_target= nextvp->target_clock;
1073 } else {
1074 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1075 }
1076 if (framedrop && time > next_target) {
1077 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1078 if (is->pictq_size > 1 || time > next_target + 0.5) {
1079 /* update queue size and signal for next picture */
1080 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1081 is->pictq_rindex = 0;
1082
1083 SDL_LockMutex(is->pictq_mutex);
1084 is->pictq_size--;
1085 SDL_CondSignal(is->pictq_cond);
1086 SDL_UnlockMutex(is->pictq_mutex);
1087 goto retry;
1088 }
1089 }
1090
1091 if (is->subtitle_st) {
1092 if (is->subtitle_stream_changed) {
1093 SDL_LockMutex(is->subpq_mutex);
1094
1095 while (is->subpq_size) {
1096 free_subpicture(&is->subpq[is->subpq_rindex]);
1097
1098 /* update queue size and signal for next picture */
1099 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1100 is->subpq_rindex = 0;
1101
1102 is->subpq_size--;
1103 }
1104 is->subtitle_stream_changed = 0;
1105
1106 SDL_CondSignal(is->subpq_cond);
1107 SDL_UnlockMutex(is->subpq_mutex);
1108 } else {
1109 if (is->subpq_size > 0) {
1110 sp = &is->subpq[is->subpq_rindex];
1111
1112 if (is->subpq_size > 1)
1113 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1114 else
1115 sp2 = NULL;
1116
1117 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1118 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1119 {
1120 free_subpicture(sp);
1121
1122 /* update queue size and signal for next picture */
1123 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1124 is->subpq_rindex = 0;
1125
1126 SDL_LockMutex(is->subpq_mutex);
1127 is->subpq_size--;
1128 SDL_CondSignal(is->subpq_cond);
1129 SDL_UnlockMutex(is->subpq_mutex);
1130 }
1131 }
1132 }
1133 }
1134
1135 /* display picture */
1136 if (!display_disable)
1137 video_display(is);
1138
1139 /* update queue size and signal for next picture */
1140 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1141 is->pictq_rindex = 0;
1142
1143 SDL_LockMutex(is->pictq_mutex);
1144 is->pictq_size--;
1145 SDL_CondSignal(is->pictq_cond);
1146 SDL_UnlockMutex(is->pictq_mutex);
1147 }
1148 } else if (is->audio_st) {
1149 /* draw the next audio frame */
1150
1151 /* if there is only an audio stream, display the audio bars (better
1152 than nothing, just to test the implementation) */
1153
1154 /* display picture */
1155 if (!display_disable)
1156 video_display(is);
1157 }
1158 if (show_status) {
1159 static int64_t last_time;
1160 int64_t cur_time;
1161 int aqsize, vqsize, sqsize;
1162 double av_diff;
1163
1164 cur_time = av_gettime_relative();
1165 if (!last_time || (cur_time - last_time) >= 30000) {
1166 aqsize = 0;
1167 vqsize = 0;
1168 sqsize = 0;
1169 if (is->audio_st)
1170 aqsize = is->audioq.size;
1171 if (is->video_st)
1172 vqsize = is->videoq.size;
1173 if (is->subtitle_st)
1174 sqsize = is->subtitleq.size;
1175 av_diff = 0;
1176 if (is->audio_st && is->video_st)
1177 av_diff = get_audio_clock(is) - get_video_clock(is);
1178 printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1179 get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1180 vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1181 fflush(stdout);
1182 last_time = cur_time;
1183 }
1184 }
1185 }
1186
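/* Tear down the player: request abort, join the parse and refresh threads, and
   free the queued pictures and the queue synchronization primitives. */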
1187 static void player_close(PlayerState *is)
1188 {
1189 VideoPicture *vp;
1190 int i;
1191 /* XXX: use a special url_shutdown call to abort parse cleanly */
1192 is->abort_request = 1;
1193 SDL_WaitThread(is->parse_tid, NULL);
1194 SDL_WaitThread(is->refresh_tid, NULL);
1195
1196 /* free all pictures */
1197 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1198 vp = &is->pictq[i];
1199 if (vp->bmp) {
1200 SDL_FreeYUVOverlay(vp->bmp);
1201 vp->bmp = NULL;
1202 }
1203 }
1204 SDL_DestroyMutex(is->pictq_mutex);
1205 SDL_DestroyCond(is->pictq_cond);
1206 SDL_DestroyMutex(is->subpq_mutex);
1207 SDL_DestroyCond(is->subpq_cond);
1208 }
1209
1210 static void do_exit(void)
1211 {
1212 if (player) {
1213 player_close(player);
1214 player = NULL;
1215 }
1216 uninit_opts();
1217 avformat_network_deinit();
1218 if (show_status)
1219 printf("\n");
1220 SDL_Quit();
1221 av_log(NULL, AV_LOG_QUIET, "");
1222 exit(0);
1223 }
1224
1225 /* allocate a picture (this needs to be done in the main thread to avoid
1226 potential locking problems) */
1227 static void alloc_picture(void *opaque)
1228 {
1229 PlayerState *is = opaque;
1230 VideoPicture *vp;
1231
1232 vp = &is->pictq[is->pictq_windex];
1233
1234 if (vp->bmp)
1235 SDL_FreeYUVOverlay(vp->bmp);
1236
1237 vp->width = is->out_video_filter->inputs[0]->w;
1238 vp->height = is->out_video_filter->inputs[0]->h;
1239 vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1240
1241 vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1242 SDL_YV12_OVERLAY,
1243 screen);
1244 if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1245 /* SDL allocates a buffer smaller than requested if the video
1246 * overlay hardware is unable to support the requested size. */
1247 fprintf(stderr, "Error: the video system does not support an image\n"
1248 "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1249 "to reduce the image size.\n", vp->width, vp->height );
1250 do_exit();
1251 }
1252
1253 SDL_LockMutex(is->pictq_mutex);
1254 vp->allocated = 1;
1255 SDL_CondSignal(is->pictq_cond);
1256 SDL_UnlockMutex(is->pictq_mutex);
1257 }
1258
1259 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1260 * guessed if not known. */
1261 static int queue_picture(PlayerState *is, AVFrame *src_frame, double pts, int64_t pos)
1262 {
1263 VideoPicture *vp;
1264
1265 /* wait until we have space to put a new picture */
1266 SDL_LockMutex(is->pictq_mutex);
1267
1268 if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1269 is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1270
1271 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1272 !is->videoq.abort_request) {
1273 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1274 }
1275 SDL_UnlockMutex(is->pictq_mutex);
1276
1277 if (is->videoq.abort_request)
1278 return -1;
1279
1280 vp = &is->pictq[is->pictq_windex];
1281
1282 vp->sar = src_frame->sample_aspect_ratio;
1283
1284 /* alloc or resize hardware picture buffer */
1285 if (!vp->bmp || vp->reallocate ||
1286 vp->width != is->out_video_filter->inputs[0]->w ||
1287 vp->height != is->out_video_filter->inputs[0]->h) {
1288 SDL_Event event;
1289
1290 vp->allocated = 0;
1291 vp->reallocate = 0;
1292
1293 /* the allocation must be done in the main thread to avoid
1294 locking problems */
1295 event.type = FF_ALLOC_EVENT;
1296 event.user.data1 = is;
1297 SDL_PushEvent(&event);
1298
1299 /* wait until the picture is allocated */
1300 SDL_LockMutex(is->pictq_mutex);
1301 while (!vp->allocated && !is->videoq.abort_request) {
1302 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1303 }
1304 SDL_UnlockMutex(is->pictq_mutex);
1305
1306 if (is->videoq.abort_request)
1307 return -1;
1308 }
1309
1310 /* if the frame is not skipped, then display it */
1311 if (vp->bmp) {
1312 uint8_t *data[4];
1313 int linesize[4];
1314
1315 /* get a pointer on the bitmap */
1316 SDL_LockYUVOverlay (vp->bmp);
1317
1318 data[0] = vp->bmp->pixels[0];
1319 data[1] = vp->bmp->pixels[2];
1320 data[2] = vp->bmp->pixels[1];
1321
1322 linesize[0] = vp->bmp->pitches[0];
1323 linesize[1] = vp->bmp->pitches[2];
1324 linesize[2] = vp->bmp->pitches[1];
1325
1326 // FIXME use direct rendering
1327 av_image_copy(data, linesize, src_frame->data, src_frame->linesize,
1328 vp->pix_fmt, vp->width, vp->height);
1329
1330 /* update the bitmap content */
1331 SDL_UnlockYUVOverlay(vp->bmp);
1332
1333 vp->pts = pts;
1334 vp->pos = pos;
1335
1336 /* now we can update the picture count */
1337 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1338 is->pictq_windex = 0;
1339 SDL_LockMutex(is->pictq_mutex);
1340 vp->target_clock = compute_target_time(vp->pts, is);
1341
1342 is->pictq_size++;
1343 SDL_UnlockMutex(is->pictq_mutex);
1344 }
1345 return 0;
1346 }
1347
1348 /* Compute the exact PTS for the picture if it is omitted in the stream.
1349 * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1350 static int output_picture2(PlayerState *is, AVFrame *src_frame, double pts1, int64_t pos)
1351 {
1352 double frame_delay, pts;
1353 int ret;
1354
1355 pts = pts1;
1356
1357 if (pts != 0) {
1358 /* update video clock with pts, if present */
1359 is->video_clock = pts;
1360 } else {
1361 pts = is->video_clock;
1362 }
1363 /* update video clock for next frame */
1364 frame_delay = av_q2d(is->video_dec->time_base);
1365 /* For MPEG-2, the frame can be repeated, so we update the
1366 clock accordingly */
1367 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1368 is->video_clock += frame_delay;
1369
1370 ret = queue_picture(is, src_frame, pts, pos);
1371 av_frame_unref(src_frame);
1372 return ret;
1373 }
1374
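/* Dequeue and decode the next video packet. Returns 1 when a frame is ready for
   display, 0 if it was skipped (frame dropping or queue flush), and a negative
   value on abort. */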
1375 static int get_video_frame(PlayerState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1376 {
1377 int got_picture, i;
1378
1379 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1380 return -1;
1381
1382 if (pkt->data == flush_pkt.data) {
1383 avcodec_flush_buffers(is->video_dec);
1384
1385 SDL_LockMutex(is->pictq_mutex);
1386 // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1387 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1388 is->pictq[i].target_clock= 0;
1389 }
1390 while (is->pictq_size && !is->videoq.abort_request) {
1391 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1392 }
1393 is->video_current_pos = -1;
1394 SDL_UnlockMutex(is->pictq_mutex);
1395
1396 init_pts_correction(&is->pts_ctx);
1397 is->frame_last_pts = AV_NOPTS_VALUE;
1398 is->frame_last_delay = 0;
1399 is->frame_timer = (double)av_gettime_relative() / 1000000.0;
1400 is->skip_frames = 1;
1401 is->skip_frames_index = 0;
1402 return 0;
1403 }
1404
1405 avcodec_decode_video2(is->video_dec, frame, &got_picture, pkt);
1406
1407 if (got_picture) {
1408 if (decoder_reorder_pts == -1) {
1409 *pts = guess_correct_pts(&is->pts_ctx, frame->pts, frame->pkt_dts);
1410 } else if (decoder_reorder_pts) {
1411 *pts = frame->pts;
1412 } else {
1413 *pts = frame->pkt_dts;
1414 }
1415
1416 if (*pts == AV_NOPTS_VALUE) {
1417 *pts = 0;
1418 }
1419 if (is->video_st->sample_aspect_ratio.num) {
1420 frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1421 }
1422
1423 is->skip_frames_index += 1;
1424 if (is->skip_frames_index >= is->skip_frames) {
1425 is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1426 return 1;
1427 }
1428 av_frame_unref(frame);
1429 }
1430 return 0;
1431 }
1432
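/* Build the video filter graph: a buffer source fed by the decoder, optional
   user filters and auto-rotation, a conversion to yuv420p, and a buffer sink
   whose output is displayed. */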
1433 static int configure_video_filters(AVFilterGraph *graph, PlayerState *is, const char *vfilters)
1434 {
1435 char sws_flags_str[128];
1436 char buffersrc_args[256];
1437 int ret;
1438 AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter;
1439 AVCodecContext *codec = is->video_dec;
1440
1441 snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1442 graph->scale_sws_opts = av_strdup(sws_flags_str);
1443
1444 snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1445 codec->width, codec->height, codec->pix_fmt,
1446 is->video_st->time_base.num, is->video_st->time_base.den,
1447 codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1448
1449
1450 if ((ret = avfilter_graph_create_filter(&filt_src,
1451 avfilter_get_by_name("buffer"),
1452 "src", buffersrc_args, NULL,
1453 graph)) < 0)
1454 return ret;
1455 if ((ret = avfilter_graph_create_filter(&filt_out,
1456 avfilter_get_by_name("buffersink"),
1457 "out", NULL, NULL, graph)) < 0)
1458 return ret;
1459
1460 last_filter = filt_out;
1461
1462 /* Note: this macro adds a filter before the last added filter, so the
1463 * processing order of the filters is reversed */
1464 #define INSERT_FILT(name, arg) do { \
1465 AVFilterContext *filt_ctx; \
1466 \
1467 ret = avfilter_graph_create_filter(&filt_ctx, \
1468 avfilter_get_by_name(name), \
1469 "avplay_" name, arg, NULL, graph); \
1470 if (ret < 0) \
1471 return ret; \
1472 \
1473 ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1474 if (ret < 0) \
1475 return ret; \
1476 \
1477 last_filter = filt_ctx; \
1478 } while (0)
1479
1480 INSERT_FILT("format", "yuv420p");
1481
1482 if (autorotate) {
1483 uint8_t* displaymatrix = av_stream_get_side_data(is->video_st,
1484 AV_PKT_DATA_DISPLAYMATRIX, NULL);
1485 if (displaymatrix) {
1486 double rot = av_display_rotation_get((int32_t*) displaymatrix);
1487 if (rot < -135 || rot > 135) {
1488 INSERT_FILT("vflip", NULL);
1489 INSERT_FILT("hflip", NULL);
1490 } else if (rot < -45) {
1491 INSERT_FILT("transpose", "dir=clock");
1492 } else if (rot > 45) {
1493 INSERT_FILT("transpose", "dir=cclock");
1494 }
1495 }
1496 }
1497
1498 if (vfilters) {
1499 AVFilterInOut *outputs = avfilter_inout_alloc();
1500 AVFilterInOut *inputs = avfilter_inout_alloc();
1501
1502 outputs->name = av_strdup("in");
1503 outputs->filter_ctx = filt_src;
1504 outputs->pad_idx = 0;
1505 outputs->next = NULL;
1506
1507 inputs->name = av_strdup("out");
1508 inputs->filter_ctx = last_filter;
1509 inputs->pad_idx = 0;
1510 inputs->next = NULL;
1511
1512 if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1513 return ret;
1514 } else {
1515 if ((ret = avfilter_link(filt_src, 0, last_filter, 0)) < 0)
1516 return ret;
1517 }
1518
1519 if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1520 return ret;
1521
1522 is->in_video_filter = filt_src;
1523 is->out_video_filter = filt_out;
1524
1525 return ret;
1526 }
1527
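/* Video decoding thread: reads packets from the video queue, decodes them, runs
   the frames through the filter graph and queues the result for display. */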
1528 static int video_thread(void *arg)
1529 {
1530 AVPacket pkt = { 0 };
1531 PlayerState *is = arg;
1532 AVFrame *frame = av_frame_alloc();
1533 int64_t pts_int;
1534 double pts;
1535 int ret;
1536
1537 AVFilterGraph *graph = avfilter_graph_alloc();
1538 AVFilterContext *filt_out = NULL, *filt_in = NULL;
1539 int last_w = is->video_dec->width;
1540 int last_h = is->video_dec->height;
1541 if (!graph) {
1542 av_frame_free(&frame);
1543 return AVERROR(ENOMEM);
1544 }
1545
1546 if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1547 goto the_end;
1548 filt_in = is->in_video_filter;
1549 filt_out = is->out_video_filter;
1550
1551 if (!frame) {
1552 avfilter_graph_free(&graph);
1553 return AVERROR(ENOMEM);
1554 }
1555
1556 for (;;) {
1557 AVRational tb;
1558 while (is->paused && !is->videoq.abort_request)
1559 SDL_Delay(10);
1560
1561 av_packet_unref(&pkt);
1562
1563 ret = get_video_frame(is, frame, &pts_int, &pkt);
1564 if (ret < 0)
1565 goto the_end;
1566
1567 if (!ret)
1568 continue;
1569
1570 if ( last_w != is->video_dec->width
1571 || last_h != is->video_dec->height) {
1572 av_log(NULL, AV_LOG_TRACE, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1573 is->video_dec->width, is->video_dec->height);
1574 avfilter_graph_free(&graph);
1575 graph = avfilter_graph_alloc();
1576 if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1577 goto the_end;
1578 filt_in = is->in_video_filter;
1579 filt_out = is->out_video_filter;
1580 last_w = is->video_dec->width;
1581 last_h = is->video_dec->height;
1582 }
1583
1584 frame->pts = pts_int;
1585 ret = av_buffersrc_add_frame(filt_in, frame);
1586 if (ret < 0)
1587 goto the_end;
1588
1589 while (ret >= 0) {
1590 ret = av_buffersink_get_frame(filt_out, frame);
1591 if (ret < 0) {
1592 ret = 0;
1593 break;
1594 }
1595
1596 pts_int = frame->pts;
1597 tb = filt_out->inputs[0]->time_base;
1598 if (av_cmp_q(tb, is->video_st->time_base)) {
1599 av_unused int64_t pts1 = pts_int;
1600 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1601 av_log(NULL, AV_LOG_TRACE, "video_thread(): "
1602 "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1603 tb.num, tb.den, pts1,
1604 is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1605 }
1606 pts = pts_int * av_q2d(is->video_st->time_base);
1607 ret = output_picture2(is, frame, pts, 0);
1608 }
1609
1610 if (ret < 0)
1611 goto the_end;
1612
1613
1614 if (step)
1615 if (player)
1616 stream_pause(player);
1617 }
1618 the_end:
1619 av_freep(&vfilters);
1620 avfilter_graph_free(&graph);
1621 av_packet_unref(&pkt);
1622 av_frame_free(&frame);
1623 return 0;
1624 }
1625
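/* Subtitle decoding thread: decodes subtitle packets and converts the palette of
   bitmap subtitles from RGBA to YUVA before queueing them for display. */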
1626 static int subtitle_thread(void *arg)
1627 {
1628 PlayerState *is = arg;
1629 SubPicture *sp;
1630 AVPacket pkt1, *pkt = &pkt1;
1631 int got_subtitle;
1632 double pts;
1633 int i, j;
1634 int r, g, b, y, u, v, a;
1635
1636 for (;;) {
1637 while (is->paused && !is->subtitleq.abort_request) {
1638 SDL_Delay(10);
1639 }
1640 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1641 break;
1642
1643 if (pkt->data == flush_pkt.data) {
1644 avcodec_flush_buffers(is->subtitle_dec);
1645 continue;
1646 }
1647 SDL_LockMutex(is->subpq_mutex);
1648 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1649 !is->subtitleq.abort_request) {
1650 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1651 }
1652 SDL_UnlockMutex(is->subpq_mutex);
1653
1654 if (is->subtitleq.abort_request)
1655 return 0;
1656
1657 sp = &is->subpq[is->subpq_windex];
1658
1659 /* NOTE: pts is the PTS of the _first_ picture beginning in
1660 this packet, if any */
1661 pts = 0;
1662 if (pkt->pts != AV_NOPTS_VALUE)
1663 pts = av_q2d(is->subtitle_dec->time_base) * pkt->pts;
1664
1665 avcodec_decode_subtitle2(is->subtitle_dec, &sp->sub,
1666 &got_subtitle, pkt);
1667
1668 if (got_subtitle && sp->sub.format == 0) {
1669 sp->pts = pts;
1670
1671 for (i = 0; i < sp->sub.num_rects; i++)
1672 {
1673 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1674 {
1675 RGBA_IN(r, g, b, a, (uint32_t *)sp->sub.rects[i]->data[1] + j);
1676 y = RGB_TO_Y_CCIR(r, g, b);
1677 u = RGB_TO_U_CCIR(r, g, b, 0);
1678 v = RGB_TO_V_CCIR(r, g, b, 0);
1679 YUVA_OUT((uint32_t *)sp->sub.rects[i]->data[1] + j, y, u, v, a);
1680 }
1681 }
1682
1683 /* now we can update the picture count */
1684 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1685 is->subpq_windex = 0;
1686 SDL_LockMutex(is->subpq_mutex);
1687 is->subpq_size++;
1688 SDL_UnlockMutex(is->subpq_mutex);
1689 }
1690 av_packet_unref(pkt);
1691 }
1692 return 0;
1693 }
1694
1695 /* copy samples for viewing in editor window */
1696 static void update_sample_display(PlayerState *is, short *samples, int samples_size)
1697 {
1698 int size, len;
1699
1700 size = samples_size / sizeof(short);
1701 while (size > 0) {
1702 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1703 if (len > size)
1704 len = size;
1705 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1706 samples += len;
1707 is->sample_array_index += len;
1708 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1709 is->sample_array_index = 0;
1710 size -= len;
1711 }
1712 }
1713
1714 /* return the new audio buffer size (samples can be added or deleted
1715 to get better sync when the video or external clock is the master) */
1716 static int synchronize_audio(PlayerState *is, short *samples,
1717 int samples_size1, double pts)
1718 {
1719 int n, samples_size;
1720 double ref_clock;
1721
1722 n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1723 samples_size = samples_size1;
1724
1725 /* if not master, then we try to remove or add samples to correct the clock */
1726 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1727 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1728 double diff, avg_diff;
1729 int wanted_size, min_size, max_size, nb_samples;
1730
1731 ref_clock = get_master_clock(is);
1732 diff = get_audio_clock(is) - ref_clock;
1733
1734 if (diff < AV_NOSYNC_THRESHOLD) {
1735 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1736 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1737 /* not enough measurements for a reliable estimate */
1738 is->audio_diff_avg_count++;
1739 } else {
1740 /* estimate the A-V difference */
1741 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1742
1743 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1744 wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1745 nb_samples = samples_size / n;
1746
1747 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1748 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1749 if (wanted_size < min_size)
1750 wanted_size = min_size;
1751 else if (wanted_size > max_size)
1752 wanted_size = max_size;
1753
1754 /* add or remove samples to correct the synchronization */
1755 if (wanted_size < samples_size) {
1756 /* remove samples */
1757 samples_size = wanted_size;
1758 } else if (wanted_size > samples_size) {
1759 uint8_t *samples_end, *q;
1760 int nb;
1761
1762 /* add samples */
1763 nb = wanted_size - samples_size;
1764 samples_end = (uint8_t *)samples + samples_size - n;
1765 q = samples_end + n;
1766 while (nb > 0) {
1767 memcpy(q, samples_end, n);
1768 q += n;
1769 nb -= n;
1770 }
1771 samples_size = wanted_size;
1772 }
1773 }
1774 av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1775 diff, avg_diff, samples_size - samples_size1,
1776 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1777 }
1778 } else {
1779 /* the difference is too large: probably initial PTS errors, so
1780 reset the A-V filter */
1781 is->audio_diff_avg_count = 0;
1782 is->audio_diff_cum = 0;
1783 }
1784 }
1785
1786 return samples_size;
1787 }
1788
1789 /* decode one audio frame and return its uncompressed size */
1790 static int audio_decode_frame(PlayerState *is, double *pts_ptr)
1791 {
1792 AVPacket *pkt_temp = &is->audio_pkt_temp;
1793 AVPacket *pkt = &is->audio_pkt;
1794 AVCodecContext *dec = is->audio_dec;
1795 int n, len1, data_size, got_frame;
1796 double pts;
1797 int new_packet = 0;
1798 int flush_complete = 0;
1799
1800 for (;;) {
1801 /* NOTE: the audio packet can contain several frames */
1802 while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1803 int resample_changed, audio_resample;
1804
1805 if (!is->frame) {
1806 if (!(is->frame = av_frame_alloc()))
1807 return AVERROR(ENOMEM);
1808 }
1809
1810 if (flush_complete)
1811 break;
1812 new_packet = 0;
1813 len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1814 if (len1 < 0) {
1815 /* if error, we skip the frame */
1816 pkt_temp->size = 0;
1817 break;
1818 }
1819
1820 pkt_temp->data += len1;
1821 pkt_temp->size -= len1;
1822
1823 if (!got_frame) {
1824 /* stop sending empty packets if the decoder is finished */
1825 if (!pkt_temp->data && (dec->codec->capabilities & AV_CODEC_CAP_DELAY))
1826 flush_complete = 1;
1827 continue;
1828 }
1829 data_size = av_samples_get_buffer_size(NULL, dec->channels,
1830 is->frame->nb_samples,
1831 is->frame->format, 1);
1832
1833 audio_resample = is->frame->format != is->sdl_sample_fmt ||
1834 is->frame->channel_layout != is->sdl_channel_layout ||
1835 is->frame->sample_rate != is->sdl_sample_rate;
1836
1837 resample_changed = is->frame->format != is->resample_sample_fmt ||
1838 is->frame->channel_layout != is->resample_channel_layout ||
1839 is->frame->sample_rate != is->resample_sample_rate;
1840
1841 if ((!is->avr && audio_resample) || resample_changed) {
1842 int ret;
1843 if (is->avr)
1844 avresample_close(is->avr);
1845 else if (audio_resample) {
1846 is->avr = avresample_alloc_context();
1847 if (!is->avr) {
1848 fprintf(stderr, "error allocating AVAudioResampleContext\n");
1849 break;
1850 }
1851 }
1852 if (audio_resample) {
1853 av_opt_set_int(is->avr, "in_channel_layout", is->frame->channel_layout, 0);
1854 av_opt_set_int(is->avr, "in_sample_fmt", is->frame->format, 0);
1855 av_opt_set_int(is->avr, "in_sample_rate", is->frame->sample_rate, 0);
1856 av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout, 0);
1857 av_opt_set_int(is->avr, "out_sample_fmt", is->sdl_sample_fmt, 0);
1858 av_opt_set_int(is->avr, "out_sample_rate", is->sdl_sample_rate, 0);
1859
1860 if ((ret = avresample_open(is->avr)) < 0) {
1861 fprintf(stderr, "error initializing libavresample\n");
1862 break;
1863 }
1864 }
1865 is->resample_sample_fmt = is->frame->format;
1866 is->resample_channel_layout = is->frame->channel_layout;
1867 is->resample_sample_rate = is->frame->sample_rate;
1868 }
1869
1870 if (audio_resample) {
1871 void *tmp_out;
1872 int out_samples, out_size, out_linesize;
1873 int osize = av_get_bytes_per_sample(is->sdl_sample_fmt);
1874 int nb_samples = is->frame->nb_samples;
1875
1876 out_size = av_samples_get_buffer_size(&out_linesize,
1877 is->sdl_channels,
1878 nb_samples,
1879 is->sdl_sample_fmt, 0);
1880 tmp_out = av_realloc(is->audio_buf1, out_size);
1881 if (!tmp_out)
1882 return AVERROR(ENOMEM);
1883 is->audio_buf1 = tmp_out;
1884
1885 out_samples = avresample_convert(is->avr,
1886 &is->audio_buf1,
1887 out_linesize, nb_samples,
1888 is->frame->data,
1889 is->frame->linesize[0],
1890 is->frame->nb_samples);
1891 if (out_samples < 0) {
1892 fprintf(stderr, "avresample_convert() failed\n");
1893 break;
1894 }
1895 is->audio_buf = is->audio_buf1;
1896 data_size = out_samples * osize * is->sdl_channels;
1897 } else {
1898 is->audio_buf = is->frame->data[0];
1899 }
1900
1901 /* if no pts, then compute it */
1902 pts = is->audio_clock;
1903 *pts_ptr = pts;
1904 n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1905 is->audio_clock += (double)data_size /
1906 (double)(n * is->sdl_sample_rate);
1907 #ifdef DEBUG
1908 {
1909 static double last_clock;
1910 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1911 is->audio_clock - last_clock,
1912 is->audio_clock, pts);
1913 last_clock = is->audio_clock;
1914 }
1915 #endif
1916 return data_size;
1917 }
1918
1919 /* free the current packet */
1920 if (pkt->data)
1921 av_packet_unref(pkt);
1922 memset(pkt_temp, 0, sizeof(*pkt_temp));
1923
1924 if (is->paused || is->audioq.abort_request) {
1925 return -1;
1926 }
1927
1928 /* read next packet */
1929 if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
1930 return -1;
1931
1932 if (pkt->data == flush_pkt.data) {
1933 avcodec_flush_buffers(dec);
1934 flush_complete = 0;
1935 }
1936
1937 *pkt_temp = *pkt;
1938
1939 /* update the audio clock with the packet pts, if available */
1940 if (pkt->pts != AV_NOPTS_VALUE) {
1941 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1942 }
1943 }
1944 }
1945
1946 /* prepare a new audio buffer */
1947 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1948 {
1949 PlayerState *is = opaque;
1950 int audio_size, len1;
1951 double pts;
1952
1953 audio_callback_time = av_gettime_relative();
1954
1955 while (len > 0) {
1956 if (is->audio_buf_index >= is->audio_buf_size) {
1957 audio_size = audio_decode_frame(is, &pts);
1958 if (audio_size < 0) {
1959 /* if error, just output silence */
1960 is->audio_buf = is->silence_buf;
1961 is->audio_buf_size = sizeof(is->silence_buf);
1962 } else {
1963 if (is->show_audio)
1964 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1965 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1966 pts);
1967 is->audio_buf_size = audio_size;
1968 }
1969 is->audio_buf_index = 0;
1970 }
1971 len1 = is->audio_buf_size - is->audio_buf_index;
1972 if (len1 > len)
1973 len1 = len;
1974 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1975 len -= len1;
1976 stream += len1;
1977 is->audio_buf_index += len1;
1978 }
1979 }
1980
1981 static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type)
1982 {
1983 const AVCodecDescriptor *desc;
1984 AVCodec *codec = avcodec_find_decoder_by_name(name);
1985
1986 if (!codec && (desc = avcodec_descriptor_get_by_name(name))) {
1987 codec = avcodec_find_decoder(desc->id);
1988 if (codec)
1989 av_log(NULL, AV_LOG_VERBOSE, "Matched decoder '%s' for codec '%s'.\n",
1990 codec->name, desc->name);
1991 }
1992
1993 if (!codec) {
1994 av_log(NULL, AV_LOG_FATAL, "Unknown decoder '%s'\n", name);
1995 exit_program(1);
1996 }
1997
1998 if (codec->type != type) {
1999 av_log(NULL, AV_LOG_FATAL, "Invalid decoder type '%s'\n", name);
2000 exit_program(1);
2001 }
2002
2003 return codec;
2004 }
2005
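/* pick the decoder for a stream, honouring any "-codec[:stream_specifier]"
   override given on the command line; e.g. (illustrative) "-c:a mp2" would
   force the mp2 decoder for every audio stream */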
2006 static AVCodec *choose_decoder(PlayerState *is, AVFormatContext *ic, AVStream *st)
2007 {
2008 char *codec_name = NULL;
2009 int i, ret;
2010
2011 for (i = 0; i < is->nb_codec_names; i++) {
2012 char *spec = is->codec_names[i].specifier;
2013 if ((ret = check_stream_specifier(ic, st, spec)) > 0)
2014 codec_name = is->codec_names[i].u.str;
2015 else if (ret < 0)
2016 exit_program(1);
2017 }
2018
2019 if (codec_name) {
2020 AVCodec *codec = find_codec_or_die(codec_name, st->codecpar->codec_type);
2021 st->codecpar->codec_id = codec->id;
2022 return codec;
2023 } else
2024 return avcodec_find_decoder(st->codecpar->codec_id);
2025 }
2026
2027 /* open a given stream. Return 0 if OK */
2028 static int stream_component_open(PlayerState *is, int stream_index)
2029 {
2030 AVFormatContext *ic = is->ic;
2031 AVCodecContext *avctx;
2032 AVCodec *codec;
2033 SDL_AudioSpec wanted_spec, spec;
2034 AVDictionary *opts;
2035 AVDictionaryEntry *t = NULL;
2036 int ret = 0;
2037
2038 if (stream_index < 0 || stream_index >= ic->nb_streams)
2039 return -1;
2040
2041 avctx = avcodec_alloc_context3(NULL);
2042 if (!avctx)
2043 return AVERROR(ENOMEM);
2044
2045 ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2046 if (ret < 0) {
2047 avcodec_free_context(&avctx);
2048 return ret;
2049 }
2050
2051 opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2052
2053 codec = choose_decoder(is, ic, ic->streams[stream_index]);
2054 avctx->workaround_bugs = workaround_bugs;
2055 avctx->idct_algo = idct;
2056 avctx->skip_frame = skip_frame;
2057 avctx->skip_idct = skip_idct;
2058 avctx->skip_loop_filter = skip_loop_filter;
2059 avctx->error_concealment = error_concealment;
2060
2061 if (fast)
2062 avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2063
2064 if (!av_dict_get(opts, "threads", NULL, 0))
2065 av_dict_set(&opts, "threads", "auto", 0);
2066 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2067 av_dict_set(&opts, "refcounted_frames", "1", 0);
2068 if (!codec ||
2069 (ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2070 goto fail;
2071 }
2072 if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2073 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2074 ret = AVERROR_OPTION_NOT_FOUND;
2075 goto fail;
2076 }
2077
2078 /* prepare audio output */
2079 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2080 is->sdl_sample_rate = avctx->sample_rate;
2081
2082 if (!avctx->channel_layout)
2083 avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2084 if (!avctx->channel_layout) {
2085 fprintf(stderr, "unable to guess channel layout\n");
2086 ret = AVERROR_INVALIDDATA;
2087 goto fail;
2088 }
2089 if (avctx->channels == 1)
2090 is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2091 else
2092 is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2093 is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2094
2095 wanted_spec.format = AUDIO_S16SYS;
2096 wanted_spec.freq = is->sdl_sample_rate;
2097 wanted_spec.channels = is->sdl_channels;
2098 wanted_spec.silence = 0;
2099 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2100 wanted_spec.callback = sdl_audio_callback;
2101 wanted_spec.userdata = is;
2102 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2103 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2104 ret = AVERROR_UNKNOWN;
2105 goto fail;
2106 }
2107 is->audio_hw_buf_size = spec.size;
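/* spec.size is the obtained hardware buffer size in bytes; for the requested
   1024 samples of S16 stereo that is typically 1024 * 2 * 2 = 4096 */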
2108 is->sdl_sample_fmt = AV_SAMPLE_FMT_S16;
2109 is->resample_sample_fmt = is->sdl_sample_fmt;
2110 is->resample_channel_layout = avctx->channel_layout;
2111 is->resample_sample_rate = avctx->sample_rate;
2112 }
2113
2114 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2115 switch (avctx->codec_type) {
2116 case AVMEDIA_TYPE_AUDIO:
2117 is->audio_stream = stream_index;
2118 is->audio_st = ic->streams[stream_index];
2119 is->audio_dec = avctx;
2120 is->audio_buf_size = 0;
2121 is->audio_buf_index = 0;
2122
2123 /* init averaging filter */
2124 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2125 is->audio_diff_avg_count = 0;
2126 /* since we do not have a precise enough measure of the audio FIFO
2127 fullness, we only correct audio sync when the error exceeds this threshold */
2128 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
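/* exponential moving average: the coefficient equals 0.01^(1/AUDIO_DIFF_AVG_NB),
   so the weight of a difference sample decays to 1% after AUDIO_DIFF_AVG_NB
   samples; the threshold above corresponds to two SDL audio buffers,
   e.g. roughly 46 ms at 44.1 kHz */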
2129
2130 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2131 packet_queue_init(&is->audioq);
2132 SDL_PauseAudio(0);
2133 break;
2134 case AVMEDIA_TYPE_VIDEO:
2135 is->video_stream = stream_index;
2136 is->video_st = ic->streams[stream_index];
2137 is->video_dec = avctx;
2138
2139 packet_queue_init(&is->videoq);
2140 is->video_tid = SDL_CreateThread(video_thread, is);
2141 break;
2142 case AVMEDIA_TYPE_SUBTITLE:
2143 is->subtitle_stream = stream_index;
2144 is->subtitle_st = ic->streams[stream_index];
2145 is->subtitle_dec = avctx;
2146 packet_queue_init(&is->subtitleq);
2147
2148 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2149 break;
2150 default:
2151 break;
2152 }
2153
2154 fail:
2155 av_dict_free(&opts);
2156
2157 return ret;
2158 }
2159
2160 static void stream_component_close(PlayerState *is, int stream_index)
2161 {
2162 AVFormatContext *ic = is->ic;
2163 AVCodecParameters *par;
2164
2165 if (stream_index < 0 || stream_index >= ic->nb_streams)
2166 return;
2167 par = ic->streams[stream_index]->codecpar;
2168
2169 switch (par->codec_type) {
2170 case AVMEDIA_TYPE_AUDIO:
2171 packet_queue_abort(&is->audioq);
2172
2173 SDL_CloseAudio();
2174
2175 packet_queue_end(&is->audioq);
2176 av_packet_unref(&is->audio_pkt);
2177 if (is->avr)
2178 avresample_free(&is->avr);
2179 av_freep(&is->audio_buf1);
2180 is->audio_buf = NULL;
2181 av_frame_free(&is->frame);
2182
2183 if (is->rdft) {
2184 av_rdft_end(is->rdft);
2185 av_freep(&is->rdft_data);
2186 is->rdft = NULL;
2187 is->rdft_bits = 0;
2188 }
2189 break;
2190 case AVMEDIA_TYPE_VIDEO:
2191 packet_queue_abort(&is->videoq);
2192
2193 /* note: we also signal this mutex to make sure we unblock the
2194 video thread in all cases */
2195 SDL_LockMutex(is->pictq_mutex);
2196 SDL_CondSignal(is->pictq_cond);
2197 SDL_UnlockMutex(is->pictq_mutex);
2198
2199 SDL_WaitThread(is->video_tid, NULL);
2200
2201 packet_queue_end(&is->videoq);
2202 break;
2203 case AVMEDIA_TYPE_SUBTITLE:
2204 packet_queue_abort(&is->subtitleq);
2205
2206 /* note: we also signal this mutex to make sure we unblock the
2207 subtitle thread in all cases */
2208 SDL_LockMutex(is->subpq_mutex);
2209 is->subtitle_stream_changed = 1;
2210
2211 SDL_CondSignal(is->subpq_cond);
2212 SDL_UnlockMutex(is->subpq_mutex);
2213
2214 SDL_WaitThread(is->subtitle_tid, NULL);
2215
2216 packet_queue_end(&is->subtitleq);
2217 break;
2218 default:
2219 break;
2220 }
2221
2222 ic->streams[stream_index]->discard = AVDISCARD_ALL;
2223 switch (par->codec_type) {
2224 case AVMEDIA_TYPE_AUDIO:
2225 avcodec_free_context(&is->audio_dec);
2226 is->audio_st = NULL;
2227 is->audio_stream = -1;
2228 break;
2229 case AVMEDIA_TYPE_VIDEO:
2230 avcodec_free_context(&is->video_dec);
2231 is->video_st = NULL;
2232 is->video_stream = -1;
2233 break;
2234 case AVMEDIA_TYPE_SUBTITLE:
2235 avcodec_free_context(&is->subtitle_dec);
2236 is->subtitle_st = NULL;
2237 is->subtitle_stream = -1;
2238 break;
2239 default:
2240 break;
2241 }
2242 }
2243
2244 /* since we have only one decoding thread, we can use a global
2245 variable instead of a thread local variable */
2246 static PlayerState *global_video_state;
2247
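/* passed to libavformat as the interrupt callback: it is polled during
   blocking I/O, and a non-zero return aborts the pending open/read so the
   player can shut down promptly */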
2248 static int decode_interrupt_cb(void *ctx)
2249 {
2250 return global_video_state && global_video_state->abort_request;
2251 }
2252
2253 static void stream_close(PlayerState *is)
2254 {
2255 /* disable interrupting */
2256 global_video_state = NULL;
2257
2258 /* close each stream */
2259 if (is->audio_stream >= 0)
2260 stream_component_close(is, is->audio_stream);
2261 if (is->video_stream >= 0)
2262 stream_component_close(is, is->video_stream);
2263 if (is->subtitle_stream >= 0)
2264 stream_component_close(is, is->subtitle_stream);
2265 if (is->ic) {
2266 avformat_close_input(&is->ic);
2267 }
2268 }
2269
2270 static int stream_setup(PlayerState *is)
2271 {
2272 AVFormatContext *ic = NULL;
2273 int err, i, ret;
2274 int st_index[AVMEDIA_TYPE_NB];
2275 AVDictionaryEntry *t;
2276 AVDictionary **opts;
2277 int orig_nb_streams;
2278
2279 memset(st_index, -1, sizeof(st_index));
2280 is->video_stream = -1;
2281 is->audio_stream = -1;
2282 is->subtitle_stream = -1;
2283
2284 global_video_state = is;
2285
2286 ic = avformat_alloc_context();
2287 if (!ic) {
2288 av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2289 ret = AVERROR(ENOMEM);
2290 goto fail;
2291 }
2292 ic->interrupt_callback.callback = decode_interrupt_cb;
2293 err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2294 if (err < 0) {
2295 print_error(is->filename, err);
2296 ret = -1;
2297 goto fail;
2298 }
2299
2300 if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2301 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2302 ret = AVERROR_OPTION_NOT_FOUND;
2303 goto fail;
2304 }
2305 is->ic = ic;
2306
2307 if (genpts)
2308 ic->flags |= AVFMT_FLAG_GENPTS;
2309
2310 opts = setup_find_stream_info_opts(ic, codec_opts);
2311 orig_nb_streams = ic->nb_streams;
2312
2313 for (i = 0; i < ic->nb_streams; i++)
2314 choose_decoder(is, ic, ic->streams[i]);
2315
2316 err = avformat_find_stream_info(ic, opts);
2317
2318 for (i = 0; i < orig_nb_streams; i++)
2319 av_dict_free(&opts[i]);
2320 av_freep(&opts);
2321
2322 if (err < 0) {
2323 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2324 ret = -1;
2325 goto fail;
2326 }
2327
2328 if (ic->pb)
2329 ic->pb->eof_reached = 0; // FIXME hack: avplay should probably not use url_feof() to test for the end
2330
2331 if (seek_by_bytes < 0)
2332 seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2333
2334 /* if a seek was requested, execute it */
2335 if (start_time != AV_NOPTS_VALUE) {
2336 int64_t timestamp;
2337
2338 timestamp = start_time;
2339 /* add the stream start time */
2340 if (ic->start_time != AV_NOPTS_VALUE)
2341 timestamp += ic->start_time;
2342 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2343 if (ret < 0) {
2344 fprintf(stderr, "%s: could not seek to position %0.3f\n",
2345 is->filename, (double)timestamp / AV_TIME_BASE);
2346 }
2347 }
2348
2349 for (i = 0; i < ic->nb_streams; i++)
2350 ic->streams[i]->discard = AVDISCARD_ALL;
2351 if (!video_disable)
2352 st_index[AVMEDIA_TYPE_VIDEO] =
2353 av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2354 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2355 if (!audio_disable)
2356 st_index[AVMEDIA_TYPE_AUDIO] =
2357 av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2358 wanted_stream[AVMEDIA_TYPE_AUDIO],
2359 st_index[AVMEDIA_TYPE_VIDEO],
2360 NULL, 0);
2361 if (!video_disable)
2362 st_index[AVMEDIA_TYPE_SUBTITLE] =
2363 av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2364 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2365 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2366 st_index[AVMEDIA_TYPE_AUDIO] :
2367 st_index[AVMEDIA_TYPE_VIDEO]),
2368 NULL, 0);
2369 if (show_status) {
2370 av_dump_format(ic, 0, is->filename, 0);
2371 }
2372
2373 /* open the streams */
2374 if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2375 stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2376 }
2377
2378 ret = -1;
2379 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2380 ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2381 }
2382 if (ret < 0) {
2383 if (!display_disable)
2384 is->show_audio = 2;
2385 }
2386
2387 if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2388 stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2389 }
2390
2391 if (is->video_stream < 0 && is->audio_stream < 0) {
2392 fprintf(stderr, "%s: could not open codecs\n", is->filename);
2393 ret = -1;
2394 goto fail;
2395 }
2396
2397 return 0;
2398
2399 fail:
2400 return ret;
2401 }
2402
2403 /* this thread gets the stream from the disk or the network */
2404 static int decode_thread(void *arg)
2405 {
2406 PlayerState *is = arg;
2407 AVPacket pkt1, *pkt = &pkt1;
2408 AVFormatContext *ic = is->ic;
2409 int pkt_in_play_range = 0;
2410 int ret, eof = 0;
2411
2412 for (;;) {
2413 if (is->abort_request)
2414 break;
2415 if (is->paused != is->last_paused) {
2416 is->last_paused = is->paused;
2417 if (is->paused)
2418 is->read_pause_return = av_read_pause(ic);
2419 else
2420 av_read_play(ic);
2421 }
2422 #if CONFIG_RTSP_DEMUXER
2423 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2424 /* wait 10 ms to avoid trying to get another packet */
2425 /* XXX: horrible */
2426 SDL_Delay(10);
2427 continue;
2428 }
2429 #endif
2430 if (is->seek_req) {
2431 int64_t seek_target = is->seek_pos;
2432 int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2433 int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2434 // FIXME the +-2 compensates for rounding not being done in the correct
2435 // direction when the seek_pos/seek_rel variables are generated
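// e.g. for a forward relative seek (seek_rel > 0) the minimum is clamped to
// just past the previous position, so the demuxer cannot snap back to (or
// before) where playback already was; backward seeks clamp the maximum
// symmetrically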
2436
2437 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2438 if (ret < 0) {
2439 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2440 } else {
2441 if (is->audio_stream >= 0) {
2442 packet_queue_flush(&is->audioq);
2443 packet_queue_put(&is->audioq, &flush_pkt);
2444 }
2445 if (is->subtitle_stream >= 0) {
2446 packet_queue_flush(&is->subtitleq);
2447 packet_queue_put(&is->subtitleq, &flush_pkt);
2448 }
2449 if (is->video_stream >= 0) {
2450 packet_queue_flush(&is->videoq);
2451 packet_queue_put(&is->videoq, &flush_pkt);
2452 }
2453 }
2454 is->seek_req = 0;
2455 eof = 0;
2456 }
2457
2458 /* if the queues are full, no need to read more */
2459 if (!infinite_buffer &&
2460 (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2461 || ( (is->audioq .size > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2462 && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0)
2463 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2464 /* wait 10 ms */
2465 SDL_Delay(10);
2466 continue;
2467 }
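/* the test above throttles the demuxer: it sleeps while the combined queues
   exceed MAX_QUEUE_SIZE, or while every active stream already has enough
   data buffered, keeping memory use bounded on fast or local sources */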
2468 if (eof) {
2469 if (is->video_stream >= 0) {
2470 av_init_packet(pkt);
2471 pkt->data = NULL;
2472 pkt->size = 0;
2473 pkt->stream_index = is->video_stream;
2474 packet_queue_put(&is->videoq, pkt);
2475 }
2476 if (is->audio_stream >= 0 &&
2477 (is->audio_dec->codec->capabilities & AV_CODEC_CAP_DELAY)) {
2478 av_init_packet(pkt);
2479 pkt->data = NULL;
2480 pkt->size = 0;
2481 pkt->stream_index = is->audio_stream;
2482 packet_queue_put(&is->audioq, pkt);
2483 }
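/* at EOF, empty packets are queued so that decoders advertising
   AV_CODEC_CAP_DELAY can drain the frames they still have buffered */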
2484 SDL_Delay(10);
2485 if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2486 if (loop != 1 && (!loop || --loop)) {
2487 stream_seek(player, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2488 } else if (!noautoexit) {
2489 ret = AVERROR_EOF;
2490 goto fail;
2491 }
2492 }
2493 continue;
2494 }
2495 ret = av_read_frame(ic, pkt);
2496 if (ret < 0) {
2497 if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2498 eof = 1;
2499 if (ic->pb && ic->pb->error)
2500 break;
2501 SDL_Delay(100); /* wait for user event */
2502 continue;
2503 }
2504 /* check whether the packet is in the play range specified by the user; queue it if so, otherwise discard it */
2505 pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2506 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2507 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2508 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2509 <= ((double)duration / 1000000);
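/* only the upper bound is enforced here: e.g. with "-ss 30 -t 10" the seek
   positions the demuxer near 30 s, and this check then drops any packet whose
   stream-relative timestamp is more than ~10 s past that start point */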
2510 if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2511 packet_queue_put(&is->audioq, pkt);
2512 } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2513 packet_queue_put(&is->videoq, pkt);
2514 } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2515 packet_queue_put(&is->subtitleq, pkt);
2516 } else {
2517 av_packet_unref(pkt);
2518 }
2519 }
2520 /* wait until the end */
2521 while (!is->abort_request) {
2522 SDL_Delay(100);
2523 }
2524
2525 ret = 0;
2526
2527 fail:
2528 stream_close(is);
2529
2530 if (ret != 0) {
2531 SDL_Event event;
2532
2533 event.type = FF_QUIT_EVENT;
2534 event.user.data1 = is;
2535 SDL_PushEvent(&event);
2536 }
2537 return 0;
2538 }
2539
2540 static int stream_open(PlayerState *is,
2541 const char *filename, AVInputFormat *iformat)
2542 {
2543 int ret;
2544
2545 av_strlcpy(is->filename, filename, sizeof(is->filename));
2546 is->iformat = iformat;
2547 is->ytop = 0;
2548 is->xleft = 0;
2549
2550 if ((ret = stream_setup(is)) < 0) {
2551 return ret;
2552 }
2553
2554 /* start video display */
2555 is->pictq_mutex = SDL_CreateMutex();
2556 is->pictq_cond = SDL_CreateCond();
2557
2558 is->subpq_mutex = SDL_CreateMutex();
2559 is->subpq_cond = SDL_CreateCond();
2560
2561 is->av_sync_type = av_sync_type;
2562 is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2563 if (!is->refresh_tid)
2564 return -1;
2565 is->parse_tid = SDL_CreateThread(decode_thread, is);
2566 if (!is->parse_tid)
2567 return -1;
2568 return 0;
2569 }
2570
2571 static void stream_cycle_channel(PlayerState *is, int codec_type)
2572 {
2573 AVFormatContext *ic = is->ic;
2574 int start_index, stream_index;
2575 AVStream *st;
2576
2577 if (codec_type == AVMEDIA_TYPE_VIDEO)
2578 start_index = is->video_stream;
2579 else if (codec_type == AVMEDIA_TYPE_AUDIO)
2580 start_index = is->audio_stream;
2581 else
2582 start_index = is->subtitle_stream;
2583 if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2584 return;
2585 stream_index = start_index;
2586 for (;;) {
2587 if (++stream_index >= is->ic->nb_streams)
2588 {
2589 if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2590 {
2591 stream_index = -1;
2592 goto the_end;
2593 } else
2594 stream_index = 0;
2595 }
2596 if (stream_index == start_index)
2597 return;
2598 st = ic->streams[stream_index];
2599 if (st->codecpar->codec_type == codec_type) {
2600 /* check that parameters are OK */
2601 switch (codec_type) {
2602 case AVMEDIA_TYPE_AUDIO:
2603 if (st->codecpar->sample_rate != 0 &&
2604 st->codecpar->channels != 0)
2605 goto the_end;
2606 break;
2607 case AVMEDIA_TYPE_VIDEO:
2608 case AVMEDIA_TYPE_SUBTITLE:
2609 goto the_end;
2610 default:
2611 break;
2612 }
2613 }
2614 }
2615 the_end:
2616 stream_component_close(is, start_index);
2617 stream_component_open(is, stream_index);
2618 }
2619
2620
2621 static void toggle_full_screen(void)
2622 {
2623 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2624 /* OS X needs to empty the picture_queue */
2625 int i;
2626 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2627 player->pictq[i].reallocate = 1;
2628 #endif
2629 is_full_screen = !is_full_screen;
2630 video_open(player);
2631 }
2632
2633 static void toggle_pause(void)
2634 {
2635 if (player)
2636 stream_pause(player);
2637 step = 0;
2638 }
2639
2640 static void step_to_next_frame(void)
2641 {
2642 if (player) {
2643 /* if the stream is paused, unpause it, then step */
2644 if (player->paused)
2645 stream_pause(player);
2646 }
2647 step = 1;
2648 }
2649
2650 static void toggle_audio_display(void)
2651 {
2652 if (player) {
2653 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2654 player->show_audio = (player->show_audio + 1) % 3;
2655 fill_rectangle(screen,
2656 player->xleft, player->ytop, player->width, player->height,
2657 bgcolor);
2658 SDL_UpdateRect(screen, player->xleft, player->ytop, player->width, player->height);
2659 }
2660 }
2661
2662 static void seek_chapter(PlayerState *is, int incr)
2663 {
2664 int64_t pos = get_master_clock(is) * AV_TIME_BASE;
2665 int i;
2666
2667 if (!is->ic->nb_chapters)
2668 return;
2669
2670 /* find the current chapter */
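/* the comparison finds the first chapter that starts after 'pos'; stepping
   i back by one then yields the chapter that 'pos' currently falls in */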
2671 for (i = 0; i < is->ic->nb_chapters; i++) {
2672 AVChapter *ch = is->ic->chapters[i];
2673 if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
2674 i--;
2675 break;
2676 }
2677 }
2678
2679 i += incr;
2680 i = FFMAX(i, 0);
2681 if (i >= is->ic->nb_chapters)
2682 return;
2683
2684 av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
2685 stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
2686 AV_TIME_BASE_Q), 0, 0);
2687 }
2688
2689 /* handle an event sent by the GUI */
2690 static void event_loop(void)
2691 {
2692 SDL_Event event;
2693 double incr, pos, frac;
2694
2695 for (;;) {
2696 double x;
2697 SDL_WaitEvent(&event);
2698 switch (event.type) {
2699 case SDL_KEYDOWN:
2700 if (exit_on_keydown) {
2701 do_exit();
2702 break;
2703 }
2704 switch (event.key.keysym.sym) {
2705 case SDLK_ESCAPE:
2706 case SDLK_q:
2707 do_exit();
2708 break;
2709 case SDLK_f:
2710 toggle_full_screen();
2711 break;
2712 case SDLK_p:
2713 case SDLK_SPACE:
2714 toggle_pause();
2715 break;
2716 case SDLK_s: // S: Step to next frame
2717 step_to_next_frame();
2718 break;
2719 case SDLK_a:
2720 if (player)
2721 stream_cycle_channel(player, AVMEDIA_TYPE_AUDIO);
2722 break;
2723 case SDLK_v:
2724 if (player)
2725 stream_cycle_channel(player, AVMEDIA_TYPE_VIDEO);
2726 break;
2727 case SDLK_t:
2728 if (player)
2729 stream_cycle_channel(player, AVMEDIA_TYPE_SUBTITLE);
2730 break;
2731 case SDLK_w:
2732 toggle_audio_display();
2733 break;
2734 case SDLK_PAGEUP:
2735 seek_chapter(player, 1);
2736 break;
2737 case SDLK_PAGEDOWN:
2738 seek_chapter(player, -1);
2739 break;
2740 case SDLK_LEFT:
2741 incr = -10.0;
2742 goto do_seek;
2743 case SDLK_RIGHT:
2744 incr = 10.0;
2745 goto do_seek;
2746 case SDLK_UP:
2747 incr = 60.0;
2748 goto do_seek;
2749 case SDLK_DOWN:
2750 incr = -60.0;
2751 do_seek:
2752 if (player) {
2753 if (seek_by_bytes) {
2754 if (player->video_stream >= 0 && player->video_current_pos >= 0) {
2755 pos = player->video_current_pos;
2756 } else if (player->audio_stream >= 0 && player->audio_pkt.pos >= 0) {
2757 pos = player->audio_pkt.pos;
2758 } else
2759 pos = avio_tell(player->ic->pb);
2760 if (player->ic->bit_rate)
2761 incr *= player->ic->bit_rate / 8.0;
2762 else
2763 incr *= 180000.0;
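/* convert the requested seconds into a byte offset, e.g. (illustrative)
   10 s at 1 Mb/s is 10 * 1000000 / 8 = 1.25 MB; 180000 bytes/s is a rough
   fallback when the container reports no bit rate */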
2764 pos += incr;
2765 stream_seek(player, pos, incr, 1);
2766 } else {
2767 pos = get_master_clock(player);
2768 pos += incr;
2769 stream_seek(player, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2770 }
2771 }
2772 break;
2773 default:
2774 break;
2775 }
2776 break;
2777 case SDL_MOUSEBUTTONDOWN:
2778 if (exit_on_mousedown) {
2779 do_exit();
2780 break;
2781 }
2782 case SDL_MOUSEMOTION:
2783 if (event.type == SDL_MOUSEBUTTONDOWN) {
2784 x = event.button.x;
2785 } else {
2786 if (event.motion.state != SDL_PRESSED)
2787 break;
2788 x = event.motion.x;
2789 }
2790 if (player) {
2791 if (seek_by_bytes || player->ic->duration <= 0) {
2792 uint64_t size = avio_size(player->ic->pb);
2793 stream_seek(player, size*x/player->width, 0, 1);
2794 } else {
2795 int64_t ts;
2796 int ns, hh, mm, ss;
2797 int tns, thh, tmm, tss;
2798 tns = player->ic->duration / 1000000LL;
2799 thh = tns / 3600;
2800 tmm = (tns % 3600) / 60;
2801 tss = (tns % 60);
2802 frac = x / player->width;
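/* e.g. clicking at 3/4 of the window width on a two-hour file seeks to
   roughly 1:30:00 */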
2803 ns = frac * tns;
2804 hh = ns / 3600;
2805 mm = (ns % 3600) / 60;
2806 ss = (ns % 60);
2807 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2808 hh, mm, ss, thh, tmm, tss);
2809 ts = frac * player->ic->duration;
2810 if (player->ic->start_time != AV_NOPTS_VALUE)
2811 ts += player->ic->start_time;
2812 stream_seek(player, ts, 0, 0);
2813 }
2814 }
2815 break;
2816 case SDL_VIDEORESIZE:
2817 if (player) {
2818 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2819 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2820 screen_width = player->width = event.resize.w;
2821 screen_height = player->height = event.resize.h;
2822 }
2823 break;
2824 case SDL_QUIT:
2825 case FF_QUIT_EVENT:
2826 do_exit();
2827 break;
2828 case FF_ALLOC_EVENT:
2829 video_open(event.user.data1);
2830 alloc_picture(event.user.data1);
2831 break;
2832 case FF_REFRESH_EVENT:
2833 video_refresh_timer(event.user.data1);
2834 player->refresh = 0;
2835 break;
2836 default:
2837 break;
2838 }
2839 }
2840 }
2841
2842 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2843 {
2844 av_log(NULL, AV_LOG_ERROR,
2845 "Option '%s' has been removed, use private format options instead\n", opt);
2846 return AVERROR(EINVAL);
2847 }
2848
2849 static int opt_width(void *optctx, const char *opt, const char *arg)
2850 {
2851 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2852 return 0;
2853 }
2854
2855 static int opt_height(void *optctx, const char *opt, const char *arg)
2856 {
2857 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2858 return 0;
2859 }
2860
2861 static int opt_format(void *optctx, const char *opt, const char *arg)
2862 {
2863 file_iformat = av_find_input_format(arg);
2864 if (!file_iformat) {
2865 fprintf(stderr, "Unknown input format: %s\n", arg);
2866 return AVERROR(EINVAL);
2867 }
2868 return 0;
2869 }
2870
2871 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2872 {
2873 av_log(NULL, AV_LOG_ERROR,
2874 "Option '%s' has been removed, use private format options instead\n", opt);
2875 return AVERROR(EINVAL);
2876 }
2877
2878 static int opt_sync(void *optctx, const char *opt, const char *arg)
2879 {
2880 if (!strcmp(arg, "audio"))
2881 av_sync_type = AV_SYNC_AUDIO_MASTER;
2882 else if (!strcmp(arg, "video"))
2883 av_sync_type = AV_SYNC_VIDEO_MASTER;
2884 else if (!strcmp(arg, "ext"))
2885 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2886 else {
2887 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2888 exit(1);
2889 }
2890 return 0;
2891 }
2892
2893 static int opt_seek(void *optctx, const char *opt, const char *arg)
2894 {
2895 start_time = parse_time_or_die(opt, arg, 1);
2896 return 0;
2897 }
2898
2899 static int opt_duration(void *optctx, const char *opt, const char *arg)
2900 {
2901 duration = parse_time_or_die(opt, arg, 1);
2902 return 0;
2903 }
2904
2905 #define OFF(x) offsetof(PlayerState, x)
2906 static const OptionDef options[] = {
2907 #include "cmdutils_common_opts.h"
2908 { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2909 { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2910 { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2911 { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2912 { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2913 { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2914 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2915 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2916 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2917 { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2918 { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2919 { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2920 { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2921 { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2922 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2923 { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2924 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2925 { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2926 { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2927 { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2928 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2929 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2930 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2931 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo", "algo" },
2932 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
2933 { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2934 { "noautoexit", OPT_BOOL | OPT_EXPERT, { &noautoexit }, "Do not exit at the end of playback", "" },
2935 { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2936 { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2937 { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2938 { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2939 { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2940 { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2941 { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2942 { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2943 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catchall option", "" },
2944 { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2945 { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
2946 { "c", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_INPUT, { .off = OFF(codec_names) }, "codec name", "codec" },
2947 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_INPUT, { .off = OFF(codec_names) }, "codec name", "codec" },
2948
2949 { NULL, },
2950 };
2951
2952 static void show_usage(void)
2953 {
2954 printf("Simple media player\n");
2955 printf("usage: %s [options] input_file\n", program_name);
2956 printf("\n");
2957 }
2958
2959 void show_help_default(const char *opt, const char *arg)
2960 {
2961 av_log_set_callback(log_callback_help);
2962 show_usage();
2963 show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2964 show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2965 printf("\n");
2966 show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2967 show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2968 printf("\nWhile playing:\n"
2969 "q, ESC quit\n"
2970 "f toggle full screen\n"
2971 "p, SPC pause\n"
2972 "a cycle audio channel\n"
2973 "v cycle video channel\n"
2974 "t cycle subtitle channel\n"
2975 "w show audio waves\n"
2976 "s activate frame-step mode\n"
2977 "left/right seek backward/forward 10 seconds\n"
2978 "down/up seek backward/forward 1 minute\n"
2979 "mouse click seek to percentage in file corresponding to fraction of width\n"
2980 );
2981 }
2982
2983 static void opt_input_file(void *optctx, const char *filename)
2984 {
2985 if (input_filename) {
2986 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2987 filename, input_filename);
2988 exit(1);
2989 }
2990 if (!strcmp(filename, "-"))
2991 filename = "pipe:";
2992 input_filename = filename;
2993 }
2994
2995 /* program entry point */
2996 int main(int argc, char **argv)
2997 {
2998 int flags;
2999
3000 av_log_set_flags(AV_LOG_SKIP_REPEATED);
3001 parse_loglevel(argc, argv, options);
3002
3003 /* register all codecs, demuxers and protocols */
3004 avcodec_register_all();
3005 #if CONFIG_AVDEVICE
3006 avdevice_register_all();
3007 #endif
3008 avfilter_register_all();
3009 av_register_all();
3010 avformat_network_init();
3011
3012 init_opts();
3013
3014 show_banner();
3015
3016 parse_options(player, argc, argv, options, opt_input_file);
3017
3018 if (!input_filename) {
3019 show_usage();
3020 fprintf(stderr, "An input file must be specified\n");
3021 fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3022 exit(1);
3023 }
3024
3025 if (display_disable) {
3026 video_disable = 1;
3027 }
3028 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3029 #if !defined(__MINGW32__) && !defined(__APPLE__)
3030 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3031 #endif
3032 if (SDL_Init (flags)) {
3033 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3034 exit(1);
3035 }
3036
3037 if (!display_disable) {
3038 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3039 fs_screen_width = vi->current_w;
3040 fs_screen_height = vi->current_h;
3041 }
3042
3043 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3044 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3045 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3046
3047 av_init_packet(&flush_pkt);
3048 flush_pkt.data = (uint8_t *)&flush_pkt;
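/* flush_pkt is a sentinel whose data pointer refers to itself; when the
   decode loops pull it from a queue after a seek, they recognise it and call
   avcodec_flush_buffers() instead of decoding it */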
3049
3050 if (stream_open(player, input_filename, file_iformat) < 0) {
3051 fprintf(stderr, "Could not setup the player\n");
3052 stream_close(player);
3053 exit(1);
3054 }
3055
3056 event_loop();
3057
3058 /* never returns */
3059
3060 return 0;
3061 }