1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19 #define HAVE_AV_CONFIG_H
20 #include "avformat.h"
21
22 #include "cmdutils.h"
23
24 #include <SDL.h>
25 #include <SDL_thread.h>
26
27 #ifdef CONFIG_WIN32
28 #undef main /* We don't want SDL to override our main() */
29 #endif
30
31 #if defined(__linux__)
32 #define HAVE_X11
33 #endif
34
35 #ifdef HAVE_X11
36 #include <X11/Xlib.h>
37 #endif
38
39 //#define DEBUG_SYNC
40
41 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
42 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
43
44 /* SDL audio buffer size, in samples. Should be small to have precise
45 A/V sync as SDL does not have hardware buffer fullness info. */
46 #define SDL_AUDIO_BUFFER_SIZE 1024
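/* e.g. at a typical 44.1 kHz output rate, 1024 samples per callback is
   roughly 23 ms of audio (1024 / 44100 ~= 0.023 s), so the clock derived
   from the audio position is accurate to about one callback period */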
47
48 /* no AV sync correction is done if below the AV sync threshold */
49 #define AV_SYNC_THRESHOLD 0.08
50 /* no AV correction is done if too big error */
51 #define AV_NOSYNC_THRESHOLD 10.0
52
53 /* maximum audio speed change to get correct sync */
54 #define SAMPLE_CORRECTION_PERCENT_MAX 10
55
56 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
57 #define AUDIO_DIFF_AVG_NB 20
58
/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
60 #define SAMPLE_ARRAY_SIZE (2*65536)
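/* e.g. for 16-bit stereo at 44.1 kHz, 2*65536 interleaved samples cover
   about 1.5 s of audio, comfortably more than any SDL hardware buffer */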
61
62 typedef struct PacketQueue {
63 AVPacketList *first_pkt, *last_pkt;
64 int nb_packets;
65 int size;
66 int abort_request;
67 SDL_mutex *mutex;
68 SDL_cond *cond;
69 } PacketQueue;
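/* A PacketQueue is a singly linked list of AVPacketList nodes protected by
   an SDL mutex/condition pair.  'size' counts the queued payload bytes so
   decode_thread can stop reading once MAX_AUDIOQ_SIZE or MAX_VIDEOQ_SIZE is
   reached, and 'abort_request' lets blocked readers give up when the stream
   is being closed. */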
70
71 #define VIDEO_PICTURE_QUEUE_SIZE 1
72
73 typedef struct VideoPicture {
74 double pts; /* presentation time stamp for this picture */
75 SDL_Overlay *bmp;
76 int width, height; /* source height & width */
77 int allocated;
78 } VideoPicture;
79
80 enum {
81 AV_SYNC_AUDIO_MASTER, /* default choice */
82 AV_SYNC_VIDEO_MASTER,
83 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
84 };
85
86 typedef struct VideoState {
87 SDL_Thread *parse_tid;
88 SDL_Thread *video_tid;
89 AVInputFormat *iformat;
90 int no_background;
91 int abort_request;
92 int paused;
93 int last_paused;
94 AVFormatContext *ic;
95 int dtg_active_format;
96
97 int audio_stream;
98
99 int av_sync_type;
100 double external_clock; /* external clock base */
101 int64_t external_clock_time;
102
103 double audio_clock;
104 double audio_diff_cum; /* used for AV difference average computation */
105 double audio_diff_avg_coef;
106 double audio_diff_threshold;
107 int audio_diff_avg_count;
108 AVStream *audio_st;
109 PacketQueue audioq;
110 int audio_hw_buf_size;
111 /* samples output by the codec. we reserve more space for avsync
112 compensation */
113 uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
114 int audio_buf_size; /* in bytes */
115 int audio_buf_index; /* in bytes */
116 AVPacket audio_pkt;
117 uint8_t *audio_pkt_data;
118 int audio_pkt_size;
119 int64_t audio_pkt_ipts;
120
121 int show_audio; /* if true, display audio samples */
122 int16_t sample_array[SAMPLE_ARRAY_SIZE];
123 int sample_array_index;
124 int last_i_start;
125
126 double frame_timer;
127 double frame_last_pts;
128 double frame_last_delay;
129 double video_clock;
130 int video_stream;
131 AVStream *video_st;
132 PacketQueue videoq;
133 int64_t ipts;
134 int picture_start; /* true if picture starts */
135 double video_last_P_pts; /* pts of the last P picture (needed if B
136 frames are present) */
137 double video_current_pts; /* current displayed pts (different from
138 video_clock if frame fifos are used) */
139 int64_t video_current_pts_time; /* time at which we updated
140 video_current_pts - used to
141 have running video pts */
142 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
143 int pictq_size, pictq_rindex, pictq_windex;
144 SDL_mutex *pictq_mutex;
145 SDL_cond *pictq_cond;
146
147 // QETimer *video_timer;
148 char filename[1024];
149 int width, height, xleft, ytop;
150 } VideoState;
151
152 void show_help(void);
153 static int audio_write_get_buf_size(VideoState *is);
154
155 /* options specified by the user */
156 static AVInputFormat *file_iformat;
157 static AVImageFormat *image_format;
158 static const char *input_filename;
159 static int fs_screen_width;
160 static int fs_screen_height;
161 static int screen_width = 640;
162 static int screen_height = 480;
163 static int audio_disable;
164 static int video_disable;
165 static int display_disable;
166 static int show_status;
167 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
168
169 /* current context */
170 static int is_full_screen;
171 static VideoState *cur_stream;
172 static int64_t audio_callback_time;
173
174 #define FF_ALLOC_EVENT (SDL_USEREVENT)
175 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
176 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
177
178 SDL_Surface *screen;
179
180 /* packet queue handling */
181 static void packet_queue_init(PacketQueue *q)
182 {
183 memset(q, 0, sizeof(PacketQueue));
184 q->mutex = SDL_CreateMutex();
185 q->cond = SDL_CreateCond();
186 }
187
static void packet_queue_end(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_free(pkt);
    }
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
199
200 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
201 {
202 AVPacketList *pkt1;
203
204 pkt1 = av_malloc(sizeof(AVPacketList));
205 if (!pkt1)
206 return -1;
207 pkt1->pkt = *pkt;
208 pkt1->next = NULL;
209
210 SDL_LockMutex(q->mutex);
211
    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
218 q->nb_packets++;
219 q->size += pkt1->pkt.size;
220 /* XXX: should duplicate packet data in DV case */
221 SDL_CondSignal(q->cond);
222
223 SDL_UnlockMutex(q->mutex);
224 return 0;
225 }
226
227 static void packet_queue_abort(PacketQueue *q)
228 {
229 SDL_LockMutex(q->mutex);
230
231 q->abort_request = 1;
232
233 SDL_CondSignal(q->cond);
234
235 SDL_UnlockMutex(q->mutex);
236 }
237
238 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
239 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
240 {
241 AVPacketList *pkt1;
242 int ret;
243
244 SDL_LockMutex(q->mutex);
245
246 for(;;) {
247 if (q->abort_request) {
248 ret = -1;
249 break;
250 }
251
252 pkt1 = q->first_pkt;
253 if (pkt1) {
254 q->first_pkt = pkt1->next;
255 if (!q->first_pkt)
256 q->last_pkt = NULL;
257 q->nb_packets--;
258 q->size -= pkt1->pkt.size;
259 *pkt = pkt1->pkt;
260 av_free(pkt1);
261 ret = 1;
262 break;
263 } else if (!block) {
264 ret = 0;
265 break;
266 } else {
267 SDL_CondWait(q->cond, q->mutex);
268 }
269 }
270 SDL_UnlockMutex(q->mutex);
271 return ret;
272 }
273
274 static inline void fill_rectangle(SDL_Surface *screen,
275 int x, int y, int w, int h, int color)
276 {
277 SDL_Rect rect;
278 rect.x = x;
279 rect.y = y;
280 rect.w = w;
281 rect.h = h;
282 SDL_FillRect(screen, &rect, color);
283 }
284
285 #if 0
286 /* draw only the border of a rectangle */
287 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
288 {
289 int w1, w2, h1, h2;
290
291 /* fill the background */
292 w1 = x;
293 if (w1 < 0)
294 w1 = 0;
295 w2 = s->width - (x + w);
296 if (w2 < 0)
297 w2 = 0;
298 h1 = y;
299 if (h1 < 0)
300 h1 = 0;
301 h2 = s->height - (y + h);
302 if (h2 < 0)
303 h2 = 0;
304 fill_rectangle(screen,
305 s->xleft, s->ytop,
306 w1, s->height,
307 color);
308 fill_rectangle(screen,
309 s->xleft + s->width - w2, s->ytop,
310 w2, s->height,
311 color);
312 fill_rectangle(screen,
313 s->xleft + w1, s->ytop,
314 s->width - w1 - w2, h1,
315 color);
316 fill_rectangle(screen,
317 s->xleft + w1, s->ytop + s->height - h2,
318 s->width - w1 - w2, h2,
319 color);
320 }
321 #endif
322
323 static void video_image_display(VideoState *is)
324 {
325 VideoPicture *vp;
326 float aspect_ratio;
327 int width, height, x, y;
328 SDL_Rect rect;
329
330 vp = &is->pictq[is->pictq_rindex];
331 if (vp->bmp) {
332 /* XXX: use variable in the frame */
        aspect_ratio = av_q2d(is->video_st->codec.sample_aspect_ratio)
            * is->video_st->codec.width / is->video_st->codec.height;
335 if (aspect_ratio <= 0.0)
336 aspect_ratio = (float)is->video_st->codec.width /
337 (float)is->video_st->codec.height;
338 /* if an active format is indicated, then it overrides the
339 mpeg format */
340 #if 0
341 if (is->video_st->codec.dtg_active_format != is->dtg_active_format) {
342 is->dtg_active_format = is->video_st->codec.dtg_active_format;
343 printf("dtg_active_format=%d\n", is->dtg_active_format);
344 }
345 #endif
346 #if 0
347 switch(is->video_st->codec.dtg_active_format) {
348 case FF_DTG_AFD_SAME:
349 default:
350 /* nothing to do */
351 break;
352 case FF_DTG_AFD_4_3:
353 aspect_ratio = 4.0 / 3.0;
354 break;
355 case FF_DTG_AFD_16_9:
356 aspect_ratio = 16.0 / 9.0;
357 break;
358 case FF_DTG_AFD_14_9:
359 aspect_ratio = 14.0 / 9.0;
360 break;
361 case FF_DTG_AFD_4_3_SP_14_9:
362 aspect_ratio = 14.0 / 9.0;
363 break;
364 case FF_DTG_AFD_16_9_SP_14_9:
365 aspect_ratio = 14.0 / 9.0;
366 break;
367 case FF_DTG_AFD_SP_4_3:
368 aspect_ratio = 4.0 / 3.0;
369 break;
370 }
371 #endif
372
        /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
374 height = is->height;
375 width = ((int)rint(height * aspect_ratio)) & -3;
376 if (width > is->width) {
377 width = is->width;
378 height = ((int)rint(width / aspect_ratio)) & -3;
379 }
380 x = (is->width - width) / 2;
381 y = (is->height - height) / 2;
382 if (!is->no_background) {
383 /* fill the background */
384 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
385 } else {
386 is->no_background = 0;
387 }
388 rect.x = is->xleft + x;
        rect.y = is->ytop + y;
390 rect.w = width;
391 rect.h = height;
392 SDL_DisplayYUVOverlay(vp->bmp, &rect);
393 } else {
394 #if 0
395 fill_rectangle(screen,
396 is->xleft, is->ytop, is->width, is->height,
397 QERGB(0x00, 0x00, 0x00));
398 #endif
399 }
400 }
401
402 static inline int compute_mod(int a, int b)
403 {
404 a = a % b;
405 if (a >= 0)
406 return a;
407 else
408 return a + b;
409 }
410
411 static void video_audio_display(VideoState *s)
412 {
413 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
414 int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;
416
417 /* compute display index : center on currently output samples */
418 channels = s->audio_st->codec.channels;
419 nb_display_channels = channels;
420 if (!s->paused) {
421 n = 2 * channels;
422 delay = audio_write_get_buf_size(s);
423 delay /= n;
424
425 /* to be more precise, we take into account the time spent since
426 the last buffer computation */
427 if (audio_callback_time) {
428 time_diff = av_gettime() - audio_callback_time;
429 delay += (time_diff * s->audio_st->codec.sample_rate) / 1000000;
430 }
431
432 delay -= s->width / 2;
433 if (delay < s->width)
434 delay = s->width;
435 i_start = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
436 s->last_i_start = i_start;
437 } else {
438 i_start = s->last_i_start;
439 }
440
441 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
442 fill_rectangle(screen,
443 s->xleft, s->ytop, s->width, s->height,
444 bgcolor);
445
446 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
447
448 /* total height for one channel */
449 h = s->height / nb_display_channels;
450 /* graph height / 2 */
451 h2 = (h * 9) / 20;
452 for(ch = 0;ch < nb_display_channels; ch++) {
453 i = i_start + ch;
454 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
455 for(x = 0; x < s->width; x++) {
456 y = (s->sample_array[i] * h2) >> 15;
457 if (y < 0) {
458 y = -y;
459 ys = y1 - y;
460 } else {
461 ys = y1;
462 }
463 fill_rectangle(screen,
464 s->xleft + x, ys, 1, y,
465 fgcolor);
466 i += channels;
467 if (i >= SAMPLE_ARRAY_SIZE)
468 i -= SAMPLE_ARRAY_SIZE;
469 }
470 }
471
472 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
473
474 for(ch = 1;ch < nb_display_channels; ch++) {
475 y = s->ytop + ch * h;
476 fill_rectangle(screen,
477 s->xleft, y, s->width, 1,
478 fgcolor);
479 }
480 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
481 }
482
483 /* display the current picture, if any */
484 static void video_display(VideoState *is)
485 {
486 if (is->audio_st && is->show_audio)
487 video_audio_display(is);
488 else if (is->video_st)
489 video_image_display(is);
490 }
491
492 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
493 {
494 SDL_Event event;
495 event.type = FF_REFRESH_EVENT;
496 event.user.data1 = opaque;
497 SDL_PushEvent(&event);
498 return 0; /* 0 means stop timer */
499 }
500
501 /* schedule a video refresh in 'delay' ms */
502 static void schedule_refresh(VideoState *is, int delay)
503 {
504 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
505 }
506
507 /* get the current audio clock value */
508 static double get_audio_clock(VideoState *is)
509 {
510 double pts;
511 int hw_buf_size, bytes_per_sec;
512 pts = is->audio_clock;
513 hw_buf_size = audio_write_get_buf_size(is);
514 bytes_per_sec = 0;
515 if (is->audio_st) {
516 bytes_per_sec = is->audio_st->codec.sample_rate *
517 2 * is->audio_st->codec.channels;
518 }
519 if (bytes_per_sec)
520 pts -= (double)hw_buf_size / bytes_per_sec;
521 return pts;
522 }
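/* e.g. for 16-bit stereo at 44100 Hz, bytes_per_sec is 44100 * 2 * 2 =
   176400; 8192 bytes still sitting in the hardware buffer would therefore
   pull the returned clock back by roughly 46 ms */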
523
524 /* get the current video clock value */
525 static double get_video_clock(VideoState *is)
526 {
527 double delta;
528 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
529 return is->video_current_pts + delta;
530 }
531
532 /* get the current external clock value */
533 static double get_external_clock(VideoState *is)
534 {
535 int64_t ti;
536 ti = av_gettime();
537 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
538 }
539
540 /* get the current master clock value */
541 static double get_master_clock(VideoState *is)
542 {
543 double val;
544
545 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st)
546 val = get_video_clock(is);
547 else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st)
548 val = get_audio_clock(is);
549 else
550 val = get_external_clock(is);
551 return val;
552 }
553
554 /* called to display each frame */
555 static void video_refresh_timer(void *opaque)
556 {
557 VideoState *is = opaque;
558 VideoPicture *vp;
559 double actual_delay, delay, sync_threshold, ref_clock, diff;
560
561
562 if (is->video_st) {
563 if (is->pictq_size == 0) {
564 /* if no picture, need to wait */
565 schedule_refresh(is, 40);
566 } else {
567 /* dequeue the picture */
568 vp = &is->pictq[is->pictq_rindex];
569
570 /* update current video pts */
571 is->video_current_pts = vp->pts;
572 is->video_current_pts_time = av_gettime();
573
574 /* compute nominal delay */
575 delay = vp->pts - is->frame_last_pts;
576 if (delay <= 0 || delay >= 1.0) {
577 /* if incorrect delay, use previous one */
578 delay = is->frame_last_delay;
579 }
580 is->frame_last_delay = delay;
581 is->frame_last_pts = vp->pts;
582
583 /* update delay to follow master synchronisation source */
584 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
585 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
586 /* if video is slave, we try to correct big delays by
587 duplicating or deleting a frame */
588 ref_clock = get_master_clock(is);
589 diff = vp->pts - ref_clock;
590
591 /* skip or repeat frame. We take into account the
592 delay to compute the threshold. I still don't know
593 if it is the best guess */
594 sync_threshold = AV_SYNC_THRESHOLD;
595 if (delay > sync_threshold)
596 sync_threshold = delay;
597 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
598 if (diff <= -sync_threshold)
599 delay = 0;
600 else if (diff >= sync_threshold)
601 delay = 2 * delay;
602 }
603 }
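            /* example of the correction above: with a nominal delay of
               40 ms, a frame that is 100 ms late (diff <= -sync_threshold)
               is shown immediately (delay = 0), while a frame that is
               100 ms early has its delay doubled to 80 ms, so the video
               drifts back toward the master clock */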
604
605 is->frame_timer += delay;
            /* compute the REAL delay (we need to do that to avoid
               long term errors) */
608 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
609 if (actual_delay < 0.010) {
610 /* XXX: should skip picture */
611 actual_delay = 0.010;
612 }
613 /* launch timer for next picture */
614 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
615
616 #if defined(DEBUG_SYNC)
617 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
618 delay, actual_delay, vp->pts, -diff);
619 #endif
620
621 /* display picture */
622 video_display(is);
623
624 /* update queue size and signal for next picture */
625 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
626 is->pictq_rindex = 0;
627
628 SDL_LockMutex(is->pictq_mutex);
629 is->pictq_size--;
630 SDL_CondSignal(is->pictq_cond);
631 SDL_UnlockMutex(is->pictq_mutex);
632 }
633 } else if (is->audio_st) {
634 /* draw the next audio frame */
635
636 schedule_refresh(is, 40);
637
        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */
640
641 /* display picture */
642 video_display(is);
643 } else {
644 schedule_refresh(is, 100);
645 }
646 if (show_status) {
647 static int64_t last_time;
648 int64_t cur_time;
649 int aqsize, vqsize;
650 double av_diff;
651
652 cur_time = av_gettime();
653 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
654 aqsize = 0;
655 vqsize = 0;
656 if (is->audio_st)
657 aqsize = is->audioq.size;
658 if (is->video_st)
659 vqsize = is->videoq.size;
660 av_diff = 0;
661 if (is->audio_st && is->video_st)
662 av_diff = get_audio_clock(is) - get_video_clock(is);
663 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB \r",
664 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024);
665 fflush(stdout);
666 last_time = cur_time;
667 }
668 }
669 }
670
/* allocate a picture (this must be done in the main thread to avoid
   potential locking problems) */
673 static void alloc_picture(void *opaque)
674 {
675 VideoState *is = opaque;
676 VideoPicture *vp;
677
678 vp = &is->pictq[is->pictq_windex];
679
680 if (vp->bmp)
681 SDL_FreeYUVOverlay(vp->bmp);
682
683 #if 0
684 /* XXX: use generic function */
685 /* XXX: disable overlay if no hardware acceleration or if RGB format */
686 switch(is->video_st->codec.pix_fmt) {
687 case PIX_FMT_YUV420P:
688 case PIX_FMT_YUV422P:
689 case PIX_FMT_YUV444P:
690 case PIX_FMT_YUV422:
691 case PIX_FMT_YUV410P:
692 case PIX_FMT_YUV411P:
693 is_yuv = 1;
694 break;
695 default:
696 is_yuv = 0;
697 break;
698 }
699 #endif
700 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec.width,
701 is->video_st->codec.height,
702 SDL_YV12_OVERLAY,
703 screen);
704 vp->width = is->video_st->codec.width;
705 vp->height = is->video_st->codec.height;
706
707 SDL_LockMutex(is->pictq_mutex);
708 vp->allocated = 1;
709 SDL_CondSignal(is->pictq_cond);
710 SDL_UnlockMutex(is->pictq_mutex);
711 }
712
713 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
714 {
715 VideoPicture *vp;
716 int dst_pix_fmt;
717 AVPicture pict;
718
719 /* wait until we have space to put a new picture */
720 SDL_LockMutex(is->pictq_mutex);
721 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
722 !is->videoq.abort_request) {
723 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
724 }
725 SDL_UnlockMutex(is->pictq_mutex);
726
727 if (is->videoq.abort_request)
728 return -1;
729
730 vp = &is->pictq[is->pictq_windex];
731
732 /* alloc or resize hardware picture buffer */
733 if (!vp->bmp ||
734 vp->width != is->video_st->codec.width ||
735 vp->height != is->video_st->codec.height) {
736 SDL_Event event;
737
738 vp->allocated = 0;
739
740 /* the allocation must be done in the main thread to avoid
741 locking problems */
742 event.type = FF_ALLOC_EVENT;
743 event.user.data1 = is;
744 SDL_PushEvent(&event);
745
746 /* wait until the picture is allocated */
747 SDL_LockMutex(is->pictq_mutex);
748 while (!vp->allocated && !is->videoq.abort_request) {
749 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
750 }
751 SDL_UnlockMutex(is->pictq_mutex);
752
753 if (is->videoq.abort_request)
754 return -1;
755 }
756
757 /* if the frame is not skipped, then display it */
758 if (vp->bmp) {
759 /* get a pointer on the bitmap */
760 SDL_LockYUVOverlay (vp->bmp);
761
762 dst_pix_fmt = PIX_FMT_YUV420P;
763 pict.data[0] = vp->bmp->pixels[0];
764 pict.data[1] = vp->bmp->pixels[2];
765 pict.data[2] = vp->bmp->pixels[1];
766
767 pict.linesize[0] = vp->bmp->pitches[0];
768 pict.linesize[1] = vp->bmp->pitches[2];
769 pict.linesize[2] = vp->bmp->pitches[1];
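        /* note: the SDL YV12 overlay stores its planes in Y, V, U order
           while PIX_FMT_YUV420P is Y, U, V, hence the swapped indices above */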
770
771 img_convert(&pict, dst_pix_fmt,
772 (AVPicture *)src_frame, is->video_st->codec.pix_fmt,
773 is->video_st->codec.width, is->video_st->codec.height);
774 /* update the bitmap content */
775 SDL_UnlockYUVOverlay(vp->bmp);
776
777 vp->pts = pts;
778
779 /* now we can update the picture count */
780 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
781 is->pictq_windex = 0;
782 SDL_LockMutex(is->pictq_mutex);
783 is->pictq_size++;
784 SDL_UnlockMutex(is->pictq_mutex);
785 }
786 return 0;
787 }
788
789 /* compute the exact PTS for the picture if it is omitted in the stream */
790 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
791 {
792 double frame_delay, pts;
793
794 pts = pts1;
795
    /* if B frames are present, and if the current picture is an I
       or P frame, we use the last pts */
798 if (is->video_st->codec.has_b_frames &&
799 src_frame->pict_type != FF_B_TYPE) {
800 /* use last pts */
801 pts = is->video_last_P_pts;
802 /* get the pts for the next I or P frame if present */
803 is->video_last_P_pts = pts1;
804 }
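    /* rationale: with B frames the coded order is e.g. I P B B, so the pts
       carried by the packet of the current I/P picture belongs to a picture
       that will only be output later; the picture being output now should
       use the pts that arrived with the previous I/P packet */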
805
806 if (pts != 0) {
807 /* update video clock with pts, if present */
808 is->video_clock = pts;
809 } else {
810 frame_delay = (double)is->video_st->codec.frame_rate_base /
811 (double)is->video_st->codec.frame_rate;
812 is->video_clock += frame_delay;
813 /* for MPEG2, the frame can be repeated, so we update the
814 clock accordingly */
815 if (src_frame->repeat_pict) {
816 is->video_clock += src_frame->repeat_pict * (frame_delay * 0.5);
817 }
818 }
819
820 #if defined(DEBUG_SYNC) && 0
821 {
822 int ftype;
823 if (src_frame->pict_type == FF_B_TYPE)
824 ftype = 'B';
825 else if (src_frame->pict_type == FF_I_TYPE)
826 ftype = 'I';
827 else
828 ftype = 'P';
829 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
830 ftype, is->video_clock, pts1);
831 }
832 #endif
833 return queue_picture(is, src_frame, is->video_clock);
834 }
835
836 static int video_thread(void *arg)
837 {
838 VideoState *is = arg;
839 AVPacket pkt1, *pkt = &pkt1;
840 unsigned char *ptr;
841 int len, len1, got_picture;
842 AVFrame *frame= avcodec_alloc_frame();
843 int64_t ipts;
844 double pts;
845
846 for(;;) {
847 while (is->paused && !is->videoq.abort_request) {
848 SDL_Delay(10);
849 }
850 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
851 break;
852 /* NOTE: ipts is the PTS of the _first_ picture beginning in
853 this packet, if any */
854 ipts = pkt->pts;
855 ptr = pkt->data;
856 if (is->video_st->codec.codec_id == CODEC_ID_RAWVIDEO) {
857 avpicture_fill((AVPicture *)frame, ptr,
858 is->video_st->codec.pix_fmt,
859 is->video_st->codec.width,
860 is->video_st->codec.height);
861 pts = 0;
862 if (ipts != AV_NOPTS_VALUE)
863 pts = (double)ipts * is->ic->pts_num / is->ic->pts_den;
864 frame->pict_type = FF_I_TYPE;
865 if (output_picture2(is, frame, pts) < 0)
866 goto the_end;
867 } else {
868 len = pkt->size;
869 while (len > 0) {
870 if (is->picture_start) {
871 is->ipts = ipts;
872 is->picture_start = 0;
873 ipts = AV_NOPTS_VALUE;
874 }
875 len1 = avcodec_decode_video(&is->video_st->codec,
876 frame, &got_picture, ptr, len);
877 if (len1 < 0)
878 break;
879 if (got_picture) {
880 pts = 0;
881 if (is->ipts != AV_NOPTS_VALUE)
882 pts = (double)is->ipts * is->ic->pts_num / is->ic->pts_den;
883 if (output_picture2(is, frame, pts) < 0)
884 goto the_end;
885 is->picture_start = 1;
886 }
887 ptr += len1;
888 len -= len1;
889 }
890 }
891 av_free_packet(pkt);
892 }
893 the_end:
894 av_free(frame);
895 return 0;
896 }
897
/* copy samples for the audio waveform display */
899 static void update_sample_display(VideoState *is, short *samples, int samples_size)
900 {
901 int size, len, channels;
902
903 channels = is->audio_st->codec.channels;
904
905 size = samples_size / sizeof(short);
906 while (size > 0) {
907 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
908 if (len > size)
909 len = size;
910 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
911 samples += len;
912 is->sample_array_index += len;
913 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
914 is->sample_array_index = 0;
915 size -= len;
916 }
917 }
918
/* return the new audio buffer size (samples can be added or deleted
   to get better sync if the video or external clock is the master) */
921 static int synchronize_audio(VideoState *is, short *samples,
922 int samples_size1, double pts)
923 {
924 int n, samples_size;
925 double ref_clock;
926
927 n = 2 * is->audio_st->codec.channels;
928 samples_size = samples_size1;
929
930 /* if not master, then we try to remove or add samples to correct the clock */
931 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
932 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
933 double diff, avg_diff;
934 int wanted_size, min_size, max_size, nb_samples;
935
936 ref_clock = get_master_clock(is);
937 diff = get_audio_clock(is) - ref_clock;
938
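        /* The A-V difference is smoothed with an exponential average
           (audio_diff_cum); once the smoothed error exceeds
           audio_diff_threshold, the buffer is stretched or shrunk by
           diff * sample_rate samples per channel, clamped to
           +/- SAMPLE_CORRECTION_PERCENT_MAX percent of its size.
           e.g. at 44100 Hz a +20 ms error asks for about 882 extra
           samples per channel. */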
939 if (diff < AV_NOSYNC_THRESHOLD) {
940 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
941 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
942 /* not enough measures to have a correct estimate */
943 is->audio_diff_avg_count++;
944 } else {
945 /* estimate the A-V difference */
946 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
947
948 if (fabs(avg_diff) >= is->audio_diff_threshold) {
949 wanted_size = samples_size + ((int)(diff * is->audio_st->codec.sample_rate) * n);
950 nb_samples = samples_size / n;
951
952 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
953 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
954 if (wanted_size < min_size)
955 wanted_size = min_size;
956 else if (wanted_size > max_size)
957 wanted_size = max_size;
958
                /* add or remove samples to correct the sync */
960 if (wanted_size < samples_size) {
961 /* remove samples */
962 samples_size = wanted_size;
963 } else if (wanted_size > samples_size) {
964 uint8_t *samples_end, *q;
965 int nb;
966
967 /* add samples */
                    nb = wanted_size - samples_size;
969 samples_end = (uint8_t *)samples + samples_size - n;
970 q = samples_end + n;
971 while (nb > 0) {
972 memcpy(q, samples_end, n);
973 q += n;
974 nb -= n;
975 }
976 samples_size = wanted_size;
977 }
978 }
979 #if 0
980 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
981 diff, avg_diff, samples_size - samples_size1,
982 is->audio_clock, is->video_clock, is->audio_diff_threshold);
983 #endif
984 }
985 } else {
            /* too big a difference: may be initial PTS errors, so
               reset the A-V filter */
988 is->audio_diff_avg_count = 0;
989 is->audio_diff_cum = 0;
990 }
991 }
992
993 return samples_size;
994 }
995
/* decode one audio frame and return its uncompressed size */
997 static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, double *pts_ptr)
998 {
999 AVPacket *pkt = &is->audio_pkt;
1000 int len1, data_size;
1001 double pts;
1002
1003 for(;;) {
1004 if (is->paused || is->audioq.abort_request) {
1005 return -1;
1006 }
1007 while (is->audio_pkt_size > 0) {
1008 len1 = avcodec_decode_audio(&is->audio_st->codec,
1009 (int16_t *)audio_buf, &data_size,
1010 is->audio_pkt_data, is->audio_pkt_size);
1011 if (len1 < 0)
1012 break;
1013 is->audio_pkt_data += len1;
1014 is->audio_pkt_size -= len1;
1015 if (data_size > 0) {
1016 pts = 0;
1017 if (is->audio_pkt_ipts != AV_NOPTS_VALUE)
1018 pts = (double)is->audio_pkt_ipts * is->ic->pts_num / is->ic->pts_den;
1019 /* if no pts, then compute it */
1020 if (pts != 0) {
1021 is->audio_clock = pts;
1022 } else {
1023 int n;
1024 n = 2 * is->audio_st->codec.channels;
1025 is->audio_clock += (double)data_size / (double)(n * is->audio_st->codec.sample_rate);
1026 }
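                /* e.g. 4608 bytes of decoded 16-bit stereo at 48 kHz advance
                   the audio clock by 4608 / (4 * 48000) = 24 ms */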
1027 #if defined(DEBUG_SYNC)
1028 {
1029 static double last_clock;
1030 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1031 is->audio_clock - last_clock,
1032 is->audio_clock, pts);
1033 last_clock = is->audio_clock;
1034 }
1035 #endif
1036 *pts_ptr = is->audio_clock;
1037 is->audio_pkt_ipts = AV_NOPTS_VALUE;
1038 /* we got samples : we can exit now */
1039 return data_size;
1040 }
1041 }
1042
1043 /* free previous packet if any */
1044 if (pkt->destruct)
1045 av_free_packet(pkt);
1046
1047 /* read next packet */
1048 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1049 return -1;
1050 is->audio_pkt_data = pkt->data;
1051 is->audio_pkt_size = pkt->size;
1052 is->audio_pkt_ipts = pkt->pts;
1053 }
1054 }
1055
/* get the current audio output buffer size, in bytes. With SDL, we
   cannot have precise information */
1058 static int audio_write_get_buf_size(VideoState *is)
1059 {
1060 return is->audio_hw_buf_size - is->audio_buf_index;
1061 }
1062
1063
1064 /* prepare a new audio buffer */
1065 void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1066 {
1067 VideoState *is = opaque;
1068 int audio_size, len1;
1069 double pts;
1070
1071 audio_callback_time = av_gettime();
1072
1073 while (len > 0) {
1074 if (is->audio_buf_index >= is->audio_buf_size) {
1075 audio_size = audio_decode_frame(is, is->audio_buf, &pts);
1076 if (audio_size < 0) {
1077 /* if error, just output silence */
1078 is->audio_buf_size = 1024;
1079 memset(is->audio_buf, 0, is->audio_buf_size);
1080 } else {
1081 if (is->show_audio)
1082 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1083 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1084 pts);
1085 is->audio_buf_size = audio_size;
1086 }
1087 is->audio_buf_index = 0;
1088 }
1089 len1 = is->audio_buf_size - is->audio_buf_index;
1090 if (len1 > len)
1091 len1 = len;
1092 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1093 len -= len1;
1094 stream += len1;
1095 is->audio_buf_index += len1;
1096 }
1097 }
1098
1099
1100 /* open a given stream. Return 0 if OK */
1101 static int stream_component_open(VideoState *is, int stream_index)
1102 {
1103 AVFormatContext *ic = is->ic;
1104 AVCodecContext *enc;
1105 AVCodec *codec;
1106 SDL_AudioSpec wanted_spec, spec;
1107
1108 if (stream_index < 0 || stream_index >= ic->nb_streams)
1109 return -1;
1110 enc = &ic->streams[stream_index]->codec;
1111
1112 /* prepare audio output */
1113 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1114 wanted_spec.freq = enc->sample_rate;
1115 wanted_spec.format = AUDIO_S16SYS;
        /* hack for AC3. XXX: remove this */
1117 if (enc->channels > 2)
1118 enc->channels = 2;
1119 wanted_spec.channels = enc->channels;
1120 wanted_spec.silence = 0;
1121 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1122 wanted_spec.callback = sdl_audio_callback;
1123 wanted_spec.userdata = is;
1124 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1125 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1126 return -1;
1127 }
1128 is->audio_hw_buf_size = spec.size;
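        /* spec.size is the obtained hardware buffer size in bytes
           (samples * channels * 2 for 16-bit audio) */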
1129 }
1130
1131 codec = avcodec_find_decoder(enc->codec_id);
1132 if (!codec ||
1133 avcodec_open(enc, codec) < 0)
1134 return -1;
1135 switch(enc->codec_type) {
1136 case CODEC_TYPE_AUDIO:
1137 is->audio_stream = stream_index;
1138 is->audio_st = ic->streams[stream_index];
1139 is->audio_buf_size = 0;
1140 is->audio_buf_index = 0;
1141 is->audio_pkt_size = 0;
1142
1143 /* init averaging filter */
1144 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1145 is->audio_diff_avg_count = 0;
        /* since we do not have precise enough audio FIFO fullness
           information, we only correct audio sync when the error is
           larger than this threshold */
1148 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
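        /* audio_diff_avg_coef = 0.01^(1/20) ~= 0.79, i.e. the oldest of the
           last AUDIO_DIFF_AVG_NB differences contributes about 1% to the
           running average; the threshold is the duration of two SDL
           callback buffers (about 46 ms at 44.1 kHz) */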
1149
1150 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1151 packet_queue_init(&is->audioq);
1152 SDL_PauseAudio(0);
1153 break;
1154 case CODEC_TYPE_VIDEO:
1155 is->video_stream = stream_index;
1156 is->video_st = ic->streams[stream_index];
1157
1158 is->frame_last_delay = 40e-3;
1159 is->frame_timer = (double)av_gettime() / 1000000.0;
1160 is->picture_start = 1;
1161 is->video_current_pts_time = av_gettime();
1162
1163 packet_queue_init(&is->videoq);
1164 is->video_tid = SDL_CreateThread(video_thread, is);
1165 break;
1166 default:
1167 break;
1168 }
1169 return 0;
1170 }
1171
1172 static void stream_component_close(VideoState *is, int stream_index)
1173 {
1174 AVFormatContext *ic = is->ic;
1175 AVCodecContext *enc;
1176
1177 enc = &ic->streams[stream_index]->codec;
1178
1179 switch(enc->codec_type) {
1180 case CODEC_TYPE_AUDIO:
1181 packet_queue_abort(&is->audioq);
1182
1183 SDL_CloseAudio();
1184
1185 packet_queue_end(&is->audioq);
1186 break;
1187 case CODEC_TYPE_VIDEO:
1188 packet_queue_abort(&is->videoq);
1189
        /* note: we also signal this condition to make sure we unblock the
           video thread in all cases */
1192 SDL_LockMutex(is->pictq_mutex);
1193 SDL_CondSignal(is->pictq_cond);
1194 SDL_UnlockMutex(is->pictq_mutex);
1195
1196 SDL_WaitThread(is->video_tid, NULL);
1197
1198 packet_queue_end(&is->videoq);
1199 break;
1200 default:
1201 break;
1202 }
1203
1204 avcodec_close(enc);
1205 switch(enc->codec_type) {
1206 case CODEC_TYPE_AUDIO:
1207 is->audio_st = NULL;
1208 is->audio_stream = -1;
1209 break;
1210 case CODEC_TYPE_VIDEO:
1211 is->video_st = NULL;
1212 is->video_stream = -1;
1213 break;
1214 default:
1215 break;
1216 }
1217 }
1218
1219 void dump_stream_info(AVFormatContext *s)
1220 {
1221 if (s->track != 0)
1222 fprintf(stderr, "Track: %d\n", s->track);
1223 if (s->title[0] != '\0')
1224 fprintf(stderr, "Title: %s\n", s->title);
1225 if (s->author[0] != '\0')
1226 fprintf(stderr, "Author: %s\n", s->author);
1227 if (s->album[0] != '\0')
1228 fprintf(stderr, "Album: %s\n", s->album);
1229 if (s->year != 0)
1230 fprintf(stderr, "Year: %d\n", s->year);
1231 if (s->genre[0] != '\0')
1232 fprintf(stderr, "Genre: %s\n", s->genre);
1233 }
1234
1235 /* since we have only one decoding thread, we can use a global
1236 variable instead of a thread local variable */
1237 static VideoState *global_video_state;
1238
1239 static int decode_interrupt_cb(void)
1240 {
1241 return (global_video_state && global_video_state->abort_request);
1242 }
1243
1244 /* this thread gets the stream from the disk or the network */
1245 static int decode_thread(void *arg)
1246 {
1247 VideoState *is = arg;
1248 AVFormatContext *ic;
1249 int err, i, ret, video_index, audio_index;
1250 AVPacket pkt1, *pkt = &pkt1;
1251 AVFormatParameters params, *ap = &params;
1252
1253 video_index = -1;
1254 audio_index = -1;
1255 is->video_stream = -1;
1256 is->audio_stream = -1;
1257
1258 global_video_state = is;
1259 url_set_interrupt_cb(decode_interrupt_cb);
1260
1261 memset(ap, 0, sizeof(*ap));
1262 ap->image_format = image_format;
1263
1264 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1265 if (err < 0) {
1266 print_error(is->filename, err);
1267 ret = -1;
1268 goto fail;
1269 }
1270 is->ic = ic;
1271 err = av_find_stream_info(ic);
1272 if (err < 0) {
1273 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1274 ret = -1;
1275 goto fail;
1276 }
1277
1278 for(i = 0; i < ic->nb_streams; i++) {
1279 AVCodecContext *enc = &ic->streams[i]->codec;
1280 switch(enc->codec_type) {
1281 case CODEC_TYPE_AUDIO:
1282 if (audio_index < 0 && !audio_disable)
1283 audio_index = i;
1284 break;
1285 case CODEC_TYPE_VIDEO:
1286 if (video_index < 0 && !video_disable)
1287 video_index = i;
1288 break;
1289 default:
1290 break;
1291 }
1292 }
1293 if (show_status) {
1294 dump_format(ic, 0, is->filename, 0);
1295 dump_stream_info(ic);
1296 }
1297
1298 /* open the streams */
1299 if (audio_index >= 0) {
1300 stream_component_open(is, audio_index);
1301 }
1302
1303 if (video_index >= 0) {
1304 stream_component_open(is, video_index);
1305 } else {
1306 if (!display_disable)
1307 is->show_audio = 1;
1308 }
1309
1310 if (is->video_stream < 0 && is->audio_stream < 0) {
1311 fprintf(stderr, "%s: could not open codecs\n", is->filename);
1312 ret = -1;
1313 goto fail;
1314 }
1315
1316 for(;;) {
1317 if (is->abort_request)
1318 break;
1319 #ifdef CONFIG_NETWORK
1320 if (is->paused != is->last_paused) {
1321 is->last_paused = is->paused;
1322 if (ic->iformat == &rtsp_demux) {
1323 if (is->paused)
1324 rtsp_pause(ic);
1325 else
1326 rtsp_resume(ic);
1327 }
1328 }
1329 if (is->paused && ic->iformat == &rtsp_demux) {
1330 /* wait 10 ms to avoid trying to get another packet */
1331 /* XXX: horrible */
1332 SDL_Delay(10);
1333 continue;
1334 }
1335 #endif
1336
        /* if the queues are full, no need to read more */
1338 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
1339 is->videoq.size > MAX_VIDEOQ_SIZE) {
1340 /* wait 10 ms */
1341 SDL_Delay(10);
1342 continue;
1343 }
1344 ret = av_read_packet(ic, pkt);
1345 if (ret < 0) {
1346 break;
1347 }
1348 if (pkt->stream_index == is->audio_stream) {
1349 packet_queue_put(&is->audioq, pkt);
1350 } else if (pkt->stream_index == is->video_stream) {
1351 packet_queue_put(&is->videoq, pkt);
1352 } else {
1353 av_free_packet(pkt);
1354 }
1355 }
1356 /* wait until the end */
1357 while (!is->abort_request) {
1358 SDL_Delay(100);
1359 }
1360
1361 ret = 0;
1362 fail:
1363 /* disable interrupting */
1364 global_video_state = NULL;
1365
1366 /* close each stream */
1367 if (is->audio_stream >= 0)
1368 stream_component_close(is, is->audio_stream);
1369 if (is->video_stream >= 0)
1370 stream_component_close(is, is->video_stream);
1371 if (is->ic) {
1372 av_close_input_file(is->ic);
1373 is->ic = NULL; /* safety */
1374 }
1375 url_set_interrupt_cb(NULL);
1376
1377 if (ret != 0) {
1378 SDL_Event event;
1379
1380 event.type = FF_QUIT_EVENT;
1381 event.user.data1 = is;
1382 SDL_PushEvent(&event);
1383 }
1384 return 0;
1385 }
1386
1387 /* pause or resume the video */
1388 static void stream_pause(VideoState *is)
1389 {
1390 is->paused = !is->paused;
1391 }
1392
1393 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
1394 {
1395 VideoState *is;
1396
1397 is = av_mallocz(sizeof(VideoState));
1398 if (!is)
1399 return NULL;
1400 pstrcpy(is->filename, sizeof(is->filename), filename);
1401 is->iformat = iformat;
1402 if (screen) {
1403 is->width = screen->w;
1404 is->height = screen->h;
1405 }
1406 is->ytop = 0;
1407 is->xleft = 0;
1408
1409 /* start video display */
1410 is->pictq_mutex = SDL_CreateMutex();
1411 is->pictq_cond = SDL_CreateCond();
1412
1413 /* add the refresh timer to draw the picture */
1414 schedule_refresh(is, 40);
1415
1416 is->av_sync_type = av_sync_type;
1417 is->parse_tid = SDL_CreateThread(decode_thread, is);
1418 if (!is->parse_tid) {
1419 av_free(is);
1420 return NULL;
1421 }
1422 return is;
1423 }
1424
1425 static void stream_close(VideoState *is)
1426 {
1427 VideoPicture *vp;
1428 int i;
1429 /* XXX: use a special url_shutdown call to abort parse cleanly */
1430 is->abort_request = 1;
1431 SDL_WaitThread(is->parse_tid, NULL);
1432
1433 /* free all pictures */
1434 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1435 vp = &is->pictq[i];
1436 if (vp->bmp) {
1437 SDL_FreeYUVOverlay(vp->bmp);
1438 vp->bmp = NULL;
1439 }
1440 }
1441 SDL_DestroyMutex(is->pictq_mutex);
1442 SDL_DestroyCond(is->pictq_cond);
1443 }
1444
1445 void stream_cycle_channel(VideoState *is, int codec_type)
1446 {
1447 AVFormatContext *ic = is->ic;
1448 int start_index, stream_index;
1449 AVStream *st;
1450
1451 if (codec_type == CODEC_TYPE_VIDEO)
1452 start_index = is->video_stream;
1453 else
1454 start_index = is->audio_stream;
1455 if (start_index < 0)
1456 return;
1457 stream_index = start_index;
1458 for(;;) {
1459 if (++stream_index >= is->ic->nb_streams)
1460 stream_index = 0;
1461 if (stream_index == start_index)
1462 return;
1463 st = ic->streams[stream_index];
1464 if (st->codec.codec_type == codec_type) {
1465 /* check that parameters are OK */
1466 switch(codec_type) {
1467 case CODEC_TYPE_AUDIO:
1468 if (st->codec.sample_rate != 0 &&
1469 st->codec.channels != 0)
1470 goto the_end;
1471 break;
1472 case CODEC_TYPE_VIDEO:
1473 goto the_end;
1474 default:
1475 break;
1476 }
1477 }
1478 }
1479 the_end:
1480 stream_component_close(is, start_index);
1481 stream_component_open(is, stream_index);
1482 }
1483
1484
1485 void toggle_full_screen(void)
1486 {
1487 int w, h, flags;
1488 is_full_screen = !is_full_screen;
1489 if (!fs_screen_width) {
1490 /* use default SDL method */
1491 SDL_WM_ToggleFullScreen(screen);
1492 } else {
1493 /* use the recorded resolution */
1494 flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
1495 if (is_full_screen) {
1496 w = fs_screen_width;
1497 h = fs_screen_height;
1498 flags |= SDL_FULLSCREEN;
1499 } else {
1500 w = screen_width;
1501 h = screen_height;
1502 flags |= SDL_RESIZABLE;
1503 }
1504 screen = SDL_SetVideoMode(w, h, 0, flags);
1505 cur_stream->width = w;
1506 cur_stream->height = h;
1507 }
1508 }
1509
1510 void toggle_pause(void)
1511 {
1512 if (cur_stream)
1513 stream_pause(cur_stream);
1514 }
1515
1516 void do_exit(void)
1517 {
1518 if (cur_stream) {
1519 stream_close(cur_stream);
1520 cur_stream = NULL;
1521 }
1522 if (show_status)
1523 printf("\n");
1524 SDL_Quit();
1525 exit(0);
1526 }
1527
1528 void toggle_audio_display(void)
1529 {
1530 if (cur_stream) {
1531 cur_stream->show_audio = !cur_stream->show_audio;
1532 }
1533 }
1534
1535 /* handle an event sent by the GUI */
1536 void event_loop(void)
1537 {
1538 SDL_Event event;
1539
1540 for(;;) {
1541 SDL_WaitEvent(&event);
1542 switch(event.type) {
1543 case SDL_KEYDOWN:
1544 switch(event.key.keysym.sym) {
1545 case SDLK_ESCAPE:
1546 case SDLK_q:
1547 do_exit();
1548 break;
1549 case SDLK_f:
1550 toggle_full_screen();
1551 break;
1552 case SDLK_p:
1553 case SDLK_SPACE:
1554 toggle_pause();
1555 break;
1556 case SDLK_a:
1557 if (cur_stream)
1558 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
1559 break;
1560 case SDLK_v:
1561 if (cur_stream)
1562 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
1563 break;
1564 case SDLK_w:
1565 toggle_audio_display();
1566 break;
1567 default:
1568 break;
1569 }
1570 break;
1571 case SDL_VIDEORESIZE:
1572 if (cur_stream) {
1573 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
1574 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
1575 cur_stream->width = event.resize.w;
1576 cur_stream->height = event.resize.h;
1577 }
1578 break;
1579 case SDL_QUIT:
1580 case FF_QUIT_EVENT:
1581 do_exit();
1582 break;
1583 case FF_ALLOC_EVENT:
1584 alloc_picture(event.user.data1);
1585 break;
1586 case FF_REFRESH_EVENT:
1587 video_refresh_timer(event.user.data1);
1588 break;
1589 default:
1590 break;
1591 }
1592 }
1593 }
1594
1595 void opt_width(const char *arg)
1596 {
1597 screen_width = atoi(arg);
1598 }
1599
1600 void opt_height(const char *arg)
1601 {
1602 screen_height = atoi(arg);
1603 }
1604
1605 static void opt_format(const char *arg)
1606 {
1607 file_iformat = av_find_input_format(arg);
1608 if (!file_iformat) {
1609 fprintf(stderr, "Unknown input format: %s\n", arg);
1610 exit(1);
1611 }
1612 }
1613
1614 static void opt_image_format(const char *arg)
1615 {
1616 AVImageFormat *f;
1617
1618 for(f = first_image_format; f != NULL; f = f->next) {
1619 if (!strcmp(arg, f->name))
1620 break;
1621 }
1622 if (!f) {
1623 fprintf(stderr, "Unknown image format: '%s'\n", arg);
1624 exit(1);
1625 }
1626 image_format = f;
1627 }
1628
1629 #ifdef CONFIG_NETWORK
1630 void opt_rtp_tcp(void)
1631 {
1632 /* only tcp protocol */
1633 rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
1634 }
1635 #endif
1636
1637 void opt_sync(const char *arg)
1638 {
1639 if (!strcmp(arg, "audio"))
1640 av_sync_type = AV_SYNC_AUDIO_MASTER;
1641 else if (!strcmp(arg, "video"))
1642 av_sync_type = AV_SYNC_VIDEO_MASTER;
1643 else if (!strcmp(arg, "ext"))
1644 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
1645 else
1646 show_help();
1647 }
1648
1649 const OptionDef options[] = {
1650 { "h", 0, {(void*)show_help}, "show help" },
1651 { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
1652 { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
1653 #if 0
1654 /* disabled as SDL/X11 does not support it correctly on application launch */
1655 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
1656 #endif
1657 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
1658 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
1659 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
1660 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
1661 { "img", HAS_ARG, {(void*)opt_image_format}, "force image format", "img_fmt" },
1662 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
1663 #ifdef CONFIG_NETWORK
1664 { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
1665 #endif
1666 { "sync", HAS_ARG | OPT_EXPERT, {(void*)&opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
1667 { NULL, },
1668 };
1669
1670 void show_help(void)
1671 {
1672 printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003 Fabrice Bellard\n"
1673 "usage: ffplay [options] input_file\n"
1674 "Simple media player\n");
1675 printf("\n");
1676 show_help_options(options, "Main options:\n",
1677 OPT_EXPERT, 0);
1678 show_help_options(options, "\nAdvanced options:\n",
1679 OPT_EXPERT, OPT_EXPERT);
1680 printf("\nWhile playing:\n"
1681 "q, ESC quit\n"
1682 "f toggle full screen\n"
1683 "p, SPC pause\n"
1684 "a cycle audio channel\n"
1685 "v cycle video channel\n"
1686 "w show audio waves\n"
1687 );
1688 exit(1);
1689 }
1690
1691 void parse_arg_file(const char *filename)
1692 {
1693 if (!strcmp(filename, "-"))
1694 filename = "pipe:";
1695 input_filename = filename;
1696 }
1697
/* program entry point */
1699 int main(int argc, char **argv)
1700 {
1701 int flags, w, h;
1702
1703 /* register all codecs, demux and protocols */
1704 av_register_all();
1705
1706 parse_options(argc, argv, options);
1707
1708 if (!input_filename)
1709 show_help();
1710
1711 if (display_disable) {
1712 video_disable = 1;
1713 }
1714 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
1715 #ifndef CONFIG_WIN32
1716 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 */
1717 #endif
1718 if (SDL_Init (flags)) {
1719 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
1720 exit(1);
1721 }
1722
1723 if (!display_disable) {
1724 #ifdef HAVE_X11
1725 /* save the screen resolution... SDL should allow full screen
1726 by resizing the window */
1727 {
1728 Display *dpy;
1729 dpy = XOpenDisplay(NULL);
1730 if (dpy) {
1731 fs_screen_width = DisplayWidth(dpy, DefaultScreen(dpy));
1732 fs_screen_height = DisplayHeight(dpy, DefaultScreen(dpy));
1733 XCloseDisplay(dpy);
1734 }
1735 }
1736 #endif
1737 flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
1738 if (is_full_screen && fs_screen_width) {
1739 w = fs_screen_width;
1740 h = fs_screen_height;
1741 flags |= SDL_FULLSCREEN;
1742 } else {
1743 w = screen_width;
1744 h = screen_height;
1745 flags |= SDL_RESIZABLE;
1746 }
1747 screen = SDL_SetVideoMode(w, h, 0, flags);
1748 if (!screen) {
1749 fprintf(stderr, "SDL: could not set video mode - exiting\n");
1750 exit(1);
1751 }
1752 SDL_WM_SetCaption("FFplay", "FFplay");
1753 }
1754
1755 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
1756 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
1757 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
1758 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
1759
1760 cur_stream = stream_open(input_filename, file_iformat);
1761
1762 event_loop();
1763
1764 /* never returns */
1765
1766 return 0;
1767 }