[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19 #define HAVE_AV_CONFIG_H
20 #include "avformat.h"
21
22 #include "cmdutils.h"
23
24 #include <SDL.h>
25 #include <SDL_thread.h>
26
27 #ifdef CONFIG_WIN32
28 #undef main /* We don't want SDL to override our main() */
29 #endif
30
31 #if defined(__linux__)
32 #define HAVE_X11
33 #endif
34
35 #ifdef HAVE_X11
36 #include <X11/Xlib.h>
37 #endif
38
39 //#define DEBUG_SYNC
40
41 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
42 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
43
 44 /* SDL audio buffer size, in samples. Should be small to allow precise
 45 A/V sync, as SDL does not provide hardware buffer fullness information. */
46 #define SDL_AUDIO_BUFFER_SIZE 1024
47
48 /* no AV sync correction is done if below the AV sync threshold */
49 #define AV_SYNC_THRESHOLD 0.08
 50 /* no AV correction is done if the error is too big */
51 #define AV_NOSYNC_THRESHOLD 10.0
52
53 /* maximum audio speed change to get correct sync */
54 #define SAMPLE_CORRECTION_PERCENT_MAX 10
55
 56 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
57 #define AUDIO_DIFF_AVG_NB 20
58
 59 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
60 #define SAMPLE_ARRAY_SIZE (2*65536)
61
62 typedef struct PacketQueue {
63 AVPacketList *first_pkt, *last_pkt;
64 int nb_packets;
65 int size;
66 int abort_request;
67 SDL_mutex *mutex;
68 SDL_cond *cond;
69 } PacketQueue;
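
/* Usage sketch, derived from the code below: the demuxer in decode_thread()
   is the producer via packet_queue_put(); video_thread() and the SDL audio
   callback (through audio_decode_frame()) are the consumers via blocking
   packet_queue_get(); packet_queue_abort() wakes any blocked consumer when a
   stream is being closed. */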
70
71 #define VIDEO_PICTURE_QUEUE_SIZE 1
72
73 typedef struct VideoPicture {
74 double pts; /* presentation time stamp for this picture */
75 SDL_Overlay *bmp;
76 int width, height; /* source height & width */
77 int allocated;
78 } VideoPicture;
79
80 enum {
81 AV_SYNC_AUDIO_MASTER, /* default choice */
82 AV_SYNC_VIDEO_MASTER,
83 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
84 };
85
86 typedef struct VideoState {
87 SDL_Thread *parse_tid;
88 SDL_Thread *video_tid;
89 AVInputFormat *iformat;
90 int no_background;
91 int abort_request;
92 int paused;
93 int last_paused;
94 AVFormatContext *ic;
95 int dtg_active_format;
96
97 int audio_stream;
98
99 int av_sync_type;
100 double external_clock; /* external clock base */
101 int64_t external_clock_time;
102
103 double audio_clock;
104 double audio_diff_cum; /* used for AV difference average computation */
105 double audio_diff_avg_coef;
106 double audio_diff_threshold;
107 int audio_diff_avg_count;
108 AVStream *audio_st;
109 PacketQueue audioq;
110 int audio_hw_buf_size;
 111 /* samples output by the codec; we reserve extra space for A/V sync
 112 compensation */
113 uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
114 int audio_buf_size; /* in bytes */
115 int audio_buf_index; /* in bytes */
116 AVPacket audio_pkt;
117 uint8_t *audio_pkt_data;
118 int audio_pkt_size;
119 int64_t audio_pkt_ipts;
120
121 int show_audio; /* if true, display audio samples */
122 int16_t sample_array[SAMPLE_ARRAY_SIZE];
123 int sample_array_index;
124 int last_i_start;
125
126 double frame_timer;
127 double frame_last_pts;
128 double frame_last_delay;
129 double video_clock;
130 int video_stream;
131 AVStream *video_st;
132 PacketQueue videoq;
133 int64_t ipts;
 134 int picture_start; /* true if the next packet data starts a new picture */
135 double video_last_P_pts; /* pts of the last P picture (needed if B
136 frames are present) */
137 double video_current_pts; /* current displayed pts (different from
138 video_clock if frame fifos are used) */
139 int64_t video_current_pts_time; /* time at which we updated
140 video_current_pts - used to
141 have running video pts */
142 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
143 int pictq_size, pictq_rindex, pictq_windex;
144 SDL_mutex *pictq_mutex;
145 SDL_cond *pictq_cond;
146
147 // QETimer *video_timer;
148 char filename[1024];
149 int width, height, xleft, ytop;
150 } VideoState;
151
152 void show_help(void);
153 static int audio_write_get_buf_size(VideoState *is);
154
155 /* options specified by the user */
156 static AVInputFormat *file_iformat;
157 static AVImageFormat *image_format;
158 static const char *input_filename;
159 static int fs_screen_width;
160 static int fs_screen_height;
161 static int screen_width = 640;
162 static int screen_height = 480;
163 static int audio_disable;
164 static int video_disable;
165 static int display_disable;
166 static int show_status;
167 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
168
169 /* current context */
170 static int is_full_screen;
171 static VideoState *cur_stream;
172 static int64_t audio_callback_time;
173
174 #define FF_ALLOC_EVENT (SDL_USEREVENT)
175 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
176 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
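
/* Event flow: sdl_refresh_timer_cb() pushes FF_REFRESH_EVENT so that
   video_refresh_timer() runs in the main event loop; FF_ALLOC_EVENT asks the
   main thread to allocate the SDL overlay (see alloc_picture()); FF_QUIT_EVENT
   is pushed by decode_thread() on failure to terminate the player. */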
177
178 SDL_Surface *screen;
179
180 /* packet queue handling */
181 static void packet_queue_init(PacketQueue *q)
182 {
183 memset(q, 0, sizeof(PacketQueue));
184 q->mutex = SDL_CreateMutex();
185 q->cond = SDL_CreateCond();
186 }
187
188 static void packet_queue_end(PacketQueue *q)
189 {
190 AVPacketList *pkt, *pkt1;
191
192 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
193 pkt1 = pkt->next;
 194 av_free_packet(&pkt->pkt); av_free(pkt); /* free the packet payload and the list node */
195 }
196 SDL_DestroyMutex(q->mutex);
197 SDL_DestroyCond(q->cond);
198 }
199
200 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
201 {
202 AVPacketList *pkt1;
203
204 pkt1 = av_malloc(sizeof(AVPacketList));
205 if (!pkt1)
206 return -1;
207 pkt1->pkt = *pkt;
208 pkt1->next = NULL;
209
210 SDL_LockMutex(q->mutex);
211
212 if (!q->last_pkt)
 213     q->first_pkt = pkt1;
 214
215 else
216 q->last_pkt->next = pkt1;
217 q->last_pkt = pkt1;
218 q->nb_packets++;
219 q->size += pkt1->pkt.size;
220 /* XXX: should duplicate packet data in DV case */
221 SDL_CondSignal(q->cond);
222
223 SDL_UnlockMutex(q->mutex);
224 return 0;
225 }
226
227 static void packet_queue_abort(PacketQueue *q)
228 {
229 SDL_LockMutex(q->mutex);
230
231 q->abort_request = 1;
232
233 SDL_CondSignal(q->cond);
234
235 SDL_UnlockMutex(q->mutex);
236 }
237
238 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
239 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
240 {
241 AVPacketList *pkt1;
242 int ret;
243
244 SDL_LockMutex(q->mutex);
245
246 for(;;) {
247 if (q->abort_request) {
248 ret = -1;
249 break;
250 }
251
252 pkt1 = q->first_pkt;
253 if (pkt1) {
254 q->first_pkt = pkt1->next;
255 if (!q->first_pkt)
256 q->last_pkt = NULL;
257 q->nb_packets--;
258 q->size -= pkt1->pkt.size;
259 *pkt = pkt1->pkt;
260 av_free(pkt1);
261 ret = 1;
262 break;
263 } else if (!block) {
264 ret = 0;
265 break;
266 } else {
267 SDL_CondWait(q->cond, q->mutex);
268 }
269 }
270 SDL_UnlockMutex(q->mutex);
271 return ret;
272 }
273
274 static inline void fill_rectangle(SDL_Surface *screen,
275 int x, int y, int w, int h, int color)
276 {
277 SDL_Rect rect;
278 rect.x = x;
279 rect.y = y;
280 rect.w = w;
281 rect.h = h;
282 SDL_FillRect(screen, &rect, color);
283 }
284
285 #if 0
286 /* draw only the border of a rectangle */
287 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
288 {
289 int w1, w2, h1, h2;
290
291 /* fill the background */
292 w1 = x;
293 if (w1 < 0)
294 w1 = 0;
295 w2 = s->width - (x + w);
296 if (w2 < 0)
297 w2 = 0;
298 h1 = y;
299 if (h1 < 0)
300 h1 = 0;
301 h2 = s->height - (y + h);
302 if (h2 < 0)
303 h2 = 0;
304 fill_rectangle(screen,
305 s->xleft, s->ytop,
306 w1, s->height,
307 color);
308 fill_rectangle(screen,
309 s->xleft + s->width - w2, s->ytop,
310 w2, s->height,
311 color);
312 fill_rectangle(screen,
313 s->xleft + w1, s->ytop,
314 s->width - w1 - w2, h1,
315 color);
316 fill_rectangle(screen,
317 s->xleft + w1, s->ytop + s->height - h2,
318 s->width - w1 - w2, h2,
319 color);
320 }
321 #endif
322
323 static void video_image_display(VideoState *is)
324 {
325 VideoPicture *vp;
326 float aspect_ratio;
327 int width, height, x, y;
328 SDL_Rect rect;
329
330 vp = &is->pictq[is->pictq_rindex];
331 if (vp->bmp) {
 332 /* XXX: use the aspect ratio stored in the frame */
333 aspect_ratio = is->video_st->codec.aspect_ratio;
334 if (aspect_ratio <= 0.0)
335 aspect_ratio = (float)is->video_st->codec.width /
336 (float)is->video_st->codec.height;
337 /* if an active format is indicated, then it overrides the
338 mpeg format */
339 #if 0
340 if (is->video_st->codec.dtg_active_format != is->dtg_active_format) {
341 is->dtg_active_format = is->video_st->codec.dtg_active_format;
342 printf("dtg_active_format=%d\n", is->dtg_active_format);
343 }
344 #endif
345 #if 0
346 switch(is->video_st->codec.dtg_active_format) {
347 case FF_DTG_AFD_SAME:
348 default:
349 /* nothing to do */
350 break;
351 case FF_DTG_AFD_4_3:
352 aspect_ratio = 4.0 / 3.0;
353 break;
354 case FF_DTG_AFD_16_9:
355 aspect_ratio = 16.0 / 9.0;
356 break;
357 case FF_DTG_AFD_14_9:
358 aspect_ratio = 14.0 / 9.0;
359 break;
360 case FF_DTG_AFD_4_3_SP_14_9:
361 aspect_ratio = 14.0 / 9.0;
362 break;
363 case FF_DTG_AFD_16_9_SP_14_9:
364 aspect_ratio = 14.0 / 9.0;
365 break;
366 case FF_DTG_AFD_SP_4_3:
367 aspect_ratio = 4.0 / 3.0;
368 break;
369 }
370 #endif
371
 372 /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
373 height = is->height;
374 width = ((int)rint(height * aspect_ratio)) & -3;
375 if (width > is->width) {
376 width = is->width;
377 height = ((int)rint(width / aspect_ratio)) & -3;
378 }
379 x = (is->width - width) / 2;
380 y = (is->height - height) / 2;
381 if (!is->no_background) {
382 /* fill the background */
383 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
384 } else {
385 is->no_background = 0;
386 }
387 rect.x = is->xleft + x;
 388 rect.y = is->ytop + y;
389 rect.w = width;
390 rect.h = height;
391 SDL_DisplayYUVOverlay(vp->bmp, &rect);
392 } else {
393 #if 0
394 fill_rectangle(screen,
395 is->xleft, is->ytop, is->width, is->height,
396 QERGB(0x00, 0x00, 0x00));
397 #endif
398 }
399 }
400
401 static inline int compute_mod(int a, int b)
402 {
403 a = a % b;
404 if (a >= 0)
405 return a;
406 else
407 return a + b;
408 }
409
410 static void video_audio_display(VideoState *s)
411 {
412 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
413 int ch, channels, h, h2, bgcolor, fgcolor;
 414 int64_t time_diff; /* in microseconds; a 16 bit type would overflow */
415
416 /* compute display index : center on currently output samples */
417 channels = s->audio_st->codec.channels;
418 nb_display_channels = channels;
419 if (!s->paused) {
420 n = 2 * channels;
421 delay = audio_write_get_buf_size(s);
422 delay /= n;
423
424 /* to be more precise, we take into account the time spent since
425 the last buffer computation */
426 if (audio_callback_time) {
427 time_diff = av_gettime() - audio_callback_time;
428 delay += (time_diff * s->audio_st->codec.sample_rate) / 1000000;
429 }
430
431 delay -= s->width / 2;
432 if (delay < s->width)
433 delay = s->width;
434 i_start = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
435 s->last_i_start = i_start;
436 } else {
437 i_start = s->last_i_start;
438 }
439
440 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
441 fill_rectangle(screen,
442 s->xleft, s->ytop, s->width, s->height,
443 bgcolor);
444
445 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
446
447 /* total height for one channel */
448 h = s->height / nb_display_channels;
 449 /* graph half-height (slightly under h/2 to leave a margin between channels) */
450 h2 = (h * 9) / 20;
451 for(ch = 0;ch < nb_display_channels; ch++) {
452 i = i_start + ch;
453 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
454 for(x = 0; x < s->width; x++) {
455 y = (s->sample_array[i] * h2) >> 15;
456 if (y < 0) {
457 y = -y;
458 ys = y1 - y;
459 } else {
460 ys = y1;
461 }
462 fill_rectangle(screen,
463 s->xleft + x, ys, 1, y,
464 fgcolor);
465 i += channels;
466 if (i >= SAMPLE_ARRAY_SIZE)
467 i -= SAMPLE_ARRAY_SIZE;
468 }
469 }
470
471 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
472
473 for(ch = 1;ch < nb_display_channels; ch++) {
474 y = s->ytop + ch * h;
475 fill_rectangle(screen,
476 s->xleft, y, s->width, 1,
477 fgcolor);
478 }
479 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
480 }
481
482 /* display the current picture, if any */
483 static void video_display(VideoState *is)
484 {
485 if (is->audio_st && is->show_audio)
486 video_audio_display(is);
487 else if (is->video_st)
488 video_image_display(is);
489 }
490
491 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
492 {
493 SDL_Event event;
494 event.type = FF_REFRESH_EVENT;
495 event.user.data1 = opaque;
496 SDL_PushEvent(&event);
497 return 0; /* 0 means stop timer */
498 }
499
500 /* schedule a video refresh in 'delay' ms */
501 static void schedule_refresh(VideoState *is, int delay)
502 {
503 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
504 }
505
506 /* get the current audio clock value */
507 static double get_audio_clock(VideoState *is)
508 {
509 double pts;
510 int hw_buf_size, bytes_per_sec;
511 pts = is->audio_clock;
512 hw_buf_size = audio_write_get_buf_size(is);
513 bytes_per_sec = 0;
514 if (is->audio_st) {
515 bytes_per_sec = is->audio_st->codec.sample_rate *
516 2 * is->audio_st->codec.channels;
517 }
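/* audio_clock marks the end of the data handed to the audio layer; subtracting
   an estimate of the data not yet played gives the approximate time of the
   sample currently being heard */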
518 if (bytes_per_sec)
519 pts -= (double)hw_buf_size / bytes_per_sec;
520 return pts;
521 }
522
523 /* get the current video clock value */
524 static double get_video_clock(VideoState *is)
525 {
526 double delta;
527 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
528 return is->video_current_pts + delta;
529 }
530
531 /* get the current external clock value */
532 static double get_external_clock(VideoState *is)
533 {
534 int64_t ti;
535 ti = av_gettime();
536 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
537 }
538
539 /* get the current master clock value */
540 static double get_master_clock(VideoState *is)
541 {
542 double val;
543
544 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st)
545 val = get_video_clock(is);
546 else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st)
547 val = get_audio_clock(is);
548 else
549 val = get_external_clock(is);
550 return val;
551 }
552
553 /* called to display each frame */
554 static void video_refresh_timer(void *opaque)
555 {
556 VideoState *is = opaque;
557 VideoPicture *vp;
558 double actual_delay, delay, sync_threshold, ref_clock, diff;
559
560
561 if (is->video_st) {
562 if (is->pictq_size == 0) {
563 /* if no picture, need to wait */
564 schedule_refresh(is, 40);
565 } else {
566 /* dequeue the picture */
567 vp = &is->pictq[is->pictq_rindex];
568
569 /* update current video pts */
570 is->video_current_pts = vp->pts;
571 is->video_current_pts_time = av_gettime();
572
573 /* compute nominal delay */
574 delay = vp->pts - is->frame_last_pts;
575 if (delay <= 0 || delay >= 1.0) {
576 /* if incorrect delay, use previous one */
577 delay = is->frame_last_delay;
578 }
579 is->frame_last_delay = delay;
580 is->frame_last_pts = vp->pts;
581
582 /* update delay to follow master synchronisation source */
583 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
584 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
585 /* if video is slave, we try to correct big delays by
586 duplicating or deleting a frame */
587 ref_clock = get_master_clock(is);
588 diff = vp->pts - ref_clock;
589
590 /* skip or repeat frame. We take into account the
591 delay to compute the threshold. I still don't know
592 if it is the best guess */
593 sync_threshold = AV_SYNC_THRESHOLD;
594 if (delay > sync_threshold)
595 sync_threshold = delay;
596 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
597 if (diff <= -sync_threshold)
598 delay = 0;
599 else if (diff >= sync_threshold)
600 delay = 2 * delay;
601 }
602 }
603
604 is->frame_timer += delay;
 605 /* compute the REAL delay from the accumulated frame timer
 606 (we need to do it this way to avoid long-term drift) */
607 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
608 if (actual_delay < 0.010) {
609 /* XXX: should skip picture */
610 actual_delay = 0.010;
611 }
612 /* launch timer for next picture */
613 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
614
615 #if defined(DEBUG_SYNC)
616 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
617 delay, actual_delay, vp->pts, -diff);
618 #endif
619
620 /* display picture */
621 video_display(is);
622
623 /* update queue size and signal for next picture */
624 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
625 is->pictq_rindex = 0;
626
627 SDL_LockMutex(is->pictq_mutex);
628 is->pictq_size--;
629 SDL_CondSignal(is->pictq_cond);
630 SDL_UnlockMutex(is->pictq_mutex);
631 }
632 } else if (is->audio_st) {
633 /* draw the next audio frame */
634
635 schedule_refresh(is, 40);
636
 637 /* if only an audio stream is present, display the audio bars (better
 638 than nothing, just to test the implementation) */
639
640 /* display picture */
641 video_display(is);
642 } else {
643 schedule_refresh(is, 100);
644 }
645 if (show_status) {
646 static int64_t last_time;
647 int64_t cur_time;
648 int aqsize, vqsize;
649 double av_diff;
650
651 cur_time = av_gettime();
652 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
653 aqsize = 0;
654 vqsize = 0;
655 if (is->audio_st)
656 aqsize = is->audioq.size;
657 if (is->video_st)
658 vqsize = is->videoq.size;
659 av_diff = 0;
660 if (is->audio_st && is->video_st)
661 av_diff = get_audio_clock(is) - get_video_clock(is);
662 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB \r",
663 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024);
664 fflush(stdout);
665 last_time = cur_time;
666 }
667 }
668 }
669
 670 /* allocate a picture (this must be done in the main thread to avoid
 671 potential locking problems) */
672 static void alloc_picture(void *opaque)
673 {
674 VideoState *is = opaque;
675 VideoPicture *vp;
676
677 vp = &is->pictq[is->pictq_windex];
678
679 if (vp->bmp)
680 SDL_FreeYUVOverlay(vp->bmp);
681
682 #if 0
683 /* XXX: use generic function */
684 /* XXX: disable overlay if no hardware acceleration or if RGB format */
685 switch(is->video_st->codec.pix_fmt) {
686 case PIX_FMT_YUV420P:
687 case PIX_FMT_YUV422P:
688 case PIX_FMT_YUV444P:
689 case PIX_FMT_YUV422:
690 case PIX_FMT_YUV410P:
691 case PIX_FMT_YUV411P:
692 is_yuv = 1;
693 break;
694 default:
695 is_yuv = 0;
696 break;
697 }
698 #endif
699 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec.width,
700 is->video_st->codec.height,
701 SDL_YV12_OVERLAY,
702 screen);
703 vp->width = is->video_st->codec.width;
704 vp->height = is->video_st->codec.height;
705
706 SDL_LockMutex(is->pictq_mutex);
707 vp->allocated = 1;
708 SDL_CondSignal(is->pictq_cond);
709 SDL_UnlockMutex(is->pictq_mutex);
710 }
711
712 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
713 {
714 VideoPicture *vp;
715 int dst_pix_fmt;
716 AVPicture pict;
717
718 /* wait until we have space to put a new picture */
719 SDL_LockMutex(is->pictq_mutex);
720 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
721 !is->videoq.abort_request) {
722 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
723 }
724 SDL_UnlockMutex(is->pictq_mutex);
725
726 if (is->videoq.abort_request)
727 return -1;
728
729 vp = &is->pictq[is->pictq_windex];
730
731 /* alloc or resize hardware picture buffer */
732 if (!vp->bmp ||
733 vp->width != is->video_st->codec.width ||
734 vp->height != is->video_st->codec.height) {
735 SDL_Event event;
736
737 vp->allocated = 0;
738
739 /* the allocation must be done in the main thread to avoid
740 locking problems */
741 event.type = FF_ALLOC_EVENT;
742 event.user.data1 = is;
743 SDL_PushEvent(&event);
744
745 /* wait until the picture is allocated */
746 SDL_LockMutex(is->pictq_mutex);
747 while (!vp->allocated && !is->videoq.abort_request) {
748 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
749 }
750 SDL_UnlockMutex(is->pictq_mutex);
751
752 if (is->videoq.abort_request)
753 return -1;
754 }
755
756 /* if the frame is not skipped, then display it */
757 if (vp->bmp) {
 758 /* get a pointer to the bitmap */
759 SDL_LockYUVOverlay (vp->bmp);
760
761 dst_pix_fmt = PIX_FMT_YUV420P;
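/* SDL's YV12 overlay stores its planes in Y, V, U order while PIX_FMT_YUV420P
   is Y, U, V, hence the swapped plane and pitch indices below */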
762 pict.data[0] = vp->bmp->pixels[0];
763 pict.data[1] = vp->bmp->pixels[2];
764 pict.data[2] = vp->bmp->pixels[1];
765
766 pict.linesize[0] = vp->bmp->pitches[0];
767 pict.linesize[1] = vp->bmp->pitches[2];
768 pict.linesize[2] = vp->bmp->pitches[1];
769
770 img_convert(&pict, dst_pix_fmt,
771 (AVPicture *)src_frame, is->video_st->codec.pix_fmt,
772 is->video_st->codec.width, is->video_st->codec.height);
773 /* update the bitmap content */
774 SDL_UnlockYUVOverlay(vp->bmp);
775
776 vp->pts = pts;
777
778 /* now we can update the picture count */
779 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
780 is->pictq_windex = 0;
781 SDL_LockMutex(is->pictq_mutex);
782 is->pictq_size++;
783 SDL_UnlockMutex(is->pictq_mutex);
784 }
785 return 0;
786 }
787
788 /* compute the exact PTS for the picture if it is omitted in the stream */
789 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
790 {
791 double frame_delay, pts;
792
793 pts = pts1;
794
 795 /* if B frames are present and the current picture is an I or P frame, the pts
 796 in this packet belongs to the next I/P picture in display order, so we use the last saved pts */
797 if (is->video_st->codec.has_b_frames &&
798 src_frame->pict_type != FF_B_TYPE) {
799 /* use last pts */
800 pts = is->video_last_P_pts;
801 /* get the pts for the next I or P frame if present */
802 is->video_last_P_pts = pts1;
803 }
804
805 if (pts != 0) {
806 /* update video clock with pts, if present */
807 is->video_clock = pts;
808 } else {
809 frame_delay = (double)is->video_st->codec.frame_rate_base /
810 (double)is->video_st->codec.frame_rate;
811 is->video_clock += frame_delay;
812 /* for MPEG2, the frame can be repeated, so we update the
813 clock accordingly */
814 if (src_frame->repeat_pict) {
815 is->video_clock += src_frame->repeat_pict * (frame_delay * 0.5);
816 }
817 }
818
819 #if defined(DEBUG_SYNC) && 0
820 {
821 int ftype;
822 if (src_frame->pict_type == FF_B_TYPE)
823 ftype = 'B';
824 else if (src_frame->pict_type == FF_I_TYPE)
825 ftype = 'I';
826 else
827 ftype = 'P';
828 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
829 ftype, is->video_clock, pts1);
830 }
831 #endif
832 return queue_picture(is, src_frame, is->video_clock);
833 }
834
835 static int video_thread(void *arg)
836 {
837 VideoState *is = arg;
838 AVPacket pkt1, *pkt = &pkt1;
839 unsigned char *ptr;
840 int len, len1, got_picture;
841 AVFrame frame;
842 int64_t ipts;
843 double pts;
844
845 for(;;) {
846 while (is->paused && !is->videoq.abort_request) {
847 SDL_Delay(10);
848 }
849 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
850 break;
851 /* NOTE: ipts is the PTS of the _first_ picture beginning in
852 this packet, if any */
853 ipts = pkt->pts;
854 ptr = pkt->data;
855 if (is->video_st->codec.codec_id == CODEC_ID_RAWVIDEO) {
856 avpicture_fill((AVPicture *)&frame, ptr,
857 is->video_st->codec.pix_fmt,
858 is->video_st->codec.width,
859 is->video_st->codec.height);
860 pts = 0;
861 if (ipts != AV_NOPTS_VALUE)
862 pts = (double)ipts * is->ic->pts_num / is->ic->pts_den;
863 frame.pict_type = FF_I_TYPE;
864 if (output_picture2(is, &frame, pts) < 0)
865 goto the_end;
866 } else {
867 len = pkt->size;
868 while (len > 0) {
869 if (is->picture_start) {
870 is->ipts = ipts;
871 is->picture_start = 0;
872 ipts = AV_NOPTS_VALUE;
873 }
874 len1 = avcodec_decode_video(&is->video_st->codec,
875 &frame, &got_picture, ptr, len);
876 if (len1 < 0)
877 break;
878 if (got_picture) {
879 pts = 0;
880 if (is->ipts != AV_NOPTS_VALUE)
881 pts = (double)is->ipts * is->ic->pts_num / is->ic->pts_den;
882 if (output_picture2(is, &frame, pts) < 0)
883 goto the_end;
884 is->picture_start = 1;
885 }
886 ptr += len1;
887 len -= len1;
888 }
889 }
890 av_free_packet(pkt);
891 }
892 the_end:
893 return 0;
894 }
895
 896 /* copy samples for the audio waveform display */
897 static void update_sample_display(VideoState *is, short *samples, int samples_size)
898 {
899 int size, len, channels;
900
901 channels = is->audio_st->codec.channels;
902
903 size = samples_size / sizeof(short);
904 while (size > 0) {
905 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
906 if (len > size)
907 len = size;
908 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
909 samples += len;
910 is->sample_array_index += len;
911 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
912 is->sample_array_index = 0;
913 size -= len;
914 }
915 }
916
 917 /* return the new audio buffer size (samples can be added or removed
 918 to get better sync if video or an external clock is the master) */
919 static int synchronize_audio(VideoState *is, short *samples,
920 int samples_size1, double pts)
921 {
922 int n, samples_size;
923 double ref_clock;
924
925 n = 2 * is->audio_st->codec.channels;
926 samples_size = samples_size1;
927
928 /* if not master, then we try to remove or add samples to correct the clock */
929 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
930 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
931 double diff, avg_diff;
932 int wanted_size, min_size, max_size, nb_samples;
933
934 ref_clock = get_master_clock(is);
935 diff = get_audio_clock(is) - ref_clock;
936
937 if (diff < AV_NOSYNC_THRESHOLD) {
938 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
939 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
 940 /* not enough measurements yet for a reliable estimate */
941 is->audio_diff_avg_count++;
942 } else {
943 /* estimate the A-V difference */
944 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
945
946 if (fabs(avg_diff) >= is->audio_diff_threshold) {
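/* diff = audio clock - master clock: when the audio clock is ahead (diff > 0)
   we stretch this buffer by adding samples, when it lags we shrink it */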
947 wanted_size = samples_size + ((int)(diff * is->audio_st->codec.sample_rate) * n);
948 nb_samples = samples_size / n;
949
950 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
951 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
952 if (wanted_size < min_size)
953 wanted_size = min_size;
954 else if (wanted_size > max_size)
955 wanted_size = max_size;
956
 957 /* add or remove samples to correct the sync */
958 if (wanted_size < samples_size) {
959 /* remove samples */
960 samples_size = wanted_size;
961 } else if (wanted_size > samples_size) {
962 uint8_t *samples_end, *q;
963 int nb;
964
965 /* add samples */
 966 nb = (wanted_size - samples_size);
967 samples_end = (uint8_t *)samples + samples_size - n;
968 q = samples_end + n;
969 while (nb > 0) {
970 memcpy(q, samples_end, n);
971 q += n;
972 nb -= n;
973 }
974 samples_size = wanted_size;
975 }
976 }
977 #if 0
978 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
979 diff, avg_diff, samples_size - samples_size1,
980 is->audio_clock, is->video_clock, is->audio_diff_threshold);
981 #endif
982 }
983 } else {
 984 /* difference is too big: probably caused by initial PTS errors, so
 985 reset the A-V filter */
986 is->audio_diff_avg_count = 0;
987 is->audio_diff_cum = 0;
988 }
989 }
990
991 return samples_size;
992 }
993
 994 /* decode one audio frame and return its uncompressed size */
995 static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, double *pts_ptr)
996 {
997 AVPacket *pkt = &is->audio_pkt;
998 int len1, data_size;
999 double pts;
1000
1001 for(;;) {
1002 if (is->paused || is->audioq.abort_request) {
1003 return -1;
1004 }
1005 while (is->audio_pkt_size > 0) {
1006 len1 = avcodec_decode_audio(&is->audio_st->codec,
1007 (int16_t *)audio_buf, &data_size,
1008 is->audio_pkt_data, is->audio_pkt_size);
1009 if (len1 < 0)
1010 break;
1011 is->audio_pkt_data += len1;
1012 is->audio_pkt_size -= len1;
1013 if (data_size > 0) {
1014 pts = 0;
1015 if (is->audio_pkt_ipts != AV_NOPTS_VALUE)
1016 pts = (double)is->audio_pkt_ipts * is->ic->pts_num / is->ic->pts_den;
 1017 /* if a pts is available, use it; otherwise extrapolate from the previous clock */
1018 if (pts != 0) {
1019 is->audio_clock = pts;
1020 } else {
1021 int n;
1022 n = 2 * is->audio_st->codec.channels;
1023 is->audio_clock += (double)data_size / (double)(n * is->audio_st->codec.sample_rate);
1024 }
1025 #if defined(DEBUG_SYNC)
1026 {
1027 static double last_clock;
1028 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1029 is->audio_clock - last_clock,
1030 is->audio_clock, pts);
1031 last_clock = is->audio_clock;
1032 }
1033 #endif
1034 *pts_ptr = is->audio_clock;
1035 is->audio_pkt_ipts = AV_NOPTS_VALUE;
 1036 /* we got samples: we can exit now */
1037 return data_size;
1038 }
1039 }
1040
1041 /* free previous packet if any */
1042 if (pkt->destruct)
1043 av_free_packet(pkt);
1044
1045 /* read next packet */
1046 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1047 return -1;
1048 is->audio_pkt_data = pkt->data;
1049 is->audio_pkt_size = pkt->size;
1050 is->audio_pkt_ipts = pkt->pts;
1051 }
1052 }
1053
 1054 /* get the amount of audio data still buffered for output, in bytes. With SDL,
 1055 we cannot get precise information */
1056 static int audio_write_get_buf_size(VideoState *is)
1057 {
1058 return is->audio_hw_buf_size - is->audio_buf_index;
1059 }
1060
1061
1062 /* prepare a new audio buffer */
1063 void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1064 {
1065 VideoState *is = opaque;
1066 int audio_size, len1;
1067 double pts;
1068
1069 audio_callback_time = av_gettime();
1070
1071 while (len > 0) {
1072 if (is->audio_buf_index >= is->audio_buf_size) {
1073 audio_size = audio_decode_frame(is, is->audio_buf, &pts);
1074 if (audio_size < 0) {
1075 /* if error, just output silence */
1076 is->audio_buf_size = 1024;
1077 memset(is->audio_buf, 0, is->audio_buf_size);
1078 } else {
1079 if (is->show_audio)
1080 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1081 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1082 pts);
1083 is->audio_buf_size = audio_size;
1084 }
1085 is->audio_buf_index = 0;
1086 }
1087 len1 = is->audio_buf_size - is->audio_buf_index;
1088 if (len1 > len)
1089 len1 = len;
1090 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1091 len -= len1;
1092 stream += len1;
1093 is->audio_buf_index += len1;
1094 }
1095 }
1096
1097
1098 /* open a given stream. Return 0 if OK */
1099 static int stream_component_open(VideoState *is, int stream_index)
1100 {
1101 AVFormatContext *ic = is->ic;
1102 AVCodecContext *enc;
1103 AVCodec *codec;
1104 SDL_AudioSpec wanted_spec, spec;
1105
1106 if (stream_index < 0 || stream_index >= ic->nb_streams)
1107 return -1;
1108 enc = &ic->streams[stream_index]->codec;
1109
1110 /* prepare audio output */
1111 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1112 wanted_spec.freq = enc->sample_rate;
1113 wanted_spec.format = AUDIO_S16SYS;
 1114 /* hack for AC3. XXX: remove this */
1115 if (enc->channels > 2)
1116 enc->channels = 2;
1117 wanted_spec.channels = enc->channels;
1118 wanted_spec.silence = 0;
1119 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1120 wanted_spec.callback = sdl_audio_callback;
1121 wanted_spec.userdata = is;
1122 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1123 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1124 return -1;
1125 }
1126 is->audio_hw_buf_size = spec.size;
1127 }
1128
1129 codec = avcodec_find_decoder(enc->codec_id);
1130 if (!codec ||
1131 avcodec_open(enc, codec) < 0)
1132 return -1;
1133 switch(enc->codec_type) {
1134 case CODEC_TYPE_AUDIO:
1135 is->audio_stream = stream_index;
1136 is->audio_st = ic->streams[stream_index];
1137 is->audio_buf_size = 0;
1138 is->audio_buf_index = 0;
1139 is->audio_pkt_size = 0;
1140
1141 /* init averaging filter */
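/* exponential moving average coefficient: a measurement's weight decays to
   about 1% after AUDIO_DIFF_AVG_NB updates, since coef^AUDIO_DIFF_AVG_NB = 0.01 */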
1142 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1143 is->audio_diff_avg_count = 0;
 1144 /* since we do not have precise enough information about the audio FIFO
 1145 fullness, we only correct audio sync when the error exceeds this threshold */
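/* i.e. roughly two SDL audio callback periods, expressed in seconds */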
1146 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1147
1148 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1149 packet_queue_init(&is->audioq);
1150 SDL_PauseAudio(0);
1151 break;
1152 case CODEC_TYPE_VIDEO:
1153 is->video_stream = stream_index;
1154 is->video_st = ic->streams[stream_index];
1155
1156 is->frame_last_delay = 40e-3;
1157 is->frame_timer = (double)av_gettime() / 1000000.0;
1158 is->picture_start = 1;
1159 is->video_current_pts_time = av_gettime();
1160
1161 packet_queue_init(&is->videoq);
1162 is->video_tid = SDL_CreateThread(video_thread, is);
1163 break;
1164 default:
1165 break;
1166 }
1167 return 0;
1168 }
1169
1170 static void stream_component_close(VideoState *is, int stream_index)
1171 {
1172 AVFormatContext *ic = is->ic;
1173 AVCodecContext *enc;
1174
1175 enc = &ic->streams[stream_index]->codec;
1176
1177 switch(enc->codec_type) {
1178 case CODEC_TYPE_AUDIO:
1179 packet_queue_abort(&is->audioq);
1180
1181 SDL_CloseAudio();
1182
1183 packet_queue_end(&is->audioq);
1184 break;
1185 case CODEC_TYPE_VIDEO:
1186 packet_queue_abort(&is->videoq);
1187
 1188 /* note: we also signal this condition to make sure we unblock the
 1189 video thread in all cases */
1190 SDL_LockMutex(is->pictq_mutex);
1191 SDL_CondSignal(is->pictq_cond);
1192 SDL_UnlockMutex(is->pictq_mutex);
1193
1194 SDL_WaitThread(is->video_tid, NULL);
1195
1196 packet_queue_end(&is->videoq);
1197 break;
1198 default:
1199 break;
1200 }
1201
1202 avcodec_close(enc);
1203 switch(enc->codec_type) {
1204 case CODEC_TYPE_AUDIO:
1205 is->audio_st = NULL;
1206 is->audio_stream = -1;
1207 break;
1208 case CODEC_TYPE_VIDEO:
1209 is->video_st = NULL;
1210 is->video_stream = -1;
1211 break;
1212 default:
1213 break;
1214 }
1215 }
1216
1217 void dump_stream_info(AVFormatContext *s)
1218 {
1219 if (s->track != 0)
1220 fprintf(stderr, "Track: %d\n", s->track);
1221 if (s->title[0] != '\0')
1222 fprintf(stderr, "Title: %s\n", s->title);
1223 if (s->author[0] != '\0')
1224 fprintf(stderr, "Author: %s\n", s->author);
1225 if (s->album[0] != '\0')
1226 fprintf(stderr, "Album: %s\n", s->album);
1227 if (s->year != 0)
1228 fprintf(stderr, "Year: %d\n", s->year);
1229 if (s->genre[0] != '\0')
1230 fprintf(stderr, "Genre: %s\n", s->genre);
1231 }
1232
1233 /* since we have only one decoding thread, we can use a global
1234 variable instead of a thread local variable */
1235 static VideoState *global_video_state;
1236
1237 static int decode_interrupt_cb(void)
1238 {
1239 return (global_video_state && global_video_state->abort_request);
1240 }
1241
1242 /* this thread gets the stream from the disk or the network */
1243 static int decode_thread(void *arg)
1244 {
1245 VideoState *is = arg;
1246 AVFormatContext *ic;
1247 int err, i, ret, video_index, audio_index;
1248 AVPacket pkt1, *pkt = &pkt1;
1249 AVFormatParameters params, *ap = &params;
1250
1251 video_index = -1;
1252 audio_index = -1;
1253 is->video_stream = -1;
1254 is->audio_stream = -1;
1255
1256 global_video_state = is;
1257 url_set_interrupt_cb(decode_interrupt_cb);
1258
1259 memset(ap, 0, sizeof(*ap));
1260 ap->image_format = image_format;
1261
1262 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1263 if (err < 0) {
1264 print_error(is->filename, err);
1265 ret = -1;
1266 goto fail;
1267 }
1268 is->ic = ic;
1269 err = av_find_stream_info(ic);
1270 if (err < 0) {
1271 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1272 ret = -1;
1273 goto fail;
1274 }
1275
1276 for(i = 0; i < ic->nb_streams; i++) {
1277 AVCodecContext *enc = &ic->streams[i]->codec;
1278 switch(enc->codec_type) {
1279 case CODEC_TYPE_AUDIO:
1280 if (audio_index < 0 && !audio_disable)
1281 audio_index = i;
1282 break;
1283 case CODEC_TYPE_VIDEO:
1284 if (video_index < 0 && !video_disable)
1285 video_index = i;
1286 break;
1287 default:
1288 break;
1289 }
1290 }
1291 if (show_status) {
1292 dump_format(ic, 0, is->filename, 0);
1293 dump_stream_info(ic);
1294 }
1295
1296 /* open the streams */
1297 if (audio_index >= 0) {
1298 stream_component_open(is, audio_index);
1299 }
1300
1301 if (video_index >= 0) {
1302 stream_component_open(is, video_index);
1303 } else {
1304 if (!display_disable)
1305 is->show_audio = 1;
1306 }
1307
1308 if (is->video_stream < 0 && is->audio_stream < 0) {
1309 fprintf(stderr, "%s: could not open codecs\n", is->filename);
1310 ret = -1;
1311 goto fail;
1312 }
1313
1314 for(;;) {
1315 if (is->abort_request)
1316 break;
1317 #ifdef CONFIG_NETWORK
1318 if (is->paused != is->last_paused) {
1319 is->last_paused = is->paused;
1320 if (ic->iformat == &rtsp_demux) {
1321 if (is->paused)
1322 rtsp_pause(ic);
1323 else
1324 rtsp_resume(ic);
1325 }
1326 }
1327 if (is->paused && ic->iformat == &rtsp_demux) {
1328 /* wait 10 ms to avoid trying to get another packet */
1329 /* XXX: horrible */
1330 SDL_Delay(10);
1331 continue;
1332 }
1333 #endif
1334
 1335 /* if the queues are full, no need to read more */
1336 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
1337 is->videoq.size > MAX_VIDEOQ_SIZE) {
1338 /* wait 10 ms */
1339 SDL_Delay(10);
1340 continue;
1341 }
1342 ret = av_read_packet(ic, pkt);
1343 if (ret < 0) {
1344 break;
1345 }
1346 if (pkt->stream_index == is->audio_stream) {
1347 packet_queue_put(&is->audioq, pkt);
1348 } else if (pkt->stream_index == is->video_stream) {
1349 packet_queue_put(&is->videoq, pkt);
1350 } else {
1351 av_free_packet(pkt);
1352 }
1353 }
1354 /* wait until the end */
1355 while (!is->abort_request) {
1356 SDL_Delay(100);
1357 }
1358
1359 ret = 0;
1360 fail:
1361 /* disable interrupting */
1362 global_video_state = NULL;
1363
1364 /* close each stream */
1365 if (is->audio_stream >= 0)
1366 stream_component_close(is, is->audio_stream);
1367 if (is->video_stream >= 0)
1368 stream_component_close(is, is->video_stream);
1369 if (is->ic) {
1370 av_close_input_file(is->ic);
1371 is->ic = NULL; /* safety */
1372 }
1373 url_set_interrupt_cb(NULL);
1374
1375 if (ret != 0) {
1376 SDL_Event event;
1377
1378 event.type = FF_QUIT_EVENT;
1379 event.user.data1 = is;
1380 SDL_PushEvent(&event);
1381 }
1382 return 0;
1383 }
1384
1385 /* pause or resume the video */
1386 static void stream_pause(VideoState *is)
1387 {
1388 is->paused = !is->paused;
1389 }
1390
1391 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
1392 {
1393 VideoState *is;
1394
1395 is = av_mallocz(sizeof(VideoState));
1396 if (!is)
1397 return NULL;
1398 pstrcpy(is->filename, sizeof(is->filename), filename);
1399 is->iformat = iformat;
1400 if (screen) {
1401 is->width = screen->w;
1402 is->height = screen->h;
1403 }
1404 is->ytop = 0;
1405 is->xleft = 0;
1406
1407 /* start video display */
1408 is->pictq_mutex = SDL_CreateMutex();
1409 is->pictq_cond = SDL_CreateCond();
1410
1411 /* add the refresh timer to draw the picture */
1412 schedule_refresh(is, 40);
1413
1414 is->av_sync_type = av_sync_type;
1415 is->parse_tid = SDL_CreateThread(decode_thread, is);
1416 if (!is->parse_tid) {
1417 av_free(is);
1418 return NULL;
1419 }
1420 return is;
1421 }
1422
1423 static void stream_close(VideoState *is)
1424 {
1425 VideoPicture *vp;
1426 int i;
1427 /* XXX: use a special url_shutdown call to abort parse cleanly */
1428 is->abort_request = 1;
1429 SDL_WaitThread(is->parse_tid, NULL);
1430
1431 /* free all pictures */
1432 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1433 vp = &is->pictq[i];
1434 if (vp->bmp) {
1435 SDL_FreeYUVOverlay(vp->bmp);
1436 vp->bmp = NULL;
1437 }
1438 }
1439 SDL_DestroyMutex(is->pictq_mutex);
1440 SDL_DestroyCond(is->pictq_cond);
1441 }
1442
1443 void stream_cycle_channel(VideoState *is, int codec_type)
1444 {
1445 AVFormatContext *ic = is->ic;
1446 int start_index, stream_index;
1447 AVStream *st;
1448
1449 if (codec_type == CODEC_TYPE_VIDEO)
1450 start_index = is->video_stream;
1451 else
1452 start_index = is->audio_stream;
1453 if (start_index < 0)
1454 return;
1455 stream_index = start_index;
1456 for(;;) {
1457 if (++stream_index >= is->ic->nb_streams)
1458 stream_index = 0;
1459 if (stream_index == start_index)
1460 return;
1461 st = ic->streams[stream_index];
1462 if (st->codec.codec_type == codec_type) {
1463 /* check that parameters are OK */
1464 switch(codec_type) {
1465 case CODEC_TYPE_AUDIO:
1466 if (st->codec.sample_rate != 0 &&
1467 st->codec.channels != 0)
1468 goto the_end;
1469 break;
1470 case CODEC_TYPE_VIDEO:
1471 goto the_end;
1472 default:
1473 break;
1474 }
1475 }
1476 }
1477 the_end:
1478 stream_component_close(is, start_index);
1479 stream_component_open(is, stream_index);
1480 }
1481
1482
1483 void toggle_full_screen(void)
1484 {
1485 int w, h, flags;
1486 is_full_screen = !is_full_screen;
1487 if (!fs_screen_width) {
1488 /* use default SDL method */
1489 SDL_WM_ToggleFullScreen(screen);
1490 } else {
1491 /* use the recorded resolution */
1492 flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
1493 if (is_full_screen) {
1494 w = fs_screen_width;
1495 h = fs_screen_height;
1496 flags |= SDL_FULLSCREEN;
1497 } else {
1498 w = screen_width;
1499 h = screen_height;
1500 flags |= SDL_RESIZABLE;
1501 }
1502 screen = SDL_SetVideoMode(w, h, 0, flags);
1503 cur_stream->width = w;
1504 cur_stream->height = h;
1505 }
1506 }
1507
1508 void toggle_pause(void)
1509 {
1510 if (cur_stream)
1511 stream_pause(cur_stream);
1512 }
1513
1514 void do_exit(void)
1515 {
1516 if (cur_stream) {
1517 stream_close(cur_stream);
1518 cur_stream = NULL;
1519 }
1520 if (show_status)
1521 printf("\n");
1522 SDL_Quit();
1523 exit(0);
1524 }
1525
1526 void toggle_audio_display(void)
1527 {
1528 if (cur_stream) {
1529 cur_stream->show_audio = !cur_stream->show_audio;
1530 }
1531 }
1532
1533 /* handle an event sent by the GUI */
1534 void event_loop(void)
1535 {
1536 SDL_Event event;
1537
1538 for(;;) {
1539 SDL_WaitEvent(&event);
1540 switch(event.type) {
1541 case SDL_KEYDOWN:
1542 switch(event.key.keysym.sym) {
1543 case SDLK_ESCAPE:
1544 case SDLK_q:
1545 do_exit();
1546 break;
1547 case SDLK_f:
1548 toggle_full_screen();
1549 break;
1550 case SDLK_p:
1551 case SDLK_SPACE:
1552 toggle_pause();
1553 break;
1554 case SDLK_a:
1555 if (cur_stream)
1556 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
1557 break;
1558 case SDLK_v:
1559 if (cur_stream)
1560 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
1561 break;
1562 case SDLK_w:
1563 toggle_audio_display();
1564 break;
1565 default:
1566 break;
1567 }
1568 break;
1569 case SDL_VIDEORESIZE:
1570 if (cur_stream) {
1571 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
1572 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
1573 cur_stream->width = event.resize.w;
1574 cur_stream->height = event.resize.h;
1575 }
1576 break;
1577 case SDL_QUIT:
1578 case FF_QUIT_EVENT:
1579 do_exit();
1580 break;
1581 case FF_ALLOC_EVENT:
1582 alloc_picture(event.user.data1);
1583 break;
1584 case FF_REFRESH_EVENT:
1585 video_refresh_timer(event.user.data1);
1586 break;
1587 default:
1588 break;
1589 }
1590 }
1591 }
1592
1593 void opt_width(const char *arg)
1594 {
1595 screen_width = atoi(arg);
1596 }
1597
1598 void opt_height(const char *arg)
1599 {
1600 screen_height = atoi(arg);
1601 }
1602
1603 static void opt_format(const char *arg)
1604 {
1605 file_iformat = av_find_input_format(arg);
1606 if (!file_iformat) {
1607 fprintf(stderr, "Unknown input format: %s\n", arg);
1608 exit(1);
1609 }
1610 }
1611
1612 static void opt_image_format(const char *arg)
1613 {
1614 AVImageFormat *f;
1615
1616 for(f = first_image_format; f != NULL; f = f->next) {
1617 if (!strcmp(arg, f->name))
1618 break;
1619 }
1620 if (!f) {
1621 fprintf(stderr, "Unknown image format: '%s'\n", arg);
1622 exit(1);
1623 }
1624 image_format = f;
1625 }
1626
1627 #ifdef CONFIG_NETWORK
1628 void opt_rtp_tcp(void)
1629 {
1630 /* only tcp protocol */
1631 rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
1632 }
1633 #endif
1634
1635 void opt_sync(const char *arg)
1636 {
1637 if (!strcmp(arg, "audio"))
1638 av_sync_type = AV_SYNC_AUDIO_MASTER;
1639 else if (!strcmp(arg, "video"))
1640 av_sync_type = AV_SYNC_VIDEO_MASTER;
1641 else if (!strcmp(arg, "ext"))
1642 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
1643 else
1644 show_help();
1645 }
1646
1647 const OptionDef options[] = {
1648 { "h", 0, {(void*)show_help}, "show help" },
1649 { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
1650 { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
1651 #if 0
1652 /* disabled as SDL/X11 does not support it correctly on application launch */
1653 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
1654 #endif
1655 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
1656 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
1657 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
1658 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
1659 { "img", HAS_ARG, {(void*)opt_image_format}, "force image format", "img_fmt" },
1660 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
1661 #ifdef CONFIG_NETWORK
1662 { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
1663 #endif
1664 { "sync", HAS_ARG | OPT_EXPERT, {(void*)&opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
1665 { NULL, },
1666 };
1667
1668 void show_help(void)
1669 {
1670 printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003 Fabrice Bellard\n"
1671 "usage: ffplay [options] input_file\n"
1672 "Simple media player\n");
1673 printf("\n");
1674 show_help_options(options, "Main options:\n",
1675 OPT_EXPERT, 0);
1676 show_help_options(options, "\nAdvanced options:\n",
1677 OPT_EXPERT, OPT_EXPERT);
1678 printf("\nWhile playing:\n"
1679 "q, ESC quit\n"
1680 "f toggle full screen\n"
1681 "p, SPC pause\n"
1682 "a cycle audio channel\n"
1683 "v cycle video channel\n"
1684 "w show audio waves\n"
1685 );
1686 exit(1);
1687 }
1688
1689 void parse_arg_file(const char *filename)
1690 {
1691 if (!strcmp(filename, "-"))
1692 filename = "pipe:";
1693 input_filename = filename;
1694 }
1695
 1696 /* program entry point */
1697 int main(int argc, char **argv)
1698 {
1699 int flags, w, h;
1700
 1701 /* register all codecs, demuxers and protocols */
1702 av_register_all();
1703
1704 parse_options(argc, argv, options);
1705
1706 if (!input_filename)
1707 show_help();
1708
1709 if (display_disable) {
1710 video_disable = 1;
1711 }
1712 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
1713 #ifndef CONFIG_WIN32
1714 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 */
1715 #endif
1716 if (SDL_Init (flags)) {
1717 fprintf(stderr, "Could not initialize SDL - exiting\n");
1718 exit(1);
1719 }
1720
1721 if (!display_disable) {
1722 #ifdef HAVE_X11
1723 /* save the screen resolution... SDL should allow full screen
1724 by resizing the window */
1725 {
1726 Display *dpy;
1727 dpy = XOpenDisplay(NULL);
1728 if (dpy) {
1729 fs_screen_width = DisplayWidth(dpy, DefaultScreen(dpy));
1730 fs_screen_height = DisplayHeight(dpy, DefaultScreen(dpy));
1731 XCloseDisplay(dpy);
1732 }
1733 }
1734 #endif
1735 flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
1736 if (is_full_screen && fs_screen_width) {
1737 w = fs_screen_width;
1738 h = fs_screen_height;
1739 flags |= SDL_FULLSCREEN;
1740 } else {
1741 w = screen_width;
1742 h = screen_height;
1743 flags |= SDL_RESIZABLE;
1744 }
1745 screen = SDL_SetVideoMode(w, h, 0, flags);
1746 if (!screen) {
1747 fprintf(stderr, "SDL: could not set video mode - exiting\n");
1748 exit(1);
1749 }
1750 SDL_WM_SetCaption("FFplay", "FFplay");
1751 }
1752
1753 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
1754 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
1755 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
1756 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
1757
1758 cur_stream = stream_open(input_filename, file_iformat);
1759
1760 event_loop();
1761
1762 /* never returns */
1763
1764 return 0;
1765 }