Use parentheses around && within ||, fix the gcc warning:
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <math.h>
24 #include <limits.h>
25 #include "libavutil/avstring.h"
26 #include "libavutil/pixdesc.h"
27 #include "libavformat/avformat.h"
28 #include "libavdevice/avdevice.h"
29 #include "libswscale/swscale.h"
30 #include "libavcodec/audioconvert.h"
31 #include "libavcodec/colorspace.h"
32 #include "libavcodec/opt.h"
33
34 #include "cmdutils.h"
35
36 #include <SDL.h>
37 #include <SDL_thread.h>
38
39 #ifdef __MINGW32__
40 #undef main /* We don't want SDL to override our main() */
41 #endif
42
43 #undef exit
44 #undef printf
45 #undef fprintf
46
47 const char program_name[] = "FFplay";
48 const int program_birth_year = 2003;
49
50 //#define DEBUG_SYNC
51
52 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
53 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
54 #define MIN_FRAMES 5
55
56 /* SDL audio buffer size, in samples. Should be small to have precise
57 A/V sync as SDL does not have hardware buffer fullness info. */
58 #define SDL_AUDIO_BUFFER_SIZE 1024
59
60 /* no AV sync correction is done if below the AV sync threshold */
61 #define AV_SYNC_THRESHOLD 0.01
62 /* no AV correction is done if too big error */
63 #define AV_NOSYNC_THRESHOLD 10.0
64
65 /* maximum audio speed change to get correct sync */
66 #define SAMPLE_CORRECTION_PERCENT_MAX 10
67
68 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
69 #define AUDIO_DIFF_AVG_NB 20
70
71 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
72 #define SAMPLE_ARRAY_SIZE (2*65536)
73
74 static int sws_flags = SWS_BICUBIC;
75
76 typedef struct PacketQueue {
77 AVPacketList *first_pkt, *last_pkt;
78 int nb_packets;
79 int size;
80 int abort_request;
81 SDL_mutex *mutex;
82 SDL_cond *cond;
83 } PacketQueue;
84
85 #define VIDEO_PICTURE_QUEUE_SIZE 1
86 #define SUBPICTURE_QUEUE_SIZE 4
87
88 typedef struct VideoPicture {
89 double pts; ///<presentation time stamp for this picture
90 SDL_Overlay *bmp;
91 int width, height; /* source height & width */
92 int allocated;
93 SDL_TimerID timer_id;
94 } VideoPicture;
95
96 typedef struct SubPicture {
97 double pts; /* presentation time stamp for this picture */
98 AVSubtitle sub;
99 } SubPicture;
100
101 enum {
102 AV_SYNC_AUDIO_MASTER, /* default choice */
103 AV_SYNC_VIDEO_MASTER,
104 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
105 };
106
107 typedef struct VideoState {
108 SDL_Thread *parse_tid;
109 SDL_Thread *video_tid;
110 AVInputFormat *iformat;
111 int no_background;
112 int abort_request;
113 int paused;
114 int last_paused;
115 int seek_req;
116 int seek_flags;
117 int64_t seek_pos;
118 int64_t seek_rel;
119 int read_pause_return;
120 AVFormatContext *ic;
121 int dtg_active_format;
122
123 int audio_stream;
124
125 int av_sync_type;
126 double external_clock; /* external clock base */
127 int64_t external_clock_time;
128
129 double audio_clock;
130 double audio_diff_cum; /* used for AV difference average computation */
131 double audio_diff_avg_coef;
132 double audio_diff_threshold;
133 int audio_diff_avg_count;
134 AVStream *audio_st;
135 PacketQueue audioq;
136 int audio_hw_buf_size;
137 /* samples output by the codec. we reserve more space for avsync
138 compensation */
139 DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
140 DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
141 uint8_t *audio_buf;
142 unsigned int audio_buf_size; /* in bytes */
143 int audio_buf_index; /* in bytes */
144 AVPacket audio_pkt_temp;
145 AVPacket audio_pkt;
146 enum SampleFormat audio_src_fmt;
147 AVAudioConvert *reformat_ctx;
148
149 int show_audio; /* if true, display audio samples */
150 int16_t sample_array[SAMPLE_ARRAY_SIZE];
151 int sample_array_index;
152 int last_i_start;
153
154 SDL_Thread *subtitle_tid;
155 int subtitle_stream;
156 int subtitle_stream_changed;
157 AVStream *subtitle_st;
158 PacketQueue subtitleq;
159 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
160 int subpq_size, subpq_rindex, subpq_windex;
161 SDL_mutex *subpq_mutex;
162 SDL_cond *subpq_cond;
163
164 double frame_timer;
165 double frame_last_pts;
166 double frame_last_delay;
167 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
168 int video_stream;
169 AVStream *video_st;
170 PacketQueue videoq;
171 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
172 double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
173 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
174 int pictq_size, pictq_rindex, pictq_windex;
175 SDL_mutex *pictq_mutex;
176 SDL_cond *pictq_cond;
177 struct SwsContext *img_convert_ctx;
178
179 // QETimer *video_timer;
180 char filename[1024];
181 int width, height, xleft, ytop;
182
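    /* counts of non-monotonic dts/pts values seen while decoding video;
       used to decide whether pts or dts is the more trustworthy timestamp */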
183 int64_t faulty_pts;
184 int64_t faulty_dts;
185 int64_t last_dts_for_fault_detection;
186 int64_t last_pts_for_fault_detection;
187
188 } VideoState;
189
190 static void show_help(void);
191 static int audio_write_get_buf_size(VideoState *is);
192
193 /* options specified by the user */
194 static AVInputFormat *file_iformat;
195 static const char *input_filename;
196 static int fs_screen_width;
197 static int fs_screen_height;
198 static int screen_width = 0;
199 static int screen_height = 0;
200 static int frame_width = 0;
201 static int frame_height = 0;
202 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
203 static int audio_disable;
204 static int video_disable;
205 static int wanted_audio_stream= 0;
206 static int wanted_video_stream= 0;
207 static int wanted_subtitle_stream= -1;
208 static int seek_by_bytes;
209 static int display_disable;
210 static int show_status = 1;
211 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
212 static int64_t start_time = AV_NOPTS_VALUE;
213 static int debug = 0;
214 static int debug_mv = 0;
215 static int step = 0;
216 static int thread_count = 1;
217 static int workaround_bugs = 1;
218 static int fast = 0;
219 static int genpts = 0;
220 static int lowres = 0;
221 static int idct = FF_IDCT_AUTO;
222 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
223 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
224 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
225 static int error_recognition = FF_ER_CAREFUL;
226 static int error_concealment = 3;
227 static int decoder_reorder_pts= -1;
228
229 /* current context */
230 static int is_full_screen;
231 static VideoState *cur_stream;
232 static int64_t audio_callback_time;
233
234 static AVPacket flush_pkt;
235
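/* custom SDL events used to hand work (overlay allocation, refresh, quit) back to the main event loop */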
236 #define FF_ALLOC_EVENT (SDL_USEREVENT)
237 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
238 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
239
240 static SDL_Surface *screen;
241
242 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
243
244 /* packet queue handling */
245 static void packet_queue_init(PacketQueue *q)
246 {
247 memset(q, 0, sizeof(PacketQueue));
248 q->mutex = SDL_CreateMutex();
249 q->cond = SDL_CreateCond();
250 packet_queue_put(q, &flush_pkt);
251 }
252
253 static void packet_queue_flush(PacketQueue *q)
254 {
255 AVPacketList *pkt, *pkt1;
256
257 SDL_LockMutex(q->mutex);
258 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
259 pkt1 = pkt->next;
260 av_free_packet(&pkt->pkt);
261 av_freep(&pkt);
262 }
263 q->last_pkt = NULL;
264 q->first_pkt = NULL;
265 q->nb_packets = 0;
266 q->size = 0;
267 SDL_UnlockMutex(q->mutex);
268 }
269
270 static void packet_queue_end(PacketQueue *q)
271 {
272 packet_queue_flush(q);
273 SDL_DestroyMutex(q->mutex);
274 SDL_DestroyCond(q->cond);
275 }
276
277 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
278 {
279 AVPacketList *pkt1;
280
281 /* duplicate the packet */
282 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
283 return -1;
284
285 pkt1 = av_malloc(sizeof(AVPacketList));
286 if (!pkt1)
287 return -1;
288 pkt1->pkt = *pkt;
289 pkt1->next = NULL;
290
291
292 SDL_LockMutex(q->mutex);
293
294 if (!q->last_pkt)
295
296 q->first_pkt = pkt1;
297 else
298 q->last_pkt->next = pkt1;
299 q->last_pkt = pkt1;
300 q->nb_packets++;
301 q->size += pkt1->pkt.size + sizeof(*pkt1);
302 /* XXX: should duplicate packet data in DV case */
303 SDL_CondSignal(q->cond);
304
305 SDL_UnlockMutex(q->mutex);
306 return 0;
307 }
308
309 static void packet_queue_abort(PacketQueue *q)
310 {
311 SDL_LockMutex(q->mutex);
312
313 q->abort_request = 1;
314
315 SDL_CondSignal(q->cond);
316
317 SDL_UnlockMutex(q->mutex);
318 }
319
320 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
321 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
322 {
323 AVPacketList *pkt1;
324 int ret;
325
326 SDL_LockMutex(q->mutex);
327
328 for(;;) {
329 if (q->abort_request) {
330 ret = -1;
331 break;
332 }
333
334 pkt1 = q->first_pkt;
335 if (pkt1) {
336 q->first_pkt = pkt1->next;
337 if (!q->first_pkt)
338 q->last_pkt = NULL;
339 q->nb_packets--;
340 q->size -= pkt1->pkt.size + sizeof(*pkt1);
341 *pkt = pkt1->pkt;
342 av_free(pkt1);
343 ret = 1;
344 break;
345 } else if (!block) {
346 ret = 0;
347 break;
348 } else {
349 SDL_CondWait(q->cond, q->mutex);
350 }
351 }
352 SDL_UnlockMutex(q->mutex);
353 return ret;
354 }
355
356 static inline void fill_rectangle(SDL_Surface *screen,
357 int x, int y, int w, int h, int color)
358 {
359 SDL_Rect rect;
360 rect.x = x;
361 rect.y = y;
362 rect.w = w;
363 rect.h = h;
364 SDL_FillRect(screen, &rect, color);
365 }
366
367 #if 0
368 /* draw only the border of a rectangle */
369 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
370 {
371 int w1, w2, h1, h2;
372
373 /* fill the background */
374 w1 = x;
375 if (w1 < 0)
376 w1 = 0;
377 w2 = s->width - (x + w);
378 if (w2 < 0)
379 w2 = 0;
380 h1 = y;
381 if (h1 < 0)
382 h1 = 0;
383 h2 = s->height - (y + h);
384 if (h2 < 0)
385 h2 = 0;
386 fill_rectangle(screen,
387 s->xleft, s->ytop,
388 w1, s->height,
389 color);
390 fill_rectangle(screen,
391 s->xleft + s->width - w2, s->ytop,
392 w2, s->height,
393 color);
394 fill_rectangle(screen,
395 s->xleft + w1, s->ytop,
396 s->width - w1 - w2, h1,
397 color);
398 fill_rectangle(screen,
399 s->xleft + w1, s->ytop + s->height - h2,
400 s->width - w1 - w2, h2,
401 color);
402 }
403 #endif
404
405 #define ALPHA_BLEND(a, oldp, newp, s)\
406 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
407
408 #define RGBA_IN(r, g, b, a, s)\
409 {\
410 unsigned int v = ((const uint32_t *)(s))[0];\
411 a = (v >> 24) & 0xff;\
412 r = (v >> 16) & 0xff;\
413 g = (v >> 8) & 0xff;\
414 b = v & 0xff;\
415 }
416
417 #define YUVA_IN(y, u, v, a, s, pal)\
418 {\
419 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
420 a = (val >> 24) & 0xff;\
421 y = (val >> 16) & 0xff;\
422 u = (val >> 8) & 0xff;\
423 v = val & 0xff;\
424 }
425
426 #define YUVA_OUT(d, y, u, v, a)\
427 {\
428 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
429 }
430
431
432 #define BPP 1
433
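/* alpha-blend a palettized (PAL8) subtitle rectangle onto a YUV420P picture;
   luma is blended per pixel, chroma is averaged over 2x2 blocks because it is subsampled */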
434 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
435 {
436 int wrap, wrap3, width2, skip2;
437 int y, u, v, a, u1, v1, a1, w, h;
438 uint8_t *lum, *cb, *cr;
439 const uint8_t *p;
440 const uint32_t *pal;
441 int dstx, dsty, dstw, dsth;
442
443 dstw = av_clip(rect->w, 0, imgw);
444 dsth = av_clip(rect->h, 0, imgh);
445 dstx = av_clip(rect->x, 0, imgw - dstw);
446 dsty = av_clip(rect->y, 0, imgh - dsth);
447 lum = dst->data[0] + dsty * dst->linesize[0];
448 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
449 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
450
451 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
452 skip2 = dstx >> 1;
453 wrap = dst->linesize[0];
454 wrap3 = rect->pict.linesize[0];
455 p = rect->pict.data[0];
456 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
457
458 if (dsty & 1) {
459 lum += dstx;
460 cb += skip2;
461 cr += skip2;
462
463 if (dstx & 1) {
464 YUVA_IN(y, u, v, a, p, pal);
465 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
466 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
467 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
468 cb++;
469 cr++;
470 lum++;
471 p += BPP;
472 }
473 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
474 YUVA_IN(y, u, v, a, p, pal);
475 u1 = u;
476 v1 = v;
477 a1 = a;
478 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
479
480 YUVA_IN(y, u, v, a, p + BPP, pal);
481 u1 += u;
482 v1 += v;
483 a1 += a;
484 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
485 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
486 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
487 cb++;
488 cr++;
489 p += 2 * BPP;
490 lum += 2;
491 }
492 if (w) {
493 YUVA_IN(y, u, v, a, p, pal);
494 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
495 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
496 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
497 p++;
498 lum++;
499 }
500 p += wrap3 - dstw * BPP;
501 lum += wrap - dstw - dstx;
502 cb += dst->linesize[1] - width2 - skip2;
503 cr += dst->linesize[2] - width2 - skip2;
504 }
505 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
506 lum += dstx;
507 cb += skip2;
508 cr += skip2;
509
510 if (dstx & 1) {
511 YUVA_IN(y, u, v, a, p, pal);
512 u1 = u;
513 v1 = v;
514 a1 = a;
515 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
516 p += wrap3;
517 lum += wrap;
518 YUVA_IN(y, u, v, a, p, pal);
519 u1 += u;
520 v1 += v;
521 a1 += a;
522 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
524 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
525 cb++;
526 cr++;
527 p += -wrap3 + BPP;
528 lum += -wrap + 1;
529 }
530 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
531 YUVA_IN(y, u, v, a, p, pal);
532 u1 = u;
533 v1 = v;
534 a1 = a;
535 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
536
537 YUVA_IN(y, u, v, a, p + BPP, pal);
538 u1 += u;
539 v1 += v;
540 a1 += a;
541 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
542 p += wrap3;
543 lum += wrap;
544
545 YUVA_IN(y, u, v, a, p, pal);
546 u1 += u;
547 v1 += v;
548 a1 += a;
549 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
550
551 YUVA_IN(y, u, v, a, p + BPP, pal);
552 u1 += u;
553 v1 += v;
554 a1 += a;
555 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
556
557 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
558 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
559
560 cb++;
561 cr++;
562 p += -wrap3 + 2 * BPP;
563 lum += -wrap + 2;
564 }
565 if (w) {
566 YUVA_IN(y, u, v, a, p, pal);
567 u1 = u;
568 v1 = v;
569 a1 = a;
570 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
571 p += wrap3;
572 lum += wrap;
573 YUVA_IN(y, u, v, a, p, pal);
574 u1 += u;
575 v1 += v;
576 a1 += a;
577 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
579 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
580 cb++;
581 cr++;
582 p += -wrap3 + BPP;
583 lum += -wrap + 1;
584 }
585 p += wrap3 + (wrap3 - dstw * BPP);
586 lum += wrap + (wrap - dstw - dstx);
587 cb += dst->linesize[1] - width2 - skip2;
588 cr += dst->linesize[2] - width2 - skip2;
589 }
590 /* handle odd height */
591 if (h) {
592 lum += dstx;
593 cb += skip2;
594 cr += skip2;
595
596 if (dstx & 1) {
597 YUVA_IN(y, u, v, a, p, pal);
598 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
599 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
600 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
601 cb++;
602 cr++;
603 lum++;
604 p += BPP;
605 }
606 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
607 YUVA_IN(y, u, v, a, p, pal);
608 u1 = u;
609 v1 = v;
610 a1 = a;
611 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612
613 YUVA_IN(y, u, v, a, p + BPP, pal);
614 u1 += u;
615 v1 += v;
616 a1 += a;
617 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
618 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
619 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
620 cb++;
621 cr++;
622 p += 2 * BPP;
623 lum += 2;
624 }
625 if (w) {
626 YUVA_IN(y, u, v, a, p, pal);
627 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
628 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
629 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
630 }
631 }
632 }
633
634 static void free_subpicture(SubPicture *sp)
635 {
636 int i;
637
638 for (i = 0; i < sp->sub.num_rects; i++)
639 {
640 av_freep(&sp->sub.rects[i]->pict.data[0]);
641 av_freep(&sp->sub.rects[i]->pict.data[1]);
642 av_freep(&sp->sub.rects[i]);
643 }
644
645 av_free(sp->sub.rects);
646
647 memset(&sp->sub, 0, sizeof(AVSubtitle));
648 }
649
650 static void video_image_display(VideoState *is)
651 {
652 VideoPicture *vp;
653 SubPicture *sp;
654 AVPicture pict;
655 float aspect_ratio;
656 int width, height, x, y;
657 SDL_Rect rect;
658 int i;
659
660 vp = &is->pictq[is->pictq_rindex];
661 if (vp->bmp) {
662 /* XXX: use variable in the frame */
663 if (is->video_st->sample_aspect_ratio.num)
664 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
665 else if (is->video_st->codec->sample_aspect_ratio.num)
666 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
667 else
668 aspect_ratio = 0;
669 if (aspect_ratio <= 0.0)
670 aspect_ratio = 1.0;
671 aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
672 /* if an active format is indicated, then it overrides the
673 aspect ratio signalled by the MPEG stream */
674 #if 0
675 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
676 is->dtg_active_format = is->video_st->codec->dtg_active_format;
677 printf("dtg_active_format=%d\n", is->dtg_active_format);
678 }
679 #endif
680 #if 0
681 switch(is->video_st->codec->dtg_active_format) {
682 case FF_DTG_AFD_SAME:
683 default:
684 /* nothing to do */
685 break;
686 case FF_DTG_AFD_4_3:
687 aspect_ratio = 4.0 / 3.0;
688 break;
689 case FF_DTG_AFD_16_9:
690 aspect_ratio = 16.0 / 9.0;
691 break;
692 case FF_DTG_AFD_14_9:
693 aspect_ratio = 14.0 / 9.0;
694 break;
695 case FF_DTG_AFD_4_3_SP_14_9:
696 aspect_ratio = 14.0 / 9.0;
697 break;
698 case FF_DTG_AFD_16_9_SP_14_9:
699 aspect_ratio = 14.0 / 9.0;
700 break;
701 case FF_DTG_AFD_SP_4_3:
702 aspect_ratio = 4.0 / 3.0;
703 break;
704 }
705 #endif
706
707 if (is->subtitle_st)
708 {
709 if (is->subpq_size > 0)
710 {
711 sp = &is->subpq[is->subpq_rindex];
712
713 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
714 {
715 SDL_LockYUVOverlay (vp->bmp);
716
717 pict.data[0] = vp->bmp->pixels[0];
718 pict.data[1] = vp->bmp->pixels[2];
719 pict.data[2] = vp->bmp->pixels[1];
720
721 pict.linesize[0] = vp->bmp->pitches[0];
722 pict.linesize[1] = vp->bmp->pitches[2];
723 pict.linesize[2] = vp->bmp->pitches[1];
724
725 for (i = 0; i < sp->sub.num_rects; i++)
726 blend_subrect(&pict, sp->sub.rects[i],
727 vp->bmp->w, vp->bmp->h);
728
729 SDL_UnlockYUVOverlay (vp->bmp);
730 }
731 }
732 }
733
734
735 /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
736 height = is->height;
737 width = ((int)rint(height * aspect_ratio)) & ~1;
738 if (width > is->width) {
739 width = is->width;
740 height = ((int)rint(width / aspect_ratio)) & ~1;
741 }
742 x = (is->width - width) / 2;
743 y = (is->height - height) / 2;
744 if (!is->no_background) {
745 /* fill the background */
746 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
747 } else {
748 is->no_background = 0;
749 }
750 rect.x = is->xleft + x;
751 rect.y = is->ytop + y;
752 rect.w = width;
753 rect.h = height;
754 SDL_DisplayYUVOverlay(vp->bmp, &rect);
755 } else {
756 #if 0
757 fill_rectangle(screen,
758 is->xleft, is->ytop, is->width, is->height,
759 QERGB(0x00, 0x00, 0x00));
760 #endif
761 }
762 }
763
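/* positive modulo: wrap a into [0, b) even when a is negative */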
764 static inline int compute_mod(int a, int b)
765 {
766 a = a % b;
767 if (a >= 0)
768 return a;
769 else
770 return a + b;
771 }
772
773 static void video_audio_display(VideoState *s)
774 {
775 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
776 int ch, channels, h, h2, bgcolor, fgcolor;
777 int64_t time_diff;
778
779 /* compute display index : center on currently output samples */
780 channels = s->audio_st->codec->channels;
781 nb_display_channels = channels;
782 if (!s->paused) {
783 n = 2 * channels;
784 delay = audio_write_get_buf_size(s);
785 delay /= n;
786
787 /* to be more precise, we take into account the time spent since
788 the last buffer computation */
789 if (audio_callback_time) {
790 time_diff = av_gettime() - audio_callback_time;
791 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
792 }
793
794 delay -= s->width / 2;
795 if (delay < s->width)
796 delay = s->width;
797
798 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
799
800 h= INT_MIN;
801 for(i=0; i<1000; i+=channels){
802 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
803 int a= s->sample_array[idx];
804 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
805 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
806 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
807 int score= a-d;
808 if(h<score && (b^c)<0){
809 h= score;
810 i_start= idx;
811 }
812 }
813
814 s->last_i_start = i_start;
815 } else {
816 i_start = s->last_i_start;
817 }
818
819 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
820 fill_rectangle(screen,
821 s->xleft, s->ytop, s->width, s->height,
822 bgcolor);
823
824 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
825
826 /* total height for one channel */
827 h = s->height / nb_display_channels;
828 /* graph height / 2 */
829 h2 = (h * 9) / 20;
830 for(ch = 0;ch < nb_display_channels; ch++) {
831 i = i_start + ch;
832 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
833 for(x = 0; x < s->width; x++) {
834 y = (s->sample_array[i] * h2) >> 15;
835 if (y < 0) {
836 y = -y;
837 ys = y1 - y;
838 } else {
839 ys = y1;
840 }
841 fill_rectangle(screen,
842 s->xleft + x, ys, 1, y,
843 fgcolor);
844 i += channels;
845 if (i >= SAMPLE_ARRAY_SIZE)
846 i -= SAMPLE_ARRAY_SIZE;
847 }
848 }
849
850 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
851
852 for(ch = 1;ch < nb_display_channels; ch++) {
853 y = s->ytop + ch * h;
854 fill_rectangle(screen,
855 s->xleft, y, s->width, 1,
856 fgcolor);
857 }
858 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
859 }
860
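/* (re)create the SDL output surface, picking the window size from the
   command line, the video stream dimensions, or a 640x480 fallback */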
861 static int video_open(VideoState *is){
862 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
863 int w,h;
864
865 if(is_full_screen) flags |= SDL_FULLSCREEN;
866 else flags |= SDL_RESIZABLE;
867
868 if (is_full_screen && fs_screen_width) {
869 w = fs_screen_width;
870 h = fs_screen_height;
871 } else if(!is_full_screen && screen_width){
872 w = screen_width;
873 h = screen_height;
874 }else if (is->video_st && is->video_st->codec->width){
875 w = is->video_st->codec->width;
876 h = is->video_st->codec->height;
877 } else {
878 w = 640;
879 h = 480;
880 }
881 #ifndef __APPLE__
882 screen = SDL_SetVideoMode(w, h, 0, flags);
883 #else
884 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
885 screen = SDL_SetVideoMode(w, h, 24, flags);
886 #endif
887 if (!screen) {
888 fprintf(stderr, "SDL: could not set video mode - exiting\n");
889 return -1;
890 }
891 SDL_WM_SetCaption("FFplay", "FFplay");
892
893 is->width = screen->w;
894 is->height = screen->h;
895
896 return 0;
897 }
898
899 /* display the current picture, if any */
900 static void video_display(VideoState *is)
901 {
902 if(!screen)
903 video_open(cur_stream);
904 if (is->audio_st && is->show_audio)
905 video_audio_display(is);
906 else if (is->video_st)
907 video_image_display(is);
908 }
909
910 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
911 {
912 SDL_Event event;
913 event.type = FF_REFRESH_EVENT;
914 event.user.data1 = opaque;
915 SDL_PushEvent(&event);
916 return 0; /* 0 means stop timer */
917 }
918
919 /* schedule a video refresh in 'delay' ms */
920 static SDL_TimerID schedule_refresh(VideoState *is, int delay)
921 {
922 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
923 return SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
924 }
925
926 /* get the current audio clock value */
927 static double get_audio_clock(VideoState *is)
928 {
929 double pts;
930 int hw_buf_size, bytes_per_sec;
931 pts = is->audio_clock;
932 hw_buf_size = audio_write_get_buf_size(is);
933 bytes_per_sec = 0;
934 if (is->audio_st) {
935 bytes_per_sec = is->audio_st->codec->sample_rate *
936 2 * is->audio_st->codec->channels;
937 }
938 if (bytes_per_sec)
939 pts -= (double)hw_buf_size / bytes_per_sec;
940 return pts;
941 }
942
943 /* get the current video clock value */
944 static double get_video_clock(VideoState *is)
945 {
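    /* video_current_pts_drift is video_current_pts minus the time it was set,
       so adding the current time yields a continuously running video clock */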
946 if (is->paused) {
947 return is->video_current_pts;
948 } else {
949 return is->video_current_pts_drift + av_gettime() / 1000000.0;
950 }
951 }
952
953 /* get the current external clock value */
954 static double get_external_clock(VideoState *is)
955 {
956 int64_t ti;
957 ti = av_gettime();
958 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
959 }
960
961 /* get the current master clock value */
962 static double get_master_clock(VideoState *is)
963 {
964 double val;
965
966 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
967 if (is->video_st)
968 val = get_video_clock(is);
969 else
970 val = get_audio_clock(is);
971 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
972 if (is->audio_st)
973 val = get_audio_clock(is);
974 else
975 val = get_video_clock(is);
976 } else {
977 val = get_external_clock(is);
978 }
979 return val;
980 }
981
982 /* seek in the stream */
983 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
984 {
985 if (!is->seek_req) {
986 is->seek_pos = pos;
987 is->seek_rel = rel;
988 if (seek_by_bytes)
989 is->seek_flags |= AVSEEK_FLAG_BYTE;
990 is->seek_req = 1;
991 }
992 }
993
994 /* pause or resume the video */
995 static void stream_pause(VideoState *is)
996 {
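    /* when resuming from pause, advance frame_timer by the time spent paused
       (now minus the time of the last pts update) so playback does not rush to catch up */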
997 if (is->paused) {
998 is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
999 if(is->read_pause_return != AVERROR(ENOSYS)){
1000 is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1001 }
1002 is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1003 }
1004 is->paused = !is->paused;
1005 }
1006
1007 static double compute_frame_delay(double frame_current_pts, VideoState *is)
1008 {
1009 double actual_delay, delay, sync_threshold, ref_clock, diff;
1010
1011 /* compute nominal delay */
1012 delay = frame_current_pts - is->frame_last_pts;
1013 if (delay <= 0 || delay >= 10.0) {
1014 /* if incorrect delay, use previous one */
1015 delay = is->frame_last_delay;
1016 } else {
1017 is->frame_last_delay = delay;
1018 }
1019 is->frame_last_pts = frame_current_pts;
1020
1021 /* update delay to follow master synchronisation source */
1022 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1023 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1024 /* if video is slave, we try to correct big delays by
1025 duplicating or deleting a frame */
1026 ref_clock = get_master_clock(is);
1027 diff = frame_current_pts - ref_clock;
1028
1029 /* skip or repeat frame. We take into account the
1030 delay to compute the threshold. I still don't know
1031 if it is the best guess */
1032 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1033 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1034 if (diff <= -sync_threshold)
1035 delay = 0;
1036 else if (diff >= sync_threshold)
1037 delay = 2 * delay;
1038 }
1039 }
1040
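    /* frame_timer tracks the target wall-clock time of the next display;
       advance it by the (possibly corrected) delay */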
1041 is->frame_timer += delay;
1042 /* compute the REAL delay (we need to do that to avoid
1043 long-term errors) */
1044 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1045 if (actual_delay < 0.010) {
1046 /* XXX: should skip picture */
1047 actual_delay = 0.010;
1048 }
1049
1050 #if defined(DEBUG_SYNC)
1051 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1052 delay, actual_delay, frame_current_pts, -diff);
1053 #endif
1054
1055 return actual_delay;
1056 }
1057
1058 /* called to display each frame */
1059 static void video_refresh_timer(void *opaque)
1060 {
1061 VideoState *is = opaque;
1062 VideoPicture *vp;
1063
1064 SubPicture *sp, *sp2;
1065
1066 if (is->video_st) {
1067 if (is->pictq_size == 0) {
1068 // fprintf(stderr, "Internal error detected in the SDL timer\n");
1069 } else {
1070 /* dequeue the picture */
1071 vp = &is->pictq[is->pictq_rindex];
1072
1073 /* update current video pts */
1074 is->video_current_pts = vp->pts;
1075 is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1076
1077 if(is->subtitle_st) {
1078 if (is->subtitle_stream_changed) {
1079 SDL_LockMutex(is->subpq_mutex);
1080
1081 while (is->subpq_size) {
1082 free_subpicture(&is->subpq[is->subpq_rindex]);
1083
1084 /* update queue size and signal for next picture */
1085 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1086 is->subpq_rindex = 0;
1087
1088 is->subpq_size--;
1089 }
1090 is->subtitle_stream_changed = 0;
1091
1092 SDL_CondSignal(is->subpq_cond);
1093 SDL_UnlockMutex(is->subpq_mutex);
1094 } else {
1095 if (is->subpq_size > 0) {
1096 sp = &is->subpq[is->subpq_rindex];
1097
1098 if (is->subpq_size > 1)
1099 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1100 else
1101 sp2 = NULL;
1102
1103 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1104 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1105 {
1106 free_subpicture(sp);
1107
1108 /* update queue size and signal for next picture */
1109 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1110 is->subpq_rindex = 0;
1111
1112 SDL_LockMutex(is->subpq_mutex);
1113 is->subpq_size--;
1114 SDL_CondSignal(is->subpq_cond);
1115 SDL_UnlockMutex(is->subpq_mutex);
1116 }
1117 }
1118 }
1119 }
1120
1121 /* display picture */
1122 video_display(is);
1123
1124 /* update queue size and signal for next picture */
1125 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1126 is->pictq_rindex = 0;
1127
1128 SDL_LockMutex(is->pictq_mutex);
1129 vp->timer_id= 0;
1130 is->pictq_size--;
1131 SDL_CondSignal(is->pictq_cond);
1132 SDL_UnlockMutex(is->pictq_mutex);
1133 }
1134 } else if (is->audio_st) {
1135 /* draw the next audio frame */
1136
1137 schedule_refresh(is, 40);
1138
1139 /* if only an audio stream is present, then display the audio bars (better
1140 than nothing, just to test the implementation) */
1141
1142 /* display picture */
1143 video_display(is);
1144 } else {
1145 schedule_refresh(is, 100);
1146 }
1147 if (show_status) {
1148 static int64_t last_time;
1149 int64_t cur_time;
1150 int aqsize, vqsize, sqsize;
1151 double av_diff;
1152
1153 cur_time = av_gettime();
1154 if (!last_time || (cur_time - last_time) >= 30000) {
1155 aqsize = 0;
1156 vqsize = 0;
1157 sqsize = 0;
1158 if (is->audio_st)
1159 aqsize = is->audioq.size;
1160 if (is->video_st)
1161 vqsize = is->videoq.size;
1162 if (is->subtitle_st)
1163 sqsize = is->subtitleq.size;
1164 av_diff = 0;
1165 if (is->audio_st && is->video_st)
1166 av_diff = get_audio_clock(is) - get_video_clock(is);
1167 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%Ld/%Ld \r",
1168 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
1169 fflush(stdout);
1170 last_time = cur_time;
1171 }
1172 }
1173 }
1174
1175 /* allocate a picture (this needs to be done in the main thread to avoid
1176 potential locking problems) */
1177 static void alloc_picture(void *opaque)
1178 {
1179 VideoState *is = opaque;
1180 VideoPicture *vp;
1181
1182 vp = &is->pictq[is->pictq_windex];
1183
1184 if (vp->bmp)
1185 SDL_FreeYUVOverlay(vp->bmp);
1186
1187 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1188 is->video_st->codec->height,
1189 SDL_YV12_OVERLAY,
1190 screen);
1191 vp->width = is->video_st->codec->width;
1192 vp->height = is->video_st->codec->height;
1193
1194 SDL_LockMutex(is->pictq_mutex);
1195 vp->allocated = 1;
1196 SDL_CondSignal(is->pictq_cond);
1197 SDL_UnlockMutex(is->pictq_mutex);
1198 }
1199
1200 /**
1201 *
1202 * @param pts the dts of the pkt / pts of the frame, guessed if not known
1203 */
1204 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1205 {
1206 VideoPicture *vp;
1207 int dst_pix_fmt;
1208
1209 /* wait until we have space to put a new picture */
1210 SDL_LockMutex(is->pictq_mutex);
1211 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1212 !is->videoq.abort_request) {
1213 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1214 }
1215 SDL_UnlockMutex(is->pictq_mutex);
1216
1217 if (is->videoq.abort_request)
1218 return -1;
1219
1220 vp = &is->pictq[is->pictq_windex];
1221
1222 /* alloc or resize hardware picture buffer */
1223 if (!vp->bmp ||
1224 vp->width != is->video_st->codec->width ||
1225 vp->height != is->video_st->codec->height) {
1226 SDL_Event event;
1227
1228 vp->allocated = 0;
1229
1230 /* the allocation must be done in the main thread to avoid
1231 locking problems */
1232 event.type = FF_ALLOC_EVENT;
1233 event.user.data1 = is;
1234 SDL_PushEvent(&event);
1235
1236 /* wait until the picture is allocated */
1237 SDL_LockMutex(is->pictq_mutex);
1238 while (!vp->allocated && !is->videoq.abort_request) {
1239 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1240 }
1241 SDL_UnlockMutex(is->pictq_mutex);
1242
1243 if (is->videoq.abort_request)
1244 return -1;
1245 }
1246
1247 /* if the frame is not skipped, then display it */
1248 if (vp->bmp) {
1249 AVPicture pict;
1250
1251 /* get a pointer on the bitmap */
1252 SDL_LockYUVOverlay (vp->bmp);
1253
1254 dst_pix_fmt = PIX_FMT_YUV420P;
1255 memset(&pict,0,sizeof(AVPicture));
1256 pict.data[0] = vp->bmp->pixels[0];
1257 pict.data[1] = vp->bmp->pixels[2];
1258 pict.data[2] = vp->bmp->pixels[1];
1259
1260 pict.linesize[0] = vp->bmp->pitches[0];
1261 pict.linesize[1] = vp->bmp->pitches[2];
1262 pict.linesize[2] = vp->bmp->pitches[1];
1263 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1264 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1265 is->video_st->codec->width, is->video_st->codec->height,
1266 is->video_st->codec->pix_fmt,
1267 is->video_st->codec->width, is->video_st->codec->height,
1268 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1269 if (is->img_convert_ctx == NULL) {
1270 fprintf(stderr, "Cannot initialize the conversion context\n");
1271 exit(1);
1272 }
1273 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1274 0, is->video_st->codec->height, pict.data, pict.linesize);
1275 /* update the bitmap content */
1276 SDL_UnlockYUVOverlay(vp->bmp);
1277
1278 vp->pts = pts;
1279
1280 /* now we can update the picture count */
1281 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1282 is->pictq_windex = 0;
1283 SDL_LockMutex(is->pictq_mutex);
1284 is->pictq_size++;
1285 //We must schedule inside the mutex since we must store the timer id before the timer expires, or we might end up freeing an already freed id
1286 vp->timer_id= schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
1287 SDL_UnlockMutex(is->pictq_mutex);
1288 }
1289 return 0;
1290 }
1291
1292 /**
1293 * compute the exact PTS for the picture if it is omitted in the stream
1294 * @param pts1 the dts of the pkt / pts of the frame
1295 */
1296 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1297 {
1298 double frame_delay, pts;
1299
1300 pts = pts1;
1301
1302 if (pts != 0) {
1303 /* update video clock with pts, if present */
1304 is->video_clock = pts;
1305 } else {
1306 pts = is->video_clock;
1307 }
1308 /* update video clock for next frame */
1309 frame_delay = av_q2d(is->video_st->codec->time_base);
1310 /* for MPEG2, the frame can be repeated, so we update the
1311 clock accordingly */
1312 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1313 is->video_clock += frame_delay;
1314
1315 #if defined(DEBUG_SYNC) && 0
1316 {
1317 int ftype;
1318 if (src_frame->pict_type == FF_B_TYPE)
1319 ftype = 'B';
1320 else if (src_frame->pict_type == FF_I_TYPE)
1321 ftype = 'I';
1322 else
1323 ftype = 'P';
1324 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1325 ftype, pts, pts1);
1326 }
1327 #endif
1328 return queue_picture(is, src_frame, pts);
1329 }
1330
1331 static int video_thread(void *arg)
1332 {
1333 VideoState *is = arg;
1334 AVPacket pkt1, *pkt = &pkt1;
1335 int len1, got_picture, i;
1336 AVFrame *frame= avcodec_alloc_frame();
1337 double pts;
1338
1339 for(;;) {
1340 while (is->paused && !is->videoq.abort_request) {
1341 SDL_Delay(10);
1342 }
1343 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1344 break;
1345
1346 if(pkt->data == flush_pkt.data){
1347 avcodec_flush_buffers(is->video_st->codec);
1348
1349 SDL_LockMutex(is->pictq_mutex);
1350 //Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1351 for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
1352 if(is->pictq[i].timer_id){
1353 SDL_RemoveTimer(is->pictq[i].timer_id);
1354 is->pictq[i].timer_id=0;
1355 schedule_refresh(is, 1);
1356 }
1357 }
1358 while (is->pictq_size && !is->videoq.abort_request) {
1359 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1360 }
1361 SDL_UnlockMutex(is->pictq_mutex);
1362
1363 is->last_dts_for_fault_detection=
1364 is->last_pts_for_fault_detection= INT64_MIN;
1365 is->frame_last_pts= AV_NOPTS_VALUE;
1366 is->frame_last_delay = 0;
1367
1368 continue;
1369 }
1370
1371 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1372 this packet, if any */
1373 is->video_st->codec->reordered_opaque= pkt->pts;
1374 len1 = avcodec_decode_video2(is->video_st->codec,
1375 frame, &got_picture,
1376 pkt);
1377
1378 if (got_picture) {
1379 if(pkt->dts != AV_NOPTS_VALUE){
1380 is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
1381 is->last_dts_for_fault_detection= pkt->dts;
1382 }
1383 if(frame->reordered_opaque != AV_NOPTS_VALUE){
1384 is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
1385 is->last_pts_for_fault_detection= frame->reordered_opaque;
1386 }
1387 }
1388
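        /* pick the timestamp: prefer the decoder-reordered pts when reordering is forced,
           when pts looks more reliable than dts, or when there is no dts; otherwise fall back to the packet dts */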
1389 if( ( decoder_reorder_pts==1
1390 || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
1391 || pkt->dts == AV_NOPTS_VALUE)
1392 && frame->reordered_opaque != AV_NOPTS_VALUE)
1393 pts= frame->reordered_opaque;
1394 else if(pkt->dts != AV_NOPTS_VALUE)
1395 pts= pkt->dts;
1396 else
1397 pts= 0;
1398 pts *= av_q2d(is->video_st->time_base);
1399
1400 // if (len1 < 0)
1401 // break;
1402 if (got_picture) {
1403 if (output_picture2(is, frame, pts) < 0)
1404 goto the_end;
1405 }
1406 av_free_packet(pkt);
1407 if (step)
1408 if (cur_stream)
1409 stream_pause(cur_stream);
1410 }
1411 the_end:
1412 av_free(frame);
1413 return 0;
1414 }
1415
1416 static int subtitle_thread(void *arg)
1417 {
1418 VideoState *is = arg;
1419 SubPicture *sp;
1420 AVPacket pkt1, *pkt = &pkt1;
1421 int len1, got_subtitle;
1422 double pts;
1423 int i, j;
1424 int r, g, b, y, u, v, a;
1425
1426 for(;;) {
1427 while (is->paused && !is->subtitleq.abort_request) {
1428 SDL_Delay(10);
1429 }
1430 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1431 break;
1432
1433 if(pkt->data == flush_pkt.data){
1434 avcodec_flush_buffers(is->subtitle_st->codec);
1435 continue;
1436 }
1437 SDL_LockMutex(is->subpq_mutex);
1438 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1439 !is->subtitleq.abort_request) {
1440 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1441 }
1442 SDL_UnlockMutex(is->subpq_mutex);
1443
1444 if (is->subtitleq.abort_request)
1445 goto the_end;
1446
1447 sp = &is->subpq[is->subpq_windex];
1448
1449 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1450 this packet, if any */
1451 pts = 0;
1452 if (pkt->pts != AV_NOPTS_VALUE)
1453 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1454
1455 len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1456 &sp->sub, &got_subtitle,
1457 pkt);
1458 // if (len1 < 0)
1459 // break;
1460 if (got_subtitle && sp->sub.format == 0) {
1461 sp->pts = pts;
1462
1463 for (i = 0; i < sp->sub.num_rects; i++)
1464 {
1465 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1466 {
1467 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1468 y = RGB_TO_Y_CCIR(r, g, b);
1469 u = RGB_TO_U_CCIR(r, g, b, 0);
1470 v = RGB_TO_V_CCIR(r, g, b, 0);
1471 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1472 }
1473 }
1474
1475 /* now we can update the picture count */
1476 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1477 is->subpq_windex = 0;
1478 SDL_LockMutex(is->subpq_mutex);
1479 is->subpq_size++;
1480 SDL_UnlockMutex(is->subpq_mutex);
1481 }
1482 av_free_packet(pkt);
1483 // if (step)
1484 // if (cur_stream)
1485 // stream_pause(cur_stream);
1486 }
1487 the_end:
1488 return 0;
1489 }
1490
1491 /* copy samples for viewing in editor window */
1492 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1493 {
1494 int size, len, channels;
1495
1496 channels = is->audio_st->codec->channels;
1497
1498 size = samples_size / sizeof(short);
1499 while (size > 0) {
1500 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1501 if (len > size)
1502 len = size;
1503 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1504 samples += len;
1505 is->sample_array_index += len;
1506 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1507 is->sample_array_index = 0;
1508 size -= len;
1509 }
1510 }
1511
1512 /* return the new audio buffer size (samples can be added or deleted
1513 to get better sync if video or the external clock is the master) */
1514 static int synchronize_audio(VideoState *is, short *samples,
1515 int samples_size1, double pts)
1516 {
1517 int n, samples_size;
1518 double ref_clock;
1519
1520 n = 2 * is->audio_st->codec->channels;
1521 samples_size = samples_size1;
1522
1523 /* if not master, then we try to remove or add samples to correct the clock */
1524 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1525 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1526 double diff, avg_diff;
1527 int wanted_size, min_size, max_size, nb_samples;
1528
1529 ref_clock = get_master_clock(is);
1530 diff = get_audio_clock(is) - ref_clock;
1531
1532 if (diff < AV_NOSYNC_THRESHOLD) {
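        /* keep an exponentially weighted running average of the A-V difference */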
1533 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1534 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1535 /* not enough measurements to have a correct estimate */
1536 is->audio_diff_avg_count++;
1537 } else {
1538 /* estimate the A-V difference */
1539 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1540
1541 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1542 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1543 nb_samples = samples_size / n;
1544
1545 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1546 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1547 if (wanted_size < min_size)
1548 wanted_size = min_size;
1549 else if (wanted_size > max_size)
1550 wanted_size = max_size;
1551
1552 /* add or remove samples to correct the sync */
1553 if (wanted_size < samples_size) {
1554 /* remove samples */
1555 samples_size = wanted_size;
1556 } else if (wanted_size > samples_size) {
1557 uint8_t *samples_end, *q;
1558 int nb;
1559
1560 /* add samples */
1561 nb = (wanted_size - samples_size);
1562 samples_end = (uint8_t *)samples + samples_size - n;
1563 q = samples_end + n;
1564 while (nb > 0) {
1565 memcpy(q, samples_end, n);
1566 q += n;
1567 nb -= n;
1568 }
1569 samples_size = wanted_size;
1570 }
1571 }
1572 #if 0
1573 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1574 diff, avg_diff, samples_size - samples_size1,
1575 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1576 #endif
1577 }
1578 } else {
1579 /* the difference is too big: it may be due to initial PTS errors, so
1580 reset the A-V filter */
1581 is->audio_diff_avg_count = 0;
1582 is->audio_diff_cum = 0;
1583 }
1584 }
1585
1586 return samples_size;
1587 }
1588
1589 /* decode one audio frame and return its uncompressed size */
1590 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1591 {
1592 AVPacket *pkt_temp = &is->audio_pkt_temp;
1593 AVPacket *pkt = &is->audio_pkt;
1594 AVCodecContext *dec= is->audio_st->codec;
1595 int n, len1, data_size;
1596 double pts;
1597
1598 for(;;) {
1599 /* NOTE: the audio packet can contain several frames */
1600 while (pkt_temp->size > 0) {
1601 data_size = sizeof(is->audio_buf1);
1602 len1 = avcodec_decode_audio3(dec,
1603 (int16_t *)is->audio_buf1, &data_size,
1604 pkt_temp);
1605 if (len1 < 0) {
1606 /* if error, we skip the frame */
1607 pkt_temp->size = 0;
1608 break;
1609 }
1610
1611 pkt_temp->data += len1;
1612 pkt_temp->size -= len1;
1613 if (data_size <= 0)
1614 continue;
1615
1616 if (dec->sample_fmt != is->audio_src_fmt) {
1617 if (is->reformat_ctx)
1618 av_audio_convert_free(is->reformat_ctx);
1619 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1620 dec->sample_fmt, 1, NULL, 0);
1621 if (!is->reformat_ctx) {
1622 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1623 avcodec_get_sample_fmt_name(dec->sample_fmt),
1624 avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1625 break;
1626 }
1627 is->audio_src_fmt= dec->sample_fmt;
1628 }
1629
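            /* convert the decoded samples to signed 16-bit, the format SDL audio was opened with */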
1630 if (is->reformat_ctx) {
1631 const void *ibuf[6]= {is->audio_buf1};
1632 void *obuf[6]= {is->audio_buf2};
1633 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1634 int ostride[6]= {2};
1635 int len= data_size/istride[0];
1636 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1637 printf("av_audio_convert() failed\n");
1638 break;
1639 }
1640 is->audio_buf= is->audio_buf2;
1641 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
1642 remove this legacy cruft */
1643 data_size= len*2;
1644 }else{
1645 is->audio_buf= is->audio_buf1;
1646 }
1647
1648 /* if no pts, then compute it */
1649 pts = is->audio_clock;
1650 *pts_ptr = pts;
1651 n = 2 * dec->channels;
1652 is->audio_clock += (double)data_size /
1653 (double)(n * dec->sample_rate);
1654 #if defined(DEBUG_SYNC)
1655 {
1656 static double last_clock;
1657 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1658 is->audio_clock - last_clock,
1659 is->audio_clock, pts);
1660 last_clock = is->audio_clock;
1661 }
1662 #endif
1663 return data_size;
1664 }
1665
1666 /* free the current packet */
1667 if (pkt->data)
1668 av_free_packet(pkt);
1669
1670 if (is->paused || is->audioq.abort_request) {
1671 return -1;
1672 }
1673
1674 /* read next packet */
1675 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1676 return -1;
1677 if(pkt->data == flush_pkt.data){
1678 avcodec_flush_buffers(dec);
1679 continue;
1680 }
1681
1682 pkt_temp->data = pkt->data;
1683 pkt_temp->size = pkt->size;
1684
1685 /* update the audio clock with the pts, if available */
1686 if (pkt->pts != AV_NOPTS_VALUE) {
1687 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1688 }
1689 }
1690 }
1691
1692 /* get the current audio output buffer size, in bytes. With SDL, we
1693 cannot get precise information */
1694 static int audio_write_get_buf_size(VideoState *is)
1695 {
1696 return is->audio_buf_size - is->audio_buf_index;
1697 }
1698
1699
1700 /* prepare a new audio buffer */
1701 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1702 {
1703 VideoState *is = opaque;
1704 int audio_size, len1;
1705 double pts;
1706
1707 audio_callback_time = av_gettime();
1708
1709 while (len > 0) {
1710 if (is->audio_buf_index >= is->audio_buf_size) {
1711 audio_size = audio_decode_frame(is, &pts);
1712 if (audio_size < 0) {
1713 /* if error, just output silence */
1714 is->audio_buf = is->audio_buf1;
1715 is->audio_buf_size = 1024;
1716 memset(is->audio_buf, 0, is->audio_buf_size);
1717 } else {
1718 if (is->show_audio)
1719 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1720 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1721 pts);
1722 is->audio_buf_size = audio_size;
1723 }
1724 is->audio_buf_index = 0;
1725 }
1726 len1 = is->audio_buf_size - is->audio_buf_index;
1727 if (len1 > len)
1728 len1 = len;
1729 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1730 len -= len1;
1731 stream += len1;
1732 is->audio_buf_index += len1;
1733 }
1734 }
1735
1736 /* open a given stream. Return 0 if OK */
1737 static int stream_component_open(VideoState *is, int stream_index)
1738 {
1739 AVFormatContext *ic = is->ic;
1740 AVCodecContext *enc;
1741 AVCodec *codec;
1742 SDL_AudioSpec wanted_spec, spec;
1743
1744 if (stream_index < 0 || stream_index >= ic->nb_streams)
1745 return -1;
1746 enc = ic->streams[stream_index]->codec;
1747
1748 /* prepare audio output */
1749 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1750 if (enc->channels > 0) {
1751 enc->request_channels = FFMIN(2, enc->channels);
1752 } else {
1753 enc->request_channels = 2;
1754 }
1755 }
1756
1757 codec = avcodec_find_decoder(enc->codec_id);
1758 enc->debug_mv = debug_mv;
1759 enc->debug = debug;
1760 enc->workaround_bugs = workaround_bugs;
1761 enc->lowres = lowres;
1762 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1763 enc->idct_algo= idct;
1764 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1765 enc->skip_frame= skip_frame;
1766 enc->skip_idct= skip_idct;
1767 enc->skip_loop_filter= skip_loop_filter;
1768 enc->error_recognition= error_recognition;
1769 enc->error_concealment= error_concealment;
1770 avcodec_thread_init(enc, thread_count);
1771
1772 set_context_opts(enc, avcodec_opts[enc->codec_type], 0);
1773
1774 if (!codec ||
1775 avcodec_open(enc, codec) < 0)
1776 return -1;
1777
1778 /* prepare audio output */
1779 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1780 wanted_spec.freq = enc->sample_rate;
1781 wanted_spec.format = AUDIO_S16SYS;
1782 wanted_spec.channels = enc->channels;
1783 wanted_spec.silence = 0;
1784 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1785 wanted_spec.callback = sdl_audio_callback;
1786 wanted_spec.userdata = is;
1787 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1788 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1789 return -1;
1790 }
1791 is->audio_hw_buf_size = spec.size;
1792 is->audio_src_fmt= SAMPLE_FMT_S16;
1793 }
1794
1795 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
1796 switch(enc->codec_type) {
1797 case CODEC_TYPE_AUDIO:
1798 is->audio_stream = stream_index;
1799 is->audio_st = ic->streams[stream_index];
1800 is->audio_buf_size = 0;
1801 is->audio_buf_index = 0;
1802
1803 /* init averaging filter */
1804 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1805 is->audio_diff_avg_count = 0;
1806 /* since we do not have a precise enough measure of the audio fifo fullness,
1807 we correct audio sync only if the error is larger than this threshold */
1808 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1809
1810 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1811 packet_queue_init(&is->audioq);
1812 SDL_PauseAudio(0);
1813 break;
1814 case CODEC_TYPE_VIDEO:
1815 is->video_stream = stream_index;
1816 is->video_st = ic->streams[stream_index];
1817
1818 is->frame_timer = (double)av_gettime() / 1000000.0;
1819 // is->video_current_pts_time = av_gettime();
1820
1821 packet_queue_init(&is->videoq);
1822 is->video_tid = SDL_CreateThread(video_thread, is);
1823 break;
1824 case CODEC_TYPE_SUBTITLE:
1825 is->subtitle_stream = stream_index;
1826 is->subtitle_st = ic->streams[stream_index];
1827 packet_queue_init(&is->subtitleq);
1828
1829 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1830 break;
1831 default:
1832 break;
1833 }
1834 return 0;
1835 }
1836
1837 static void stream_component_close(VideoState *is, int stream_index)
1838 {
1839 AVFormatContext *ic = is->ic;
1840 AVCodecContext *enc;
1841
1842 if (stream_index < 0 || stream_index >= ic->nb_streams)
1843 return;
1844 enc = ic->streams[stream_index]->codec;
1845
1846 switch(enc->codec_type) {
1847 case CODEC_TYPE_AUDIO:
1848 packet_queue_abort(&is->audioq);
1849
1850 SDL_CloseAudio();
1851
1852 packet_queue_end(&is->audioq);
1853 if (is->reformat_ctx)
1854 av_audio_convert_free(is->reformat_ctx);
1855 break;
1856 case CODEC_TYPE_VIDEO:
1857 packet_queue_abort(&is->videoq);
1858
1859 /* note: we also signal this mutex to make sure we unblock the
1860 video thread in all cases */
1861 SDL_LockMutex(is->pictq_mutex);
1862 SDL_CondSignal(is->pictq_cond);
1863 SDL_UnlockMutex(is->pictq_mutex);
1864
1865 SDL_WaitThread(is->video_tid, NULL);
1866
1867 packet_queue_end(&is->videoq);
1868 break;
1869 case CODEC_TYPE_SUBTITLE:
1870 packet_queue_abort(&is->subtitleq);
1871
1872 /* note: we also signal this mutex to make sure we unblock the
1873 subtitle thread in all cases */
1874 SDL_LockMutex(is->subpq_mutex);
1875 is->subtitle_stream_changed = 1;
1876
1877 SDL_CondSignal(is->subpq_cond);
1878 SDL_UnlockMutex(is->subpq_mutex);
1879
1880 SDL_WaitThread(is->subtitle_tid, NULL);
1881
1882 packet_queue_end(&is->subtitleq);
1883 break;
1884 default:
1885 break;
1886 }
1887
1888 ic->streams[stream_index]->discard = AVDISCARD_ALL;
1889 avcodec_close(enc);
1890 switch(enc->codec_type) {
1891 case CODEC_TYPE_AUDIO:
1892 is->audio_st = NULL;
1893 is->audio_stream = -1;
1894 break;
1895 case CODEC_TYPE_VIDEO:
1896 is->video_st = NULL;
1897 is->video_stream = -1;
1898 break;
1899 case CODEC_TYPE_SUBTITLE:
1900 is->subtitle_st = NULL;
1901 is->subtitle_stream = -1;
1902 break;
1903 default:
1904 break;
1905 }
1906 }
1907
1908 /* since we have only one decoding thread, we can use a global
1909 variable instead of a thread local variable */
1910 static VideoState *global_video_state;
1911
1912 static int decode_interrupt_cb(void)
1913 {
1914 return (global_video_state && global_video_state->abort_request);
1915 }
1916
1917 /* this thread gets the stream from the disk or the network */
1918 static int decode_thread(void *arg)
1919 {
1920 VideoState *is = arg;
1921 AVFormatContext *ic;
1922 int err, i, ret, video_index, audio_index, subtitle_index;
1923 AVPacket pkt1, *pkt = &pkt1;
1924 AVFormatParameters params, *ap = &params;
1925 int eof=0;
1926
1927 ic = avformat_alloc_context();
1928
1929 video_index = -1;
1930 audio_index = -1;
1931 subtitle_index = -1;
1932 is->video_stream = -1;
1933 is->audio_stream = -1;
1934 is->subtitle_stream = -1;
1935
1936 global_video_state = is;
1937 url_set_interrupt_cb(decode_interrupt_cb);
1938
1939 memset(ap, 0, sizeof(*ap));
1940
1941 ap->prealloced_context = 1;
1942 ap->width = frame_width;
1943 ap->height= frame_height;
1944 ap->time_base= (AVRational){1, 25};
1945 ap->pix_fmt = frame_pix_fmt;
1946
1947 set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
1948
1949 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1950 if (err < 0) {
1951 print_error(is->filename, err);
1952 ret = -1;
1953 goto fail;
1954 }
1955 is->ic = ic;
1956
1957 if(genpts)
1958 ic->flags |= AVFMT_FLAG_GENPTS;
1959
1960 err = av_find_stream_info(ic);
1961 if (err < 0) {
1962 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1963 ret = -1;
1964 goto fail;
1965 }
1966 if(ic->pb)
1967 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1968
1969 /* if seeking requested, we execute it */
1970 if (start_time != AV_NOPTS_VALUE) {
1971 int64_t timestamp;
1972
1973 timestamp = start_time;
1974 /* add the stream start time */
1975 if (ic->start_time != AV_NOPTS_VALUE)
1976 timestamp += ic->start_time;
1977 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
1978 if (ret < 0) {
1979 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1980 is->filename, (double)timestamp / AV_TIME_BASE);
1981 }
1982 }
1983
1984 for(i = 0; i < ic->nb_streams; i++) {
1985 AVCodecContext *enc = ic->streams[i]->codec;
1986 ic->streams[i]->discard = AVDISCARD_ALL;
1987 switch(enc->codec_type) {
1988 case CODEC_TYPE_AUDIO:
1989 if (wanted_audio_stream-- >= 0 && !audio_disable)
1990 audio_index = i;
1991 break;
1992 case CODEC_TYPE_VIDEO:
1993 if (wanted_video_stream-- >= 0 && !video_disable)
1994 video_index = i;
1995 break;
1996 case CODEC_TYPE_SUBTITLE:
1997 if (wanted_subtitle_stream-- >= 0 && !video_disable)
1998 subtitle_index = i;
1999 break;
2000 default:
2001 break;
2002 }
2003 }
2004 if (show_status) {
2005 dump_format(ic, 0, is->filename, 0);
2006 }
2007
2008 /* open the streams */
2009 if (audio_index >= 0) {
2010 stream_component_open(is, audio_index);
2011 }
2012
2013 if (video_index >= 0) {
2014 stream_component_open(is, video_index);
2015 } else {
2016 if (!display_disable)
2017 is->show_audio = 1;
2018 }
2019
2020 if (subtitle_index >= 0) {
2021 stream_component_open(is, subtitle_index);
2022 }
2023
2024 if (is->video_stream < 0 && is->audio_stream < 0) {
2025 fprintf(stderr, "%s: could not open codecs\n", is->filename);
2026 ret = -1;
2027 goto fail;
2028 }
2029
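/* Main demuxing loop: honour pause and seek requests, stop reading while the
   packet queues are full, and dispatch every packet read from the input to
   the matching audio/video/subtitle queue. */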
2030 for(;;) {
2031 if (is->abort_request)
2032 break;
2033 if (is->paused != is->last_paused) {
2034 is->last_paused = is->paused;
2035 if (is->paused)
2036 is->read_pause_return= av_read_pause(ic);
2037 else
2038 av_read_play(ic);
2039 }
2040 #if CONFIG_RTSP_DEMUXER
2041 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2042 /* wait 10 ms to avoid trying to get another packet */
2043 /* XXX: horrible */
2044 SDL_Delay(10);
2045 continue;
2046 }
2047 #endif
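/* Execute a pending seek request; on success every packet queue is flushed
   and a flush_pkt sentinel is queued so the decoding threads discard data
   belonging to the old position. */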
2048 if (is->seek_req) {
2049 int64_t seek_target= is->seek_pos;
2050 int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2051 int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2052 //FIXME the +-2 compensates for rounding not being done in the correct direction
2053 // when the seek_pos/seek_rel variables are generated
2054
2055 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2056 if (ret < 0) {
2057 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2058 }else{
2059 if (is->audio_stream >= 0) {
2060 packet_queue_flush(&is->audioq);
2061 packet_queue_put(&is->audioq, &flush_pkt);
2062 }
2063 if (is->subtitle_stream >= 0) {
2064 packet_queue_flush(&is->subtitleq);
2065 packet_queue_put(&is->subtitleq, &flush_pkt);
2066 }
2067 if (is->video_stream >= 0) {
2068 packet_queue_flush(&is->videoq);
2069 packet_queue_put(&is->videoq, &flush_pkt);
2070 }
2071 }
2072 is->seek_req = 0;
2073 eof= 0;
2074 }
2075
2076 /* if the queues are full, no need to read more */
2077 if ( is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2078 || ( (is->audioq .size > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2079 && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream<0)
2080 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2081 /* wait 10 ms */
2082 SDL_Delay(10);
2083 continue;
2084 }
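/* At end of file an empty packet is queued on the video stream so the
   decoder can return the frames it is still buffering (codecs with delay). */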
2085 if(url_feof(ic->pb) || eof) {
2086 if(is->video_stream >= 0){
2087 av_init_packet(pkt);
2088 pkt->data=NULL;
2089 pkt->size=0;
2090 pkt->stream_index= is->video_stream;
2091 packet_queue_put(&is->videoq, pkt);
2092 }
2093 SDL_Delay(10);
2094 continue;
2095 }
2096 ret = av_read_frame(ic, pkt);
2097 if (ret < 0) {
2098 if (ret == AVERROR_EOF)
2099 eof=1;
2100 if (url_ferror(ic->pb))
2101 break;
2102 SDL_Delay(100); /* wait for user event */
2103 continue;
2104 }
2105 if (pkt->stream_index == is->audio_stream) {
2106 packet_queue_put(&is->audioq, pkt);
2107 } else if (pkt->stream_index == is->video_stream) {
2108 packet_queue_put(&is->videoq, pkt);
2109 } else if (pkt->stream_index == is->subtitle_stream) {
2110 packet_queue_put(&is->subtitleq, pkt);
2111 } else {
2112 av_free_packet(pkt);
2113 }
2114 }
2115 /* wait until the end */
2116 while (!is->abort_request) {
2117 SDL_Delay(100);
2118 }
2119
2120 ret = 0;
2121 fail:
2122 /* disable interrupting */
2123 global_video_state = NULL;
2124
2125 /* close each stream */
2126 if (is->audio_stream >= 0)
2127 stream_component_close(is, is->audio_stream);
2128 if (is->video_stream >= 0)
2129 stream_component_close(is, is->video_stream);
2130 if (is->subtitle_stream >= 0)
2131 stream_component_close(is, is->subtitle_stream);
2132 if (is->ic) {
2133 av_close_input_file(is->ic);
2134 is->ic = NULL; /* safety */
2135 }
2136 url_set_interrupt_cb(NULL);
2137
2138 if (ret != 0) {
2139 SDL_Event event;
2140
2141 event.type = FF_QUIT_EVENT;
2142 event.user.data1 = is;
2143 SDL_PushEvent(&event);
2144 }
2145 return 0;
2146 }
2147
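/* Allocate a VideoState for 'filename', create the picture/subtitle queue
   synchronization primitives, schedule the first video refresh and spawn
   decode_thread(), which performs all further demuxing and stream setup. */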
2148 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2149 {
2150 VideoState *is;
2151
2152 is = av_mallocz(sizeof(VideoState));
2153 if (!is)
2154 return NULL;
2155 av_strlcpy(is->filename, filename, sizeof(is->filename));
2156 is->iformat = iformat;
2157 is->ytop = 0;
2158 is->xleft = 0;
2159
2160 /* start video display */
2161 is->pictq_mutex = SDL_CreateMutex();
2162 is->pictq_cond = SDL_CreateCond();
2163
2164 is->subpq_mutex = SDL_CreateMutex();
2165 is->subpq_cond = SDL_CreateCond();
2166
2167 /* add the refresh timer to draw the picture */
2168 schedule_refresh(is, 40);
2169
2170 is->av_sync_type = av_sync_type;
2171 is->parse_tid = SDL_CreateThread(decode_thread, is);
2172 if (!is->parse_tid) {
2173 av_free(is);
2174 return NULL;
2175 }
2176 return is;
2177 }
2178
2179 static void stream_close(VideoState *is)
2180 {
2181 VideoPicture *vp;
2182 int i;
2183 /* XXX: use a special url_shutdown call to abort parse cleanly */
2184 is->abort_request = 1;
2185 SDL_WaitThread(is->parse_tid, NULL);
2186
2187 /* free all pictures */
2188 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2189 vp = &is->pictq[i];
2190 if (vp->bmp) {
2191 SDL_FreeYUVOverlay(vp->bmp);
2192 vp->bmp = NULL;
2193 }
2194 }
2195 SDL_DestroyMutex(is->pictq_mutex);
2196 SDL_DestroyCond(is->pictq_cond);
2197 SDL_DestroyMutex(is->subpq_mutex);
2198 SDL_DestroyCond(is->subpq_cond);
2199 if (is->img_convert_ctx)
2200 sws_freeContext(is->img_convert_ctx);
2201 av_free(is);
2202 }
2203
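/* Switch to the next usable stream of the given type: scan forward (wrapping
   around) from the currently open stream, then close the old component and
   open the new one.  For subtitles, wrapping past the last stream closes the
   subtitle stream without opening another, i.e. turns subtitles off. */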
2204 static void stream_cycle_channel(VideoState *is, int codec_type)
2205 {
2206 AVFormatContext *ic = is->ic;
2207 int start_index, stream_index;
2208 AVStream *st;
2209
2210 if (codec_type == CODEC_TYPE_VIDEO)
2211 start_index = is->video_stream;
2212 else if (codec_type == CODEC_TYPE_AUDIO)
2213 start_index = is->audio_stream;
2214 else
2215 start_index = is->subtitle_stream;
2216 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2217 return;
2218 stream_index = start_index;
2219 for(;;) {
2220 if (++stream_index >= is->ic->nb_streams)
2221 {
2222 if (codec_type == CODEC_TYPE_SUBTITLE)
2223 {
2224 stream_index = -1;
2225 goto the_end;
2226 } else
2227 stream_index = 0;
2228 }
2229 if (stream_index == start_index)
2230 return;
2231 st = ic->streams[stream_index];
2232 if (st->codec->codec_type == codec_type) {
2233 /* check that parameters are OK */
2234 switch(codec_type) {
2235 case CODEC_TYPE_AUDIO:
2236 if (st->codec->sample_rate != 0 &&
2237 st->codec->channels != 0)
2238 goto the_end;
2239 break;
2240 case CODEC_TYPE_VIDEO:
2241 case CODEC_TYPE_SUBTITLE:
2242 goto the_end;
2243 default:
2244 break;
2245 }
2246 }
2247 }
2248 the_end:
2249 stream_component_close(is, start_index);
2250 stream_component_open(is, stream_index);
2251 }
2252
2253
2254 static void toggle_full_screen(void)
2255 {
2256 is_full_screen = !is_full_screen;
2257 if (!fs_screen_width) {
2258 /* use default SDL method */
2259 // SDL_WM_ToggleFullScreen(screen);
2260 }
2261 video_open(cur_stream);
2262 }
2263
2264 static void toggle_pause(void)
2265 {
2266 if (cur_stream)
2267 stream_pause(cur_stream);
2268 step = 0;
2269 }
2270
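/* Advance by a single frame: if playback is paused, unpause it so one more
   frame gets decoded; the display side is expected to pause again once
   'step' has been consumed. */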
2271 static void step_to_next_frame(void)
2272 {
2273 if (cur_stream) {
2274 /* if the stream is paused, unpause it, then step */
2275 if (cur_stream->paused)
2276 stream_pause(cur_stream);
2277 }
2278 step = 1;
2279 }
2280
2281 static void do_exit(void)
2282 {
2283 int i;
2284 if (cur_stream) {
2285 stream_close(cur_stream);
2286 cur_stream = NULL;
2287 }
2288 for (i = 0; i < CODEC_TYPE_NB; i++)
2289 av_free(avcodec_opts[i]);
2290 av_free(avformat_opts);
2291 av_free(sws_opts);
2292 if (show_status)
2293 printf("\n");
2294 SDL_Quit();
2295 exit(0);
2296 }
2297
2298 static void toggle_audio_display(void)
2299 {
2300 if (cur_stream) {
2301 cur_stream->show_audio = !cur_stream->show_audio;
2302 }
2303 }
2304
2305 /* handle an event sent by the GUI */
2306 static void event_loop(void)
2307 {
2308 SDL_Event event;
2309 double incr, pos, frac;
2310
2311 for(;;) {
2312 SDL_WaitEvent(&event);
2313 switch(event.type) {
2314 case SDL_KEYDOWN:
2315 switch(event.key.keysym.sym) {
2316 case SDLK_ESCAPE:
2317 case SDLK_q:
2318 do_exit();
2319 break;
2320 case SDLK_f:
2321 toggle_full_screen();
2322 break;
2323 case SDLK_p:
2324 case SDLK_SPACE:
2325 toggle_pause();
2326 break;
2327 case SDLK_s: //S: Step to next frame
2328 step_to_next_frame();
2329 break;
2330 case SDLK_a:
2331 if (cur_stream)
2332 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2333 break;
2334 case SDLK_v:
2335 if (cur_stream)
2336 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2337 break;
2338 case SDLK_t:
2339 if (cur_stream)
2340 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2341 break;
2342 case SDLK_w:
2343 toggle_audio_display();
2344 break;
2345 case SDLK_LEFT:
2346 incr = -10.0;
2347 goto do_seek;
2348 case SDLK_RIGHT:
2349 incr = 10.0;
2350 goto do_seek;
2351 case SDLK_UP:
2352 incr = 60.0;
2353 goto do_seek;
2354 case SDLK_DOWN:
2355 incr = -60.0;
2356 do_seek:
2357 if (cur_stream) {
2358 if (seek_by_bytes) {
2359 pos = url_ftell(cur_stream->ic->pb);
2360 if (cur_stream->ic->bit_rate)
2361 incr *= cur_stream->ic->bit_rate / 60.0;
2362 else
2363 incr *= 180000.0;
2364 pos += incr;
2365 stream_seek(cur_stream, pos, incr, 1);
2366 } else {
2367 pos = get_master_clock(cur_stream);
2368 pos += incr;
2369 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2370 }
2371 }
2372 break;
2373 default:
2374 break;
2375 }
2376 break;
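/* A mouse click seeks proportionally: the click's x position as a fraction
   of the window width is mapped either to a byte offset or to a timestamp
   within the file's duration. */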
2377 case SDL_MOUSEBUTTONDOWN:
2378 if (cur_stream) {
2379 if(seek_by_bytes || cur_stream->ic->duration<=0){
2380 uint64_t size= url_fsize(cur_stream->ic->pb);
2381 stream_seek(cur_stream, size*(double)event.button.x/(double)cur_stream->width, 0, 1);
2382 }else{
2383 int64_t ts;
2384 int ns, hh, mm, ss;
2385 int tns, thh, tmm, tss;
2386 tns = cur_stream->ic->duration/1000000LL;
2387 thh = tns/3600;
2388 tmm = (tns%3600)/60;
2389 tss = (tns%60);
2390 frac = (double)event.button.x/(double)cur_stream->width;
2391 ns = frac*tns;
2392 hh = ns/3600;
2393 mm = (ns%3600)/60;
2394 ss = (ns%60);
2395 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2396 hh, mm, ss, thh, tmm, tss);
2397 ts = frac*cur_stream->ic->duration;
2398 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2399 ts += cur_stream->ic->start_time;
2400 stream_seek(cur_stream, ts, 0, 0);
2401 }
2402 }
2403 break;
2404 case SDL_VIDEORESIZE:
2405 if (cur_stream) {
2406 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2407 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2408 screen_width = cur_stream->width = event.resize.w;
2409 screen_height= cur_stream->height= event.resize.h;
2410 }
2411 break;
2412 case SDL_QUIT:
2413 case FF_QUIT_EVENT:
2414 do_exit();
2415 break;
2416 case FF_ALLOC_EVENT:
2417 video_open(event.user.data1);
2418 alloc_picture(event.user.data1);
2419 break;
2420 case FF_REFRESH_EVENT:
2421 video_refresh_timer(event.user.data1);
2422 break;
2423 default:
2424 break;
2425 }
2426 }
2427 }
2428
2429 static void opt_frame_size(const char *arg)
2430 {
2431 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2432 fprintf(stderr, "Incorrect frame size\n");
2433 exit(1);
2434 }
2435 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2436 fprintf(stderr, "Frame size must be a multiple of 2\n");
2437 exit(1);
2438 }
2439 }
2440
2441 static int opt_width(const char *opt, const char *arg)
2442 {
2443 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2444 return 0;
2445 }
2446
2447 static int opt_height(const char *opt, const char *arg)
2448 {
2449 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2450 return 0;
2451 }
2452
2453 static void opt_format(const char *arg)
2454 {
2455 file_iformat = av_find_input_format(arg);
2456 if (!file_iformat) {
2457 fprintf(stderr, "Unknown input format: %s\n", arg);
2458 exit(1);
2459 }
2460 }
2461
2462 static void opt_frame_pix_fmt(const char *arg)
2463 {
2464 frame_pix_fmt = av_get_pix_fmt(arg);
2465 }
2466
2467 static int opt_sync(const char *opt, const char *arg)
2468 {
2469 if (!strcmp(arg, "audio"))
2470 av_sync_type = AV_SYNC_AUDIO_MASTER;
2471 else if (!strcmp(arg, "video"))
2472 av_sync_type = AV_SYNC_VIDEO_MASTER;
2473 else if (!strcmp(arg, "ext"))
2474 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2475 else {
2476 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2477 exit(1);
2478 }
2479 return 0;
2480 }
2481
2482 static int opt_seek(const char *opt, const char *arg)
2483 {
2484 start_time = parse_time_or_die(opt, arg, 1);
2485 return 0;
2486 }
2487
2488 static int opt_debug(const char *opt, const char *arg)
2489 {
2490 av_log_set_level(99);
2491 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2492 return 0;
2493 }
2494
2495 static int opt_vismv(const char *opt, const char *arg)
2496 {
2497 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2498 return 0;
2499 }
2500
2501 static int opt_thread_count(const char *opt, const char *arg)
2502 {
2503 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2504 #if !HAVE_THREADS
2505 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2506 #endif
2507 return 0;
2508 }
2509
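/* Illustrative invocations for the option table below (file names and values
 * are only examples):
 *   ffplay -ss 90 -an input.mkv         start 90 seconds in, audio disabled
 *   ffplay -sync ext -stats input.mkv   sync to the external clock, show status
 */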
2510 static const OptionDef options[] = {
2511 #include "cmdutils_common_opts.h"
2512 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2513 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2514 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2515 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2516 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2517 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2518 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
2519 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
2520 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
2521 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2522 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2523 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2524 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2525 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2526 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2527 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2528 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2529 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2530 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2531 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2532 { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2533 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2534 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2535 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2536 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2537 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2538 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
2539 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2540 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2541 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2542 { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2543 { NULL, },
2544 };
2545
2546 static void show_usage(void)
2547 {
2548 printf("Simple media player\n");
2549 printf("usage: ffplay [options] input_file\n");
2550 printf("\n");
2551 }
2552
2553 static void show_help(void)
2554 {
2555 show_usage();
2556 show_help_options(options, "Main options:\n",
2557 OPT_EXPERT, 0);
2558 show_help_options(options, "\nAdvanced options:\n",
2559 OPT_EXPERT, OPT_EXPERT);
2560 printf("\nWhile playing:\n"
2561 "q, ESC quit\n"
2562 "f toggle full screen\n"
2563 "p, SPC pause\n"
2564 "a cycle audio channel\n"
2565 "v cycle video channel\n"
2566 "t cycle subtitle channel\n"
2567 "w show audio waves\n"
2568 "left/right seek backward/forward 10 seconds\n"
2569 "down/up seek backward/forward 1 minute\n"
2570 "mouse click seek to percentage in file corresponding to fraction of width\n"
2571 );
2572 }
2573
2574 static void opt_input_file(const char *filename)
2575 {
2576 if (!strcmp(filename, "-"))
2577 filename = "pipe:";
2578 input_filename = filename;
2579 }
2580
2581 /* program entry point */
2582 int main(int argc, char **argv)
2583 {
2584 int flags, i;
2585
2586 /* register all codecs, demuxers and protocols */
2587 avcodec_register_all();
2588 avdevice_register_all();
2589 av_register_all();
2590
2591 for(i=0; i<CODEC_TYPE_NB; i++){
2592 avcodec_opts[i]= avcodec_alloc_context2(i);
2593 }
2594 avformat_opts = avformat_alloc_context();
2595 sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
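/* These default codec/format/scaler contexts only collect option values
   parsed from the command line (e.g. via -default); they are applied to the
   real contexts later, for instance through set_context_opts() in
   decode_thread(). */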
2596
2597 show_banner();
2598
2599 parse_options(argc, argv, options, opt_input_file);
2600
2601 if (!input_filename) {
2602 show_usage();
2603 fprintf(stderr, "An input file must be specified\n");
2604 fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
2605 exit(1);
2606 }
2607
2608 if (display_disable) {
2609 video_disable = 1;
2610 }
2611 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2612 #if !defined(__MINGW32__) && !defined(__APPLE__)
2613 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2614 #endif
2615 if (SDL_Init (flags)) {
2616 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2617 exit(1);
2618 }
2619
2620 if (!display_disable) {
2621 #if HAVE_SDL_VIDEO_SIZE
2622 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2623 fs_screen_width = vi->current_w;
2624 fs_screen_height = vi->current_h;
2625 #endif
2626 }
2627
2628 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2629 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2630 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2631 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2632
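/* flush_pkt is a sentinel packet: it is pushed into each packet queue right
   after a flush (see the seek handling in decode_thread) so the decoding
   threads can tell that a seek has happened. */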
2633 av_init_packet(&flush_pkt);
2634 flush_pkt.data= "FLUSH";
2635
2636 cur_stream = stream_open(input_filename, file_iformat);
2637
2638 event_loop();
2639
2640 /* never returns */
2641
2642 return 0;
2643 }