67e24372532d4bfffc3b5e24e2044ed4c4300005
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the FFmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavformat/avformat.h"
30 #include "libavdevice/avdevice.h"
31 #include "libswscale/swscale.h"
32 #include "libavcodec/audioconvert.h"
33 #include "libavcodec/opt.h"
34 #include "libavcodec/avfft.h"
35
36 #if CONFIG_AVFILTER
37 # include "libavfilter/avfilter.h"
38 # include "libavfilter/avfiltergraph.h"
39 # include "libavfilter/graphparser.h"
40 #endif
41
42 #include "cmdutils.h"
43
44 #include <SDL.h>
45 #include <SDL_thread.h>
46
47 #ifdef __MINGW32__
48 #undef main /* We don't want SDL to override our main() */
49 #endif
50
51 #include <unistd.h>
52 #include <assert.h>
53
54 const char program_name[] = "FFplay";
55 const int program_birth_year = 2003;
56
57 //#define DEBUG_SYNC
58
59 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61 #define MIN_FRAMES 5
62
63 /* SDL audio buffer size, in samples. Should be small to have precise
64 A/V sync as SDL does not have hardware buffer fullness info. */
65 #define SDL_AUDIO_BUFFER_SIZE 1024
66
67 /* no AV sync correction is done if below the AV sync threshold */
68 #define AV_SYNC_THRESHOLD 0.01
69 /* no AV correction is done if too big error */
70 #define AV_NOSYNC_THRESHOLD 10.0
71
72 #define FRAME_SKIP_FACTOR 0.05
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78 #define AUDIO_DIFF_AVG_NB 20
79
80 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 static int sws_flags = SWS_BICUBIC;
84
/* Thread-safe FIFO of demuxed packets shared between the demux thread and
 * a decoder thread.  All fields are protected by `mutex`; `cond` is
 * signalled whenever a packet is appended or an abort is requested. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list head / tail */
    int nb_packets;      /* number of packets currently queued */
    int size;            /* total bytes queued (payload + list nodes) */
    int abort_request;   /* when set, blocked readers return -1 */
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;
93
94 #define VIDEO_PICTURE_QUEUE_SIZE 2
95 #define SUBPICTURE_QUEUE_SIZE 4
96
/* One decoded video frame queued for display, with its timing metadata. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixel data
    int width, height; /* source height & width */
    int allocated;                               ///<nonzero once bmp exists — set by alloc handling outside this view
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterPicRef *picref;                      ///<filter-graph reference owning the frame data
#endif
} VideoPicture;
110
/* A decoded subtitle plus the pts at which it becomes visible. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles */
} SubPicture;
115
/* Master clock selection: which clock the other streams are synced to. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
121
/* Complete state of one playing file: demux/decode threads, per-stream
 * packet queues, decoded picture/subtitle queues, and the candidate
 * clocks (audio, video, external) used for A/V synchronisation. */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demux thread — presumably; spawn site not in view */
    SDL_Thread *video_tid;    /* video decode thread — presumably; spawn site not in view */
    SDL_Thread *refresh_tid;  /* runs refresh_thread(): posts FF_REFRESH_EVENT periodically */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;        /* set to make all threads exit */
    int paused;
    int last_paused;
    int seek_req;             /* a seek is pending; see stream_seek() */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;    /* result of av_read_pause — checked in stream_pause() */
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;         /* one of the AV_SYNC_* enum values */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples (1=waves, else spectrum) */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer for the visualization */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;   /* FFT context for the spectrum display */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;            /* current column of the scrolling spectrum */

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos; ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;

    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter; ///<the last filter in the video chain
#endif

    float skip_frames;       /* frame-drop ratio; multiplied up when display falls behind */
    float skip_frames_index;
    int refresh;             /* set while a refresh event is pending in the SDL queue */
} VideoState;
219
220 static void show_help(void);
221 static int audio_write_get_buf_size(VideoState *is);
222
223 /* options specified by the user */
224 static AVInputFormat *file_iformat;
225 static const char *input_filename;
226 static const char *window_title;
227 static int fs_screen_width;
228 static int fs_screen_height;
229 static int screen_width = 0;
230 static int screen_height = 0;
231 static int frame_width = 0;
232 static int frame_height = 0;
233 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
234 static int audio_disable;
235 static int video_disable;
236 static int wanted_stream[AVMEDIA_TYPE_NB]={
237 [AVMEDIA_TYPE_AUDIO]=-1,
238 [AVMEDIA_TYPE_VIDEO]=-1,
239 [AVMEDIA_TYPE_SUBTITLE]=-1,
240 };
241 static int seek_by_bytes=-1;
242 static int display_disable;
243 static int show_status = 1;
244 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
245 static int64_t start_time = AV_NOPTS_VALUE;
246 static int64_t duration = AV_NOPTS_VALUE;
247 static int debug = 0;
248 static int debug_mv = 0;
249 static int step = 0;
250 static int thread_count = 1;
251 static int workaround_bugs = 1;
252 static int fast = 0;
253 static int genpts = 0;
254 static int lowres = 0;
255 static int idct = FF_IDCT_AUTO;
256 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
259 static int error_recognition = FF_ER_CAREFUL;
260 static int error_concealment = 3;
261 static int decoder_reorder_pts= -1;
262 static int autoexit;
263 static int exit_on_keydown;
264 static int exit_on_mousedown;
265 static int loop=1;
266 static int framedrop=1;
267
268 static int rdftspeed=20;
269 #if CONFIG_AVFILTER
270 static char *vfilters = NULL;
271 #endif
272
273 /* current context */
274 static int is_full_screen;
275 static VideoState *cur_stream;
276 static int64_t audio_callback_time;
277
278 static AVPacket flush_pkt;
279
280 #define FF_ALLOC_EVENT (SDL_USEREVENT)
281 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
282 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
283
284 static SDL_Surface *screen;
285
286 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
287
288 /* packet queue handling */
289 static void packet_queue_init(PacketQueue *q)
290 {
291 memset(q, 0, sizeof(PacketQueue));
292 q->mutex = SDL_CreateMutex();
293 q->cond = SDL_CreateCond();
294 packet_queue_put(q, &flush_pkt);
295 }
296
297 static void packet_queue_flush(PacketQueue *q)
298 {
299 AVPacketList *pkt, *pkt1;
300
301 SDL_LockMutex(q->mutex);
302 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
303 pkt1 = pkt->next;
304 av_free_packet(&pkt->pkt);
305 av_freep(&pkt);
306 }
307 q->last_pkt = NULL;
308 q->first_pkt = NULL;
309 q->nb_packets = 0;
310 q->size = 0;
311 SDL_UnlockMutex(q->mutex);
312 }
313
/* Tear down a queue: flush all remaining packets, then destroy its
 * synchronisation primitives.  The queue must no longer be in use. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
320
321 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
322 {
323 AVPacketList *pkt1;
324
325 /* duplicate the packet */
326 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
327 return -1;
328
329 pkt1 = av_malloc(sizeof(AVPacketList));
330 if (!pkt1)
331 return -1;
332 pkt1->pkt = *pkt;
333 pkt1->next = NULL;
334
335
336 SDL_LockMutex(q->mutex);
337
338 if (!q->last_pkt)
339
340 q->first_pkt = pkt1;
341 else
342 q->last_pkt->next = pkt1;
343 q->last_pkt = pkt1;
344 q->nb_packets++;
345 q->size += pkt1->pkt.size + sizeof(*pkt1);
346 /* XXX: should duplicate packet data in DV case */
347 SDL_CondSignal(q->cond);
348
349 SDL_UnlockMutex(q->mutex);
350 return 0;
351 }
352
353 static void packet_queue_abort(PacketQueue *q)
354 {
355 SDL_LockMutex(q->mutex);
356
357 q->abort_request = 1;
358
359 SDL_CondSignal(q->cond);
360
361 SDL_UnlockMutex(q->mutex);
362 }
363
364 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
365 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
366 {
367 AVPacketList *pkt1;
368 int ret;
369
370 SDL_LockMutex(q->mutex);
371
372 for(;;) {
373 if (q->abort_request) {
374 ret = -1;
375 break;
376 }
377
378 pkt1 = q->first_pkt;
379 if (pkt1) {
380 q->first_pkt = pkt1->next;
381 if (!q->first_pkt)
382 q->last_pkt = NULL;
383 q->nb_packets--;
384 q->size -= pkt1->pkt.size + sizeof(*pkt1);
385 *pkt = pkt1->pkt;
386 av_free(pkt1);
387 ret = 1;
388 break;
389 } else if (!block) {
390 ret = 0;
391 break;
392 } else {
393 SDL_CondWait(q->cond, q->mutex);
394 }
395 }
396 SDL_UnlockMutex(q->mutex);
397 return ret;
398 }
399
400 static inline void fill_rectangle(SDL_Surface *screen,
401 int x, int y, int w, int h, int color)
402 {
403 SDL_Rect rect;
404 rect.x = x;
405 rect.y = y;
406 rect.w = w;
407 rect.h = h;
408 SDL_FillRect(screen, &rect, color);
409 }
410
411 #if 0
412 /* draw only the border of a rectangle */
413 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
414 {
415 int w1, w2, h1, h2;
416
417 /* fill the background */
418 w1 = x;
419 if (w1 < 0)
420 w1 = 0;
421 w2 = s->width - (x + w);
422 if (w2 < 0)
423 w2 = 0;
424 h1 = y;
425 if (h1 < 0)
426 h1 = 0;
427 h2 = s->height - (y + h);
428 if (h2 < 0)
429 h2 = 0;
430 fill_rectangle(screen,
431 s->xleft, s->ytop,
432 w1, s->height,
433 color);
434 fill_rectangle(screen,
435 s->xleft + s->width - w2, s->ytop,
436 w2, s->height,
437 color);
438 fill_rectangle(screen,
439 s->xleft + w1, s->ytop,
440 s->width - w1 - w2, h1,
441 color);
442 fill_rectangle(screen,
443 s->xleft + w1, s->ytop + s->height - h2,
444 s->width - w1 - w2, h2,
445 color);
446 }
447 #endif
448
449 #define ALPHA_BLEND(a, oldp, newp, s)\
450 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
451
452 #define RGBA_IN(r, g, b, a, s)\
453 {\
454 unsigned int v = ((const uint32_t *)(s))[0];\
455 a = (v >> 24) & 0xff;\
456 r = (v >> 16) & 0xff;\
457 g = (v >> 8) & 0xff;\
458 b = v & 0xff;\
459 }
460
461 #define YUVA_IN(y, u, v, a, s, pal)\
462 {\
463 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
464 a = (val >> 24) & 0xff;\
465 y = (val >> 16) & 0xff;\
466 u = (val >> 8) & 0xff;\
467 v = val & 0xff;\
468 }
469
470 #define YUVA_OUT(d, y, u, v, a)\
471 {\
472 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
473 }
474
475
476 #define BPP 1
477
478 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
479 {
480 int wrap, wrap3, width2, skip2;
481 int y, u, v, a, u1, v1, a1, w, h;
482 uint8_t *lum, *cb, *cr;
483 const uint8_t *p;
484 const uint32_t *pal;
485 int dstx, dsty, dstw, dsth;
486
487 dstw = av_clip(rect->w, 0, imgw);
488 dsth = av_clip(rect->h, 0, imgh);
489 dstx = av_clip(rect->x, 0, imgw - dstw);
490 dsty = av_clip(rect->y, 0, imgh - dsth);
491 lum = dst->data[0] + dsty * dst->linesize[0];
492 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
493 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
494
495 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
496 skip2 = dstx >> 1;
497 wrap = dst->linesize[0];
498 wrap3 = rect->pict.linesize[0];
499 p = rect->pict.data[0];
500 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
501
502 if (dsty & 1) {
503 lum += dstx;
504 cb += skip2;
505 cr += skip2;
506
507 if (dstx & 1) {
508 YUVA_IN(y, u, v, a, p, pal);
509 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
510 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
511 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
512 cb++;
513 cr++;
514 lum++;
515 p += BPP;
516 }
517 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
518 YUVA_IN(y, u, v, a, p, pal);
519 u1 = u;
520 v1 = v;
521 a1 = a;
522 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523
524 YUVA_IN(y, u, v, a, p + BPP, pal);
525 u1 += u;
526 v1 += v;
527 a1 += a;
528 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
529 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
530 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
531 cb++;
532 cr++;
533 p += 2 * BPP;
534 lum += 2;
535 }
536 if (w) {
537 YUVA_IN(y, u, v, a, p, pal);
538 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
540 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
541 p++;
542 lum++;
543 }
544 p += wrap3 - dstw * BPP;
545 lum += wrap - dstw - dstx;
546 cb += dst->linesize[1] - width2 - skip2;
547 cr += dst->linesize[2] - width2 - skip2;
548 }
549 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
550 lum += dstx;
551 cb += skip2;
552 cr += skip2;
553
554 if (dstx & 1) {
555 YUVA_IN(y, u, v, a, p, pal);
556 u1 = u;
557 v1 = v;
558 a1 = a;
559 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
560 p += wrap3;
561 lum += wrap;
562 YUVA_IN(y, u, v, a, p, pal);
563 u1 += u;
564 v1 += v;
565 a1 += a;
566 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
567 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
568 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
569 cb++;
570 cr++;
571 p += -wrap3 + BPP;
572 lum += -wrap + 1;
573 }
574 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
575 YUVA_IN(y, u, v, a, p, pal);
576 u1 = u;
577 v1 = v;
578 a1 = a;
579 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580
581 YUVA_IN(y, u, v, a, p + BPP, pal);
582 u1 += u;
583 v1 += v;
584 a1 += a;
585 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
586 p += wrap3;
587 lum += wrap;
588
589 YUVA_IN(y, u, v, a, p, pal);
590 u1 += u;
591 v1 += v;
592 a1 += a;
593 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
594
595 YUVA_IN(y, u, v, a, p + BPP, pal);
596 u1 += u;
597 v1 += v;
598 a1 += a;
599 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
600
601 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
602 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
603
604 cb++;
605 cr++;
606 p += -wrap3 + 2 * BPP;
607 lum += -wrap + 2;
608 }
609 if (w) {
610 YUVA_IN(y, u, v, a, p, pal);
611 u1 = u;
612 v1 = v;
613 a1 = a;
614 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615 p += wrap3;
616 lum += wrap;
617 YUVA_IN(y, u, v, a, p, pal);
618 u1 += u;
619 v1 += v;
620 a1 += a;
621 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
623 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
624 cb++;
625 cr++;
626 p += -wrap3 + BPP;
627 lum += -wrap + 1;
628 }
629 p += wrap3 + (wrap3 - dstw * BPP);
630 lum += wrap + (wrap - dstw - dstx);
631 cb += dst->linesize[1] - width2 - skip2;
632 cr += dst->linesize[2] - width2 - skip2;
633 }
634 /* handle odd height */
635 if (h) {
636 lum += dstx;
637 cb += skip2;
638 cr += skip2;
639
640 if (dstx & 1) {
641 YUVA_IN(y, u, v, a, p, pal);
642 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
643 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
644 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
645 cb++;
646 cr++;
647 lum++;
648 p += BPP;
649 }
650 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
651 YUVA_IN(y, u, v, a, p, pal);
652 u1 = u;
653 v1 = v;
654 a1 = a;
655 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
656
657 YUVA_IN(y, u, v, a, p + BPP, pal);
658 u1 += u;
659 v1 += v;
660 a1 += a;
661 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
662 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
663 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
664 cb++;
665 cr++;
666 p += 2 * BPP;
667 lum += 2;
668 }
669 if (w) {
670 YUVA_IN(y, u, v, a, p, pal);
671 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
672 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
673 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
674 }
675 }
676 }
677
678 static void free_subpicture(SubPicture *sp)
679 {
680 int i;
681
682 for (i = 0; i < sp->sub.num_rects; i++)
683 {
684 av_freep(&sp->sub.rects[i]->pict.data[0]);
685 av_freep(&sp->sub.rects[i]->pict.data[1]);
686 av_freep(&sp->sub.rects[i]);
687 }
688
689 av_free(sp->sub.rects);
690
691 memset(&sp->sub, 0, sizeof(AVSubtitle));
692 }
693
/* Blit the picture at the queue read index (plus any due subtitle) to
 * its SDL overlay, letterboxed into the window while preserving the
 * frame's display aspect ratio.  Does nothing if the overlay has not
 * been allocated yet. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
        if (vp->picref->pixel_aspect.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* sample aspect ratio: container value wins over codec value */
        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* turn the sample aspect ratio into a display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend the subtitle once its start time has been reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note the 1/2 swap: the overlay's chroma plane order
                       differs from AVPicture's — presumably YV12 (Y,V,U);
                       confirm against the overlay creation code */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the aspect-corrected frame inside the window,
           with width/height rounded down to even values */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
815
/* Mathematical (always non-negative) modulo for positive b. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
824
825 static void video_audio_display(VideoState *s)
826 {
827 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
828 int ch, channels, h, h2, bgcolor, fgcolor;
829 int16_t time_diff;
830 int rdft_bits, nb_freq;
831
832 for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
833 ;
834 nb_freq= 1<<(rdft_bits-1);
835
836 /* compute display index : center on currently output samples */
837 channels = s->audio_st->codec->channels;
838 nb_display_channels = channels;
839 if (!s->paused) {
840 int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
841 n = 2 * channels;
842 delay = audio_write_get_buf_size(s);
843 delay /= n;
844
845 /* to be more precise, we take into account the time spent since
846 the last buffer computation */
847 if (audio_callback_time) {
848 time_diff = av_gettime() - audio_callback_time;
849 delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
850 }
851
852 delay += 2*data_used;
853 if (delay < data_used)
854 delay = data_used;
855
856 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
857 if(s->show_audio==1){
858 h= INT_MIN;
859 for(i=0; i<1000; i+=channels){
860 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
861 int a= s->sample_array[idx];
862 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
863 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
864 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
865 int score= a-d;
866 if(h<score && (b^c)<0){
867 h= score;
868 i_start= idx;
869 }
870 }
871 }
872
873 s->last_i_start = i_start;
874 } else {
875 i_start = s->last_i_start;
876 }
877
878 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
879 if(s->show_audio==1){
880 fill_rectangle(screen,
881 s->xleft, s->ytop, s->width, s->height,
882 bgcolor);
883
884 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
885
886 /* total height for one channel */
887 h = s->height / nb_display_channels;
888 /* graph height / 2 */
889 h2 = (h * 9) / 20;
890 for(ch = 0;ch < nb_display_channels; ch++) {
891 i = i_start + ch;
892 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
893 for(x = 0; x < s->width; x++) {
894 y = (s->sample_array[i] * h2) >> 15;
895 if (y < 0) {
896 y = -y;
897 ys = y1 - y;
898 } else {
899 ys = y1;
900 }
901 fill_rectangle(screen,
902 s->xleft + x, ys, 1, y,
903 fgcolor);
904 i += channels;
905 if (i >= SAMPLE_ARRAY_SIZE)
906 i -= SAMPLE_ARRAY_SIZE;
907 }
908 }
909
910 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
911
912 for(ch = 1;ch < nb_display_channels; ch++) {
913 y = s->ytop + ch * h;
914 fill_rectangle(screen,
915 s->xleft, y, s->width, 1,
916 fgcolor);
917 }
918 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
919 }else{
920 nb_display_channels= FFMIN(nb_display_channels, 2);
921 if(rdft_bits != s->rdft_bits){
922 av_rdft_end(s->rdft);
923 av_free(s->rdft_data);
924 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
925 s->rdft_bits= rdft_bits;
926 s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
927 }
928 {
929 FFTSample *data[2];
930 for(ch = 0;ch < nb_display_channels; ch++) {
931 data[ch] = s->rdft_data + 2*nb_freq*ch;
932 i = i_start + ch;
933 for(x = 0; x < 2*nb_freq; x++) {
934 double w= (x-nb_freq)*(1.0/nb_freq);
935 data[ch][x]= s->sample_array[i]*(1.0-w*w);
936 i += channels;
937 if (i >= SAMPLE_ARRAY_SIZE)
938 i -= SAMPLE_ARRAY_SIZE;
939 }
940 av_rdft_calc(s->rdft, data[ch]);
941 }
942 //least efficient way to do this, we should of course directly access it but its more than fast enough
943 for(y=0; y<s->height; y++){
944 double w= 1/sqrt(nb_freq);
945 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
946 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
947 a= FFMIN(a,255);
948 b= FFMIN(b,255);
949 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
950
951 fill_rectangle(screen,
952 s->xpos, s->height-y, 1, 1,
953 fgcolor);
954 }
955 }
956 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
957 s->xpos++;
958 if(s->xpos >= s->width)
959 s->xpos= s->xleft;
960 }
961 }
962
/* (Re)create the SDL output surface, sized from (in priority order) the
 * fullscreen dimensions, the user-requested size, the filter/stream
 * dimensions, or a 640x480 fallback.  No-op when the current surface
 * already has the wanted size.  Returns 0 on success, -1 if SDL cannot
 * set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the existing surface already matches */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1012
1013 /* display the current picture, if any */
1014 static void video_display(VideoState *is)
1015 {
1016 if(!screen)
1017 video_open(cur_stream);
1018 if (is->audio_st && is->show_audio)
1019 video_audio_display(is);
1020 else if (is->video_st)
1021 video_image_display(is);
1022 }
1023
1024 static int refresh_thread(void *opaque)
1025 {
1026 VideoState *is= opaque;
1027 while(!is->abort_request){
1028 SDL_Event event;
1029 event.type = FF_REFRESH_EVENT;
1030 event.user.data1 = opaque;
1031 if(!is->refresh){
1032 is->refresh=1;
1033 SDL_PushEvent(&event);
1034 }
1035 usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1036 }
1037 return 0;
1038 }
1039
1040 /* get the current audio clock value */
1041 static double get_audio_clock(VideoState *is)
1042 {
1043 double pts;
1044 int hw_buf_size, bytes_per_sec;
1045 pts = is->audio_clock;
1046 hw_buf_size = audio_write_get_buf_size(is);
1047 bytes_per_sec = 0;
1048 if (is->audio_st) {
1049 bytes_per_sec = is->audio_st->codec->sample_rate *
1050 2 * is->audio_st->codec->channels;
1051 }
1052 if (bytes_per_sec)
1053 pts -= (double)hw_buf_size / bytes_per_sec;
1054 return pts;
1055 }
1056
1057 /* get the current video clock value */
1058 static double get_video_clock(VideoState *is)
1059 {
1060 if (is->paused) {
1061 return is->video_current_pts;
1062 } else {
1063 return is->video_current_pts_drift + av_gettime() / 1000000.0;
1064 }
1065 }
1066
1067 /* get the current external clock value */
1068 static double get_external_clock(VideoState *is)
1069 {
1070 int64_t ti;
1071 ti = av_gettime();
1072 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1073 }
1074
1075 /* get the current master clock value */
1076 static double get_master_clock(VideoState *is)
1077 {
1078 double val;
1079
1080 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1081 if (is->video_st)
1082 val = get_video_clock(is);
1083 else
1084 val = get_audio_clock(is);
1085 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1086 if (is->audio_st)
1087 val = get_audio_clock(is);
1088 else
1089 val = get_video_clock(is);
1090 } else {
1091 val = get_external_clock(is);
1092 }
1093 return val;
1094 }
1095
1096 /* seek in the stream */
1097 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1098 {
1099 if (!is->seek_req) {
1100 is->seek_pos = pos;
1101 is->seek_rel = rel;
1102 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1103 if (seek_by_bytes)
1104 is->seek_flags |= AVSEEK_FLAG_BYTE;
1105 is->seek_req = 1;
1106 }
1107 }
1108
/* Toggle pause.  On resume, the frame timer and the video clock drift
 * are re-anchored so the time spent paused is not counted as A/V lag. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance frame_timer by the wall time spent paused */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* demuxer honoured av_read_pause() — presumably its clock
               kept running, so resync the pts to now; confirm against
               the read-thread pause handling */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1121
1122 static double compute_target_time(double frame_current_pts, VideoState *is)
1123 {
1124 double delay, sync_threshold, diff;
1125
1126 /* compute nominal delay */
1127 delay = frame_current_pts - is->frame_last_pts;
1128 if (delay <= 0 || delay >= 10.0) {
1129 /* if incorrect delay, use previous one */
1130 delay = is->frame_last_delay;
1131 } else {
1132 is->frame_last_delay = delay;
1133 }
1134 is->frame_last_pts = frame_current_pts;
1135
1136 /* update delay to follow master synchronisation source */
1137 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1138 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1139 /* if video is slave, we try to correct big delays by
1140 duplicating or deleting a frame */
1141 diff = get_video_clock(is) - get_master_clock(is);
1142
1143 /* skip or repeat frame. We take into account the
1144 delay to compute the threshold. I still don't know
1145 if it is the best guess */
1146 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1147 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1148 if (diff <= -sync_threshold)
1149 delay = 0;
1150 else if (diff >= sync_threshold)
1151 delay = 2 * delay;
1152 }
1153 }
1154 is->frame_timer += delay;
1155 #if defined(DEBUG_SYNC)
1156 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1157 delay, actual_delay, frame_current_pts, -diff);
1158 #endif
1159
1160 return is->frame_timer;
1161 }
1162
/* called to display each frame */
/**
 * Pops the next picture from the picture queue once its target display
 * time has been reached, drops late frames when -framedrop is enabled,
 * expires finished subtitles, blits the picture, and periodically prints
 * the status line.  NOTE(review): appears to run in the SDL event thread
 * via a refresh event — confirm against the event loop (outside this view).
 */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
    retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: keep it queued and try again on the next refresh */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                /* no successor queued: estimate its due time from the clock */
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if the *next* frame is already overdue, this one
               is late; raise the skip factor and, when safe, discard it */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switch: drain every queued subtitle picture */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the subtitle once its display window is over,
                           or once its successor's window has begun */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* rate-limit the status line to roughly every 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1301
/* allocate a picture (needs to be done in the main thread to avoid
   potential locking problems) */
/* (Re)creates the SDL YUV overlay for the current write slot of the picture
   queue, sized either from the filter-graph output or the codec geometry.
   Runs in response to FF_ALLOC_EVENT pushed by queue_picture(); signals
   pictq_cond so the waiting decoder thread can continue. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop the previous overlay before creating a differently-sized one */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_pic(vp->picref);
    vp->picref = NULL;

    /* dimensions and format come from the filter graph's output pad */
    vp->width = is->out_video_filter->inputs[0]->w;
    vp->height = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    /* publish the allocation and wake queue_picture(), which waits on
       vp->allocated under pictq_mutex */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1337
1338 /**
1339 *
1340 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1341 */
1342 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1343 {
1344 VideoPicture *vp;
1345 int dst_pix_fmt;
1346 #if CONFIG_AVFILTER
1347 AVPicture pict_src;
1348 #endif
1349 /* wait until we have space to put a new picture */
1350 SDL_LockMutex(is->pictq_mutex);
1351
1352 if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1353 is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1354
1355 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1356 !is->videoq.abort_request) {
1357 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1358 }
1359 SDL_UnlockMutex(is->pictq_mutex);
1360
1361 if (is->videoq.abort_request)
1362 return -1;
1363
1364 vp = &is->pictq[is->pictq_windex];
1365
1366 /* alloc or resize hardware picture buffer */
1367 if (!vp->bmp ||
1368 #if CONFIG_AVFILTER
1369 vp->width != is->out_video_filter->inputs[0]->w ||
1370 vp->height != is->out_video_filter->inputs[0]->h) {
1371 #else
1372 vp->width != is->video_st->codec->width ||
1373 vp->height != is->video_st->codec->height) {
1374 #endif
1375 SDL_Event event;
1376
1377 vp->allocated = 0;
1378
1379 /* the allocation must be done in the main thread to avoid
1380 locking problems */
1381 event.type = FF_ALLOC_EVENT;
1382 event.user.data1 = is;
1383 SDL_PushEvent(&event);
1384
1385 /* wait until the picture is allocated */
1386 SDL_LockMutex(is->pictq_mutex);
1387 while (!vp->allocated && !is->videoq.abort_request) {
1388 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1389 }
1390 SDL_UnlockMutex(is->pictq_mutex);
1391
1392 if (is->videoq.abort_request)
1393 return -1;
1394 }
1395
1396 /* if the frame is not skipped, then display it */
1397 if (vp->bmp) {
1398 AVPicture pict;
1399 #if CONFIG_AVFILTER
1400 if(vp->picref)
1401 avfilter_unref_pic(vp->picref);
1402 vp->picref = src_frame->opaque;
1403 #endif
1404
1405 /* get a pointer on the bitmap */
1406 SDL_LockYUVOverlay (vp->bmp);
1407
1408 dst_pix_fmt = PIX_FMT_YUV420P;
1409 memset(&pict,0,sizeof(AVPicture));
1410 pict.data[0] = vp->bmp->pixels[0];
1411 pict.data[1] = vp->bmp->pixels[2];
1412 pict.data[2] = vp->bmp->pixels[1];
1413
1414 pict.linesize[0] = vp->bmp->pitches[0];
1415 pict.linesize[1] = vp->bmp->pitches[2];
1416 pict.linesize[2] = vp->bmp->pitches[1];
1417
1418 #if CONFIG_AVFILTER
1419 pict_src.data[0] = src_frame->data[0];
1420 pict_src.data[1] = src_frame->data[1];
1421 pict_src.data[2] = src_frame->data[2];
1422
1423 pict_src.linesize[0] = src_frame->linesize[0];
1424 pict_src.linesize[1] = src_frame->linesize[1];
1425 pict_src.linesize[2] = src_frame->linesize[2];
1426
1427 //FIXME use direct rendering
1428 av_picture_copy(&pict, &pict_src,
1429 vp->pix_fmt, vp->width, vp->height);
1430 #else
1431 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1432 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1433 vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1434 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1435 if (is->img_convert_ctx == NULL) {
1436 fprintf(stderr, "Cannot initialize the conversion context\n");
1437 exit(1);
1438 }
1439 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1440 0, vp->height, pict.data, pict.linesize);
1441 #endif
1442 /* update the bitmap content */
1443 SDL_UnlockYUVOverlay(vp->bmp);
1444
1445 vp->pts = pts;
1446 vp->pos = pos;
1447
1448 /* now we can update the picture count */
1449 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1450 is->pictq_windex = 0;
1451 SDL_LockMutex(is->pictq_mutex);
1452 vp->target_clock= compute_target_time(vp->pts, is);
1453
1454 is->pictq_size++;
1455 SDL_UnlockMutex(is->pictq_mutex);
1456 }
1457 return 0;
1458 }
1459
1460 /**
1461 * compute the exact PTS for the picture if it is omitted in the stream
1462 * @param pts1 the dts of the pkt / pts of the frame
1463 */
1464 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1465 {
1466 double frame_delay, pts;
1467
1468 pts = pts1;
1469
1470 if (pts != 0) {
1471 /* update video clock with pts, if present */
1472 is->video_clock = pts;
1473 } else {
1474 pts = is->video_clock;
1475 }
1476 /* update video clock for next frame */
1477 frame_delay = av_q2d(is->video_st->codec->time_base);
1478 /* for MPEG2, the frame can be repeated, so we update the
1479 clock accordingly */
1480 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1481 is->video_clock += frame_delay;
1482
1483 #if defined(DEBUG_SYNC) && 0
1484 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1485 av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1486 #endif
1487 return queue_picture(is, src_frame, pts, pos);
1488 }
1489
/**
 * Pull one packet from the video queue and decode it.  Handles the flush
 * packet (resets decoder and timing state) and picks the most trustworthy
 * timestamp source based on the running pts/dts fault counters.
 *
 * @param pts [out] chosen timestamp in stream time_base units (0 if unknown)
 * @return 1 if a displayable frame was produced, 0 if no frame / the frame
 *         was skipped / flush was processed, -1 if the queue was aborted
 */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if(pkt->data == flush_pkt.data){
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
            is->pictq[i].target_clock= 0;
        }
        /* wait for the refresh side to drain the picture queue */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos= -1;
        SDL_UnlockMutex(is->pictq_mutex);

        /* reset timestamp fault detection and frame timing after a seek */
        is->last_dts_for_fault_detection=
        is->last_pts_for_fault_detection= INT64_MIN;
        is->frame_last_pts= AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames= 1;
        is->skip_frames_index= 0;
        return 0;
    }

    /* NOTE: ipts is the PTS of the _first_ picture beginning in
       this packet, if any */
    is->video_st->codec->reordered_opaque= pkt->pts;
    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    if (got_picture) {
        /* count non-monotone timestamps to decide whether the reordered
           pts or the dts is the more reliable source for this stream */
        if(pkt->dts != AV_NOPTS_VALUE){
            is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
            is->last_dts_for_fault_detection= pkt->dts;
        }
        if(frame->reordered_opaque != AV_NOPTS_VALUE){
            is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
            is->last_pts_for_fault_detection= frame->reordered_opaque;
        }
    }

    /* prefer the reordered pts when forced, when it misbehaves less often
       than dts, or when no dts exists; otherwise fall back to dts, then 0 */
    if(   (   decoder_reorder_pts==1
           || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
           || pkt->dts == AV_NOPTS_VALUE)
       && frame->reordered_opaque != AV_NOPTS_VALUE)
        *pts= frame->reordered_opaque;
    else if(pkt->dts != AV_NOPTS_VALUE)
        *pts= pkt->dts;
    else
        *pts= 0;

//            if (len1 < 0)
//                break;
    if (got_picture){
        /* frame skipping: only every skip_frames-th frame is returned */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1561
1562 #if CONFIG_AVFILTER
/* private state of the ffplay_input source filter */
typedef struct {
    VideoState *is;      /* owning player state; source of decoded frames */
    AVFrame *frame;      /* scratch frame reused across input_request_frame() calls */
    int use_dr1;         /* non-zero: decoder renders directly into filter buffers */
} FilterPriv;
1568
/* get_buffer() override for direct rendering: allocates the decoder's
   output frame from the filter graph (as an AVFilterPicRef) instead of
   the codec's default buffers, honoring the codec's buffer hints and
   edge-padding requirements. Returns 0 on success, -1 on failure. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterPicRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    /* translate codec buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* over-allocate to satisfy alignment and (unless EMU_EDGE) a border
       of 'edge' pixels on every side */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    /* the ref advertises the visible size, not the padded one */
    ref->w = codec->width;
    ref->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->pic->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->pic->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* advance past the edge border (chroma planes are subsampled) */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;                      /* retrieved later by the filter source */
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1612
1613 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1614 {
1615 memset(pic->data, 0, sizeof(pic->data));
1616 avfilter_unref_pic(pic->opaque);
1617 }
1618
1619 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1620 {
1621 AVFilterPicRef *ref = pic->opaque;
1622
1623 if (pic->data[0] == NULL) {
1624 pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1625 return codec->get_buffer(codec, pic);
1626 }
1627
1628 if ((codec->width != ref->w) || (codec->height != ref->h) ||
1629 (codec->pix_fmt != ref->pic->format)) {
1630 av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1631 return -1;
1632 }
1633
1634 pic->reordered_opaque = codec->reordered_opaque;
1635 return 0;
1636 }
1637
1638 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1639 {
1640 FilterPriv *priv = ctx->priv;
1641 AVCodecContext *codec;
1642 if(!opaque) return -1;
1643
1644 priv->is = opaque;
1645 codec = priv->is->video_st->codec;
1646 codec->opaque = ctx;
1647 if(codec->codec->capabilities & CODEC_CAP_DR1) {
1648 priv->use_dr1 = 1;
1649 codec->get_buffer = input_get_buffer;
1650 codec->release_buffer = input_release_buffer;
1651 codec->reget_buffer = input_reget_buffer;
1652 }
1653
1654 priv->frame = avcodec_alloc_frame();
1655
1656 return 0;
1657 }
1658
1659 static void input_uninit(AVFilterContext *ctx)
1660 {
1661 FilterPriv *priv = ctx->priv;
1662 av_free(priv->frame);
1663 }
1664
1665 static int input_request_frame(AVFilterLink *link)
1666 {
1667 FilterPriv *priv = link->src->priv;
1668 AVFilterPicRef *picref;
1669 int64_t pts = 0;
1670 AVPacket pkt;
1671 int ret;
1672
1673 while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1674 av_free_packet(&pkt);
1675 if (ret < 0)
1676 return -1;
1677
1678 if(priv->use_dr1) {
1679 picref = avfilter_ref_pic(priv->frame->opaque, ~0);
1680 } else {
1681 picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1682 av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
1683 picref->pic->format, link->w, link->h);
1684 }
1685 av_free_packet(&pkt);
1686
1687 picref->pts = pts;
1688 picref->pos = pkt.pos;
1689 picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1690 avfilter_start_frame(link, picref);
1691 avfilter_draw_slice(link, 0, link->h, 1);
1692 avfilter_end_frame(link);
1693
1694 return 0;
1695 }
1696
1697 static int input_query_formats(AVFilterContext *ctx)
1698 {
1699 FilterPriv *priv = ctx->priv;
1700 enum PixelFormat pix_fmts[] = {
1701 priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1702 };
1703
1704 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1705 return 0;
1706 }
1707
1708 static int input_config_props(AVFilterLink *link)
1709 {
1710 FilterPriv *priv = link->src->priv;
1711 AVCodecContext *c = priv->is->video_st->codec;
1712
1713 link->w = c->width;
1714 link->h = c->height;
1715
1716 return 0;
1717 }
1718
/* source filter feeding decoded frames from the video decoder into the
   user-specified filter graph (no inputs, one video output pad) */
static AVFilter input_filter =
{
    .name = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init = input_init,
    .uninit = input_uninit,

    .query_formats = input_query_formats,

    .inputs = (AVFilterPad[]) {{ .name = NULL }},
    .outputs = (AVFilterPad[]) {{ .name = "default",
                                  .type = AVMEDIA_TYPE_VIDEO,
                                  .request_frame = input_request_frame,
                                  .config_props = input_config_props, },
                                { .name = NULL }},
};
1737
/* end_frame callback of the sink: intentionally empty — frames are pulled
   synchronously through get_filtered_video_frame(), so nothing has to
   happen when a pushed frame completes */
static void output_end_frame(AVFilterLink *link)
{
}
1741
1742 static int output_query_formats(AVFilterContext *ctx)
1743 {
1744 enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1745
1746 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1747 return 0;
1748 }
1749
1750 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
1751 int64_t *pts, int64_t *pos)
1752 {
1753 AVFilterPicRef *pic;
1754
1755 if(avfilter_request_frame(ctx->inputs[0]))
1756 return -1;
1757 if(!(pic = ctx->inputs[0]->cur_pic))
1758 return -1;
1759 ctx->inputs[0]->cur_pic = NULL;
1760
1761 frame->opaque = pic;
1762 *pts = pic->pts;
1763 *pos = pic->pos;
1764
1765 memcpy(frame->data, pic->data, sizeof(frame->data));
1766 memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
1767
1768 return 1;
1769 }
1770
/* sink filter at the end of the graph; frames are consumed via
   get_filtered_video_frame() (one readable video input, no outputs) */
static AVFilter output_filter =
{
    .name = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs = (AVFilterPad[]) {{ .name = "default",
                                 .type = AVMEDIA_TYPE_VIDEO,
                                 .end_frame = output_end_frame,
                                 .min_perms = AV_PERM_READ, },
                               { .name = NULL }},
    .outputs = (AVFilterPad[]) {{ .name = NULL }},
};
1784 #endif /* CONFIG_AVFILTER */
1785
/* Video decoding thread: (optionally) builds the avfilter graph
   src -> [vfilters] -> out, then loops pulling decoded/filtered frames
   and queuing them for display until the queue is aborted. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    char sws_flags_str[128];
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    /* propagate the command-line sws flags to the graph's scalers */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if(!(filt_src = avfilter_open(&input_filter, "src"))) goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out"))) goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is)) goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame)) goto the_end;


    if(vfilters) {
        /* parse the user filter chain between our source and sink pads;
           NOTE(review): 'inputs'/'outputs' appear to leak if
           avfilter_graph_parse() fails — confirm ownership semantics */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));

        outputs->name = av_strdup("in");
        outputs->filter = filt_src;
        outputs->pad_idx = 0;
        outputs->next = NULL;

        inputs->name = av_strdup("out");
        inputs->filter = filt_out;
        inputs->pad_idx = 0;
        inputs->next = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filters: connect source directly to sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0) goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL)) goto the_end;
    if(avfilter_graph_config_formats(graph, NULL)) goto the_end;
    if(avfilter_graph_config_links(graph, NULL)) goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        if (!ret)
            continue;

        /* convert stream time_base units to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts, pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* in single-step mode, pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1879
/* Subtitle decoding thread: pulls packets from the subtitle queue,
   decodes them, converts each rect's palette from RGBA to YUVA and
   appends the result to the subtitle picture queue for display. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subtitle picture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's RGBA palette in-place to YUVA so it can
               be blended onto the YUV overlay at display time */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1954
1955 /* copy samples for viewing in editor window */
1956 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1957 {
1958 int size, len, channels;
1959
1960 channels = is->audio_st->codec->channels;
1961
1962 size = samples_size / sizeof(short);
1963 while (size > 0) {
1964 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1965 if (len > size)
1966 len = size;
1967 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1968 samples += len;
1969 is->sample_array_index += len;
1970 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1971 is->sample_array_index = 0;
1972 size -= len;
1973 }
1974 }
1975
1976 /* return the new audio buffer size (samples can be added or deleted
1977 to get better sync if video or external master clock) */
1978 static int synchronize_audio(VideoState *is, short *samples,
1979 int samples_size1, double pts)
1980 {
1981 int n, samples_size;
1982 double ref_clock;
1983
1984 n = 2 * is->audio_st->codec->channels;
1985 samples_size = samples_size1;
1986
1987 /* if not master, then we try to remove or add samples to correct the clock */
1988 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1989 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1990 double diff, avg_diff;
1991 int wanted_size, min_size, max_size, nb_samples;
1992
1993 ref_clock = get_master_clock(is);
1994 diff = get_audio_clock(is) - ref_clock;
1995
1996 if (diff < AV_NOSYNC_THRESHOLD) {
1997 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1998 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1999 /* not enough measures to have a correct estimate */
2000 is->audio_diff_avg_count++;
2001 } else {
2002 /* estimate the A-V difference */
2003 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2004
2005 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2006 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2007 nb_samples = samples_size / n;
2008
2009 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2010 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2011 if (wanted_size < min_size)
2012 wanted_size = min_size;
2013 else if (wanted_size > max_size)
2014 wanted_size = max_size;
2015
2016 /* add or remove samples to correction the synchro */
2017 if (wanted_size < samples_size) {
2018 /* remove samples */
2019 samples_size = wanted_size;
2020 } else if (wanted_size > samples_size) {
2021 uint8_t *samples_end, *q;
2022 int nb;
2023
2024 /* add samples */
2025 nb = (samples_size - wanted_size);
2026 samples_end = (uint8_t *)samples + samples_size - n;
2027 q = samples_end + n;
2028 while (nb > 0) {
2029 memcpy(q, samples_end, n);
2030 q += n;
2031 nb -= n;
2032 }
2033 samples_size = wanted_size;
2034 }
2035 }
2036 #if 0
2037 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2038 diff, avg_diff, samples_size - samples_size1,
2039 is->audio_clock, is->video_clock, is->audio_diff_threshold);
2040 #endif
2041 }
2042 } else {
2043 /* too big difference : may be initial PTS errors, so
2044 reset A-V filter */
2045 is->audio_diff_avg_count = 0;
2046 is->audio_diff_cum = 0;
2047 }
2048 }
2049
2050 return samples_size;
2051 }
2052
/* decode one audio frame and returns its uncompressed size */
/* Consumes packets from the audio queue (a packet may hold several
   frames), converts to S16 when the decoder emits another sample format,
   advances is->audio_clock, and leaves the result in is->audio_buf.
   Returns the number of bytes produced, or -1 on pause/abort. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;   /* sliding window over pkt */
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* the decoder's output format changed: rebuild the converter */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 into S16 */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of this chunk */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2155
2156 /* get the current audio output buffer size, in samples. With SDL, we
2157 cannot have a precise information */
2158 static int audio_write_get_buf_size(VideoState *is)
2159 {
2160 return is->audio_buf_size - is->audio_buf_index;
2161 }
2162
2163
/* prepare a new audio buffer */
/* Audio callback registered with SDL_OpenAudio(): fills 'stream' with
   'len' bytes of decoded, sync-adjusted audio; on decode failure it
   outputs silence so device timing keeps flowing.  Invoked by SDL from
   its audio thread. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* remember when the device asked for data (used for clock estimation) */
    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               /* shrink/grow the chunk to track the master clock */
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much as fits into the device buffer */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2199
/* Open the stream at ic->streams[stream_index] for decoding and start its
 * consumer (SDL audio callback, video thread or subtitle thread).
 * Returns 0 on success, -1 on any failure (bad index, no decoder,
 * avcodec_open() or SDL_OpenAudio() error). */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output: ask the decoder to downmix to at most stereo,
       since that is what we will feed to SDL below */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    /* apply the global command-line decoding options before opening */
    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    /* lowres decoding writes outside the visible picture, so edge
       emulation must be enabled */
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output: open the SDL audio device; the real buffer
       size comes back in spec.size */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    /* stop discarding packets of this stream and publish the decoder state */
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter used by synchronize_audio() */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0); /* start the audio callback */
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2299
/* Tear down the decoder and consumer of stream stream_index: abort the
 * packet queue so the consumer wakes up, join/stop the consumer, free the
 * queue, close the codec and mark the stream slot in *is as inactive. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* abort first so the SDL callback stops consuming, then close
           the device before freeing the queue it reads from */
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* resume discarding packets of this stream in the demuxer */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2371
2372 /* since we have only one decoding thread, we can use a global
2373 variable instead of a thread local variable */
2374 static VideoState *global_video_state;
2375
2376 static int decode_interrupt_cb(void)
2377 {
2378 return (global_video_state && global_video_state->abort_request);
2379 }
2380
/* this thread gets the stream from the disk or the network:
 * it opens the input, picks the best stream of each media type, opens the
 * stream components, then loops demuxing packets into the per-stream
 * queues, servicing seek requests, pause/resume and end-of-file looping.
 * Runs until is->abort_request is set; always returns 0 (errors are
 * reported to the GUI thread via an FF_QUIT_EVENT). */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];             /* chosen stream per media type, -1 = none */
    int st_count[AVMEDIA_TYPE_NB]={0};         /* streams of each type seen so far */
    int st_best_packet_count[AVMEDIA_TYPE_NB]; /* codec_info_nb_frames of current best pick */
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let blocking I/O be interrupted when this stream aborts */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* -bytes -1 ("auto"): seek by bytes only for formats with
       discontinuous timestamps */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* stream selection: honour -ast/-vst/-sst if given, otherwise keep the
       stream of each type with the most frames seen during probing */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: fall back to the audio visualization display */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* drop everything queued before the seek and push the
                   flush sentinel so the decoders reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* feed an empty packet so the video decoder flushes its
               delayed frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* once everything is drained, loop from the start or exit */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* on error, ask the GUI thread to quit */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2641
2642 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2643 {
2644 VideoState *is;
2645
2646 is = av_mallocz(sizeof(VideoState));
2647 if (!is)
2648 return NULL;
2649 av_strlcpy(is->filename, filename, sizeof(is->filename));
2650 is->iformat = iformat;
2651 is->ytop = 0;
2652 is->xleft = 0;
2653
2654 /* start video display */
2655 is->pictq_mutex = SDL_CreateMutex();
2656 is->pictq_cond = SDL_CreateCond();
2657
2658 is->subpq_mutex = SDL_CreateMutex();
2659 is->subpq_cond = SDL_CreateCond();
2660
2661 is->av_sync_type = av_sync_type;
2662 is->parse_tid = SDL_CreateThread(decode_thread, is);
2663 if (!is->parse_tid) {
2664 av_free(is);
2665 return NULL;
2666 }
2667 return is;
2668 }
2669
/* Fully shut down a VideoState created by stream_open(): stop the parser
 * and refresh threads, free all queued pictures and the SDL sync
 * primitives, then free the state itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_pic(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
2703
/* Switch to the next usable stream of the given media type ('a'/'v'/'t'
 * keys). Scans forward from the current stream, wrapping around; for
 * subtitles, wrapping past the end selects "no subtitle" (index -1).
 * If a different suitable stream is found, the current one is closed and
 * the new one opened; if the scan comes back to the start, nothing changes. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* subtitles may legitimately be "off" (-1); audio/video must be open */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2752
2753
2754 static void toggle_full_screen(void)
2755 {
2756 is_full_screen = !is_full_screen;
2757 if (!fs_screen_width) {
2758 /* use default SDL method */
2759 // SDL_WM_ToggleFullScreen(screen);
2760 }
2761 video_open(cur_stream);
2762 }
2763
2764 static void toggle_pause(void)
2765 {
2766 if (cur_stream)
2767 stream_pause(cur_stream);
2768 step = 0;
2769 }
2770
2771 static void step_to_next_frame(void)
2772 {
2773 if (cur_stream) {
2774 /* if the stream is paused unpause it, then step */
2775 if (cur_stream->paused)
2776 stream_pause(cur_stream);
2777 }
2778 step = 1;
2779 }
2780
2781 static void do_exit(void)
2782 {
2783 int i;
2784 if (cur_stream) {
2785 stream_close(cur_stream);
2786 cur_stream = NULL;
2787 }
2788 for (i = 0; i < AVMEDIA_TYPE_NB; i++)
2789 av_free(avcodec_opts[i]);
2790 av_free(avformat_opts);
2791 av_free(sws_opts);
2792 #if CONFIG_AVFILTER
2793 avfilter_uninit();
2794 #endif
2795 if (show_status)
2796 printf("\n");
2797 SDL_Quit();
2798 exit(0);
2799 }
2800
2801 static void toggle_audio_display(void)
2802 {
2803 if (cur_stream) {
2804 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2805 cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2806 fill_rectangle(screen,
2807 cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2808 bgcolor);
2809 SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2810 }
2811 }
2812
/* handle an event sent by the GUI: blocks on SDL_WaitEvent() forever,
 * dispatching keyboard shortcuts, mouse seeks, window resizes and the
 * custom FF_* events posted by the other threads. Never returns (exit
 * goes through do_exit()). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek; the four cases share the seek
               code below via goto */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* convert the time increment into an approximate
                           byte increment using the stream bitrate */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fallthrough: button-down and motion share the seek code,
               distinguished by event.type below */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                /* seek to the fraction of the file matching the click's
                   horizontal position */
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread: overlays must be (re)allocated
               from the main/GUI thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2959
2960 static void opt_frame_size(const char *arg)
2961 {
2962 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2963 fprintf(stderr, "Incorrect frame size\n");
2964 exit(1);
2965 }
2966 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2967 fprintf(stderr, "Frame size must be a multiple of 2\n");
2968 exit(1);
2969 }
2970 }
2971
2972 static int opt_width(const char *opt, const char *arg)
2973 {
2974 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2975 return 0;
2976 }
2977
2978 static int opt_height(const char *opt, const char *arg)
2979 {
2980 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2981 return 0;
2982 }
2983
2984 static void opt_format(const char *arg)
2985 {
2986 file_iformat = av_find_input_format(arg);
2987 if (!file_iformat) {
2988 fprintf(stderr, "Unknown input format: %s\n", arg);
2989 exit(1);
2990 }
2991 }
2992
2993 static void opt_frame_pix_fmt(const char *arg)
2994 {
2995 frame_pix_fmt = av_get_pix_fmt(arg);
2996 }
2997
2998 static int opt_sync(const char *opt, const char *arg)
2999 {
3000 if (!strcmp(arg, "audio"))
3001 av_sync_type = AV_SYNC_AUDIO_MASTER;
3002 else if (!strcmp(arg, "video"))
3003 av_sync_type = AV_SYNC_VIDEO_MASTER;
3004 else if (!strcmp(arg, "ext"))
3005 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3006 else {
3007 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3008 exit(1);
3009 }
3010 return 0;
3011 }
3012
3013 static int opt_seek(const char *opt, const char *arg)
3014 {
3015 start_time = parse_time_or_die(opt, arg, 1);
3016 return 0;
3017 }
3018
3019 static int opt_duration(const char *opt, const char *arg)
3020 {
3021 duration = parse_time_or_die(opt, arg, 1);
3022 return 0;
3023 }
3024
3025 static int opt_debug(const char *opt, const char *arg)
3026 {
3027 av_log_set_level(99);
3028 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3029 return 0;
3030 }
3031
3032 static int opt_vismv(const char *opt, const char *arg)
3033 {
3034 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3035 return 0;
3036 }
3037
3038 static int opt_thread_count(const char *opt, const char *arg)
3039 {
3040 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3041 #if !HAVE_THREADS
3042 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3043 #endif
3044 return 0;
3045 }
3046
/* Command-line option table consumed by parse_options()/show_help_options().
 * Each entry: name, flag mask (HAS_ARG, OPT_BOOL, OPT_INT, OPT_FUNC2,
 * OPT_EXPERT, ...), target (variable pointer or handler function), help
 * text and optional argument name. Terminated by the NULL entry. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3093
/* Print the one-line usage synopsis. */
static void show_usage(void)
{
    fputs("Simple media player\n", stdout);
    fputs("usage: ffplay [options] input_file\n", stdout);
    fputs("\n", stdout);
}
3100
/* Print full help: usage line, the option table (non-expert options first,
 * then the OPT_EXPERT ones) and the interactive key bindings. */
static void show_help(void)
{
    show_usage();
    /* mask OPT_EXPERT, match value 0: non-expert options */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    /* mask and match OPT_EXPERT: expert options */
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3122
3123 static void opt_input_file(const char *filename)
3124 {
3125 if (input_filename) {
3126 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3127 filename, input_filename);
3128 exit(1);
3129 }
3130 if (!strcmp(filename, "-"))
3131 filename = "pipe:";
3132 input_filename = filename;
3133 }
3134
/* Called from the main: registers all libav components, parses the
 * command line, initializes SDL, opens the input stream and enters the
 * (never-returning) GUI event loop. */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    /* contexts used by set_context_opts() to hold -default/AVOption values */
    for(i=0; i<AVMEDIA_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
#if !CONFIG_AVFILTER
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
#endif

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop size for fullscreen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event types we never handle so they don't wake the event loop */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet pushed into the queues on seek; its data pointer
       identifies it to the decoder threads */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}