/* Update lavu minor and add APIchanges entry after r24174 (add bswap.h)
   [libav.git] / ffplay.c */
1 /*
2 * FFplay : Simple Media Player based on the FFmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavformat/avformat.h"
30 #include "libavdevice/avdevice.h"
31 #include "libswscale/swscale.h"
32 #include "libavcodec/audioconvert.h"
33 #include "libavcodec/opt.h"
34 #include "libavcodec/avfft.h"
35
36 #if CONFIG_AVFILTER
37 # include "libavfilter/avfilter.h"
38 # include "libavfilter/avfiltergraph.h"
39 # include "libavfilter/graphparser.h"
40 #endif
41
42 #include "cmdutils.h"
43
44 #include <SDL.h>
45 #include <SDL_thread.h>
46
47 #ifdef __MINGW32__
48 #undef main /* We don't want SDL to override our main() */
49 #endif
50
51 #include <unistd.h>
52 #include <assert.h>
53
const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

/* demuxer stops reading once a packet queue exceeds these limits */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* multiplicative growth of the frame-skip ratio each time video lags */
#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;
84
/* Thread-safe FIFO of demuxed packets, shared between the read thread
   (producer) and a decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly-linked list: head and tail */
    int nb_packets;    /* number of packets currently queued */
    int size;          /* queued bytes (payload plus list-node overhead) */
    int abort_request; /* when set, blocked readers return -1 */
    SDL_mutex *mutex;  /* protects every field above */
    SDL_cond *cond;    /* signalled on put and on abort */
} PacketQueue;
93
94 #define VIDEO_PICTURE_QUEUE_SIZE 2
95 #define SUBPICTURE_QUEUE_SIZE 4
96
/* One slot of the decoded-picture queue handed from the video decode
   thread to the display loop. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay that holds the pixel data
    int width, height; /* source height & width */
    int allocated;                               ///<set once bmp has been (re)allocated by the event loop
    enum PixelFormat pix_fmt;                    ///<pixel format of the queued frame

#if CONFIG_AVFILTER
    AVFilterPicRef *picref;                      ///<filtergraph output reference backing this picture
#endif
} VideoPicture;
110
/* One slot of the decoded-subtitle queue. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle; released with avsubtitle_free() */
} SubPicture;
115
/* which clock drives A/V synchronisation (see get_master_clock) */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
121
/* All per-file playback state, shared between the demux (parse), video
   decode, subtitle decode and refresh threads plus the SDL audio callback. */
typedef struct VideoState {
    SDL_Thread *parse_tid;   /* demux / read thread */
    SDL_Thread *video_tid;   /* video decode thread */
    SDL_Thread *refresh_tid; /* thread that pushes FF_REFRESH_EVENTs */
    AVInputFormat *iformat;  /* forced input format, if any */
    int no_background;
    int abort_request;       /* tells every thread to shut down */
    int paused;
    int last_paused;         /* previous pause state seen by the read thread */
    int seek_req;            /* a seek is pending */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;   /* result of av_read_pause(), checked on resume */
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;        /* audio stream index, or -1 when disabled */

    int av_sync_type;        /* one of the AV_SYNC_* values */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;      /* pts of the most recently decoded audio data */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;          /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx; /* converts decoded samples to S16 when needed */

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples for visualisation */
    int sample_array_index;
    int last_i_start;   /* waveform trigger index kept while paused */
    RDFTContext *rdft;  /* FFT context for the spectrum display */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;           /* current column of the scrolling spectrum */

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;      /* time at which the next frame should be shown */
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* window geometry */

    /* pts/dts fault counters used to pick the reordering heuristic */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;       /* current frame-drop ratio (>= 1 drops) */
    float skip_frames_index;
    int refresh;             /* a refresh event is already queued */
} VideoState;
219
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* user-requested stream index per media type; -1 = auto-select */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1; /* -1 = decide per container */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20; /* spectrum refresh period, ms */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time; /* av_gettime() at last audio callback */

static AVPacket flush_pkt; /* sentinel packet telling decoders to flush */

/* custom SDL user events */
#define FF_ALLOC_EVENT (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
287
288 /* packet queue handling */
/* Initialize a packet queue and prime it with the global flush packet so
   the consuming decoder flushes its state before the first real packet. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
296
297 static void packet_queue_flush(PacketQueue *q)
298 {
299 AVPacketList *pkt, *pkt1;
300
301 SDL_LockMutex(q->mutex);
302 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
303 pkt1 = pkt->next;
304 av_free_packet(&pkt->pkt);
305 av_freep(&pkt);
306 }
307 q->last_pkt = NULL;
308 q->first_pkt = NULL;
309 q->nb_packets = 0;
310 q->size = 0;
311 SDL_UnlockMutex(q->mutex);
312 }
313
/* Tear a queue down: drop any remaining packets, then release the SDL
   synchronisation primitives. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
320
321 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
322 {
323 AVPacketList *pkt1;
324
325 /* duplicate the packet */
326 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
327 return -1;
328
329 pkt1 = av_malloc(sizeof(AVPacketList));
330 if (!pkt1)
331 return -1;
332 pkt1->pkt = *pkt;
333 pkt1->next = NULL;
334
335
336 SDL_LockMutex(q->mutex);
337
338 if (!q->last_pkt)
339
340 q->first_pkt = pkt1;
341 else
342 q->last_pkt->next = pkt1;
343 q->last_pkt = pkt1;
344 q->nb_packets++;
345 q->size += pkt1->pkt.size + sizeof(*pkt1);
346 /* XXX: should duplicate packet data in DV case */
347 SDL_CondSignal(q->cond);
348
349 SDL_UnlockMutex(q->mutex);
350 return 0;
351 }
352
/* Make the queue refuse further blocking reads and wake any thread
   currently blocked in packet_queue_get(). */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
363
364 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
365 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
366 {
367 AVPacketList *pkt1;
368 int ret;
369
370 SDL_LockMutex(q->mutex);
371
372 for(;;) {
373 if (q->abort_request) {
374 ret = -1;
375 break;
376 }
377
378 pkt1 = q->first_pkt;
379 if (pkt1) {
380 q->first_pkt = pkt1->next;
381 if (!q->first_pkt)
382 q->last_pkt = NULL;
383 q->nb_packets--;
384 q->size -= pkt1->pkt.size + sizeof(*pkt1);
385 *pkt = pkt1->pkt;
386 av_free(pkt1);
387 ret = 1;
388 break;
389 } else if (!block) {
390 ret = 0;
391 break;
392 } else {
393 SDL_CondWait(q->cond, q->mutex);
394 }
395 }
396 SDL_UnlockMutex(q->mutex);
397 return ret;
398 }
399
400 static inline void fill_rectangle(SDL_Surface *screen,
401 int x, int y, int w, int h, int color)
402 {
403 SDL_Rect rect;
404 rect.x = x;
405 rect.y = y;
406 rect.w = w;
407 rect.h = h;
408 SDL_FillRect(screen, &rect, color);
409 }
410
#if 0
/* draw only the border of a rectangle */
/* NOTE: dead code kept for reference — fills the four letterbox bands
   around an inner (x, y, w, h) rectangle within the window. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
448
/* Blend newp over oldp with alpha a.  Both channel values may be
   pre-scaled by (1 << s) so that sums of 2 or 4 samples can be blended
   directly.  Fix: all arguments are now parenthesized — the original
   expanded `oldp`, `newp` and `s` unparenthesized, breaking for
   expression arguments (e.g. `newp * (a)` with newp == 2+2). */
#define ALPHA_BLEND(a, oldp, newp, s)\
(((((oldp) << (s)) * (255 - (a))) + ((newp) * (a))) / (255 << (s)))

/* unpack a 32-bit ARGB pixel into its four 8-bit components */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* look up palette index *(s) in pal and unpack the AYUV components */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* pack AYUV components into a 32-bit word at d */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
477
478 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
479 {
480 int wrap, wrap3, width2, skip2;
481 int y, u, v, a, u1, v1, a1, w, h;
482 uint8_t *lum, *cb, *cr;
483 const uint8_t *p;
484 const uint32_t *pal;
485 int dstx, dsty, dstw, dsth;
486
487 dstw = av_clip(rect->w, 0, imgw);
488 dsth = av_clip(rect->h, 0, imgh);
489 dstx = av_clip(rect->x, 0, imgw - dstw);
490 dsty = av_clip(rect->y, 0, imgh - dsth);
491 lum = dst->data[0] + dsty * dst->linesize[0];
492 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
493 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
494
495 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
496 skip2 = dstx >> 1;
497 wrap = dst->linesize[0];
498 wrap3 = rect->pict.linesize[0];
499 p = rect->pict.data[0];
500 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
501
502 if (dsty & 1) {
503 lum += dstx;
504 cb += skip2;
505 cr += skip2;
506
507 if (dstx & 1) {
508 YUVA_IN(y, u, v, a, p, pal);
509 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
510 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
511 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
512 cb++;
513 cr++;
514 lum++;
515 p += BPP;
516 }
517 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
518 YUVA_IN(y, u, v, a, p, pal);
519 u1 = u;
520 v1 = v;
521 a1 = a;
522 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523
524 YUVA_IN(y, u, v, a, p + BPP, pal);
525 u1 += u;
526 v1 += v;
527 a1 += a;
528 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
529 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
530 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
531 cb++;
532 cr++;
533 p += 2 * BPP;
534 lum += 2;
535 }
536 if (w) {
537 YUVA_IN(y, u, v, a, p, pal);
538 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
540 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
541 p++;
542 lum++;
543 }
544 p += wrap3 - dstw * BPP;
545 lum += wrap - dstw - dstx;
546 cb += dst->linesize[1] - width2 - skip2;
547 cr += dst->linesize[2] - width2 - skip2;
548 }
549 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
550 lum += dstx;
551 cb += skip2;
552 cr += skip2;
553
554 if (dstx & 1) {
555 YUVA_IN(y, u, v, a, p, pal);
556 u1 = u;
557 v1 = v;
558 a1 = a;
559 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
560 p += wrap3;
561 lum += wrap;
562 YUVA_IN(y, u, v, a, p, pal);
563 u1 += u;
564 v1 += v;
565 a1 += a;
566 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
567 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
568 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
569 cb++;
570 cr++;
571 p += -wrap3 + BPP;
572 lum += -wrap + 1;
573 }
574 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
575 YUVA_IN(y, u, v, a, p, pal);
576 u1 = u;
577 v1 = v;
578 a1 = a;
579 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580
581 YUVA_IN(y, u, v, a, p + BPP, pal);
582 u1 += u;
583 v1 += v;
584 a1 += a;
585 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
586 p += wrap3;
587 lum += wrap;
588
589 YUVA_IN(y, u, v, a, p, pal);
590 u1 += u;
591 v1 += v;
592 a1 += a;
593 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
594
595 YUVA_IN(y, u, v, a, p + BPP, pal);
596 u1 += u;
597 v1 += v;
598 a1 += a;
599 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
600
601 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
602 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
603
604 cb++;
605 cr++;
606 p += -wrap3 + 2 * BPP;
607 lum += -wrap + 2;
608 }
609 if (w) {
610 YUVA_IN(y, u, v, a, p, pal);
611 u1 = u;
612 v1 = v;
613 a1 = a;
614 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615 p += wrap3;
616 lum += wrap;
617 YUVA_IN(y, u, v, a, p, pal);
618 u1 += u;
619 v1 += v;
620 a1 += a;
621 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
623 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
624 cb++;
625 cr++;
626 p += -wrap3 + BPP;
627 lum += -wrap + 1;
628 }
629 p += wrap3 + (wrap3 - dstw * BPP);
630 lum += wrap + (wrap - dstw - dstx);
631 cb += dst->linesize[1] - width2 - skip2;
632 cr += dst->linesize[2] - width2 - skip2;
633 }
634 /* handle odd height */
635 if (h) {
636 lum += dstx;
637 cb += skip2;
638 cr += skip2;
639
640 if (dstx & 1) {
641 YUVA_IN(y, u, v, a, p, pal);
642 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
643 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
644 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
645 cb++;
646 cr++;
647 lum++;
648 p += BPP;
649 }
650 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
651 YUVA_IN(y, u, v, a, p, pal);
652 u1 = u;
653 v1 = v;
654 a1 = a;
655 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
656
657 YUVA_IN(y, u, v, a, p + BPP, pal);
658 u1 += u;
659 v1 += v;
660 a1 += a;
661 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
662 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
663 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
664 cb++;
665 cr++;
666 p += 2 * BPP;
667 lum += 2;
668 }
669 if (w) {
670 YUVA_IN(y, u, v, a, p, pal);
671 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
672 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
673 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
674 }
675 }
676 }
677
/* Release the decoded subtitle data held by a subtitle-queue slot. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
682
/* Blit the picture at the read index of the picture queue onto the
   screen, blending any due subtitle on top, letterboxed so the sample
   aspect ratio is preserved within the window. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend once the subtitle's start time has been reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* SDL YV12 overlays store Cr before Cb, hence the swap */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
804
/* Mathematical modulo: result is always in [0, b) for positive b, even
   when a is negative (unlike C's % operator). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
813
814 static void video_audio_display(VideoState *s)
815 {
816 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
817 int ch, channels, h, h2, bgcolor, fgcolor;
818 int16_t time_diff;
819 int rdft_bits, nb_freq;
820
821 for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
822 ;
823 nb_freq= 1<<(rdft_bits-1);
824
825 /* compute display index : center on currently output samples */
826 channels = s->audio_st->codec->channels;
827 nb_display_channels = channels;
828 if (!s->paused) {
829 int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
830 n = 2 * channels;
831 delay = audio_write_get_buf_size(s);
832 delay /= n;
833
834 /* to be more precise, we take into account the time spent since
835 the last buffer computation */
836 if (audio_callback_time) {
837 time_diff = av_gettime() - audio_callback_time;
838 delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
839 }
840
841 delay += 2*data_used;
842 if (delay < data_used)
843 delay = data_used;
844
845 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
846 if(s->show_audio==1){
847 h= INT_MIN;
848 for(i=0; i<1000; i+=channels){
849 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
850 int a= s->sample_array[idx];
851 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
852 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
853 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
854 int score= a-d;
855 if(h<score && (b^c)<0){
856 h= score;
857 i_start= idx;
858 }
859 }
860 }
861
862 s->last_i_start = i_start;
863 } else {
864 i_start = s->last_i_start;
865 }
866
867 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
868 if(s->show_audio==1){
869 fill_rectangle(screen,
870 s->xleft, s->ytop, s->width, s->height,
871 bgcolor);
872
873 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
874
875 /* total height for one channel */
876 h = s->height / nb_display_channels;
877 /* graph height / 2 */
878 h2 = (h * 9) / 20;
879 for(ch = 0;ch < nb_display_channels; ch++) {
880 i = i_start + ch;
881 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
882 for(x = 0; x < s->width; x++) {
883 y = (s->sample_array[i] * h2) >> 15;
884 if (y < 0) {
885 y = -y;
886 ys = y1 - y;
887 } else {
888 ys = y1;
889 }
890 fill_rectangle(screen,
891 s->xleft + x, ys, 1, y,
892 fgcolor);
893 i += channels;
894 if (i >= SAMPLE_ARRAY_SIZE)
895 i -= SAMPLE_ARRAY_SIZE;
896 }
897 }
898
899 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
900
901 for(ch = 1;ch < nb_display_channels; ch++) {
902 y = s->ytop + ch * h;
903 fill_rectangle(screen,
904 s->xleft, y, s->width, 1,
905 fgcolor);
906 }
907 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
908 }else{
909 nb_display_channels= FFMIN(nb_display_channels, 2);
910 if(rdft_bits != s->rdft_bits){
911 av_rdft_end(s->rdft);
912 av_free(s->rdft_data);
913 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
914 s->rdft_bits= rdft_bits;
915 s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
916 }
917 {
918 FFTSample *data[2];
919 for(ch = 0;ch < nb_display_channels; ch++) {
920 data[ch] = s->rdft_data + 2*nb_freq*ch;
921 i = i_start + ch;
922 for(x = 0; x < 2*nb_freq; x++) {
923 double w= (x-nb_freq)*(1.0/nb_freq);
924 data[ch][x]= s->sample_array[i]*(1.0-w*w);
925 i += channels;
926 if (i >= SAMPLE_ARRAY_SIZE)
927 i -= SAMPLE_ARRAY_SIZE;
928 }
929 av_rdft_calc(s->rdft, data[ch]);
930 }
931 //least efficient way to do this, we should of course directly access it but its more than fast enough
932 for(y=0; y<s->height; y++){
933 double w= 1/sqrt(nb_freq);
934 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
935 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
936 + data[1][2*y+1]*data[1][2*y+1])) : a;
937 a= FFMIN(a,255);
938 b= FFMIN(b,255);
939 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
940
941 fill_rectangle(screen,
942 s->xpos, s->height-y, 1, 1,
943 fgcolor);
944 }
945 }
946 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
947 s->xpos++;
948 if(s->xpos >= s->width)
949 s->xpos= s->xleft;
950 }
951 }
952
/* Open (or resize) the SDL output surface.  The size comes, in priority
   order, from: fullscreen dimensions, the -x/-y options, the filter graph
   output, the codec dimensions, or a 640x480 fallback.
   Returns 0 on success, -1 if SDL cannot set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do when the surface already has the requested size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1002
1003 /* display the current picture, if any */
/* display the current picture, if any */
/* Audio visualisation takes precedence when enabled; otherwise show the
   current video frame. */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream); /* NOTE(review): opens cur_stream rather than is — appears intentional (single global stream) but verify */
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}
1013
/* Thread that periodically pushes FF_REFRESH_EVENT so the main loop
   redraws; is->refresh coalesces events to avoid flooding the SDL queue. */
static int refresh_thread(void *opaque)
{
    VideoState *is= opaque;
    while(!is->abort_request){
        SDL_Event event;
        event.type = FF_REFRESH_EVENT;
        event.user.data1 = opaque;
        if(!is->refresh){
            is->refresh=1; /* cleared by the main loop when the event is handled */
            SDL_PushEvent(&event);
        }
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
    }
    return 0;
}
1029
1030 /* get the current audio clock value */
1031 static double get_audio_clock(VideoState *is)
1032 {
1033 double pts;
1034 int hw_buf_size, bytes_per_sec;
1035 pts = is->audio_clock;
1036 hw_buf_size = audio_write_get_buf_size(is);
1037 bytes_per_sec = 0;
1038 if (is->audio_st) {
1039 bytes_per_sec = is->audio_st->codec->sample_rate *
1040 2 * is->audio_st->codec->channels;
1041 }
1042 if (bytes_per_sec)
1043 pts -= (double)hw_buf_size / bytes_per_sec;
1044 return pts;
1045 }
1046
1047 /* get the current video clock value */
1048 static double get_video_clock(VideoState *is)
1049 {
1050 if (is->paused) {
1051 return is->video_current_pts;
1052 } else {
1053 return is->video_current_pts_drift + av_gettime() / 1000000.0;
1054 }
1055 }
1056
1057 /* get the current external clock value */
1058 static double get_external_clock(VideoState *is)
1059 {
1060 int64_t ti;
1061 ti = av_gettime();
1062 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1063 }
1064
1065 /* get the current master clock value */
1066 static double get_master_clock(VideoState *is)
1067 {
1068 double val;
1069
1070 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1071 if (is->video_st)
1072 val = get_video_clock(is);
1073 else
1074 val = get_audio_clock(is);
1075 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1076 if (is->audio_st)
1077 val = get_audio_clock(is);
1078 else
1079 val = get_video_clock(is);
1080 } else {
1081 val = get_external_clock(is);
1082 }
1083 return val;
1084 }
1085
1086 /* seek in the stream */
1087 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1088 {
1089 if (!is->seek_req) {
1090 is->seek_pos = pos;
1091 is->seek_rel = rel;
1092 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1093 if (seek_by_bytes)
1094 is->seek_flags |= AVSEEK_FLAG_BYTE;
1095 is->seek_req = 1;
1096 }
1097 }
1098
1099 /* pause or resume the video */
/* pause or resume the video */
/* On resume, re-anchor the frame timer and video clock so the time spent
   paused does not count as elapsed playback time. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* advance frame_timer by the wall time spent paused */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer honoured av_read_pause(): recompute the pts from drift */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to the (possibly updated) current pts */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1111
1112 static double compute_target_time(double frame_current_pts, VideoState *is)
1113 {
1114 double delay, sync_threshold, diff;
1115
1116 /* compute nominal delay */
1117 delay = frame_current_pts - is->frame_last_pts;
1118 if (delay <= 0 || delay >= 10.0) {
1119 /* if incorrect delay, use previous one */
1120 delay = is->frame_last_delay;
1121 } else {
1122 is->frame_last_delay = delay;
1123 }
1124 is->frame_last_pts = frame_current_pts;
1125
1126 /* update delay to follow master synchronisation source */
1127 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1128 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1129 /* if video is slave, we try to correct big delays by
1130 duplicating or deleting a frame */
1131 diff = get_video_clock(is) - get_master_clock(is);
1132
1133 /* skip or repeat frame. We take into account the
1134 delay to compute the threshold. I still don't know
1135 if it is the best guess */
1136 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1137 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1138 if (diff <= -sync_threshold)
1139 delay = 0;
1140 else if (diff >= sync_threshold)
1141 delay = 2 * delay;
1142 }
1143 }
1144 is->frame_timer += delay;
1145 #if defined(DEBUG_SYNC)
1146 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1147 delay, actual_delay, frame_current_pts, -diff);
1148 #endif
1149
1150 return is->frame_timer;
1151 }
1152
/* Called (from the event loop / refresh scheduler) to display each frame.
   Pops the next ready picture from the picture queue, drops late frames
   when framedrop is enabled, overlays any due subtitle, repaints, and
   periodically prints the status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the queue
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: keep it queued and come back later */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the frame AFTER this one is due, so we can
               decide whether displaying this one is already pointless */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            if(framedrop && time > next_target){
                /* we are late: raise the decoder-side skip ratio */
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* drop this picture: update queue size and signal
                       the producer, then try the next one */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switched: flush every queued subtitle */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the current subtitle once it has expired or
                           its successor's display window has begun */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* throttle the status line to one update per ~30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1291
1292 /* allocate a picture (needs to do that in main thread to avoid
1293 potential locking problems */
1294 static void alloc_picture(void *opaque)
1295 {
1296 VideoState *is = opaque;
1297 VideoPicture *vp;
1298
1299 vp = &is->pictq[is->pictq_windex];
1300
1301 if (vp->bmp)
1302 SDL_FreeYUVOverlay(vp->bmp);
1303
1304 #if CONFIG_AVFILTER
1305 if (vp->picref)
1306 avfilter_unref_pic(vp->picref);
1307 vp->picref = NULL;
1308
1309 vp->width = is->out_video_filter->inputs[0]->w;
1310 vp->height = is->out_video_filter->inputs[0]->h;
1311 vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1312 #else
1313 vp->width = is->video_st->codec->width;
1314 vp->height = is->video_st->codec->height;
1315 vp->pix_fmt = is->video_st->codec->pix_fmt;
1316 #endif
1317
1318 vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1319 SDL_YV12_OVERLAY,
1320 screen);
1321
1322 SDL_LockMutex(is->pictq_mutex);
1323 vp->allocated = 1;
1324 SDL_CondSignal(is->pictq_cond);
1325 SDL_UnlockMutex(is->pictq_mutex);
1326 }
1327
/**
 * Append a decoded frame to the picture queue, converting it into the
 * SDL YV12 overlay of the next free slot.  Blocks until a slot is free,
 * and hands overlay (re)allocation off to the main thread via
 * FF_ALLOC_EVENT when the overlay is missing or the wrong size.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while not in forced-refresh: decay the skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated (alloc_picture() sets
           vp->allocated and signals pictq_cond) */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        /* take over the filter picture reference; released on reuse */
        if(vp->picref)
            avfilter_unref_pic(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 stores planes as Y, V, U — hence the 0/2/1 swap */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* no filter graph: convert/copy with swscale into the overlay */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1449
1450 /**
1451 * compute the exact PTS for the picture if it is omitted in the stream
1452 * @param pts1 the dts of the pkt / pts of the frame
1453 */
1454 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1455 {
1456 double frame_delay, pts;
1457
1458 pts = pts1;
1459
1460 if (pts != 0) {
1461 /* update video clock with pts, if present */
1462 is->video_clock = pts;
1463 } else {
1464 pts = is->video_clock;
1465 }
1466 /* update video clock for next frame */
1467 frame_delay = av_q2d(is->video_st->codec->time_base);
1468 /* for MPEG2, the frame can be repeated, so we update the
1469 clock accordingly */
1470 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1471 is->video_clock += frame_delay;
1472
1473 #if defined(DEBUG_SYNC) && 0
1474 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1475 av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1476 #endif
1477 return queue_picture(is, src_frame, pts, pos);
1478 }
1479
/* Pull one packet from the video queue and decode it.
   Returns 1 when a displayable frame was produced in *frame (with *pts
   filled in stream time_base units), 0 when no frame is ready (flush
   packet, incomplete frame, or frame dropped for catch-up), and -1 on
   abort.  On return 1/0 the caller owns pkt and must free it. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    /* flush marker (seek): reset decoder and all pts bookkeeping */
    if(pkt->data == flush_pkt.data){
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
        for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
            is->pictq[i].target_clock= 0;
        }
        /* wait for the refresh side to drain the already-queued pictures */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos= -1;
        SDL_UnlockMutex(is->pictq_mutex);

        is->last_dts_for_fault_detection=
        is->last_pts_for_fault_detection= INT64_MIN;
        is->frame_last_pts= AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames= 1;
        is->skip_frames_index= 0;
        return 0;
    }

    /* NOTE: ipts is the PTS of the _first_ picture beginning in
       this packet, if any */
    is->video_st->codec->reordered_opaque= pkt->pts;
    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    /* count non-monotonic dts/pts to decide which timestamp to trust */
    if (got_picture) {
        if(pkt->dts != AV_NOPTS_VALUE){
            is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
            is->last_dts_for_fault_detection= pkt->dts;
        }
        if(frame->reordered_opaque != AV_NOPTS_VALUE){
            is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
            is->last_pts_for_fault_detection= frame->reordered_opaque;
        }
    }

    /* pick the reordered pts when forced, when it looks more reliable
       than dts, or when dts is absent; otherwise fall back to dts, then 0 */
    if(   (   decoder_reorder_pts==1
           || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
           || pkt->dts == AV_NOPTS_VALUE)
       && frame->reordered_opaque != AV_NOPTS_VALUE)
        *pts= frame->reordered_opaque;
    else if(pkt->dts != AV_NOPTS_VALUE)
        *pts= pkt->dts;
    else
        *pts= 0;

//            if (len1 < 0)
//                break;
    /* honor the fractional frame-skip ratio used for A/V catch-up */
    if (got_picture){
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1551
1552 #if CONFIG_AVFILTER
/* Private context of the "ffplay_input" source filter. */
typedef struct {
    VideoState *is;   /* owning player instance (provides the video codec) */
    AVFrame *frame;   /* reusable frame receiving decoder output; allocated in input_init() */
    int use_dr1;      /* nonzero when the codec supports CODEC_CAP_DR1 direct rendering */
} FilterPriv;
1558
/* get_buffer callback installed on the decoder when DR1 is available:
   lets the codec decode directly into a picture buffer obtained from the
   filter graph, avoiding one copy.  The buffer is over-allocated by the
   codec's edge width and the data pointers are offset past that edge. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterPicRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    /* translate the codec's buffer hints into filter picture permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* request a buffer large enough for alignment plus the edge border */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    /* the picture the graph sees has the codec's nominal dimensions */
    ref->w = codec->width;
    ref->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* chroma planes (1 and 2) have subsampled edge offsets */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->pic->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->pic->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* skip the top/left edge so pic->data points at the visible area */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;          /* stash the filter ref for release/reget */
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1602
1603 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1604 {
1605 memset(pic->data, 0, sizeof(pic->data));
1606 avfilter_unref_pic(pic->opaque);
1607 }
1608
1609 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1610 {
1611 AVFilterPicRef *ref = pic->opaque;
1612
1613 if (pic->data[0] == NULL) {
1614 pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1615 return codec->get_buffer(codec, pic);
1616 }
1617
1618 if ((codec->width != ref->w) || (codec->height != ref->h) ||
1619 (codec->pix_fmt != ref->pic->format)) {
1620 av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1621 return -1;
1622 }
1623
1624 pic->reordered_opaque = codec->reordered_opaque;
1625 return 0;
1626 }
1627
1628 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1629 {
1630 FilterPriv *priv = ctx->priv;
1631 AVCodecContext *codec;
1632 if(!opaque) return -1;
1633
1634 priv->is = opaque;
1635 codec = priv->is->video_st->codec;
1636 codec->opaque = ctx;
1637 if(codec->codec->capabilities & CODEC_CAP_DR1) {
1638 priv->use_dr1 = 1;
1639 codec->get_buffer = input_get_buffer;
1640 codec->release_buffer = input_release_buffer;
1641 codec->reget_buffer = input_reget_buffer;
1642 }
1643
1644 priv->frame = avcodec_alloc_frame();
1645
1646 return 0;
1647 }
1648
1649 static void input_uninit(AVFilterContext *ctx)
1650 {
1651 FilterPriv *priv = ctx->priv;
1652 av_free(priv->frame);
1653 }
1654
1655 static int input_request_frame(AVFilterLink *link)
1656 {
1657 FilterPriv *priv = link->src->priv;
1658 AVFilterPicRef *picref;
1659 int64_t pts = 0;
1660 AVPacket pkt;
1661 int ret;
1662
1663 while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1664 av_free_packet(&pkt);
1665 if (ret < 0)
1666 return -1;
1667
1668 if(priv->use_dr1) {
1669 picref = avfilter_ref_pic(priv->frame->opaque, ~0);
1670 } else {
1671 picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1672 av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
1673 picref->pic->format, link->w, link->h);
1674 }
1675 av_free_packet(&pkt);
1676
1677 picref->pts = pts;
1678 picref->pos = pkt.pos;
1679 picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1680 avfilter_start_frame(link, picref);
1681 avfilter_draw_slice(link, 0, link->h, 1);
1682 avfilter_end_frame(link);
1683
1684 return 0;
1685 }
1686
1687 static int input_query_formats(AVFilterContext *ctx)
1688 {
1689 FilterPriv *priv = ctx->priv;
1690 enum PixelFormat pix_fmts[] = {
1691 priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1692 };
1693
1694 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1695 return 0;
1696 }
1697
1698 static int input_config_props(AVFilterLink *link)
1699 {
1700 FilterPriv *priv = link->src->priv;
1701 AVCodecContext *c = priv->is->video_st->codec;
1702
1703 link->w = c->width;
1704 link->h = c->height;
1705
1706 return 0;
1707 }
1708
/* Source filter feeding decoded video frames into the filter graph.
   It has no inputs; its single video output is driven by
   input_request_frame() pulling frames from the decoder. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1727
/* end_frame callback of the sink filter: intentionally empty — frames
   are pulled out of the graph by get_filtered_video_frame(), so nothing
   needs to happen when a frame finishes arriving on the input link. */
static void output_end_frame(AVFilterLink *link)
{
}
1731
1732 static int output_query_formats(AVFilterContext *ctx)
1733 {
1734 enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1735
1736 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1737 return 0;
1738 }
1739
1740 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
1741 int64_t *pts, int64_t *pos)
1742 {
1743 AVFilterPicRef *pic;
1744
1745 if(avfilter_request_frame(ctx->inputs[0]))
1746 return -1;
1747 if(!(pic = ctx->inputs[0]->cur_pic))
1748 return -1;
1749 ctx->inputs[0]->cur_pic = NULL;
1750
1751 frame->opaque = pic;
1752 *pts = pic->pts;
1753 *pos = pic->pos;
1754
1755 memcpy(frame->data, pic->data, sizeof(frame->data));
1756 memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
1757
1758 return 1;
1759 }
1760
/* Sink filter at the end of the graph.  It has no outputs; frames are
   extracted from its input link by get_filtered_video_frame(), which is
   why end_frame is a no-op. */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .end_frame = output_end_frame,
                                    .min_perms = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1774 #endif /* CONFIG_AVFILTER */
1775
/* Video decoding thread: builds the optional filter graph, then loops
   pulling decoded (and filtered) frames and queuing them for display
   via output_picture2().  Exits when the video queue is aborted. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    char sws_flags_str[128];
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    /* propagate the command-line sws flags to any auto-inserted scaler */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* user-specified chain: parse it between our source and sink */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filters: connect source straight to sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret==0: no frame ready (flush/skip) — keep pulling */
        if (!ret)
            continue;

        /* convert stream time_base units to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* frame-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    /* NOTE: filters opened but not yet added to the graph when an early
       init step fails are not destroyed here (small leak on error path) */
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1869
/* Subtitle decoding thread: pulls packets from the subtitle queue,
   decodes them, converts each rect's palette from RGBA to YUVA (for
   blending onto the YUV overlay), and appends ready subtitles to the
   subtitle picture queue. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* flush marker (seek): reset the decoder and drop this packet */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subtitle picture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format 0 = bitmap subtitles; convert each palette entry */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1944
1945 /* copy samples for viewing in editor window */
1946 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1947 {
1948 int size, len, channels;
1949
1950 channels = is->audio_st->codec->channels;
1951
1952 size = samples_size / sizeof(short);
1953 while (size > 0) {
1954 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1955 if (len > size)
1956 len = size;
1957 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1958 samples += len;
1959 is->sample_array_index += len;
1960 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1961 is->sample_array_index = 0;
1962 size -= len;
1963 }
1964 }
1965
1966 /* return the new audio buffer size (samples can be added or deleted
1967 to get better sync if video or external master clock) */
1968 static int synchronize_audio(VideoState *is, short *samples,
1969 int samples_size1, double pts)
1970 {
1971 int n, samples_size;
1972 double ref_clock;
1973
1974 n = 2 * is->audio_st->codec->channels;
1975 samples_size = samples_size1;
1976
1977 /* if not master, then we try to remove or add samples to correct the clock */
1978 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1979 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1980 double diff, avg_diff;
1981 int wanted_size, min_size, max_size, nb_samples;
1982
1983 ref_clock = get_master_clock(is);
1984 diff = get_audio_clock(is) - ref_clock;
1985
1986 if (diff < AV_NOSYNC_THRESHOLD) {
1987 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1988 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1989 /* not enough measures to have a correct estimate */
1990 is->audio_diff_avg_count++;
1991 } else {
1992 /* estimate the A-V difference */
1993 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1994
1995 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1996 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1997 nb_samples = samples_size / n;
1998
1999 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2000 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2001 if (wanted_size < min_size)
2002 wanted_size = min_size;
2003 else if (wanted_size > max_size)
2004 wanted_size = max_size;
2005
2006 /* add or remove samples to correction the synchro */
2007 if (wanted_size < samples_size) {
2008 /* remove samples */
2009 samples_size = wanted_size;
2010 } else if (wanted_size > samples_size) {
2011 uint8_t *samples_end, *q;
2012 int nb;
2013
2014 /* add samples */
2015 nb = (samples_size - wanted_size);
2016 samples_end = (uint8_t *)samples + samples_size - n;
2017 q = samples_end + n;
2018 while (nb > 0) {
2019 memcpy(q, samples_end, n);
2020 q += n;
2021 nb -= n;
2022 }
2023 samples_size = wanted_size;
2024 }
2025 }
2026 #if 0
2027 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2028 diff, avg_diff, samples_size - samples_size1,
2029 is->audio_clock, is->video_clock, is->audio_diff_threshold);
2030 #endif
2031 }
2032 } else {
2033 /* too big difference : may be initial PTS errors, so
2034 reset A-V filter */
2035 is->audio_diff_avg_count = 0;
2036 is->audio_diff_cum = 0;
2037 }
2038 }
2039
2040 return samples_size;
2041 }
2042
/* Decode one audio frame and return its uncompressed size in bytes.
   Converts to S16 when the decoder emits another sample format, points
   is->audio_buf at the result, sets *pts_ptr to the frame's clock time
   and advances is->audio_clock.  Returns -1 on pause/abort. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed bytes within the packet */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* decoder output format changed: (re)build the S16 converter */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (interleaved S16) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of this chunk */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* flush marker (seek): reset the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2145
/* get the current audio output buffer size, in bytes still pending
   delivery to SDL. With SDL, we cannot have a precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
2152
2153
/* Prepare a new audio buffer: SDL audio callback, invoked from SDL's
   audio thread whenever the device needs 'len' more bytes.  Refills
   from audio_decode_frame() as needed, applying A/V-sync sample
   correction and feeding the visualizer; outputs silence on error. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* timestamp used by get_audio_clock() to account for buffer latency */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* current decoded buffer exhausted: decode the next chunk */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much of the decoded buffer as SDL still wants */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2189
/* Open a given stream (audio, video or subtitle) of is->ic by index:
   configure and open its decoder, open the SDL audio device for audio
   streams, initialize the per-stream packet queue and start the decode
   thread where applicable.  Return 0 if OK, -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output: downmix to at most 2 channels for SDL */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* apply the user's command-line decoding options */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output: open the SDL device with the stream's rate */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2289
/* Undoes stream_component_open(): aborts the packet queue, wakes and joins
 * the associated decoding thread (or closes SDL audio), then closes the
 * codec and clears the VideoState bookkeeping for that stream type.
 * A stream_index out of range is silently ignored. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback; no thread of our own to join here */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop the demuxer from queuing further packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2361
2362 /* since we have only one decoding thread, we can use a global
2363 variable instead of a thread local variable */
2364 static VideoState *global_video_state;
2365
2366 static int decode_interrupt_cb(void)
2367 {
2368 return (global_video_state && global_video_state->abort_request);
2369 }
2370
/* this thread gets the stream from the disk or the network */
/* Demuxing thread: opens the input, picks the best audio/video/subtitle
 * streams, opens their components, then loops reading packets into the
 * per-stream queues, servicing pause/seek requests, queue back-pressure,
 * looping and EOF.  Runs until is->abort_request is set or a fatal I/O
 * error occurs; posts FF_QUIT_EVENT on error.  Always returns 0. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];           /* chosen stream per media type, -1 = none */
    int st_count[AVMEDIA_TYPE_NB]={0};       /* how many streams of each type seen so far */
    int st_best_packet_count[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow decode_interrupt_cb() to abort blocking I/O for this instance */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->prealloced_context = 1;  /* we pass our own ic to av_open_input_file */
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* select the streams to play: honor -ast/-vst/-sst if given, otherwise
       keep the stream of each type with the most codec_info frames */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if(ret<0) {
        /* no video: fall back to the audio visualization display */
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demuxing loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* drop stale packets and insert a flush marker so the
                   decoders reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq.size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq.nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                /* send an empty packet to flush the video decoder's
                   delayed frames */
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                /* everything drained: either loop from the start or exit */
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2631
2632 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2633 {
2634 VideoState *is;
2635
2636 is = av_mallocz(sizeof(VideoState));
2637 if (!is)
2638 return NULL;
2639 av_strlcpy(is->filename, filename, sizeof(is->filename));
2640 is->iformat = iformat;
2641 is->ytop = 0;
2642 is->xleft = 0;
2643
2644 /* start video display */
2645 is->pictq_mutex = SDL_CreateMutex();
2646 is->pictq_cond = SDL_CreateCond();
2647
2648 is->subpq_mutex = SDL_CreateMutex();
2649 is->subpq_cond = SDL_CreateCond();
2650
2651 is->av_sync_type = av_sync_type;
2652 is->parse_tid = SDL_CreateThread(decode_thread, is);
2653 if (!is->parse_tid) {
2654 av_free(is);
2655 return NULL;
2656 }
2657 return is;
2658 }
2659
2660 static void stream_close(VideoState *is)
2661 {
2662 VideoPicture *vp;
2663 int i;
2664 /* XXX: use a special url_shutdown call to abort parse cleanly */
2665 is->abort_request = 1;
2666 SDL_WaitThread(is->parse_tid, NULL);
2667 SDL_WaitThread(is->refresh_tid, NULL);
2668
2669 /* free all pictures */
2670 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2671 vp = &is->pictq[i];
2672 #if CONFIG_AVFILTER
2673 if (vp->picref) {
2674 avfilter_unref_pic(vp->picref);
2675 vp->picref = NULL;
2676 }
2677 #endif
2678 if (vp->bmp) {
2679 SDL_FreeYUVOverlay(vp->bmp);
2680 vp->bmp = NULL;
2681 }
2682 }
2683 SDL_DestroyMutex(is->pictq_mutex);
2684 SDL_DestroyCond(is->pictq_cond);
2685 SDL_DestroyMutex(is->subpq_mutex);
2686 SDL_DestroyCond(is->subpq_cond);
2687 #if !CONFIG_AVFILTER
2688 if (is->img_convert_ctx)
2689 sws_freeContext(is->img_convert_ctx);
2690 #endif
2691 av_free(is);
2692 }
2693
/* Switches playback to the next usable stream of the given media type,
 * scanning forward (with wrap-around) from the currently active stream.
 * For subtitles the cycle includes "no subtitle" (stream_index -1) once the
 * end of the stream list is reached.  If no other suitable stream exists
 * the current one is kept. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* audio/video must currently be active; subtitles may start from -1 */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrapped past the end: disable subtitles */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came all the way around without finding anything better */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2742
2743
2744 static void toggle_full_screen(void)
2745 {
2746 is_full_screen = !is_full_screen;
2747 if (!fs_screen_width) {
2748 /* use default SDL method */
2749 // SDL_WM_ToggleFullScreen(screen);
2750 }
2751 video_open(cur_stream);
2752 }
2753
2754 static void toggle_pause(void)
2755 {
2756 if (cur_stream)
2757 stream_pause(cur_stream);
2758 step = 0;
2759 }
2760
2761 static void step_to_next_frame(void)
2762 {
2763 if (cur_stream) {
2764 /* if the stream is paused unpause it, then step */
2765 if (cur_stream->paused)
2766 stream_pause(cur_stream);
2767 }
2768 step = 1;
2769 }
2770
2771 static void do_exit(void)
2772 {
2773 int i;
2774 if (cur_stream) {
2775 stream_close(cur_stream);
2776 cur_stream = NULL;
2777 }
2778 for (i = 0; i < AVMEDIA_TYPE_NB; i++)
2779 av_free(avcodec_opts[i]);
2780 av_free(avformat_opts);
2781 av_free(sws_opts);
2782 #if CONFIG_AVFILTER
2783 avfilter_uninit();
2784 #endif
2785 if (show_status)
2786 printf("\n");
2787 SDL_Quit();
2788 exit(0);
2789 }
2790
2791 static void toggle_audio_display(void)
2792 {
2793 if (cur_stream) {
2794 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2795 cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2796 fill_rectangle(screen,
2797 cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2798 bgcolor);
2799 SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2800 }
2801 }
2802
/* handle an event sent by the GUI */
/* Main SDL event loop: dispatches keyboard shortcuts, mouse seeking,
 * window resizes and the custom FF_* events posted by the other threads.
 * Never returns; termination happens through do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek by 'incr' seconds */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte-based seek: estimate a byte offset from the
                           bitrate (or a fallback guess of 180000 B/s) */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fall through: a click seeks just like a drag does */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only seek while a mouse button is held down */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size= url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    /* map the x position to a fraction of total duration */
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread; overlay allocation must happen
               on this (the main) thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2949
2950 static void opt_frame_size(const char *arg)
2951 {
2952 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2953 fprintf(stderr, "Incorrect frame size\n");
2954 exit(1);
2955 }
2956 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2957 fprintf(stderr, "Frame size must be a multiple of 2\n");
2958 exit(1);
2959 }
2960 }
2961
2962 static int opt_width(const char *opt, const char *arg)
2963 {
2964 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2965 return 0;
2966 }
2967
2968 static int opt_height(const char *opt, const char *arg)
2969 {
2970 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2971 return 0;
2972 }
2973
2974 static void opt_format(const char *arg)
2975 {
2976 file_iformat = av_find_input_format(arg);
2977 if (!file_iformat) {
2978 fprintf(stderr, "Unknown input format: %s\n", arg);
2979 exit(1);
2980 }
2981 }
2982
2983 static void opt_frame_pix_fmt(const char *arg)
2984 {
2985 frame_pix_fmt = av_get_pix_fmt(arg);
2986 }
2987
2988 static int opt_sync(const char *opt, const char *arg)
2989 {
2990 if (!strcmp(arg, "audio"))
2991 av_sync_type = AV_SYNC_AUDIO_MASTER;
2992 else if (!strcmp(arg, "video"))
2993 av_sync_type = AV_SYNC_VIDEO_MASTER;
2994 else if (!strcmp(arg, "ext"))
2995 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2996 else {
2997 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2998 exit(1);
2999 }
3000 return 0;
3001 }
3002
3003 static int opt_seek(const char *opt, const char *arg)
3004 {
3005 start_time = parse_time_or_die(opt, arg, 1);
3006 return 0;
3007 }
3008
3009 static int opt_duration(const char *opt, const char *arg)
3010 {
3011 duration = parse_time_or_die(opt, arg, 1);
3012 return 0;
3013 }
3014
3015 static int opt_debug(const char *opt, const char *arg)
3016 {
3017 av_log_set_level(99);
3018 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3019 return 0;
3020 }
3021
3022 static int opt_vismv(const char *opt, const char *arg)
3023 {
3024 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3025 return 0;
3026 }
3027
3028 static int opt_thread_count(const char *opt, const char *arg)
3029 {
3030 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3031 #if !HAVE_THREADS
3032 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3033 #endif
3034 return 0;
3035 }
3036
/* Command-line option table consumed by parse_options() (cmdutils).
 * Each entry: name, flag bits (HAS_ARG, OPT_BOOL, OPT_INT, OPT_FUNC2, ...),
 * a value pointer or handler function, help text, and the argument name.
 * The common -h/-version/... entries come from cmdutils_common_opts.h. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3083
/* Prints the one-line program description and invocation synopsis. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3090
3091 static void show_help(void)
3092 {
3093 show_usage();
3094 show_help_options(options, "Main options:\n",
3095 OPT_EXPERT, 0);
3096 show_help_options(options, "\nAdvanced options:\n",
3097 OPT_EXPERT, OPT_EXPERT);
3098 printf("\nWhile playing:\n"
3099 "q, ESC quit\n"
3100 "f toggle full screen\n"
3101 "p, SPC pause\n"
3102 "a cycle audio channel\n"
3103 "v cycle video channel\n"
3104 "t cycle subtitle channel\n"
3105 "w show audio waves\n"
3106 "s activate frame-step mode\n"
3107 "left/right seek backward/forward 10 seconds\n"
3108 "down/up seek backward/forward 1 minute\n"
3109 "mouse click seek to percentage in file corresponding to fraction of width\n"
3110 );
3111 }
3112
3113 static void opt_input_file(const char *filename)
3114 {
3115 if (input_filename) {
3116 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3117 filename, input_filename);
3118 exit(1);
3119 }
3120 if (!strcmp(filename, "-"))
3121 filename = "pipe:";
3122 input_filename = filename;
3123 }
3124
/* Called from the main */
/* Entry point: registers all FFmpeg components, allocates the global
 * option contexts, parses the command line, initializes SDL and hands
 * control to event_loop() (which never returns; exit goes via do_exit()). */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    /* per-media-type codec option contexts used by set_context_opts() */
    for(i=0; i<AVMEDIA_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
#if !CONFIG_AVFILTER
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
#endif

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop size for full-screen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet pushed into the queues after a seek so the decoders
       flush their buffered state */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}