Add compatibility wrappers for functions moved from lavf to lavc
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the FFmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/pixdesc.h"
28 #include "libavformat/avformat.h"
29 #include "libavdevice/avdevice.h"
30 #include "libswscale/swscale.h"
31 #include "libavcodec/audioconvert.h"
32 #include "libavcodec/colorspace.h"
33 #include "libavcodec/opt.h"
34 #include "libavcodec/avfft.h"
35
36 #if CONFIG_AVFILTER
37 # include "libavfilter/avfilter.h"
38 # include "libavfilter/avfiltergraph.h"
39 # include "libavfilter/graphparser.h"
40 #endif
41
42 #include "cmdutils.h"
43
44 #include <SDL.h>
45 #include <SDL_thread.h>
46
47 #ifdef __MINGW32__
48 #undef main /* We don't want SDL to override our main() */
49 #endif
50
51 #include <unistd.h>
52 #include <assert.h>
53
const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

/* upper bound, in bytes, on the data buffered across all packet queues */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
/* keep demuxing while the audio queue holds fewer bytes than this */
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
/* keep demuxing while a queue holds fewer packets than this */
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* relative increase of the frame-drop ratio each time we fall behind */
#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* scaler quality used when converting decoded frames for display */
static int sws_flags = SWS_BICUBIC;

/* Thread-safe FIFO of demuxed packets, one instance per elementary stream. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list in FIFO order */
    int nb_packets;                     /* number of queued packets */
    int size;                           /* payload bytes plus node overhead */
    int abort_request;                  /* when set, blocked readers return -1 */
    SDL_mutex *mutex;                   /* protects every field above */
    SDL_cond *cond;                     /* signalled on put and on abort */
} PacketQueue;
93
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<YUV overlay with the frame pixels; NULL until allocated
    int width, height; /* source height & width */
    int allocated;                               ///<set once bmp has been (re)allocated by the event loop
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterPicRef *picref;                      ///<reference owned by the filter graph
#endif
} VideoPicture;

/* One decoded subtitle queued for display. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

/* master clock selection for A/V synchronisation */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
121
/* Global player state: demuxer, per-stream decoding state, packet/frame
   queues, clocks, and everything the SDL event loop needs to render
   audio, video and subtitles. */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demuxer (read) thread */
    SDL_Thread *video_tid;    /* video decoder thread */
    SDL_Thread *refresh_tid;  /* thread pushing FF_REFRESH_EVENTs */
    AVInputFormat *iformat;   /* forced input format, if any */
    int no_background;
    int abort_request;        /* tells all threads to quit */
    int paused;
    int last_paused;          /* previous pause state seen by the read thread */
    int seek_req;             /* set while a seek is pending */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;    /* result of av_read_pause(), checked on resume */
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;         /* index of the audio stream, or -1 */

    int av_sync_type;         /* one of the AV_SYNC_* constants */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;       /* pts at the end of the last decoded audio frame */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;       /* points into audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;  /* remaining, not-yet-decoded part of audio_pkt */
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx; /* sample format converter, created lazily */

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples for the visualization */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;        /* FFT context for the spectrogram display */
    int rdft_bits;
    int xpos;                 /* current column of the spectrogram display */

    SDL_Thread *subtitle_tid; /* subtitle decoder thread */
    int subtitle_stream;      /* index of the subtitle stream, or -1 */
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;       /* time at which the next frame should be displayed */
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;         /* index of the video stream, or -1 */
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos; ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;

    /* timestamp fault-detection counters, used to decide pts vs dts reordering */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter; ///<the last filter in the video chain
#endif

    float skip_frames;        /* current frame-drop ratio (> 1.0 means dropping) */
    float skip_frames_index;
    int refresh;              /* a refresh event is already queued */
} VideoState;
218
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* user-requested stream index per media type; -1 means pick automatically */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;  /* -1: decide per container */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;  /* -1: auto-detect via fault statistics */
static int autoexit;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;  /* spectrogram refresh period, in ms */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;  /* av_gettime() at the start of the last audio callback */

/* sentinel packet pushed on seek so the decoders flush their state */
static AVPacket flush_pkt;

/* custom SDL user events */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
284
285 /* packet queue handling */
286 static void packet_queue_init(PacketQueue *q)
287 {
288 memset(q, 0, sizeof(PacketQueue));
289 q->mutex = SDL_CreateMutex();
290 q->cond = SDL_CreateCond();
291 packet_queue_put(q, &flush_pkt);
292 }
293
294 static void packet_queue_flush(PacketQueue *q)
295 {
296 AVPacketList *pkt, *pkt1;
297
298 SDL_LockMutex(q->mutex);
299 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
300 pkt1 = pkt->next;
301 av_free_packet(&pkt->pkt);
302 av_freep(&pkt);
303 }
304 q->last_pkt = NULL;
305 q->first_pkt = NULL;
306 q->nb_packets = 0;
307 q->size = 0;
308 SDL_UnlockMutex(q->mutex);
309 }
310
/* Destroy a packet queue: drop all queued packets, then release the SDL
   synchronization primitives.  The queue must not be used afterwards. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
317
318 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
319 {
320 AVPacketList *pkt1;
321
322 /* duplicate the packet */
323 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
324 return -1;
325
326 pkt1 = av_malloc(sizeof(AVPacketList));
327 if (!pkt1)
328 return -1;
329 pkt1->pkt = *pkt;
330 pkt1->next = NULL;
331
332
333 SDL_LockMutex(q->mutex);
334
335 if (!q->last_pkt)
336
337 q->first_pkt = pkt1;
338 else
339 q->last_pkt->next = pkt1;
340 q->last_pkt = pkt1;
341 q->nb_packets++;
342 q->size += pkt1->pkt.size + sizeof(*pkt1);
343 /* XXX: should duplicate packet data in DV case */
344 SDL_CondSignal(q->cond);
345
346 SDL_UnlockMutex(q->mutex);
347 return 0;
348 }
349
350 static void packet_queue_abort(PacketQueue *q)
351 {
352 SDL_LockMutex(q->mutex);
353
354 q->abort_request = 1;
355
356 SDL_CondSignal(q->cond);
357
358 SDL_UnlockMutex(q->mutex);
359 }
360
361 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
362 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
363 {
364 AVPacketList *pkt1;
365 int ret;
366
367 SDL_LockMutex(q->mutex);
368
369 for(;;) {
370 if (q->abort_request) {
371 ret = -1;
372 break;
373 }
374
375 pkt1 = q->first_pkt;
376 if (pkt1) {
377 q->first_pkt = pkt1->next;
378 if (!q->first_pkt)
379 q->last_pkt = NULL;
380 q->nb_packets--;
381 q->size -= pkt1->pkt.size + sizeof(*pkt1);
382 *pkt = pkt1->pkt;
383 av_free(pkt1);
384 ret = 1;
385 break;
386 } else if (!block) {
387 ret = 0;
388 break;
389 } else {
390 SDL_CondWait(q->cond, q->mutex);
391 }
392 }
393 SDL_UnlockMutex(q->mutex);
394 return ret;
395 }
396
397 static inline void fill_rectangle(SDL_Surface *screen,
398 int x, int y, int w, int h, int color)
399 {
400 SDL_Rect rect;
401 rect.x = x;
402 rect.y = y;
403 rect.w = w;
404 rect.h = h;
405 SDL_FillRect(screen, &rect, color);
406 }
407
#if 0
/* draw only the border of a rectangle
   (dead code: compiled out, and its only call site in video_image_display()
   is commented out as well) */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    /* left band */
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    /* right band */
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    /* top band */
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    /* bottom band */
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
445
/* Blend newp over oldp with alpha a (0..255).  's' extra fractional bits are
 * carried by oldp/newp so that summed (averaged) chroma values can be blended
 * directly.  All parameters are now fully parenthesized; the original left
 * oldp, newp and s bare, which mis-expands for expression arguments
 * (CERT PRE01-C). */
#define ALPHA_BLEND(a, oldp, newp, s)\
(((((oldp) << (s)) * (255 - (a))) + ((newp) * (a))) / (255 << (s)))

/* Unpack a 32-bit ARGB pixel pointed to by s. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the PAL8 pixel at s and unpack it as AYUV. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack AYUV components into the 32-bit word pointed to by d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

/* bytes per pixel of the PAL8 subtitle bitmap */
#define BPP 1
474
475 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
476 {
477 int wrap, wrap3, width2, skip2;
478 int y, u, v, a, u1, v1, a1, w, h;
479 uint8_t *lum, *cb, *cr;
480 const uint8_t *p;
481 const uint32_t *pal;
482 int dstx, dsty, dstw, dsth;
483
484 dstw = av_clip(rect->w, 0, imgw);
485 dsth = av_clip(rect->h, 0, imgh);
486 dstx = av_clip(rect->x, 0, imgw - dstw);
487 dsty = av_clip(rect->y, 0, imgh - dsth);
488 lum = dst->data[0] + dsty * dst->linesize[0];
489 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
490 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
491
492 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
493 skip2 = dstx >> 1;
494 wrap = dst->linesize[0];
495 wrap3 = rect->pict.linesize[0];
496 p = rect->pict.data[0];
497 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
498
499 if (dsty & 1) {
500 lum += dstx;
501 cb += skip2;
502 cr += skip2;
503
504 if (dstx & 1) {
505 YUVA_IN(y, u, v, a, p, pal);
506 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
507 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
508 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
509 cb++;
510 cr++;
511 lum++;
512 p += BPP;
513 }
514 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
515 YUVA_IN(y, u, v, a, p, pal);
516 u1 = u;
517 v1 = v;
518 a1 = a;
519 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
520
521 YUVA_IN(y, u, v, a, p + BPP, pal);
522 u1 += u;
523 v1 += v;
524 a1 += a;
525 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
526 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
527 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
528 cb++;
529 cr++;
530 p += 2 * BPP;
531 lum += 2;
532 }
533 if (w) {
534 YUVA_IN(y, u, v, a, p, pal);
535 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
536 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
537 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
538 p++;
539 lum++;
540 }
541 p += wrap3 - dstw * BPP;
542 lum += wrap - dstw - dstx;
543 cb += dst->linesize[1] - width2 - skip2;
544 cr += dst->linesize[2] - width2 - skip2;
545 }
546 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
547 lum += dstx;
548 cb += skip2;
549 cr += skip2;
550
551 if (dstx & 1) {
552 YUVA_IN(y, u, v, a, p, pal);
553 u1 = u;
554 v1 = v;
555 a1 = a;
556 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
557 p += wrap3;
558 lum += wrap;
559 YUVA_IN(y, u, v, a, p, pal);
560 u1 += u;
561 v1 += v;
562 a1 += a;
563 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
564 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
565 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
566 cb++;
567 cr++;
568 p += -wrap3 + BPP;
569 lum += -wrap + 1;
570 }
571 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
572 YUVA_IN(y, u, v, a, p, pal);
573 u1 = u;
574 v1 = v;
575 a1 = a;
576 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577
578 YUVA_IN(y, u, v, a, p + BPP, pal);
579 u1 += u;
580 v1 += v;
581 a1 += a;
582 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
583 p += wrap3;
584 lum += wrap;
585
586 YUVA_IN(y, u, v, a, p, pal);
587 u1 += u;
588 v1 += v;
589 a1 += a;
590 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
591
592 YUVA_IN(y, u, v, a, p + BPP, pal);
593 u1 += u;
594 v1 += v;
595 a1 += a;
596 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
597
598 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
599 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
600
601 cb++;
602 cr++;
603 p += -wrap3 + 2 * BPP;
604 lum += -wrap + 2;
605 }
606 if (w) {
607 YUVA_IN(y, u, v, a, p, pal);
608 u1 = u;
609 v1 = v;
610 a1 = a;
611 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612 p += wrap3;
613 lum += wrap;
614 YUVA_IN(y, u, v, a, p, pal);
615 u1 += u;
616 v1 += v;
617 a1 += a;
618 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
620 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
621 cb++;
622 cr++;
623 p += -wrap3 + BPP;
624 lum += -wrap + 1;
625 }
626 p += wrap3 + (wrap3 - dstw * BPP);
627 lum += wrap + (wrap - dstw - dstx);
628 cb += dst->linesize[1] - width2 - skip2;
629 cr += dst->linesize[2] - width2 - skip2;
630 }
631 /* handle odd height */
632 if (h) {
633 lum += dstx;
634 cb += skip2;
635 cr += skip2;
636
637 if (dstx & 1) {
638 YUVA_IN(y, u, v, a, p, pal);
639 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
640 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
641 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
642 cb++;
643 cr++;
644 lum++;
645 p += BPP;
646 }
647 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
648 YUVA_IN(y, u, v, a, p, pal);
649 u1 = u;
650 v1 = v;
651 a1 = a;
652 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
653
654 YUVA_IN(y, u, v, a, p + BPP, pal);
655 u1 += u;
656 v1 += v;
657 a1 += a;
658 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
659 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
660 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
661 cb++;
662 cr++;
663 p += 2 * BPP;
664 lum += 2;
665 }
666 if (w) {
667 YUVA_IN(y, u, v, a, p, pal);
668 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
669 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
670 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
671 }
672 }
673 }
674
675 static void free_subpicture(SubPicture *sp)
676 {
677 int i;
678
679 for (i = 0; i < sp->sub.num_rects; i++)
680 {
681 av_freep(&sp->sub.rects[i]->pict.data[0]);
682 av_freep(&sp->sub.rects[i]->pict.data[1]);
683 av_freep(&sp->sub.rects[i]);
684 }
685
686 av_free(sp->sub.rects);
687
688 memset(&sp->sub, 0, sizeof(AVSubtitle));
689 }
690
/* Display the picture at the read index of the picture queue, blending any
   due subtitle on top, letterboxed into the window while preserving the
   sample aspect ratio. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
        if (vp->picref->pixel_aspect.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        /* unknown or invalid SAR: assume square pixels */
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's start display time has come */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* SDL YV12 overlays keep Cr in plane 1 and Cb in plane 2,
                       hence the swapped indices */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
812
/* Mathematical modulo: unlike C's %, the result is always in [0, b). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
821
822 static void video_audio_display(VideoState *s)
823 {
824 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
825 int ch, channels, h, h2, bgcolor, fgcolor;
826 int16_t time_diff;
827 int rdft_bits, nb_freq;
828
829 for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
830 ;
831 nb_freq= 1<<(rdft_bits-1);
832
833 /* compute display index : center on currently output samples */
834 channels = s->audio_st->codec->channels;
835 nb_display_channels = channels;
836 if (!s->paused) {
837 int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
838 n = 2 * channels;
839 delay = audio_write_get_buf_size(s);
840 delay /= n;
841
842 /* to be more precise, we take into account the time spent since
843 the last buffer computation */
844 if (audio_callback_time) {
845 time_diff = av_gettime() - audio_callback_time;
846 delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
847 }
848
849 delay += 2*data_used;
850 if (delay < data_used)
851 delay = data_used;
852
853 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
854 if(s->show_audio==1){
855 h= INT_MIN;
856 for(i=0; i<1000; i+=channels){
857 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
858 int a= s->sample_array[idx];
859 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
860 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
861 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
862 int score= a-d;
863 if(h<score && (b^c)<0){
864 h= score;
865 i_start= idx;
866 }
867 }
868 }
869
870 s->last_i_start = i_start;
871 } else {
872 i_start = s->last_i_start;
873 }
874
875 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
876 if(s->show_audio==1){
877 fill_rectangle(screen,
878 s->xleft, s->ytop, s->width, s->height,
879 bgcolor);
880
881 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
882
883 /* total height for one channel */
884 h = s->height / nb_display_channels;
885 /* graph height / 2 */
886 h2 = (h * 9) / 20;
887 for(ch = 0;ch < nb_display_channels; ch++) {
888 i = i_start + ch;
889 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
890 for(x = 0; x < s->width; x++) {
891 y = (s->sample_array[i] * h2) >> 15;
892 if (y < 0) {
893 y = -y;
894 ys = y1 - y;
895 } else {
896 ys = y1;
897 }
898 fill_rectangle(screen,
899 s->xleft + x, ys, 1, y,
900 fgcolor);
901 i += channels;
902 if (i >= SAMPLE_ARRAY_SIZE)
903 i -= SAMPLE_ARRAY_SIZE;
904 }
905 }
906
907 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
908
909 for(ch = 1;ch < nb_display_channels; ch++) {
910 y = s->ytop + ch * h;
911 fill_rectangle(screen,
912 s->xleft, y, s->width, 1,
913 fgcolor);
914 }
915 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
916 }else{
917 nb_display_channels= FFMIN(nb_display_channels, 2);
918 if(rdft_bits != s->rdft_bits){
919 av_rdft_end(s->rdft);
920 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
921 s->rdft_bits= rdft_bits;
922 }
923 {
924 FFTSample data[2][2*nb_freq];
925 for(ch = 0;ch < nb_display_channels; ch++) {
926 i = i_start + ch;
927 for(x = 0; x < 2*nb_freq; x++) {
928 double w= (x-nb_freq)*(1.0/nb_freq);
929 data[ch][x]= s->sample_array[i]*(1.0-w*w);
930 i += channels;
931 if (i >= SAMPLE_ARRAY_SIZE)
932 i -= SAMPLE_ARRAY_SIZE;
933 }
934 av_rdft_calc(s->rdft, data[ch]);
935 }
936 //least efficient way to do this, we should of course directly access it but its more than fast enough
937 for(y=0; y<s->height; y++){
938 double w= 1/sqrt(nb_freq);
939 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
940 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
941 a= FFMIN(a,255);
942 b= FFMIN(b,255);
943 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
944
945 fill_rectangle(screen,
946 s->xpos, s->height-y, 1, 1,
947 fgcolor);
948 }
949 }
950 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
951 s->xpos++;
952 if(s->xpos >= s->width)
953 s->xpos= s->xleft;
954 }
955 }
956
/* (Re)create the SDL output surface, sized from the forced -x/-y geometry,
   the full-screen mode, the stream/filter dimensions, or a 640x480 fallback.
   Returns 0 on success, -1 if SDL cannot set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1006
1007 /* display the current picture, if any */
1008 static void video_display(VideoState *is)
1009 {
1010 if(!screen)
1011 video_open(cur_stream);
1012 if (is->audio_st && is->show_audio)
1013 video_audio_display(is);
1014 else if (is->video_st)
1015 video_image_display(is);
1016 }
1017
1018 static int refresh_thread(void *opaque)
1019 {
1020 VideoState *is= opaque;
1021 while(!is->abort_request){
1022 SDL_Event event;
1023 event.type = FF_REFRESH_EVENT;
1024 event.user.data1 = opaque;
1025 if(!is->refresh){
1026 is->refresh=1;
1027 SDL_PushEvent(&event);
1028 }
1029 usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1030 }
1031 return 0;
1032 }
1033
1034 /* get the current audio clock value */
1035 static double get_audio_clock(VideoState *is)
1036 {
1037 double pts;
1038 int hw_buf_size, bytes_per_sec;
1039 pts = is->audio_clock;
1040 hw_buf_size = audio_write_get_buf_size(is);
1041 bytes_per_sec = 0;
1042 if (is->audio_st) {
1043 bytes_per_sec = is->audio_st->codec->sample_rate *
1044 2 * is->audio_st->codec->channels;
1045 }
1046 if (bytes_per_sec)
1047 pts -= (double)hw_buf_size / bytes_per_sec;
1048 return pts;
1049 }
1050
1051 /* get the current video clock value */
1052 static double get_video_clock(VideoState *is)
1053 {
1054 if (is->paused) {
1055 return is->video_current_pts;
1056 } else {
1057 return is->video_current_pts_drift + av_gettime() / 1000000.0;
1058 }
1059 }
1060
1061 /* get the current external clock value */
1062 static double get_external_clock(VideoState *is)
1063 {
1064 int64_t ti;
1065 ti = av_gettime();
1066 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1067 }
1068
1069 /* get the current master clock value */
1070 static double get_master_clock(VideoState *is)
1071 {
1072 double val;
1073
1074 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1075 if (is->video_st)
1076 val = get_video_clock(is);
1077 else
1078 val = get_audio_clock(is);
1079 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1080 if (is->audio_st)
1081 val = get_audio_clock(is);
1082 else
1083 val = get_video_clock(is);
1084 } else {
1085 val = get_external_clock(is);
1086 }
1087 return val;
1088 }
1089
1090 /* seek in the stream */
1091 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1092 {
1093 if (!is->seek_req) {
1094 is->seek_pos = pos;
1095 is->seek_rel = rel;
1096 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1097 if (seek_by_bytes)
1098 is->seek_flags |= AVSEEK_FLAG_BYTE;
1099 is->seek_req = 1;
1100 }
1101 }
1102
/* pause or resume the video */
/* On resume, fold the time spent paused into frame_timer and re-anchor the
   video clock drift so get_video_clock() stays continuous. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* advance the frame timer by the wall-clock time spent paused */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        /* if av_read_pause() was unsupported, the demuxer kept running while
           paused, so re-anchor the current pts to "now" */
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1115
1116 static double compute_target_time(double frame_current_pts, VideoState *is)
1117 {
1118 double delay, sync_threshold, diff;
1119
1120 /* compute nominal delay */
1121 delay = frame_current_pts - is->frame_last_pts;
1122 if (delay <= 0 || delay >= 10.0) {
1123 /* if incorrect delay, use previous one */
1124 delay = is->frame_last_delay;
1125 } else {
1126 is->frame_last_delay = delay;
1127 }
1128 is->frame_last_pts = frame_current_pts;
1129
1130 /* update delay to follow master synchronisation source */
1131 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1132 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1133 /* if video is slave, we try to correct big delays by
1134 duplicating or deleting a frame */
1135 diff = get_video_clock(is) - get_master_clock(is);
1136
1137 /* skip or repeat frame. We take into account the
1138 delay to compute the threshold. I still don't know
1139 if it is the best guess */
1140 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1141 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1142 if (diff <= -sync_threshold)
1143 delay = 0;
1144 else if (diff >= sync_threshold)
1145 delay = 2 * delay;
1146 }
1147 }
1148 is->frame_timer += delay;
1149 #if defined(DEBUG_SYNC)
1150 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1151 delay, actual_delay, frame_current_pts, -diff);
1152 #endif
1153
1154 return is->frame_timer;
1155 }
1156
/* called to display each frame */
/* Refresh callback driven by the event loop: pops due pictures from the
 * picture queue, drops late frames when framedrop is enabled, retires
 * expired subtitles, displays the frame, and periodically prints status. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due for display: come back on a later refresh */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the NEXT picture is due, to decide on dropping */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* we are already past the next frame's deadline: drop this one */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switch: flush every queued subtitle */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the current subtitle once it has expired or
                           the next one's start time has been reached */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print a status line at most every 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1295
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
/* Runs in the main (SDL) thread in response to FF_ALLOC_EVENT: (re)creates
 * the YUV overlay for the current write slot, then signals the waiting
 * decoder thread via pictq_cond. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    /* drop any picture reference held from the previous frame */
    if(vp->picref)
        avfilter_unref_pic(vp->picref);
    vp->picref = NULL;

    /* dimensions come from the filter graph output when filtering */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    /* NOTE(review): SDL_CreateYUVOverlay may return NULL; the result is not
       checked here — callers dereference vp->bmp later. */
    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1331
1332 /**
1333 *
1334 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1335 */
1336 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1337 {
1338 VideoPicture *vp;
1339 int dst_pix_fmt;
1340 #if CONFIG_AVFILTER
1341 AVPicture pict_src;
1342 #endif
1343 /* wait until we have space to put a new picture */
1344 SDL_LockMutex(is->pictq_mutex);
1345
1346 if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1347 is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1348
1349 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1350 !is->videoq.abort_request) {
1351 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1352 }
1353 SDL_UnlockMutex(is->pictq_mutex);
1354
1355 if (is->videoq.abort_request)
1356 return -1;
1357
1358 vp = &is->pictq[is->pictq_windex];
1359
1360 /* alloc or resize hardware picture buffer */
1361 if (!vp->bmp ||
1362 #if CONFIG_AVFILTER
1363 vp->width != is->out_video_filter->inputs[0]->w ||
1364 vp->height != is->out_video_filter->inputs[0]->h) {
1365 #else
1366 vp->width != is->video_st->codec->width ||
1367 vp->height != is->video_st->codec->height) {
1368 #endif
1369 SDL_Event event;
1370
1371 vp->allocated = 0;
1372
1373 /* the allocation must be done in the main thread to avoid
1374 locking problems */
1375 event.type = FF_ALLOC_EVENT;
1376 event.user.data1 = is;
1377 SDL_PushEvent(&event);
1378
1379 /* wait until the picture is allocated */
1380 SDL_LockMutex(is->pictq_mutex);
1381 while (!vp->allocated && !is->videoq.abort_request) {
1382 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1383 }
1384 SDL_UnlockMutex(is->pictq_mutex);
1385
1386 if (is->videoq.abort_request)
1387 return -1;
1388 }
1389
1390 /* if the frame is not skipped, then display it */
1391 if (vp->bmp) {
1392 AVPicture pict;
1393 #if CONFIG_AVFILTER
1394 if(vp->picref)
1395 avfilter_unref_pic(vp->picref);
1396 vp->picref = src_frame->opaque;
1397 #endif
1398
1399 /* get a pointer on the bitmap */
1400 SDL_LockYUVOverlay (vp->bmp);
1401
1402 dst_pix_fmt = PIX_FMT_YUV420P;
1403 memset(&pict,0,sizeof(AVPicture));
1404 pict.data[0] = vp->bmp->pixels[0];
1405 pict.data[1] = vp->bmp->pixels[2];
1406 pict.data[2] = vp->bmp->pixels[1];
1407
1408 pict.linesize[0] = vp->bmp->pitches[0];
1409 pict.linesize[1] = vp->bmp->pitches[2];
1410 pict.linesize[2] = vp->bmp->pitches[1];
1411
1412 #if CONFIG_AVFILTER
1413 pict_src.data[0] = src_frame->data[0];
1414 pict_src.data[1] = src_frame->data[1];
1415 pict_src.data[2] = src_frame->data[2];
1416
1417 pict_src.linesize[0] = src_frame->linesize[0];
1418 pict_src.linesize[1] = src_frame->linesize[1];
1419 pict_src.linesize[2] = src_frame->linesize[2];
1420
1421 //FIXME use direct rendering
1422 av_picture_copy(&pict, &pict_src,
1423 vp->pix_fmt, vp->width, vp->height);
1424 #else
1425 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1426 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1427 vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1428 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1429 if (is->img_convert_ctx == NULL) {
1430 fprintf(stderr, "Cannot initialize the conversion context\n");
1431 exit(1);
1432 }
1433 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1434 0, vp->height, pict.data, pict.linesize);
1435 #endif
1436 /* update the bitmap content */
1437 SDL_UnlockYUVOverlay(vp->bmp);
1438
1439 vp->pts = pts;
1440 vp->pos = pos;
1441
1442 /* now we can update the picture count */
1443 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1444 is->pictq_windex = 0;
1445 SDL_LockMutex(is->pictq_mutex);
1446 vp->target_clock= compute_target_time(vp->pts, is);
1447
1448 is->pictq_size++;
1449 SDL_UnlockMutex(is->pictq_mutex);
1450 }
1451 return 0;
1452 }
1453
1454 /**
1455 * compute the exact PTS for the picture if it is omitted in the stream
1456 * @param pts1 the dts of the pkt / pts of the frame
1457 */
1458 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1459 {
1460 double frame_delay, pts;
1461
1462 pts = pts1;
1463
1464 if (pts != 0) {
1465 /* update video clock with pts, if present */
1466 is->video_clock = pts;
1467 } else {
1468 pts = is->video_clock;
1469 }
1470 /* update video clock for next frame */
1471 frame_delay = av_q2d(is->video_st->codec->time_base);
1472 /* for MPEG2, the frame can be repeated, so we update the
1473 clock accordingly */
1474 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1475 is->video_clock += frame_delay;
1476
1477 #if defined(DEBUG_SYNC) && 0
1478 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1479 av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1480 #endif
1481 return queue_picture(is, src_frame, pts, pos);
1482 }
1483
/* Fetch one packet from the video queue and decode it.  Handles the special
 * flush packet (resets decoder and timing state).  Counts non-monotone
 * DTS/PTS occurrences to pick the more trustworthy timestamp source.
 * Returns 1 when a frame is produced (and not skipped), 0 otherwise,
 * -1 on abort. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if(pkt->data == flush_pkt.data){
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
            is->pictq[i].target_clock= 0;
        }
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos= -1;
        SDL_UnlockMutex(is->pictq_mutex);

        /* restart timestamp fault detection and frame timing from scratch */
        is->last_dts_for_fault_detection=
        is->last_pts_for_fault_detection= INT64_MIN;
        is->frame_last_pts= AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames= 1;
        is->skip_frames_index= 0;
        return 0;
    }

    /* NOTE: ipts is the PTS of the _first_ picture beginning in
       this packet, if any */
    /* pass the packet PTS through the decoder via reordered_opaque so it
       comes back attached to the matching (reordered) output frame */
    is->video_st->codec->reordered_opaque= pkt->pts;
    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    if (got_picture) {
        /* count non-increasing timestamps to judge which of DTS/PTS is broken */
        if(pkt->dts != AV_NOPTS_VALUE){
            is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
            is->last_dts_for_fault_detection= pkt->dts;
        }
        if(frame->reordered_opaque != AV_NOPTS_VALUE){
            is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
            is->last_pts_for_fault_detection= frame->reordered_opaque;
        }
    }

    /* choose PTS (reordered) or DTS depending on decoder_reorder_pts and
       which source produced fewer faults */
    if(   (   decoder_reorder_pts==1
           || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
           || pkt->dts == AV_NOPTS_VALUE)
       && frame->reordered_opaque != AV_NOPTS_VALUE)
        *pts= frame->reordered_opaque;
    else if(pkt->dts != AV_NOPTS_VALUE)
        *pts= pkt->dts;
    else
        *pts= 0;

//            if (len1 < 0)
//                break;
    if (got_picture){
        /* frame-skip accounting: only return 1 when this frame survives skipping */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1555
1556 #if CONFIG_AVFILTER
/* Private state of the ffplay source filter ("ffplay_input"). */
typedef struct {
    VideoState *is;     // owning player state
    AVFrame *frame;     // scratch frame the decoder writes into
    int use_dr1;        // nonzero when the codec supports direct rendering (DR1)
} FilterPriv;
1562
/* get_buffer() override for direct rendering: allocates the decoder's output
 * buffer from the filter graph so decoded frames need not be copied.
 * The buffer is over-allocated by the codec edge width and data pointers are
 * offset past the edge. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterPicRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    /* translate the codec's buffer hints into filter permission flags */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    /* grow the buffer by an edge border on every side */
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->w = codec->width;
    ref->h = codec->height;
    for(i = 0; i < 3; i ++) {
        /* chroma planes have a smaller edge according to the subsampling */
        unsigned hshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_w;
        unsigned vshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_h;

        if (ref->data[i]) {
            /* point past the edge border into the visible area */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;              /* stash the picref for release/reget */
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1606
/* release_buffer() override: drops the filter picref stashed in pic->opaque
 * by input_get_buffer() and clears the frame's data pointers. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_pic(pic->opaque);
}
1612
/* reget_buffer() override: reuses the existing graph-owned buffer when
 * possible, allocating a fresh readable one if the frame has no data yet.
 * Fails if the picture geometry or format changed under us. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterPicRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        /* no buffer yet: request one that the codec may read back */
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->w) || (codec->height != ref->h) ||
        (codec->pix_fmt != ref->pic->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1631
1632 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1633 {
1634 FilterPriv *priv = ctx->priv;
1635 AVCodecContext *codec;
1636 if(!opaque) return -1;
1637
1638 priv->is = opaque;
1639 codec = priv->is->video_st->codec;
1640 codec->opaque = ctx;
1641 if(codec->codec->capabilities & CODEC_CAP_DR1) {
1642 priv->use_dr1 = 1;
1643 codec->get_buffer = input_get_buffer;
1644 codec->release_buffer = input_release_buffer;
1645 codec->reget_buffer = input_reget_buffer;
1646 }
1647
1648 priv->frame = avcodec_alloc_frame();
1649
1650 return 0;
1651 }
1652
/* uninit callback of the source filter: frees the scratch frame allocated
 * in input_init(). */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1658
1659 static int input_request_frame(AVFilterLink *link)
1660 {
1661 FilterPriv *priv = link->src->priv;
1662 AVFilterPicRef *picref;
1663 int64_t pts = 0;
1664 AVPacket pkt;
1665 int ret;
1666
1667 while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1668 av_free_packet(&pkt);
1669 if (ret < 0)
1670 return -1;
1671
1672 if(priv->use_dr1) {
1673 picref = avfilter_ref_pic(priv->frame->opaque, ~0);
1674 } else {
1675 picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1676 av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
1677 picref->pic->format, link->w, link->h);
1678 }
1679 av_free_packet(&pkt);
1680
1681 picref->pts = pts;
1682 picref->pos = pkt.pos;
1683 picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1684 avfilter_start_frame(link, picref);
1685 avfilter_draw_slice(link, 0, link->h, 1);
1686 avfilter_end_frame(link);
1687
1688 return 0;
1689 }
1690
/* query_formats callback: the source filter outputs exactly the decoder's
 * pixel format. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1701
1702 static int input_config_props(AVFilterLink *link)
1703 {
1704 FilterPriv *priv = link->src->priv;
1705 AVCodecContext *c = priv->is->video_st->codec;
1706
1707 link->w = c->width;
1708 link->h = c->height;
1709
1710 return 0;
1711 }
1712
/* Source filter feeding decoded frames from the player into the graph:
 * no inputs, one video output driven by input_request_frame(). */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1731
/* Intentionally empty end_frame callback: the sink keeps cur_pic around so
 * get_filtered_video_frame() can pick it up instead of consuming it here. */
static void output_end_frame(AVFilterLink *link)
{
}
1735
/* query_formats callback of the sink filter: only YUV420P is accepted,
 * matching the SDL YV12 overlay used for display. */
static int output_query_formats(AVFilterContext *ctx)
{
    enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1743
/* Pull one frame out of the filter graph through the sink filter.
 * Takes ownership of the picref (stored in frame->opaque; the caller must
 * eventually unref it) and exposes its data/linesize through `frame`.
 * Returns 1 on success, -1 on failure. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, int64_t *pos)
{
    AVFilterPicRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_pic))
        return -1;
    /* detach the picture from the link: we own it now */
    ctx->inputs[0]->cur_pic = NULL;

    frame->opaque = pic;
    *pts          = pic->pts;
    *pos          = pic->pos;

    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
1764
/* Sink filter at the end of the graph: one readable video input whose frames
 * are harvested by get_filtered_video_frame(), no outputs. */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1778 #endif /* CONFIG_AVFILTER */
1779
/* Video decoding thread: builds the optional filter graph, then loops
 * fetching decoded (or filtered) frames and handing them to
 * output_picture2() until the queue is aborted. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    char sws_flags_str[128];
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    /* forward the command-line sws flags to the graph's scalers */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* user-specified filter chain: parse it between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filters: connect src directly to out */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: frame was skipped or not yet available */
        if (!ret)
            continue;

        /* convert stream timebase units to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1873
/* Subtitle decoding thread: pulls packets from the subtitle queue, decodes
 * them, converts each rect's palette from RGBA to YUVA (CCIR) in place, and
 * queues the result for the refresh timer to display. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 means a bitmap (paletted) subtitle */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    /* convert each palette entry RGBA -> YUVA in place */
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1948
1949 /* copy samples for viewing in editor window */
1950 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1951 {
1952 int size, len, channels;
1953
1954 channels = is->audio_st->codec->channels;
1955
1956 size = samples_size / sizeof(short);
1957 while (size > 0) {
1958 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1959 if (len > size)
1960 len = size;
1961 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1962 samples += len;
1963 is->sample_array_index += len;
1964 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1965 is->sample_array_index = 0;
1966 size -= len;
1967 }
1968 }
1969
1970 /* return the new audio buffer size (samples can be added or deleted
1971 to get better sync if video or external master clock) */
1972 static int synchronize_audio(VideoState *is, short *samples,
1973 int samples_size1, double pts)
1974 {
1975 int n, samples_size;
1976 double ref_clock;
1977
1978 n = 2 * is->audio_st->codec->channels;
1979 samples_size = samples_size1;
1980
1981 /* if not master, then we try to remove or add samples to correct the clock */
1982 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1983 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1984 double diff, avg_diff;
1985 int wanted_size, min_size, max_size, nb_samples;
1986
1987 ref_clock = get_master_clock(is);
1988 diff = get_audio_clock(is) - ref_clock;
1989
1990 if (diff < AV_NOSYNC_THRESHOLD) {
1991 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1992 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1993 /* not enough measures to have a correct estimate */
1994 is->audio_diff_avg_count++;
1995 } else {
1996 /* estimate the A-V difference */
1997 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1998
1999 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2000 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2001 nb_samples = samples_size / n;
2002
2003 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2004 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2005 if (wanted_size < min_size)
2006 wanted_size = min_size;
2007 else if (wanted_size > max_size)
2008 wanted_size = max_size;
2009
2010 /* add or remove samples to correction the synchro */
2011 if (wanted_size < samples_size) {
2012 /* remove samples */
2013 samples_size = wanted_size;
2014 } else if (wanted_size > samples_size) {
2015 uint8_t *samples_end, *q;
2016 int nb;
2017
2018 /* add samples */
2019 nb = (samples_size - wanted_size);
2020 samples_end = (uint8_t *)samples + samples_size - n;
2021 q = samples_end + n;
2022 while (nb > 0) {
2023 memcpy(q, samples_end, n);
2024 q += n;
2025 nb -= n;
2026 }
2027 samples_size = wanted_size;
2028 }
2029 }
2030 #if 0
2031 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2032 diff, avg_diff, samples_size - samples_size1,
2033 is->audio_clock, is->video_clock, is->audio_diff_threshold);
2034 #endif
2035 }
2036 } else {
2037 /* too big difference : may be initial PTS errors, so
2038 reset A-V filter */
2039 is->audio_diff_avg_count = 0;
2040 is->audio_diff_cum = 0;
2041 }
2042 }
2043
2044 return samples_size;
2045 }
2046
/* decode one audio frame and returns its uncompressed size */
/* Decodes from is->audio_pkt (tracked through is->audio_pkt_temp since one
 * packet can hold several frames), converts to S16 when the decoder emits a
 * different sample format, updates the audio clock, and leaves the decoded
 * data in is->audio_buf.  Returns the byte size, or -1 on pause/abort. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the bytes the decoder consumed */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* decoder output format changed: (re)build the S16 converter */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* advance the clock by the duration of this decoded chunk */
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2149
/* get the current audio output buffer size, in samples. With SDL, we
   cannot have a precise information */
/* (i.e. the bytes decoded but not yet handed to SDL in the callback) */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
2156
2157
/* prepare a new audio buffer */
/* SDL audio callback (runs on SDL's audio thread): fills `stream` with `len`
 * bytes, decoding and sync-adjusting new frames as the staging buffer
 * (audio_buf) runs dry; outputs silence on decode failure. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        /* staging buffer exhausted: decode the next chunk */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much as fits of the staged data into SDL's buffer */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2193
/* open a given stream. Return 0 if OK */
/* Opens the decoder for stream_index, applies the global decoding options,
 * and starts the matching machinery: SDL audio device + packet queue for
 * audio, decode thread + queue for video/subtitles. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder to downmix to at most stereo */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* apply the command-line / global decoding options */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2293
/* close the decoder of one stream and release everything attached to it;
   counterpart of stream_component_open() */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* abort the queue first so the SDL callback stops waiting on it,
           then shut the audio device down before freeing the queue */
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* discard further packets for this stream and close the codec */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2365
2366 /* since we have only one decoding thread, we can use a global
2367 variable instead of a thread local variable */
2368 static VideoState *global_video_state;
2369
2370 static int decode_interrupt_cb(void)
2371 {
2372 return (global_video_state && global_video_state->abort_request);
2373 }
2374
/* this thread gets the stream from the disk or the network: it opens the
   input, selects and opens the streams, then demuxes packets into the
   audio/video/subtitle queues until abort is requested */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];        /* chosen stream per media type, -1 = none */
    int st_count[AVMEDIA_TYPE_NB]={0};    /* streams of each type seen so far */
    int st_best_packet_count[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    /* -1 is all-one bytes, so memset with -1 sets every int to -1 */
    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let blocking lavf I/O be interrupted when abort_request is set */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->prealloced_context = 1; /* we pass our own option-configured context */
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* per media type, pick the user-requested stream, preferring the one
       with the most packets seen during probing */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL; /* re-enabled on open */
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if(ret<0) {
        /* no video stream: fall back to the audio visualization display */
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* on a successful seek, flush every queue and push the
                   flush marker so the decoders reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* at EOF, queue an empty packet so the video decoder flushes
               its delayed frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                /* loop==0 means loop forever, loop==1 means play once */
                if(loop!=1 && (!loop || --loop)){
                    /* NOTE(review): uses the global cur_stream rather than
                       'is' — confirm they are always the same object here */
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        /* notify the main event loop that we bailed out */
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2635
2636 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2637 {
2638 VideoState *is;
2639
2640 is = av_mallocz(sizeof(VideoState));
2641 if (!is)
2642 return NULL;
2643 av_strlcpy(is->filename, filename, sizeof(is->filename));
2644 is->iformat = iformat;
2645 is->ytop = 0;
2646 is->xleft = 0;
2647
2648 /* start video display */
2649 is->pictq_mutex = SDL_CreateMutex();
2650 is->pictq_cond = SDL_CreateCond();
2651
2652 is->subpq_mutex = SDL_CreateMutex();
2653 is->subpq_cond = SDL_CreateCond();
2654
2655 is->av_sync_type = av_sync_type;
2656 is->parse_tid = SDL_CreateThread(decode_thread, is);
2657 if (!is->parse_tid) {
2658 av_free(is);
2659 return NULL;
2660 }
2661 return is;
2662 }
2663
/* stop all player threads, free the picture queue and destroy the
   VideoState; counterpart of stream_open() */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_pic(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* the scaler context only exists in the non-avfilter build */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
2697
/* switch to the next stream of the given type; for subtitles, cycling past
   the last stream turns subtitles off (stream index -1) */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* audio/video need an active stream to cycle from; subtitles may start
       from the "disabled" state (-1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return; /* wrapped around: no other usable stream found */
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2746
2747
2748 static void toggle_full_screen(void)
2749 {
2750 is_full_screen = !is_full_screen;
2751 if (!fs_screen_width) {
2752 /* use default SDL method */
2753 // SDL_WM_ToggleFullScreen(screen);
2754 }
2755 video_open(cur_stream);
2756 }
2757
2758 static void toggle_pause(void)
2759 {
2760 if (cur_stream)
2761 stream_pause(cur_stream);
2762 step = 0;
2763 }
2764
2765 static void step_to_next_frame(void)
2766 {
2767 if (cur_stream) {
2768 /* if the stream is paused unpause it, then step */
2769 if (cur_stream->paused)
2770 stream_pause(cur_stream);
2771 }
2772 step = 1;
2773 }
2774
2775 static void do_exit(void)
2776 {
2777 int i;
2778 if (cur_stream) {
2779 stream_close(cur_stream);
2780 cur_stream = NULL;
2781 }
2782 for (i = 0; i < AVMEDIA_TYPE_NB; i++)
2783 av_free(avcodec_opts[i]);
2784 av_free(avformat_opts);
2785 av_free(sws_opts);
2786 #if CONFIG_AVFILTER
2787 avfilter_uninit();
2788 #endif
2789 if (show_status)
2790 printf("\n");
2791 SDL_Quit();
2792 exit(0);
2793 }
2794
2795 static void toggle_audio_display(void)
2796 {
2797 if (cur_stream) {
2798 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2799 cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2800 fill_rectangle(screen,
2801 cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2802 bgcolor);
2803 SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2804 }
2805 }
2806
2807 /* handle an event sent by the GUI */
2808 static void event_loop(void)
2809 {
2810 SDL_Event event;
2811 double incr, pos, frac;
2812
2813 for(;;) {
2814 double x;
2815 SDL_WaitEvent(&event);
2816 switch(event.type) {
2817 case SDL_KEYDOWN:
2818 switch(event.key.keysym.sym) {
2819 case SDLK_ESCAPE:
2820 case SDLK_q:
2821 do_exit();
2822 break;
2823 case SDLK_f:
2824 toggle_full_screen();
2825 break;
2826 case SDLK_p:
2827 case SDLK_SPACE:
2828 toggle_pause();
2829 break;
2830 case SDLK_s: //S: Step to next frame
2831 step_to_next_frame();
2832 break;
2833 case SDLK_a:
2834 if (cur_stream)
2835 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2836 break;
2837 case SDLK_v:
2838 if (cur_stream)
2839 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2840 break;
2841 case SDLK_t:
2842 if (cur_stream)
2843 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2844 break;
2845 case SDLK_w:
2846 toggle_audio_display();
2847 break;
2848 case SDLK_LEFT:
2849 incr = -10.0;
2850 goto do_seek;
2851 case SDLK_RIGHT:
2852 incr = 10.0;
2853 goto do_seek;
2854 case SDLK_UP:
2855 incr = 60.0;
2856 goto do_seek;
2857 case SDLK_DOWN:
2858 incr = -60.0;
2859 do_seek:
2860 if (cur_stream) {
2861 if (seek_by_bytes) {
2862 if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2863 pos= cur_stream->video_current_pos;
2864 }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2865 pos= cur_stream->audio_pkt.pos;
2866 }else
2867 pos = url_ftell(cur_stream->ic->pb);
2868 if (cur_stream->ic->bit_rate)
2869 incr *= cur_stream->ic->bit_rate / 8.0;
2870 else
2871 incr *= 180000.0;
2872 pos += incr;
2873 stream_seek(cur_stream, pos, incr, 1);
2874 } else {
2875 pos = get_master_clock(cur_stream);
2876 pos += incr;
2877 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2878 }
2879 }
2880 break;
2881 default:
2882 break;
2883 }
2884 break;
2885 case SDL_MOUSEBUTTONDOWN:
2886 case SDL_MOUSEMOTION:
2887 if(event.type ==SDL_MOUSEBUTTONDOWN){
2888 x= event.button.x;
2889 }else{
2890 if(event.motion.state != SDL_PRESSED)
2891 break;
2892 x= event.motion.x;
2893 }
2894 if (cur_stream) {
2895 if(seek_by_bytes || cur_stream->ic->duration<=0){
2896 uint64_t size= url_fsize(cur_stream->ic->pb);
2897 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2898 }else{
2899 int64_t ts;
2900 int ns, hh, mm, ss;
2901 int tns, thh, tmm, tss;
2902 tns = cur_stream->ic->duration/1000000LL;
2903 thh = tns/3600;
2904 tmm = (tns%3600)/60;
2905 tss = (tns%60);
2906 frac = x/cur_stream->width;
2907 ns = frac*tns;
2908 hh = ns/3600;
2909 mm = (ns%3600)/60;
2910 ss = (ns%60);
2911 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2912 hh, mm, ss, thh, tmm, tss);
2913 ts = frac*cur_stream->ic->duration;
2914 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2915 ts += cur_stream->ic->start_time;
2916 stream_seek(cur_stream, ts, 0, 0);
2917 }
2918 }
2919 break;
2920 case SDL_VIDEORESIZE:
2921 if (cur_stream) {
2922 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2923 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2924 screen_width = cur_stream->width = event.resize.w;
2925 screen_height= cur_stream->height= event.resize.h;
2926 }
2927 break;
2928 case SDL_QUIT:
2929 case FF_QUIT_EVENT:
2930 do_exit();
2931 break;
2932 case FF_ALLOC_EVENT:
2933 video_open(event.user.data1);
2934 alloc_picture(event.user.data1);
2935 break;
2936 case FF_REFRESH_EVENT:
2937 video_refresh_timer(event.user.data1);
2938 cur_stream->refresh=0;
2939 break;
2940 default:
2941 break;
2942 }
2943 }
2944 }
2945
2946 static void opt_frame_size(const char *arg)
2947 {
2948 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2949 fprintf(stderr, "Incorrect frame size\n");
2950 exit(1);
2951 }
2952 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2953 fprintf(stderr, "Frame size must be a multiple of 2\n");
2954 exit(1);
2955 }
2956 }
2957
/* parse -x: force the displayed width in pixels (>= 1); exits on bad input */
static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2963
/* parse -y: force the displayed height in pixels (>= 1); exits on bad input */
static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2969
2970 static void opt_format(const char *arg)
2971 {
2972 file_iformat = av_find_input_format(arg);
2973 if (!file_iformat) {
2974 fprintf(stderr, "Unknown input format: %s\n", arg);
2975 exit(1);
2976 }
2977 }
2978
/* parse -pix_fmt; NOTE(review): the result is stored unvalidated, exactly
   as returned by av_get_pix_fmt() */
static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = av_get_pix_fmt(arg);
}
2983
2984 static int opt_sync(const char *opt, const char *arg)
2985 {
2986 if (!strcmp(arg, "audio"))
2987 av_sync_type = AV_SYNC_AUDIO_MASTER;
2988 else if (!strcmp(arg, "video"))
2989 av_sync_type = AV_SYNC_VIDEO_MASTER;
2990 else if (!strcmp(arg, "ext"))
2991 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2992 else {
2993 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2994 exit(1);
2995 }
2996 return 0;
2997 }
2998
/* parse -ss: playback start position (time string); exits on bad input */
static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}
3004
/* parse -t: how long to play (time string); exits on bad input */
static int opt_duration(const char *opt, const char *arg)
{
    duration = parse_time_or_die(opt, arg, 1);
    return 0;
}
3010
/* parse -debug: store the codec debug flags and crank libav* logging to
   maximum verbosity */
static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}
3017
/* parse -vismv: value forwarded to AVCodecContext.debug_mv in
   stream_component_open() */
static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}
3023
/* parse -threads: decoder thread count passed to avcodec_thread_init() */
static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}
3032
/* command line option table; the OPT_* flags and the parsing driver are
   defined in cmdutils.h/cmdutils.c */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3077
/* print the one-line usage summary (same bytes as before, single call) */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3084
/* print the full help text: usage, option tables and key bindings */
static void show_help(void)
{
    show_usage();
    /* first pass prints the non-expert options, second pass the expert ones */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3106
3107 static void opt_input_file(const char *filename)
3108 {
3109 if (input_filename) {
3110 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3111 filename, input_filename);
3112 exit(1);
3113 }
3114 if (!strcmp(filename, "-"))
3115 filename = "pipe:";
3116 input_filename = filename;
3117 }
3118
3119 /* Called from the main */
3120 int main(int argc, char **argv)
3121 {
3122 int flags, i;
3123
3124 /* register all codecs, demux and protocols */
3125 avcodec_register_all();
3126 #if CONFIG_AVDEVICE
3127 avdevice_register_all();
3128 #endif
3129 #if CONFIG_AVFILTER
3130 avfilter_register_all();
3131 #endif
3132 av_register_all();
3133
3134 for(i=0; i<AVMEDIA_TYPE_NB; i++){
3135 avcodec_opts[i]= avcodec_alloc_context2(i);
3136 }
3137 avformat_opts = avformat_alloc_context();
3138 #if !CONFIG_AVFILTER
3139 sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3140 #endif
3141
3142 show_banner();
3143
3144 parse_options(argc, argv, options, opt_input_file);
3145
3146 if (!input_filename) {
3147 show_usage();
3148 fprintf(stderr, "An input file must be specified\n");
3149 fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3150 exit(1);
3151 }
3152
3153 if (display_disable) {
3154 video_disable = 1;
3155 }
3156 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3157 #if !defined(__MINGW32__) && !defined(__APPLE__)
3158 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3159 #endif
3160 if (SDL_Init (flags)) {
3161 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3162 exit(1);
3163 }
3164
3165 if (!display_disable) {
3166 #if HAVE_SDL_VIDEO_SIZE
3167 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3168 fs_screen_width = vi->current_w;
3169 fs_screen_height = vi->current_h;
3170 #endif
3171 }
3172
3173 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3174 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3175 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3176
3177 av_init_packet(&flush_pkt);
3178 flush_pkt.data= "FLUSH";
3179
3180 cur_stream = stream_open(input_filename, file_iformat);
3181
3182 event_loop();
3183
3184 /* never returns */
3185
3186 return 0;
3187 }