[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <math.h>
24 #include <limits.h>
25 #include "libavutil/avstring.h"
26 #include "libavutil/pixdesc.h"
27 #include "libavformat/avformat.h"
28 #include "libavdevice/avdevice.h"
29 #include "libswscale/swscale.h"
30 #include "libavcodec/audioconvert.h"
31 #include "libavcodec/colorspace.h"
32 #include "libavcodec/opt.h"
33 #include "libavcodec/dsputil.h"
34
35 #include "cmdutils.h"
36
37 #include <SDL.h>
38 #include <SDL_thread.h>
39
40 #ifdef __MINGW32__
41 #undef main /* We don't want SDL to override our main() */
42 #endif
43
44 #undef exit
45 #undef printf
46 #undef fprintf
47
48 const char program_name[] = "FFplay";
49 const int program_birth_year = 2003;
50
51 //#define DEBUG_SYNC
52
53 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
54 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
55 #define MIN_FRAMES 5
56
57 /* SDL audio buffer size, in samples. Should be small to have precise
58 A/V sync as SDL does not have hardware buffer fullness info. */
59 #define SDL_AUDIO_BUFFER_SIZE 1024
60
61 /* no AV sync correction is done if below the AV sync threshold */
62 #define AV_SYNC_THRESHOLD 0.01
64 /* no AV sync correction is done if the error is too big */
64 #define AV_NOSYNC_THRESHOLD 10.0
65
66 /* maximum audio speed change to get correct sync */
67 #define SAMPLE_CORRECTION_PERCENT_MAX 10
68
69 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
70 #define AUDIO_DIFF_AVG_NB 20
71
72 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
73 #define SAMPLE_ARRAY_SIZE (2*65536)
74
75 static int sws_flags = SWS_BICUBIC;
76
77 typedef struct PacketQueue {
78 AVPacketList *first_pkt, *last_pkt;
79 int nb_packets;
80 int size;
81 int abort_request;
82 SDL_mutex *mutex;
83 SDL_cond *cond;
84 } PacketQueue;
85
86 #define VIDEO_PICTURE_QUEUE_SIZE 1
87 #define SUBPICTURE_QUEUE_SIZE 4
88
89 typedef struct VideoPicture {
90 double pts; ///<presentation time stamp for this picture
91 int64_t pos; ///<byte position in file
92 SDL_Overlay *bmp;
93 int width, height; /* source height & width */
94 int allocated;
95 SDL_TimerID timer_id;
96 } VideoPicture;
97
98 typedef struct SubPicture {
99 double pts; /* presentation time stamp for this picture */
100 AVSubtitle sub;
101 } SubPicture;
102
103 enum {
104 AV_SYNC_AUDIO_MASTER, /* default choice */
105 AV_SYNC_VIDEO_MASTER,
106 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
107 };
108
109 typedef struct VideoState {
110 SDL_Thread *parse_tid;
111 SDL_Thread *video_tid;
112 AVInputFormat *iformat;
113 int no_background;
114 int abort_request;
115 int paused;
116 int last_paused;
117 int seek_req;
118 int seek_flags;
119 int64_t seek_pos;
120 int64_t seek_rel;
121 int read_pause_return;
122 AVFormatContext *ic;
123 int dtg_active_format;
124
125 int audio_stream;
126
127 int av_sync_type;
128 double external_clock; /* external clock base */
129 int64_t external_clock_time;
130
131 double audio_clock;
132 double audio_diff_cum; /* used for AV difference average computation */
133 double audio_diff_avg_coef;
134 double audio_diff_threshold;
135 int audio_diff_avg_count;
136 AVStream *audio_st;
137 PacketQueue audioq;
138 int audio_hw_buf_size;
139 /* samples output by the codec. we reserve more space for avsync
140 compensation */
141 DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
142 DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
143 uint8_t *audio_buf;
144 unsigned int audio_buf_size; /* in bytes */
145 int audio_buf_index; /* in bytes */
146 AVPacket audio_pkt_temp;
147 AVPacket audio_pkt;
148 enum SampleFormat audio_src_fmt;
149 AVAudioConvert *reformat_ctx;
150
151 int show_audio; /* if true, display audio samples */
152 int16_t sample_array[SAMPLE_ARRAY_SIZE];
153 int sample_array_index;
154 int last_i_start;
155 RDFTContext rdft;
156 int rdft_bits;
157 int xpos;
158
159 SDL_Thread *subtitle_tid;
160 int subtitle_stream;
161 int subtitle_stream_changed;
162 AVStream *subtitle_st;
163 PacketQueue subtitleq;
164 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
165 int subpq_size, subpq_rindex, subpq_windex;
166 SDL_mutex *subpq_mutex;
167 SDL_cond *subpq_cond;
168
169 double frame_timer;
170 double frame_last_pts;
171 double frame_last_delay;
172 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
173 int video_stream;
174 AVStream *video_st;
175 PacketQueue videoq;
176 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
177 double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
178 int64_t video_current_pos; ///<current displayed file pos
179 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
180 int pictq_size, pictq_rindex, pictq_windex;
181 SDL_mutex *pictq_mutex;
182 SDL_cond *pictq_cond;
183 struct SwsContext *img_convert_ctx;
184
185 // QETimer *video_timer;
186 char filename[1024];
187 int width, height, xleft, ytop;
188
189 int64_t faulty_pts;
190 int64_t faulty_dts;
191 int64_t last_dts_for_fault_detection;
192 int64_t last_pts_for_fault_detection;
193
194 } VideoState;
195
196 static void show_help(void);
197 static int audio_write_get_buf_size(VideoState *is);
198
199 /* options specified by the user */
200 static AVInputFormat *file_iformat;
201 static const char *input_filename;
202 static int fs_screen_width;
203 static int fs_screen_height;
204 static int screen_width = 0;
205 static int screen_height = 0;
206 static int frame_width = 0;
207 static int frame_height = 0;
208 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
209 static int audio_disable;
210 static int video_disable;
211 static int wanted_audio_stream= 0;
212 static int wanted_video_stream= 0;
213 static int wanted_subtitle_stream= -1;
214 static int seek_by_bytes=-1;
215 static int display_disable;
216 static int show_status = 1;
217 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
218 static int64_t start_time = AV_NOPTS_VALUE;
219 static int debug = 0;
220 static int debug_mv = 0;
221 static int step = 0;
222 static int thread_count = 1;
223 static int workaround_bugs = 1;
224 static int fast = 0;
225 static int genpts = 0;
226 static int lowres = 0;
227 static int idct = FF_IDCT_AUTO;
228 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
229 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
230 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
231 static int error_recognition = FF_ER_CAREFUL;
232 static int error_concealment = 3;
233 static int decoder_reorder_pts= -1;
234 static int autoexit;
235
236 /* current context */
237 static int is_full_screen;
238 static VideoState *cur_stream;
239 static int64_t audio_callback_time;
240
241 static AVPacket flush_pkt;
242
243 #define FF_ALLOC_EVENT (SDL_USEREVENT)
244 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
245 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
246
247 static SDL_Surface *screen;
248
249 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
250
251 /* packet queue handling */
252 static void packet_queue_init(PacketQueue *q)
253 {
254 memset(q, 0, sizeof(PacketQueue));
255 q->mutex = SDL_CreateMutex();
256 q->cond = SDL_CreateCond();
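    /* seed the queue with the global flush packet so the consuming decoder
       thread (re)initializes its state when it starts reading */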
257 packet_queue_put(q, &flush_pkt);
258 }
259
260 static void packet_queue_flush(PacketQueue *q)
261 {
262 AVPacketList *pkt, *pkt1;
263
264 SDL_LockMutex(q->mutex);
265 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
266 pkt1 = pkt->next;
267 av_free_packet(&pkt->pkt);
268 av_freep(&pkt);
269 }
270 q->last_pkt = NULL;
271 q->first_pkt = NULL;
272 q->nb_packets = 0;
273 q->size = 0;
274 SDL_UnlockMutex(q->mutex);
275 }
276
277 static void packet_queue_end(PacketQueue *q)
278 {
279 packet_queue_flush(q);
280 SDL_DestroyMutex(q->mutex);
281 SDL_DestroyCond(q->cond);
282 }
283
284 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
285 {
286 AVPacketList *pkt1;
287
288 /* duplicate the packet */
289 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
290 return -1;
291
292 pkt1 = av_malloc(sizeof(AVPacketList));
293 if (!pkt1)
294 return -1;
295 pkt1->pkt = *pkt;
296 pkt1->next = NULL;
297
298
299 SDL_LockMutex(q->mutex);
300
301 if (!q->last_pkt)
302
303 q->first_pkt = pkt1;
304 else
305 q->last_pkt->next = pkt1;
306 q->last_pkt = pkt1;
307 q->nb_packets++;
308 q->size += pkt1->pkt.size + sizeof(*pkt1);
309 /* XXX: should duplicate packet data in DV case */
310 SDL_CondSignal(q->cond);
311
312 SDL_UnlockMutex(q->mutex);
313 return 0;
314 }
315
316 static void packet_queue_abort(PacketQueue *q)
317 {
318 SDL_LockMutex(q->mutex);
319
320 q->abort_request = 1;
321
322 SDL_CondSignal(q->cond);
323
324 SDL_UnlockMutex(q->mutex);
325 }
326
327 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
328 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
329 {
330 AVPacketList *pkt1;
331 int ret;
332
333 SDL_LockMutex(q->mutex);
334
335 for(;;) {
336 if (q->abort_request) {
337 ret = -1;
338 break;
339 }
340
341 pkt1 = q->first_pkt;
342 if (pkt1) {
343 q->first_pkt = pkt1->next;
344 if (!q->first_pkt)
345 q->last_pkt = NULL;
346 q->nb_packets--;
347 q->size -= pkt1->pkt.size + sizeof(*pkt1);
348 *pkt = pkt1->pkt;
349 av_free(pkt1);
350 ret = 1;
351 break;
352 } else if (!block) {
353 ret = 0;
354 break;
355 } else {
356 SDL_CondWait(q->cond, q->mutex);
357 }
358 }
359 SDL_UnlockMutex(q->mutex);
360 return ret;
361 }
362
363 static inline void fill_rectangle(SDL_Surface *screen,
364 int x, int y, int w, int h, int color)
365 {
366 SDL_Rect rect;
367 rect.x = x;
368 rect.y = y;
369 rect.w = w;
370 rect.h = h;
371 SDL_FillRect(screen, &rect, color);
372 }
373
374 #if 0
375 /* draw only the border of a rectangle */
376 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
377 {
378 int w1, w2, h1, h2;
379
380 /* fill the background */
381 w1 = x;
382 if (w1 < 0)
383 w1 = 0;
384 w2 = s->width - (x + w);
385 if (w2 < 0)
386 w2 = 0;
387 h1 = y;
388 if (h1 < 0)
389 h1 = 0;
390 h2 = s->height - (y + h);
391 if (h2 < 0)
392 h2 = 0;
393 fill_rectangle(screen,
394 s->xleft, s->ytop,
395 w1, s->height,
396 color);
397 fill_rectangle(screen,
398 s->xleft + s->width - w2, s->ytop,
399 w2, s->height,
400 color);
401 fill_rectangle(screen,
402 s->xleft + w1, s->ytop,
403 s->width - w1 - w2, h1,
404 color);
405 fill_rectangle(screen,
406 s->xleft + w1, s->ytop + s->height - h2,
407 s->width - w1 - w2, h2,
408 color);
409 }
410 #endif
411
412 #define ALPHA_BLEND(a, oldp, newp, s)\
413 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
414
415 #define RGBA_IN(r, g, b, a, s)\
416 {\
417 unsigned int v = ((const uint32_t *)(s))[0];\
418 a = (v >> 24) & 0xff;\
419 r = (v >> 16) & 0xff;\
420 g = (v >> 8) & 0xff;\
421 b = v & 0xff;\
422 }
423
424 #define YUVA_IN(y, u, v, a, s, pal)\
425 {\
426 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
427 a = (val >> 24) & 0xff;\
428 y = (val >> 16) & 0xff;\
429 u = (val >> 8) & 0xff;\
430 v = val & 0xff;\
431 }
432
433 #define YUVA_OUT(d, y, u, v, a)\
434 {\
435 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
436 }
437
438
439 #define BPP 1
440
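/* alpha-blend a palettized subtitle rectangle onto a YUV420P picture; the
   chroma planes are half resolution, hence the separate handling of odd
   rows/columns and the averaging of chroma over 2x2 luma blocks */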
441 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
442 {
443 int wrap, wrap3, width2, skip2;
444 int y, u, v, a, u1, v1, a1, w, h;
445 uint8_t *lum, *cb, *cr;
446 const uint8_t *p;
447 const uint32_t *pal;
448 int dstx, dsty, dstw, dsth;
449
450 dstw = av_clip(rect->w, 0, imgw);
451 dsth = av_clip(rect->h, 0, imgh);
452 dstx = av_clip(rect->x, 0, imgw - dstw);
453 dsty = av_clip(rect->y, 0, imgh - dsth);
454 lum = dst->data[0] + dsty * dst->linesize[0];
455 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
456 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
457
458 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
459 skip2 = dstx >> 1;
460 wrap = dst->linesize[0];
461 wrap3 = rect->pict.linesize[0];
462 p = rect->pict.data[0];
463 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
464
465 if (dsty & 1) {
466 lum += dstx;
467 cb += skip2;
468 cr += skip2;
469
470 if (dstx & 1) {
471 YUVA_IN(y, u, v, a, p, pal);
472 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
473 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
474 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
475 cb++;
476 cr++;
477 lum++;
478 p += BPP;
479 }
480 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
481 YUVA_IN(y, u, v, a, p, pal);
482 u1 = u;
483 v1 = v;
484 a1 = a;
485 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
486
487 YUVA_IN(y, u, v, a, p + BPP, pal);
488 u1 += u;
489 v1 += v;
490 a1 += a;
491 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
492 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
493 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
494 cb++;
495 cr++;
496 p += 2 * BPP;
497 lum += 2;
498 }
499 if (w) {
500 YUVA_IN(y, u, v, a, p, pal);
501 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
502 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
503 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
504 p++;
505 lum++;
506 }
507 p += wrap3 - dstw * BPP;
508 lum += wrap - dstw - dstx;
509 cb += dst->linesize[1] - width2 - skip2;
510 cr += dst->linesize[2] - width2 - skip2;
511 }
512 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
513 lum += dstx;
514 cb += skip2;
515 cr += skip2;
516
517 if (dstx & 1) {
518 YUVA_IN(y, u, v, a, p, pal);
519 u1 = u;
520 v1 = v;
521 a1 = a;
522 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523 p += wrap3;
524 lum += wrap;
525 YUVA_IN(y, u, v, a, p, pal);
526 u1 += u;
527 v1 += v;
528 a1 += a;
529 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
530 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
531 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
532 cb++;
533 cr++;
534 p += -wrap3 + BPP;
535 lum += -wrap + 1;
536 }
537 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
538 YUVA_IN(y, u, v, a, p, pal);
539 u1 = u;
540 v1 = v;
541 a1 = a;
542 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
543
544 YUVA_IN(y, u, v, a, p + BPP, pal);
545 u1 += u;
546 v1 += v;
547 a1 += a;
548 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
549 p += wrap3;
550 lum += wrap;
551
552 YUVA_IN(y, u, v, a, p, pal);
553 u1 += u;
554 v1 += v;
555 a1 += a;
556 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
557
558 YUVA_IN(y, u, v, a, p + BPP, pal);
559 u1 += u;
560 v1 += v;
561 a1 += a;
562 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
563
564 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
565 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
566
567 cb++;
568 cr++;
569 p += -wrap3 + 2 * BPP;
570 lum += -wrap + 2;
571 }
572 if (w) {
573 YUVA_IN(y, u, v, a, p, pal);
574 u1 = u;
575 v1 = v;
576 a1 = a;
577 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578 p += wrap3;
579 lum += wrap;
580 YUVA_IN(y, u, v, a, p, pal);
581 u1 += u;
582 v1 += v;
583 a1 += a;
584 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
585 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
586 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
587 cb++;
588 cr++;
589 p += -wrap3 + BPP;
590 lum += -wrap + 1;
591 }
592 p += wrap3 + (wrap3 - dstw * BPP);
593 lum += wrap + (wrap - dstw - dstx);
594 cb += dst->linesize[1] - width2 - skip2;
595 cr += dst->linesize[2] - width2 - skip2;
596 }
597 /* handle odd height */
598 if (h) {
599 lum += dstx;
600 cb += skip2;
601 cr += skip2;
602
603 if (dstx & 1) {
604 YUVA_IN(y, u, v, a, p, pal);
605 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
606 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
607 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
608 cb++;
609 cr++;
610 lum++;
611 p += BPP;
612 }
613 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
614 YUVA_IN(y, u, v, a, p, pal);
615 u1 = u;
616 v1 = v;
617 a1 = a;
618 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619
620 YUVA_IN(y, u, v, a, p + BPP, pal);
621 u1 += u;
622 v1 += v;
623 a1 += a;
624 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
625 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
626 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
627 cb++;
628 cr++;
629 p += 2 * BPP;
630 lum += 2;
631 }
632 if (w) {
633 YUVA_IN(y, u, v, a, p, pal);
634 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
635 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
636 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
637 }
638 }
639 }
640
641 static void free_subpicture(SubPicture *sp)
642 {
643 int i;
644
645 for (i = 0; i < sp->sub.num_rects; i++)
646 {
647 av_freep(&sp->sub.rects[i]->pict.data[0]);
648 av_freep(&sp->sub.rects[i]->pict.data[1]);
649 av_freep(&sp->sub.rects[i]);
650 }
651
652 av_free(sp->sub.rects);
653
654 memset(&sp->sub, 0, sizeof(AVSubtitle));
655 }
656
657 static void video_image_display(VideoState *is)
658 {
659 VideoPicture *vp;
660 SubPicture *sp;
661 AVPicture pict;
662 float aspect_ratio;
663 int width, height, x, y;
664 SDL_Rect rect;
665 int i;
666
667 vp = &is->pictq[is->pictq_rindex];
668 if (vp->bmp) {
669 /* XXX: use variable in the frame */
670 if (is->video_st->sample_aspect_ratio.num)
671 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
672 else if (is->video_st->codec->sample_aspect_ratio.num)
673 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
674 else
675 aspect_ratio = 0;
676 if (aspect_ratio <= 0.0)
677 aspect_ratio = 1.0;
678 aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
679 /* if an active format is indicated, then it overrides the
680 mpeg format */
681 #if 0
682 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
683 is->dtg_active_format = is->video_st->codec->dtg_active_format;
684 printf("dtg_active_format=%d\n", is->dtg_active_format);
685 }
686 #endif
687 #if 0
688 switch(is->video_st->codec->dtg_active_format) {
689 case FF_DTG_AFD_SAME:
690 default:
691 /* nothing to do */
692 break;
693 case FF_DTG_AFD_4_3:
694 aspect_ratio = 4.0 / 3.0;
695 break;
696 case FF_DTG_AFD_16_9:
697 aspect_ratio = 16.0 / 9.0;
698 break;
699 case FF_DTG_AFD_14_9:
700 aspect_ratio = 14.0 / 9.0;
701 break;
702 case FF_DTG_AFD_4_3_SP_14_9:
703 aspect_ratio = 14.0 / 9.0;
704 break;
705 case FF_DTG_AFD_16_9_SP_14_9:
706 aspect_ratio = 14.0 / 9.0;
707 break;
708 case FF_DTG_AFD_SP_4_3:
709 aspect_ratio = 4.0 / 3.0;
710 break;
711 }
712 #endif
713
714 if (is->subtitle_st)
715 {
716 if (is->subpq_size > 0)
717 {
718 sp = &is->subpq[is->subpq_rindex];
719
720 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
721 {
722 SDL_LockYUVOverlay (vp->bmp);
723
724 pict.data[0] = vp->bmp->pixels[0];
725 pict.data[1] = vp->bmp->pixels[2];
726 pict.data[2] = vp->bmp->pixels[1];
727
728 pict.linesize[0] = vp->bmp->pitches[0];
729 pict.linesize[1] = vp->bmp->pitches[2];
730 pict.linesize[2] = vp->bmp->pitches[1];
731
732 for (i = 0; i < sp->sub.num_rects; i++)
733 blend_subrect(&pict, sp->sub.rects[i],
734 vp->bmp->w, vp->bmp->h);
735
736 SDL_UnlockYUVOverlay (vp->bmp);
737 }
738 }
739 }
740
741
742 /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
743 height = is->height;
744 width = ((int)rint(height * aspect_ratio)) & ~1;
745 if (width > is->width) {
746 width = is->width;
747 height = ((int)rint(width / aspect_ratio)) & ~1;
748 }
749 x = (is->width - width) / 2;
750 y = (is->height - height) / 2;
751 if (!is->no_background) {
752 /* fill the background */
753 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
754 } else {
755 is->no_background = 0;
756 }
757 rect.x = is->xleft + x;
758 rect.y = is->ytop + y;
759 rect.w = width;
760 rect.h = height;
761 SDL_DisplayYUVOverlay(vp->bmp, &rect);
762 } else {
763 #if 0
764 fill_rectangle(screen,
765 is->xleft, is->ytop, is->width, is->height,
766 QERGB(0x00, 0x00, 0x00));
767 #endif
768 }
769 }
770
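/* positive modulo: wrap an index into [0, b) even when a is negative */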
771 static inline int compute_mod(int a, int b)
772 {
773 a = a % b;
774 if (a >= 0)
775 return a;
776 else
777 return a + b;
778 }
779
780 static void video_audio_display(VideoState *s)
781 {
782 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
783 int ch, channels, h, h2, bgcolor, fgcolor;
784 int16_t time_diff;
785 int rdft_bits, nb_freq;
786
787 for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
788 ;
789 nb_freq= 1<<(rdft_bits-1);
790
791 /* compute display index: center on the currently output samples */
792 channels = s->audio_st->codec->channels;
793 nb_display_channels = channels;
794 if (!s->paused) {
795 int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
796 n = 2 * channels;
797 delay = audio_write_get_buf_size(s);
798 delay /= n;
799
800 /* to be more precise, we take into account the time spent since
801 the last buffer computation */
802 if (audio_callback_time) {
803 time_diff = av_gettime() - audio_callback_time;
804 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
805 }
806
807 delay -= data_used / 2;
808 if (delay < data_used)
809 delay = data_used;
810
811 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
812 if(s->show_audio==1){
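        /* heuristic: pick a starting sample near a zero crossing with a large
           amplitude swing, so the waveform display stays roughly stable from
           frame to frame */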
813 h= INT_MIN;
814 for(i=0; i<1000; i+=channels){
815 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
816 int a= s->sample_array[idx];
817 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
818 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
819 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
820 int score= a-d;
821 if(h<score && (b^c)<0){
822 h= score;
823 i_start= idx;
824 }
825 }
826 }
827
828 s->last_i_start = i_start;
829 } else {
830 i_start = s->last_i_start;
831 }
832
833 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
834 if(s->show_audio==1){
835 fill_rectangle(screen,
836 s->xleft, s->ytop, s->width, s->height,
837 bgcolor);
838
839 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
840
841 /* total height for one channel */
842 h = s->height / nb_display_channels;
843 /* graph height / 2 */
844 h2 = (h * 9) / 20;
845 for(ch = 0;ch < nb_display_channels; ch++) {
846 i = i_start + ch;
847 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
848 for(x = 0; x < s->width; x++) {
849 y = (s->sample_array[i] * h2) >> 15;
850 if (y < 0) {
851 y = -y;
852 ys = y1 - y;
853 } else {
854 ys = y1;
855 }
856 fill_rectangle(screen,
857 s->xleft + x, ys, 1, y,
858 fgcolor);
859 i += channels;
860 if (i >= SAMPLE_ARRAY_SIZE)
861 i -= SAMPLE_ARRAY_SIZE;
862 }
863 }
864
865 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
866
867 for(ch = 1;ch < nb_display_channels; ch++) {
868 y = s->ytop + ch * h;
869 fill_rectangle(screen,
870 s->xleft, y, s->width, 1,
871 fgcolor);
872 }
873 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
874 }else{
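        /* spectrum mode: window the most recent samples, run an RDFT per
           channel and draw one column of the scrolling spectrogram at xpos */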
875 nb_display_channels= FFMIN(nb_display_channels, 2);
876 if(rdft_bits != s->rdft_bits){
877 ff_rdft_end(&s->rdft);
878 ff_rdft_init(&s->rdft, rdft_bits, RDFT);
879 s->rdft_bits= rdft_bits;
880 }
881 {
882 FFTSample data[2][2*nb_freq];
883 for(ch = 0;ch < nb_display_channels; ch++) {
884 i = i_start + ch;
885 for(x = 0; x < 2*nb_freq; x++) {
886 double w= (x-nb_freq)*(1.0/nb_freq);
887 data[ch][x]= s->sample_array[i]*(1.0-w*w);
888 i += channels;
889 if (i >= SAMPLE_ARRAY_SIZE)
890 i -= SAMPLE_ARRAY_SIZE;
891 }
892 ff_rdft_calc(&s->rdft, data[ch]);
893 }
894 // Least efficient way to do this; we could access the data directly, but it is more than fast enough
895 for(y=0; y<s->height; y++){
896 double w= 1/sqrt(nb_freq);
897 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
898 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
899 a= FFMIN(a,255);
900 b= FFMIN(b,255);
901 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
902
903 fill_rectangle(screen,
904 s->xpos, s->height-y, 1, 1,
905 fgcolor);
906 }
907 }
908 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
909 s->xpos++;
910 if(s->xpos >= s->width)
911 s->xpos= s->xleft;
912 }
913 }
914
915 static int video_open(VideoState *is){
916 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
917 int w,h;
918
919 if(is_full_screen) flags |= SDL_FULLSCREEN;
920 else flags |= SDL_RESIZABLE;
921
922 if (is_full_screen && fs_screen_width) {
923 w = fs_screen_width;
924 h = fs_screen_height;
925 } else if(!is_full_screen && screen_width){
926 w = screen_width;
927 h = screen_height;
928 }else if (is->video_st && is->video_st->codec->width){
929 w = is->video_st->codec->width;
930 h = is->video_st->codec->height;
931 } else {
932 w = 640;
933 h = 480;
934 }
935 #ifndef __APPLE__
936 screen = SDL_SetVideoMode(w, h, 0, flags);
937 #else
938 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
939 screen = SDL_SetVideoMode(w, h, 24, flags);
940 #endif
941 if (!screen) {
942 fprintf(stderr, "SDL: could not set video mode - exiting\n");
943 return -1;
944 }
945 SDL_WM_SetCaption("FFplay", "FFplay");
946
947 is->width = screen->w;
948 is->height = screen->h;
949
950 return 0;
951 }
952
953 /* display the current picture, if any */
954 static void video_display(VideoState *is)
955 {
956 if(!screen)
957 video_open(cur_stream);
958 if (is->audio_st && is->show_audio)
959 video_audio_display(is);
960 else if (is->video_st)
961 video_image_display(is);
962 }
963
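/* SDL timer callback: it runs in SDL's timer thread, so we only push a
   refresh event and let the main event loop do the actual drawing */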
964 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
965 {
966 SDL_Event event;
967 event.type = FF_REFRESH_EVENT;
968 event.user.data1 = opaque;
969 SDL_PushEvent(&event);
970 return 0; /* 0 means stop timer */
971 }
972
973 /* schedule a video refresh in 'delay' ms */
974 static SDL_TimerID schedule_refresh(VideoState *is, int delay)
975 {
976 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
977 return SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
978 }
979
980 /* get the current audio clock value */
981 static double get_audio_clock(VideoState *is)
982 {
983 double pts;
984 int hw_buf_size, bytes_per_sec;
985 pts = is->audio_clock;
986 hw_buf_size = audio_write_get_buf_size(is);
987 bytes_per_sec = 0;
988 if (is->audio_st) {
989 bytes_per_sec = is->audio_st->codec->sample_rate *
990 2 * is->audio_st->codec->channels;
991 }
992 if (bytes_per_sec)
993 pts -= (double)hw_buf_size / bytes_per_sec;
994 return pts;
995 }
996
997 /* get the current video clock value */
998 static double get_video_clock(VideoState *is)
999 {
1000 if (is->paused) {
1001 return is->video_current_pts;
1002 } else {
1003 return is->video_current_pts_drift + av_gettime() / 1000000.0;
1004 }
1005 }
1006
1007 /* get the current external clock value */
1008 static double get_external_clock(VideoState *is)
1009 {
1010 int64_t ti;
1011 ti = av_gettime();
1012 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1013 }
1014
1015 /* get the current master clock value */
1016 static double get_master_clock(VideoState *is)
1017 {
1018 double val;
1019
1020 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1021 if (is->video_st)
1022 val = get_video_clock(is);
1023 else
1024 val = get_audio_clock(is);
1025 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1026 if (is->audio_st)
1027 val = get_audio_clock(is);
1028 else
1029 val = get_video_clock(is);
1030 } else {
1031 val = get_external_clock(is);
1032 }
1033 return val;
1034 }
1035
1036 /* seek in the stream */
1037 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1038 {
1039 if (!is->seek_req) {
1040 is->seek_pos = pos;
1041 is->seek_rel = rel;
1042 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1043 if (seek_by_bytes)
1044 is->seek_flags |= AVSEEK_FLAG_BYTE;
1045 is->seek_req = 1;
1046 }
1047 }
1048
1049 /* pause or resume the video */
1050 static void stream_pause(VideoState *is)
1051 {
1052 if (is->paused) {
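        /* when resuming, advance frame_timer by the (approximate) time spent
           paused so the video does not try to catch up */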
1053 is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1054 if(is->read_pause_return != AVERROR(ENOSYS)){
1055 is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1056 }
1057 is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1058 }
1059 is->paused = !is->paused;
1060 }
1061
1062 static double compute_frame_delay(double frame_current_pts, VideoState *is)
1063 {
1064 double actual_delay, delay, sync_threshold, diff;
1065
1066 /* compute nominal delay */
1067 delay = frame_current_pts - is->frame_last_pts;
1068 if (delay <= 0 || delay >= 10.0) {
1069 /* if incorrect delay, use previous one */
1070 delay = is->frame_last_delay;
1071 } else {
1072 is->frame_last_delay = delay;
1073 }
1074 is->frame_last_pts = frame_current_pts;
1075
1076 /* update delay to follow master synchronisation source */
1077 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1078 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1079 /* if video is slave, we try to correct big delays by
1080 duplicating or deleting a frame */
1081 diff = get_video_clock(is) - get_master_clock(is);
1082
1083 /* skip or repeat frame. We take into account the
1084 delay to compute the threshold. I still don't know
1085 if it is the best guess */
1086 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1087 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1088 if (diff <= -sync_threshold)
1089 delay = 0;
1090 else if (diff >= sync_threshold)
1091 delay = 2 * delay;
1092 }
1093 }
1094
1095 is->frame_timer += delay;
1096 /* compute the REAL delay (we need to do that to avoid
1097 long-term errors) */
1098 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1099 if (actual_delay < 0.010) {
1100 /* XXX: should skip picture */
1101 actual_delay = 0.010;
1102 }
1103
1104 #if defined(DEBUG_SYNC)
1105 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1106 delay, actual_delay, frame_current_pts, -diff);
1107 #endif
1108
1109 return actual_delay;
1110 }
1111
1112 /* called to display each frame */
1113 static void video_refresh_timer(void *opaque)
1114 {
1115 VideoState *is = opaque;
1116 VideoPicture *vp;
1117
1118 SubPicture *sp, *sp2;
1119
1120 if (is->video_st) {
1121 if (is->pictq_size == 0) {
1122 fprintf(stderr, "Internal error detected in the SDL timer\n");
1123 } else {
1124 /* dequeue the picture */
1125 vp = &is->pictq[is->pictq_rindex];
1126
1127 /* update current video pts */
1128 is->video_current_pts = vp->pts;
1129 is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1130 is->video_current_pos = vp->pos;
1131
1132 if(is->subtitle_st) {
1133 if (is->subtitle_stream_changed) {
1134 SDL_LockMutex(is->subpq_mutex);
1135
1136 while (is->subpq_size) {
1137 free_subpicture(&is->subpq[is->subpq_rindex]);
1138
1139 /* update queue size and signal for next picture */
1140 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1141 is->subpq_rindex = 0;
1142
1143 is->subpq_size--;
1144 }
1145 is->subtitle_stream_changed = 0;
1146
1147 SDL_CondSignal(is->subpq_cond);
1148 SDL_UnlockMutex(is->subpq_mutex);
1149 } else {
1150 if (is->subpq_size > 0) {
1151 sp = &is->subpq[is->subpq_rindex];
1152
1153 if (is->subpq_size > 1)
1154 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1155 else
1156 sp2 = NULL;
1157
1158 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1159 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1160 {
1161 free_subpicture(sp);
1162
1163 /* update queue size and signal for next picture */
1164 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1165 is->subpq_rindex = 0;
1166
1167 SDL_LockMutex(is->subpq_mutex);
1168 is->subpq_size--;
1169 SDL_CondSignal(is->subpq_cond);
1170 SDL_UnlockMutex(is->subpq_mutex);
1171 }
1172 }
1173 }
1174 }
1175
1176 /* display picture */
1177 video_display(is);
1178
1179 /* update queue size and signal for next picture */
1180 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1181 is->pictq_rindex = 0;
1182
1183 SDL_LockMutex(is->pictq_mutex);
1184 vp->timer_id= 0;
1185 is->pictq_size--;
1186 SDL_CondSignal(is->pictq_cond);
1187 SDL_UnlockMutex(is->pictq_mutex);
1188 }
1189 } else if (is->audio_st) {
1190 /* draw the next audio frame */
1191
1192 schedule_refresh(is, 40);
1193
1194 /* if there is only an audio stream, then display the audio bars (better
1195 than nothing, just to test the implementation) */
1196
1197 /* display picture */
1198 video_display(is);
1199 } else {
1200 schedule_refresh(is, 100);
1201 }
1202 if (show_status) {
1203 static int64_t last_time;
1204 int64_t cur_time;
1205 int aqsize, vqsize, sqsize;
1206 double av_diff;
1207
1208 cur_time = av_gettime();
1209 if (!last_time || (cur_time - last_time) >= 30000) {
1210 aqsize = 0;
1211 vqsize = 0;
1212 sqsize = 0;
1213 if (is->audio_st)
1214 aqsize = is->audioq.size;
1215 if (is->video_st)
1216 vqsize = is->videoq.size;
1217 if (is->subtitle_st)
1218 sqsize = is->subtitleq.size;
1219 av_diff = 0;
1220 if (is->audio_st && is->video_st)
1221 av_diff = get_audio_clock(is) - get_video_clock(is);
1222 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1223 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
1224 fflush(stdout);
1225 last_time = cur_time;
1226 }
1227 }
1228 }
1229
1230 /* allocate a picture (this must be done in the main thread to avoid
1231 potential locking problems) */
1232 static void alloc_picture(void *opaque)
1233 {
1234 VideoState *is = opaque;
1235 VideoPicture *vp;
1236
1237 vp = &is->pictq[is->pictq_windex];
1238
1239 if (vp->bmp)
1240 SDL_FreeYUVOverlay(vp->bmp);
1241
1242 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1243 is->video_st->codec->height,
1244 SDL_YV12_OVERLAY,
1245 screen);
1246 vp->width = is->video_st->codec->width;
1247 vp->height = is->video_st->codec->height;
1248
1249 SDL_LockMutex(is->pictq_mutex);
1250 vp->allocated = 1;
1251 SDL_CondSignal(is->pictq_cond);
1252 SDL_UnlockMutex(is->pictq_mutex);
1253 }
1254
1255 /**
1256 *
1257 * @param pts the dts of the packet / the pts of the frame, guessed if not known
1258 */
1259 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1260 {
1261 VideoPicture *vp;
1262 int dst_pix_fmt;
1263
1264 /* wait until we have space to put a new picture */
1265 SDL_LockMutex(is->pictq_mutex);
1266 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1267 !is->videoq.abort_request) {
1268 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1269 }
1270 SDL_UnlockMutex(is->pictq_mutex);
1271
1272 if (is->videoq.abort_request)
1273 return -1;
1274
1275 vp = &is->pictq[is->pictq_windex];
1276
1277 /* alloc or resize hardware picture buffer */
1278 if (!vp->bmp ||
1279 vp->width != is->video_st->codec->width ||
1280 vp->height != is->video_st->codec->height) {
1281 SDL_Event event;
1282
1283 vp->allocated = 0;
1284
1285 /* the allocation must be done in the main thread to avoid
1286 locking problems */
1287 event.type = FF_ALLOC_EVENT;
1288 event.user.data1 = is;
1289 SDL_PushEvent(&event);
1290
1291 /* wait until the picture is allocated */
1292 SDL_LockMutex(is->pictq_mutex);
1293 while (!vp->allocated && !is->videoq.abort_request) {
1294 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1295 }
1296 SDL_UnlockMutex(is->pictq_mutex);
1297
1298 if (is->videoq.abort_request)
1299 return -1;
1300 }
1301
1302 /* if the frame is not skipped, then display it */
1303 if (vp->bmp) {
1304 AVPicture pict;
1305
1306 /* get a pointer to the bitmap */
1307 SDL_LockYUVOverlay (vp->bmp);
1308
1309 dst_pix_fmt = PIX_FMT_YUV420P;
1310 memset(&pict,0,sizeof(AVPicture));
1311 pict.data[0] = vp->bmp->pixels[0];
1312 pict.data[1] = vp->bmp->pixels[2];
1313 pict.data[2] = vp->bmp->pixels[1];
1314
1315 pict.linesize[0] = vp->bmp->pitches[0];
1316 pict.linesize[1] = vp->bmp->pitches[2];
1317 pict.linesize[2] = vp->bmp->pitches[1];
1318 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1319 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1320 is->video_st->codec->width, is->video_st->codec->height,
1321 is->video_st->codec->pix_fmt,
1322 is->video_st->codec->width, is->video_st->codec->height,
1323 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1324 if (is->img_convert_ctx == NULL) {
1325 fprintf(stderr, "Cannot initialize the conversion context\n");
1326 exit(1);
1327 }
1328 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1329 0, is->video_st->codec->height, pict.data, pict.linesize);
1330 /* update the bitmap content */
1331 SDL_UnlockYUVOverlay(vp->bmp);
1332
1333 vp->pts = pts;
1334 vp->pos = pos;
1335
1336 /* now we can update the picture count */
1337 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1338 is->pictq_windex = 0;
1339 SDL_LockMutex(is->pictq_mutex);
1340 is->pictq_size++;
1341 // We must schedule while holding the mutex, as the timer id must be stored before the timer fires or we might end up freeing an already freed id
1342 vp->timer_id= schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
1343 SDL_UnlockMutex(is->pictq_mutex);
1344 }
1345 return 0;
1346 }
1347
1348 /**
1349 * compute the exact PTS for the picture if it is omitted in the stream
1350 * @param pts1 the dts of the pkt / pts of the frame
1351 */
1352 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1353 {
1354 double frame_delay, pts;
1355
1356 pts = pts1;
1357
1358 if (pts != 0) {
1359 /* update video clock with pts, if present */
1360 is->video_clock = pts;
1361 } else {
1362 pts = is->video_clock;
1363 }
1364 /* update video clock for next frame */
1365 frame_delay = av_q2d(is->video_st->codec->time_base);
1366 /* for MPEG2, the frame can be repeated, so we update the
1367 clock accordingly */
1368 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1369 is->video_clock += frame_delay;
1370
1371 #if defined(DEBUG_SYNC) && 0
1372 {
1373 int ftype;
1374 if (src_frame->pict_type == FF_B_TYPE)
1375 ftype = 'B';
1376 else if (src_frame->pict_type == FF_I_TYPE)
1377 ftype = 'I';
1378 else
1379 ftype = 'P';
1380 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1381 ftype, pts, pts1);
1382 }
1383 #endif
1384 return queue_picture(is, src_frame, pts, pos);
1385 }
1386
1387 static int video_thread(void *arg)
1388 {
1389 VideoState *is = arg;
1390 AVPacket pkt1, *pkt = &pkt1;
1391 int len1, got_picture, i;
1392 AVFrame *frame= avcodec_alloc_frame();
1393 double pts;
1394
1395 for(;;) {
1396 while (is->paused && !is->videoq.abort_request) {
1397 SDL_Delay(10);
1398 }
1399 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1400 break;
1401
1402 if(pkt->data == flush_pkt.data){
1403 avcodec_flush_buffers(is->video_st->codec);
1404
1405 SDL_LockMutex(is->pictq_mutex);
1406 // Make sure there are no long-delay timers (ideally we should just flush the queue, but that is harder)
1407 for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
1408 if(is->pictq[i].timer_id){
1409 SDL_RemoveTimer(is->pictq[i].timer_id);
1410 is->pictq[i].timer_id=0;
1411 schedule_refresh(is, 1);
1412 }
1413 }
1414 while (is->pictq_size && !is->videoq.abort_request) {
1415 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1416 }
1417 is->video_current_pos= -1;
1418 SDL_UnlockMutex(is->pictq_mutex);
1419
1420 is->last_dts_for_fault_detection=
1421 is->last_pts_for_fault_detection= INT64_MIN;
1422 is->frame_last_pts= AV_NOPTS_VALUE;
1423 is->frame_last_delay = 0;
1424 is->frame_timer = (double)av_gettime() / 1000000.0;
1425
1426 continue;
1427 }
1428
1429 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1430 this packet, if any */
1431 is->video_st->codec->reordered_opaque= pkt->pts;
1432 len1 = avcodec_decode_video2(is->video_st->codec,
1433 frame, &got_picture,
1434 pkt);
1435
1436 if (got_picture) {
1437 if(pkt->dts != AV_NOPTS_VALUE){
1438 is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
1439 is->last_dts_for_fault_detection= pkt->dts;
1440 }
1441 if(frame->reordered_opaque != AV_NOPTS_VALUE){
1442 is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
1443 is->last_pts_for_fault_detection= frame->reordered_opaque;
1444 }
1445 }
1446
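        /* choose the timestamp to trust: prefer the decoder's reordered pts
           when reordering is forced or when the pts stream looks less broken
           than the dts stream (see the faulty_* counters), else fall back to
           the packet dts */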
1447 if( ( decoder_reorder_pts==1
1448 || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
1449 || pkt->dts == AV_NOPTS_VALUE)
1450 && frame->reordered_opaque != AV_NOPTS_VALUE)
1451 pts= frame->reordered_opaque;
1452 else if(pkt->dts != AV_NOPTS_VALUE)
1453 pts= pkt->dts;
1454 else
1455 pts= 0;
1456 pts *= av_q2d(is->video_st->time_base);
1457
1458 // if (len1 < 0)
1459 // break;
1460 if (got_picture) {
1461 if (output_picture2(is, frame, pts, pkt->pos) < 0)
1462 goto the_end;
1463 }
1464 av_free_packet(pkt);
1465 if (step)
1466 if (cur_stream)
1467 stream_pause(cur_stream);
1468 }
1469 the_end:
1470 av_free(frame);
1471 return 0;
1472 }
1473
1474 static int subtitle_thread(void *arg)
1475 {
1476 VideoState *is = arg;
1477 SubPicture *sp;
1478 AVPacket pkt1, *pkt = &pkt1;
1479 int len1, got_subtitle;
1480 double pts;
1481 int i, j;
1482 int r, g, b, y, u, v, a;
1483
1484 for(;;) {
1485 while (is->paused && !is->subtitleq.abort_request) {
1486 SDL_Delay(10);
1487 }
1488 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1489 break;
1490
1491 if(pkt->data == flush_pkt.data){
1492 avcodec_flush_buffers(is->subtitle_st->codec);
1493 continue;
1494 }
1495 SDL_LockMutex(is->subpq_mutex);
1496 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1497 !is->subtitleq.abort_request) {
1498 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1499 }
1500 SDL_UnlockMutex(is->subpq_mutex);
1501
1502 if (is->subtitleq.abort_request)
1503 goto the_end;
1504
1505 sp = &is->subpq[is->subpq_windex];
1506
1507 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1508 this packet, if any */
1509 pts = 0;
1510 if (pkt->pts != AV_NOPTS_VALUE)
1511 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1512
1513 len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1514 &sp->sub, &got_subtitle,
1515 pkt);
1516 // if (len1 < 0)
1517 // break;
1518 if (got_subtitle && sp->sub.format == 0) {
1519 sp->pts = pts;
1520
1521 for (i = 0; i < sp->sub.num_rects; i++)
1522 {
1523 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1524 {
1525 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1526 y = RGB_TO_Y_CCIR(r, g, b);
1527 u = RGB_TO_U_CCIR(r, g, b, 0);
1528 v = RGB_TO_V_CCIR(r, g, b, 0);
1529 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1530 }
1531 }
1532
1533 /* now we can update the picture count */
1534 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1535 is->subpq_windex = 0;
1536 SDL_LockMutex(is->subpq_mutex);
1537 is->subpq_size++;
1538 SDL_UnlockMutex(is->subpq_mutex);
1539 }
1540 av_free_packet(pkt);
1541 // if (step)
1542 // if (cur_stream)
1543 // stream_pause(cur_stream);
1544 }
1545 the_end:
1546 return 0;
1547 }
1548
1549 /* copy samples into the circular buffer used by the audio display */
1550 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1551 {
1552 int size, len, channels;
1553
1554 channels = is->audio_st->codec->channels;
1555
1556 size = samples_size / sizeof(short);
1557 while (size > 0) {
1558 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1559 if (len > size)
1560 len = size;
1561 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1562 samples += len;
1563 is->sample_array_index += len;
1564 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1565 is->sample_array_index = 0;
1566 size -= len;
1567 }
1568 }
1569
1570 /* return the new audio buffer size (samples can be added or deleted
1571 to get better sync when video or the external clock is the master) */
1572 static int synchronize_audio(VideoState *is, short *samples,
1573 int samples_size1, double pts)
1574 {
1575 int n, samples_size;
1576 double ref_clock;
1577
1578 n = 2 * is->audio_st->codec->channels;
1579 samples_size = samples_size1;
1580
1581 /* if not master, then we try to remove or add samples to correct the clock */
1582 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1583 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1584 double diff, avg_diff;
1585 int wanted_size, min_size, max_size, nb_samples;
1586
1587 ref_clock = get_master_clock(is);
1588 diff = get_audio_clock(is) - ref_clock;
1589
1590 if (diff < AV_NOSYNC_THRESHOLD) {
1591 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1592 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1593 /* not enough measures to have a correct estimate */
1594 is->audio_diff_avg_count++;
1595 } else {
1596 /* estimate the A-V difference */
1597 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1598
1599 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1600 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1601 nb_samples = samples_size / n;
1602
1603 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1604 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1605 if (wanted_size < min_size)
1606 wanted_size = min_size;
1607 else if (wanted_size > max_size)
1608 wanted_size = max_size;
1609
1610 /* add or remove samples to correct the sync */
1611 if (wanted_size < samples_size) {
1612 /* remove samples */
1613 samples_size = wanted_size;
1614 } else if (wanted_size > samples_size) {
1615 uint8_t *samples_end, *q;
1616 int nb;
1617
1618 /* add samples */
1619 nb = (wanted_size - samples_size); /* bytes to add by duplicating the last sample */
1620 samples_end = (uint8_t *)samples + samples_size - n;
1621 q = samples_end + n;
1622 while (nb > 0) {
1623 memcpy(q, samples_end, n);
1624 q += n;
1625 nb -= n;
1626 }
1627 samples_size = wanted_size;
1628 }
1629 }
1630 #if 0
1631 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1632 diff, avg_diff, samples_size - samples_size1,
1633 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1634 #endif
1635 }
1636 } else {
1637 /* the difference is too big: probably initial PTS errors, so
1638 reset the A-V filter */
1639 is->audio_diff_avg_count = 0;
1640 is->audio_diff_cum = 0;
1641 }
1642 }
1643
1644 return samples_size;
1645 }
1646
1647 /* decode one audio frame and return its uncompressed size */
1648 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1649 {
1650 AVPacket *pkt_temp = &is->audio_pkt_temp;
1651 AVPacket *pkt = &is->audio_pkt;
1652 AVCodecContext *dec= is->audio_st->codec;
1653 int n, len1, data_size;
1654 double pts;
1655
1656 for(;;) {
1657 /* NOTE: the audio packet can contain several frames */
1658 while (pkt_temp->size > 0) {
1659 data_size = sizeof(is->audio_buf1);
1660 len1 = avcodec_decode_audio3(dec,
1661 (int16_t *)is->audio_buf1, &data_size,
1662 pkt_temp);
1663 if (len1 < 0) {
1664 /* if error, we skip the frame */
1665 pkt_temp->size = 0;
1666 break;
1667 }
1668
1669 pkt_temp->data += len1;
1670 pkt_temp->size -= len1;
1671 if (data_size <= 0)
1672 continue;
1673
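            /* the SDL audio callback expects signed 16-bit samples, so a
               converter to S16 is (re)created whenever the decoder output
               format changes */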
1674 if (dec->sample_fmt != is->audio_src_fmt) {
1675 if (is->reformat_ctx)
1676 av_audio_convert_free(is->reformat_ctx);
1677 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1678 dec->sample_fmt, 1, NULL, 0);
1679 if (!is->reformat_ctx) {
1680 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1681 avcodec_get_sample_fmt_name(dec->sample_fmt),
1682 avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1683 break;
1684 }
1685 is->audio_src_fmt= dec->sample_fmt;
1686 }
1687
1688 if (is->reformat_ctx) {
1689 const void *ibuf[6]= {is->audio_buf1};
1690 void *obuf[6]= {is->audio_buf2};
1691 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1692 int ostride[6]= {2};
1693 int len= data_size/istride[0];
1694 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1695 printf("av_audio_convert() failed\n");
1696 break;
1697 }
1698 is->audio_buf= is->audio_buf2;
1699 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
1700 remove this legacy cruft */
1701 data_size= len*2;
1702 }else{
1703 is->audio_buf= is->audio_buf1;
1704 }
1705
1706 /* if no pts, then compute it */
1707 pts = is->audio_clock;
1708 *pts_ptr = pts;
1709 n = 2 * dec->channels;
1710 is->audio_clock += (double)data_size /
1711 (double)(n * dec->sample_rate);
1712 #if defined(DEBUG_SYNC)
1713 {
1714 static double last_clock;
1715 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1716 is->audio_clock - last_clock,
1717 is->audio_clock, pts);
1718 last_clock = is->audio_clock;
1719 }
1720 #endif
1721 return data_size;
1722 }
1723
1724 /* free the current packet */
1725 if (pkt->data)
1726 av_free_packet(pkt);
1727
1728 if (is->paused || is->audioq.abort_request) {
1729 return -1;
1730 }
1731
1732 /* read next packet */
1733 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1734 return -1;
1735 if(pkt->data == flush_pkt.data){
1736 avcodec_flush_buffers(dec);
1737 continue;
1738 }
1739
1740 pkt_temp->data = pkt->data;
1741 pkt_temp->size = pkt->size;
1742
1743 /* update the audio clock with the packet pts, if available */
1744 if (pkt->pts != AV_NOPTS_VALUE) {
1745 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1746 }
1747 }
1748 }
1749
1750 /* get the remaining size of the current audio output buffer, in bytes. With SDL, we
1751 cannot get precise information */
1752 static int audio_write_get_buf_size(VideoState *is)
1753 {
1754 return is->audio_buf_size - is->audio_buf_index;
1755 }
1756
1757
1758 /* prepare a new audio buffer */
1759 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1760 {
1761 VideoState *is = opaque;
1762 int audio_size, len1;
1763 double pts;
1764
1765 audio_callback_time = av_gettime();
1766
1767 while (len > 0) {
1768 if (is->audio_buf_index >= is->audio_buf_size) {
1769 audio_size = audio_decode_frame(is, &pts);
1770 if (audio_size < 0) {
1771 /* if error, just output silence */
1772 is->audio_buf = is->audio_buf1;
1773 is->audio_buf_size = 1024;
1774 memset(is->audio_buf, 0, is->audio_buf_size);
1775 } else {
1776 if (is->show_audio)
1777 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1778 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1779 pts);
1780 is->audio_buf_size = audio_size;
1781 }
1782 is->audio_buf_index = 0;
1783 }
1784 len1 = is->audio_buf_size - is->audio_buf_index;
1785 if (len1 > len)
1786 len1 = len;
1787 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1788 len -= len1;
1789 stream += len1;
1790 is->audio_buf_index += len1;
1791 }
1792 }
1793
1794 /* open a given stream. Return 0 if OK */
1795 static int stream_component_open(VideoState *is, int stream_index)
1796 {
1797 AVFormatContext *ic = is->ic;
1798 AVCodecContext *enc;
1799 AVCodec *codec;
1800 SDL_AudioSpec wanted_spec, spec;
1801
1802 if (stream_index < 0 || stream_index >= ic->nb_streams)
1803 return -1;
1804 enc = ic->streams[stream_index]->codec;
1805
1806 /* prepare audio output */
1807 if (enc->codec_type == CODEC_TYPE_AUDIO) {
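        /* SDL audio output is at most stereo, so ask the decoder to downmix */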
1808 if (enc->channels > 0) {
1809 enc->request_channels = FFMIN(2, enc->channels);
1810 } else {
1811 enc->request_channels = 2;
1812 }
1813 }
1814
1815 codec = avcodec_find_decoder(enc->codec_id);
1816 enc->debug_mv = debug_mv;
1817 enc->debug = debug;
1818 enc->workaround_bugs = workaround_bugs;
1819 enc->lowres = lowres;
1820 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1821 enc->idct_algo= idct;
1822 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1823 enc->skip_frame= skip_frame;
1824 enc->skip_idct= skip_idct;
1825 enc->skip_loop_filter= skip_loop_filter;
1826 enc->error_recognition= error_recognition;
1827 enc->error_concealment= error_concealment;
1828 avcodec_thread_init(enc, thread_count);
1829
1830 set_context_opts(enc, avcodec_opts[enc->codec_type], 0);
1831
1832 if (!codec ||
1833 avcodec_open(enc, codec) < 0)
1834 return -1;
1835
1836 /* prepare audio output */
1837 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1838 wanted_spec.freq = enc->sample_rate;
1839 wanted_spec.format = AUDIO_S16SYS;
1840 wanted_spec.channels = enc->channels;
1841 wanted_spec.silence = 0;
1842 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1843 wanted_spec.callback = sdl_audio_callback;
1844 wanted_spec.userdata = is;
1845 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1846 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1847 return -1;
1848 }
1849 is->audio_hw_buf_size = spec.size;
1850 is->audio_src_fmt= SAMPLE_FMT_S16;
1851 }
1852
1853 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
1854 switch(enc->codec_type) {
1855 case CODEC_TYPE_AUDIO:
1856 is->audio_stream = stream_index;
1857 is->audio_st = ic->streams[stream_index];
1858 is->audio_buf_size = 0;
1859 is->audio_buf_index = 0;
1860
1861 /* init averaging filter */
1862 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1863 is->audio_diff_avg_count = 0;
1864 /* since we do not have precise enough audio FIFO fullness information,
1865 we correct audio sync only if the error is larger than this threshold */
1866 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1867
1868 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1869 packet_queue_init(&is->audioq);
1870 SDL_PauseAudio(0);
1871 break;
1872 case CODEC_TYPE_VIDEO:
1873 is->video_stream = stream_index;
1874 is->video_st = ic->streams[stream_index];
1875
1876 // is->video_current_pts_time = av_gettime();
1877
1878 packet_queue_init(&is->videoq);
1879 is->video_tid = SDL_CreateThread(video_thread, is);
1880 break;
1881 case CODEC_TYPE_SUBTITLE:
1882 is->subtitle_stream = stream_index;
1883 is->subtitle_st = ic->streams[stream_index];
1884 packet_queue_init(&is->subtitleq);
1885
1886 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1887 break;
1888 default:
1889 break;
1890 }
1891 return 0;
1892 }
1893
1894 static void stream_component_close(VideoState *is, int stream_index)
1895 {
1896 AVFormatContext *ic = is->ic;
1897 AVCodecContext *enc;
1898
1899 if (stream_index < 0 || stream_index >= ic->nb_streams)
1900 return;
1901 enc = ic->streams[stream_index]->codec;
1902
1903 switch(enc->codec_type) {
1904 case CODEC_TYPE_AUDIO:
1905 packet_queue_abort(&is->audioq);
1906
1907 SDL_CloseAudio();
1908
1909 packet_queue_end(&is->audioq);
1910 if (is->reformat_ctx)
1911 av_audio_convert_free(is->reformat_ctx);
1912 break;
1913 case CODEC_TYPE_VIDEO:
1914 packet_queue_abort(&is->videoq);
1915
1916 /* note: we also signal this mutex to make sure we unblock the
1917 video thread in all cases */
1918 SDL_LockMutex(is->pictq_mutex);
1919 SDL_CondSignal(is->pictq_cond);
1920 SDL_UnlockMutex(is->pictq_mutex);
1921
1922 SDL_WaitThread(is->video_tid, NULL);
1923
1924 packet_queue_end(&is->videoq);
1925 break;
1926 case CODEC_TYPE_SUBTITLE:
1927 packet_queue_abort(&is->subtitleq);
1928
1929 /* note: we also signal this mutex to make sure we unblock the
1930 subtitle thread in all cases */
1931 SDL_LockMutex(is->subpq_mutex);
1932 is->subtitle_stream_changed = 1;
1933
1934 SDL_CondSignal(is->subpq_cond);
1935 SDL_UnlockMutex(is->subpq_mutex);
1936
1937 SDL_WaitThread(is->subtitle_tid, NULL);
1938
1939 packet_queue_end(&is->subtitleq);
1940 break;
1941 default:
1942 break;
1943 }
1944
1945 ic->streams[stream_index]->discard = AVDISCARD_ALL;
1946 avcodec_close(enc);
1947 switch(enc->codec_type) {
1948 case CODEC_TYPE_AUDIO:
1949 is->audio_st = NULL;
1950 is->audio_stream = -1;
1951 break;
1952 case CODEC_TYPE_VIDEO:
1953 is->video_st = NULL;
1954 is->video_stream = -1;
1955 break;
1956 case CODEC_TYPE_SUBTITLE:
1957 is->subtitle_st = NULL;
1958 is->subtitle_stream = -1;
1959 break;
1960 default:
1961 break;
1962 }
1963 }
1964
1965 /* since we have only one decoding thread, we can use a global
1966 variable instead of a thread local variable */
1967 static VideoState *global_video_state;
1968
1969 static int decode_interrupt_cb(void)
1970 {
1971 return (global_video_state && global_video_state->abort_request);
1972 }
1973
1974 /* this thread gets the stream from the disk or the network */
1975 static int decode_thread(void *arg)
1976 {
1977 VideoState *is = arg;
1978 AVFormatContext *ic;
1979 int err, i, ret, video_index, audio_index, subtitle_index;
1980 AVPacket pkt1, *pkt = &pkt1;
1981 AVFormatParameters params, *ap = &params;
1982 int eof=0;
1983
1984 ic = avformat_alloc_context();
1985
1986 video_index = -1;
1987 audio_index = -1;
1988 subtitle_index = -1;
1989 is->video_stream = -1;
1990 is->audio_stream = -1;
1991 is->subtitle_stream = -1;
1992
1993 global_video_state = is;
1994 url_set_interrupt_cb(decode_interrupt_cb);
1995
1996 memset(ap, 0, sizeof(*ap));
1997
1998 ap->prealloced_context = 1;
1999 ap->width = frame_width;
2000 ap->height= frame_height;
2001 ap->time_base= (AVRational){1, 25};
2002 ap->pix_fmt = frame_pix_fmt;
2003
2004 set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
2005
2006 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2007 if (err < 0) {
2008 print_error(is->filename, err);
2009 ret = -1;
2010 goto fail;
2011 }
2012 is->ic = ic;
2013
2014 if(genpts)
2015 ic->flags |= AVFMT_FLAG_GENPTS;
2016
2017 err = av_find_stream_info(ic);
2018 if (err < 0) {
2019 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2020 ret = -1;
2021 goto fail;
2022 }
2023 if(ic->pb)
2024 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2025
2026 if(seek_by_bytes<0)
2027 seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2028
2029    /* if a seek was requested, execute it */
2030 if (start_time != AV_NOPTS_VALUE) {
2031 int64_t timestamp;
2032
2033 timestamp = start_time;
2034 /* add the stream start time */
2035 if (ic->start_time != AV_NOPTS_VALUE)
2036 timestamp += ic->start_time;
2037 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2038 if (ret < 0) {
2039 fprintf(stderr, "%s: could not seek to position %0.3f\n",
2040 is->filename, (double)timestamp / AV_TIME_BASE);
2041 }
2042 }
2043
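         /* select the streams to open according to the -ast/-vst/-sst and -an/-vn options */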
2044 for(i = 0; i < ic->nb_streams; i++) {
2045 AVCodecContext *enc = ic->streams[i]->codec;
2046 ic->streams[i]->discard = AVDISCARD_ALL;
2047 switch(enc->codec_type) {
2048 case CODEC_TYPE_AUDIO:
2049 if (wanted_audio_stream-- >= 0 && !audio_disable)
2050 audio_index = i;
2051 break;
2052 case CODEC_TYPE_VIDEO:
2053 if (wanted_video_stream-- >= 0 && !video_disable)
2054 video_index = i;
2055 break;
2056 case CODEC_TYPE_SUBTITLE:
2057 if (wanted_subtitle_stream-- >= 0 && !video_disable)
2058 subtitle_index = i;
2059 break;
2060 default:
2061 break;
2062 }
2063 }
2064 if (show_status) {
2065 dump_format(ic, 0, is->filename, 0);
2066 }
2067
2068 /* open the streams */
2069 if (audio_index >= 0) {
2070 stream_component_open(is, audio_index);
2071 }
2072
2073 if (video_index >= 0) {
2074 stream_component_open(is, video_index);
2075 } else {
2076 /* add the refresh timer to draw the picture */
2077 schedule_refresh(is, 40);
2078
2079 if (!display_disable)
2080 is->show_audio = 2;
2081 }
2082
2083 if (subtitle_index >= 0) {
2084 stream_component_open(is, subtitle_index);
2085 }
2086
2087 if (is->video_stream < 0 && is->audio_stream < 0) {
2088 fprintf(stderr, "%s: could not open codecs\n", is->filename);
2089 ret = -1;
2090 goto fail;
2091 }
2092
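         /* main demuxing loop: read packets and dispatch them to the stream packet queues */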
2093 for(;;) {
2094 if (is->abort_request)
2095 break;
2096 if (is->paused != is->last_paused) {
2097 is->last_paused = is->paused;
2098 if (is->paused)
2099 is->read_pause_return= av_read_pause(ic);
2100 else
2101 av_read_play(ic);
2102 }
2103 #if CONFIG_RTSP_DEMUXER
2104 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2105 /* wait 10 ms to avoid trying to get another packet */
2106 /* XXX: horrible */
2107 SDL_Delay(10);
2108 continue;
2109 }
2110 #endif
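             /* a seek was requested: perform it and, on success, flush the packet queues */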
2111 if (is->seek_req) {
2112 int64_t seek_target= is->seek_pos;
2113 int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2114 int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2115 //FIXME the +-2 is due to rounding not being done in the correct direction when
2116 //      the seek_pos/seek_rel variables are generated
2117
2118 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2119 if (ret < 0) {
2120 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2121 }else{
2122 if (is->audio_stream >= 0) {
2123 packet_queue_flush(&is->audioq);
2124 packet_queue_put(&is->audioq, &flush_pkt);
2125 }
2126 if (is->subtitle_stream >= 0) {
2127 packet_queue_flush(&is->subtitleq);
2128 packet_queue_put(&is->subtitleq, &flush_pkt);
2129 }
2130 if (is->video_stream >= 0) {
2131 packet_queue_flush(&is->videoq);
2132 packet_queue_put(&is->videoq, &flush_pkt);
2133 }
2134 }
2135 is->seek_req = 0;
2136 eof= 0;
2137 }
2138
2139        /* if the queues are full, no need to read more */
2140 if ( is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2141 || ( (is->audioq .size > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2142 && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream<0)
2143 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2144 /* wait 10 ms */
2145 SDL_Delay(10);
2146 continue;
2147 }
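             /* at end of file, queue an empty packet so the video decoder can flush its
                buffered frames, then wait (or exit if -autoexit is set) */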
2148 if(url_feof(ic->pb) || eof) {
2149 if(is->video_stream >= 0){
2150 av_init_packet(pkt);
2151 pkt->data=NULL;
2152 pkt->size=0;
2153 pkt->stream_index= is->video_stream;
2154 packet_queue_put(&is->videoq, pkt);
2155 }
2156 SDL_Delay(10);
2157 if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2158 ret=AVERROR_EOF;
2159 goto fail;
2160 }
2161 continue;
2162 }
2163 ret = av_read_frame(ic, pkt);
2164 if (ret < 0) {
2165 if (ret == AVERROR_EOF)
2166 eof=1;
2167 if (url_ferror(ic->pb))
2168 break;
2169 SDL_Delay(100); /* wait for user event */
2170 continue;
2171 }
2172 if (pkt->stream_index == is->audio_stream) {
2173 packet_queue_put(&is->audioq, pkt);
2174 } else if (pkt->stream_index == is->video_stream) {
2175 packet_queue_put(&is->videoq, pkt);
2176 } else if (pkt->stream_index == is->subtitle_stream) {
2177 packet_queue_put(&is->subtitleq, pkt);
2178 } else {
2179 av_free_packet(pkt);
2180 }
2181 }
2182    /* wait here until an abort is requested */
2183 while (!is->abort_request) {
2184 SDL_Delay(100);
2185 }
2186
2187 ret = 0;
2188 fail:
2189 /* disable interrupting */
2190 global_video_state = NULL;
2191
2192 /* close each stream */
2193 if (is->audio_stream >= 0)
2194 stream_component_close(is, is->audio_stream);
2195 if (is->video_stream >= 0)
2196 stream_component_close(is, is->video_stream);
2197 if (is->subtitle_stream >= 0)
2198 stream_component_close(is, is->subtitle_stream);
2199 if (is->ic) {
2200 av_close_input_file(is->ic);
2201 is->ic = NULL; /* safety */
2202 }
2203 url_set_interrupt_cb(NULL);
2204
2205 if (ret != 0) {
2206 SDL_Event event;
2207
2208 event.type = FF_QUIT_EVENT;
2209 event.user.data1 = is;
2210 SDL_PushEvent(&event);
2211 }
2212 return 0;
2213 }
2214
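     /* allocate a VideoState, create its mutexes and condition variables and
        start the demuxing (parse) thread */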
2215 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2216 {
2217 VideoState *is;
2218
2219 is = av_mallocz(sizeof(VideoState));
2220 if (!is)
2221 return NULL;
2222 av_strlcpy(is->filename, filename, sizeof(is->filename));
2223 is->iformat = iformat;
2224 is->ytop = 0;
2225 is->xleft = 0;
2226
2227 /* start video display */
2228 is->pictq_mutex = SDL_CreateMutex();
2229 is->pictq_cond = SDL_CreateCond();
2230
2231 is->subpq_mutex = SDL_CreateMutex();
2232 is->subpq_cond = SDL_CreateCond();
2233
2234 is->av_sync_type = av_sync_type;
2235 is->parse_tid = SDL_CreateThread(decode_thread, is);
2236 if (!is->parse_tid) {
2237 av_free(is);
2238 return NULL;
2239 }
2240 return is;
2241 }
2242
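     /* stop the demuxing thread and free every resource owned by the VideoState */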
2243 static void stream_close(VideoState *is)
2244 {
2245 VideoPicture *vp;
2246 int i;
2247 /* XXX: use a special url_shutdown call to abort parse cleanly */
2248 is->abort_request = 1;
2249 SDL_WaitThread(is->parse_tid, NULL);
2250
2251 /* free all pictures */
2252 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2253 vp = &is->pictq[i];
2254 if (vp->bmp) {
2255 SDL_FreeYUVOverlay(vp->bmp);
2256 vp->bmp = NULL;
2257 }
2258 }
2259 SDL_DestroyMutex(is->pictq_mutex);
2260 SDL_DestroyCond(is->pictq_cond);
2261 SDL_DestroyMutex(is->subpq_mutex);
2262 SDL_DestroyCond(is->subpq_cond);
2263 if (is->img_convert_ctx)
2264 sws_freeContext(is->img_convert_ctx);
2265 av_free(is);
2266 }
2267
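     /* switch to the next usable stream of the given type, wrapping around the stream list */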
2268 static void stream_cycle_channel(VideoState *is, int codec_type)
2269 {
2270 AVFormatContext *ic = is->ic;
2271 int start_index, stream_index;
2272 AVStream *st;
2273
2274 if (codec_type == CODEC_TYPE_VIDEO)
2275 start_index = is->video_stream;
2276 else if (codec_type == CODEC_TYPE_AUDIO)
2277 start_index = is->audio_stream;
2278 else
2279 start_index = is->subtitle_stream;
2280 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2281 return;
2282 stream_index = start_index;
2283 for(;;) {
2284 if (++stream_index >= is->ic->nb_streams)
2285 {
2286 if (codec_type == CODEC_TYPE_SUBTITLE)
2287 {
2288 stream_index = -1;
2289 goto the_end;
2290 } else
2291 stream_index = 0;
2292 }
2293 if (stream_index == start_index)
2294 return;
2295 st = ic->streams[stream_index];
2296 if (st->codec->codec_type == codec_type) {
2297 /* check that parameters are OK */
2298 switch(codec_type) {
2299 case CODEC_TYPE_AUDIO:
2300 if (st->codec->sample_rate != 0 &&
2301 st->codec->channels != 0)
2302 goto the_end;
2303 break;
2304 case CODEC_TYPE_VIDEO:
2305 case CODEC_TYPE_SUBTITLE:
2306 goto the_end;
2307 default:
2308 break;
2309 }
2310 }
2311 }
2312 the_end:
2313 stream_component_close(is, start_index);
2314 stream_component_open(is, stream_index);
2315 }
2316
2317
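     /* switch between windowed and full screen display */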
2318 static void toggle_full_screen(void)
2319 {
2320 is_full_screen = !is_full_screen;
2321 if (!fs_screen_width) {
2322 /* use default SDL method */
2323 // SDL_WM_ToggleFullScreen(screen);
2324 }
2325 video_open(cur_stream);
2326 }
2327
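     /* toggle the pause state and leave frame-step mode */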
2328 static void toggle_pause(void)
2329 {
2330 if (cur_stream)
2331 stream_pause(cur_stream);
2332 step = 0;
2333 }
2334
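     /* advance the playback by a single video frame */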
2335 static void step_to_next_frame(void)
2336 {
2337 if (cur_stream) {
2338        /* if the stream is paused, unpause it, then step */
2339 if (cur_stream->paused)
2340 stream_pause(cur_stream);
2341 }
2342 step = 1;
2343 }
2344
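     /* free the global resources and terminate the program */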
2345 static void do_exit(void)
2346 {
2347 int i;
2348 if (cur_stream) {
2349 stream_close(cur_stream);
2350 cur_stream = NULL;
2351 }
2352 for (i = 0; i < CODEC_TYPE_NB; i++)
2353 av_free(avcodec_opts[i]);
2354 av_free(avformat_opts);
2355    sws_freeContext(sws_opts);
2356 if (show_status)
2357 printf("\n");
2358 SDL_Quit();
2359 exit(0);
2360 }
2361
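     /* cycle through the audio visualization modes and clear the display area */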
2362 static void toggle_audio_display(void)
2363 {
2364 if (cur_stream) {
2365 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2366 cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2367 fill_rectangle(screen,
2368 cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2369 bgcolor);
2370 SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2371 }
2372 }
2373
2374 /* handle an event sent by the GUI */
2375 static void event_loop(void)
2376 {
2377 SDL_Event event;
2378 double incr, pos, frac;
2379
2380 for(;;) {
2381 double x;
2382 SDL_WaitEvent(&event);
2383 switch(event.type) {
2384 case SDL_KEYDOWN:
2385 switch(event.key.keysym.sym) {
2386 case SDLK_ESCAPE:
2387 case SDLK_q:
2388 do_exit();
2389 break;
2390 case SDLK_f:
2391 toggle_full_screen();
2392 break;
2393 case SDLK_p:
2394 case SDLK_SPACE:
2395 toggle_pause();
2396 break;
2397 case SDLK_s: //S: Step to next frame
2398 step_to_next_frame();
2399 break;
2400 case SDLK_a:
2401 if (cur_stream)
2402 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2403 break;
2404 case SDLK_v:
2405 if (cur_stream)
2406 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2407 break;
2408 case SDLK_t:
2409 if (cur_stream)
2410 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2411 break;
2412 case SDLK_w:
2413 toggle_audio_display();
2414 break;
2415 case SDLK_LEFT:
2416 incr = -10.0;
2417 goto do_seek;
2418 case SDLK_RIGHT:
2419 incr = 10.0;
2420 goto do_seek;
2421 case SDLK_UP:
2422 incr = 60.0;
2423 goto do_seek;
2424 case SDLK_DOWN:
2425 incr = -60.0;
2426 do_seek:
2427 if (cur_stream) {
2428 if (seek_by_bytes) {
2429 if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2430 pos= cur_stream->video_current_pos;
2431 }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2432 pos= cur_stream->audio_pkt.pos;
2433 }else
2434 pos = url_ftell(cur_stream->ic->pb);
2435 if (cur_stream->ic->bit_rate)
2436 incr *= cur_stream->ic->bit_rate / 8.0;
2437 else
2438 incr *= 180000.0;
2439 pos += incr;
2440 stream_seek(cur_stream, pos, incr, 1);
2441 } else {
2442 pos = get_master_clock(cur_stream);
2443 pos += incr;
2444 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2445 }
2446 }
2447 break;
2448 default:
2449 break;
2450 }
2451 break;
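             /* a mouse click or drag seeks to the corresponding fraction of the file */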
2452 case SDL_MOUSEBUTTONDOWN:
2453 case SDL_MOUSEMOTION:
2454 if(event.type ==SDL_MOUSEBUTTONDOWN){
2455 x= event.button.x;
2456 }else{
2457 if(event.motion.state != SDL_PRESSED)
2458 break;
2459 x= event.motion.x;
2460 }
2461 if (cur_stream) {
2462 if(seek_by_bytes || cur_stream->ic->duration<=0){
2463 uint64_t size= url_fsize(cur_stream->ic->pb);
2464 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2465 }else{
2466 int64_t ts;
2467 int ns, hh, mm, ss;
2468 int tns, thh, tmm, tss;
2469 tns = cur_stream->ic->duration/1000000LL;
2470 thh = tns/3600;
2471 tmm = (tns%3600)/60;
2472 tss = (tns%60);
2473 frac = x/cur_stream->width;
2474 ns = frac*tns;
2475 hh = ns/3600;
2476 mm = (ns%3600)/60;
2477 ss = (ns%60);
2478 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2479 hh, mm, ss, thh, tmm, tss);
2480 ts = frac*cur_stream->ic->duration;
2481 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2482 ts += cur_stream->ic->start_time;
2483 stream_seek(cur_stream, ts, 0, 0);
2484 }
2485 }
2486 break;
2487 case SDL_VIDEORESIZE:
2488 if (cur_stream) {
2489 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2490 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2491 screen_width = cur_stream->width = event.resize.w;
2492 screen_height= cur_stream->height= event.resize.h;
2493 }
2494 break;
2495 case SDL_QUIT:
2496 case FF_QUIT_EVENT:
2497 do_exit();
2498 break;
2499 case FF_ALLOC_EVENT:
2500 video_open(event.user.data1);
2501 alloc_picture(event.user.data1);
2502 break;
2503 case FF_REFRESH_EVENT:
2504 video_refresh_timer(event.user.data1);
2505 break;
2506 default:
2507 break;
2508 }
2509 }
2510 }
2511
2512 static void opt_frame_size(const char *arg)
2513 {
2514 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2515 fprintf(stderr, "Incorrect frame size\n");
2516 exit(1);
2517 }
2518 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2519 fprintf(stderr, "Frame size must be a multiple of 2\n");
2520 exit(1);
2521 }
2522 }
2523
2524 static int opt_width(const char *opt, const char *arg)
2525 {
2526 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2527 return 0;
2528 }
2529
2530 static int opt_height(const char *opt, const char *arg)
2531 {
2532 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2533 return 0;
2534 }
2535
2536 static void opt_format(const char *arg)
2537 {
2538 file_iformat = av_find_input_format(arg);
2539 if (!file_iformat) {
2540 fprintf(stderr, "Unknown input format: %s\n", arg);
2541 exit(1);
2542 }
2543 }
2544
2545 static void opt_frame_pix_fmt(const char *arg)
2546 {
2547 frame_pix_fmt = av_get_pix_fmt(arg);
2548 }
2549
2550 static int opt_sync(const char *opt, const char *arg)
2551 {
2552 if (!strcmp(arg, "audio"))
2553 av_sync_type = AV_SYNC_AUDIO_MASTER;
2554 else if (!strcmp(arg, "video"))
2555 av_sync_type = AV_SYNC_VIDEO_MASTER;
2556 else if (!strcmp(arg, "ext"))
2557 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2558 else {
2559 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2560 exit(1);
2561 }
2562 return 0;
2563 }
2564
2565 static int opt_seek(const char *opt, const char *arg)
2566 {
2567 start_time = parse_time_or_die(opt, arg, 1);
2568 return 0;
2569 }
2570
2571 static int opt_debug(const char *opt, const char *arg)
2572 {
2573 av_log_set_level(99);
2574 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2575 return 0;
2576 }
2577
2578 static int opt_vismv(const char *opt, const char *arg)
2579 {
2580 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2581 return 0;
2582 }
2583
2584 static int opt_thread_count(const char *opt, const char *arg)
2585 {
2586 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2587 #if !HAVE_THREADS
2588 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2589 #endif
2590 return 0;
2591 }
2592
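     /* command line options; entries flagged OPT_EXPERT are only listed in the advanced help */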
2593 static const OptionDef options[] = {
2594 #include "cmdutils_common_opts.h"
2595 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2596 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2597 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2598 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2599 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2600 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2601 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
2602 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
2603 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
2604 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2605 { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2606 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2607 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2608 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2609 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2610 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2611 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2612 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2613 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2614 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2615 { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2616     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "enable low resolution decoding (1: 1/2 size, 2: 1/4 size)", "factor" },
2617     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "skip loop filtering for the selected frames", "mode" },
2618     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "skip decoding of the selected frames", "mode" },
2619     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "skip the IDCT for the selected frames", "mode" },
2620 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2621 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
2622 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2623 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2624 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2625 { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2626 { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2627 { NULL, },
2628 };
2629
2630 static void show_usage(void)
2631 {
2632 printf("Simple media player\n");
2633 printf("usage: ffplay [options] input_file\n");
2634 printf("\n");
2635 }
2636
2637 static void show_help(void)
2638 {
2639 show_usage();
2640 show_help_options(options, "Main options:\n",
2641 OPT_EXPERT, 0);
2642 show_help_options(options, "\nAdvanced options:\n",
2643 OPT_EXPERT, OPT_EXPERT);
2644 printf("\nWhile playing:\n"
2645 "q, ESC quit\n"
2646 "f toggle full screen\n"
2647 "p, SPC pause\n"
2648 "a cycle audio channel\n"
2649 "v cycle video channel\n"
2650 "t cycle subtitle channel\n"
2651 "w show audio waves\n"
2652 "left/right seek backward/forward 10 seconds\n"
2653 "down/up seek backward/forward 1 minute\n"
2654 "mouse click seek to percentage in file corresponding to fraction of width\n"
2655 );
2656 }
2657
2658 static void opt_input_file(const char *filename)
2659 {
2660 if (!strcmp(filename, "-"))
2661 filename = "pipe:";
2662 input_filename = filename;
2663 }
2664
2665 /* program entry point */
2666 int main(int argc, char **argv)
2667 {
2668 int flags, i;
2669
2670    /* register all codecs, demuxers and protocols */
2671 avcodec_register_all();
2672 avdevice_register_all();
2673 av_register_all();
2674
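         /* allocate the contexts that hold the codec, format and scaler options
            given on the command line */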
2675 for(i=0; i<CODEC_TYPE_NB; i++){
2676 avcodec_opts[i]= avcodec_alloc_context2(i);
2677 }
2678 avformat_opts = avformat_alloc_context();
2679 sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
2680
2681 show_banner();
2682
2683 parse_options(argc, argv, options, opt_input_file);
2684
2685 if (!input_filename) {
2686 show_usage();
2687 fprintf(stderr, "An input file must be specified\n");
2688 fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
2689 exit(1);
2690 }
2691
2692 if (display_disable) {
2693 video_disable = 1;
2694 }
2695 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2696 #if !defined(__MINGW32__) && !defined(__APPLE__)
2697 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2698 #endif
2699 if (SDL_Init (flags)) {
2700 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2701 exit(1);
2702 }
2703
2704 if (!display_disable) {
2705 #if HAVE_SDL_VIDEO_SIZE
2706 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2707 fs_screen_width = vi->current_w;
2708 fs_screen_height = vi->current_h;
2709 #endif
2710 }
2711
2712 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2713 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2714 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2715
2716 av_init_packet(&flush_pkt);
2717    flush_pkt.data= (uint8_t *)"FLUSH";
2718
2719 cur_stream = stream_open(input_filename, file_iformat);
2720
2721 event_loop();
2722
2723 /* never returns */
2724
2725 return 0;
2726 }