Print usage instead of help when no files are specified.
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <math.h>
24 #include <limits.h>
25 #include "libavutil/avstring.h"
26 #include "libavformat/avformat.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
29 #include "libavcodec/audioconvert.h"
30 #include "libavcodec/colorspace.h"
31 #include "libavcodec/opt.h"
32
33 #include "cmdutils.h"
34
35 #include <SDL.h>
36 #include <SDL_thread.h>
37
38 #ifdef __MINGW32__
39 #undef main /* We don't want SDL to override our main() */
40 #endif
41
42 #undef exit
43 #undef printf
44 #undef fprintf
45
46 const char program_name[] = "FFplay";
47 const int program_birth_year = 2003;
48
49 //#define DEBUG_SYNC
50
51 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
52 #define MAX_AUDIOQ_SIZE (20 * 16 * 1024)
53 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
54
55 /* SDL audio buffer size, in samples. Should be small to have precise
56 A/V sync as SDL does not have hardware buffer fullness info. */
57 #define SDL_AUDIO_BUFFER_SIZE 1024
58
59 /* no AV sync correction is done if below the AV sync threshold */
60 #define AV_SYNC_THRESHOLD 0.01
61 /* no AV correction is done if too big error */
62 #define AV_NOSYNC_THRESHOLD 10.0
63
64 /* maximum audio speed change to get correct sync */
65 #define SAMPLE_CORRECTION_PERCENT_MAX 10
66
67 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
68 #define AUDIO_DIFF_AVG_NB 20
69
   70 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
71 #define SAMPLE_ARRAY_SIZE (2*65536)
72
73 static int sws_flags = SWS_BICUBIC;
74
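/* A PacketQueue is a simple thread-safe FIFO of demuxed AVPackets shared between
   the demuxing thread and a decoding thread; 'size' tracks the total number of
   buffered bytes and 'abort_request' lets blocked readers bail out. */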
75 typedef struct PacketQueue {
76 AVPacketList *first_pkt, *last_pkt;
77 int nb_packets;
78 int size;
79 int abort_request;
80 SDL_mutex *mutex;
81 SDL_cond *cond;
82 } PacketQueue;
83
84 #define VIDEO_PICTURE_QUEUE_SIZE 1
85 #define SUBPICTURE_QUEUE_SIZE 4
86
87 typedef struct VideoPicture {
88 double pts; ///<presentation time stamp for this picture
89 SDL_Overlay *bmp;
90 int width, height; /* source height & width */
91 int allocated;
92 } VideoPicture;
93
94 typedef struct SubPicture {
95 double pts; /* presentation time stamp for this picture */
96 AVSubtitle sub;
97 } SubPicture;
98
99 enum {
100 AV_SYNC_AUDIO_MASTER, /* default choice */
101 AV_SYNC_VIDEO_MASTER,
102 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
103 };
104
105 typedef struct VideoState {
106 SDL_Thread *parse_tid;
107 SDL_Thread *video_tid;
108 AVInputFormat *iformat;
109 int no_background;
110 int abort_request;
111 int paused;
112 int last_paused;
113 int seek_req;
114 int seek_flags;
115 int64_t seek_pos;
116 int64_t seek_rel;
117 AVFormatContext *ic;
118 int dtg_active_format;
119
120 int audio_stream;
121
122 int av_sync_type;
123 double external_clock; /* external clock base */
124 int64_t external_clock_time;
125
126 double audio_clock;
127 double audio_diff_cum; /* used for AV difference average computation */
128 double audio_diff_avg_coef;
129 double audio_diff_threshold;
130 int audio_diff_avg_count;
131 AVStream *audio_st;
132 PacketQueue audioq;
133 int audio_hw_buf_size;
134 /* samples output by the codec. we reserve more space for avsync
135 compensation */
136 DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
137 DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
138 uint8_t *audio_buf;
139 unsigned int audio_buf_size; /* in bytes */
140 int audio_buf_index; /* in bytes */
141 AVPacket audio_pkt_temp;
142 AVPacket audio_pkt;
143 enum SampleFormat audio_src_fmt;
144 AVAudioConvert *reformat_ctx;
145
146 int show_audio; /* if true, display audio samples */
147 int16_t sample_array[SAMPLE_ARRAY_SIZE];
148 int sample_array_index;
149 int last_i_start;
150
151 SDL_Thread *subtitle_tid;
152 int subtitle_stream;
153 int subtitle_stream_changed;
154 AVStream *subtitle_st;
155 PacketQueue subtitleq;
156 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
157 int subpq_size, subpq_rindex, subpq_windex;
158 SDL_mutex *subpq_mutex;
159 SDL_cond *subpq_cond;
160
161 double frame_timer;
162 double frame_last_pts;
163 double frame_last_delay;
164 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
165 int video_stream;
166 AVStream *video_st;
167 PacketQueue videoq;
168 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
169 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
170 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
171 int pictq_size, pictq_rindex, pictq_windex;
172 SDL_mutex *pictq_mutex;
173 SDL_cond *pictq_cond;
174 struct SwsContext *img_convert_ctx;
175
176 // QETimer *video_timer;
177 char filename[1024];
178 int width, height, xleft, ytop;
179 } VideoState;
180
181 static void show_help(void);
182 static int audio_write_get_buf_size(VideoState *is);
183
184 /* options specified by the user */
185 static AVInputFormat *file_iformat;
186 static const char *input_filename;
187 static int fs_screen_width;
188 static int fs_screen_height;
189 static int screen_width = 0;
190 static int screen_height = 0;
191 static int frame_width = 0;
192 static int frame_height = 0;
193 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
194 static int audio_disable;
195 static int video_disable;
196 static int wanted_audio_stream= 0;
197 static int wanted_video_stream= 0;
198 static int wanted_subtitle_stream= -1;
199 static int seek_by_bytes;
200 static int display_disable;
201 static int show_status = 1;
202 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
203 static int64_t start_time = AV_NOPTS_VALUE;
204 static int debug = 0;
205 static int debug_mv = 0;
206 static int step = 0;
207 static int thread_count = 1;
208 static int workaround_bugs = 1;
209 static int fast = 0;
210 static int genpts = 0;
211 static int lowres = 0;
212 static int idct = FF_IDCT_AUTO;
213 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
214 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
215 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
216 static int error_recognition = FF_ER_CAREFUL;
217 static int error_concealment = 3;
218 static int decoder_reorder_pts= 0;
219
220 /* current context */
221 static int is_full_screen;
222 static VideoState *cur_stream;
223 static int64_t audio_callback_time;
224
225 static AVPacket flush_pkt;
226
227 #define FF_ALLOC_EVENT (SDL_USEREVENT)
228 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
229 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
230
231 static SDL_Surface *screen;
232
233 /* packet queue handling */
234 static void packet_queue_init(PacketQueue *q)
235 {
236 memset(q, 0, sizeof(PacketQueue));
237 q->mutex = SDL_CreateMutex();
238 q->cond = SDL_CreateCond();
239 }
240
241 static void packet_queue_flush(PacketQueue *q)
242 {
243 AVPacketList *pkt, *pkt1;
244
245 SDL_LockMutex(q->mutex);
246 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
247 pkt1 = pkt->next;
248 av_free_packet(&pkt->pkt);
249 av_freep(&pkt);
250 }
251 q->last_pkt = NULL;
252 q->first_pkt = NULL;
253 q->nb_packets = 0;
254 q->size = 0;
255 SDL_UnlockMutex(q->mutex);
256 }
257
258 static void packet_queue_end(PacketQueue *q)
259 {
260 packet_queue_flush(q);
261 SDL_DestroyMutex(q->mutex);
262 SDL_DestroyCond(q->cond);
263 }
264
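/* Append a packet to the queue. The packet data is duplicated (except for the
   special flush packet) so the caller may release its copy; any waiting reader
   is woken via the condition variable. Returns 0 on success, -1 on failure. */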
265 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
266 {
267 AVPacketList *pkt1;
268
269 /* duplicate the packet */
270 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
271 return -1;
272
273 pkt1 = av_malloc(sizeof(AVPacketList));
274 if (!pkt1)
275 return -1;
276 pkt1->pkt = *pkt;
277 pkt1->next = NULL;
278
279
280 SDL_LockMutex(q->mutex);
281
282 if (!q->last_pkt)
284 q->first_pkt = pkt1;
285 else
286 q->last_pkt->next = pkt1;
287 q->last_pkt = pkt1;
288 q->nb_packets++;
289 q->size += pkt1->pkt.size + sizeof(*pkt1);
290 /* XXX: should duplicate packet data in DV case */
291 SDL_CondSignal(q->cond);
292
293 SDL_UnlockMutex(q->mutex);
294 return 0;
295 }
296
297 static void packet_queue_abort(PacketQueue *q)
298 {
299 SDL_LockMutex(q->mutex);
300
301 q->abort_request = 1;
302
303 SDL_CondSignal(q->cond);
304
305 SDL_UnlockMutex(q->mutex);
306 }
307
308 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
309 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
310 {
311 AVPacketList *pkt1;
312 int ret;
313
314 SDL_LockMutex(q->mutex);
315
316 for(;;) {
317 if (q->abort_request) {
318 ret = -1;
319 break;
320 }
321
322 pkt1 = q->first_pkt;
323 if (pkt1) {
324 q->first_pkt = pkt1->next;
325 if (!q->first_pkt)
326 q->last_pkt = NULL;
327 q->nb_packets--;
328 q->size -= pkt1->pkt.size + sizeof(*pkt1);
329 *pkt = pkt1->pkt;
330 av_free(pkt1);
331 ret = 1;
332 break;
333 } else if (!block) {
334 ret = 0;
335 break;
336 } else {
337 SDL_CondWait(q->cond, q->mutex);
338 }
339 }
340 SDL_UnlockMutex(q->mutex);
341 return ret;
342 }
343
344 static inline void fill_rectangle(SDL_Surface *screen,
345 int x, int y, int w, int h, int color)
346 {
347 SDL_Rect rect;
348 rect.x = x;
349 rect.y = y;
350 rect.w = w;
351 rect.h = h;
352 SDL_FillRect(screen, &rect, color);
353 }
354
355 #if 0
356 /* draw only the border of a rectangle */
357 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
358 {
359 int w1, w2, h1, h2;
360
361 /* fill the background */
362 w1 = x;
363 if (w1 < 0)
364 w1 = 0;
365 w2 = s->width - (x + w);
366 if (w2 < 0)
367 w2 = 0;
368 h1 = y;
369 if (h1 < 0)
370 h1 = 0;
371 h2 = s->height - (y + h);
372 if (h2 < 0)
373 h2 = 0;
374 fill_rectangle(screen,
375 s->xleft, s->ytop,
376 w1, s->height,
377 color);
378 fill_rectangle(screen,
379 s->xleft + s->width - w2, s->ytop,
380 w2, s->height,
381 color);
382 fill_rectangle(screen,
383 s->xleft + w1, s->ytop,
384 s->width - w1 - w2, h1,
385 color);
386 fill_rectangle(screen,
387 s->xleft + w1, s->ytop + s->height - h2,
388 s->width - w1 - w2, h2,
389 color);
390 }
391 #endif
392
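/* Pixel helpers used for subtitle blending: ALPHA_BLEND mixes an old and a new
   component with alpha 'a' ('s' is a fixed-point shift), RGBA_IN/YUVA_IN unpack
   a 32-bit pixel or palette entry into components, YUVA_OUT packs them back. */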
393 #define ALPHA_BLEND(a, oldp, newp, s)\
394 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
395
396 #define RGBA_IN(r, g, b, a, s)\
397 {\
398 unsigned int v = ((const uint32_t *)(s))[0];\
399 a = (v >> 24) & 0xff;\
400 r = (v >> 16) & 0xff;\
401 g = (v >> 8) & 0xff;\
402 b = v & 0xff;\
403 }
404
405 #define YUVA_IN(y, u, v, a, s, pal)\
406 {\
407 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
408 a = (val >> 24) & 0xff;\
409 y = (val >> 16) & 0xff;\
410 u = (val >> 8) & 0xff;\
411 v = val & 0xff;\
412 }
413
414 #define YUVA_OUT(d, y, u, v, a)\
415 {\
416 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
417 }
418
419
420 #define BPP 1
421
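/* Alpha-blend one palettized subtitle rectangle onto a YUV420P picture of size
   imgw x imgh. The special cases for odd x/y offsets and odd width/height are
   needed because the chroma planes are subsampled by 2 in both directions. */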
422 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
423 {
424 int wrap, wrap3, width2, skip2;
425 int y, u, v, a, u1, v1, a1, w, h;
426 uint8_t *lum, *cb, *cr;
427 const uint8_t *p;
428 const uint32_t *pal;
429 int dstx, dsty, dstw, dsth;
430
431 dstw = av_clip(rect->w, 0, imgw);
432 dsth = av_clip(rect->h, 0, imgh);
433 dstx = av_clip(rect->x, 0, imgw - dstw);
434 dsty = av_clip(rect->y, 0, imgh - dsth);
435 lum = dst->data[0] + dsty * dst->linesize[0];
436 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
437 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
438
439 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
440 skip2 = dstx >> 1;
441 wrap = dst->linesize[0];
442 wrap3 = rect->pict.linesize[0];
443 p = rect->pict.data[0];
444 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
445
446 if (dsty & 1) {
447 lum += dstx;
448 cb += skip2;
449 cr += skip2;
450
451 if (dstx & 1) {
452 YUVA_IN(y, u, v, a, p, pal);
453 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
454 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
455 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
456 cb++;
457 cr++;
458 lum++;
459 p += BPP;
460 }
461 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
462 YUVA_IN(y, u, v, a, p, pal);
463 u1 = u;
464 v1 = v;
465 a1 = a;
466 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
467
468 YUVA_IN(y, u, v, a, p + BPP, pal);
469 u1 += u;
470 v1 += v;
471 a1 += a;
472 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
473 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
474 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
475 cb++;
476 cr++;
477 p += 2 * BPP;
478 lum += 2;
479 }
480 if (w) {
481 YUVA_IN(y, u, v, a, p, pal);
482 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
483 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
484 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
485 p++;
486 lum++;
487 }
488 p += wrap3 - dstw * BPP;
489 lum += wrap - dstw - dstx;
490 cb += dst->linesize[1] - width2 - skip2;
491 cr += dst->linesize[2] - width2 - skip2;
492 }
493 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
494 lum += dstx;
495 cb += skip2;
496 cr += skip2;
497
498 if (dstx & 1) {
499 YUVA_IN(y, u, v, a, p, pal);
500 u1 = u;
501 v1 = v;
502 a1 = a;
503 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
504 p += wrap3;
505 lum += wrap;
506 YUVA_IN(y, u, v, a, p, pal);
507 u1 += u;
508 v1 += v;
509 a1 += a;
510 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
511 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
512 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
513 cb++;
514 cr++;
515 p += -wrap3 + BPP;
516 lum += -wrap + 1;
517 }
518 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
519 YUVA_IN(y, u, v, a, p, pal);
520 u1 = u;
521 v1 = v;
522 a1 = a;
523 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
524
525 YUVA_IN(y, u, v, a, p + BPP, pal);
526 u1 += u;
527 v1 += v;
528 a1 += a;
529 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
530 p += wrap3;
531 lum += wrap;
532
533 YUVA_IN(y, u, v, a, p, pal);
534 u1 += u;
535 v1 += v;
536 a1 += a;
537 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538
539 YUVA_IN(y, u, v, a, p + BPP, pal);
540 u1 += u;
541 v1 += v;
542 a1 += a;
543 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544
545 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
546 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
547
548 cb++;
549 cr++;
550 p += -wrap3 + 2 * BPP;
551 lum += -wrap + 2;
552 }
553 if (w) {
554 YUVA_IN(y, u, v, a, p, pal);
555 u1 = u;
556 v1 = v;
557 a1 = a;
558 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
559 p += wrap3;
560 lum += wrap;
561 YUVA_IN(y, u, v, a, p, pal);
562 u1 += u;
563 v1 += v;
564 a1 += a;
565 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
567 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
568 cb++;
569 cr++;
570 p += -wrap3 + BPP;
571 lum += -wrap + 1;
572 }
573 p += wrap3 + (wrap3 - dstw * BPP);
574 lum += wrap + (wrap - dstw - dstx);
575 cb += dst->linesize[1] - width2 - skip2;
576 cr += dst->linesize[2] - width2 - skip2;
577 }
578 /* handle odd height */
579 if (h) {
580 lum += dstx;
581 cb += skip2;
582 cr += skip2;
583
584 if (dstx & 1) {
585 YUVA_IN(y, u, v, a, p, pal);
586 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
587 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
588 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
589 cb++;
590 cr++;
591 lum++;
592 p += BPP;
593 }
594 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
595 YUVA_IN(y, u, v, a, p, pal);
596 u1 = u;
597 v1 = v;
598 a1 = a;
599 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
600
601 YUVA_IN(y, u, v, a, p + BPP, pal);
602 u1 += u;
603 v1 += v;
604 a1 += a;
605 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
606 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
607 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
608 cb++;
609 cr++;
610 p += 2 * BPP;
611 lum += 2;
612 }
613 if (w) {
614 YUVA_IN(y, u, v, a, p, pal);
615 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
617 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
618 }
619 }
620 }
621
622 static void free_subpicture(SubPicture *sp)
623 {
624 int i;
625
626 for (i = 0; i < sp->sub.num_rects; i++)
627 {
628 av_freep(&sp->sub.rects[i]->pict.data[0]);
629 av_freep(&sp->sub.rects[i]->pict.data[1]);
630 av_freep(&sp->sub.rects[i]);
631 }
632
633 av_free(sp->sub.rects);
634
635 memset(&sp->sub, 0, sizeof(AVSubtitle));
636 }
637
638 static void video_image_display(VideoState *is)
639 {
640 VideoPicture *vp;
641 SubPicture *sp;
642 AVPicture pict;
643 float aspect_ratio;
644 int width, height, x, y;
645 SDL_Rect rect;
646 int i;
647
648 vp = &is->pictq[is->pictq_rindex];
649 if (vp->bmp) {
650 /* XXX: use variable in the frame */
651 if (is->video_st->sample_aspect_ratio.num)
652 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
653 else if (is->video_st->codec->sample_aspect_ratio.num)
654 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
655 else
656 aspect_ratio = 0;
657 if (aspect_ratio <= 0.0)
658 aspect_ratio = 1.0;
659 aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
660 /* if an active format is indicated, then it overrides the
661 mpeg format */
662 #if 0
663 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
664 is->dtg_active_format = is->video_st->codec->dtg_active_format;
665 printf("dtg_active_format=%d\n", is->dtg_active_format);
666 }
667 #endif
668 #if 0
669 switch(is->video_st->codec->dtg_active_format) {
670 case FF_DTG_AFD_SAME:
671 default:
672 /* nothing to do */
673 break;
674 case FF_DTG_AFD_4_3:
675 aspect_ratio = 4.0 / 3.0;
676 break;
677 case FF_DTG_AFD_16_9:
678 aspect_ratio = 16.0 / 9.0;
679 break;
680 case FF_DTG_AFD_14_9:
681 aspect_ratio = 14.0 / 9.0;
682 break;
683 case FF_DTG_AFD_4_3_SP_14_9:
684 aspect_ratio = 14.0 / 9.0;
685 break;
686 case FF_DTG_AFD_16_9_SP_14_9:
687 aspect_ratio = 14.0 / 9.0;
688 break;
689 case FF_DTG_AFD_SP_4_3:
690 aspect_ratio = 4.0 / 3.0;
691 break;
692 }
693 #endif
694
695 if (is->subtitle_st)
696 {
697 if (is->subpq_size > 0)
698 {
699 sp = &is->subpq[is->subpq_rindex];
700
701 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
702 {
703 SDL_LockYUVOverlay (vp->bmp);
704
705 pict.data[0] = vp->bmp->pixels[0];
706 pict.data[1] = vp->bmp->pixels[2];
707 pict.data[2] = vp->bmp->pixels[1];
708
709 pict.linesize[0] = vp->bmp->pitches[0];
710 pict.linesize[1] = vp->bmp->pitches[2];
711 pict.linesize[2] = vp->bmp->pitches[1];
712
713 for (i = 0; i < sp->sub.num_rects; i++)
714 blend_subrect(&pict, sp->sub.rects[i],
715 vp->bmp->w, vp->bmp->h);
716
717 SDL_UnlockYUVOverlay (vp->bmp);
718 }
719 }
720 }
721
722
723 /* XXX: we suppose the screen has a 1.0 pixel ratio */
724 height = is->height;
725 width = ((int)rint(height * aspect_ratio)) & ~1;
726 if (width > is->width) {
727 width = is->width;
728 height = ((int)rint(width / aspect_ratio)) & ~1;
729 }
730 x = (is->width - width) / 2;
731 y = (is->height - height) / 2;
732 if (!is->no_background) {
733 /* fill the background */
734 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
735 } else {
736 is->no_background = 0;
737 }
738 rect.x = is->xleft + x;
739 rect.y = is->ytop + y;
740 rect.w = width;
741 rect.h = height;
742 SDL_DisplayYUVOverlay(vp->bmp, &rect);
743 } else {
744 #if 0
745 fill_rectangle(screen,
746 is->xleft, is->ytop, is->width, is->height,
747 QERGB(0x00, 0x00, 0x00));
748 #endif
749 }
750 }
751
752 static inline int compute_mod(int a, int b)
753 {
754 a = a % b;
755 if (a >= 0)
756 return a;
757 else
758 return a + b;
759 }
760
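/* Draw a waveform of the most recently played audio samples, one horizontal
   band per channel. When not paused, the window of samples shown is centered
   on what the audio device is currently outputting (estimated from the buffer
   fill level and the time elapsed since the last audio callback). */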
761 static void video_audio_display(VideoState *s)
762 {
763 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
764 int ch, channels, h, h2, bgcolor, fgcolor;
765 int16_t time_diff;
766
767 /* compute display index : center on currently output samples */
768 channels = s->audio_st->codec->channels;
769 nb_display_channels = channels;
770 if (!s->paused) {
771 n = 2 * channels;
772 delay = audio_write_get_buf_size(s);
773 delay /= n;
774
775 /* to be more precise, we take into account the time spent since
776 the last buffer computation */
777 if (audio_callback_time) {
778 time_diff = av_gettime() - audio_callback_time;
779 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
780 }
781
782 delay -= s->width / 2;
783 if (delay < s->width)
784 delay = s->width;
785
786 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
787
788 h= INT_MIN;
789 for(i=0; i<1000; i+=channels){
790 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
791 int a= s->sample_array[idx];
792 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
793 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
794 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
795 int score= a-d;
796 if(h<score && (b^c)<0){
797 h= score;
798 i_start= idx;
799 }
800 }
801
802 s->last_i_start = i_start;
803 } else {
804 i_start = s->last_i_start;
805 }
806
807 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
808 fill_rectangle(screen,
809 s->xleft, s->ytop, s->width, s->height,
810 bgcolor);
811
812 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
813
814 /* total height for one channel */
815 h = s->height / nb_display_channels;
816 /* graph height / 2 */
817 h2 = (h * 9) / 20;
818 for(ch = 0;ch < nb_display_channels; ch++) {
819 i = i_start + ch;
820 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
821 for(x = 0; x < s->width; x++) {
822 y = (s->sample_array[i] * h2) >> 15;
823 if (y < 0) {
824 y = -y;
825 ys = y1 - y;
826 } else {
827 ys = y1;
828 }
829 fill_rectangle(screen,
830 s->xleft + x, ys, 1, y,
831 fgcolor);
832 i += channels;
833 if (i >= SAMPLE_ARRAY_SIZE)
834 i -= SAMPLE_ARRAY_SIZE;
835 }
836 }
837
838 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
839
840 for(ch = 1;ch < nb_display_channels; ch++) {
841 y = s->ytop + ch * h;
842 fill_rectangle(screen,
843 s->xleft, y, s->width, 1,
844 fgcolor);
845 }
846 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
847 }
848
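/* (Re)open the SDL output surface, preferring the full-screen size, then the
   user-requested size, then the video stream dimensions, and finally 640x480. */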
849 static int video_open(VideoState *is){
850 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
851 int w,h;
852
853 if(is_full_screen) flags |= SDL_FULLSCREEN;
854 else flags |= SDL_RESIZABLE;
855
856 if (is_full_screen && fs_screen_width) {
857 w = fs_screen_width;
858 h = fs_screen_height;
859 } else if(!is_full_screen && screen_width){
860 w = screen_width;
861 h = screen_height;
862 }else if (is->video_st && is->video_st->codec->width){
863 w = is->video_st->codec->width;
864 h = is->video_st->codec->height;
865 } else {
866 w = 640;
867 h = 480;
868 }
869 #ifndef __APPLE__
870 screen = SDL_SetVideoMode(w, h, 0, flags);
871 #else
872 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
873 screen = SDL_SetVideoMode(w, h, 24, flags);
874 #endif
875 if (!screen) {
876 fprintf(stderr, "SDL: could not set video mode - exiting\n");
877 return -1;
878 }
879 SDL_WM_SetCaption("FFplay", "FFplay");
880
881 is->width = screen->w;
882 is->height = screen->h;
883
884 return 0;
885 }
886
887 /* display the current picture, if any */
888 static void video_display(VideoState *is)
889 {
890 if(!screen)
891 video_open(cur_stream);
892 if (is->audio_st && is->show_audio)
893 video_audio_display(is);
894 else if (is->video_st)
895 video_image_display(is);
896 }
897
898 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
899 {
900 SDL_Event event;
901 event.type = FF_REFRESH_EVENT;
902 event.user.data1 = opaque;
903 SDL_PushEvent(&event);
904 return 0; /* 0 means stop timer */
905 }
906
907 /* schedule a video refresh in 'delay' ms */
908 static void schedule_refresh(VideoState *is, int delay)
909 {
910 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
911 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
912 }
913
914 /* get the current audio clock value */
915 static double get_audio_clock(VideoState *is)
916 {
917 double pts;
918 int hw_buf_size, bytes_per_sec;
919 pts = is->audio_clock;
920 hw_buf_size = audio_write_get_buf_size(is);
921 bytes_per_sec = 0;
922 if (is->audio_st) {
923 bytes_per_sec = is->audio_st->codec->sample_rate *
924 2 * is->audio_st->codec->channels;
925 }
926 if (bytes_per_sec)
927 pts -= (double)hw_buf_size / bytes_per_sec;
928 return pts;
929 }
930
931 /* get the current video clock value */
932 static double get_video_clock(VideoState *is)
933 {
934 double delta;
935 if (is->paused) {
936 delta = 0;
937 } else {
938 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
939 }
940 return is->video_current_pts + delta;
941 }
942
943 /* get the current external clock value */
944 static double get_external_clock(VideoState *is)
945 {
946 int64_t ti;
947 ti = av_gettime();
948 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
949 }
950
951 /* get the current master clock value */
952 static double get_master_clock(VideoState *is)
953 {
954 double val;
955
956 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
957 if (is->video_st)
958 val = get_video_clock(is);
959 else
960 val = get_audio_clock(is);
961 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
962 if (is->audio_st)
963 val = get_audio_clock(is);
964 else
965 val = get_video_clock(is);
966 } else {
967 val = get_external_clock(is);
968 }
969 return val;
970 }
971
972 /* seek in the stream */
973 static void stream_seek(VideoState *is, int64_t pos, int64_t rel)
974 {
975 if (!is->seek_req) {
976 is->seek_pos = pos;
977 is->seek_rel = rel;
978 if (seek_by_bytes)
979 is->seek_flags |= AVSEEK_FLAG_BYTE;
980 is->seek_req = 1;
981 }
982 }
983
984 /* pause or resume the video */
985 static void stream_pause(VideoState *is)
986 {
987 is->paused = !is->paused;
988 if (!is->paused) {
989 is->video_current_pts = get_video_clock(is);
990 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
991 }
992 }
993
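/* Compute how long to wait, in seconds, before displaying the frame with the
   given pts. Implausible pts deltas fall back to the previous delay; when video
   is not the master clock the delay is dropped or doubled to drift back towards
   the master, and the result is clamped to at least 10 ms. */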
994 static double compute_frame_delay(double frame_current_pts, VideoState *is)
995 {
996 double actual_delay, delay, sync_threshold, ref_clock, diff;
997
998 /* compute nominal delay */
999 delay = frame_current_pts - is->frame_last_pts;
1000 if (delay <= 0 || delay >= 10.0) {
1001 /* if incorrect delay, use previous one */
1002 delay = is->frame_last_delay;
1003 } else {
1004 is->frame_last_delay = delay;
1005 }
1006 is->frame_last_pts = frame_current_pts;
1007
1008 /* update delay to follow master synchronisation source */
1009 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1010 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1011 /* if video is slave, we try to correct big delays by
1012 duplicating or deleting a frame */
1013 ref_clock = get_master_clock(is);
1014 diff = frame_current_pts - ref_clock;
1015
1016 /* skip or repeat frame. We take into account the
1017 delay to compute the threshold. I still don't know
1018 if it is the best guess */
1019 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1020 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1021 if (diff <= -sync_threshold)
1022 delay = 0;
1023 else if (diff >= sync_threshold)
1024 delay = 2 * delay;
1025 }
1026 }
1027
1028 is->frame_timer += delay;
 1029     /* compute the REAL delay (we need to do that to avoid
 1030        long-term errors) */
1031 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1032 if (actual_delay < 0.010) {
1033 /* XXX: should skip picture */
1034 actual_delay = 0.010;
1035 }
1036
1037 #if defined(DEBUG_SYNC)
1038 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1039 delay, actual_delay, frame_current_pts, -diff);
1040 #endif
1041
1042 return actual_delay;
1043 }
1044
1045 /* called to display each frame */
1046 static void video_refresh_timer(void *opaque)
1047 {
1048 VideoState *is = opaque;
1049 VideoPicture *vp;
1050
1051 SubPicture *sp, *sp2;
1052
1053 if (is->video_st) {
1054 if (is->pictq_size == 0) {
1055 /* if no picture, need to wait */
1056 schedule_refresh(is, 1);
1057 } else {
1058 /* dequeue the picture */
1059 vp = &is->pictq[is->pictq_rindex];
1060
1061 /* update current video pts */
1062 is->video_current_pts = vp->pts;
1063 is->video_current_pts_time = av_gettime();
1064
1065 /* launch timer for next picture */
1066 schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
1067
1068 if(is->subtitle_st) {
1069 if (is->subtitle_stream_changed) {
1070 SDL_LockMutex(is->subpq_mutex);
1071
1072 while (is->subpq_size) {
1073 free_subpicture(&is->subpq[is->subpq_rindex]);
1074
1075 /* update queue size and signal for next picture */
1076 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1077 is->subpq_rindex = 0;
1078
1079 is->subpq_size--;
1080 }
1081 is->subtitle_stream_changed = 0;
1082
1083 SDL_CondSignal(is->subpq_cond);
1084 SDL_UnlockMutex(is->subpq_mutex);
1085 } else {
1086 if (is->subpq_size > 0) {
1087 sp = &is->subpq[is->subpq_rindex];
1088
1089 if (is->subpq_size > 1)
1090 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1091 else
1092 sp2 = NULL;
1093
1094 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1095 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1096 {
1097 free_subpicture(sp);
1098
1099 /* update queue size and signal for next picture */
1100 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1101 is->subpq_rindex = 0;
1102
1103 SDL_LockMutex(is->subpq_mutex);
1104 is->subpq_size--;
1105 SDL_CondSignal(is->subpq_cond);
1106 SDL_UnlockMutex(is->subpq_mutex);
1107 }
1108 }
1109 }
1110 }
1111
1112 /* display picture */
1113 video_display(is);
1114
1115 /* update queue size and signal for next picture */
1116 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1117 is->pictq_rindex = 0;
1118
1119 SDL_LockMutex(is->pictq_mutex);
1120 is->pictq_size--;
1121 SDL_CondSignal(is->pictq_cond);
1122 SDL_UnlockMutex(is->pictq_mutex);
1123 }
1124 } else if (is->audio_st) {
1125 /* draw the next audio frame */
1126
1127 schedule_refresh(is, 40);
1128
1129 /* if only audio stream, then display the audio bars (better
 1130            than nothing, just to test the implementation) */
1131
1132 /* display picture */
1133 video_display(is);
1134 } else {
1135 schedule_refresh(is, 100);
1136 }
1137 if (show_status) {
1138 static int64_t last_time;
1139 int64_t cur_time;
1140 int aqsize, vqsize, sqsize;
1141 double av_diff;
1142
1143 cur_time = av_gettime();
1144 if (!last_time || (cur_time - last_time) >= 30000) {
1145 aqsize = 0;
1146 vqsize = 0;
1147 sqsize = 0;
1148 if (is->audio_st)
1149 aqsize = is->audioq.size;
1150 if (is->video_st)
1151 vqsize = is->videoq.size;
1152 if (is->subtitle_st)
1153 sqsize = is->subtitleq.size;
1154 av_diff = 0;
1155 if (is->audio_st && is->video_st)
1156 av_diff = get_audio_clock(is) - get_video_clock(is);
1157 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1158 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1159 fflush(stdout);
1160 last_time = cur_time;
1161 }
1162 }
1163 }
1164
 1165 /* allocate a picture (this needs to be done in the main thread to avoid
 1166    potential locking problems) */
1167 static void alloc_picture(void *opaque)
1168 {
1169 VideoState *is = opaque;
1170 VideoPicture *vp;
1171
1172 vp = &is->pictq[is->pictq_windex];
1173
1174 if (vp->bmp)
1175 SDL_FreeYUVOverlay(vp->bmp);
1176
1177 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1178 is->video_st->codec->height,
1179 SDL_YV12_OVERLAY,
1180 screen);
1181 vp->width = is->video_st->codec->width;
1182 vp->height = is->video_st->codec->height;
1183
1184 SDL_LockMutex(is->pictq_mutex);
1185 vp->allocated = 1;
1186 SDL_CondSignal(is->pictq_cond);
1187 SDL_UnlockMutex(is->pictq_mutex);
1188 }
1189
1190 /**
 1191  * Queue a decoded frame for display, converting it into the SDL overlay.
1192 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1193 */
1194 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1195 {
1196 VideoPicture *vp;
1197 int dst_pix_fmt;
1198
1199 /* wait until we have space to put a new picture */
1200 SDL_LockMutex(is->pictq_mutex);
1201 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1202 !is->videoq.abort_request) {
1203 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1204 }
1205 SDL_UnlockMutex(is->pictq_mutex);
1206
1207 if (is->videoq.abort_request)
1208 return -1;
1209
1210 vp = &is->pictq[is->pictq_windex];
1211
1212 /* alloc or resize hardware picture buffer */
1213 if (!vp->bmp ||
1214 vp->width != is->video_st->codec->width ||
1215 vp->height != is->video_st->codec->height) {
1216 SDL_Event event;
1217
1218 vp->allocated = 0;
1219
1220 /* the allocation must be done in the main thread to avoid
1221 locking problems */
1222 event.type = FF_ALLOC_EVENT;
1223 event.user.data1 = is;
1224 SDL_PushEvent(&event);
1225
1226 /* wait until the picture is allocated */
1227 SDL_LockMutex(is->pictq_mutex);
1228 while (!vp->allocated && !is->videoq.abort_request) {
1229 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1230 }
1231 SDL_UnlockMutex(is->pictq_mutex);
1232
1233 if (is->videoq.abort_request)
1234 return -1;
1235 }
1236
1237 /* if the frame is not skipped, then display it */
1238 if (vp->bmp) {
1239 AVPicture pict;
1240
1241 /* get a pointer on the bitmap */
1242 SDL_LockYUVOverlay (vp->bmp);
1243
1244 dst_pix_fmt = PIX_FMT_YUV420P;
1245 memset(&pict,0,sizeof(AVPicture));
1246 pict.data[0] = vp->bmp->pixels[0];
1247 pict.data[1] = vp->bmp->pixels[2];
1248 pict.data[2] = vp->bmp->pixels[1];
1249
1250 pict.linesize[0] = vp->bmp->pitches[0];
1251 pict.linesize[1] = vp->bmp->pitches[2];
1252 pict.linesize[2] = vp->bmp->pitches[1];
1253 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1254 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1255 is->video_st->codec->width, is->video_st->codec->height,
1256 is->video_st->codec->pix_fmt,
1257 is->video_st->codec->width, is->video_st->codec->height,
1258 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1259 if (is->img_convert_ctx == NULL) {
1260 fprintf(stderr, "Cannot initialize the conversion context\n");
1261 exit(1);
1262 }
1263 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1264 0, is->video_st->codec->height, pict.data, pict.linesize);
1265 /* update the bitmap content */
1266 SDL_UnlockYUVOverlay(vp->bmp);
1267
1268 vp->pts = pts;
1269
1270 /* now we can update the picture count */
1271 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1272 is->pictq_windex = 0;
1273 SDL_LockMutex(is->pictq_mutex);
1274 is->pictq_size++;
1275 SDL_UnlockMutex(is->pictq_mutex);
1276 }
1277 return 0;
1278 }
1279
1280 /**
1281 * compute the exact PTS for the picture if it is omitted in the stream
1282 * @param pts1 the dts of the pkt / pts of the frame
1283 */
1284 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1285 {
1286 double frame_delay, pts;
1287
1288 pts = pts1;
1289
1290 if (pts != 0) {
1291 /* update video clock with pts, if present */
1292 is->video_clock = pts;
1293 } else {
1294 pts = is->video_clock;
1295 }
1296 /* update video clock for next frame */
1297 frame_delay = av_q2d(is->video_st->codec->time_base);
1298 /* for MPEG2, the frame can be repeated, so we update the
1299 clock accordingly */
1300 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1301 is->video_clock += frame_delay;
1302
1303 #if defined(DEBUG_SYNC) && 0
1304 {
1305 int ftype;
1306 if (src_frame->pict_type == FF_B_TYPE)
1307 ftype = 'B';
1308 else if (src_frame->pict_type == FF_I_TYPE)
1309 ftype = 'I';
1310 else
1311 ftype = 'P';
1312 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1313 ftype, pts, pts1);
1314 }
1315 #endif
1316 return queue_picture(is, src_frame, pts);
1317 }
1318
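/* Video decoding thread: pulls packets from the video queue, decodes them and
   hands the resulting frames to output_picture2() for timestamping and queueing. */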
1319 static int video_thread(void *arg)
1320 {
1321 VideoState *is = arg;
1322 AVPacket pkt1, *pkt = &pkt1;
1323 int len1, got_picture;
1324 AVFrame *frame= avcodec_alloc_frame();
1325 double pts;
1326
1327 for(;;) {
1328 while (is->paused && !is->videoq.abort_request) {
1329 SDL_Delay(10);
1330 }
1331 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1332 break;
1333
1334 if(pkt->data == flush_pkt.data){
1335 avcodec_flush_buffers(is->video_st->codec);
1336 continue;
1337 }
1338
1339 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1340 this packet, if any */
1341 is->video_st->codec->reordered_opaque= pkt->pts;
1342 len1 = avcodec_decode_video2(is->video_st->codec,
1343 frame, &got_picture,
1344 pkt);
1345
1346 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1347 && frame->reordered_opaque != AV_NOPTS_VALUE)
1348 pts= frame->reordered_opaque;
1349 else if(pkt->dts != AV_NOPTS_VALUE)
1350 pts= pkt->dts;
1351 else
1352 pts= 0;
1353 pts *= av_q2d(is->video_st->time_base);
1354
1355 // if (len1 < 0)
1356 // break;
1357 if (got_picture) {
1358 if (output_picture2(is, frame, pts) < 0)
1359 goto the_end;
1360 }
1361 av_free_packet(pkt);
1362 if (step)
1363 if (cur_stream)
1364 stream_pause(cur_stream);
1365 }
1366 the_end:
1367 av_free(frame);
1368 return 0;
1369 }
1370
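/* Subtitle decoding thread: decodes subtitle packets and converts the palette of
   bitmap subtitles from RGBA to YUVA so they can be blended onto the video. */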
1371 static int subtitle_thread(void *arg)
1372 {
1373 VideoState *is = arg;
1374 SubPicture *sp;
1375 AVPacket pkt1, *pkt = &pkt1;
1376 int len1, got_subtitle;
1377 double pts;
1378 int i, j;
1379 int r, g, b, y, u, v, a;
1380
1381 for(;;) {
1382 while (is->paused && !is->subtitleq.abort_request) {
1383 SDL_Delay(10);
1384 }
1385 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1386 break;
1387
1388 if(pkt->data == flush_pkt.data){
1389 avcodec_flush_buffers(is->subtitle_st->codec);
1390 continue;
1391 }
1392 SDL_LockMutex(is->subpq_mutex);
1393 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1394 !is->subtitleq.abort_request) {
1395 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1396 }
1397 SDL_UnlockMutex(is->subpq_mutex);
1398
1399 if (is->subtitleq.abort_request)
1400 goto the_end;
1401
1402 sp = &is->subpq[is->subpq_windex];
1403
1404 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1405 this packet, if any */
1406 pts = 0;
1407 if (pkt->pts != AV_NOPTS_VALUE)
1408 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1409
1410 len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1411 &sp->sub, &got_subtitle,
1412 pkt);
1413 // if (len1 < 0)
1414 // break;
1415 if (got_subtitle && sp->sub.format == 0) {
1416 sp->pts = pts;
1417
1418 for (i = 0; i < sp->sub.num_rects; i++)
1419 {
1420 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1421 {
1422 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1423 y = RGB_TO_Y_CCIR(r, g, b);
1424 u = RGB_TO_U_CCIR(r, g, b, 0);
1425 v = RGB_TO_V_CCIR(r, g, b, 0);
1426 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1427 }
1428 }
1429
1430 /* now we can update the picture count */
1431 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1432 is->subpq_windex = 0;
1433 SDL_LockMutex(is->subpq_mutex);
1434 is->subpq_size++;
1435 SDL_UnlockMutex(is->subpq_mutex);
1436 }
1437 av_free_packet(pkt);
1438 // if (step)
1439 // if (cur_stream)
1440 // stream_pause(cur_stream);
1441 }
1442 the_end:
1443 return 0;
1444 }
1445
1446 /* copy samples for viewing in editor window */
1447 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1448 {
1449 int size, len, channels;
1450
1451 channels = is->audio_st->codec->channels;
1452
1453 size = samples_size / sizeof(short);
1454 while (size > 0) {
1455 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1456 if (len > size)
1457 len = size;
1458 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1459 samples += len;
1460 is->sample_array_index += len;
1461 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1462 is->sample_array_index = 0;
1463 size -= len;
1464 }
1465 }
1466
1467 /* return the new audio buffer size (samples can be added or deleted
 1468    to get better sync if the video or external clock is the master) */
1469 static int synchronize_audio(VideoState *is, short *samples,
1470 int samples_size1, double pts)
1471 {
1472 int n, samples_size;
1473 double ref_clock;
1474
1475 n = 2 * is->audio_st->codec->channels;
1476 samples_size = samples_size1;
1477
1478 /* if not master, then we try to remove or add samples to correct the clock */
1479 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1480 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1481 double diff, avg_diff;
1482 int wanted_size, min_size, max_size, nb_samples;
1483
1484 ref_clock = get_master_clock(is);
1485 diff = get_audio_clock(is) - ref_clock;
1486
1487 if (diff < AV_NOSYNC_THRESHOLD) {
1488 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1489 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1490 /* not enough measures to have a correct estimate */
1491 is->audio_diff_avg_count++;
1492 } else {
1493 /* estimate the A-V difference */
1494 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1495
1496 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1497 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1498 nb_samples = samples_size / n;
1499
1500 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1501 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1502 if (wanted_size < min_size)
1503 wanted_size = min_size;
1504 else if (wanted_size > max_size)
1505 wanted_size = max_size;
1506
 1507                 /* add or remove samples to correct the sync */
1508 if (wanted_size < samples_size) {
1509 /* remove samples */
1510 samples_size = wanted_size;
1511 } else if (wanted_size > samples_size) {
1512 uint8_t *samples_end, *q;
1513 int nb;
1514
1515 /* add samples */
1516 nb = (samples_size - wanted_size);
1517 samples_end = (uint8_t *)samples + samples_size - n;
1518 q = samples_end + n;
1519 while (nb > 0) {
1520 memcpy(q, samples_end, n);
1521 q += n;
1522 nb -= n;
1523 }
1524 samples_size = wanted_size;
1525 }
1526 }
1527 #if 0
1528 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1529 diff, avg_diff, samples_size - samples_size1,
1530 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1531 #endif
1532 }
1533 } else {
 1534             /* the difference is too large: probably initial PTS errors, so
 1535                reset the A-V filter */
1536 is->audio_diff_avg_count = 0;
1537 is->audio_diff_cum = 0;
1538 }
1539 }
1540
1541 return samples_size;
1542 }
1543
 1544 /* decode one audio frame and return its uncompressed size */
1545 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1546 {
1547 AVPacket *pkt_temp = &is->audio_pkt_temp;
1548 AVPacket *pkt = &is->audio_pkt;
1549 AVCodecContext *dec= is->audio_st->codec;
1550 int n, len1, data_size;
1551 double pts;
1552
1553 for(;;) {
1554 /* NOTE: the audio packet can contain several frames */
1555 while (pkt_temp->size > 0) {
1556 data_size = sizeof(is->audio_buf1);
1557 len1 = avcodec_decode_audio3(dec,
1558 (int16_t *)is->audio_buf1, &data_size,
1559 pkt_temp);
1560 if (len1 < 0) {
1561 /* if error, we skip the frame */
1562 pkt_temp->size = 0;
1563 break;
1564 }
1565
1566 pkt_temp->data += len1;
1567 pkt_temp->size -= len1;
1568 if (data_size <= 0)
1569 continue;
1570
1571 if (dec->sample_fmt != is->audio_src_fmt) {
1572 if (is->reformat_ctx)
1573 av_audio_convert_free(is->reformat_ctx);
1574 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1575 dec->sample_fmt, 1, NULL, 0);
1576 if (!is->reformat_ctx) {
1577 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1578 avcodec_get_sample_fmt_name(dec->sample_fmt),
1579 avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1580 break;
1581 }
1582 is->audio_src_fmt= dec->sample_fmt;
1583 }
1584
1585 if (is->reformat_ctx) {
1586 const void *ibuf[6]= {is->audio_buf1};
1587 void *obuf[6]= {is->audio_buf2};
1588 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1589 int ostride[6]= {2};
1590 int len= data_size/istride[0];
1591 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1592 printf("av_audio_convert() failed\n");
1593 break;
1594 }
1595 is->audio_buf= is->audio_buf2;
 1596             /* FIXME: existing code assumes that data_size equals framesize*channels*2
1597 remove this legacy cruft */
1598 data_size= len*2;
1599 }else{
1600 is->audio_buf= is->audio_buf1;
1601 }
1602
1603 /* if no pts, then compute it */
1604 pts = is->audio_clock;
1605 *pts_ptr = pts;
1606 n = 2 * dec->channels;
1607 is->audio_clock += (double)data_size /
1608 (double)(n * dec->sample_rate);
1609 #if defined(DEBUG_SYNC)
1610 {
1611 static double last_clock;
1612 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1613 is->audio_clock - last_clock,
1614 is->audio_clock, pts);
1615 last_clock = is->audio_clock;
1616 }
1617 #endif
1618 return data_size;
1619 }
1620
1621 /* free the current packet */
1622 if (pkt->data)
1623 av_free_packet(pkt);
1624
1625 if (is->paused || is->audioq.abort_request) {
1626 return -1;
1627 }
1628
1629 /* read next packet */
1630 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1631 return -1;
1632 if(pkt->data == flush_pkt.data){
1633 avcodec_flush_buffers(dec);
1634 continue;
1635 }
1636
1637 pkt_temp->data = pkt->data;
1638 pkt_temp->size = pkt->size;
1639
 1640         /* if a pts is available, update the audio clock with it */
1641 if (pkt->pts != AV_NOPTS_VALUE) {
1642 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1643 }
1644 }
1645 }
1646
 1647 /* get the amount of data remaining in the audio output buffer, in bytes. With
 1648    SDL we cannot get precise hardware buffer fullness information */
1649 static int audio_write_get_buf_size(VideoState *is)
1650 {
1651 return is->audio_buf_size - is->audio_buf_index;
1652 }
1653
1654
1655 /* prepare a new audio buffer */
1656 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1657 {
1658 VideoState *is = opaque;
1659 int audio_size, len1;
1660 double pts;
1661
1662 audio_callback_time = av_gettime();
1663
1664 while (len > 0) {
1665 if (is->audio_buf_index >= is->audio_buf_size) {
1666 audio_size = audio_decode_frame(is, &pts);
1667 if (audio_size < 0) {
1668 /* if error, just output silence */
1669 is->audio_buf = is->audio_buf1;
1670 is->audio_buf_size = 1024;
1671 memset(is->audio_buf, 0, is->audio_buf_size);
1672 } else {
1673 if (is->show_audio)
1674 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1675 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1676 pts);
1677 is->audio_buf_size = audio_size;
1678 }
1679 is->audio_buf_index = 0;
1680 }
1681 len1 = is->audio_buf_size - is->audio_buf_index;
1682 if (len1 > len)
1683 len1 = len;
1684 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1685 len -= len1;
1686 stream += len1;
1687 is->audio_buf_index += len1;
1688 }
1689 }
1690
1691 /* open a given stream. Return 0 if OK */
1692 static int stream_component_open(VideoState *is, int stream_index)
1693 {
1694 AVFormatContext *ic = is->ic;
1695 AVCodecContext *enc;
1696 AVCodec *codec;
1697 SDL_AudioSpec wanted_spec, spec;
1698
1699 if (stream_index < 0 || stream_index >= ic->nb_streams)
1700 return -1;
1701 enc = ic->streams[stream_index]->codec;
1702
1703 /* prepare audio output */
1704 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1705 if (enc->channels > 0) {
1706 enc->request_channels = FFMIN(2, enc->channels);
1707 } else {
1708 enc->request_channels = 2;
1709 }
1710 }
1711
1712 codec = avcodec_find_decoder(enc->codec_id);
1713 enc->debug_mv = debug_mv;
1714 enc->debug = debug;
1715 enc->workaround_bugs = workaround_bugs;
1716 enc->lowres = lowres;
1717 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1718 enc->idct_algo= idct;
1719 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1720 enc->skip_frame= skip_frame;
1721 enc->skip_idct= skip_idct;
1722 enc->skip_loop_filter= skip_loop_filter;
1723 enc->error_recognition= error_recognition;
1724 enc->error_concealment= error_concealment;
1725
1726 set_context_opts(enc, avcodec_opts[enc->codec_type], 0);
1727
1728 if (!codec ||
1729 avcodec_open(enc, codec) < 0)
1730 return -1;
1731
1732 /* prepare audio output */
1733 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1734 wanted_spec.freq = enc->sample_rate;
1735 wanted_spec.format = AUDIO_S16SYS;
1736 wanted_spec.channels = enc->channels;
1737 wanted_spec.silence = 0;
1738 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1739 wanted_spec.callback = sdl_audio_callback;
1740 wanted_spec.userdata = is;
1741 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1742 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1743 return -1;
1744 }
1745 is->audio_hw_buf_size = spec.size;
1746 is->audio_src_fmt= SAMPLE_FMT_S16;
1747 }
1748
1749 if(thread_count>1)
1750 avcodec_thread_init(enc, thread_count);
1751 enc->thread_count= thread_count;
1752 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
1753 switch(enc->codec_type) {
1754 case CODEC_TYPE_AUDIO:
1755 is->audio_stream = stream_index;
1756 is->audio_st = ic->streams[stream_index];
1757 is->audio_buf_size = 0;
1758 is->audio_buf_index = 0;
1759
1760 /* init averaging filter */
1761 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1762 is->audio_diff_avg_count = 0;
 1763         /* since we do not have precise enough audio FIFO fullness information,
 1764            we correct audio sync only if the error is larger than this threshold */
1765 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1766
1767 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1768 packet_queue_init(&is->audioq);
1769 SDL_PauseAudio(0);
1770 break;
1771 case CODEC_TYPE_VIDEO:
1772 is->video_stream = stream_index;
1773 is->video_st = ic->streams[stream_index];
1774
1775 is->frame_last_delay = 40e-3;
1776 is->frame_timer = (double)av_gettime() / 1000000.0;
1777 is->video_current_pts_time = av_gettime();
1778
1779 packet_queue_init(&is->videoq);
1780 is->video_tid = SDL_CreateThread(video_thread, is);
1781 break;
1782 case CODEC_TYPE_SUBTITLE:
1783 is->subtitle_stream = stream_index;
1784 is->subtitle_st = ic->streams[stream_index];
1785 packet_queue_init(&is->subtitleq);
1786
1787 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1788 break;
1789 default:
1790 break;
1791 }
1792 return 0;
1793 }
1794
1795 static void stream_component_close(VideoState *is, int stream_index)
1796 {
1797 AVFormatContext *ic = is->ic;
1798 AVCodecContext *enc;
1799
1800 if (stream_index < 0 || stream_index >= ic->nb_streams)
1801 return;
1802 enc = ic->streams[stream_index]->codec;
1803
1804 switch(enc->codec_type) {
1805 case CODEC_TYPE_AUDIO:
1806 packet_queue_abort(&is->audioq);
1807
1808 SDL_CloseAudio();
1809
1810 packet_queue_end(&is->audioq);
1811 if (is->reformat_ctx)
1812 av_audio_convert_free(is->reformat_ctx);
1813 break;
1814 case CODEC_TYPE_VIDEO:
1815 packet_queue_abort(&is->videoq);
1816
1817 /* note: we also signal this mutex to make sure we deblock the
1818 video thread in all cases */
1819 SDL_LockMutex(is->pictq_mutex);
1820 SDL_CondSignal(is->pictq_cond);
1821 SDL_UnlockMutex(is->pictq_mutex);
1822
1823 SDL_WaitThread(is->video_tid, NULL);
1824
1825 packet_queue_end(&is->videoq);
1826 break;
1827 case CODEC_TYPE_SUBTITLE:
1828 packet_queue_abort(&is->subtitleq);
1829
1830 /* note: we also signal this mutex to make sure we deblock the
 1831            subtitle thread in all cases */
1832 SDL_LockMutex(is->subpq_mutex);
1833 is->subtitle_stream_changed = 1;
1834
1835 SDL_CondSignal(is->subpq_cond);
1836 SDL_UnlockMutex(is->subpq_mutex);
1837
1838 SDL_WaitThread(is->subtitle_tid, NULL);
1839
1840 packet_queue_end(&is->subtitleq);
1841 break;
1842 default:
1843 break;
1844 }
1845
1846 ic->streams[stream_index]->discard = AVDISCARD_ALL;
1847 avcodec_close(enc);
1848 switch(enc->codec_type) {
1849 case CODEC_TYPE_AUDIO:
1850 is->audio_st = NULL;
1851 is->audio_stream = -1;
1852 break;
1853 case CODEC_TYPE_VIDEO:
1854 is->video_st = NULL;
1855 is->video_stream = -1;
1856 break;
1857 case CODEC_TYPE_SUBTITLE:
1858 is->subtitle_st = NULL;
1859 is->subtitle_stream = -1;
1860 break;
1861 default:
1862 break;
1863 }
1864 }
1865
1866 /* since we have only one decoding thread, we can use a global
1867 variable instead of a thread local variable */
1868 static VideoState *global_video_state;
1869
1870 static int decode_interrupt_cb(void)
1871 {
1872 return (global_video_state && global_video_state->abort_request);
1873 }
1874
1875 /* this thread gets the stream from the disk or the network */
1876 static int decode_thread(void *arg)
1877 {
1878 VideoState *is = arg;
1879 AVFormatContext *ic;
1880 int err, i, ret, video_index, audio_index, subtitle_index;
1881 AVPacket pkt1, *pkt = &pkt1;
1882 AVFormatParameters params, *ap = &params;
1883 int eof=0;
1884
1885 video_index = -1;
1886 audio_index = -1;
1887 subtitle_index = -1;
1888 is->video_stream = -1;
1889 is->audio_stream = -1;
1890 is->subtitle_stream = -1;
1891
1892 global_video_state = is;
1893 url_set_interrupt_cb(decode_interrupt_cb);
1894
1895 memset(ap, 0, sizeof(*ap));
1896
1897 ap->width = frame_width;
1898 ap->height= frame_height;
1899 ap->time_base= (AVRational){1, 25};
1900 ap->pix_fmt = frame_pix_fmt;
1901
1902 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1903 if (err < 0) {
1904 print_error(is->filename, err);
1905 ret = -1;
1906 goto fail;
1907 }
1908 is->ic = ic;
1909
1910 if(genpts)
1911 ic->flags |= AVFMT_FLAG_GENPTS;
1912
1913 err = av_find_stream_info(ic);
1914 if (err < 0) {
1915 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1916 ret = -1;
1917 goto fail;
1918 }
1919 if(ic->pb)
1920 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1921
1922 /* if seeking requested, we execute it */
1923 if (start_time != AV_NOPTS_VALUE) {
1924 int64_t timestamp;
1925
1926 timestamp = start_time;
1927 /* add the stream start time */
1928 if (ic->start_time != AV_NOPTS_VALUE)
1929 timestamp += ic->start_time;
1930 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
1931 if (ret < 0) {
1932 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1933 is->filename, (double)timestamp / AV_TIME_BASE);
1934 }
1935 }
1936
1937 for(i = 0; i < ic->nb_streams; i++) {
1938 AVCodecContext *enc = ic->streams[i]->codec;
1939 ic->streams[i]->discard = AVDISCARD_ALL;
1940 switch(enc->codec_type) {
1941 case CODEC_TYPE_AUDIO:
1942 if (wanted_audio_stream-- >= 0 && !audio_disable)
1943 audio_index = i;
1944 break;
1945 case CODEC_TYPE_VIDEO:
1946 if (wanted_video_stream-- >= 0 && !video_disable)
1947 video_index = i;
1948 break;
1949 case CODEC_TYPE_SUBTITLE:
1950 if (wanted_subtitle_stream-- >= 0 && !video_disable)
1951 subtitle_index = i;
1952 break;
1953 default:
1954 break;
1955 }
1956 }
1957 if (show_status) {
1958 dump_format(ic, 0, is->filename, 0);
1959 }
1960
1961 /* open the streams */
1962 if (audio_index >= 0) {
1963 stream_component_open(is, audio_index);
1964 }
1965
1966 if (video_index >= 0) {
1967 stream_component_open(is, video_index);
1968 } else {
1969 if (!display_disable)
1970 is->show_audio = 1;
1971 }
1972
1973 if (subtitle_index >= 0) {
1974 stream_component_open(is, subtitle_index);
1975 }
1976
1977 if (is->video_stream < 0 && is->audio_stream < 0) {
1978 fprintf(stderr, "%s: could not open codecs\n", is->filename);
1979 ret = -1;
1980 goto fail;
1981 }
1982
1983 for(;;) {
1984 if (is->abort_request)
1985 break;
1986 if (is->paused != is->last_paused) {
1987 is->last_paused = is->paused;
1988 if (is->paused)
1989 av_read_pause(ic);
1990 else
1991 av_read_play(ic);
1992 }
1993 #if CONFIG_RTSP_DEMUXER
1994 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
1995 /* wait 10 ms to avoid trying to get another packet */
1996 /* XXX: horrible */
1997 SDL_Delay(10);
1998 continue;
1999 }
2000 #endif
2001 if (is->seek_req) {
2002 int64_t seek_target= is->seek_pos;
2003 int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2004 int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
 2005             //FIXME the +-2 is due to rounding not being done in the correct direction
 2006             //      when generating the seek_pos/seek_rel variables
2007
2008 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2009 if (ret < 0) {
2010 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2011 }else{
2012 if (is->audio_stream >= 0) {
2013 packet_queue_flush(&is->audioq);
2014 packet_queue_put(&is->audioq, &flush_pkt);
2015 }
2016 if (is->subtitle_stream >= 0) {
2017 packet_queue_flush(&is->subtitleq);
2018 packet_queue_put(&is->subtitleq, &flush_pkt);
2019 }
2020 if (is->video_stream >= 0) {
2021 packet_queue_flush(&is->videoq);
2022 packet_queue_put(&is->videoq, &flush_pkt);
2023 }
2024 }
2025 is->seek_req = 0;
2026 eof= 0;
2027 }
2028
2029 /* if the queues are full, no need to read more */
2030 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2031 is->videoq.size > MAX_VIDEOQ_SIZE ||
2032 is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
2033 /* wait 10 ms */
2034 SDL_Delay(10);
2035 continue;
2036 }
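/* At end of file keep feeding an empty packet to the video queue so a decoder
   with delay can return its remaining buffered frames, then idle until a seek
   or an abort request arrives. */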
2037 if(url_feof(ic->pb) || eof) {
2038 if(is->video_stream >= 0){
2039 av_init_packet(pkt);
2040 pkt->data=NULL;
2041 pkt->size=0;
2042 pkt->stream_index= is->video_stream;
2043 packet_queue_put(&is->videoq, pkt);
2044 }
2045 SDL_Delay(10);
2046 continue;
2047 }
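/* Read the next packet: remember end of file, give up on real I/O errors, and
   otherwise route the packet to the queue of the stream it belongs to. */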
2048 ret = av_read_frame(ic, pkt);
2049 if (ret < 0) {
2050 if (ret == AVERROR_EOF)
2051 eof=1;
2052 if (url_ferror(ic->pb))
2053 break;
2054 SDL_Delay(100); /* wait for user event */
2055 continue;
2056 }
2057 if (pkt->stream_index == is->audio_stream) {
2058 packet_queue_put(&is->audioq, pkt);
2059 } else if (pkt->stream_index == is->video_stream) {
2060 packet_queue_put(&is->videoq, pkt);
2061 } else if (pkt->stream_index == is->subtitle_stream) {
2062 packet_queue_put(&is->subtitleq, pkt);
2063 } else {
2064 av_free_packet(pkt);
2065 }
2066 }
2067 /* wait until the end */
2068 while (!is->abort_request) {
2069 SDL_Delay(100);
2070 }
2071
2072 ret = 0;
2073 fail:
2074 /* disable interrupting */
2075 global_video_state = NULL;
2076
2077 /* close each stream */
2078 if (is->audio_stream >= 0)
2079 stream_component_close(is, is->audio_stream);
2080 if (is->video_stream >= 0)
2081 stream_component_close(is, is->video_stream);
2082 if (is->subtitle_stream >= 0)
2083 stream_component_close(is, is->subtitle_stream);
2084 if (is->ic) {
2085 av_close_input_file(is->ic);
2086 is->ic = NULL; /* safety */
2087 }
2088 url_set_interrupt_cb(NULL);
2089
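/* If the thread is exiting because of an error rather than an abort request,
   tell the event loop to quit. */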
2090 if (ret != 0) {
2091 SDL_Event event;
2092
2093 event.type = FF_QUIT_EVENT;
2094 event.user.data1 = is;
2095 SDL_PushEvent(&event);
2096 }
2097 return 0;
2098 }
2099
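/* Allocate the VideoState for 'filename', create the mutexes and condition
   variables protecting the picture and subtitle queues, schedule the first
   display refresh (40 ms, ~25 fps) and start the demuxing thread. */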
2100 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2101 {
2102 VideoState *is;
2103
2104 is = av_mallocz(sizeof(VideoState));
2105 if (!is)
2106 return NULL;
2107 av_strlcpy(is->filename, filename, sizeof(is->filename));
2108 is->iformat = iformat;
2109 is->ytop = 0;
2110 is->xleft = 0;
2111
2112 /* start video display */
2113 is->pictq_mutex = SDL_CreateMutex();
2114 is->pictq_cond = SDL_CreateCond();
2115
2116 is->subpq_mutex = SDL_CreateMutex();
2117 is->subpq_cond = SDL_CreateCond();
2118
2119 /* add the refresh timer to draw the picture */
2120 schedule_refresh(is, 40);
2121
2122 is->av_sync_type = av_sync_type;
2123 is->parse_tid = SDL_CreateThread(decode_thread, is);
2124 if (!is->parse_tid) {
2125 av_free(is);
2126 return NULL;
2127 }
2128 return is;
2129 }
2130
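/* Ask the demuxing thread to stop and wait for it, then free the SDL overlays
   and the synchronization primitives owned by this VideoState. */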
2131 static void stream_close(VideoState *is)
2132 {
2133 VideoPicture *vp;
2134 int i;
2135 /* XXX: use a special url_shutdown call to abort parse cleanly */
2136 is->abort_request = 1;
2137 SDL_WaitThread(is->parse_tid, NULL);
2138
2139 /* free all pictures */
2140 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2141 vp = &is->pictq[i];
2142 if (vp->bmp) {
2143 SDL_FreeYUVOverlay(vp->bmp);
2144 vp->bmp = NULL;
2145 }
2146 }
2147 SDL_DestroyMutex(is->pictq_mutex);
2148 SDL_DestroyCond(is->pictq_cond);
2149 SDL_DestroyMutex(is->subpq_mutex);
2150 SDL_DestroyCond(is->subpq_cond);
2151 if (is->img_convert_ctx)
2152 sws_freeContext(is->img_convert_ctx);
2153 av_free(is);
2154 }
2155
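/* Switch to the next stream of the given type, wrapping around at the end.
   For subtitles the wrap-around goes through -1, which switches subtitles off;
   audio streams with no sample rate or channel count are skipped. */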
2156 static void stream_cycle_channel(VideoState *is, int codec_type)
2157 {
2158 AVFormatContext *ic = is->ic;
2159 int start_index, stream_index;
2160 AVStream *st;
2161
2162 if (codec_type == CODEC_TYPE_VIDEO)
2163 start_index = is->video_stream;
2164 else if (codec_type == CODEC_TYPE_AUDIO)
2165 start_index = is->audio_stream;
2166 else
2167 start_index = is->subtitle_stream;
2168 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2169 return;
2170 stream_index = start_index;
2171 for(;;) {
2172 if (++stream_index >= is->ic->nb_streams)
2173 {
2174 if (codec_type == CODEC_TYPE_SUBTITLE)
2175 {
2176 stream_index = -1;
2177 goto the_end;
2178 } else
2179 stream_index = 0;
2180 }
2181 if (stream_index == start_index)
2182 return;
2183 st = ic->streams[stream_index];
2184 if (st->codec->codec_type == codec_type) {
2185 /* check that parameters are OK */
2186 switch(codec_type) {
2187 case CODEC_TYPE_AUDIO:
2188 if (st->codec->sample_rate != 0 &&
2189 st->codec->channels != 0)
2190 goto the_end;
2191 break;
2192 case CODEC_TYPE_VIDEO:
2193 case CODEC_TYPE_SUBTITLE:
2194 goto the_end;
2195 default:
2196 break;
2197 }
2198 }
2199 }
2200 the_end:
2201 stream_component_close(is, start_index);
2202 stream_component_open(is, stream_index);
2203 }
2204
2205
2206 static void toggle_full_screen(void)
2207 {
2208 is_full_screen = !is_full_screen;
2209 if (!fs_screen_width) {
2210 /* use default SDL method */
2211 // SDL_WM_ToggleFullScreen(screen);
2212 }
2213 video_open(cur_stream);
2214 }
2215
2216 static void toggle_pause(void)
2217 {
2218 if (cur_stream)
2219 stream_pause(cur_stream);
2220 step = 0;
2221 }
2222
2223 static void step_to_next_frame(void)
2224 {
2225 if (cur_stream) {
2226 /* if the stream is paused, unpause it, then step */
2227 if (cur_stream->paused)
2228 stream_pause(cur_stream);
2229 }
2230 step = 1;
2231 }
2232
2233 static void do_exit(void)
2234 {
2235 int i;
2236 if (cur_stream) {
2237 stream_close(cur_stream);
2238 cur_stream = NULL;
2239 }
2240 for (i = 0; i < CODEC_TYPE_NB; i++)
2241 av_free(avcodec_opts[i]);
2242 av_free(avformat_opts);
2243 av_free(sws_opts);
2244 if (show_status)
2245 printf("\n");
2246 SDL_Quit();
2247 exit(0);
2248 }
2249
2250 static void toggle_audio_display(void)
2251 {
2252 if (cur_stream) {
2253 cur_stream->show_audio = !cur_stream->show_audio;
2254 }
2255 }
2256
2257 /* handle events sent by the GUI and the decoding threads */
2258 static void event_loop(void)
2259 {
2260 SDL_Event event;
2261 double incr, pos, frac;
2262
2263 for(;;) {
2264 SDL_WaitEvent(&event);
2265 switch(event.type) {
2266 case SDL_KEYDOWN:
2267 switch(event.key.keysym.sym) {
2268 case SDLK_ESCAPE:
2269 case SDLK_q:
2270 do_exit();
2271 break;
2272 case SDLK_f:
2273 toggle_full_screen();
2274 break;
2275 case SDLK_p:
2276 case SDLK_SPACE:
2277 toggle_pause();
2278 break;
2279 case SDLK_s: //S: Step to next frame
2280 step_to_next_frame();
2281 break;
2282 case SDLK_a:
2283 if (cur_stream)
2284 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2285 break;
2286 case SDLK_v:
2287 if (cur_stream)
2288 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2289 break;
2290 case SDLK_t:
2291 if (cur_stream)
2292 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2293 break;
2294 case SDLK_w:
2295 toggle_audio_display();
2296 break;
2297 case SDLK_LEFT:
2298 incr = -10.0;
2299 goto do_seek;
2300 case SDLK_RIGHT:
2301 incr = 10.0;
2302 goto do_seek;
2303 case SDLK_UP:
2304 incr = 60.0;
2305 goto do_seek;
2306 case SDLK_DOWN:
2307 incr = -60.0;
2308 do_seek:
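/* In byte mode the time increment is converted into a byte offset using the
   container bit rate (bits -> bytes), falling back to an assumed 180000
   bytes/s; in time mode the target is the master clock plus the increment. */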
2309 if (cur_stream) {
2310 if (seek_by_bytes) {
2311 pos = url_ftell(cur_stream->ic->pb);
2312 if (cur_stream->ic->bit_rate)
2313 incr *= cur_stream->ic->bit_rate / 8.0; /* bits -> bytes */
2314 else
2315 incr *= 180000.0;
2316 pos += incr;
2317 stream_seek(cur_stream, pos, incr);
2318 } else {
2319 pos = get_master_clock(cur_stream);
2320 pos += incr;
2321 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE));
2322 }
2323 }
2324 break;
2325 default:
2326 break;
2327 }
2328 break;
2329 case SDL_MOUSEBUTTONDOWN:
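/* A mouse click seeks to the fraction of the total duration given by the
   click's x position relative to the window width. */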
2330 if (cur_stream) {
2331 int64_t ts;
2332 int ns, hh, mm, ss;
2333 int tns, thh, tmm, tss;
2334 tns = cur_stream->ic->duration/1000000LL;
2335 thh = tns/3600;
2336 tmm = (tns%3600)/60;
2337 tss = (tns%60);
2338 frac = (double)event.button.x/(double)cur_stream->width;
2339 ns = frac*tns;
2340 hh = ns/3600;
2341 mm = (ns%3600)/60;
2342 ss = (ns%60);
2343 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2344 hh, mm, ss, thh, tmm, tss);
2345 ts = frac*cur_stream->ic->duration;
2346 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2347 ts += cur_stream->ic->start_time;
2348 stream_seek(cur_stream, ts, 0);
2349 }
2350 break;
2351 case SDL_VIDEORESIZE:
2352 if (cur_stream) {
2353 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2354 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2355 screen_width = cur_stream->width = event.resize.w;
2356 screen_height= cur_stream->height= event.resize.h;
2357 }
2358 break;
2359 case SDL_QUIT:
2360 case FF_QUIT_EVENT:
2361 do_exit();
2362 break;
2363 case FF_ALLOC_EVENT:
2364 video_open(event.user.data1);
2365 alloc_picture(event.user.data1);
2366 break;
2367 case FF_REFRESH_EVENT:
2368 video_refresh_timer(event.user.data1);
2369 break;
2370 default:
2371 break;
2372 }
2373 }
2374 }
2375
2376 static void opt_frame_size(const char *arg)
2377 {
2378 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2379 fprintf(stderr, "Incorrect frame size\n");
2380 exit(1);
2381 }
2382 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2383 fprintf(stderr, "Frame size must be a multiple of 2\n");
2384 exit(1);
2385 }
2386 }
2387
2388 static int opt_width(const char *opt, const char *arg)
2389 {
2390 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2391 return 0;
2392 }
2393
2394 static int opt_height(const char *opt, const char *arg)
2395 {
2396 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2397 return 0;
2398 }
2399
2400 static void opt_format(const char *arg)
2401 {
2402 file_iformat = av_find_input_format(arg);
2403 if (!file_iformat) {
2404 fprintf(stderr, "Unknown input format: %s\n", arg);
2405 exit(1);
2406 }
2407 }
2408
2409 static void opt_frame_pix_fmt(const char *arg)
2410 {
2411 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2412 }
2413
2414 static int opt_sync(const char *opt, const char *arg)
2415 {
2416 if (!strcmp(arg, "audio"))
2417 av_sync_type = AV_SYNC_AUDIO_MASTER;
2418 else if (!strcmp(arg, "video"))
2419 av_sync_type = AV_SYNC_VIDEO_MASTER;
2420 else if (!strcmp(arg, "ext"))
2421 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2422 else {
2423 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2424 exit(1);
2425 }
2426 return 0;
2427 }
2428
2429 static int opt_seek(const char *opt, const char *arg)
2430 {
2431 start_time = parse_time_or_die(opt, arg, 1);
2432 return 0;
2433 }
2434
2435 static int opt_debug(const char *opt, const char *arg)
2436 {
2437 av_log_set_level(99);
2438 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2439 return 0;
2440 }
2441
2442 static int opt_vismv(const char *opt, const char *arg)
2443 {
2444 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2445 return 0;
2446 }
2447
2448 static int opt_thread_count(const char *opt, const char *arg)
2449 {
2450 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2451 #if !HAVE_THREADS
2452 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2453 #endif
2454 return 0;
2455 }
2456
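/* Command line options, parsed by parse_options() in main(); the OPT_* flag
   values are defined in cmdutils.h. */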
2457 static const OptionDef options[] = {
2458 #include "cmdutils_common_opts.h"
2459 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2460 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2461 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2462 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2463 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2464 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2465 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
2466 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
2467 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
2468 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2469 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2470 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2471 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2472 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2473 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2474 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2475 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2476 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2477 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2478 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2479 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2480 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2481 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2482 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2483 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2484 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2485 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
2486 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2487 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2488 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2489 { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2490 { NULL, },
2491 };
2492
2493 static void show_usage(void)
2494 {
2495 printf("Simple media player\n");
2496 printf("usage: ffplay [options] input_file\n");
2497 printf("\n");
2498 }
2499
2500 static void show_help(void)
2501 {
2502 show_usage();
2503 show_help_options(options, "Main options:\n",
2504 OPT_EXPERT, 0);
2505 show_help_options(options, "\nAdvanced options:\n",
2506 OPT_EXPERT, OPT_EXPERT);
2507 printf("\nWhile playing:\n"
2508 "q, ESC quit\n"
2509 "f toggle full screen\n"
2510 "p, SPC pause\n"
2511 "a cycle audio channel\n"
2512 "v cycle video channel\n"
2513 "t cycle subtitle channel\n"
2514 "w show audio waves\n"
2515 "left/right seek backward/forward 10 seconds\n"
2516 "down/up seek backward/forward 1 minute\n"
2517 "mouse click seek to percentage in file corresponding to fraction of width\n"
2518 );
2519 }
2520
2521 static void opt_input_file(const char *filename)
2522 {
2523 if (!strcmp(filename, "-"))
2524 filename = "pipe:";
2525 input_filename = filename;
2526 }
2527
2528 /* entry point */
2529 int main(int argc, char **argv)
2530 {
2531 int flags, i;
2532
2533 /* register all codecs, demuxers and protocols */
2534 avcodec_register_all();
2535 avdevice_register_all();
2536 av_register_all();
2537
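/* Allocate the contexts that collect AVOption-based defaults given on the
   command line (see opt_default() and the "default" catch-all option). */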
2538 for(i=0; i<CODEC_TYPE_NB; i++){
2539 avcodec_opts[i]= avcodec_alloc_context2(i);
2540 }
2541 avformat_opts = avformat_alloc_context();
2542 sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
2543
2544 show_banner();
2545
2546 parse_options(argc, argv, options, opt_input_file);
2547
2548 if (!input_filename) {
2549 show_usage();
2550 fprintf(stderr, "An input file must be specified\n");
2551 fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
2552 exit(1);
2553 }
2554
2555 if (display_disable) {
2556 video_disable = 1;
2557 }
2558 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2559 #if !defined(__MINGW32__) && !defined(__APPLE__)
2560 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2561 #endif
2562 if (SDL_Init (flags)) {
2563 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2564 exit(1);
2565 }
2566
2567 if (!display_disable) {
2568 #if HAVE_SDL_VIDEO_SIZE
2569 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2570 fs_screen_width = vi->current_w;
2571 fs_screen_height = vi->current_h;
2572 #endif
2573 }
2574
2575 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2576 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2577 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2578 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2579
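/* flush_pkt is a sentinel packet: queueing it right after packet_queue_flush()
   tells the decoding threads that a seek happened and their codec buffers must
   be reset. */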
2580 av_init_packet(&flush_pkt);
2581 flush_pkt.data= "FLUSH";
2582
2583 cur_stream = stream_open(input_filename, file_iformat);
2584
2585 event_loop();
2586
2587 /* never returns */
2588
2589 return 0;
2590 }