correct initial timestamps which have AV_NOPTS_VALUE
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include <math.h>
23 #include <limits.h>
24 #include "avformat.h"
25 #include "swscale.h"
26 #include "avstring.h"
27
28 #include "version.h"
29 #include "cmdutils.h"
30
31 #include <SDL.h>
32 #include <SDL_thread.h>
33
34 #ifdef __MINGW32__
35 #undef main /* We don't want SDL to override our main() */
36 #endif
37
38 #undef exit
39
40 //#define DEBUG_SYNC
41
42 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
43 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
44 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
45
46 /* SDL audio buffer size, in samples. Should be small to have precise
47 A/V sync as SDL does not have hardware buffer fullness info. */
48 #define SDL_AUDIO_BUFFER_SIZE 1024
49
50 /* no AV sync correction is done if below the AV sync threshold */
51 #define AV_SYNC_THRESHOLD 0.01
52 /* no AV correction is done if too big error */
53 #define AV_NOSYNC_THRESHOLD 10.0
54
55 /* maximum audio speed change to get correct sync */
56 #define SAMPLE_CORRECTION_PERCENT_MAX 10
57
58 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
59 #define AUDIO_DIFF_AVG_NB 20
60
61 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
62 #define SAMPLE_ARRAY_SIZE (2*65536)
63
64 static int sws_flags = SWS_BICUBIC;
65
/* Thread-safe FIFO of demuxed packets, shared between the demuxer thread
   (producer) and the decoder threads (consumers). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;  /* head/tail of singly linked list */
    int nb_packets;      /* number of queued packets */
    int size;            /* total payload bytes queued (used for the MAX_*Q_SIZE limits) */
    int abort_request;   /* when set, blocked readers in packet_queue_get() return -1 */
    SDL_mutex *mutex;    /* protects all fields above */
    SDL_cond *cond;      /* signalled on put and on abort */
} PacketQueue;
74
75 #define VIDEO_PICTURE_QUEUE_SIZE 1
76 #define SUBPICTURE_QUEUE_SIZE 4
77
/* One decoded video frame waiting to be displayed. */
typedef struct VideoPicture {
    double pts; ///<presentation time stamp for this picture
    SDL_Overlay *bmp;    /* YUV overlay holding the frame pixels (NULL until allocated) */
    int width, height; /* source height & width */
    int allocated;       /* set once bmp has been (re)created by the main thread */
} VideoPicture;
84
/* One decoded subtitle waiting to be blended into the video. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;      /* decoded subtitle payload (rects, display timing) */
} SubPicture;
89
/* Master clock selection for A/V synchronisation (see get_master_clock()). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
95
/* Global state of one playback session: demuxer context, per-stream
   decoding state, packet/picture queues and the clocks shared between
   the parse, video, subtitle and audio-callback threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;      /* demuxer thread */
    SDL_Thread *video_tid;      /* video decoder thread */
    AVInputFormat *iformat;     /* forced input format, if any */
    int no_background;
    int abort_request;          /* tells the worker threads to quit */
    int paused;
    int last_paused;
    int seek_req;               /* a seek is pending */
    int seek_flags;             /* AVSEEK_FLAG_* for the pending seek */
    int64_t seek_pos;           /* target position of the pending seek */
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;           /* index of the audio stream in ic */

    int av_sync_type;           /* one of the AV_SYNC_* constants */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;         /* pts of the last decoded audio data */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;
    uint8_t *audio_pkt_data;    /* read cursor into audio_pkt */
    int audio_pkt_size;         /* bytes left to decode in audio_pkt */

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];  /* ring buffer feeding the waveform display */
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;   /* subtitle decoder thread */
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];  /* decoded subtitles ring buffer */
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;         /* wall-clock time at which the next frame should be shown */
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];  /* decoded frames ring buffer */
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;  /* display window geometry */
} VideoState;
166
167 void show_help(void);
168 static int audio_write_get_buf_size(VideoState *is);
169
170 /* options specified by the user */
171 static AVInputFormat *file_iformat;
172 static const char *input_filename;
173 static int fs_screen_width;
174 static int fs_screen_height;
175 static int screen_width = 0;
176 static int screen_height = 0;
177 static int frame_width = 0;
178 static int frame_height = 0;
179 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
180 static int audio_disable;
181 static int video_disable;
182 static int wanted_audio_stream= 0;
183 static int wanted_video_stream= 0;
184 static int seek_by_bytes;
185 static int display_disable;
186 static int show_status;
187 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
188 static int64_t start_time = AV_NOPTS_VALUE;
189 static int debug = 0;
190 static int debug_mv = 0;
191 static int step = 0;
192 static int thread_count = 1;
193 static int workaround_bugs = 1;
194 static int fast = 0;
195 static int genpts = 0;
196 static int lowres = 0;
197 static int idct = FF_IDCT_AUTO;
198 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
199 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
200 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
201 static int error_resilience = FF_ER_CAREFUL;
202 static int error_concealment = 3;
203 static int decoder_reorder_pts= 0;
204
205 /* current context */
206 static int is_full_screen;
207 static VideoState *cur_stream;
208 static int64_t audio_callback_time;
209
210 AVPacket flush_pkt;
211
212 #define FF_ALLOC_EVENT (SDL_USEREVENT)
213 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
214 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
215
216 SDL_Surface *screen;
217
218 /* packet queue handling */
219 static void packet_queue_init(PacketQueue *q)
220 {
221 memset(q, 0, sizeof(PacketQueue));
222 q->mutex = SDL_CreateMutex();
223 q->cond = SDL_CreateCond();
224 }
225
226 static void packet_queue_flush(PacketQueue *q)
227 {
228 AVPacketList *pkt, *pkt1;
229
230 SDL_LockMutex(q->mutex);
231 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
232 pkt1 = pkt->next;
233 av_free_packet(&pkt->pkt);
234 av_freep(&pkt);
235 }
236 q->last_pkt = NULL;
237 q->first_pkt = NULL;
238 q->nb_packets = 0;
239 q->size = 0;
240 SDL_UnlockMutex(q->mutex);
241 }
242
243 static void packet_queue_end(PacketQueue *q)
244 {
245 packet_queue_flush(q);
246 SDL_DestroyMutex(q->mutex);
247 SDL_DestroyCond(q->cond);
248 }
249
250 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
251 {
252 AVPacketList *pkt1;
253
254 /* duplicate the packet */
255 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
256 return -1;
257
258 pkt1 = av_malloc(sizeof(AVPacketList));
259 if (!pkt1)
260 return -1;
261 pkt1->pkt = *pkt;
262 pkt1->next = NULL;
263
264
265 SDL_LockMutex(q->mutex);
266
267 if (!q->last_pkt)
268
269 q->first_pkt = pkt1;
270 else
271 q->last_pkt->next = pkt1;
272 q->last_pkt = pkt1;
273 q->nb_packets++;
274 q->size += pkt1->pkt.size;
275 /* XXX: should duplicate packet data in DV case */
276 SDL_CondSignal(q->cond);
277
278 SDL_UnlockMutex(q->mutex);
279 return 0;
280 }
281
282 static void packet_queue_abort(PacketQueue *q)
283 {
284 SDL_LockMutex(q->mutex);
285
286 q->abort_request = 1;
287
288 SDL_CondSignal(q->cond);
289
290 SDL_UnlockMutex(q->mutex);
291 }
292
293 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
294 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
295 {
296 AVPacketList *pkt1;
297 int ret;
298
299 SDL_LockMutex(q->mutex);
300
301 for(;;) {
302 if (q->abort_request) {
303 ret = -1;
304 break;
305 }
306
307 pkt1 = q->first_pkt;
308 if (pkt1) {
309 q->first_pkt = pkt1->next;
310 if (!q->first_pkt)
311 q->last_pkt = NULL;
312 q->nb_packets--;
313 q->size -= pkt1->pkt.size;
314 *pkt = pkt1->pkt;
315 av_free(pkt1);
316 ret = 1;
317 break;
318 } else if (!block) {
319 ret = 0;
320 break;
321 } else {
322 SDL_CondWait(q->cond, q->mutex);
323 }
324 }
325 SDL_UnlockMutex(q->mutex);
326 return ret;
327 }
328
329 static inline void fill_rectangle(SDL_Surface *screen,
330 int x, int y, int w, int h, int color)
331 {
332 SDL_Rect rect;
333 rect.x = x;
334 rect.y = y;
335 rect.w = w;
336 rect.h = h;
337 SDL_FillRect(screen, &rect, color);
338 }
339
#if 0
/* draw only the border of a rectangle */
/* NOTE(review): disabled; referenced only from the commented-out
   background fill in video_image_display(). Kept for when that path
   is re-enabled. Paints the four letterbox bands around the video area. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
377
378
379
/* Fixed-point RGB -> CCIR-601 (video-range) YUV conversion helpers,
   with SCALEBITS fractional bits. */
#define SCALEBITS 10
#define ONE_HALF  (1 << (SCALEBITS - 1))
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))

#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* Blend 'newp' over 'oldp' with alpha 'a'; 's' extra shift bits allow
   blending pre-summed values (2 or 4 accumulated samples). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB value from memory. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up one paletted pixel and unpack its AYUV palette entry. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack an AYUV quad back into 32 bits. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel in the (paletted) subtitle bitmap */
#define BPP 1
424
425 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
426 {
427 int wrap, wrap3, width2, skip2;
428 int y, u, v, a, u1, v1, a1, w, h;
429 uint8_t *lum, *cb, *cr;
430 const uint8_t *p;
431 const uint32_t *pal;
432
433 lum = dst->data[0] + rect->y * dst->linesize[0];
434 cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
435 cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];
436
437 width2 = (rect->w + 1) >> 1;
438 skip2 = rect->x >> 1;
439 wrap = dst->linesize[0];
440 wrap3 = rect->linesize;
441 p = rect->bitmap;
442 pal = rect->rgba_palette; /* Now in YCrCb! */
443
444 if (rect->y & 1) {
445 lum += rect->x;
446 cb += skip2;
447 cr += skip2;
448
449 if (rect->x & 1) {
450 YUVA_IN(y, u, v, a, p, pal);
451 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
452 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
453 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
454 cb++;
455 cr++;
456 lum++;
457 p += BPP;
458 }
459 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
460 YUVA_IN(y, u, v, a, p, pal);
461 u1 = u;
462 v1 = v;
463 a1 = a;
464 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
465
466 YUVA_IN(y, u, v, a, p + BPP, pal);
467 u1 += u;
468 v1 += v;
469 a1 += a;
470 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
471 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
472 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
473 cb++;
474 cr++;
475 p += 2 * BPP;
476 lum += 2;
477 }
478 if (w) {
479 YUVA_IN(y, u, v, a, p, pal);
480 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
482 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
483 }
484 p += wrap3 + (wrap3 - rect->w * BPP);
485 lum += wrap + (wrap - rect->w - rect->x);
486 cb += dst->linesize[1] - width2 - skip2;
487 cr += dst->linesize[2] - width2 - skip2;
488 }
489 for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
490 lum += rect->x;
491 cb += skip2;
492 cr += skip2;
493
494 if (rect->x & 1) {
495 YUVA_IN(y, u, v, a, p, pal);
496 u1 = u;
497 v1 = v;
498 a1 = a;
499 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
500 p += wrap3;
501 lum += wrap;
502 YUVA_IN(y, u, v, a, p, pal);
503 u1 += u;
504 v1 += v;
505 a1 += a;
506 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
507 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
508 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
509 cb++;
510 cr++;
511 p += -wrap3 + BPP;
512 lum += -wrap + 1;
513 }
514 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
515 YUVA_IN(y, u, v, a, p, pal);
516 u1 = u;
517 v1 = v;
518 a1 = a;
519 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
520
521 YUVA_IN(y, u, v, a, p, pal);
522 u1 += u;
523 v1 += v;
524 a1 += a;
525 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
526 p += wrap3;
527 lum += wrap;
528
529 YUVA_IN(y, u, v, a, p, pal);
530 u1 += u;
531 v1 += v;
532 a1 += a;
533 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
534
535 YUVA_IN(y, u, v, a, p, pal);
536 u1 += u;
537 v1 += v;
538 a1 += a;
539 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
540
541 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
542 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
543
544 cb++;
545 cr++;
546 p += -wrap3 + 2 * BPP;
547 lum += -wrap + 2;
548 }
549 if (w) {
550 YUVA_IN(y, u, v, a, p, pal);
551 u1 = u;
552 v1 = v;
553 a1 = a;
554 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
555 p += wrap3;
556 lum += wrap;
557 YUVA_IN(y, u, v, a, p, pal);
558 u1 += u;
559 v1 += v;
560 a1 += a;
561 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
562 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
563 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
564 cb++;
565 cr++;
566 p += -wrap3 + BPP;
567 lum += -wrap + 1;
568 }
569 p += wrap3 + (wrap3 - rect->w * BPP);
570 lum += wrap + (wrap - rect->w - rect->x);
571 cb += dst->linesize[1] - width2 - skip2;
572 cr += dst->linesize[2] - width2 - skip2;
573 }
574 /* handle odd height */
575 if (h) {
576 lum += rect->x;
577 cb += skip2;
578 cr += skip2;
579
580 if (rect->x & 1) {
581 YUVA_IN(y, u, v, a, p, pal);
582 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
583 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
584 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
585 cb++;
586 cr++;
587 lum++;
588 p += BPP;
589 }
590 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
591 YUVA_IN(y, u, v, a, p, pal);
592 u1 = u;
593 v1 = v;
594 a1 = a;
595 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
596
597 YUVA_IN(y, u, v, a, p + BPP, pal);
598 u1 += u;
599 v1 += v;
600 a1 += a;
601 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
602 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
603 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
604 cb++;
605 cr++;
606 p += 2 * BPP;
607 lum += 2;
608 }
609 if (w) {
610 YUVA_IN(y, u, v, a, p, pal);
611 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
613 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
614 }
615 }
616 }
617
618 static void free_subpicture(SubPicture *sp)
619 {
620 int i;
621
622 for (i = 0; i < sp->sub.num_rects; i++)
623 {
624 av_free(sp->sub.rects[i].bitmap);
625 av_free(sp->sub.rects[i].rgba_palette);
626 }
627
628 av_free(sp->sub.rects);
629
630 memset(&sp->sub, 0, sizeof(AVSubtitle));
631 }
632
/* Blit the current video picture to the screen, letterboxed to preserve
   aspect ratio, with any due subtitle blended into the overlay first. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->codec->sample_aspect_ratio.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
                * is->video_st->codec->width / is->video_st->codec->height;;
        /* unknown or invalid SAR: fall back to the pixel aspect ratio */
        if (aspect_ratio <= 0.0)
            aspect_ratio = (float)is->video_st->codec->width /
                (float)is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's start time is reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* SDL's YV12 stores the V plane before U, hence the
                       swapped 1/2 plane indices */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, &sp->sub.rects[i]);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the video into the window; '& -3' rounds down to a
           multiple of 4 */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & -3;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & -3;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
744
/* Mathematical modulo: the result is always in [0, b) for b > 0, unlike
   C's % operator whose sign follows the dividend. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return (r >= 0) ? r : r + b;
}
753
754 static void video_audio_display(VideoState *s)
755 {
756 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
757 int ch, channels, h, h2, bgcolor, fgcolor;
758 int16_t time_diff;
759
760 /* compute display index : center on currently output samples */
761 channels = s->audio_st->codec->channels;
762 nb_display_channels = channels;
763 if (!s->paused) {
764 n = 2 * channels;
765 delay = audio_write_get_buf_size(s);
766 delay /= n;
767
768 /* to be more precise, we take into account the time spent since
769 the last buffer computation */
770 if (audio_callback_time) {
771 time_diff = av_gettime() - audio_callback_time;
772 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
773 }
774
775 delay -= s->width / 2;
776 if (delay < s->width)
777 delay = s->width;
778
779 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
780
781 h= INT_MIN;
782 for(i=0; i<1000; i+=channels){
783 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
784 int a= s->sample_array[idx];
785 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
786 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
787 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
788 int score= a-d;
789 if(h<score && (b^c)<0){
790 h= score;
791 i_start= idx;
792 }
793 }
794
795 s->last_i_start = i_start;
796 } else {
797 i_start = s->last_i_start;
798 }
799
800 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
801 fill_rectangle(screen,
802 s->xleft, s->ytop, s->width, s->height,
803 bgcolor);
804
805 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
806
807 /* total height for one channel */
808 h = s->height / nb_display_channels;
809 /* graph height / 2 */
810 h2 = (h * 9) / 20;
811 for(ch = 0;ch < nb_display_channels; ch++) {
812 i = i_start + ch;
813 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
814 for(x = 0; x < s->width; x++) {
815 y = (s->sample_array[i] * h2) >> 15;
816 if (y < 0) {
817 y = -y;
818 ys = y1 - y;
819 } else {
820 ys = y1;
821 }
822 fill_rectangle(screen,
823 s->xleft + x, ys, 1, y,
824 fgcolor);
825 i += channels;
826 if (i >= SAMPLE_ARRAY_SIZE)
827 i -= SAMPLE_ARRAY_SIZE;
828 }
829 }
830
831 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
832
833 for(ch = 1;ch < nb_display_channels; ch++) {
834 y = s->ytop + ch * h;
835 fill_rectangle(screen,
836 s->xleft, y, s->width, 1,
837 fgcolor);
838 }
839 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
840 }
841
842 static int video_open(VideoState *is){
843 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
844 int w,h;
845
846 if(is_full_screen) flags |= SDL_FULLSCREEN;
847 else flags |= SDL_RESIZABLE;
848
849 if (is_full_screen && fs_screen_width) {
850 w = fs_screen_width;
851 h = fs_screen_height;
852 } else if(!is_full_screen && screen_width){
853 w = screen_width;
854 h = screen_height;
855 }else if (is->video_st && is->video_st->codec->width){
856 w = is->video_st->codec->width;
857 h = is->video_st->codec->height;
858 } else {
859 w = 640;
860 h = 480;
861 }
862 #ifndef CONFIG_DARWIN
863 screen = SDL_SetVideoMode(w, h, 0, flags);
864 #else
865 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
866 screen = SDL_SetVideoMode(w, h, 24, flags);
867 #endif
868 if (!screen) {
869 fprintf(stderr, "SDL: could not set video mode - exiting\n");
870 return -1;
871 }
872 SDL_WM_SetCaption("FFplay", "FFplay");
873
874 is->width = screen->w;
875 is->height = screen->h;
876
877 return 0;
878 }
879
880 /* display the current picture, if any */
881 static void video_display(VideoState *is)
882 {
883 if(!screen)
884 video_open(cur_stream);
885 if (is->audio_st && is->show_audio)
886 video_audio_display(is);
887 else if (is->video_st)
888 video_image_display(is);
889 }
890
891 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
892 {
893 SDL_Event event;
894 event.type = FF_REFRESH_EVENT;
895 event.user.data1 = opaque;
896 SDL_PushEvent(&event);
897 return 0; /* 0 means stop timer */
898 }
899
/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay)
{
    /* one-shot: sdl_refresh_timer_cb returns 0 so the timer is not re-armed */
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}
905
906 /* get the current audio clock value */
907 static double get_audio_clock(VideoState *is)
908 {
909 double pts;
910 int hw_buf_size, bytes_per_sec;
911 pts = is->audio_clock;
912 hw_buf_size = audio_write_get_buf_size(is);
913 bytes_per_sec = 0;
914 if (is->audio_st) {
915 bytes_per_sec = is->audio_st->codec->sample_rate *
916 2 * is->audio_st->codec->channels;
917 }
918 if (bytes_per_sec)
919 pts -= (double)hw_buf_size / bytes_per_sec;
920 return pts;
921 }
922
923 /* get the current video clock value */
924 static double get_video_clock(VideoState *is)
925 {
926 double delta;
927 if (is->paused) {
928 delta = 0;
929 } else {
930 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
931 }
932 return is->video_current_pts + delta;
933 }
934
935 /* get the current external clock value */
936 static double get_external_clock(VideoState *is)
937 {
938 int64_t ti;
939 ti = av_gettime();
940 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
941 }
942
943 /* get the current master clock value */
944 static double get_master_clock(VideoState *is)
945 {
946 double val;
947
948 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
949 if (is->video_st)
950 val = get_video_clock(is);
951 else
952 val = get_audio_clock(is);
953 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
954 if (is->audio_st)
955 val = get_audio_clock(is);
956 else
957 val = get_video_clock(is);
958 } else {
959 val = get_external_clock(is);
960 }
961 return val;
962 }
963
964 /* seek in the stream */
965 static void stream_seek(VideoState *is, int64_t pos, int rel)
966 {
967 if (!is->seek_req) {
968 is->seek_pos = pos;
969 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
970 if (seek_by_bytes)
971 is->seek_flags |= AVSEEK_FLAG_BYTE;
972 is->seek_req = 1;
973 }
974 }
975
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    is->paused = !is->paused;
    if (!is->paused) {
        /* on resume, re-anchor the video clock and frame timer so the
           time spent paused is not counted as playback time */
        is->video_current_pts = get_video_clock(is);
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
}
985
/* called to display each frame */
/* Timer-driven refresh: show the next queued picture (adjusting its
   display delay against the master clock), retire expired subtitles,
   and re-arm the refresh timer. Also prints the periodic status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* compute nominal delay */
            delay = vp->pts - is->frame_last_pts;
            if (delay <= 0 || delay >= 1.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to follow master synchronisation source */
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
                /* if video is slave, we try to correct big delays by
                   duplicating or deleting a frame */
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;

                /* skip or repeat frame. We take into account the
                   delay to compute the threshold. I still don't know
                   if it is the best guess */
                sync_threshold = AV_SYNC_THRESHOLD;
                if (delay > sync_threshold)
                    sync_threshold = delay;
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if (diff <= -sync_threshold)
                        delay = 0;          /* video is late: show immediately */
                    else if (diff >= sync_threshold)
                        delay = 2 * delay;  /* video is early: hold the frame longer */
                }
            }

            is->frame_timer += delay;
            /* compute the REAL delay (we need to do that to avoid
               long term errors */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if (actual_delay < 0.010) {
                /* XXX: should skip picture */
                actual_delay = 0.010;
            }
            /* launch timer for next picture */
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

#if defined(DEBUG_SYNC)
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
                   delay, actual_delay, vp->pts, -diff);
#endif

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: drop every queued subpicture */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the current subpicture once it has expired
                           or the next one is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh the status line at most every 500 ms */
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1150
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previously allocated overlay before (re)creating it */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if 0
    /* XXX: use generic function */
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
    switch(is->video_st->codec->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUYV422:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        is_yuv = 1;
        break;
    default:
        is_yuv = 0;
        break;
    }
#endif
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    /* publish the new overlay to the decoder thread waiting in
       queue_picture() */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1192
/**
 * Convert a decoded frame into an SDL overlay and append it to the
 * picture queue, blocking until a queue slot is free.
 *
 * @param is        global player state
 * @param src_frame decoded frame to display
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;
    AVPicture pict;
    /* cached scaler context, reused across calls (single video thread) */
    static struct SwsContext *img_convert_ctx;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        /* SDL's YV12 layout stores V before U, hence the swapped
           plane indices 1 and 2 below */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        img_convert_ctx = sws_getCachedContext(img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1280
/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame; 0 means "unknown",
 *             in which case the pts is extrapolated from the video clock
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        /* no timestamp: reuse the running clock value */
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly: each repeat extends the display time by
       half a frame duration */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts);
}
1319
1320 static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;
1321
1322 static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
1323 int ret= avcodec_default_get_buffer(c, pic);
1324 uint64_t *pts= av_malloc(sizeof(uint64_t));
1325 *pts= global_video_pkt_pts;
1326 pic->opaque= pts;
1327 return ret;
1328 }
1329
1330 static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
1331 if(pic) av_freep(&pic->opaque);
1332 avcodec_default_release_buffer(c, pic);
1333 }
1334
/* video decoding thread: pull packets from the video queue, decode them
   and hand completed frames to output_picture2() */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        /* busy-wait (10 ms steps) while playback is paused */
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        /* blocking get; returns < 0 on abort */
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        /* a flush packet marks a seek: reset the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        /* publish the packet pts so my_get_buffer() can attach it to
           the frame the decoder allocates for this packet */
        global_video_pkt_pts= pkt->pts;
        len1 = avcodec_decode_video(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt->data, pkt->size);

        /* pick the best timestamp: the pts captured at buffer-allocation
           time when reordering, otherwise the packet dts, otherwise 0
           (which output_picture2() treats as "unknown") */
        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
            pts= *(uint64_t*)frame->opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        /* convert from stream time base to seconds */
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        /* in frame-step mode, re-pause after each decoded packet */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}
1386
/* subtitle decoding thread: pull packets from the subtitle queue,
   decode them and append bitmap subtitles to the subpicture queue */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* busy-wait (10 ms steps) while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet marks a seek: reset the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt->data, pkt->size);
//            if (len1 < 0)
//                break;
        /* format == 0 means bitmap (graphical) subtitles; text
           subtitles are not handled here */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* pre-convert each palette entry from RGBA to CCIR YUVA
               so blending at display time is cheap */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1461
1462 /* copy samples for viewing in editor window */
1463 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1464 {
1465 int size, len, channels;
1466
1467 channels = is->audio_st->codec->channels;
1468
1469 size = samples_size / sizeof(short);
1470 while (size > 0) {
1471 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1472 if (len > size)
1473 len = size;
1474 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1475 samples += len;
1476 is->sample_array_index += len;
1477 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1478 is->sample_array_index = 0;
1479 size -= len;
1480 }
1481 }
1482
1483 /* return the new audio buffer size (samples can be added or deleted
1484 to get better sync if video or external master clock) */
1485 static int synchronize_audio(VideoState *is, short *samples,
1486 int samples_size1, double pts)
1487 {
1488 int n, samples_size;
1489 double ref_clock;
1490
1491 n = 2 * is->audio_st->codec->channels;
1492 samples_size = samples_size1;
1493
1494 /* if not master, then we try to remove or add samples to correct the clock */
1495 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1496 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1497 double diff, avg_diff;
1498 int wanted_size, min_size, max_size, nb_samples;
1499
1500 ref_clock = get_master_clock(is);
1501 diff = get_audio_clock(is) - ref_clock;
1502
1503 if (diff < AV_NOSYNC_THRESHOLD) {
1504 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1505 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1506 /* not enough measures to have a correct estimate */
1507 is->audio_diff_avg_count++;
1508 } else {
1509 /* estimate the A-V difference */
1510 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1511
1512 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1513 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1514 nb_samples = samples_size / n;
1515
1516 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1517 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1518 if (wanted_size < min_size)
1519 wanted_size = min_size;
1520 else if (wanted_size > max_size)
1521 wanted_size = max_size;
1522
1523 /* add or remove samples to correction the synchro */
1524 if (wanted_size < samples_size) {
1525 /* remove samples */
1526 samples_size = wanted_size;
1527 } else if (wanted_size > samples_size) {
1528 uint8_t *samples_end, *q;
1529 int nb;
1530
1531 /* add samples */
1532 nb = (samples_size - wanted_size);
1533 samples_end = (uint8_t *)samples + samples_size - n;
1534 q = samples_end + n;
1535 while (nb > 0) {
1536 memcpy(q, samples_end, n);
1537 q += n;
1538 nb -= n;
1539 }
1540 samples_size = wanted_size;
1541 }
1542 }
1543 #if 0
1544 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1545 diff, avg_diff, samples_size - samples_size1,
1546 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1547 #endif
1548 }
1549 } else {
1550 /* too big difference : may be initial PTS errors, so
1551 reset A-V filter */
1552 is->audio_diff_avg_count = 0;
1553 is->audio_diff_cum = 0;
1554 }
1555 }
1556
1557 return samples_size;
1558 }
1559
/* decode one audio frame and returns its uncompressed size.
   Fills audio_buf (capacity buf_size bytes) with interleaved S16 samples,
   stores the pts (in seconds) of the returned data in *pts_ptr and
   advances is->audio_clock past it. Returns -1 on pause/abort. */
static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)
{
    AVPacket *pkt = &is->audio_pkt;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (is->audio_pkt_size > 0) {
            data_size = buf_size;
            len1 = avcodec_decode_audio2(is->audio_st->codec,
                                        (int16_t *)audio_buf, &data_size,
                                        is->audio_pkt_data, is->audio_pkt_size);
            if (len1 < 0) {
                /* if error, we skip the frame */
                is->audio_pkt_size = 0;
                break;
            }

            /* consume the decoded bytes from the packet */
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0)
                continue;
            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* advance the clock by the duration of the decoded data
               (n = bytes per sample frame, 2 bytes per channel) */
            n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * is->audio_st->codec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet marks a seek: reset the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->audio_st->codec);
            continue;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
1627
1628 /* get the current audio output buffer size, in samples. With SDL, we
1629 cannot have a precise information */
1630 static int audio_write_get_buf_size(VideoState *is)
1631 {
1632 return is->audio_buf_size - is->audio_buf_index;
1633 }
1634
1635
/* prepare a new audio buffer: SDL audio callback. Fills `stream` with
   `len` bytes, decoding new frames (and applying A/V sync stretching)
   whenever the intermediate buffer runs dry. Outputs silence on
   decode errors so playback timing keeps advancing. */
void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* remember when SDL asked for data; used by the clock computations */
    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               /* may shrink/grow the buffer slightly to chase the master clock */
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much buffered audio as fits into the SDL stream */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
1670
/* open a given stream (audio, video or subtitle): find and open the
   decoder, apply the command-line decoding options, initialize the
   per-stream state and start the matching thread. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output: SDL must be opened before the decoder so we
       know the actual hardware buffer size */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        /* hack for AC3. XXX: suppress that */
        if (enc->channels > 2)
            enc->channels = 2;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
    }

    codec = avcodec_find_decoder(enc->codec_id);
    /* propagate the command-line debug/speed options to the decoder */
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_resilience= error_resilience;
    enc->error_concealment= error_concealment;
    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;
    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        /* start feeding the (already opened) SDL audio device */
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);

        /* hook the buffer callbacks used to carry the pts through
           decoder frame reordering */
        enc->    get_buffer=     my_get_buffer;
        enc->release_buffer= my_release_buffer;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
1765
/* close a stream component: abort its packet queue, unblock and join
   the worker thread, then close the decoder and reset the state */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the callback, so no thread to join here */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    avcodec_close(enc);
    /* mark the component as inactive */
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
1833
1834 static void dump_stream_info(const AVFormatContext *s)
1835 {
1836 if (s->track != 0)
1837 fprintf(stderr, "Track: %d\n", s->track);
1838 if (s->title[0] != '\0')
1839 fprintf(stderr, "Title: %s\n", s->title);
1840 if (s->author[0] != '\0')
1841 fprintf(stderr, "Author: %s\n", s->author);
1842 if (s->copyright[0] != '\0')
1843 fprintf(stderr, "Copyright: %s\n", s->copyright);
1844 if (s->comment[0] != '\0')
1845 fprintf(stderr, "Comment: %s\n", s->comment);
1846 if (s->album[0] != '\0')
1847 fprintf(stderr, "Album: %s\n", s->album);
1848 if (s->year != 0)
1849 fprintf(stderr, "Year: %d\n", s->year);
1850 if (s->genre[0] != '\0')
1851 fprintf(stderr, "Genre: %s\n", s->genre);
1852 }
1853
1854 /* since we have only one decoding thread, we can use a global
1855 variable instead of a thread local variable */
1856 static VideoState *global_video_state;
1857
1858 static int decode_interrupt_cb(void)
1859 {
1860 return (global_video_state && global_video_state->abort_request);
1861 }
1862
/* this thread gets the stream from the disk or the network: it opens
   the input, selects and opens the component streams, then runs the
   demux loop, dispatching packets to the per-stream queues and
   servicing pause and seek requests */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, use_play;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* only one decode thread exists, so a global is enough for the
       interrupt callback (see decode_interrupt_cb) */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));
    ap->initial_pause = 1; /* we force a pause when starting an RTSP
                              stream */

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;
#ifdef CONFIG_RTSP_DEMUXER
    use_play = (ic->iformat == &rtsp_demuxer);
#else
    use_play = 0;
#endif

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    /* for RTSP, stream probing happens after av_read_play() below */
    if (!use_play) {
        err = av_find_stream_info(ic);
        if (err < 0) {
            fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
            ret = -1;
            goto fail;
        }
        ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
    }

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* now we can begin to play (RTSP stream only) */
    av_read_play(ic);

    if (use_play) {
        err = av_find_stream_info(ic);
        if (err < 0) {
            fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
            ret = -1;
            goto fail;
        }
    }

    /* pick the audio/video streams to play, honoring -ast/-vst style
       stream selection and the disable flags */
    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
                video_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        /* no video: show the audio waveform instead */
        if (!display_disable)
            is->show_audio = 1;
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause state changes to the demuxer (RTSP) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#ifdef CONFIG_RTSP_DEMUXER
        if (is->paused && ic->iformat == &rtsp_demuxer) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int stream_index= -1;
            int64_t seek_target= is->seek_pos;

            /* seek on one concrete stream if any is open, converting
               the target from AV_TIME_BASE to that stream's time base */
            if     (is->   video_stream >= 0) stream_index= is->   video_stream;
            else if(is->   audio_stream >= 0) stream_index= is->   audio_stream;
            else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;

            if(stream_index>=0){
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
            }

            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* drop queued packets and push a flush packet so each
                   decoder thread resets its codec state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queue are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
            url_feof(&ic->pb)) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (url_ferror(&ic->pb) == 0) {
                SDL_Delay(100); /* wait for user event */
                continue;
            } else
                break;
        }
        /* route the packet to the queue of its stream, or drop it */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* notify the main loop of fatal errors so it can quit */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2090
2091 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2092 {
2093 VideoState *is;
2094
2095 is = av_mallocz(sizeof(VideoState));
2096 if (!is)
2097 return NULL;
2098 av_strlcpy(is->filename, filename, sizeof(is->filename));
2099 is->iformat = iformat;
2100 is->ytop = 0;
2101 is->xleft = 0;
2102
2103 /* start video display */
2104 is->pictq_mutex = SDL_CreateMutex();
2105 is->pictq_cond = SDL_CreateCond();
2106
2107 is->subpq_mutex = SDL_CreateMutex();
2108 is->subpq_cond = SDL_CreateCond();
2109
2110 /* add the refresh timer to draw the picture */
2111 schedule_refresh(is, 40);
2112
2113 is->av_sync_type = av_sync_type;
2114 is->parse_tid = SDL_CreateThread(decode_thread, is);
2115 if (!is->parse_tid) {
2116 av_free(is);
2117 return NULL;
2118 }
2119 return is;
2120 }
2121
2122 static void stream_close(VideoState *is)
2123 {
2124 VideoPicture *vp;
2125 int i;
2126 /* XXX: use a special url_shutdown call to abort parse cleanly */
2127 is->abort_request = 1;
2128 SDL_WaitThread(is->parse_tid, NULL);
2129
2130 /* free all pictures */
2131 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2132 vp = &is->pictq[i];
2133 if (vp->bmp) {
2134 SDL_FreeYUVOverlay(vp->bmp);
2135 vp->bmp = NULL;
2136 }
2137 }
2138 SDL_DestroyMutex(is->pictq_mutex);
2139 SDL_DestroyCond(is->pictq_cond);
2140 SDL_DestroyMutex(is->subpq_mutex);
2141 SDL_DestroyCond(is->subpq_cond);
2142 }
2143
/* cycle to the next usable stream of the given type, wrapping around.
   For subtitles, wrapping past the last stream disables subtitles
   (stream_index becomes -1) instead of cycling back to the first. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle from (subtitles may legitimately start at -1) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came full circle without finding another candidate */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    /* switch: close the old component, open the new one */
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2192
2193
2194 static void toggle_full_screen(void)
2195 {
2196 is_full_screen = !is_full_screen;
2197 if (!fs_screen_width) {
2198 /* use default SDL method */
2199 // SDL_WM_ToggleFullScreen(screen);
2200 }
2201 video_open(cur_stream);
2202 }
2203
2204 static void toggle_pause(void)
2205 {
2206 if (cur_stream)
2207 stream_pause(cur_stream);
2208 step = 0;
2209 }
2210
2211 static void step_to_next_frame(void)
2212 {
2213 if (cur_stream) {
2214 if (cur_stream->paused)
2215 cur_stream->paused=0;
2216 cur_stream->video_current_pts = get_video_clock(cur_stream);
2217 }
2218 step = 1;
2219 }
2220
2221 static void do_exit(void)
2222 {
2223 if (cur_stream) {
2224 stream_close(cur_stream);
2225 cur_stream = NULL;
2226 }
2227 if (show_status)
2228 printf("\n");
2229 SDL_Quit();
2230 exit(0);
2231 }
2232
2233 static void toggle_audio_display(void)
2234 {
2235 if (cur_stream) {
2236 cur_stream->show_audio = !cur_stream->show_audio;
2237 }
2238 }
2239
/* handle an event sent by the GUI: main-thread loop dispatching SDL
   keyboard/mouse/window events and the FF_* user events posted by the
   worker threads. Never returns except through do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek of -10/+10/+60/-60 seconds */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* convert the time increment to a byte offset
                           using the bit rate (fallback: fixed estimate) */
                        pos = url_ftell(&cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            /* click in the window: seek to the clicked fraction of the
               total duration */
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by queue_picture(): overlay allocation must happen
               in this (main) thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
2354
2355 static void opt_frame_size(const char *arg)
2356 {
2357 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2358 fprintf(stderr, "Incorrect frame size\n");
2359 exit(1);
2360 }
2361 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2362 fprintf(stderr, "Frame size must be a multiple of 2\n");
2363 exit(1);
2364 }
2365 }
2366
2367 static void opt_width(const char *arg)
2368 {
2369 screen_width = atoi(arg);
2370 if(screen_width<=0){
2371 fprintf(stderr, "invalid width\n");
2372 exit(1);
2373 }
2374 }
2375
2376 static void opt_height(const char *arg)
2377 {
2378 screen_height = atoi(arg);
2379 if(screen_height<=0){
2380 fprintf(stderr, "invalid height\n");
2381 exit(1);
2382 }
2383 }
2384
2385 static void opt_format(const char *arg)
2386 {
2387 file_iformat = av_find_input_format(arg);
2388 if (!file_iformat) {
2389 fprintf(stderr, "Unknown input format: %s\n", arg);
2390 exit(1);
2391 }
2392 }
2393
2394 static void opt_frame_pix_fmt(const char *arg)
2395 {
2396 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2397 }
2398
#ifdef CONFIG_RTSP_DEMUXER
/* "-rtp_tcp": restrict RTSP to interleaved RTP-over-TCP transport. */
static void opt_rtp_tcp(void)
{
    /* only tcp protocol */
    rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
}
#endif
2406
2407 static void opt_sync(const char *arg)
2408 {
2409 if (!strcmp(arg, "audio"))
2410 av_sync_type = AV_SYNC_AUDIO_MASTER;
2411 else if (!strcmp(arg, "video"))
2412 av_sync_type = AV_SYNC_VIDEO_MASTER;
2413 else if (!strcmp(arg, "ext"))
2414 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2415 else
2416 show_help();
2417 }
2418
/* "-ss": remember the requested start position.  parse_date() with the
   duration flag set converts the string into an AV_TIME_BASE timestamp —
   presumably accepting both plain seconds and [hh:]mm:ss forms; verify
   against the parse_date implementation. */
static void opt_seek(const char *arg)
{
    start_time = parse_date(arg, 1);
}
2423
/* "-debug": store the codec debug bit mask and crank the libav* log
   verbosity to its maximum so the debug output is actually visible. */
static void opt_debug(const char *arg)
{
    av_log_level = 99;
    debug = atoi(arg);
}
2429
/* "-vismv": set the motion-vector visualization mask passed to the
   decoder (interpretation of the bits is up to libavcodec). */
static void opt_vismv(const char *arg)
{
    debug_mv = atoi(arg);
}
2434
/* "-threads": set the decoder thread count.  Warn when the build has no
   real thread support, since the setting then only selects emulation. */
static void opt_thread_count(const char *arg)
{
    thread_count= atoi(arg);
#if !defined(HAVE_THREADS)
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
}
2442
/* Command-line option table consumed by parse_options()/show_help_options()
   in cmdutils.  Each entry: name, flags (HAS_ARG, OPT_BOOL, OPT_INT,
   OPT_EXPERT, ...), a union holding either a handler function or the
   address of the variable to set, help text, and the argument name. */
const OptionDef options[] = {
    { "h", 0, {(void*)show_help}, "show help" },
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
#ifdef CONFIG_RTSP_DEMUXER
    { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
#endif
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { NULL, },
};
2479
/* Print the version banner, the option tables (split into main and expert
   sections by the OPT_EXPERT mask) and the runtime key bindings, then exit.
   Also installed as the handler for "-h" and reached on bad option values. */
void show_help(void)
{
    printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003-2007 Fabrice Bellard, et al.\n"
           "usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    /* mask=OPT_EXPERT, value=0: entries WITHOUT the expert flag */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    /* mask=value=OPT_EXPERT: entries WITH the expert flag */
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
    exit(1);
}
2504
2505 void parse_arg_file(const char *filename)
2506 {
2507 if (!strcmp(filename, "-"))
2508 filename = "pipe:";
2509 input_filename = filename;
2510 }
2511
2512 /* Called from the main */
int main(int argc, char **argv)
{
    int flags;

    /* register all codecs, demux and protocols */
    av_register_all();

    parse_options(argc, argv, options);

    /* show_help() exits, so reaching past here implies a filename was given */
    if (!input_filename)
        show_help();

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(CONFIG_DARWIN)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 or darwin */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#ifdef HAVE_SDL_VIDEO_SIZE
        /* remember the desktop resolution for full-screen toggling */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event classes we never handle to keep the event queue light */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet queued to tell decoder threads to flush on seek */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}