Remove OS/2 support
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include <math.h>
23 #include <limits.h>
24 #include "avformat.h"
25 #include "swscale.h"
26 #include "avstring.h"
27
28 #include "version.h"
29 #include "cmdutils.h"
30
31 #include <SDL.h>
32 #include <SDL_thread.h>
33
34 #ifdef __MINGW32__
35 #undef main /* We don't want SDL to override our main() */
36 #endif
37
38 #undef exit
39
40 //#define DEBUG_SYNC
41
/* Upper bounds (in bytes) on the demuxed packet queues; the demuxer
   thread stops reading once a queue grows past its limit. */
#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* Scaler algorithm used by libswscale when converting decoded frames. */
static int sws_flags = SWS_BICUBIC;
65
/* Thread-safe FIFO of demuxed AVPackets, shared between the demuxer
   thread (producer) and one decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list head / tail */
    int nb_packets;                     /* number of queued packets */
    int size;                           /* total payload bytes queued */
    int abort_request;                  /* when set, blocked readers return -1 */
    SDL_mutex *mutex;                   /* protects every field above */
    SDL_cond *cond;                     /* signalled on put and on abort */
} PacketQueue;
74
#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame, converted into an SDL YUV overlay. */
typedef struct VideoPicture {
    double pts;        ///<presentation time stamp for this picture
    SDL_Overlay *bmp;  /* SDL overlay holding the YUV pixel data */
    int width, height; /* source height & width */
    int allocated;     /* set once bmp has been (re)created in the main thread */
} VideoPicture;

/* One decoded subtitle together with the video pts it was queued at. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles (owned; see free_subpicture) */
} SubPicture;

/* Which clock drives A/V synchronisation (see get_master_clock). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
95
/* Whole-player state: one instance per opened media file, shared by the
   demuxer ("parse"), video decoder, subtitle decoder and the SDL audio
   callback. The pictq/subpq mutex+cond pairs guard the frame queues. */
typedef struct VideoState {
    SDL_Thread *parse_tid;       /* demuxer thread */
    SDL_Thread *video_tid;       /* video decoder thread */
    AVInputFormat *iformat;      /* forced input format, if any */
    int no_background;           /* skip repainting the border/background once */
    int abort_request;           /* tells every thread to quit */
    int paused;                  /* current pause state */
    int last_paused;             /* pause state last seen by the demuxer */
    int seek_req;                /* a seek has been requested */
    int seek_flags;              /* AVSEEK_FLAG_* for the pending seek */
    int64_t seek_pos;            /* target of the pending seek */
    AVFormatContext *ic;         /* demuxer context */
    int dtg_active_format;

    int audio_stream;            /* index of the selected audio stream */

    int av_sync_type;            /* one of the AV_SYNC_* constants */
    double external_clock; /* external clock base */
    int64_t external_clock_time; /* av_gettime() when external_clock was set */

    double audio_clock;          /* pts of the end of the decoded audio data */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;       /* size of the SDL hardware audio buffer, bytes */
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;          /* packet currently being decoded */
    uint8_t *audio_pkt_data;     /* read cursor into audio_pkt */
    int audio_pkt_size;          /* bytes left in audio_pkt */

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer feeding the waveform display */
    int sample_array_index;      /* write index into sample_array */
    int last_i_start;            /* last waveform start index (used while paused) */

    SDL_Thread *subtitle_tid;    /* subtitle decoder thread */
    int subtitle_stream;         /* index of the selected subtitle stream */
    int subtitle_stream_changed; /* set on subtitle stream switch; queue is drained */
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;          /* time at which the next frame is due */
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;            /* index of the selected video stream */
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];         /* input file name (shown in window caption code paths) */
    int width, height, xleft, ytop; /* display area inside the SDL surface */
} VideoState;
166
void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;   /* forced input format */
static const char *input_filename;
static int fs_screen_width;           /* detected full-screen resolution */
static int fs_screen_height;
static int screen_width = 0;          /* forced window size; 0 = use stream size */
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int seek_by_bytes;             /* seek by byte offset instead of time */
static int display_disable;
static int show_status;               /* print the periodic status line */
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;                  /* single-frame stepping mode */
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_resilience = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;        /* the stream currently being displayed */
static int64_t audio_callback_time;   /* av_gettime() at the last SDL audio callback */

/* sentinel packet queued to make decoders flush their buffers on seek */
AVPacket flush_pkt;

/* custom SDL events used to hop work onto the main/event thread */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

SDL_Surface *screen;
216
217 /* packet queue handling */
218 static void packet_queue_init(PacketQueue *q)
219 {
220 memset(q, 0, sizeof(PacketQueue));
221 q->mutex = SDL_CreateMutex();
222 q->cond = SDL_CreateCond();
223 }
224
225 static void packet_queue_flush(PacketQueue *q)
226 {
227 AVPacketList *pkt, *pkt1;
228
229 SDL_LockMutex(q->mutex);
230 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
231 pkt1 = pkt->next;
232 av_free_packet(&pkt->pkt);
233 av_freep(&pkt);
234 }
235 q->last_pkt = NULL;
236 q->first_pkt = NULL;
237 q->nb_packets = 0;
238 q->size = 0;
239 SDL_UnlockMutex(q->mutex);
240 }
241
242 static void packet_queue_end(PacketQueue *q)
243 {
244 packet_queue_flush(q);
245 SDL_DestroyMutex(q->mutex);
246 SDL_DestroyCond(q->cond);
247 }
248
249 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
250 {
251 AVPacketList *pkt1;
252
253 /* duplicate the packet */
254 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
255 return -1;
256
257 pkt1 = av_malloc(sizeof(AVPacketList));
258 if (!pkt1)
259 return -1;
260 pkt1->pkt = *pkt;
261 pkt1->next = NULL;
262
263
264 SDL_LockMutex(q->mutex);
265
266 if (!q->last_pkt)
267
268 q->first_pkt = pkt1;
269 else
270 q->last_pkt->next = pkt1;
271 q->last_pkt = pkt1;
272 q->nb_packets++;
273 q->size += pkt1->pkt.size;
274 /* XXX: should duplicate packet data in DV case */
275 SDL_CondSignal(q->cond);
276
277 SDL_UnlockMutex(q->mutex);
278 return 0;
279 }
280
281 static void packet_queue_abort(PacketQueue *q)
282 {
283 SDL_LockMutex(q->mutex);
284
285 q->abort_request = 1;
286
287 SDL_CondSignal(q->cond);
288
289 SDL_UnlockMutex(q->mutex);
290 }
291
292 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
293 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
294 {
295 AVPacketList *pkt1;
296 int ret;
297
298 SDL_LockMutex(q->mutex);
299
300 for(;;) {
301 if (q->abort_request) {
302 ret = -1;
303 break;
304 }
305
306 pkt1 = q->first_pkt;
307 if (pkt1) {
308 q->first_pkt = pkt1->next;
309 if (!q->first_pkt)
310 q->last_pkt = NULL;
311 q->nb_packets--;
312 q->size -= pkt1->pkt.size;
313 *pkt = pkt1->pkt;
314 av_free(pkt1);
315 ret = 1;
316 break;
317 } else if (!block) {
318 ret = 0;
319 break;
320 } else {
321 SDL_CondWait(q->cond, q->mutex);
322 }
323 }
324 SDL_UnlockMutex(q->mutex);
325 return ret;
326 }
327
328 static inline void fill_rectangle(SDL_Surface *screen,
329 int x, int y, int w, int h, int color)
330 {
331 SDL_Rect rect;
332 rect.x = x;
333 rect.y = y;
334 rect.w = w;
335 rect.h = h;
336 SDL_FillRect(screen, &rect, color);
337 }
338
#if 0
/* draw only the border of a rectangle */
/* NOTE: currently compiled out; kept for the disabled background-fill
   path in video_image_display(). Paints the four strips of the display
   area that surround the (x, y, w, h) video rectangle. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;                      /* left strip width */
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);     /* right strip width */
    if (w2 < 0)
        w2 = 0;
    h1 = y;                      /* top strip height */
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);    /* bottom strip height */
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
376
377
378
/* Fixed-point RGB -> CCIR-601 YUV conversion helpers (SCALEBITS bits of
   fractional precision) used when pre-converting subtitle palettes. */
#define SCALEBITS 10
#define ONE_HALF  (1 << (SCALEBITS - 1))
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))

#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* Blend new pixel over old with alpha a; s extra fractional bits in oldp. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack an ARGB word from memory. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up a paletted pixel *s and unpack the AYUV palette entry. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y/u/v/a back into one 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

/* bytes per subtitle bitmap pixel (palette indices) */
#define BPP 1

/* Alpha-blend one paletted subtitle rectangle into a YUV420P picture.
   Luma is blended per pixel; chroma is blended per 2x2 block, averaging
   the contributing pixels' U/V/alpha (hence the u1/v1/a1 accumulators
   and the shift argument of ALPHA_BLEND). The code splits into: an odd
   top row (if rect->y is odd), the main 2-row loop, and an odd bottom
   row — each with odd left/right column handling. */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;

    /* top-left of the rectangle in each destination plane */
    lum = dst->data[0] + rect->y * dst->linesize[0];
    cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
    cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];

    width2 = (rect->w + 1) >> 1;   /* rect width in chroma samples */
    skip2 = rect->x >> 1;          /* rect x offset in chroma samples */
    wrap = dst->linesize[0];
    wrap3 = rect->linesize;
    p = rect->bitmap;              /* palette indices, one byte per pixel */
    pal = rect->rgba_palette; /* Now in YCrCb! */

    if (rect->y & 1) {
        /* odd top line: chroma rows are shared with the line above,
           so blend chroma at quarter weight (a >> 2) */
        lum += rect->x;
        cb += skip2;
        cr += skip2;

        if (rect->x & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            /* odd trailing column */
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
        /* NOTE(review): advances p by two source rows after a single odd
           row — looks suspicious, verify against upstream ffplay history */
        p += wrap3 + (wrap3 - rect->w * BPP);
        lum += wrap + (wrap - rect->w - rect->x);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main loop: process two luma rows (one chroma row) per iteration */
    for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
        lum += rect->x;
        cb += skip2;
        cr += skip2;

        if (rect->x & 1) {
            /* odd leading column: 1x2 block, half-weight chroma */
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
            /* 2x2 block: accumulate chroma from all four pixels */
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            /* NOTE(review): reads p again rather than p + BPP for the
               right-hand pixel (the odd-top-row loop uses p + BPP here)
               — possible long-standing bug; confirm before changing */
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            /* NOTE(review): same p vs p + BPP question as above */
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            /* odd trailing column: 1x2 block */
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - rect->w * BPP);
        lum += wrap + (wrap - rect->w - rect->x);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += rect->x;
        cb += skip2;
        cr += skip2;

        if (rect->x & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* NOTE(review): blends the single-sample u/v with the paired
               accumulator weight (a1, shift 1) instead of u1/v1 — the
               analogous odd-top-row loop uses u1/v1; verify upstream */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
616
617 static void free_subpicture(SubPicture *sp)
618 {
619 int i;
620
621 for (i = 0; i < sp->sub.num_rects; i++)
622 {
623 av_free(sp->sub.rects[i].bitmap);
624 av_free(sp->sub.rects[i].rgba_palette);
625 }
626
627 av_free(sp->sub.rects);
628
629 memset(&sp->sub, 0, sizeof(AVSubtitle));
630 }
631
/* Show the picture at the read index of the frame queue: compute the
   display aspect ratio, blend any due subtitle into the overlay, then
   letterbox the overlay into the window. Runs on the main/event thread. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->codec->sample_aspect_ratio.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
                * is->video_st->codec->width / is->video_st->codec->height;; /* NOTE(review): stray second ';' — harmless empty statement */
        if (aspect_ratio <= 0.0)
            aspect_ratio = (float)is->video_st->codec->width /
                (float)is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend the subtitle only once its start time is reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* SDL YV12 stores V before U, hence planes 1 and 2
                       are swapped relative to AVPicture */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, &sp->sub.rects[i]);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the video into the window, preserving aspect ratio;
           "& -3" rounds the size down to a multiple of 4 */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & -3;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & -3;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
743
/* Mathematical modulo: unlike C's '%', the result is non-negative
   (in [0, b)) for positive b, even when a is negative. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
752
753 static void video_audio_display(VideoState *s)
754 {
755 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
756 int ch, channels, h, h2, bgcolor, fgcolor;
757 int16_t time_diff;
758
759 /* compute display index : center on currently output samples */
760 channels = s->audio_st->codec->channels;
761 nb_display_channels = channels;
762 if (!s->paused) {
763 n = 2 * channels;
764 delay = audio_write_get_buf_size(s);
765 delay /= n;
766
767 /* to be more precise, we take into account the time spent since
768 the last buffer computation */
769 if (audio_callback_time) {
770 time_diff = av_gettime() - audio_callback_time;
771 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
772 }
773
774 delay -= s->width / 2;
775 if (delay < s->width)
776 delay = s->width;
777
778 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
779
780 h= INT_MIN;
781 for(i=0; i<1000; i+=channels){
782 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
783 int a= s->sample_array[idx];
784 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
785 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
786 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
787 int score= a-d;
788 if(h<score && (b^c)<0){
789 h= score;
790 i_start= idx;
791 }
792 }
793
794 s->last_i_start = i_start;
795 } else {
796 i_start = s->last_i_start;
797 }
798
799 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
800 fill_rectangle(screen,
801 s->xleft, s->ytop, s->width, s->height,
802 bgcolor);
803
804 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
805
806 /* total height for one channel */
807 h = s->height / nb_display_channels;
808 /* graph height / 2 */
809 h2 = (h * 9) / 20;
810 for(ch = 0;ch < nb_display_channels; ch++) {
811 i = i_start + ch;
812 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
813 for(x = 0; x < s->width; x++) {
814 y = (s->sample_array[i] * h2) >> 15;
815 if (y < 0) {
816 y = -y;
817 ys = y1 - y;
818 } else {
819 ys = y1;
820 }
821 fill_rectangle(screen,
822 s->xleft + x, ys, 1, y,
823 fgcolor);
824 i += channels;
825 if (i >= SAMPLE_ARRAY_SIZE)
826 i -= SAMPLE_ARRAY_SIZE;
827 }
828 }
829
830 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
831
832 for(ch = 1;ch < nb_display_channels; ch++) {
833 y = s->ytop + ch * h;
834 fill_rectangle(screen,
835 s->xleft, y, s->width, 1,
836 fgcolor);
837 }
838 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
839 }
840
841 static int video_open(VideoState *is){
842 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
843 int w,h;
844
845 if(is_full_screen) flags |= SDL_FULLSCREEN;
846 else flags |= SDL_RESIZABLE;
847
848 if (is_full_screen && fs_screen_width) {
849 w = fs_screen_width;
850 h = fs_screen_height;
851 } else if(!is_full_screen && screen_width){
852 w = screen_width;
853 h = screen_height;
854 }else if (is->video_st && is->video_st->codec->width){
855 w = is->video_st->codec->width;
856 h = is->video_st->codec->height;
857 } else {
858 w = 640;
859 h = 480;
860 }
861 #ifndef CONFIG_DARWIN
862 screen = SDL_SetVideoMode(w, h, 0, flags);
863 #else
864 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
865 screen = SDL_SetVideoMode(w, h, 24, flags);
866 #endif
867 if (!screen) {
868 fprintf(stderr, "SDL: could not set video mode - exiting\n");
869 return -1;
870 }
871 SDL_WM_SetCaption("FFplay", "FFplay");
872
873 is->width = screen->w;
874 is->height = screen->h;
875
876 return 0;
877 }
878
879 /* display the current picture, if any */
880 static void video_display(VideoState *is)
881 {
882 if(!screen)
883 video_open(cur_stream);
884 if (is->audio_st && is->show_audio)
885 video_audio_display(is);
886 else if (is->video_st)
887 video_image_display(is);
888 }
889
890 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
891 {
892 SDL_Event event;
893 event.type = FF_REFRESH_EVENT;
894 event.user.data1 = opaque;
895 SDL_PushEvent(&event);
896 return 0; /* 0 means stop timer */
897 }
898
899 /* schedule a video refresh in 'delay' ms */
900 static void schedule_refresh(VideoState *is, int delay)
901 {
902 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
903 }
904
905 /* get the current audio clock value */
906 static double get_audio_clock(VideoState *is)
907 {
908 double pts;
909 int hw_buf_size, bytes_per_sec;
910 pts = is->audio_clock;
911 hw_buf_size = audio_write_get_buf_size(is);
912 bytes_per_sec = 0;
913 if (is->audio_st) {
914 bytes_per_sec = is->audio_st->codec->sample_rate *
915 2 * is->audio_st->codec->channels;
916 }
917 if (bytes_per_sec)
918 pts -= (double)hw_buf_size / bytes_per_sec;
919 return pts;
920 }
921
922 /* get the current video clock value */
923 static double get_video_clock(VideoState *is)
924 {
925 double delta;
926 if (is->paused) {
927 delta = 0;
928 } else {
929 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
930 }
931 return is->video_current_pts + delta;
932 }
933
934 /* get the current external clock value */
935 static double get_external_clock(VideoState *is)
936 {
937 int64_t ti;
938 ti = av_gettime();
939 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
940 }
941
942 /* get the current master clock value */
943 static double get_master_clock(VideoState *is)
944 {
945 double val;
946
947 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
948 if (is->video_st)
949 val = get_video_clock(is);
950 else
951 val = get_audio_clock(is);
952 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
953 if (is->audio_st)
954 val = get_audio_clock(is);
955 else
956 val = get_video_clock(is);
957 } else {
958 val = get_external_clock(is);
959 }
960 return val;
961 }
962
963 /* seek in the stream */
964 static void stream_seek(VideoState *is, int64_t pos, int rel)
965 {
966 if (!is->seek_req) {
967 is->seek_pos = pos;
968 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
969 if (seek_by_bytes)
970 is->seek_flags |= AVSEEK_FLAG_BYTE;
971 is->seek_req = 1;
972 }
973 }
974
975 /* pause or resume the video */
976 static void stream_pause(VideoState *is)
977 {
978 is->paused = !is->paused;
979 if (!is->paused) {
980 is->video_current_pts = get_video_clock(is);
981 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
982 }
983 }
984
/* called to display each frame */
/* Runs on the event-loop thread for every FF_REFRESH_EVENT: shows the
   next queued picture at the right time, applies A/V-sync corrections
   to the inter-frame delay, reaps expired subtitles, re-arms the timer
   for the next refresh, and prints the periodic status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* compute nominal delay */
            delay = vp->pts - is->frame_last_pts;
            if (delay <= 0 || delay >= 1.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to follow master synchronisation source */
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
                /* if video is slave, we try to correct big delays by
                   duplicating or deleting a frame */
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;

                /* skip or repeat frame. We take into account the
                   delay to compute the threshold. I still don't know
                   if it is the best guess */
                sync_threshold = AV_SYNC_THRESHOLD;
                if (delay > sync_threshold)
                    sync_threshold = delay;
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if (diff <= -sync_threshold)
                        delay = 0;          /* video is late: show ASAP */
                    else if (diff >= sync_threshold)
                        delay = 2 * delay;  /* video is early: hold it longer */
                }
            }

            is->frame_timer += delay;
            /* compute the REAL delay (we need to do that to avoid
               long term errors */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if (actual_delay < 0.010) {
                /* XXX: should skip picture */
                actual_delay = 0.010;
            }
            /* launch timer for next picture */
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

#if defined(DEBUG_SYNC)
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
                   delay, actual_delay, vp->pts, -diff);
#endif

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switch: drop every queued subtitle and wake
                       the subtitle decoder thread */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* free the head subtitle once it has expired or
                           the next one is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;   /* persists across calls: last print time */
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print at most every 500 ms */
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1149
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
/* Handler for FF_ALLOC_EVENT: (re)create the SDL YUV overlay for the
   picture at the write index, then mark it allocated and signal the
   decoder thread waiting in queue_picture(). */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if 0
    /* XXX: use generic function */
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
    switch(is->video_st->codec->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUYV422:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        is_yuv = 1;
        break;
    default:
        is_yuv = 0;
        break;
    }
#endif
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;   /* publish the overlay to the waiting decoder thread */
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1191
1192 /**
1193 *
1194 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1195 */
1196 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1197 {
1198 VideoPicture *vp;
1199 int dst_pix_fmt;
1200 AVPicture pict;
1201 static struct SwsContext *img_convert_ctx;
1202
1203 /* wait until we have space to put a new picture */
1204 SDL_LockMutex(is->pictq_mutex);
1205 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1206 !is->videoq.abort_request) {
1207 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1208 }
1209 SDL_UnlockMutex(is->pictq_mutex);
1210
1211 if (is->videoq.abort_request)
1212 return -1;
1213
1214 vp = &is->pictq[is->pictq_windex];
1215
1216 /* alloc or resize hardware picture buffer */
1217 if (!vp->bmp ||
1218 vp->width != is->video_st->codec->width ||
1219 vp->height != is->video_st->codec->height) {
1220 SDL_Event event;
1221
1222 vp->allocated = 0;
1223
1224 /* the allocation must be done in the main thread to avoid
1225 locking problems */
1226 event.type = FF_ALLOC_EVENT;
1227 event.user.data1 = is;
1228 SDL_PushEvent(&event);
1229
1230 /* wait until the picture is allocated */
1231 SDL_LockMutex(is->pictq_mutex);
1232 while (!vp->allocated && !is->videoq.abort_request) {
1233 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1234 }
1235 SDL_UnlockMutex(is->pictq_mutex);
1236
1237 if (is->videoq.abort_request)
1238 return -1;
1239 }
1240
1241 /* if the frame is not skipped, then display it */
1242 if (vp->bmp) {
1243 /* get a pointer on the bitmap */
1244 SDL_LockYUVOverlay (vp->bmp);
1245
1246 dst_pix_fmt = PIX_FMT_YUV420P;
1247 pict.data[0] = vp->bmp->pixels[0];
1248 pict.data[1] = vp->bmp->pixels[2];
1249 pict.data[2] = vp->bmp->pixels[1];
1250
1251 pict.linesize[0] = vp->bmp->pitches[0];
1252 pict.linesize[1] = vp->bmp->pitches[2];
1253 pict.linesize[2] = vp->bmp->pitches[1];
1254 if (img_convert_ctx == NULL) {
1255 img_convert_ctx = sws_getContext(is->video_st->codec->width,
1256 is->video_st->codec->height, is->video_st->codec->pix_fmt,
1257 is->video_st->codec->width, is->video_st->codec->height,
1258 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1259 if (img_convert_ctx == NULL) {
1260 fprintf(stderr, "Cannot initialize the conversion context\n");
1261 exit(1);
1262 }
1263 }
1264 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1265 0, is->video_st->codec->height, pict.data, pict.linesize);
1266 /* update the bitmap content */
1267 SDL_UnlockYUVOverlay(vp->bmp);
1268
1269 vp->pts = pts;
1270
1271 /* now we can update the picture count */
1272 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1273 is->pictq_windex = 0;
1274 SDL_LockMutex(is->pictq_mutex);
1275 is->pictq_size++;
1276 SDL_UnlockMutex(is->pictq_mutex);
1277 }
1278 return 0;
1279 }
1280
1281 /**
1282 * compute the exact PTS for the picture if it is omitted in the stream
1283 * @param pts1 the dts of the pkt / pts of the frame
1284 */
1285 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1286 {
1287 double frame_delay, pts;
1288
1289 pts = pts1;
1290
1291 if (pts != 0) {
1292 /* update video clock with pts, if present */
1293 is->video_clock = pts;
1294 } else {
1295 pts = is->video_clock;
1296 }
1297 /* update video clock for next frame */
1298 frame_delay = av_q2d(is->video_st->codec->time_base);
1299 /* for MPEG2, the frame can be repeated, so we update the
1300 clock accordingly */
1301 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1302 is->video_clock += frame_delay;
1303
1304 #if defined(DEBUG_SYNC) && 0
1305 {
1306 int ftype;
1307 if (src_frame->pict_type == FF_B_TYPE)
1308 ftype = 'B';
1309 else if (src_frame->pict_type == FF_I_TYPE)
1310 ftype = 'I';
1311 else
1312 ftype = 'P';
1313 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1314 ftype, pts, pts1);
1315 }
1316 #endif
1317 return queue_picture(is, src_frame, pts);
1318 }
1319
/* PTS of the packet currently being decoded; stashed into each new
   frame's opaque field by my_get_buffer() so that video_thread() can
   undo the decoder's frame reordering.  A global is sufficient because
   there is a single video decoding thread. */
static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;
1321
1322 static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
1323 int ret= avcodec_default_get_buffer(c, pic);
1324 uint64_t *pts= av_malloc(sizeof(uint64_t));
1325 *pts= global_video_pkt_pts;
1326 pic->opaque= pts;
1327 return ret;
1328 }
1329
1330 static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
1331 if(pic) av_freep(&pic->opaque);
1332 avcodec_default_release_buffer(c, pic);
1333 }
1334
/* Video decoding thread: pulls packets off the video queue, decodes
   them, derives a presentation timestamp for each frame and hands it
   to output_picture2() for queueing. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        /* poll (10 ms steps) while playback is paused */
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        /* a flush packet (queued after a seek) resets the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        global_video_pkt_pts= pkt->pts;
        len1 = avcodec_decode_video(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt->data, pkt->size);

        /* choose the best timestamp: the packet PTS carried through the
           decoder in frame->opaque (set by my_get_buffer) when frames
           are reordered, else the packet DTS, else 0 so that
           output_picture2() extrapolates from the video clock */
        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
            pts= *(uint64_t*)frame->opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        /* in single-step mode, re-pause after each decoded packet */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}
1386
/* Subtitle decoding thread: decodes subtitle packets, converts their
   RGBA palettes to CCIR YUVA (for blending over the YUV overlay) and
   queues them in subpq for display. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* poll (10 ms steps) while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet (queued after a seek) resets the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subtitle picture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt->data, pkt->size);
//            if (len1 < 0)
//                break;
        /* format == 0 means a bitmap subtitle with a palette */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette entries from RGBA to YUVA
               in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1461
1462 /* copy samples for viewing in editor window */
1463 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1464 {
1465 int size, len, channels;
1466
1467 channels = is->audio_st->codec->channels;
1468
1469 size = samples_size / sizeof(short);
1470 while (size > 0) {
1471 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1472 if (len > size)
1473 len = size;
1474 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1475 samples += len;
1476 is->sample_array_index += len;
1477 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1478 is->sample_array_index = 0;
1479 size -= len;
1480 }
1481 }
1482
1483 /* return the new audio buffer size (samples can be added or deleted
1484 to get better sync if video or external master clock) */
1485 static int synchronize_audio(VideoState *is, short *samples,
1486 int samples_size1, double pts)
1487 {
1488 int n, samples_size;
1489 double ref_clock;
1490
1491 n = 2 * is->audio_st->codec->channels;
1492 samples_size = samples_size1;
1493
1494 /* if not master, then we try to remove or add samples to correct the clock */
1495 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1496 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1497 double diff, avg_diff;
1498 int wanted_size, min_size, max_size, nb_samples;
1499
1500 ref_clock = get_master_clock(is);
1501 diff = get_audio_clock(is) - ref_clock;
1502
1503 if (diff < AV_NOSYNC_THRESHOLD) {
1504 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1505 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1506 /* not enough measures to have a correct estimate */
1507 is->audio_diff_avg_count++;
1508 } else {
1509 /* estimate the A-V difference */
1510 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1511
1512 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1513 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1514 nb_samples = samples_size / n;
1515
1516 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1517 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1518 if (wanted_size < min_size)
1519 wanted_size = min_size;
1520 else if (wanted_size > max_size)
1521 wanted_size = max_size;
1522
1523 /* add or remove samples to correction the synchro */
1524 if (wanted_size < samples_size) {
1525 /* remove samples */
1526 samples_size = wanted_size;
1527 } else if (wanted_size > samples_size) {
1528 uint8_t *samples_end, *q;
1529 int nb;
1530
1531 /* add samples */
1532 nb = (samples_size - wanted_size);
1533 samples_end = (uint8_t *)samples + samples_size - n;
1534 q = samples_end + n;
1535 while (nb > 0) {
1536 memcpy(q, samples_end, n);
1537 q += n;
1538 nb -= n;
1539 }
1540 samples_size = wanted_size;
1541 }
1542 }
1543 #if 0
1544 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1545 diff, avg_diff, samples_size - samples_size1,
1546 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1547 #endif
1548 }
1549 } else {
1550 /* too big difference : may be initial PTS errors, so
1551 reset A-V filter */
1552 is->audio_diff_avg_count = 0;
1553 is->audio_diff_cum = 0;
1554 }
1555 }
1556
1557 return samples_size;
1558 }
1559
/* Decode one audio frame and return its uncompressed size in bytes.
   A single packet may contain several frames, so the remaining packet
   data/size is kept in is->audio_pkt_data / is->audio_pkt_size between
   calls.  *pts_ptr receives the presentation time of the decoded data.
   Returns -1 on pause, abort, or queue error. */
static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)
{
    AVPacket *pkt = &is->audio_pkt;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (is->audio_pkt_size > 0) {
            data_size = buf_size;
            len1 = avcodec_decode_audio2(is->audio_st->codec,
                                        (int16_t *)audio_buf, &data_size,
                                        is->audio_pkt_data, is->audio_pkt_size);
            if (len1 < 0) {
                /* if error, we skip the frame */
                is->audio_pkt_size = 0;
                break;
            }

            /* advance past the consumed bytes within the packet */
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0)
                continue;
            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* advance the audio clock by the duration of the decoded
               data (n = bytes per s16 sample frame) */
            n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * is->audio_st->codec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet (queued after a seek) resets the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->audio_st->codec);
            continue;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
1627
1628 /* get the current audio output buffer size, in samples. With SDL, we
1629 cannot have a precise information */
1630 static int audio_write_get_buf_size(VideoState *is)
1631 {
1632 return is->audio_buf_size - is->audio_buf_index;
1633 }
1634
1635
/* prepare a new audio buffer */
/* SDL audio callback: fills 'stream' with 'len' bytes of decoded,
   sync-corrected audio.  Runs on SDL's audio thread; records the call
   time so the audio clock can be interpolated between callbacks. */
void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        /* refill the software buffer once it is fully consumed */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               /* add/drop samples to track the master clock */
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much as SDL asked for, possibly spanning refills */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
1670
/* open a given stream. Return 0 if OK */
/* Opens the decoder for stream 'stream_index' of is->ic, applies the
   command-line decoding options (lowres, skip_*, threads, ...), and
   starts the matching consumer: SDL audio, the video thread, or the
   subtitle thread. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        /* hack for AC3. XXX: suppress that */
        /* force a stereo downmix at the decoder level */
        if (enc->channels > 2)
            enc->channels = 2;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
    }

    codec = avcodec_find_decoder(enc->codec_id);
    /* propagate the global command-line decoding options */
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_resilience= error_resilience;
    enc->error_concealment= error_concealment;
    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;
    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);

        /* install the PTS-tagging buffer callbacks */
        enc-> get_buffer= my_get_buffer;
        enc->release_buffer= my_release_buffer;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
1765
/* Shut down one open stream component: abort its packet queue, wake
   and join the consumer thread (or close SDL audio), free the queue,
   close the codec, and clear the matching is->*_st / *_stream fields. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    avcodec_close(enc);
    /* finally invalidate the stream references */
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
1833
1834 static void dump_stream_info(const AVFormatContext *s)
1835 {
1836 if (s->track != 0)
1837 fprintf(stderr, "Track: %d\n", s->track);
1838 if (s->title[0] != '\0')
1839 fprintf(stderr, "Title: %s\n", s->title);
1840 if (s->author[0] != '\0')
1841 fprintf(stderr, "Author: %s\n", s->author);
1842 if (s->copyright[0] != '\0')
1843 fprintf(stderr, "Copyright: %s\n", s->copyright);
1844 if (s->comment[0] != '\0')
1845 fprintf(stderr, "Comment: %s\n", s->comment);
1846 if (s->album[0] != '\0')
1847 fprintf(stderr, "Album: %s\n", s->album);
1848 if (s->year != 0)
1849 fprintf(stderr, "Year: %d\n", s->year);
1850 if (s->genre[0] != '\0')
1851 fprintf(stderr, "Genre: %s\n", s->genre);
1852 }
1853
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
/* Points at the stream currently being parsed; read by
   decode_interrupt_cb() to abort blocking I/O. */
static VideoState *global_video_state;
1857
1858 static int decode_interrupt_cb(void)
1859 {
1860 return (global_video_state && global_video_state->abort_request);
1861 }
1862
/* this thread gets the stream from the disk or the network */
/* Demuxing thread: opens the input, selects and opens the audio/video
   streams, then loops reading packets and dispatching them to the
   per-stream packet queues.  Also services pause and seek requests.
   Posts FF_QUIT_EVENT on fatal errors. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, use_play;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));
    ap->initial_pause = 1; /* we force a pause when starting an RTSP
                              stream */

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;
#ifdef CONFIG_RTSP_DEMUXER
    use_play = (ic->iformat == &rtsp_demuxer);
#else
    use_play = 0;
#endif

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    /* for non-RTSP inputs, probe stream parameters before playing */
    if (!use_play) {
        err = av_find_stream_info(ic);
        if (err < 0) {
            fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
            ret = -1;
            goto fail;
        }
        ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
    }

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* now we can begin to play (RTSP stream only) */
    av_read_play(ic);

    /* RTSP streams can only be probed once playing */
    if (use_play) {
        err = av_find_stream_info(ic);
        if (err < 0) {
            fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
            ret = -1;
            goto fail;
        }
    }

    /* pick the default (or user-requested) audio and video streams */
    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if (video_index < 0 && !video_disable)
                video_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        /* audio only: show the waveform instead of video */
        if (!display_disable)
            is->show_audio = 1;
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause state changes to the demuxer (RTSP) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#ifdef CONFIG_RTSP_DEMUXER
        if (is->paused && ic->iformat == &rtsp_demuxer) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int stream_index= -1;
            int64_t seek_target= is->seek_pos;

            /* seek relative to the "most important" open stream and
               convert the target into that stream's time base */
            if     (is-> video_stream >= 0) stream_index= is-> video_stream;
            else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
            else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;

            if(stream_index>=0){
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
            }

            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* drop all queued packets and tell each decoder to
                   flush its internal buffers */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queue are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
            url_feof(&ic->pb)) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (url_ferror(&ic->pb) == 0) {
                SDL_Delay(100); /* wait for user event */
                continue;
            } else
                break;
        }
        /* route the packet to the queue of the stream it belongs to */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2090
2091 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2092 {
2093 VideoState *is;
2094
2095 is = av_mallocz(sizeof(VideoState));
2096 if (!is)
2097 return NULL;
2098 av_strlcpy(is->filename, filename, sizeof(is->filename));
2099 is->iformat = iformat;
2100 is->ytop = 0;
2101 is->xleft = 0;
2102
2103 /* start video display */
2104 is->pictq_mutex = SDL_CreateMutex();
2105 is->pictq_cond = SDL_CreateCond();
2106
2107 is->subpq_mutex = SDL_CreateMutex();
2108 is->subpq_cond = SDL_CreateCond();
2109
2110 /* add the refresh timer to draw the picture */
2111 schedule_refresh(is, 40);
2112
2113 is->av_sync_type = av_sync_type;
2114 is->parse_tid = SDL_CreateThread(decode_thread, is);
2115 if (!is->parse_tid) {
2116 av_free(is);
2117 return NULL;
2118 }
2119 return is;
2120 }
2121
2122 static void stream_close(VideoState *is)
2123 {
2124 VideoPicture *vp;
2125 int i;
2126 /* XXX: use a special url_shutdown call to abort parse cleanly */
2127 is->abort_request = 1;
2128 SDL_WaitThread(is->parse_tid, NULL);
2129
2130 /* free all pictures */
2131 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2132 vp = &is->pictq[i];
2133 if (vp->bmp) {
2134 SDL_FreeYUVOverlay(vp->bmp);
2135 vp->bmp = NULL;
2136 }
2137 }
2138 SDL_DestroyMutex(is->pictq_mutex);
2139 SDL_DestroyCond(is->pictq_cond);
2140 SDL_DestroyMutex(is->subpq_mutex);
2141 SDL_DestroyCond(is->subpq_cond);
2142 }
2143
/* Cycle to the next usable stream of the given codec type, wrapping
   around; subtitles may additionally cycle to "none" (index -1).
   Closes the current component and opens the newly selected one. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle from (subtitles may legitimately start at -1) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                /* wrapping subtitles goes through "disabled" first */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* full loop without finding another candidate: keep current */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2192
2193
2194 static void toggle_full_screen(void)
2195 {
2196 is_full_screen = !is_full_screen;
2197 if (!fs_screen_width) {
2198 /* use default SDL method */
2199 // SDL_WM_ToggleFullScreen(screen);
2200 }
2201 video_open(cur_stream);
2202 }
2203
2204 static void toggle_pause(void)
2205 {
2206 if (cur_stream)
2207 stream_pause(cur_stream);
2208 step = 0;
2209 }
2210
2211 static void step_to_next_frame(void)
2212 {
2213 if (cur_stream) {
2214 if (cur_stream->paused)
2215 cur_stream->paused=0;
2216 cur_stream->video_current_pts = get_video_clock(cur_stream);
2217 }
2218 step = 1;
2219 }
2220
2221 static void do_exit(void)
2222 {
2223 if (cur_stream) {
2224 stream_close(cur_stream);
2225 cur_stream = NULL;
2226 }
2227 if (show_status)
2228 printf("\n");
2229 SDL_Quit();
2230 exit(0);
2231 }
2232
2233 static void toggle_audio_display(void)
2234 {
2235 if (cur_stream) {
2236 cur_stream->show_audio = !cur_stream->show_audio;
2237 }
2238 }
2239
/* handle an event sent by the GUI */
/* Main-thread event loop: dispatches keyboard shortcuts (quit, pause,
   step, stream cycling, seeking), mouse seeks, window resizes, and the
   internal FF_* events posted by the other threads.  Never returns
   except through do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek of +/-10s or +/-60s */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* translate seconds into a byte offset using
                           the container bit rate (fallback constant) */
                        pos = url_ftell(&cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            /* click position on the window maps linearly onto the
               stream duration */
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by queue_picture(); overlay allocation must run
               on this (the main) thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
2354
2355 static void opt_frame_size(const char *arg)
2356 {
2357 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2358 fprintf(stderr, "Incorrect frame size\n");
2359 exit(1);
2360 }
2361 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2362 fprintf(stderr, "Frame size must be a multiple of 2\n");
2363 exit(1);
2364 }
2365 }
2366
2367 static void opt_width(const char *arg)
2368 {
2369 screen_width = atoi(arg);
2370 if(screen_width<=0){
2371 fprintf(stderr, "invalid width\n");
2372 exit(1);
2373 }
2374 }
2375
2376 static void opt_height(const char *arg)
2377 {
2378 screen_height = atoi(arg);
2379 if(screen_height<=0){
2380 fprintf(stderr, "invalid height\n");
2381 exit(1);
2382 }
2383 }
2384
2385 static void opt_format(const char *arg)
2386 {
2387 file_iformat = av_find_input_format(arg);
2388 if (!file_iformat) {
2389 fprintf(stderr, "Unknown input format: %s\n", arg);
2390 exit(1);
2391 }
2392 }
2393
/* parse the "-pix_fmt" option: pixel format for decoded frames.
   NOTE(review): unlike opt_format(), an unrecognized name is not rejected
   here — presumably the resulting sentinel value is handled downstream;
   confirm before relying on it. */
static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}
2398
#ifdef CONFIG_RTSP_DEMUXER
/* "-rtp_tcp" option: restrict RTSP transport to RTP over TCP only
   (useful when UDP is blocked by a firewall) */
static void opt_rtp_tcp(void)
{
    /* only tcp protocol */
    rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
}
#endif
2406
2407 static void opt_sync(const char *arg)
2408 {
2409 if (!strcmp(arg, "audio"))
2410 av_sync_type = AV_SYNC_AUDIO_MASTER;
2411 else if (!strcmp(arg, "video"))
2412 av_sync_type = AV_SYNC_VIDEO_MASTER;
2413 else if (!strcmp(arg, "ext"))
2414 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2415 else
2416 show_help();
2417 }
2418
/* parse the "-ss" option: initial playback position
   (presumably parse_date with flag 1 accepts both "[HH:]MM:SS[.m]" and
   plain-seconds syntax — verify against the libavformat implementation) */
static void opt_seek(const char *arg)
{
    start_time = parse_date(arg, 1);
}
2423
/* parse the "-debug" option: raise the log level to maximum and set the
   numeric codec debug flag mask from the argument */
static void opt_debug(const char *arg)
{
    av_log_level = 99;
    debug = atoi(arg);
}
2429
/* parse the "-vismv" option: motion-vector visualization flag mask */
static void opt_vismv(const char *arg)
{
    debug_mv = atoi(arg);
}
2434
/* parse the "-threads" option: number of decoding threads; warns (but
   still stores the value) when the build has no real thread support */
static void opt_thread_count(const char *arg)
{
    thread_count= atoi(arg);
#if !defined(HAVE_THREADS)
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
}
2442
2443 const OptionDef options[] = {
2444 { "h", 0, {(void*)show_help}, "show help" },
2445 { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2446 { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2447 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2448 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2449 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2450 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2451 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2452 { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2453 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2454 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2455 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2456 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2457 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2458 { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2459 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2460 { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2461 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2462 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2463 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2464 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2465 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2466 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2467 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2468 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2469 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2470 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2471 #ifdef CONFIG_RTSP_DEMUXER
2472 { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
2473 #endif
2474 { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2475 { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2476 { NULL, },
2477 };
2478
2479 void show_help(void)
2480 {
2481 printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003-2007 Fabrice Bellard, et al.\n"
2482 "usage: ffplay [options] input_file\n"
2483 "Simple media player\n");
2484 printf("\n");
2485 show_help_options(options, "Main options:\n",
2486 OPT_EXPERT, 0);
2487 show_help_options(options, "\nAdvanced options:\n",
2488 OPT_EXPERT, OPT_EXPERT);
2489 printf("\nWhile playing:\n"
2490 "q, ESC quit\n"
2491 "f toggle full screen\n"
2492 "p, SPC pause\n"
2493 "a cycle audio channel\n"
2494 "v cycle video channel\n"
2495 "t cycle subtitle channel\n"
2496 "w show audio waves\n"
2497 "left/right seek backward/forward 10 seconds\n"
2498 "down/up seek backward/forward 1 minute\n"
2499 "mouse click seek to percentage in file corresponding to fraction of width\n"
2500 );
2501 exit(1);
2502 }
2503
2504 void parse_arg_file(const char *filename)
2505 {
2506 if (!strcmp(filename, "-"))
2507 filename = "pipe:";
2508 input_filename = filename;
2509 }
2510
/* Called from the main */
int main(int argc, char **argv)
{
    int flags;

    /* register all codecs, demux and protocols */
    av_register_all();

    parse_options(argc, argv, options);

    /* no input file given: print usage and exit (show_help never returns) */
    if (!input_filename)
        show_help();

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(CONFIG_DARWIN)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 or darwin */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#ifdef HAVE_SDL_VIDEO_SIZE
        /* query the desktop size so full-screen mode can match it */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event types we never handle so they don't wake the event loop */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet used to signal the decoder queues to flush on seek;
       NOTE(review): assigning a string literal to the packet data pointer
       discards const — relies on the flush packet never being freed or
       written through; confirm the queue code treats it specially */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}