Introduce libavdevice
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include <math.h>
23 #include <limits.h>
24 #include "avformat.h"
25 #include "avdevice.h"
26 #include "rtsp.h"
27 #include "swscale.h"
28 #include "avstring.h"
29
30 #include "version.h"
31 #include "cmdutils.h"
32
33 #include <SDL.h>
34 #include <SDL_thread.h>
35
36 #ifdef __MINGW32__
37 #undef main /* We don't want SDL to override our main() */
38 #endif
39
40 #undef exit
41
static const char program_name[] = "FFplay";
static const int program_birth_year = 2003;

//#define DEBUG_SYNC

/* upper bounds, in bytes of queued packet payload, before the demuxer
   thread stops reading ahead */
#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* scaler algorithm used when converting decoded frames (see swscale.h) */
static int sws_flags = SWS_BICUBIC;
70
/* Thread-safe FIFO of demuxed packets, filled by the demux thread and
   drained by a decoder thread. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list head / tail */
    int nb_packets;    /* number of packets currently queued */
    int size;          /* sum of queued packet payload sizes, in bytes */
    int abort_request; /* when set, packet_queue_get() returns -1 immediately */
    SDL_mutex *mutex;  /* protects all fields above */
    SDL_cond *cond;    /* signalled when a packet arrives or abort is requested */
} PacketQueue;
79
80 #define VIDEO_PICTURE_QUEUE_SIZE 1
81 #define SUBPICTURE_QUEUE_SIZE 4
82
/* One decoded video frame, held in an SDL overlay until displayed. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    SDL_Overlay *bmp;  /* YUV overlay with the pixel data; NULL until allocated */
    int width, height; /* source height & width */
    int allocated;     /* set once bmp matches the current stream dimensions */
} VideoPicture;
89
/* One decoded subtitle, queued until its display interval. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded rectangles; released via free_subpicture() */
} SubPicture;
94
/* Master clock selection: the stream the other streams are slaved to. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
100
/* All state for one open media file: demuxer context, per-stream packet
   queues, synchronisation clocks and display geometry.  Shared between
   the parse, video-decode, subtitle-decode and SDL audio-callback
   threads; the queue/picture fields are guarded by their mutexes. */
typedef struct VideoState {
    SDL_Thread *parse_tid;   /* demuxer (read) thread */
    SDL_Thread *video_tid;   /* video decoder thread */
    AVInputFormat *iformat;  /* forced input format, if any */
    int no_background;
    int abort_request;       /* tells the worker threads to exit */
    int paused;
    int last_paused;         /* previous 'paused' value, to detect toggles */
    int seek_req;            /* a seek is pending */
    int seek_flags;          /* AVSEEK_FLAG_* for the pending seek */
    int64_t seek_pos;        /* target of the pending seek */
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;        /* index of the opened audio stream */

    int av_sync_type;        /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;      /* pts of the last decoded audio, in seconds */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;   /* size of the SDL hardware audio buffer, bytes */
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;
    uint8_t *audio_pkt_data;
    int audio_pkt_size;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of played samples */
    int sample_array_index;
    int last_i_start;        /* waveform start index used while paused */

    SDL_Thread *subtitle_tid; /* subtitle decoder thread */
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE]; /* decoded subtitle ring buffer */
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;      /* absolute time the next frame should show */
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE]; /* decoded picture ring buffer */
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* display area inside the SDL surface */
} VideoState;
171
void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;   /* fullscreen size override (0 = autodetect) */
static int fs_screen_height;
static int screen_width = 0;  /* windowed size override (0 = stream size) */
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int seek_by_bytes;
static int display_disable;
static int show_status;       /* print the periodic status line */
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;          /* single-frame stepping mode */
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_resilience = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time; /* av_gettime() at the last audio callback */

/* special marker queued to ask the decoders to flush their state */
AVPacket flush_pkt;

/* custom SDL events used to hop work onto the main event-loop thread */
#define FF_ALLOC_EVENT (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT (SDL_USEREVENT + 2)

SDL_Surface *screen;
223 /* packet queue handling */
224 static void packet_queue_init(PacketQueue *q)
225 {
226 memset(q, 0, sizeof(PacketQueue));
227 q->mutex = SDL_CreateMutex();
228 q->cond = SDL_CreateCond();
229 }
230
231 static void packet_queue_flush(PacketQueue *q)
232 {
233 AVPacketList *pkt, *pkt1;
234
235 SDL_LockMutex(q->mutex);
236 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
237 pkt1 = pkt->next;
238 av_free_packet(&pkt->pkt);
239 av_freep(&pkt);
240 }
241 q->last_pkt = NULL;
242 q->first_pkt = NULL;
243 q->nb_packets = 0;
244 q->size = 0;
245 SDL_UnlockMutex(q->mutex);
246 }
247
/* Destroy a packet queue: drop all queued packets and release its
   synchronization primitives.  The queue must not be used afterwards. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
254
255 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
256 {
257 AVPacketList *pkt1;
258
259 /* duplicate the packet */
260 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
261 return -1;
262
263 pkt1 = av_malloc(sizeof(AVPacketList));
264 if (!pkt1)
265 return -1;
266 pkt1->pkt = *pkt;
267 pkt1->next = NULL;
268
269
270 SDL_LockMutex(q->mutex);
271
272 if (!q->last_pkt)
273
274 q->first_pkt = pkt1;
275 else
276 q->last_pkt->next = pkt1;
277 q->last_pkt = pkt1;
278 q->nb_packets++;
279 q->size += pkt1->pkt.size;
280 /* XXX: should duplicate packet data in DV case */
281 SDL_CondSignal(q->cond);
282
283 SDL_UnlockMutex(q->mutex);
284 return 0;
285 }
286
/* Make packet_queue_get() return -1: set the abort flag and wake any
   thread currently blocked on the queue's condition variable. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
297
298 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
299 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
300 {
301 AVPacketList *pkt1;
302 int ret;
303
304 SDL_LockMutex(q->mutex);
305
306 for(;;) {
307 if (q->abort_request) {
308 ret = -1;
309 break;
310 }
311
312 pkt1 = q->first_pkt;
313 if (pkt1) {
314 q->first_pkt = pkt1->next;
315 if (!q->first_pkt)
316 q->last_pkt = NULL;
317 q->nb_packets--;
318 q->size -= pkt1->pkt.size;
319 *pkt = pkt1->pkt;
320 av_free(pkt1);
321 ret = 1;
322 break;
323 } else if (!block) {
324 ret = 0;
325 break;
326 } else {
327 SDL_CondWait(q->cond, q->mutex);
328 }
329 }
330 SDL_UnlockMutex(q->mutex);
331 return ret;
332 }
333
334 static inline void fill_rectangle(SDL_Surface *screen,
335 int x, int y, int w, int h, int color)
336 {
337 SDL_Rect rect;
338 rect.x = x;
339 rect.y = y;
340 rect.w = w;
341 rect.h = h;
342 SDL_FillRect(screen, &rect, color);
343 }
344
#if 0
/* draw only the border of a rectangle */
/* NOTE(review): compiled out; kept as reference for the background fill
   mentioned by the commented call in video_image_display(). */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    /* widths of the left/right bands, clamped to the display area */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    /* heights of the top/bottom bands, clamped likewise */
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
382
383
384
/* fixed-point helpers for the colorspace macros below */
#define SCALEBITS 10
#define ONE_HALF (1 << (SCALEBITS - 1))
#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))

/* full-range 8-bit RGB -> CCIR-601 limited-range luma (offset by 16) */
#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

/* full-range RGB -> CCIR-601 chroma, centred on 128; 'shift' compensates
   when r1/g1/b1 are sums of 2^shift pixel values */
#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* blend 'newp' over 'oldp' with alpha 'a' (0..255); 's' compensates when
   newp is a sum of 2^s samples (oldp is scaled up to match) */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* unpack a packed 32-bit ARGB value */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* look up the 8-bit palette index at 's' and unpack the AYUV entry */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* pack y/u/v/a back into a 32-bit word */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
429
430 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
431 {
432 int wrap, wrap3, width2, skip2;
433 int y, u, v, a, u1, v1, a1, w, h;
434 uint8_t *lum, *cb, *cr;
435 const uint8_t *p;
436 const uint32_t *pal;
437 int dstx, dsty, dstw, dsth;
438
439 dstx = FFMIN(FFMAX(rect->x, 0), imgw);
440 dstw = FFMIN(FFMAX(rect->w, 0), imgw - dstx);
441 dsty = FFMIN(FFMAX(rect->y, 0), imgh);
442 dsth = FFMIN(FFMAX(rect->h, 0), imgh - dsty);
443 lum = dst->data[0] + dsty * dst->linesize[0];
444 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
445 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
446
447 width2 = (dstw + 1) >> 1;
448 skip2 = dstx >> 1;
449 wrap = dst->linesize[0];
450 wrap3 = rect->linesize;
451 p = rect->bitmap;
452 pal = rect->rgba_palette; /* Now in YCrCb! */
453
454 if (dsty & 1) {
455 lum += dstx;
456 cb += skip2;
457 cr += skip2;
458
459 if (dstx & 1) {
460 YUVA_IN(y, u, v, a, p, pal);
461 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
462 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
463 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
464 cb++;
465 cr++;
466 lum++;
467 p += BPP;
468 }
469 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
470 YUVA_IN(y, u, v, a, p, pal);
471 u1 = u;
472 v1 = v;
473 a1 = a;
474 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
475
476 YUVA_IN(y, u, v, a, p + BPP, pal);
477 u1 += u;
478 v1 += v;
479 a1 += a;
480 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
481 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
482 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
483 cb++;
484 cr++;
485 p += 2 * BPP;
486 lum += 2;
487 }
488 if (w) {
489 YUVA_IN(y, u, v, a, p, pal);
490 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
491 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
492 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
493 }
494 p += wrap3 + (wrap3 - dstw * BPP);
495 lum += wrap + (wrap - dstw - dstx);
496 cb += dst->linesize[1] - width2 - skip2;
497 cr += dst->linesize[2] - width2 - skip2;
498 }
499 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
500 lum += dstx;
501 cb += skip2;
502 cr += skip2;
503
504 if (dstx & 1) {
505 YUVA_IN(y, u, v, a, p, pal);
506 u1 = u;
507 v1 = v;
508 a1 = a;
509 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
510 p += wrap3;
511 lum += wrap;
512 YUVA_IN(y, u, v, a, p, pal);
513 u1 += u;
514 v1 += v;
515 a1 += a;
516 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
517 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
518 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
519 cb++;
520 cr++;
521 p += -wrap3 + BPP;
522 lum += -wrap + 1;
523 }
524 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
525 YUVA_IN(y, u, v, a, p, pal);
526 u1 = u;
527 v1 = v;
528 a1 = a;
529 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
530
531 YUVA_IN(y, u, v, a, p, pal);
532 u1 += u;
533 v1 += v;
534 a1 += a;
535 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
536 p += wrap3;
537 lum += wrap;
538
539 YUVA_IN(y, u, v, a, p, pal);
540 u1 += u;
541 v1 += v;
542 a1 += a;
543 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
544
545 YUVA_IN(y, u, v, a, p, pal);
546 u1 += u;
547 v1 += v;
548 a1 += a;
549 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
550
551 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
552 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
553
554 cb++;
555 cr++;
556 p += -wrap3 + 2 * BPP;
557 lum += -wrap + 2;
558 }
559 if (w) {
560 YUVA_IN(y, u, v, a, p, pal);
561 u1 = u;
562 v1 = v;
563 a1 = a;
564 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
565 p += wrap3;
566 lum += wrap;
567 YUVA_IN(y, u, v, a, p, pal);
568 u1 += u;
569 v1 += v;
570 a1 += a;
571 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
572 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
573 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
574 cb++;
575 cr++;
576 p += -wrap3 + BPP;
577 lum += -wrap + 1;
578 }
579 p += wrap3 + (wrap3 - dstw * BPP);
580 lum += wrap + (wrap - dstw - dstx);
581 cb += dst->linesize[1] - width2 - skip2;
582 cr += dst->linesize[2] - width2 - skip2;
583 }
584 /* handle odd height */
585 if (h) {
586 lum += dstx;
587 cb += skip2;
588 cr += skip2;
589
590 if (dstx & 1) {
591 YUVA_IN(y, u, v, a, p, pal);
592 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
594 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
595 cb++;
596 cr++;
597 lum++;
598 p += BPP;
599 }
600 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
601 YUVA_IN(y, u, v, a, p, pal);
602 u1 = u;
603 v1 = v;
604 a1 = a;
605 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
606
607 YUVA_IN(y, u, v, a, p + BPP, pal);
608 u1 += u;
609 v1 += v;
610 a1 += a;
611 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
612 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
613 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
614 cb++;
615 cr++;
616 p += 2 * BPP;
617 lum += 2;
618 }
619 if (w) {
620 YUVA_IN(y, u, v, a, p, pal);
621 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
623 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
624 }
625 }
626 }
627
/* Free the bitmap and palette of every rectangle of a decoded subtitle,
   then reset the AVSubtitle so the queue slot can be reused. */
static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_free(sp->sub.rects[i].bitmap);
        av_free(sp->sub.rects[i].rgba_palette);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}
642
/* Display the picture at the read index of the picture queue: blend any
   due subtitle into its overlay, then show it centred and letterboxed
   to preserve the aspect ratio. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        /* derive the display aspect ratio from the sample aspect ratio,
           falling back to the raw width/height ratio */
        if (is->video_st->codec->sample_aspect_ratio.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
                * is->video_st->codec->width / is->video_st->codec->height;;
        if (aspect_ratio <= 0.0)
            aspect_ratio = (float)is->video_st->codec->width /
                (float)is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend the subtitle once its start display time is reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* chroma planes 1/2 are swapped to match the plane
                       order of the SDL_YV12_OVERLAY created in
                       alloc_picture() */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, &sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the widest rectangle of the right aspect ratio into the
           window; '& -3' keeps the dimensions multiples of 4 */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & -3;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & -3;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
755
/* Mathematical modulo: like a % b but the result is shifted into
   [0, b) even when a is negative (assumes b > 0). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
764
765 static void video_audio_display(VideoState *s)
766 {
767 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
768 int ch, channels, h, h2, bgcolor, fgcolor;
769 int16_t time_diff;
770
771 /* compute display index : center on currently output samples */
772 channels = s->audio_st->codec->channels;
773 nb_display_channels = channels;
774 if (!s->paused) {
775 n = 2 * channels;
776 delay = audio_write_get_buf_size(s);
777 delay /= n;
778
779 /* to be more precise, we take into account the time spent since
780 the last buffer computation */
781 if (audio_callback_time) {
782 time_diff = av_gettime() - audio_callback_time;
783 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
784 }
785
786 delay -= s->width / 2;
787 if (delay < s->width)
788 delay = s->width;
789
790 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
791
792 h= INT_MIN;
793 for(i=0; i<1000; i+=channels){
794 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
795 int a= s->sample_array[idx];
796 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
797 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
798 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
799 int score= a-d;
800 if(h<score && (b^c)<0){
801 h= score;
802 i_start= idx;
803 }
804 }
805
806 s->last_i_start = i_start;
807 } else {
808 i_start = s->last_i_start;
809 }
810
811 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
812 fill_rectangle(screen,
813 s->xleft, s->ytop, s->width, s->height,
814 bgcolor);
815
816 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
817
818 /* total height for one channel */
819 h = s->height / nb_display_channels;
820 /* graph height / 2 */
821 h2 = (h * 9) / 20;
822 for(ch = 0;ch < nb_display_channels; ch++) {
823 i = i_start + ch;
824 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
825 for(x = 0; x < s->width; x++) {
826 y = (s->sample_array[i] * h2) >> 15;
827 if (y < 0) {
828 y = -y;
829 ys = y1 - y;
830 } else {
831 ys = y1;
832 }
833 fill_rectangle(screen,
834 s->xleft + x, ys, 1, y,
835 fgcolor);
836 i += channels;
837 if (i >= SAMPLE_ARRAY_SIZE)
838 i -= SAMPLE_ARRAY_SIZE;
839 }
840 }
841
842 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
843
844 for(ch = 1;ch < nb_display_channels; ch++) {
845 y = s->ytop + ch * h;
846 fill_rectangle(screen,
847 s->xleft, y, s->width, 1,
848 fgcolor);
849 }
850 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
851 }
852
/* Create or resize the SDL output window.  Size preference order:
   forced fullscreen size, forced windowed size, the video stream's
   dimensions, then a 640x480 fallback.  Records the resulting size in
   is->width/height.  Returns 0 on success, -1 on failure. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
890
/* display the current picture, if any */
static void video_display(VideoState *is)
{
    /* lazily open the SDL window the first time something is shown */
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}
901
/* SDL timer callback: post an FF_REFRESH_EVENT so the actual refresh
   runs in the main event-loop thread, not in SDL's timer thread. */
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque; /* the VideoState passed to schedule_refresh() */
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}
910
/* schedule a video refresh in 'delay' ms */
/* (one-shot: sdl_refresh_timer_cb returns 0, which cancels the timer) */
static void schedule_refresh(VideoState *is, int delay)
{
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}
916
917 /* get the current audio clock value */
918 static double get_audio_clock(VideoState *is)
919 {
920 double pts;
921 int hw_buf_size, bytes_per_sec;
922 pts = is->audio_clock;
923 hw_buf_size = audio_write_get_buf_size(is);
924 bytes_per_sec = 0;
925 if (is->audio_st) {
926 bytes_per_sec = is->audio_st->codec->sample_rate *
927 2 * is->audio_st->codec->channels;
928 }
929 if (bytes_per_sec)
930 pts -= (double)hw_buf_size / bytes_per_sec;
931 return pts;
932 }
933
934 /* get the current video clock value */
935 static double get_video_clock(VideoState *is)
936 {
937 double delta;
938 if (is->paused) {
939 delta = 0;
940 } else {
941 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
942 }
943 return is->video_current_pts + delta;
944 }
945
946 /* get the current external clock value */
947 static double get_external_clock(VideoState *is)
948 {
949 int64_t ti;
950 ti = av_gettime();
951 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
952 }
953
954 /* get the current master clock value */
955 static double get_master_clock(VideoState *is)
956 {
957 double val;
958
959 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
960 if (is->video_st)
961 val = get_video_clock(is);
962 else
963 val = get_audio_clock(is);
964 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
965 if (is->audio_st)
966 val = get_audio_clock(is);
967 else
968 val = get_video_clock(is);
969 } else {
970 val = get_external_clock(is);
971 }
972 return val;
973 }
974
975 /* seek in the stream */
976 static void stream_seek(VideoState *is, int64_t pos, int rel)
977 {
978 if (!is->seek_req) {
979 is->seek_pos = pos;
980 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
981 if (seek_by_bytes)
982 is->seek_flags |= AVSEEK_FLAG_BYTE;
983 is->seek_req = 1;
984 }
985 }
986
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    is->paused = !is->paused;
    if (!is->paused) {
        /* on resume, rebase the clock and frame timer so the time spent
           paused does not count as playback time */
        is->video_current_pts = get_video_clock(is);
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
}
996
/* called to display each frame */
/* Runs in the main thread on FF_REFRESH_EVENT: displays the next queued
   picture, adjusts the inter-frame delay against the master clock,
   retires expired subtitles, re-arms the refresh timer, and prints the
   periodic status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* compute nominal delay */
            delay = vp->pts - is->frame_last_pts;
            if (delay <= 0 || delay >= 2.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to follow master synchronisation source */
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
                /* if video is slave, we try to correct big delays by
                   duplicating or deleting a frame */
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;

                /* skip or repeat frame. We take into account the
                   delay to compute the threshold. I still don't know
                   if it is the best guess */
                sync_threshold = AV_SYNC_THRESHOLD;
                if (delay > sync_threshold)
                    sync_threshold = delay;
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if (diff <= -sync_threshold)
                        delay = 0;
                    else if (diff >= sync_threshold)
                        delay = 2 * delay;
                }
            }

            is->frame_timer += delay;
            /* compute the REAL delay (we need to do that to avoid
               long term errors */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if (actual_delay < 0.010) {
                /* XXX: should skip picture */
                actual_delay = 0.010;
            }
            /* launch timer for next picture */
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

#if defined(DEBUG_SYNC)
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
                   delay, actual_delay, vp->pts, -diff);
#endif

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switch: drop every queued subtitle and wake
                       the subtitle decoder thread */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the head subtitle once it has expired or
                           the next one is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print the status line at most every 500 ms */
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1161
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
/* Runs on FF_ALLOC_EVENT: (re)creates the YV12 overlay for the write
   slot of the picture queue, then signals queue_picture(), which is
   blocked on pictq_cond waiting for vp->allocated. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if 0
    /* XXX: use generic function */
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
    switch(is->video_st->codec->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUYV422:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        is_yuv = 1;
        break;
    default:
        is_yuv = 0;
        break;
    }
#endif
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1203
1204 /**
1205 *
1206 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1207 */
1208 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1209 {
1210 VideoPicture *vp;
1211 int dst_pix_fmt;
1212 AVPicture pict;
1213 static struct SwsContext *img_convert_ctx;
1214
1215 /* wait until we have space to put a new picture */
1216 SDL_LockMutex(is->pictq_mutex);
1217 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1218 !is->videoq.abort_request) {
1219 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1220 }
1221 SDL_UnlockMutex(is->pictq_mutex);
1222
1223 if (is->videoq.abort_request)
1224 return -1;
1225
1226 vp = &is->pictq[is->pictq_windex];
1227
1228 /* alloc or resize hardware picture buffer */
1229 if (!vp->bmp ||
1230 vp->width != is->video_st->codec->width ||
1231 vp->height != is->video_st->codec->height) {
1232 SDL_Event event;
1233
1234 vp->allocated = 0;
1235
1236 /* the allocation must be done in the main thread to avoid
1237 locking problems */
1238 event.type = FF_ALLOC_EVENT;
1239 event.user.data1 = is;
1240 SDL_PushEvent(&event);
1241
1242 /* wait until the picture is allocated */
1243 SDL_LockMutex(is->pictq_mutex);
1244 while (!vp->allocated && !is->videoq.abort_request) {
1245 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1246 }
1247 SDL_UnlockMutex(is->pictq_mutex);
1248
1249 if (is->videoq.abort_request)
1250 return -1;
1251 }
1252
1253 /* if the frame is not skipped, then display it */
1254 if (vp->bmp) {
1255 /* get a pointer on the bitmap */
1256 SDL_LockYUVOverlay (vp->bmp);
1257
1258 dst_pix_fmt = PIX_FMT_YUV420P;
1259 pict.data[0] = vp->bmp->pixels[0];
1260 pict.data[1] = vp->bmp->pixels[2];
1261 pict.data[2] = vp->bmp->pixels[1];
1262
1263 pict.linesize[0] = vp->bmp->pitches[0];
1264 pict.linesize[1] = vp->bmp->pitches[2];
1265 pict.linesize[2] = vp->bmp->pitches[1];
1266 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1267 is->video_st->codec->width, is->video_st->codec->height,
1268 is->video_st->codec->pix_fmt,
1269 is->video_st->codec->width, is->video_st->codec->height,
1270 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1271 if (img_convert_ctx == NULL) {
1272 fprintf(stderr, "Cannot initialize the conversion context\n");
1273 exit(1);
1274 }
1275 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1276 0, is->video_st->codec->height, pict.data, pict.linesize);
1277 /* update the bitmap content */
1278 SDL_UnlockYUVOverlay(vp->bmp);
1279
1280 vp->pts = pts;
1281
1282 /* now we can update the picture count */
1283 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1284 is->pictq_windex = 0;
1285 SDL_LockMutex(is->pictq_mutex);
1286 is->pictq_size++;
1287 SDL_UnlockMutex(is->pictq_mutex);
1288 }
1289 return 0;
1290 }
1291
1292 /**
1293 * compute the exact PTS for the picture if it is omitted in the stream
1294 * @param pts1 the dts of the pkt / pts of the frame
1295 */
1296 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1297 {
1298 double frame_delay, pts;
1299
1300 pts = pts1;
1301
1302 if (pts != 0) {
1303 /* update video clock with pts, if present */
1304 is->video_clock = pts;
1305 } else {
1306 pts = is->video_clock;
1307 }
1308 /* update video clock for next frame */
1309 frame_delay = av_q2d(is->video_st->codec->time_base);
1310 /* for MPEG2, the frame can be repeated, so we update the
1311 clock accordingly */
1312 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1313 is->video_clock += frame_delay;
1314
1315 #if defined(DEBUG_SYNC) && 0
1316 {
1317 int ftype;
1318 if (src_frame->pict_type == FF_B_TYPE)
1319 ftype = 'B';
1320 else if (src_frame->pict_type == FF_I_TYPE)
1321 ftype = 'I';
1322 else
1323 ftype = 'P';
1324 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1325 ftype, pts, pts1);
1326 }
1327 #endif
1328 return queue_picture(is, src_frame, pts);
1329 }
1330
/* pts of the packet currently being decoded; set in video_thread() just
   before avcodec_decode_video() and picked up by my_get_buffer() to tag
   each frame (safe as a global because only one video decode thread runs) */
static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;
1332
1333 static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
1334 int ret= avcodec_default_get_buffer(c, pic);
1335 uint64_t *pts= av_malloc(sizeof(uint64_t));
1336 *pts= global_video_pkt_pts;
1337 pic->opaque= pts;
1338 return ret;
1339 }
1340
1341 static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
1342 if(pic) av_freep(&pic->opaque);
1343 avcodec_default_release_buffer(c, pic);
1344 }
1345
/* video decoding thread: pulls packets from the video queue, decodes
   them, derives a pts for every picture and pushes it into the picture
   queue via output_picture2() */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        /* poll (10 ms steps) while playback is paused */
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        /* flush packet (queued after a seek): reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        /* publish the packet pts so my_get_buffer() can tag the frame */
        global_video_pkt_pts= pkt->pts;
        len1 = avcodec_decode_video(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt->data, pkt->size);

        /* prefer the pts stashed in frame->opaque (by my_get_buffer) when
           reordering was requested or the packet has no dts; otherwise
           fall back to the dts, then to 0 (meaning "unknown") */
        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
            pts= *(uint64_t*)frame->opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        /* convert from stream time base to seconds */
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        /* in single-step mode, pause again after each decoded packet */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}
1397
/* subtitle decoding thread: pulls packets from the subtitle queue,
   decodes them and stores the result (palette converted from RGBA to
   YUVA) in the subpicture queue for blending during video refresh */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* poll (10 ms steps) while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* flush packet (queued after a seek): reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt->data, pkt->size);
//            if (len1 < 0)
//                break;
        /* only bitmap subtitles (format == 0) are handled here */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each palette entry from RGBA to YUVA (CCIR range)
               so it can be blended onto the YUV overlay directly */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1472
1473 /* copy samples for viewing in editor window */
1474 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1475 {
1476 int size, len, channels;
1477
1478 channels = is->audio_st->codec->channels;
1479
1480 size = samples_size / sizeof(short);
1481 while (size > 0) {
1482 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1483 if (len > size)
1484 len = size;
1485 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1486 samples += len;
1487 is->sample_array_index += len;
1488 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1489 is->sample_array_index = 0;
1490 size -= len;
1491 }
1492 }
1493
1494 /* return the new audio buffer size (samples can be added or deleted
1495 to get better sync if video or external master clock) */
1496 static int synchronize_audio(VideoState *is, short *samples,
1497 int samples_size1, double pts)
1498 {
1499 int n, samples_size;
1500 double ref_clock;
1501
1502 n = 2 * is->audio_st->codec->channels;
1503 samples_size = samples_size1;
1504
1505 /* if not master, then we try to remove or add samples to correct the clock */
1506 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1507 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1508 double diff, avg_diff;
1509 int wanted_size, min_size, max_size, nb_samples;
1510
1511 ref_clock = get_master_clock(is);
1512 diff = get_audio_clock(is) - ref_clock;
1513
1514 if (diff < AV_NOSYNC_THRESHOLD) {
1515 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1516 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1517 /* not enough measures to have a correct estimate */
1518 is->audio_diff_avg_count++;
1519 } else {
1520 /* estimate the A-V difference */
1521 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1522
1523 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1524 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1525 nb_samples = samples_size / n;
1526
1527 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1528 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1529 if (wanted_size < min_size)
1530 wanted_size = min_size;
1531 else if (wanted_size > max_size)
1532 wanted_size = max_size;
1533
1534 /* add or remove samples to correction the synchro */
1535 if (wanted_size < samples_size) {
1536 /* remove samples */
1537 samples_size = wanted_size;
1538 } else if (wanted_size > samples_size) {
1539 uint8_t *samples_end, *q;
1540 int nb;
1541
1542 /* add samples */
1543 nb = (samples_size - wanted_size);
1544 samples_end = (uint8_t *)samples + samples_size - n;
1545 q = samples_end + n;
1546 while (nb > 0) {
1547 memcpy(q, samples_end, n);
1548 q += n;
1549 nb -= n;
1550 }
1551 samples_size = wanted_size;
1552 }
1553 }
1554 #if 0
1555 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1556 diff, avg_diff, samples_size - samples_size1,
1557 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1558 #endif
1559 }
1560 } else {
1561 /* too big difference : may be initial PTS errors, so
1562 reset A-V filter */
1563 is->audio_diff_avg_count = 0;
1564 is->audio_diff_cum = 0;
1565 }
1566 }
1567
1568 return samples_size;
1569 }
1570
/* decode one audio frame and returns its uncompressed size.
   Keeps is->audio_clock in step with the decoded data; returns -1 when
   paused or when the audio queue was aborted. */
static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)
{
    AVPacket *pkt = &is->audio_pkt;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (is->audio_pkt_size > 0) {
            data_size = buf_size;
            len1 = avcodec_decode_audio2(is->audio_st->codec,
                                        (int16_t *)audio_buf, &data_size,
                                        is->audio_pkt_data, is->audio_pkt_size);
            if (len1 < 0) {
                /* if error, we skip the frame */
                is->audio_pkt_size = 0;
                break;
            }

            /* advance past the consumed compressed bytes */
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0)
                continue;
            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* advance the audio clock by the duration of the decoded data
               (2 bytes per sample times channel count) */
            n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * is->audio_st->codec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* flush packet (queued after a seek): reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->audio_st->codec);
            continue;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
1638
1639 /* get the current audio output buffer size, in samples. With SDL, we
1640 cannot have a precise information */
1641 static int audio_write_get_buf_size(VideoState *is)
1642 {
1643 return is->audio_buf_size - is->audio_buf_index;
1644 }
1645
1646
1647 /* prepare a new audio buffer */
1648 void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1649 {
1650 VideoState *is = opaque;
1651 int audio_size, len1;
1652 double pts;
1653
1654 audio_callback_time = av_gettime();
1655
1656 while (len > 0) {
1657 if (is->audio_buf_index >= is->audio_buf_size) {
1658 audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
1659 if (audio_size < 0) {
1660 /* if error, just output silence */
1661 is->audio_buf_size = 1024;
1662 memset(is->audio_buf, 0, is->audio_buf_size);
1663 } else {
1664 if (is->show_audio)
1665 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1666 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1667 pts);
1668 is->audio_buf_size = audio_size;
1669 }
1670 is->audio_buf_index = 0;
1671 }
1672 len1 = is->audio_buf_size - is->audio_buf_index;
1673 if (len1 > len)
1674 len1 = len;
1675 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1676 len -= len1;
1677 stream += len1;
1678 is->audio_buf_index += len1;
1679 }
1680 }
1681
/* open a given stream. Return 0 if OK.
   For audio this also opens the SDL audio device; for video and subtitle
   streams it spawns the corresponding decoding thread. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        /* hack for AC3. XXX: suppress that */
        if (enc->channels > 2)
            enc->channels = 2;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
    }

    /* propagate the command-line decoder options to the codec context */
    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_resilience= error_resilience;
    enc->error_concealment= error_concealment;
    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;
    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    /* per-type state: queues, clocks, decoding threads */
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        /* start delivering audio callbacks */
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);

        /* install the pts-tagging buffer callbacks (see my_get_buffer) */
        enc-> get_buffer= my_get_buffer;
        enc->release_buffer= my_release_buffer;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
1776
/* close a stream component: abort its packet queue, unblock and join its
   decoding thread, then close the codec and clear the stream references */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback thread before the queue is freed */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* codec is closed only after the decoding thread has been joined */
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
1844
1845 static void dump_stream_info(const AVFormatContext *s)
1846 {
1847 if (s->track != 0)
1848 fprintf(stderr, "Track: %d\n", s->track);
1849 if (s->title[0] != '\0')
1850 fprintf(stderr, "Title: %s\n", s->title);
1851 if (s->author[0] != '\0')
1852 fprintf(stderr, "Author: %s\n", s->author);
1853 if (s->copyright[0] != '\0')
1854 fprintf(stderr, "Copyright: %s\n", s->copyright);
1855 if (s->comment[0] != '\0')
1856 fprintf(stderr, "Comment: %s\n", s->comment);
1857 if (s->album[0] != '\0')
1858 fprintf(stderr, "Album: %s\n", s->album);
1859 if (s->year != 0)
1860 fprintf(stderr, "Year: %d\n", s->year);
1861 if (s->genre[0] != '\0')
1862 fprintf(stderr, "Genre: %s\n", s->genre);
1863 }
1864
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* called by the protocol layer during blocking I/O; returning non-zero
   aborts the pending operation so quit/close stays responsive */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
1873
/* this thread gets the stream from the disk or the network: it opens the
   input, selects and opens the audio/video streams, then demuxes packets
   into the per-stream queues until aborted. Also executes seek requests
   posted by the GUI (is->seek_req). */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* make blocking I/O abortable via decode_interrupt_cb */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* hints for raw/grabbing demuxers that cannot probe these */
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick the audio/video streams to play; the wanted_*_stream counters
       let the user skip to the n-th stream of each type */
    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
                video_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        /* no video: show the audio waveform instead */
        if (!display_disable)
            is->show_audio = 1;
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/play transitions to the demuxer */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if defined(CONFIG_RTSP_DEMUXER) || defined(CONFIG_MMSH_PROTOCOL)
        if (is->paused &&
                (!strcmp(ic->iformat->name, "rtsp") ||
                 (ic->pb && !strcmp(url_fileno(ic->pb)->prot->name, "mmsh")))) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        /* execute a pending seek: seek on one stream's time base, then
           flush all packet queues and inject flush packets */
        if (is->seek_req) {
            int stream_index= -1;
            int64_t seek_target= is->seek_pos;

            if     (is->   video_stream >= 0) stream_index= is->   video_stream;
            else if(is->   audio_stream >= 0) stream_index= is->   audio_stream;
            else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;

            if(stream_index>=0){
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
            }

            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queue are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
            url_feof(ic->pb)) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            /* transient read error: back off; hard error: stop demuxing */
            if (url_ferror(ic->pb) == 0) {
                SDL_Delay(100); /* wait for user event */
                continue;
            } else
                break;
        }
        /* dispatch the packet to the matching stream queue */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* on failure, tell the main loop to shut down */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2083
2084 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2085 {
2086 VideoState *is;
2087
2088 is = av_mallocz(sizeof(VideoState));
2089 if (!is)
2090 return NULL;
2091 av_strlcpy(is->filename, filename, sizeof(is->filename));
2092 is->iformat = iformat;
2093 is->ytop = 0;
2094 is->xleft = 0;
2095
2096 /* start video display */
2097 is->pictq_mutex = SDL_CreateMutex();
2098 is->pictq_cond = SDL_CreateCond();
2099
2100 is->subpq_mutex = SDL_CreateMutex();
2101 is->subpq_cond = SDL_CreateCond();
2102
2103 /* add the refresh timer to draw the picture */
2104 schedule_refresh(is, 40);
2105
2106 is->av_sync_type = av_sync_type;
2107 is->parse_tid = SDL_CreateThread(decode_thread, is);
2108 if (!is->parse_tid) {
2109 av_free(is);
2110 return NULL;
2111 }
2112 return is;
2113 }
2114
/* tear down a VideoState: stop the demux thread, free all SDL overlays
   and synchronization primitives.
   NOTE(review): "is" itself is not freed here — confirm the caller owns it */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
}
2136
/* switch to the next stream of the given type, wrapping around the
   stream list; for subtitles, -1 ("no subtitle") is a valid position */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle from (subtitles may start from the "off" state) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                /* wrap to "no subtitle" rather than back to stream 0 */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came full circle without finding another usable stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2185
2186
/* switch between windowed and fullscreen display by reopening the
   video surface with the new flag */
static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}
2196
2197 static void toggle_pause(void)
2198 {
2199 if (cur_stream)
2200 stream_pause(cur_stream);
2201 step = 0;
2202 }
2203
2204 static void step_to_next_frame(void)
2205 {
2206 if (cur_stream) {
2207 /* if the stream is paused unpause it, then step */
2208 if (cur_stream->paused)
2209 stream_pause(cur_stream);
2210 }
2211 step = 1;
2212 }
2213
2214 static void do_exit(void)
2215 {
2216 if (cur_stream) {
2217 stream_close(cur_stream);
2218 cur_stream = NULL;
2219 }
2220 if (show_status)
2221 printf("\n");
2222 SDL_Quit();
2223 exit(0);
2224 }
2225
2226 static void toggle_audio_display(void)
2227 {
2228 if (cur_stream) {
2229 cur_stream->show_audio = !cur_stream->show_audio;
2230 }
2231 }
2232
/* handle an event sent by the GUI: keyboard shortcuts, mouse seeking,
   window resize, and the custom FF_* events posted by the worker threads.
   Runs forever on the main thread; exits only via do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek by +/-10s or +/-60s */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* translate the time increment into a byte offset
                           using the stream bitrate (fallback constant) */
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            /* click position maps linearly to a fraction of the duration */
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by queue_picture(): overlays must be created here,
               on the main thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
2347
2348 static void opt_frame_size(const char *arg)
2349 {
2350 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2351 fprintf(stderr, "Incorrect frame size\n");
2352 exit(1);
2353 }
2354 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2355 fprintf(stderr, "Frame size must be a multiple of 2\n");
2356 exit(1);
2357 }
2358 }
2359
2360 static void opt_width(const char *arg)
2361 {
2362 screen_width = atoi(arg);
2363 if(screen_width<=0){
2364 fprintf(stderr, "invalid width\n");
2365 exit(1);
2366 }
2367 }
2368
2369 static void opt_height(const char *arg)
2370 {
2371 screen_height = atoi(arg);
2372 if(screen_height<=0){
2373 fprintf(stderr, "invalid height\n");
2374 exit(1);
2375 }
2376 }
2377
2378 static void opt_format(const char *arg)
2379 {
2380 file_iformat = av_find_input_format(arg);
2381 if (!file_iformat) {
2382 fprintf(stderr, "Unknown input format: %s\n", arg);
2383 exit(1);
2384 }
2385 }
2386
2387 static void opt_frame_pix_fmt(const char *arg)
2388 {
2389 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2390 }
2391
#ifdef CONFIG_RTSP_DEMUXER
/* -rtp_tcp option: restrict RTSP transport negotiation to RTP over TCP,
   useful when UDP is blocked by firewalls/NAT */
static void opt_rtp_tcp(void)
{
    /* only tcp protocol */
    rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
}
#endif
2399
2400 static void opt_sync(const char *arg)
2401 {
2402 if (!strcmp(arg, "audio"))
2403 av_sync_type = AV_SYNC_AUDIO_MASTER;
2404 else if (!strcmp(arg, "video"))
2405 av_sync_type = AV_SYNC_VIDEO_MASTER;
2406 else if (!strcmp(arg, "ext"))
2407 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2408 else {
2409 show_help();
2410 exit(1);
2411 }
2412 }
2413
2414 static void opt_seek(const char *arg)
2415 {
2416 start_time = parse_date(arg, 1);
2417 if (start_time == INT64_MIN) {
2418 fprintf(stderr, "Invalid duration specification: %s\n", arg);
2419 exit(1);
2420 }
2421 }
2422
/* parse the -debug option: enable codec debug output.
   Also raises the global libav log level to maximum so the debug
   messages actually get printed. */
static void opt_debug(const char *arg)
{
    av_log_level = 99;
    debug = atoi(arg);
}
2428
/* parse the -vismv option: motion-vector visualization mask passed to
   the decoder (debug_mv codec option) */
static void opt_vismv(const char *arg)
{
    debug_mv = atoi(arg);
}
2433
/* parse the -threads option: number of decoding threads.
   Without real thread support the value is still stored, but decoding
   falls back to thread emulation. */
static void opt_thread_count(const char *arg)
{
    thread_count= atoi(arg);
#if !defined(HAVE_THREADS)
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
}
2441
/* -h option: print the usage text and exit successfully */
static void opt_show_help(void)
{
    show_help();
    exit(0);
}
2447
/* Command line option table, consumed by parse_options() from cmdutils.
   HAS_ARG: option takes an argument; OPT_BOOL/OPT_INT: the union member
   points at a variable set directly; otherwise it is a callback.
   OPT_EXPERT entries are only shown in the "Advanced options" help section. */
const OptionDef options[] = {
    { "h", 0, {(void*)opt_show_help}, "show help" },
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    /* stream index selection; no help text on purpose (expert options) */
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
    /* NOTE(review): "&opt_seek" takes the function's address explicitly;
       equivalent to the bare function name used by the other callbacks */
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    /* decoder speed/quality trade-offs */
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
#ifdef CONFIG_RTSP_DEMUXER
    { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
#endif
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { NULL, },
};
2484
/* Print usage, the option tables (main options first, then the expert
   ones) and the interactive key bindings. */
void show_help(void)
{
    printf("usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    /* first call lists options without OPT_EXPERT, second the expert ones */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
2507
2508 void opt_input_file(const char *filename)
2509 {
2510 if (!strcmp(filename, "-"))
2511 filename = "pipe:";
2512 input_filename = filename;
2513 }
2514
/* Called from the main */
int main(int argc, char **argv)
{
    int flags;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    show_banner(program_name, program_birth_year);

    parse_options(argc, argv, options, opt_input_file);

    /* an input file is mandatory; bail out with the usage text otherwise */
    if (!input_filename) {
        show_help();
        exit(1);
    }

    /* -nodisp implies no video decoding at all */
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#ifdef HAVE_SDL_VIDEO_SIZE
        /* remember the desktop resolution for full-screen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event types we never handle so they do not wake the event loop */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet used to flush the decoder queues on seek; recognized
       by pointer comparison of its data field, not by the bytes themselves */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}