ffplay: use new avformat_open_* API.
[libav.git] / ffplay.c
1 /*
2 * ffplay : Simple Media Player based on the Libav libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/dict.h"
31 #include "libavutil/parseutils.h"
32 #include "libavutil/samplefmt.h"
33 #include "libavformat/avformat.h"
34 #include "libavdevice/avdevice.h"
35 #include "libswscale/swscale.h"
36 #include "libavcodec/audioconvert.h"
37 #include "libavutil/opt.h"
38 #include "libavcodec/avfft.h"
39
40 #if CONFIG_AVFILTER
41 # include "libavfilter/avfilter.h"
42 # include "libavfilter/avfiltergraph.h"
43 #endif
44
45 #include "cmdutils.h"
46
47 #include <SDL.h>
48 #include <SDL_thread.h>
49
50 #ifdef __MINGW32__
51 #undef main /* We don't want SDL to override our main() */
52 #endif
53
54 #include <unistd.h>
55 #include <assert.h>
56
/* identification used by cmdutils for banners and version output */
const char program_name[] = "ffplay";
const int program_birth_year = 2003;

/* demuxer queue limits: stop reading when the combined queues hold this much */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* multiplicative growth factor for the frame-skip ratio when video lags */
#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* scaler quality used by the non-avfilter conversion path */
static int sws_flags = SWS_BICUBIC;
85
/* Thread-safe FIFO of demuxed AVPackets, shared between the demuxer
   thread (producer) and the decoder threads (consumers). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list head / tail */
    int nb_packets;      /* number of queued packets */
    int size;            /* queued bytes, including per-node overhead */
    int abort_request;   /* when set, blocked readers return immediately */
    SDL_mutex *mutex;    /* protects all fields above */
    SDL_cond *cond;      /* signalled on put and on abort */
} PacketQueue;
94
/* sizes of the decoded-picture and subtitle ring buffers */
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts;          ///<presentation time stamp for this picture
    double target_clock; ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;         ///<byte position in file
    SDL_Overlay *bmp;    ///<SDL YUV overlay holding the pixel data
    int width, height;   /* source height & width */
    int allocated;       /* true once bmp has been (re)allocated for this size */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref; /* filtered frame reference backing this picture */
#endif
} VideoPicture;
111
/* One decoded subtitle queued for blending over the video. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles and display times */
} SubPicture;
116
/* which clock the other streams are slaved to */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
122
/* All state for one open media stream: demuxer context, per-stream
   decoders, packet/picture/subtitle queues and the clocks used for
   A/V synchronisation. Shared between the parse, video, subtitle,
   refresh and SDL audio-callback threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;   /* demuxer thread */
    SDL_Thread *video_tid;   /* video decoder thread */
    SDL_Thread *refresh_tid; /* thread pushing FF_REFRESH_EVENTs */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;       /* tells all threads to exit */
    int paused;
    int last_paused;
    int seek_req;            /* a seek has been requested */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;        /* index of the selected audio stream */

    int av_sync_type;        /* one of the AV_SYNC_* modes */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;      /* pts of the last decoded audio frame end */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;          /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples for visualisation */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;       /* FFT context for the spectrum display */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                /* current column of the scrolling spectrum */

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;      /* time at which the next frame is scheduled */
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos; ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* display area inside the SDL surface */

    PtsCorrectionContext pts_ctx;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter; ///<the last filter in the video chain
#endif

    float skip_frames;       /* current frame-skip ratio (>=1 drops frames) */
    float skip_frames_index;
    int refresh;             /* a refresh event is already pending */
} VideoState;
217
static void show_help(void);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* per-media-type stream index requested by the user; -1 = automatic */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1; /* -1 = decide from the container */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

/* spectrum display refresh period, in milliseconds */
static int rdftspeed=20;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time; /* av_gettime() at the start of the last SDL audio callback */

/* special packet used to signal decoders that a seek flushed the queues */
static AVPacket flush_pkt;

/* custom SDL events used for cross-thread signalling */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
284
285 /* packet queue handling */
/* packet queue handling */
/* Initialize a queue and prime it with the flush packet so decoders
   reset their state before the first real packet. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
293
/* Drop and free every queued packet; the queue stays usable afterwards. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;           /* save next before freeing the node */
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}
310
/* Free all queued packets and destroy the queue's synchronisation objects. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
317
/* Append a packet to the queue. Returns 0 on success, -1 on failure.
   The packet payload is made reference-safe with av_dup_packet (except
   for the special flush packet) so the caller's AVPacket may be reused. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    /* NOTE(review): on this failure the just-duplicated payload is not
       released here — confirm callers free the packet on error */
    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;


    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)

        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    /* account for the node overhead too, matching packet_queue_get */
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
349
/* Wake any blocked reader and make all subsequent gets return failure. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
360
/* return < 0 if aborted, 0 if no packet and > 0 if packet. */
/* If 'block' is non-zero, wait on the queue's condition variable until
   a packet arrives or the queue is aborted. */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            /* unlink the head node and hand its packet to the caller */
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            /* releases the mutex while waiting, reacquires before returning */
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}
396
397 static inline void fill_rectangle(SDL_Surface *screen,
398 int x, int y, int w, int h, int color)
399 {
400 SDL_Rect rect;
401 rect.x = x;
402 rect.y = y;
403 rect.w = w;
404 rect.h = h;
405 SDL_FillRect(screen, &rect, color);
406 }
407
/* Blend newp over oldp with alpha 'a' (0..255); 's' is an extra left
   shift applied to oldp so that pre-summed chroma values can be blended. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack the 32-bit ARGB word at byte pointer 's'. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up palette index *(s) in 'pal' and unpack it as A/Y/U/V. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack A/Y/U/V components into the 32-bit word at 'd'. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
436
437 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
438 {
439 int wrap, wrap3, width2, skip2;
440 int y, u, v, a, u1, v1, a1, w, h;
441 uint8_t *lum, *cb, *cr;
442 const uint8_t *p;
443 const uint32_t *pal;
444 int dstx, dsty, dstw, dsth;
445
446 dstw = av_clip(rect->w, 0, imgw);
447 dsth = av_clip(rect->h, 0, imgh);
448 dstx = av_clip(rect->x, 0, imgw - dstw);
449 dsty = av_clip(rect->y, 0, imgh - dsth);
450 lum = dst->data[0] + dsty * dst->linesize[0];
451 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
452 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
453
454 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
455 skip2 = dstx >> 1;
456 wrap = dst->linesize[0];
457 wrap3 = rect->pict.linesize[0];
458 p = rect->pict.data[0];
459 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
460
461 if (dsty & 1) {
462 lum += dstx;
463 cb += skip2;
464 cr += skip2;
465
466 if (dstx & 1) {
467 YUVA_IN(y, u, v, a, p, pal);
468 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
469 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
470 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
471 cb++;
472 cr++;
473 lum++;
474 p += BPP;
475 }
476 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
477 YUVA_IN(y, u, v, a, p, pal);
478 u1 = u;
479 v1 = v;
480 a1 = a;
481 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
482
483 YUVA_IN(y, u, v, a, p + BPP, pal);
484 u1 += u;
485 v1 += v;
486 a1 += a;
487 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
488 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
489 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
490 cb++;
491 cr++;
492 p += 2 * BPP;
493 lum += 2;
494 }
495 if (w) {
496 YUVA_IN(y, u, v, a, p, pal);
497 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
498 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
499 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
500 p++;
501 lum++;
502 }
503 p += wrap3 - dstw * BPP;
504 lum += wrap - dstw - dstx;
505 cb += dst->linesize[1] - width2 - skip2;
506 cr += dst->linesize[2] - width2 - skip2;
507 }
508 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
509 lum += dstx;
510 cb += skip2;
511 cr += skip2;
512
513 if (dstx & 1) {
514 YUVA_IN(y, u, v, a, p, pal);
515 u1 = u;
516 v1 = v;
517 a1 = a;
518 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
519 p += wrap3;
520 lum += wrap;
521 YUVA_IN(y, u, v, a, p, pal);
522 u1 += u;
523 v1 += v;
524 a1 += a;
525 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
526 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
527 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
528 cb++;
529 cr++;
530 p += -wrap3 + BPP;
531 lum += -wrap + 1;
532 }
533 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
534 YUVA_IN(y, u, v, a, p, pal);
535 u1 = u;
536 v1 = v;
537 a1 = a;
538 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539
540 YUVA_IN(y, u, v, a, p + BPP, pal);
541 u1 += u;
542 v1 += v;
543 a1 += a;
544 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
545 p += wrap3;
546 lum += wrap;
547
548 YUVA_IN(y, u, v, a, p, pal);
549 u1 += u;
550 v1 += v;
551 a1 += a;
552 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
553
554 YUVA_IN(y, u, v, a, p + BPP, pal);
555 u1 += u;
556 v1 += v;
557 a1 += a;
558 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
559
560 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
561 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
562
563 cb++;
564 cr++;
565 p += -wrap3 + 2 * BPP;
566 lum += -wrap + 2;
567 }
568 if (w) {
569 YUVA_IN(y, u, v, a, p, pal);
570 u1 = u;
571 v1 = v;
572 a1 = a;
573 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
574 p += wrap3;
575 lum += wrap;
576 YUVA_IN(y, u, v, a, p, pal);
577 u1 += u;
578 v1 += v;
579 a1 += a;
580 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
582 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
583 cb++;
584 cr++;
585 p += -wrap3 + BPP;
586 lum += -wrap + 1;
587 }
588 p += wrap3 + (wrap3 - dstw * BPP);
589 lum += wrap + (wrap - dstw - dstx);
590 cb += dst->linesize[1] - width2 - skip2;
591 cr += dst->linesize[2] - width2 - skip2;
592 }
593 /* handle odd height */
594 if (h) {
595 lum += dstx;
596 cb += skip2;
597 cr += skip2;
598
599 if (dstx & 1) {
600 YUVA_IN(y, u, v, a, p, pal);
601 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
602 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
603 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
604 cb++;
605 cr++;
606 lum++;
607 p += BPP;
608 }
609 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
610 YUVA_IN(y, u, v, a, p, pal);
611 u1 = u;
612 v1 = v;
613 a1 = a;
614 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615
616 YUVA_IN(y, u, v, a, p + BPP, pal);
617 u1 += u;
618 v1 += v;
619 a1 += a;
620 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
621 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
622 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
623 cb++;
624 cr++;
625 p += 2 * BPP;
626 lum += 2;
627 }
628 if (w) {
629 YUVA_IN(y, u, v, a, p, pal);
630 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
631 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
632 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
633 }
634 }
635 }
636
/* Release the decoded subtitle data owned by a queue slot. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
641
/* Display the picture at the read index of the picture queue: blend any
   due subtitle into the overlay, compute the letterboxed destination
   rectangle from the aspect ratio, and show the YUV overlay. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
        if (vp->picref->video->pixel_aspect.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* only blend once the subtitle's start time has been reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note: planes 1 and 2 are swapped to match the
                       overlay's chroma plane order */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        is->no_background = 0;
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    }
}
718
/* get the current audio output buffer size, in samples. With SDL, we
   cannot have a precise information */
/* (despite the comment above, the returned value is in bytes: the part
   of audio_buf not yet consumed by the SDL callback) */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
725
/* Mathematical modulo: unlike C's %, the result is non-negative for
   positive b. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return (r < 0) ? r + b : r;
}
734
735 static void video_audio_display(VideoState *s)
736 {
737 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
738 int ch, channels, h, h2, bgcolor, fgcolor;
739 int16_t time_diff;
740 int rdft_bits, nb_freq;
741
742 for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
743 ;
744 nb_freq= 1<<(rdft_bits-1);
745
746 /* compute display index : center on currently output samples */
747 channels = s->audio_st->codec->channels;
748 nb_display_channels = channels;
749 if (!s->paused) {
750 int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
751 n = 2 * channels;
752 delay = audio_write_get_buf_size(s);
753 delay /= n;
754
755 /* to be more precise, we take into account the time spent since
756 the last buffer computation */
757 if (audio_callback_time) {
758 time_diff = av_gettime() - audio_callback_time;
759 delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
760 }
761
762 delay += 2*data_used;
763 if (delay < data_used)
764 delay = data_used;
765
766 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
767 if(s->show_audio==1){
768 h= INT_MIN;
769 for(i=0; i<1000; i+=channels){
770 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
771 int a= s->sample_array[idx];
772 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
773 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
774 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
775 int score= a-d;
776 if(h<score && (b^c)<0){
777 h= score;
778 i_start= idx;
779 }
780 }
781 }
782
783 s->last_i_start = i_start;
784 } else {
785 i_start = s->last_i_start;
786 }
787
788 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
789 if(s->show_audio==1){
790 fill_rectangle(screen,
791 s->xleft, s->ytop, s->width, s->height,
792 bgcolor);
793
794 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
795
796 /* total height for one channel */
797 h = s->height / nb_display_channels;
798 /* graph height / 2 */
799 h2 = (h * 9) / 20;
800 for(ch = 0;ch < nb_display_channels; ch++) {
801 i = i_start + ch;
802 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
803 for(x = 0; x < s->width; x++) {
804 y = (s->sample_array[i] * h2) >> 15;
805 if (y < 0) {
806 y = -y;
807 ys = y1 - y;
808 } else {
809 ys = y1;
810 }
811 fill_rectangle(screen,
812 s->xleft + x, ys, 1, y,
813 fgcolor);
814 i += channels;
815 if (i >= SAMPLE_ARRAY_SIZE)
816 i -= SAMPLE_ARRAY_SIZE;
817 }
818 }
819
820 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
821
822 for(ch = 1;ch < nb_display_channels; ch++) {
823 y = s->ytop + ch * h;
824 fill_rectangle(screen,
825 s->xleft, y, s->width, 1,
826 fgcolor);
827 }
828 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
829 }else{
830 nb_display_channels= FFMIN(nb_display_channels, 2);
831 if(rdft_bits != s->rdft_bits){
832 av_rdft_end(s->rdft);
833 av_free(s->rdft_data);
834 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
835 s->rdft_bits= rdft_bits;
836 s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
837 }
838 {
839 FFTSample *data[2];
840 for(ch = 0;ch < nb_display_channels; ch++) {
841 data[ch] = s->rdft_data + 2*nb_freq*ch;
842 i = i_start + ch;
843 for(x = 0; x < 2*nb_freq; x++) {
844 double w= (x-nb_freq)*(1.0/nb_freq);
845 data[ch][x]= s->sample_array[i]*(1.0-w*w);
846 i += channels;
847 if (i >= SAMPLE_ARRAY_SIZE)
848 i -= SAMPLE_ARRAY_SIZE;
849 }
850 av_rdft_calc(s->rdft, data[ch]);
851 }
852 //least efficient way to do this, we should of course directly access it but its more than fast enough
853 for(y=0; y<s->height; y++){
854 double w= 1/sqrt(nb_freq);
855 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
856 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
857 + data[1][2*y+1]*data[1][2*y+1])) : a;
858 a= FFMIN(a,255);
859 b= FFMIN(b,255);
860 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
861
862 fill_rectangle(screen,
863 s->xpos, s->height-y, 1, 1,
864 fgcolor);
865 }
866 }
867 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
868 s->xpos++;
869 if(s->xpos >= s->width)
870 s->xpos= s->xleft;
871 }
872 }
873
/* Create or resize the SDL output surface. The size is taken, in order
   of preference, from the fullscreen mode, the user-requested size, the
   filter graph / codec dimensions, or a 640x480 fallback. Returns 0 on
   success, -1 if SDL cannot set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
923
924 /* display the current picture, if any */
925 static void video_display(VideoState *is)
926 {
927 if(!screen)
928 video_open(cur_stream);
929 if (is->audio_st && is->show_audio)
930 video_audio_display(is);
931 else if (is->video_st)
932 video_image_display(is);
933 }
934
/* Thread that periodically pushes FF_REFRESH_EVENT to the SDL event
   loop; is->refresh prevents queueing more than one event at a time.
   Runs until is->abort_request is set. */
static int refresh_thread(void *opaque)
{
    VideoState *is= opaque;
    while(!is->abort_request){
        SDL_Event event;
        event.type = FF_REFRESH_EVENT;
        event.user.data1 = opaque;
        if(!is->refresh){
            is->refresh=1;
            SDL_PushEvent(&event);
        }
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
    }
    return 0;
}
950
951 /* get the current audio clock value */
952 static double get_audio_clock(VideoState *is)
953 {
954 double pts;
955 int hw_buf_size, bytes_per_sec;
956 pts = is->audio_clock;
957 hw_buf_size = audio_write_get_buf_size(is);
958 bytes_per_sec = 0;
959 if (is->audio_st) {
960 bytes_per_sec = is->audio_st->codec->sample_rate *
961 2 * is->audio_st->codec->channels;
962 }
963 if (bytes_per_sec)
964 pts -= (double)hw_buf_size / bytes_per_sec;
965 return pts;
966 }
967
968 /* get the current video clock value */
969 static double get_video_clock(VideoState *is)
970 {
971 if (is->paused) {
972 return is->video_current_pts;
973 } else {
974 return is->video_current_pts_drift + av_gettime() / 1000000.0;
975 }
976 }
977
978 /* get the current external clock value */
979 static double get_external_clock(VideoState *is)
980 {
981 int64_t ti;
982 ti = av_gettime();
983 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
984 }
985
986 /* get the current master clock value */
987 static double get_master_clock(VideoState *is)
988 {
989 double val;
990
991 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
992 if (is->video_st)
993 val = get_video_clock(is);
994 else
995 val = get_audio_clock(is);
996 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
997 if (is->audio_st)
998 val = get_audio_clock(is);
999 else
1000 val = get_video_clock(is);
1001 } else {
1002 val = get_external_clock(is);
1003 }
1004 return val;
1005 }
1006
1007 /* seek in the stream */
1008 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1009 {
1010 if (!is->seek_req) {
1011 is->seek_pos = pos;
1012 is->seek_rel = rel;
1013 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1014 if (seek_by_bytes)
1015 is->seek_flags |= AVSEEK_FLAG_BYTE;
1016 is->seek_req = 1;
1017 }
1018 }
1019
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance frame_timer by the wall-clock time spent
           paused so frame scheduling continues seamlessly */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to the current wall clock */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1032
1033 static double compute_target_time(double frame_current_pts, VideoState *is)
1034 {
1035 double delay, sync_threshold, diff;
1036
1037 /* compute nominal delay */
1038 delay = frame_current_pts - is->frame_last_pts;
1039 if (delay <= 0 || delay >= 10.0) {
1040 /* if incorrect delay, use previous one */
1041 delay = is->frame_last_delay;
1042 } else {
1043 is->frame_last_delay = delay;
1044 }
1045 is->frame_last_pts = frame_current_pts;
1046
1047 /* update delay to follow master synchronisation source */
1048 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1049 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1050 /* if video is slave, we try to correct big delays by
1051 duplicating or deleting a frame */
1052 diff = get_video_clock(is) - get_master_clock(is);
1053
1054 /* skip or repeat frame. We take into account the
1055 delay to compute the threshold. I still don't know
1056 if it is the best guess */
1057 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1058 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1059 if (diff <= -sync_threshold)
1060 delay = 0;
1061 else if (diff >= sync_threshold)
1062 delay = 2 * delay;
1063 }
1064 }
1065 is->frame_timer += delay;
1066
1067 av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1068 delay, frame_current_pts, -diff);
1069
1070 return is->frame_timer;
1071 }
1072
/* called to display each frame */
/* Triggered by FF_REFRESH_EVENT: pops due pictures from the picture
   queue (dropping late ones when framedrop is enabled), retires expired
   subtitles, draws the display, and periodically prints the status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: keep it queued and wait for the next refresh */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* this frame is already late for its successor: raise the
               decoder's skip ratio and possibly drop it without display */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switch: drop every queued subtitle */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the subtitle once its end time has passed
                           or its successor's start time has been reached */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print the status line at most every 30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1213
/* Tear down a VideoState: stop the demux and refresh threads, release every
 * queued picture and the synchronisation primitives, then free the state
 * itself.  The individual component streams are expected to have been closed
 * already via stream_component_close(). */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    /* both threads poll abort_request, so this returns promptly */
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        /* drop the filter-graph buffer backing this picture, if any */
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* the cached swscale context exists only on the non-avfilter path */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1247
1248 static void do_exit(void)
1249 {
1250 if (cur_stream) {
1251 stream_close(cur_stream);
1252 cur_stream = NULL;
1253 }
1254 uninit_opts();
1255 #if CONFIG_AVFILTER
1256 avfilter_uninit();
1257 #endif
1258 if (show_status)
1259 printf("\n");
1260 SDL_Quit();
1261 av_log(NULL, AV_LOG_QUIET, "");
1262 exit(0);
1263 }
1264
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* release the previous overlay before (re)allocating at a new size */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* geometry and format come from the filter graph's output link */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    /* no filter graph: use the decoder's geometry directly */
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* wake queue_picture(), which blocks on pictq_cond until allocated != 0 */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1308
/**
 * Queue a decoded frame for display: wait for a free slot in the picture
 * queue, (re)allocate the SDL overlay from the main thread if the geometry
 * changed, copy/convert the frame into it and publish it to the refresh
 * code under pictq_mutex.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 when the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while the refresh thread is idle: raise the skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated (alloc_picture() sets
           vp->allocated and signals pictq_cond) */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        /* adopt the filter buffer reference carried in frame->opaque */
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 overlay stores V before U, hence the 1<->2 swap */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        /* the graph already outputs YUV420P (ffsink), so a plane copy suffices */
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* convert whatever the decoder produced to YUV420P via swscale */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1430
1431 /**
1432 * compute the exact PTS for the picture if it is omitted in the stream
1433 * @param pts1 the dts of the pkt / pts of the frame
1434 */
1435 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1436 {
1437 double frame_delay, pts;
1438
1439 pts = pts1;
1440
1441 if (pts != 0) {
1442 /* update video clock with pts, if present */
1443 is->video_clock = pts;
1444 } else {
1445 pts = is->video_clock;
1446 }
1447 /* update video clock for next frame */
1448 frame_delay = av_q2d(is->video_st->codec->time_base);
1449 /* for MPEG2, the frame can be repeated, so we update the
1450 clock accordingly */
1451 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1452 is->video_clock += frame_delay;
1453
1454 return queue_picture(is, src_frame, pts, pos);
1455 }
1456
1457 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1458 {
1459 int len1, got_picture, i;
1460
1461 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1462 return -1;
1463
1464 if (pkt->data == flush_pkt.data) {
1465 avcodec_flush_buffers(is->video_st->codec);
1466
1467 SDL_LockMutex(is->pictq_mutex);
1468 //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
1469 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1470 is->pictq[i].target_clock= 0;
1471 }
1472 while (is->pictq_size && !is->videoq.abort_request) {
1473 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1474 }
1475 is->video_current_pos = -1;
1476 SDL_UnlockMutex(is->pictq_mutex);
1477
1478 init_pts_correction(&is->pts_ctx);
1479 is->frame_last_pts = AV_NOPTS_VALUE;
1480 is->frame_last_delay = 0;
1481 is->frame_timer = (double)av_gettime() / 1000000.0;
1482 is->skip_frames = 1;
1483 is->skip_frames_index = 0;
1484 return 0;
1485 }
1486
1487 len1 = avcodec_decode_video2(is->video_st->codec,
1488 frame, &got_picture,
1489 pkt);
1490
1491 if (got_picture) {
1492 if (decoder_reorder_pts == -1) {
1493 *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1494 } else if (decoder_reorder_pts) {
1495 *pts = frame->pkt_pts;
1496 } else {
1497 *pts = frame->pkt_dts;
1498 }
1499
1500 if (*pts == AV_NOPTS_VALUE) {
1501 *pts = 0;
1502 }
1503
1504 is->skip_frames_index += 1;
1505 if(is->skip_frames_index >= is->skip_frames){
1506 is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1507 return 1;
1508 }
1509
1510 }
1511 return 0;
1512 }
1513
1514 #if CONFIG_AVFILTER
/* Private state of the ffplay source filter (input_filter). */
typedef struct {
    VideoState *is;    // owning player state; not owned by the filter
    AVFrame *frame;    // scratch frame reused for every decoded picture
    int use_dr1;       // nonzero when direct-rendering callbacks are installed
} FilterPriv;
1520
/* get_buffer callback installed when the decoder supports direct rendering:
 * the decoder writes straight into a buffer obtained from the filter graph,
 * avoiding a copy in input_request_frame().  Returns 0 on success, -1 when
 * no filter buffer could be obtained. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;
    int pixel_size;

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* pad the requested size to the codec's alignment plus an edge border
       on every side (unless the codec emulates edges itself) */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
    /* the ref advertises the visible size, not the padded allocation */
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* skip the edge border so data[] points at the visible area,
               with chroma planes shifted by their subsampling factors */
            ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    /* stash the ref so release/reget and input_request_frame can find it */
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1571
/* release_buffer callback: drop our reference on the filter buffer backing
 * this frame and clear the data pointers so the frame cannot be misused. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1577
1578 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1579 {
1580 AVFilterBufferRef *ref = pic->opaque;
1581
1582 if (pic->data[0] == NULL) {
1583 pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1584 return codec->get_buffer(codec, pic);
1585 }
1586
1587 if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1588 (codec->pix_fmt != ref->format)) {
1589 av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1590 return -1;
1591 }
1592
1593 pic->reordered_opaque = codec->reordered_opaque;
1594 if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1595 else pic->pkt_pts = AV_NOPTS_VALUE;
1596 return 0;
1597 }
1598
1599 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1600 {
1601 FilterPriv *priv = ctx->priv;
1602 AVCodecContext *codec;
1603 if(!opaque) return -1;
1604
1605 priv->is = opaque;
1606 codec = priv->is->video_st->codec;
1607 codec->opaque = ctx;
1608 if(codec->codec->capabilities & CODEC_CAP_DR1) {
1609 priv->use_dr1 = 1;
1610 codec->get_buffer = input_get_buffer;
1611 codec->release_buffer = input_release_buffer;
1612 codec->reget_buffer = input_reget_buffer;
1613 codec->thread_safe_callbacks = 1;
1614 }
1615
1616 priv->frame = avcodec_alloc_frame();
1617
1618 return 0;
1619 }
1620
1621 static void input_uninit(AVFilterContext *ctx)
1622 {
1623 FilterPriv *priv = ctx->priv;
1624 av_free(priv->frame);
1625 }
1626
/* request_frame callback: keep decoding packets from the video queue until a
 * displayable frame is produced, wrap it in a buffer reference and push it
 * down the output link. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* get_video_frame() returns 0 for "no frame yet" (free the packet and
       retry), 1 for a frame, negative on abort */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* direct rendering: the decoder wrote into a filter buffer already */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        /* copy the decoded planes into a fresh filter buffer */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    /* NOTE(review): pkt.pos is read after av_free_packet(); that call only
       resets data/size in this libav version, so pos is presumably still
       valid — confirm against libavcodec's av_free_packet() */
    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1659
1660 static int input_query_formats(AVFilterContext *ctx)
1661 {
1662 FilterPriv *priv = ctx->priv;
1663 enum PixelFormat pix_fmts[] = {
1664 priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1665 };
1666
1667 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1668 return 0;
1669 }
1670
1671 static int input_config_props(AVFilterLink *link)
1672 {
1673 FilterPriv *priv = link->src->priv;
1674 AVCodecContext *c = priv->is->video_st->codec;
1675
1676 link->w = c->width;
1677 link->h = c->height;
1678 link->time_base = priv->is->video_st->time_base;
1679
1680 return 0;
1681 }
1682
/* The ffplay source filter: feeds decoded frames from the video stream into
 * the filter graph.  It has no inputs and a single video output pad whose
 * request_frame pulls from the packet queue. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    /* NULL-name entry terminates each pad list */
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1701
1702 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1703 {
1704 char sws_flags_str[128];
1705 int ret;
1706 FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1707 AVFilterContext *filt_src = NULL, *filt_out = NULL;
1708 snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1709 graph->scale_sws_opts = av_strdup(sws_flags_str);
1710
1711 if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1712 NULL, is, graph)) < 0)
1713 goto the_end;
1714 if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1715 NULL, &ffsink_ctx, graph)) < 0)
1716 goto the_end;
1717
1718 if(vfilters) {
1719 AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1720 AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
1721
1722 outputs->name = av_strdup("in");
1723 outputs->filter_ctx = filt_src;
1724 outputs->pad_idx = 0;
1725 outputs->next = NULL;
1726
1727 inputs->name = av_strdup("out");
1728 inputs->filter_ctx = filt_out;
1729 inputs->pad_idx = 0;
1730 inputs->next = NULL;
1731
1732 if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1733 goto the_end;
1734 av_freep(&vfilters);
1735 } else {
1736 if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1737 goto the_end;
1738 }
1739
1740 if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1741 goto the_end;
1742
1743 is->out_video_filter = filt_out;
1744 the_end:
1745 return ret;
1746 }
1747
1748 #endif /* CONFIG_AVFILTER */
1749
/* Video decoding thread: pulls frames (directly, or through the avfilter
 * graph when enabled), rescales their timestamps to seconds and hands them
 * to output_picture2() for queuing. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        /* poll every 10ms while playback is paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            frame->opaque = picref;
        }

        /* rescale from the graph's time base to the stream time base.
           NOTE(review): tb and pts_int appear to rely on
           get_filtered_video_frame() setting them even when no picref is
           returned — confirm against that function's definition */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no displayable frame this round (flush/skip) */
        if (!ret)
            continue;

        /* stream time-base units -> seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts, pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after every displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1824
1825 static int subtitle_thread(void *arg)
1826 {
1827 VideoState *is = arg;
1828 SubPicture *sp;
1829 AVPacket pkt1, *pkt = &pkt1;
1830 int len1, got_subtitle;
1831 double pts;
1832 int i, j;
1833 int r, g, b, y, u, v, a;
1834
1835 for(;;) {
1836 while (is->paused && !is->subtitleq.abort_request) {
1837 SDL_Delay(10);
1838 }
1839 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1840 break;
1841
1842 if(pkt->data == flush_pkt.data){
1843 avcodec_flush_buffers(is->subtitle_st->codec);
1844 continue;
1845 }
1846 SDL_LockMutex(is->subpq_mutex);
1847 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1848 !is->subtitleq.abort_request) {
1849 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1850 }
1851 SDL_UnlockMutex(is->subpq_mutex);
1852
1853 if (is->subtitleq.abort_request)
1854 goto the_end;
1855
1856 sp = &is->subpq[is->subpq_windex];
1857
1858 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1859 this packet, if any */
1860 pts = 0;
1861 if (pkt->pts != AV_NOPTS_VALUE)
1862 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1863
1864 len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1865 &sp->sub, &got_subtitle,
1866 pkt);
1867 if (got_subtitle && sp->sub.format == 0) {
1868 sp->pts = pts;
1869
1870 for (i = 0; i < sp->sub.num_rects; i++)
1871 {
1872 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1873 {
1874 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1875 y = RGB_TO_Y_CCIR(r, g, b);
1876 u = RGB_TO_U_CCIR(r, g, b, 0);
1877 v = RGB_TO_V_CCIR(r, g, b, 0);
1878 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1879 }
1880 }
1881
1882 /* now we can update the picture count */
1883 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1884 is->subpq_windex = 0;
1885 SDL_LockMutex(is->subpq_mutex);
1886 is->subpq_size++;
1887 SDL_UnlockMutex(is->subpq_mutex);
1888 }
1889 av_free_packet(pkt);
1890 }
1891 the_end:
1892 return 0;
1893 }
1894
1895 /* copy samples for viewing in editor window */
1896 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1897 {
1898 int size, len, channels;
1899
1900 channels = is->audio_st->codec->channels;
1901
1902 size = samples_size / sizeof(short);
1903 while (size > 0) {
1904 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1905 if (len > size)
1906 len = size;
1907 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1908 samples += len;
1909 is->sample_array_index += len;
1910 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1911 is->sample_array_index = 0;
1912 size -= len;
1913 }
1914 }
1915
1916 /* return the new audio buffer size (samples can be added or deleted
1917 to get better sync if video or external master clock) */
1918 static int synchronize_audio(VideoState *is, short *samples,
1919 int samples_size1, double pts)
1920 {
1921 int n, samples_size;
1922 double ref_clock;
1923
1924 n = 2 * is->audio_st->codec->channels;
1925 samples_size = samples_size1;
1926
1927 /* if not master, then we try to remove or add samples to correct the clock */
1928 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1929 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1930 double diff, avg_diff;
1931 int wanted_size, min_size, max_size, nb_samples;
1932
1933 ref_clock = get_master_clock(is);
1934 diff = get_audio_clock(is) - ref_clock;
1935
1936 if (diff < AV_NOSYNC_THRESHOLD) {
1937 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1938 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1939 /* not enough measures to have a correct estimate */
1940 is->audio_diff_avg_count++;
1941 } else {
1942 /* estimate the A-V difference */
1943 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1944
1945 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1946 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1947 nb_samples = samples_size / n;
1948
1949 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1950 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1951 if (wanted_size < min_size)
1952 wanted_size = min_size;
1953 else if (wanted_size > max_size)
1954 wanted_size = max_size;
1955
1956 /* add or remove samples to correction the synchro */
1957 if (wanted_size < samples_size) {
1958 /* remove samples */
1959 samples_size = wanted_size;
1960 } else if (wanted_size > samples_size) {
1961 uint8_t *samples_end, *q;
1962 int nb;
1963
1964 /* add samples */
1965 nb = (samples_size - wanted_size);
1966 samples_end = (uint8_t *)samples + samples_size - n;
1967 q = samples_end + n;
1968 while (nb > 0) {
1969 memcpy(q, samples_end, n);
1970 q += n;
1971 nb -= n;
1972 }
1973 samples_size = wanted_size;
1974 }
1975 }
1976 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1977 diff, avg_diff, samples_size - samples_size1,
1978 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1979 }
1980 } else {
1981 /* too big difference : may be initial PTS errors, so
1982 reset A-V filter */
1983 is->audio_diff_avg_count = 0;
1984 is->audio_diff_cum = 0;
1985 }
1986 }
1987
1988 return samples_size;
1989 }
1990
/* decode one audio frame and returns its uncompressed size.
   Consumes packets from the audio queue; audio_pkt_temp acts as a cursor
   into the current packet since one packet can hold several frames.  The
   decoded S16 data is left in is->audio_buf.  Returns -1 when paused or
   aborted. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;   /* cursor into the current packet */
    AVPacket *pkt = &is->audio_pkt;             /* packet currently being consumed */
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                         pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance the cursor by the bytes the decoder consumed */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the S16 converter lazily whenever the decoder's
               output format changes */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (interleaved S16) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of the decoded data */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#ifdef DEBUG
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet signals a seek: reset the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2093
2094 /* prepare a new audio buffer */
2095 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2096 {
2097 VideoState *is = opaque;
2098 int audio_size, len1;
2099 double pts;
2100
2101 audio_callback_time = av_gettime();
2102
2103 while (len > 0) {
2104 if (is->audio_buf_index >= is->audio_buf_size) {
2105 audio_size = audio_decode_frame(is, &pts);
2106 if (audio_size < 0) {
2107 /* if error, just output silence */
2108 is->audio_buf = is->audio_buf1;
2109 is->audio_buf_size = 1024;
2110 memset(is->audio_buf, 0, is->audio_buf_size);
2111 } else {
2112 if (is->show_audio)
2113 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2114 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2115 pts);
2116 is->audio_buf_size = audio_size;
2117 }
2118 is->audio_buf_index = 0;
2119 }
2120 len1 = is->audio_buf_size - is->audio_buf_index;
2121 if (len1 > len)
2122 len1 = len;
2123 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2124 len -= len1;
2125 stream += len1;
2126 is->audio_buf_index += len1;
2127 }
2128 }
2129
/* open a given stream. Return 0 if OK.
   Configures the decoder from the command line option globals, opens it,
   and for audio also opens the SDL audio device; finally records the stream
   in the VideoState and starts the matching decode thread. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output: downmix to at most 2 channels */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* apply the decoding options held in command line option globals;
       these must be set before avcodec_open() */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avctx->thread_count= thread_count;

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        /* audio is driven by the SDL callback; start the device */
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2227
/* Shut down the decoder, packet queue and helper thread attached to the
 * stream at stream_index, then mark the corresponding slot in *is unused.
 * Safe to call with an out-of-range index (returns immediately).
 * Teardown order matters: abort the queue first so the consumer thread
 * unblocks, then join it, then free the queue. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* stop the SDL audio callback before destroying the queue it reads */
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        /* drop any sample-format conversion context built for playback */
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* tell the demuxer to stop delivering packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2299
2300 /* since we have only one decoding thread, we can use a global
2301 variable instead of a thread local variable */
2302 static VideoState *global_video_state;
2303
2304 static int decode_interrupt_cb(void)
2305 {
2306 return (global_video_state && global_video_state->abort_request);
2307 }
2308
/* this thread gets the stream from the disk or the network */
/* Demuxer thread: opens the input, probes and selects streams, spawns the
 * per-stream decoder components, then loops reading packets into the audio/
 * video/subtitle queues until abort. Always returns 0; errors are reported
 * to the GUI thread via an FF_QUIT_EVENT. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic = NULL;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    int eof=0;
    int pkt_in_play_range = 0;
    AVDictionaryEntry *t;

    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* publish this state so decode_interrupt_cb can abort blocking I/O */
    global_video_state = is;
    avio_set_interrupt_cb(decode_interrupt_cb);

    err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    /* any option left in format_opts was not consumed by the demuxer */
    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    /* Set AVCodecContext options so they will be seen by av_find_stream_info() */
    for (i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *dec = ic->streams[i]->codec;
        switch (dec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_AUDIO],
                             AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
                             NULL);
            break;
        case AVMEDIA_TYPE_VIDEO:
            set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_VIDEO],
                             AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
                             NULL);
            break;
        }
    }

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* -1 means "auto": seek by bytes only for formats with timestamp gaps */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything by default; stream_component_open re-enables
       the streams actually selected below */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: fall back to the audio visualization display */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop: service pause/seek requests, keep queues filled */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush the queues and inject flush_pkt so the decoders
                   reset their state at the new position */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* feed an empty packet so the video decoder flushes its
               buffered frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
                eof=1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    avio_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2569
2570 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2571 {
2572 VideoState *is;
2573
2574 is = av_mallocz(sizeof(VideoState));
2575 if (!is)
2576 return NULL;
2577 av_strlcpy(is->filename, filename, sizeof(is->filename));
2578 is->iformat = iformat;
2579 is->ytop = 0;
2580 is->xleft = 0;
2581
2582 /* start video display */
2583 is->pictq_mutex = SDL_CreateMutex();
2584 is->pictq_cond = SDL_CreateCond();
2585
2586 is->subpq_mutex = SDL_CreateMutex();
2587 is->subpq_cond = SDL_CreateCond();
2588
2589 is->av_sync_type = av_sync_type;
2590 is->parse_tid = SDL_CreateThread(decode_thread, is);
2591 if (!is->parse_tid) {
2592 av_free(is);
2593 return NULL;
2594 }
2595 return is;
2596 }
2597
/* Switch to the next usable stream of the given codec_type, wrapping around
 * the stream list. For subtitles, cycling past the last stream selects
 * "no subtitle" (index -1) instead of wrapping to 0. Closes the old
 * component and opens the newly selected one. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* audio/video need an active stream to cycle from; subtitles may
       start from the "disabled" state (-1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* cycled all the way around without finding another candidate */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2646
2647
2648 static void toggle_full_screen(void)
2649 {
2650 is_full_screen = !is_full_screen;
2651 video_open(cur_stream);
2652 }
2653
2654 static void toggle_pause(void)
2655 {
2656 if (cur_stream)
2657 stream_pause(cur_stream);
2658 step = 0;
2659 }
2660
2661 static void step_to_next_frame(void)
2662 {
2663 if (cur_stream) {
2664 /* if the stream is paused unpause it, then step */
2665 if (cur_stream->paused)
2666 stream_pause(cur_stream);
2667 }
2668 step = 1;
2669 }
2670
2671 static void toggle_audio_display(void)
2672 {
2673 if (cur_stream) {
2674 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2675 cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2676 fill_rectangle(screen,
2677 cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2678 bgcolor);
2679 SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2680 }
2681 }
2682
/* handle an event sent by the GUI */
/* Main-thread event loop: dispatches SDL keyboard/mouse/resize events and
 * the FF_* user events posted by the worker threads. Never returns; exit
 * happens via do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                /* relative seek; incr is seconds, or is scaled to bytes
                   below when seeking by byte position */
                if (cur_stream) {
                    if (seek_by_bytes) {
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = avio_tell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fall through: click and drag share the seek-to-fraction code */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            /* map the x coordinate to a fraction of the file and seek there */
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  avio_size(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* video thread asked the main thread to (re)create the overlay */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2829
2830 static int opt_frame_size(const char *opt, const char *arg)
2831 {
2832 if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2833 fprintf(stderr, "Incorrect frame size\n");
2834 return AVERROR(EINVAL);
2835 }
2836 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2837 fprintf(stderr, "Frame size must be a multiple of 2\n");
2838 return AVERROR(EINVAL);
2839 }
2840 return 0;
2841 }
2842
2843 static int opt_width(const char *opt, const char *arg)
2844 {
2845 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2846 return 0;
2847 }
2848
2849 static int opt_height(const char *opt, const char *arg)
2850 {
2851 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2852 return 0;
2853 }
2854
2855 static int opt_format(const char *opt, const char *arg)
2856 {
2857 file_iformat = av_find_input_format(arg);
2858 if (!file_iformat) {
2859 fprintf(stderr, "Unknown input format: %s\n", arg);
2860 return AVERROR(EINVAL);
2861 }
2862 return 0;
2863 }
2864
2865 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2866 {
2867 frame_pix_fmt = av_get_pix_fmt(arg);
2868 return 0;
2869 }
2870
2871 static int opt_sync(const char *opt, const char *arg)
2872 {
2873 if (!strcmp(arg, "audio"))
2874 av_sync_type = AV_SYNC_AUDIO_MASTER;
2875 else if (!strcmp(arg, "video"))
2876 av_sync_type = AV_SYNC_VIDEO_MASTER;
2877 else if (!strcmp(arg, "ext"))
2878 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2879 else {
2880 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2881 exit(1);
2882 }
2883 return 0;
2884 }
2885
2886 static int opt_seek(const char *opt, const char *arg)
2887 {
2888 start_time = parse_time_or_die(opt, arg, 1);
2889 return 0;
2890 }
2891
2892 static int opt_duration(const char *opt, const char *arg)
2893 {
2894 duration = parse_time_or_die(opt, arg, 1);
2895 return 0;
2896 }
2897
2898 static int opt_debug(const char *opt, const char *arg)
2899 {
2900 av_log_set_level(99);
2901 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2902 return 0;
2903 }
2904
2905 static int opt_vismv(const char *opt, const char *arg)
2906 {
2907 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2908 return 0;
2909 }
2910
2911 static int opt_thread_count(const char *opt, const char *arg)
2912 {
2913 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2914 #if !HAVE_THREADS
2915 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2916 #endif
2917 return 0;
2918 }
2919
/* Command-line option table consumed by cmdutils' parse_options():
 * {name, flags, handler-or-variable, help text, argument name}.
 * OPT_BOOL/OPT_INT entries point directly at the file-scope variable;
 * HAS_ARG entries without a type flag call the opt_* handler above. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
    { NULL, },
};
2967
/* Print the one-line usage summary to stdout. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
2974
/* Print the full -h help text: usage, the option tables (main then
 * advanced), the AVOption listings for codec/format (and swscale when
 * avfilter is disabled), and the interactive key bindings. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3008
3009 static void opt_input_file(const char *filename)
3010 {
3011 if (input_filename) {
3012 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3013 filename, input_filename);
3014 exit(1);
3015 }
3016 if (!strcmp(filename, "-"))
3017 filename = "pipe:";
3018 input_filename = filename;
3019 }
3020
/* Called from the main */
/* Entry point: register libav components, parse the command line,
 * initialize SDL, start the player threads via stream_open() and hand
 * control to the SDL event loop (which never returns). */
int main(int argc, char **argv)
{
    int flags;

    av_log_set_flags(AV_LOG_SKIP_REPEATED);

    /* register all codecs, demux and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    init_opts();

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop resolution for full-screen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event classes ffplay never handles to keep the queue small */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet queued after a seek; decoders flush when they see it */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}