build: rework rules for things in the tools dir
[libav.git] / ffplay.c
1 /*
2 * ffplay : Simple Media Player based on the Libav libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "ffplay";
59 const int program_birth_year = 2003;
60
/* Demuxer buffering thresholds (use sites are in the read loop, not shown
   in this chunk). */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* growth factor applied to skip_frames each time a frame misses its
   target clock (see video_refresh_timer) */
#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* default scaler quality for the non-avfilter conversion path */
static int sws_flags = SWS_BICUBIC;
/* FIFO of demuxed AVPackets, guarded by an SDL mutex/cond pair so it can
   be filled and drained from different threads. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list, head and tail */
    int nb_packets;
    int size;           /* sum of queued payload bytes + per-node overhead */
    int abort_request;  /* set by packet_queue_abort() to wake blocked readers */
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;
95
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One entry of the decoded-picture ring buffer (VideoState.pictq). */
typedef struct VideoPicture {
    double pts; ///<presentation time stamp for this picture
    double target_clock; ///<av_gettime() time at which this should be displayed ideally
    int64_t pos; ///<byte position in file
    SDL_Overlay *bmp; ///<SDL YUV overlay carrying the pixel data
    int width, height; /* source height & width */
    int allocated; /* NOTE(review): presumably nonzero once bmp is usable -- confirm at alloc sites (outside this chunk) */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref; ///<frame reference from the filter graph (avfilter path only)
#endif
} VideoPicture;
112
/* One queued subtitle together with its presentation time. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

/* which clock drives A/V synchronisation (VideoState.av_sync_type) */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
123
/* All per-file playback state: one instance per opened stream, shared by the
   demux, decode, refresh and SDL-audio-callback threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;   /* demux/read thread */
    SDL_Thread *video_tid;   /* video decode thread */
    SDL_Thread *refresh_tid; /* thread pushing FF_REFRESH_EVENTs (refresh_thread) */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    /* seek request, consumed by the read thread */
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type; /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* audio clock / drift estimation state */
    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf; /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    /* audio visualization (waveform / spectrogram) state */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos; /* current spectrogram column */

    /* subtitle stream state and its picture ring buffer */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos; ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;

    PtsCorrectionContext pts_ctx;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter; ///<the last filter in the video chain
#endif

    /* frame-dropping bookkeeping (see video_refresh_timer) */
    float skip_frames;
    float skip_frames_index;
    int refresh;
} VideoState;
218
static void show_help(void);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* per-media-type stream index requested by the user; -1 = auto-select */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1; /* -1 = decide automatically per container */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20; /* spectrogram refresh period, ms (see refresh_thread) */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time; /* av_gettime() at last SDL audio callback */

/* sentinel packet pushed through the queues to signal a flush after seeking */
static AVPacket flush_pkt;

/* custom SDL user events */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
286 /* packet queue handling */
287 static void packet_queue_init(PacketQueue *q)
288 {
289 memset(q, 0, sizeof(PacketQueue));
290 q->mutex = SDL_CreateMutex();
291 q->cond = SDL_CreateCond();
292 packet_queue_put(q, &flush_pkt);
293 }
294
295 static void packet_queue_flush(PacketQueue *q)
296 {
297 AVPacketList *pkt, *pkt1;
298
299 SDL_LockMutex(q->mutex);
300 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
301 pkt1 = pkt->next;
302 av_free_packet(&pkt->pkt);
303 av_freep(&pkt);
304 }
305 q->last_pkt = NULL;
306 q->first_pkt = NULL;
307 q->nb_packets = 0;
308 q->size = 0;
309 SDL_UnlockMutex(q->mutex);
310 }
311
312 static void packet_queue_end(PacketQueue *q)
313 {
314 packet_queue_flush(q);
315 SDL_DestroyMutex(q->mutex);
316 SDL_DestroyCond(q->cond);
317 }
318
319 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
320 {
321 AVPacketList *pkt1;
322
323 /* duplicate the packet */
324 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
325 return -1;
326
327 pkt1 = av_malloc(sizeof(AVPacketList));
328 if (!pkt1)
329 return -1;
330 pkt1->pkt = *pkt;
331 pkt1->next = NULL;
332
333
334 SDL_LockMutex(q->mutex);
335
336 if (!q->last_pkt)
337
338 q->first_pkt = pkt1;
339 else
340 q->last_pkt->next = pkt1;
341 q->last_pkt = pkt1;
342 q->nb_packets++;
343 q->size += pkt1->pkt.size + sizeof(*pkt1);
344 /* XXX: should duplicate packet data in DV case */
345 SDL_CondSignal(q->cond);
346
347 SDL_UnlockMutex(q->mutex);
348 return 0;
349 }
350
351 static void packet_queue_abort(PacketQueue *q)
352 {
353 SDL_LockMutex(q->mutex);
354
355 q->abort_request = 1;
356
357 SDL_CondSignal(q->cond);
358
359 SDL_UnlockMutex(q->mutex);
360 }
361
362 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
363 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
364 {
365 AVPacketList *pkt1;
366 int ret;
367
368 SDL_LockMutex(q->mutex);
369
370 for(;;) {
371 if (q->abort_request) {
372 ret = -1;
373 break;
374 }
375
376 pkt1 = q->first_pkt;
377 if (pkt1) {
378 q->first_pkt = pkt1->next;
379 if (!q->first_pkt)
380 q->last_pkt = NULL;
381 q->nb_packets--;
382 q->size -= pkt1->pkt.size + sizeof(*pkt1);
383 *pkt = pkt1->pkt;
384 av_free(pkt1);
385 ret = 1;
386 break;
387 } else if (!block) {
388 ret = 0;
389 break;
390 } else {
391 SDL_CondWait(q->cond, q->mutex);
392 }
393 }
394 SDL_UnlockMutex(q->mutex);
395 return ret;
396 }
397
398 static inline void fill_rectangle(SDL_Surface *screen,
399 int x, int y, int w, int h, int color)
400 {
401 SDL_Rect rect;
402 rect.x = x;
403 rect.y = y;
404 rect.w = w;
405 rect.h = h;
406 SDL_FillRect(screen, &rect, color);
407 }
408
/* Blend newp over oldp with alpha a; s scales the blend when several
   alpha-weighted samples were accumulated (chroma subsampling). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit pixel at s into r/g/b/a (a in bits 24-31, r 16-23,
   g 8-15, b 0-7). */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit index at s and unpack it into
   y/u/v/a (entries packed as a<<24 | y<<16 | u<<8 | v). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y/u/v/a back into one 32-bit pixel at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

/* bytes per source pixel: subtitle bitmaps are 8-bit palette indices */
#define BPP 1
437
438 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
439 {
440 int wrap, wrap3, width2, skip2;
441 int y, u, v, a, u1, v1, a1, w, h;
442 uint8_t *lum, *cb, *cr;
443 const uint8_t *p;
444 const uint32_t *pal;
445 int dstx, dsty, dstw, dsth;
446
447 dstw = av_clip(rect->w, 0, imgw);
448 dsth = av_clip(rect->h, 0, imgh);
449 dstx = av_clip(rect->x, 0, imgw - dstw);
450 dsty = av_clip(rect->y, 0, imgh - dsth);
451 lum = dst->data[0] + dsty * dst->linesize[0];
452 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
453 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
454
455 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
456 skip2 = dstx >> 1;
457 wrap = dst->linesize[0];
458 wrap3 = rect->pict.linesize[0];
459 p = rect->pict.data[0];
460 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
461
462 if (dsty & 1) {
463 lum += dstx;
464 cb += skip2;
465 cr += skip2;
466
467 if (dstx & 1) {
468 YUVA_IN(y, u, v, a, p, pal);
469 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
470 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
471 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
472 cb++;
473 cr++;
474 lum++;
475 p += BPP;
476 }
477 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
478 YUVA_IN(y, u, v, a, p, pal);
479 u1 = u;
480 v1 = v;
481 a1 = a;
482 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
483
484 YUVA_IN(y, u, v, a, p + BPP, pal);
485 u1 += u;
486 v1 += v;
487 a1 += a;
488 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
489 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
490 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
491 cb++;
492 cr++;
493 p += 2 * BPP;
494 lum += 2;
495 }
496 if (w) {
497 YUVA_IN(y, u, v, a, p, pal);
498 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
499 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
500 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
501 p++;
502 lum++;
503 }
504 p += wrap3 - dstw * BPP;
505 lum += wrap - dstw - dstx;
506 cb += dst->linesize[1] - width2 - skip2;
507 cr += dst->linesize[2] - width2 - skip2;
508 }
509 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
510 lum += dstx;
511 cb += skip2;
512 cr += skip2;
513
514 if (dstx & 1) {
515 YUVA_IN(y, u, v, a, p, pal);
516 u1 = u;
517 v1 = v;
518 a1 = a;
519 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
520 p += wrap3;
521 lum += wrap;
522 YUVA_IN(y, u, v, a, p, pal);
523 u1 += u;
524 v1 += v;
525 a1 += a;
526 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
527 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
528 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
529 cb++;
530 cr++;
531 p += -wrap3 + BPP;
532 lum += -wrap + 1;
533 }
534 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
535 YUVA_IN(y, u, v, a, p, pal);
536 u1 = u;
537 v1 = v;
538 a1 = a;
539 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540
541 YUVA_IN(y, u, v, a, p + BPP, pal);
542 u1 += u;
543 v1 += v;
544 a1 += a;
545 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
546 p += wrap3;
547 lum += wrap;
548
549 YUVA_IN(y, u, v, a, p, pal);
550 u1 += u;
551 v1 += v;
552 a1 += a;
553 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
554
555 YUVA_IN(y, u, v, a, p + BPP, pal);
556 u1 += u;
557 v1 += v;
558 a1 += a;
559 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
560
561 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
562 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
563
564 cb++;
565 cr++;
566 p += -wrap3 + 2 * BPP;
567 lum += -wrap + 2;
568 }
569 if (w) {
570 YUVA_IN(y, u, v, a, p, pal);
571 u1 = u;
572 v1 = v;
573 a1 = a;
574 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
575 p += wrap3;
576 lum += wrap;
577 YUVA_IN(y, u, v, a, p, pal);
578 u1 += u;
579 v1 += v;
580 a1 += a;
581 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
582 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
583 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
584 cb++;
585 cr++;
586 p += -wrap3 + BPP;
587 lum += -wrap + 1;
588 }
589 p += wrap3 + (wrap3 - dstw * BPP);
590 lum += wrap + (wrap - dstw - dstx);
591 cb += dst->linesize[1] - width2 - skip2;
592 cr += dst->linesize[2] - width2 - skip2;
593 }
594 /* handle odd height */
595 if (h) {
596 lum += dstx;
597 cb += skip2;
598 cr += skip2;
599
600 if (dstx & 1) {
601 YUVA_IN(y, u, v, a, p, pal);
602 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
603 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
604 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
605 cb++;
606 cr++;
607 lum++;
608 p += BPP;
609 }
610 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
611 YUVA_IN(y, u, v, a, p, pal);
612 u1 = u;
613 v1 = v;
614 a1 = a;
615 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616
617 YUVA_IN(y, u, v, a, p + BPP, pal);
618 u1 += u;
619 v1 += v;
620 a1 += a;
621 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
622 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
623 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
624 cb++;
625 cr++;
626 p += 2 * BPP;
627 lum += 2;
628 }
629 if (w) {
630 YUVA_IN(y, u, v, a, p, pal);
631 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
632 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
633 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
634 }
635 }
636 }
637
638 static void free_subpicture(SubPicture *sp)
639 {
640 avsubtitle_free(&sp->sub);
641 }
642
/* Blit the current picture-queue entry to the screen: blend any due
   subtitle into the overlay, then display it letterboxed/pillarboxed to
   preserve the aspect ratio inside the window. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
        if (vp->picref->video->pixel_aspect.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        /* prefer the container's aspect, then the codec's, else unknown */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's start time has been reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note the 1<->2 plane swap: the SDL overlay's chroma
                       plane order differs from AVPicture's */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the picture into the window, centered, even-sized */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        is->no_background = 0;
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    }
}
719
720 /* get the current audio output buffer size, in samples. With SDL, we
721 cannot have a precise information */
722 static int audio_write_get_buf_size(VideoState *is)
723 {
724 return is->audio_buf_size - is->audio_buf_index;
725 }
726
/* Mathematical modulo: result is always in [0, b) for b > 0, unlike the
   C '%' operator whose sign follows the dividend. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
735
736 static void video_audio_display(VideoState *s)
737 {
738 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
739 int ch, channels, h, h2, bgcolor, fgcolor;
740 int16_t time_diff;
741 int rdft_bits, nb_freq;
742
743 for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
744 ;
745 nb_freq= 1<<(rdft_bits-1);
746
747 /* compute display index : center on currently output samples */
748 channels = s->audio_st->codec->channels;
749 nb_display_channels = channels;
750 if (!s->paused) {
751 int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
752 n = 2 * channels;
753 delay = audio_write_get_buf_size(s);
754 delay /= n;
755
756 /* to be more precise, we take into account the time spent since
757 the last buffer computation */
758 if (audio_callback_time) {
759 time_diff = av_gettime() - audio_callback_time;
760 delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
761 }
762
763 delay += 2*data_used;
764 if (delay < data_used)
765 delay = data_used;
766
767 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
768 if(s->show_audio==1){
769 h= INT_MIN;
770 for(i=0; i<1000; i+=channels){
771 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
772 int a= s->sample_array[idx];
773 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
774 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
775 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
776 int score= a-d;
777 if(h<score && (b^c)<0){
778 h= score;
779 i_start= idx;
780 }
781 }
782 }
783
784 s->last_i_start = i_start;
785 } else {
786 i_start = s->last_i_start;
787 }
788
789 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
790 if(s->show_audio==1){
791 fill_rectangle(screen,
792 s->xleft, s->ytop, s->width, s->height,
793 bgcolor);
794
795 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
796
797 /* total height for one channel */
798 h = s->height / nb_display_channels;
799 /* graph height / 2 */
800 h2 = (h * 9) / 20;
801 for(ch = 0;ch < nb_display_channels; ch++) {
802 i = i_start + ch;
803 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
804 for(x = 0; x < s->width; x++) {
805 y = (s->sample_array[i] * h2) >> 15;
806 if (y < 0) {
807 y = -y;
808 ys = y1 - y;
809 } else {
810 ys = y1;
811 }
812 fill_rectangle(screen,
813 s->xleft + x, ys, 1, y,
814 fgcolor);
815 i += channels;
816 if (i >= SAMPLE_ARRAY_SIZE)
817 i -= SAMPLE_ARRAY_SIZE;
818 }
819 }
820
821 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
822
823 for(ch = 1;ch < nb_display_channels; ch++) {
824 y = s->ytop + ch * h;
825 fill_rectangle(screen,
826 s->xleft, y, s->width, 1,
827 fgcolor);
828 }
829 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
830 }else{
831 nb_display_channels= FFMIN(nb_display_channels, 2);
832 if(rdft_bits != s->rdft_bits){
833 av_rdft_end(s->rdft);
834 av_free(s->rdft_data);
835 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
836 s->rdft_bits= rdft_bits;
837 s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
838 }
839 {
840 FFTSample *data[2];
841 for(ch = 0;ch < nb_display_channels; ch++) {
842 data[ch] = s->rdft_data + 2*nb_freq*ch;
843 i = i_start + ch;
844 for(x = 0; x < 2*nb_freq; x++) {
845 double w= (x-nb_freq)*(1.0/nb_freq);
846 data[ch][x]= s->sample_array[i]*(1.0-w*w);
847 i += channels;
848 if (i >= SAMPLE_ARRAY_SIZE)
849 i -= SAMPLE_ARRAY_SIZE;
850 }
851 av_rdft_calc(s->rdft, data[ch]);
852 }
853 //least efficient way to do this, we should of course directly access it but its more than fast enough
854 for(y=0; y<s->height; y++){
855 double w= 1/sqrt(nb_freq);
856 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
857 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
858 + data[1][2*y+1]*data[1][2*y+1])) : a;
859 a= FFMIN(a,255);
860 b= FFMIN(b,255);
861 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
862
863 fill_rectangle(screen,
864 s->xpos, s->height-y, 1, 1,
865 fgcolor);
866 }
867 }
868 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
869 s->xpos++;
870 if(s->xpos >= s->width)
871 s->xpos= s->xleft;
872 }
873 }
874
/* (Re)create the SDL video surface.  Window size is taken, in priority
   order, from: fullscreen size, user-requested size, filter output /
   codec dimensions, or a 640x480 fallback.  Returns 0 on success, -1 if
   SDL could not set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
924
925 /* display the current picture, if any */
926 static void video_display(VideoState *is)
927 {
928 if(!screen)
929 video_open(cur_stream);
930 if (is->audio_st && is->show_audio)
931 video_audio_display(is);
932 else if (is->video_st)
933 video_image_display(is);
934 }
935
936 static int refresh_thread(void *opaque)
937 {
938 VideoState *is= opaque;
939 while(!is->abort_request){
940 SDL_Event event;
941 event.type = FF_REFRESH_EVENT;
942 event.user.data1 = opaque;
943 if(!is->refresh){
944 is->refresh=1;
945 SDL_PushEvent(&event);
946 }
947 usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
948 }
949 return 0;
950 }
951
952 /* get the current audio clock value */
953 static double get_audio_clock(VideoState *is)
954 {
955 double pts;
956 int hw_buf_size, bytes_per_sec;
957 pts = is->audio_clock;
958 hw_buf_size = audio_write_get_buf_size(is);
959 bytes_per_sec = 0;
960 if (is->audio_st) {
961 bytes_per_sec = is->audio_st->codec->sample_rate *
962 2 * is->audio_st->codec->channels;
963 }
964 if (bytes_per_sec)
965 pts -= (double)hw_buf_size / bytes_per_sec;
966 return pts;
967 }
968
969 /* get the current video clock value */
970 static double get_video_clock(VideoState *is)
971 {
972 if (is->paused) {
973 return is->video_current_pts;
974 } else {
975 return is->video_current_pts_drift + av_gettime() / 1000000.0;
976 }
977 }
978
979 /* get the current external clock value */
980 static double get_external_clock(VideoState *is)
981 {
982 int64_t ti;
983 ti = av_gettime();
984 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
985 }
986
987 /* get the current master clock value */
988 static double get_master_clock(VideoState *is)
989 {
990 double val;
991
992 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
993 if (is->video_st)
994 val = get_video_clock(is);
995 else
996 val = get_audio_clock(is);
997 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
998 if (is->audio_st)
999 val = get_audio_clock(is);
1000 else
1001 val = get_video_clock(is);
1002 } else {
1003 val = get_external_clock(is);
1004 }
1005 return val;
1006 }
1007
1008 /* seek in the stream */
1009 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1010 {
1011 if (!is->seek_req) {
1012 is->seek_pos = pos;
1013 is->seek_rel = rel;
1014 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1015 if (seek_by_bytes)
1016 is->seek_flags |= AVSEEK_FLAG_BYTE;
1017 is->seek_req = 1;
1018 }
1019 }
1020
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: credit the time spent paused to frame_timer so the
           next frame is not considered late */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* NOTE(review): read_pause_return presumably holds the result of
               av_read_pause() (set outside this chunk) -- confirm */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift so get_video_clock() stays continuous */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1033
1034 static double compute_target_time(double frame_current_pts, VideoState *is)
1035 {
1036 double delay, sync_threshold, diff;
1037
1038 /* compute nominal delay */
1039 delay = frame_current_pts - is->frame_last_pts;
1040 if (delay <= 0 || delay >= 10.0) {
1041 /* if incorrect delay, use previous one */
1042 delay = is->frame_last_delay;
1043 } else {
1044 is->frame_last_delay = delay;
1045 }
1046 is->frame_last_pts = frame_current_pts;
1047
1048 /* update delay to follow master synchronisation source */
1049 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1050 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1051 /* if video is slave, we try to correct big delays by
1052 duplicating or deleting a frame */
1053 diff = get_video_clock(is) - get_master_clock(is);
1054
1055 /* skip or repeat frame. We take into account the
1056 delay to compute the threshold. I still don't know
1057 if it is the best guess */
1058 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1059 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1060 if (diff <= -sync_threshold)
1061 delay = 0;
1062 else if (diff >= sync_threshold)
1063 delay = 2 * delay;
1064 }
1065 }
1066 is->frame_timer += delay;
1067
1068 av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1069 delay, frame_current_pts, -diff);
1070
1071 return is->frame_timer;
1072 }
1073
/* called to display each frame */
/* Handler for FF_REFRESH_EVENT: pops due pictures from the picture queue,
   drops late frames when framedrop is enabled, retires expired subtitles,
   displays the result, and periodically prints the status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: wait for the next refresh event */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the following picture is due */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame is already late: ramp up skip_frames and possibly drop it */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switched: discard every queued subtitle */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the subtitle once its end time passed, or
                           once the next one is due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* rate-limit the status line to roughly every 30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1214
1215 static void stream_close(VideoState *is)
1216 {
1217 VideoPicture *vp;
1218 int i;
1219 /* XXX: use a special url_shutdown call to abort parse cleanly */
1220 is->abort_request = 1;
1221 SDL_WaitThread(is->parse_tid, NULL);
1222 SDL_WaitThread(is->refresh_tid, NULL);
1223
1224 /* free all pictures */
1225 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1226 vp = &is->pictq[i];
1227 #if CONFIG_AVFILTER
1228 if (vp->picref) {
1229 avfilter_unref_buffer(vp->picref);
1230 vp->picref = NULL;
1231 }
1232 #endif
1233 if (vp->bmp) {
1234 SDL_FreeYUVOverlay(vp->bmp);
1235 vp->bmp = NULL;
1236 }
1237 }
1238 SDL_DestroyMutex(is->pictq_mutex);
1239 SDL_DestroyCond(is->pictq_cond);
1240 SDL_DestroyMutex(is->subpq_mutex);
1241 SDL_DestroyCond(is->subpq_cond);
1242 #if !CONFIG_AVFILTER
1243 if (is->img_convert_ctx)
1244 sws_freeContext(is->img_convert_ctx);
1245 #endif
1246 av_free(is);
1247 }
1248
/* Global teardown path: close the currently playing stream, release
 * option and filter state, shut SDL down and terminate the process.
 * Never returns. */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n"); /* terminate the in-place status line cleanly */
    SDL_Quit();
    /* NOTE(review): purpose of this final empty av_log() at QUIET level is
     * not evident from this file -- presumably log-teardown idiom; confirm */
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1265
/* Allocate (or reallocate) the SDL YUV overlay for the picture-queue
 * slot at the write index. Needs to run in the main thread (invoked via
 * the FF_ALLOC_EVENT handler) to avoid potential locking problems with
 * SDL overlay creation. Signals pictq_cond when the slot is ready so
 * queue_picture() can continue. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previously allocated overlay for this slot */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* with filtering, the graph's output pad dictates frame geometry/format */
    vp->width = is->out_video_filter->inputs[0]->w;
    vp->height = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* publish the allocation under the queue lock and wake the writer */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1309
/**
 * Queue a decoded frame into the picture ring buffer, converting it to
 * the SDL YV12 overlay layout on the way. Blocks until a slot is free
 * (or the queue is aborted) and round-trips through the main thread via
 * FF_ALLOC_EVENT whenever the overlay must be (re)allocated.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#else
    int dst_pix_fmt = PIX_FMT_YUV420P;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while not in forced refresh: raise the frame-skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 stores V before U, hence planes 1 and 2 are swapped */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* without filters, convert from the decoder format to YUV420P */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1431
1432 /**
1433 * compute the exact PTS for the picture if it is omitted in the stream
1434 * @param pts1 the dts of the pkt / pts of the frame
1435 */
1436 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1437 {
1438 double frame_delay, pts;
1439
1440 pts = pts1;
1441
1442 if (pts != 0) {
1443 /* update video clock with pts, if present */
1444 is->video_clock = pts;
1445 } else {
1446 pts = is->video_clock;
1447 }
1448 /* update video clock for next frame */
1449 frame_delay = av_q2d(is->video_st->codec->time_base);
1450 /* for MPEG2, the frame can be repeated, so we update the
1451 clock accordingly */
1452 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1453 is->video_clock += frame_delay;
1454
1455 return queue_picture(is, src_frame, pts, pos);
1456 }
1457
/* Pull the next packet from the video queue and decode it.
 * Handles the special flush packet by flushing the codec, draining the
 * picture queue and resetting the timing/skip state.
 * @param pts receives the chosen frame timestamp in stream time_base
 *            units (0 when unknown)
 * @return 1 if a displayable frame was produced, 0 if the packet was
 *         consumed without output (flush/skip/no picture), -1 on abort */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if (pkt->data == flush_pkt.data) {
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        /* wait for the display side to drain the queued pictures */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        /* restart timestamp guessing and frame timing from scratch */
        init_pts_correction(&is->pts_ctx);
        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    /* NOTE(review): len1 (the decoder's return value) is never checked,
     * so decode errors are silently treated as "no picture" */
    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    if (got_picture) {
        /* pick pts vs dts according to the -drp user option */
        if (decoder_reorder_pts == -1) {
            *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* frame-skip accounting: only every skip_frames-th decoded
           frame is actually returned for display */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1514
#if CONFIG_AVFILTER
/* Private state of the custom "ffplay_input" video source filter. */
typedef struct {
    VideoState *is;  /* player state the filter pulls decoded frames from */
    AVFrame *frame;  /* scratch frame reused across input_request_frame() calls */
    int use_dr1;     /* nonzero if the decoder renders directly into filter buffers */
} FilterPriv;
1521
/* get_buffer() override used when direct rendering (DR1) is enabled:
 * allocates the decoder's frame storage from the filter output link
 * instead of the codec's default allocator, so frames can enter the
 * filter graph without a copy. Returns 0 on success, -1 on failure. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;
    int pixel_size;

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter-buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* pad the requested size to the codec's alignment plus an edge
       border on every side (unless CODEC_FLAG_EMU_EDGE is set) */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* chroma planes (1 and 2) are shifted by the format's subsampling */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        /* advance each plane pointer past the edge border so the
           decoder writes into the visible area */
        if (ref->data[i]) {
            ref->data[i] += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i] = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age = INT_MAX;
    pic->type = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    /* propagate the packet pts so pkt_pts-based timestamp guessing works */
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1572
/* release_buffer() override matching input_get_buffer(): clears the
 * plane pointers and drops the filter buffer reference stashed in
 * pic->opaque. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1578
/* reget_buffer() override: if the frame has no data yet, fall back to a
 * fresh get_buffer(); otherwise reuse the existing filter buffer, but
 * only when the frame geometry and pixel format are unchanged. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1599
/* init callback of the "ffplay_input" source filter.
 * opaque must be the player's VideoState; when the decoder supports
 * DR1, installs the direct-rendering buffer callbacks on it so frames
 * are decoded straight into filter buffers.
 * @return 0 on success, -1 if no opaque context was supplied */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec = priv->is->video_st->codec;
    codec->opaque = ctx;
    if(codec->codec->capabilities & CODEC_CAP_DR1) {
        priv->use_dr1 = 1;
        codec->get_buffer = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer = input_reget_buffer;
        codec->thread_safe_callbacks = 1;
    }

    /* scratch frame reused for every decode in input_request_frame() */
    priv->frame = avcodec_alloc_frame();

    return 0;
}
1621
/* uninit callback: free the scratch frame allocated in input_init(). */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1627
/* request_frame callback: decode packets until a displayable frame is
 * produced, wrap it in a buffer reference and push it downstream as a
 * complete frame (start/slice/end). Returns 0 on success, -1 on abort. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop past packets that yield no displayable frame (ret == 0) */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* frame data already lives in a filter buffer; just re-reference it */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    /* NOTE(review): pkt.pos is read after av_free_packet(&pkt); the pos
     * field itself is not released by the free, but reading it before
     * freeing would be clearer -- confirm before reordering */
    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1660
/* query_formats callback: this source can only output the video
 * decoder's native pixel format. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1671
/* config_props callback: export the decoder's frame dimensions and the
 * stream's time base on the filter's output link. */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVCodecContext *c = priv->is->video_st->codec;

    link->w = c->width;
    link->h = c->height;
    link->time_base = priv->is->video_st->time_base;

    return 0;
}
1683
/* Source filter feeding decoded frames from the video decoder into the
 * filter graph: no input pads, one video output pad. */
static AVFilter input_filter =
{
    .name = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init = input_init,
    .uninit = input_uninit,

    .query_formats = input_query_formats,

    .inputs = (AVFilterPad[]) {{ .name = NULL }},
    .outputs = (AVFilterPad[]) {{ .name = "default",
                                  .type = AVMEDIA_TYPE_VIDEO,
                                  .request_frame = input_request_frame,
                                  .config_props = input_config_props, },
                                { .name = NULL }},
};
1702
1703 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1704 {
1705 char sws_flags_str[128];
1706 int ret;
1707 FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1708 AVFilterContext *filt_src = NULL, *filt_out = NULL;
1709 snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1710 graph->scale_sws_opts = av_strdup(sws_flags_str);
1711
1712 if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1713 NULL, is, graph)) < 0)
1714 goto the_end;
1715 if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1716 NULL, &ffsink_ctx, graph)) < 0)
1717 goto the_end;
1718
1719 if(vfilters) {
1720 AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1721 AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
1722
1723 outputs->name = av_strdup("in");
1724 outputs->filter_ctx = filt_src;
1725 outputs->pad_idx = 0;
1726 outputs->next = NULL;
1727
1728 inputs->name = av_strdup("out");
1729 inputs->filter_ctx = filt_out;
1730 inputs->pad_idx = 0;
1731 inputs->next = NULL;
1732
1733 if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1734 goto the_end;
1735 av_freep(&vfilters);
1736 } else {
1737 if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1738 goto the_end;
1739 }
1740
1741 if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1742 goto the_end;
1743
1744 is->out_video_filter = filt_out;
1745 the_end:
1746 return ret;
1747 }
1748
1749 #endif /* CONFIG_AVFILTER */
1750
/* Video decoding thread: pulls frames (directly from the decoder, or
 * through the filter graph when CONFIG_AVFILTER), converts their
 * timestamps to seconds and queues them for display via
 * output_picture2(). Exits when the queue is aborted. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        /* sleep-poll while playback is paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos = picref->pos;
            frame->opaque = picref;
        }

        /* rescale filter-output timestamps into the stream time base */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: packet consumed without a displayable frame (skip) */
        if (!ret)
            continue;

        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts, pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1825
/* Subtitle decoding thread: decodes subtitle packets, converts the
 * bitmap palettes from RGBA to CCIR YUVA in place, and queues the
 * result for the display side. Exits when the queue is aborted. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* sleep-poll while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subtitle queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        /* NOTE(review): len1 (the decoder's return value) is never
         * checked, so decode errors are silently ignored */
        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                        &sp->sub, &got_subtitle,
                                        pkt);
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette from RGBA to CCIR YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
    }
 the_end:
    return 0;
}
1895
1896 /* copy samples for viewing in editor window */
1897 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1898 {
1899 int size, len;
1900
1901 size = samples_size / sizeof(short);
1902 while (size > 0) {
1903 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1904 if (len > size)
1905 len = size;
1906 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1907 samples += len;
1908 is->sample_array_index += len;
1909 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1910 is->sample_array_index = 0;
1911 size -= len;
1912 }
1913 }
1914
1915 /* return the new audio buffer size (samples can be added or deleted
1916 to get better sync if video or external master clock) */
1917 static int synchronize_audio(VideoState *is, short *samples,
1918 int samples_size1, double pts)
1919 {
1920 int n, samples_size;
1921 double ref_clock;
1922
1923 n = 2 * is->audio_st->codec->channels;
1924 samples_size = samples_size1;
1925
1926 /* if not master, then we try to remove or add samples to correct the clock */
1927 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1928 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1929 double diff, avg_diff;
1930 int wanted_size, min_size, max_size, nb_samples;
1931
1932 ref_clock = get_master_clock(is);
1933 diff = get_audio_clock(is) - ref_clock;
1934
1935 if (diff < AV_NOSYNC_THRESHOLD) {
1936 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1937 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1938 /* not enough measures to have a correct estimate */
1939 is->audio_diff_avg_count++;
1940 } else {
1941 /* estimate the A-V difference */
1942 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1943
1944 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1945 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1946 nb_samples = samples_size / n;
1947
1948 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1949 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1950 if (wanted_size < min_size)
1951 wanted_size = min_size;
1952 else if (wanted_size > max_size)
1953 wanted_size = max_size;
1954
1955 /* add or remove samples to correction the synchro */
1956 if (wanted_size < samples_size) {
1957 /* remove samples */
1958 samples_size = wanted_size;
1959 } else if (wanted_size > samples_size) {
1960 uint8_t *samples_end, *q;
1961 int nb;
1962
1963 /* add samples */
1964 nb = (samples_size - wanted_size);
1965 samples_end = (uint8_t *)samples + samples_size - n;
1966 q = samples_end + n;
1967 while (nb > 0) {
1968 memcpy(q, samples_end, n);
1969 q += n;
1970 nb -= n;
1971 }
1972 samples_size = wanted_size;
1973 }
1974 }
1975 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1976 diff, avg_diff, samples_size - samples_size1,
1977 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1978 }
1979 } else {
1980 /* too big difference : may be initial PTS errors, so
1981 reset A-V filter */
1982 is->audio_diff_avg_count = 0;
1983 is->audio_diff_cum = 0;
1984 }
1985 }
1986
1987 return samples_size;
1988 }
1989
/* Decode one audio frame and return its uncompressed size in bytes.
 * Drains the current packet first, then pulls the next one from the
 * audio queue; converts to S16 when the decoder output format differs,
 * and advances is->audio_clock by the decoded duration.
 * @param pts_ptr receives the presentation time of the returned data
 * @return decoded byte count, or -1 on pause/abort */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed part of the packet */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the S16 converter when the source format changes */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (S16, 2 bytes/sample) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of the decoded data */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#ifdef DEBUG
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2092
/* SDL audio callback (runs on SDL's audio thread): fill 'stream' with
 * 'len' bytes of decoded, clock-synchronized audio; outputs silence
 * when decoding fails. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* remember when the callback started, for audio clock estimation */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* refill the decoded buffer when it has been fully consumed */
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is, &pts);
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf = is->audio_buf1;
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                if (is->show_audio)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                /* shrink/grow the buffer for A-V synchronization */
                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                               pts);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        /* copy as much of the decoded buffer as fits */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2128
/* Open the stream at 'stream_index': configure and open its decoder,
 * initialize SDL audio output for audio streams, set up the matching
 * packet queue and start the decoding thread (video/subtitle).
 * @return 0 if OK, -1 on any failure */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder for at most stereo output */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* apply the command-line decoding options */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avctx->thread_count= thread_count;

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2226
/* Shut down one open stream component (audio, video or subtitle):
 * stop its packet queue, join its worker thread, release per-stream
 * resources and mark the stream as discarded in the demuxer. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    /* ignore out-of-range indices so callers can pass -1 for "not open" */
    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* abort first so the SDL audio callback stops pulling packets,
           then close the device before tearing the queue down */
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        /* join the decoder thread before freeing its queue */
        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* tell the demuxer to drop packets for this stream from now on */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2298
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* Interrupt callback installed via avio_set_interrupt_cb(): returns
 * non-zero to make blocking I/O in libavformat bail out once the
 * current player instance has requested abort. */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
2307
/* this thread gets the stream from the disk or the network */
/* Demuxer thread: opens the input, selects and opens the best streams,
 * then loops reading packets and dispatching them to the per-stream
 * queues, servicing pause/seek/EOF requests along the way.
 * Returns 0; failures are reported by pushing FF_QUIT_EVENT. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic = NULL;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    int eof=0;
    int pkt_in_play_range = 0;
    AVDictionaryEntry *t;

    /* -1 means "no stream selected" for every media type */
    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow blocking avio I/O to be interrupted on abort_request */
    global_video_state = is;
    avio_set_interrupt_cb(decode_interrupt_cb);

    err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    /* any option left in format_opts was not consumed -> user typo */
    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    /* Set AVCodecContext options so they will be seen by av_find_stream_info() */
    for (i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *dec = ic->streams[i]->codec;
        switch (dec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_AUDIO],
                             AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
                             NULL);
            break;
        case AVMEDIA_TYPE_VIDEO:
            set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_VIDEO],
                             AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
                             NULL);
            break;
        }
    }

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* auto mode: seek by bytes only for formats with discontinuous timestamps */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything by default; selected streams are re-enabled
       by stream_component_open() */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    /* NOTE(review): subtitle selection is gated on video_disable — there is
       no separate subtitle_disable flag in this version; confirm intended */
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: fall back to the audio waves/RDFT visualization */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause state changes to the demuxer (for network streams) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush queued packets and inject flush_pkt so the
                   decoders reset their state at the new position */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* feed an empty packet so the video decoder can flush delayed frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* once everything is drained, loop or auto-exit as configured */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
                eof=1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    avio_set_interrupt_cb(NULL);

    /* tell the main event loop that this instance is done */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2568
2569 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2570 {
2571 VideoState *is;
2572
2573 is = av_mallocz(sizeof(VideoState));
2574 if (!is)
2575 return NULL;
2576 av_strlcpy(is->filename, filename, sizeof(is->filename));
2577 is->iformat = iformat;
2578 is->ytop = 0;
2579 is->xleft = 0;
2580
2581 /* start video display */
2582 is->pictq_mutex = SDL_CreateMutex();
2583 is->pictq_cond = SDL_CreateCond();
2584
2585 is->subpq_mutex = SDL_CreateMutex();
2586 is->subpq_cond = SDL_CreateCond();
2587
2588 is->av_sync_type = av_sync_type;
2589 is->parse_tid = SDL_CreateThread(decode_thread, is);
2590 if (!is->parse_tid) {
2591 av_free(is);
2592 return NULL;
2593 }
2594 return is;
2595 }
2596
/* Switch to the next usable stream of the given media type, wrapping
 * around past the end of the stream list. For subtitles, cycling past
 * the last stream turns subtitles off (index -1). Closes the old
 * component and opens the new one. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* subtitles may start from the "off" state (-1); audio/video must be open */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrapped past the last subtitle stream: disable subtitles */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* full cycle with no other candidate: keep the current stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2645
2646
/* Flip the fullscreen flag and recreate the SDL video surface accordingly. */
static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    video_open(cur_stream);
}
2652
/* Toggle pause on the current stream and leave frame-step mode. */
static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}
2659
/* Advance playback by a single frame: unpause if needed, then set the
 * global step flag so the refresh logic re-pauses after one frame. */
static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}
2669
/* Cycle the audio visualization mode (0=off, 1=waves, 2=RDFT) and clear
 * the display area so the new mode starts from a blank surface. */
static void toggle_audio_display(void)
{
    if (cur_stream) {
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
        fill_rectangle(screen,
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
                    bgcolor);
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
    }
}
2681
/* handle an event sent by the GUI */
/* Main thread event loop: blocks on SDL events and dispatches keyboard
 * shortcuts, mouse seeking, window resize and the custom FF_* events.
 * Never returns; quitting goes through do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte-based seeking: derive a byte offset from the
                           current position and an increment scaled by bitrate */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = avio_tell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0; /* fallback rate when bitrate is unknown */
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seeking relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fall through: clicks and drags share the seek handling below */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only track motion while a button is held (drag-seek) */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    /* map the x position to a byte offset in the file */
                    uint64_t size=  avio_size(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    /* map the x position to a fraction of the total duration */
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* request from the video thread to (re)allocate the picture
               on the main thread, where SDL video calls must happen */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2828
2829 static int opt_frame_size(const char *opt, const char *arg)
2830 {
2831 if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2832 fprintf(stderr, "Incorrect frame size\n");
2833 return AVERROR(EINVAL);
2834 }
2835 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2836 fprintf(stderr, "Frame size must be a multiple of 2\n");
2837 return AVERROR(EINVAL);
2838 }
2839 return 0;
2840 }
2841
/* Set the forced display width from the command line (dies on bad input). */
static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2847
/* Set the forced display height from the command line (dies on bad input). */
static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2853
2854 static int opt_format(const char *opt, const char *arg)
2855 {
2856 file_iformat = av_find_input_format(arg);
2857 if (!file_iformat) {
2858 fprintf(stderr, "Unknown input format: %s\n", arg);
2859 return AVERROR(EINVAL);
2860 }
2861 return 0;
2862 }
2863
2864 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2865 {
2866 frame_pix_fmt = av_get_pix_fmt(arg);
2867 return 0;
2868 }
2869
2870 static int opt_sync(const char *opt, const char *arg)
2871 {
2872 if (!strcmp(arg, "audio"))
2873 av_sync_type = AV_SYNC_AUDIO_MASTER;
2874 else if (!strcmp(arg, "video"))
2875 av_sync_type = AV_SYNC_VIDEO_MASTER;
2876 else if (!strcmp(arg, "ext"))
2877 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2878 else {
2879 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2880 exit(1);
2881 }
2882 return 0;
2883 }
2884
/* Set the initial seek position ("-ss"); dies on an unparsable time spec. */
static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}
2890
/* Limit how much of the input is played ("-t"); dies on an unparsable time spec. */
static int opt_duration(const char *opt, const char *arg)
{
    duration = parse_time_or_die(opt, arg, 1);
    return 0;
}
2896
/* Enable codec debug output: raise the log level to maximum, then set
 * the debug bitmask from the argument (dies on bad input). */
static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}
2903
/* Set the motion-vector visualization mask ("-vismv"); dies on bad input. */
static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}
2909
/* Set the decoder thread count ("-threads"); warns when the build has
 * no real thread support. Dies on bad input. */
static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}
2918
/* Command-line option table consumed by parse_options() / show_help().
 * Entries are { name, flags, handler-or-variable, help, arg-name }. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry */
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range and seeking */
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* decoder tuning / debugging */
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    /* sync, threading and behavior */
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
    { NULL, },
};
2966
/* Print a one-line description and the usage synopsis to stdout. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
2973
/* Print full help: the option table (split into main/advanced sections),
 * the generic codec/format (and swscale) AVOption lists, and the
 * interactive key bindings. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    /* without avfilter, scaling options go through swscale directly */
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3007
3008 static void opt_input_file(const char *filename)
3009 {
3010 if (input_filename) {
3011 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3012 filename, input_filename);
3013 exit(1);
3014 }
3015 if (!strcmp(filename, "-"))
3016 filename = "pipe:";
3017 input_filename = filename;
3018 }
3019
3020 /* Called from the main */
3021 int main(int argc, char **argv)
3022 {
3023 int flags;
3024
3025 av_log_set_flags(AV_LOG_SKIP_REPEATED);
3026
3027 /* register all codecs, demux and protocols */
3028 avcodec_register_all();
3029 #if CONFIG_AVDEVICE
3030 avdevice_register_all();
3031 #endif
3032 #if CONFIG_AVFILTER
3033 avfilter_register_all();
3034 #endif
3035 av_register_all();
3036
3037 init_opts();
3038
3039 show_banner();
3040
3041 parse_options(argc, argv, options, opt_input_file);
3042
3043 if (!input_filename) {
3044 show_usage();
3045 fprintf(stderr, "An input file must be specified\n");
3046 fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3047 exit(1);
3048 }
3049
3050 if (display_disable) {
3051 video_disable = 1;
3052 }
3053 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3054 #if !defined(__MINGW32__) && !defined(__APPLE__)
3055 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3056 #endif
3057 if (SDL_Init (flags)) {
3058 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3059 exit(1);
3060 }
3061
3062 if (!display_disable) {
3063 #if HAVE_SDL_VIDEO_SIZE
3064 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3065 fs_screen_width = vi->current_w;
3066 fs_screen_height = vi->current_h;
3067 #endif
3068 }
3069
3070 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3071 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3072 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3073
3074 av_init_packet(&flush_pkt);
3075 flush_pkt.data= "FLUSH";
3076
3077 cur_stream = stream_open(input_filename, file_iformat);
3078
3079 event_loop();
3080
3081 /* never returns */
3082
3083 return 0;
3084 }