a5c4403092bf9fe12d4e4c8786913c4f041f84e1
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the FFmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/pixdesc.h"
28 #include "libavformat/avformat.h"
29 #include "libavdevice/avdevice.h"
30 #include "libswscale/swscale.h"
31 #include "libavcodec/audioconvert.h"
32 #include "libavcodec/colorspace.h"
33 #include "libavcodec/opt.h"
34 #include "libavcodec/avfft.h"
35
36 #if CONFIG_AVFILTER
37 # include "libavfilter/avfilter.h"
38 # include "libavfilter/avfiltergraph.h"
39 # include "libavfilter/graphparser.h"
40 #endif
41
42 #include "cmdutils.h"
43
44 #include <SDL.h>
45 #include <SDL_thread.h>
46
47 #ifdef __MINGW32__
48 #undef main /* We don't want SDL to override our main() */
49 #endif
50
51 #include <unistd.h>
52 #include <assert.h>
53
54 const char program_name[] = "FFplay";
55 const int program_birth_year = 2003;
56
57 //#define DEBUG_SYNC
58
59 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61 #define MIN_FRAMES 5
62
63 /* SDL audio buffer size, in samples. Should be small to have precise
64 A/V sync as SDL does not have hardware buffer fullness info. */
65 #define SDL_AUDIO_BUFFER_SIZE 1024
66
67 /* no AV sync correction is done if below the AV sync threshold */
68 #define AV_SYNC_THRESHOLD 0.01
69 /* no AV correction is done if too big error */
70 #define AV_NOSYNC_THRESHOLD 10.0
71
72 #define FRAME_SKIP_FACTOR 0.05
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78 #define AUDIO_DIFF_AVG_NB 20
79
80 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 #if !CONFIG_AVFILTER
84 static int sws_flags = SWS_BICUBIC;
85 #endif
86
/* Thread-safe FIFO of demuxed packets, shared between the read thread
   (producer) and a decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly-linked list head/tail */
    int nb_packets;       /* number of queued packets */
    int size;             /* total payload bytes + list-node overhead */
    int abort_request;    /* when set, packet_queue_get() returns -1 */
    SDL_mutex *mutex;     /* protects all fields above */
    SDL_cond *cond;       /* signalled when a packet is added or on abort */
} PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
/* One decoded picture queued for display. */
typedef struct VideoPicture {
    double pts;          ///<presentation time stamp for this picture
    double target_clock; ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;         ///<byte position in file
    SDL_Overlay *bmp;    /* SDL YUV overlay holding the pixels (NULL until allocated) */
    int width, height;   /* source height & width */
    int allocated;       /* set once bmp has been (re)allocated for this size */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterPicRef *picref; /* filter-graph picture reference backing bmp */
#endif
} VideoPicture;
112
/* One decoded subtitle queued for blending onto video frames. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles (freed by free_subpicture) */
} SubPicture;
117
/* Which clock drives A/V synchronisation (see get_master_clock()). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
123
/* Complete player state for one opened media file: demuxer context,
   per-stream decoder state, packet/picture/subtitle queues, and the
   clocks used for A/V synchronisation. */
typedef struct VideoState {
    SDL_Thread *parse_tid;   /* demux/read thread */
    SDL_Thread *video_tid;   /* video decode thread */
    SDL_Thread *refresh_tid; /* display refresh event pump */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;       /* set to tear down all threads */
    int paused;
    int last_paused;
    int seek_req;            /* a seek is pending; parameters below */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;   /* result of av_read_pause(), see stream_pause() */
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;        /* index of the selected audio stream, or -1 */

    int av_sync_type;        /* one of the AV_SYNC_* enum values */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;      /* pts of the last decoded audio data */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;   /* SDL audio device buffer size, in bytes */
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;          /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx; /* sample-format converter to S16, if needed */

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer for visualization */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;  /* FFT context for spectrum display mode */
    int rdft_bits;
    int xpos;           /* current column of the scrolling spectrogram */

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;      /* accumulated target display time, see compute_target_time() */
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos; ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* display area inside the SDL surface */

    /* timestamp-discontinuity counters used to pick pts vs dts for frames */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter; ///<the last filter in the video chain
#endif

    float skip_frames;       /* frame-drop ratio (>= 1 means dropping) */
    float skip_frames_index;
    int refresh;             /* a FF_REFRESH_EVENT is already pending */
} VideoState;
220
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* per-media-type stream index requested with -ast/-vst/-sst; -1 = auto */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1; /* -1 = decide per container */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20; /* spectrum refresh period, ms */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time; /* av_gettime() at last SDL audio callback */

/* sentinel packet: tells decoders to flush their buffers after a seek */
static AVPacket flush_pkt;

#define FF_ALLOC_EVENT (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
285
286 /* packet queue handling */
287 static void packet_queue_init(PacketQueue *q)
288 {
289 memset(q, 0, sizeof(PacketQueue));
290 q->mutex = SDL_CreateMutex();
291 q->cond = SDL_CreateCond();
292 packet_queue_put(q, &flush_pkt);
293 }
294
295 static void packet_queue_flush(PacketQueue *q)
296 {
297 AVPacketList *pkt, *pkt1;
298
299 SDL_LockMutex(q->mutex);
300 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
301 pkt1 = pkt->next;
302 av_free_packet(&pkt->pkt);
303 av_freep(&pkt);
304 }
305 q->last_pkt = NULL;
306 q->first_pkt = NULL;
307 q->nb_packets = 0;
308 q->size = 0;
309 SDL_UnlockMutex(q->mutex);
310 }
311
312 static void packet_queue_end(PacketQueue *q)
313 {
314 packet_queue_flush(q);
315 SDL_DestroyMutex(q->mutex);
316 SDL_DestroyCond(q->cond);
317 }
318
319 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
320 {
321 AVPacketList *pkt1;
322
323 /* duplicate the packet */
324 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
325 return -1;
326
327 pkt1 = av_malloc(sizeof(AVPacketList));
328 if (!pkt1)
329 return -1;
330 pkt1->pkt = *pkt;
331 pkt1->next = NULL;
332
333
334 SDL_LockMutex(q->mutex);
335
336 if (!q->last_pkt)
337
338 q->first_pkt = pkt1;
339 else
340 q->last_pkt->next = pkt1;
341 q->last_pkt = pkt1;
342 q->nb_packets++;
343 q->size += pkt1->pkt.size + sizeof(*pkt1);
344 /* XXX: should duplicate packet data in DV case */
345 SDL_CondSignal(q->cond);
346
347 SDL_UnlockMutex(q->mutex);
348 return 0;
349 }
350
351 static void packet_queue_abort(PacketQueue *q)
352 {
353 SDL_LockMutex(q->mutex);
354
355 q->abort_request = 1;
356
357 SDL_CondSignal(q->cond);
358
359 SDL_UnlockMutex(q->mutex);
360 }
361
362 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
363 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
364 {
365 AVPacketList *pkt1;
366 int ret;
367
368 SDL_LockMutex(q->mutex);
369
370 for(;;) {
371 if (q->abort_request) {
372 ret = -1;
373 break;
374 }
375
376 pkt1 = q->first_pkt;
377 if (pkt1) {
378 q->first_pkt = pkt1->next;
379 if (!q->first_pkt)
380 q->last_pkt = NULL;
381 q->nb_packets--;
382 q->size -= pkt1->pkt.size + sizeof(*pkt1);
383 *pkt = pkt1->pkt;
384 av_free(pkt1);
385 ret = 1;
386 break;
387 } else if (!block) {
388 ret = 0;
389 break;
390 } else {
391 SDL_CondWait(q->cond, q->mutex);
392 }
393 }
394 SDL_UnlockMutex(q->mutex);
395 return ret;
396 }
397
398 static inline void fill_rectangle(SDL_Surface *screen,
399 int x, int y, int w, int h, int color)
400 {
401 SDL_Rect rect;
402 rect.x = x;
403 rect.y = y;
404 rect.w = w;
405 rect.h = h;
406 SDL_FillRect(screen, &rect, color);
407 }
408
#if 0
/* draw only the border of a rectangle */
/* NOTE: compiled out; its only caller (the background fill in
   video_image_display()) is also disabled. Kept for reference. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    /* left band, right band, top band, bottom band */
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
446
/* Blend newp over oldp with alpha a (0..255); s is the accumulation shift:
   0 for a single sample, 1 when two samples were summed, 2 for four, so
   that averaged chroma keeps full precision. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB pixel at s into r, g, b, a. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit index at s and unpack it as
   AYUV into y, u, v, a (the palette is already converted to YCrCb). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y, u, v, a into a 32-bit AYUV word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
475
476 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
477 {
478 int wrap, wrap3, width2, skip2;
479 int y, u, v, a, u1, v1, a1, w, h;
480 uint8_t *lum, *cb, *cr;
481 const uint8_t *p;
482 const uint32_t *pal;
483 int dstx, dsty, dstw, dsth;
484
485 dstw = av_clip(rect->w, 0, imgw);
486 dsth = av_clip(rect->h, 0, imgh);
487 dstx = av_clip(rect->x, 0, imgw - dstw);
488 dsty = av_clip(rect->y, 0, imgh - dsth);
489 lum = dst->data[0] + dsty * dst->linesize[0];
490 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
491 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
492
493 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
494 skip2 = dstx >> 1;
495 wrap = dst->linesize[0];
496 wrap3 = rect->pict.linesize[0];
497 p = rect->pict.data[0];
498 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
499
500 if (dsty & 1) {
501 lum += dstx;
502 cb += skip2;
503 cr += skip2;
504
505 if (dstx & 1) {
506 YUVA_IN(y, u, v, a, p, pal);
507 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
508 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
509 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
510 cb++;
511 cr++;
512 lum++;
513 p += BPP;
514 }
515 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
516 YUVA_IN(y, u, v, a, p, pal);
517 u1 = u;
518 v1 = v;
519 a1 = a;
520 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521
522 YUVA_IN(y, u, v, a, p + BPP, pal);
523 u1 += u;
524 v1 += v;
525 a1 += a;
526 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
527 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
528 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
529 cb++;
530 cr++;
531 p += 2 * BPP;
532 lum += 2;
533 }
534 if (w) {
535 YUVA_IN(y, u, v, a, p, pal);
536 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
537 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
538 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
539 p++;
540 lum++;
541 }
542 p += wrap3 - dstw * BPP;
543 lum += wrap - dstw - dstx;
544 cb += dst->linesize[1] - width2 - skip2;
545 cr += dst->linesize[2] - width2 - skip2;
546 }
547 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
548 lum += dstx;
549 cb += skip2;
550 cr += skip2;
551
552 if (dstx & 1) {
553 YUVA_IN(y, u, v, a, p, pal);
554 u1 = u;
555 v1 = v;
556 a1 = a;
557 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
558 p += wrap3;
559 lum += wrap;
560 YUVA_IN(y, u, v, a, p, pal);
561 u1 += u;
562 v1 += v;
563 a1 += a;
564 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
565 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
566 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
567 cb++;
568 cr++;
569 p += -wrap3 + BPP;
570 lum += -wrap + 1;
571 }
572 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
573 YUVA_IN(y, u, v, a, p, pal);
574 u1 = u;
575 v1 = v;
576 a1 = a;
577 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578
579 YUVA_IN(y, u, v, a, p + BPP, pal);
580 u1 += u;
581 v1 += v;
582 a1 += a;
583 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
584 p += wrap3;
585 lum += wrap;
586
587 YUVA_IN(y, u, v, a, p, pal);
588 u1 += u;
589 v1 += v;
590 a1 += a;
591 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
592
593 YUVA_IN(y, u, v, a, p + BPP, pal);
594 u1 += u;
595 v1 += v;
596 a1 += a;
597 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
598
599 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
600 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
601
602 cb++;
603 cr++;
604 p += -wrap3 + 2 * BPP;
605 lum += -wrap + 2;
606 }
607 if (w) {
608 YUVA_IN(y, u, v, a, p, pal);
609 u1 = u;
610 v1 = v;
611 a1 = a;
612 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
613 p += wrap3;
614 lum += wrap;
615 YUVA_IN(y, u, v, a, p, pal);
616 u1 += u;
617 v1 += v;
618 a1 += a;
619 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
620 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
621 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622 cb++;
623 cr++;
624 p += -wrap3 + BPP;
625 lum += -wrap + 1;
626 }
627 p += wrap3 + (wrap3 - dstw * BPP);
628 lum += wrap + (wrap - dstw - dstx);
629 cb += dst->linesize[1] - width2 - skip2;
630 cr += dst->linesize[2] - width2 - skip2;
631 }
632 /* handle odd height */
633 if (h) {
634 lum += dstx;
635 cb += skip2;
636 cr += skip2;
637
638 if (dstx & 1) {
639 YUVA_IN(y, u, v, a, p, pal);
640 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
641 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
642 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
643 cb++;
644 cr++;
645 lum++;
646 p += BPP;
647 }
648 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
649 YUVA_IN(y, u, v, a, p, pal);
650 u1 = u;
651 v1 = v;
652 a1 = a;
653 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
654
655 YUVA_IN(y, u, v, a, p + BPP, pal);
656 u1 += u;
657 v1 += v;
658 a1 += a;
659 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
660 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
661 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
662 cb++;
663 cr++;
664 p += 2 * BPP;
665 lum += 2;
666 }
667 if (w) {
668 YUVA_IN(y, u, v, a, p, pal);
669 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
670 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
671 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
672 }
673 }
674 }
675
676 static void free_subpicture(SubPicture *sp)
677 {
678 int i;
679
680 for (i = 0; i < sp->sub.num_rects; i++)
681 {
682 av_freep(&sp->sub.rects[i]->pict.data[0]);
683 av_freep(&sp->sub.rects[i]->pict.data[1]);
684 av_freep(&sp->sub.rects[i]);
685 }
686
687 av_free(sp->sub.rects);
688
689 memset(&sp->sub, 0, sizeof(AVSubtitle));
690 }
691
/* Display the picture at the read index of the picture queue: blend any
   due subtitle into its YUV overlay, then blit the overlay letterboxed
   into the window at the stream's display aspect ratio. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
        /* sample (pixel) aspect ratio comes from the filter output */
        if (vp->picref->pixel_aspect.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert pixel aspect to display aspect */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's start time is reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* SDL's YV12 overlay stores planes as Y, V, U:
                       swap planes 1 and 2 to get a YUV420P AVPicture */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the largest even-sized rectangle of the right aspect */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
813
/* Mathematical modulo: map a into [0, b) even when a is negative
   (assumes b > 0), unlike C's remainder operator. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
822
823 static void video_audio_display(VideoState *s)
824 {
825 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
826 int ch, channels, h, h2, bgcolor, fgcolor;
827 int16_t time_diff;
828 int rdft_bits, nb_freq;
829
830 for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
831 ;
832 nb_freq= 1<<(rdft_bits-1);
833
834 /* compute display index : center on currently output samples */
835 channels = s->audio_st->codec->channels;
836 nb_display_channels = channels;
837 if (!s->paused) {
838 int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
839 n = 2 * channels;
840 delay = audio_write_get_buf_size(s);
841 delay /= n;
842
843 /* to be more precise, we take into account the time spent since
844 the last buffer computation */
845 if (audio_callback_time) {
846 time_diff = av_gettime() - audio_callback_time;
847 delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
848 }
849
850 delay += 2*data_used;
851 if (delay < data_used)
852 delay = data_used;
853
854 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
855 if(s->show_audio==1){
856 h= INT_MIN;
857 for(i=0; i<1000; i+=channels){
858 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
859 int a= s->sample_array[idx];
860 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
861 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
862 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
863 int score= a-d;
864 if(h<score && (b^c)<0){
865 h= score;
866 i_start= idx;
867 }
868 }
869 }
870
871 s->last_i_start = i_start;
872 } else {
873 i_start = s->last_i_start;
874 }
875
876 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
877 if(s->show_audio==1){
878 fill_rectangle(screen,
879 s->xleft, s->ytop, s->width, s->height,
880 bgcolor);
881
882 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
883
884 /* total height for one channel */
885 h = s->height / nb_display_channels;
886 /* graph height / 2 */
887 h2 = (h * 9) / 20;
888 for(ch = 0;ch < nb_display_channels; ch++) {
889 i = i_start + ch;
890 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
891 for(x = 0; x < s->width; x++) {
892 y = (s->sample_array[i] * h2) >> 15;
893 if (y < 0) {
894 y = -y;
895 ys = y1 - y;
896 } else {
897 ys = y1;
898 }
899 fill_rectangle(screen,
900 s->xleft + x, ys, 1, y,
901 fgcolor);
902 i += channels;
903 if (i >= SAMPLE_ARRAY_SIZE)
904 i -= SAMPLE_ARRAY_SIZE;
905 }
906 }
907
908 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
909
910 for(ch = 1;ch < nb_display_channels; ch++) {
911 y = s->ytop + ch * h;
912 fill_rectangle(screen,
913 s->xleft, y, s->width, 1,
914 fgcolor);
915 }
916 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
917 }else{
918 nb_display_channels= FFMIN(nb_display_channels, 2);
919 if(rdft_bits != s->rdft_bits){
920 av_rdft_end(s->rdft);
921 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
922 s->rdft_bits= rdft_bits;
923 }
924 {
925 FFTSample data[2][2*nb_freq];
926 for(ch = 0;ch < nb_display_channels; ch++) {
927 i = i_start + ch;
928 for(x = 0; x < 2*nb_freq; x++) {
929 double w= (x-nb_freq)*(1.0/nb_freq);
930 data[ch][x]= s->sample_array[i]*(1.0-w*w);
931 i += channels;
932 if (i >= SAMPLE_ARRAY_SIZE)
933 i -= SAMPLE_ARRAY_SIZE;
934 }
935 av_rdft_calc(s->rdft, data[ch]);
936 }
937 //least efficient way to do this, we should of course directly access it but its more than fast enough
938 for(y=0; y<s->height; y++){
939 double w= 1/sqrt(nb_freq);
940 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
941 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
942 a= FFMIN(a,255);
943 b= FFMIN(b,255);
944 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
945
946 fill_rectangle(screen,
947 s->xpos, s->height-y, 1, 1,
948 fgcolor);
949 }
950 }
951 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
952 s->xpos++;
953 if(s->xpos >= s->width)
954 s->xpos= s->xleft;
955 }
956 }
957
/* (Re)open the SDL video surface. Size priority: fullscreen dimensions,
   user-forced -x/-y size, filter-graph output (or codec dimensions when
   built without libavfilter), then a 640x480 fallback. No-op if the
   current surface already matches. Returns 0 on success, -1 on failure. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* already open at the right size: nothing to do */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1007
1008 /* display the current picture, if any */
1009 static void video_display(VideoState *is)
1010 {
1011 if(!screen)
1012 video_open(cur_stream);
1013 if (is->audio_st && is->show_audio)
1014 video_audio_display(is);
1015 else if (is->video_st)
1016 video_image_display(is);
1017 }
1018
1019 static int refresh_thread(void *opaque)
1020 {
1021 VideoState *is= opaque;
1022 while(!is->abort_request){
1023 SDL_Event event;
1024 event.type = FF_REFRESH_EVENT;
1025 event.user.data1 = opaque;
1026 if(!is->refresh){
1027 is->refresh=1;
1028 SDL_PushEvent(&event);
1029 }
1030 usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1031 }
1032 return 0;
1033 }
1034
1035 /* get the current audio clock value */
1036 static double get_audio_clock(VideoState *is)
1037 {
1038 double pts;
1039 int hw_buf_size, bytes_per_sec;
1040 pts = is->audio_clock;
1041 hw_buf_size = audio_write_get_buf_size(is);
1042 bytes_per_sec = 0;
1043 if (is->audio_st) {
1044 bytes_per_sec = is->audio_st->codec->sample_rate *
1045 2 * is->audio_st->codec->channels;
1046 }
1047 if (bytes_per_sec)
1048 pts -= (double)hw_buf_size / bytes_per_sec;
1049 return pts;
1050 }
1051
1052 /* get the current video clock value */
1053 static double get_video_clock(VideoState *is)
1054 {
1055 if (is->paused) {
1056 return is->video_current_pts;
1057 } else {
1058 return is->video_current_pts_drift + av_gettime() / 1000000.0;
1059 }
1060 }
1061
1062 /* get the current external clock value */
1063 static double get_external_clock(VideoState *is)
1064 {
1065 int64_t ti;
1066 ti = av_gettime();
1067 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1068 }
1069
1070 /* get the current master clock value */
1071 static double get_master_clock(VideoState *is)
1072 {
1073 double val;
1074
1075 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1076 if (is->video_st)
1077 val = get_video_clock(is);
1078 else
1079 val = get_audio_clock(is);
1080 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1081 if (is->audio_st)
1082 val = get_audio_clock(is);
1083 else
1084 val = get_video_clock(is);
1085 } else {
1086 val = get_external_clock(is);
1087 }
1088 return val;
1089 }
1090
1091 /* seek in the stream */
1092 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1093 {
1094 if (!is->seek_req) {
1095 is->seek_pos = pos;
1096 is->seek_rel = rel;
1097 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1098 if (seek_by_bytes)
1099 is->seek_flags |= AVSEEK_FLAG_BYTE;
1100 is->seek_req = 1;
1101 }
1102 }
1103
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: add the wall-clock time spent paused to frame_timer
           so pending frames are not considered late */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* demuxer honoured av_read_pause(): treat the clock as having
               kept running while paused */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift so get_video_clock() resumes from "now" */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1116
1117 static double compute_target_time(double frame_current_pts, VideoState *is)
1118 {
1119 double delay, sync_threshold, diff;
1120
1121 /* compute nominal delay */
1122 delay = frame_current_pts - is->frame_last_pts;
1123 if (delay <= 0 || delay >= 10.0) {
1124 /* if incorrect delay, use previous one */
1125 delay = is->frame_last_delay;
1126 } else {
1127 is->frame_last_delay = delay;
1128 }
1129 is->frame_last_pts = frame_current_pts;
1130
1131 /* update delay to follow master synchronisation source */
1132 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1133 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1134 /* if video is slave, we try to correct big delays by
1135 duplicating or deleting a frame */
1136 diff = get_video_clock(is) - get_master_clock(is);
1137
1138 /* skip or repeat frame. We take into account the
1139 delay to compute the threshold. I still don't know
1140 if it is the best guess */
1141 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1142 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1143 if (diff <= -sync_threshold)
1144 delay = 0;
1145 else if (diff >= sync_threshold)
1146 delay = 2 * delay;
1147 }
1148 }
1149 is->frame_timer += delay;
1150 #if defined(DEBUG_SYNC)
1151 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1152 delay, actual_delay, frame_current_pts, -diff);
1153 #endif
1154
1155 return is->frame_timer;
1156 }
1157
/* Called (from the event loop) to display each frame: pops the next due
   picture from the picture queue, handles frame dropping, retires expired
   subtitles, blits the frame, and periodically prints the status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: keep it queued for a later refresh */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* determine when the frame AFTER this one is due, so we can
               tell whether we are already late for it */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if we are past the next frame's deadline,
               increase the decoder-side skip ratio and possibly discard
               this picture without displaying it */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switch: drop every queued subtitle picture */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the head subtitle once it has expired or
                           the next subtitle has started */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    /* status line: clocks, A-V drift, queue fill levels, fault counters;
       throttled to at most once per 30ms */
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1296
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
/* Runs in the main/event thread in response to FF_ALLOC_EVENT pushed by
   queue_picture(): (re)creates the SDL YUV overlay for the current write
   slot at the current output dimensions, then wakes the video thread
   waiting on pictq_cond. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    /* drop the old filter buffer reference before resizing */
    if (vp->picref)
        avfilter_unref_pic(vp->picref);
    vp->picref = NULL;

    vp->width = is->out_video_filter->inputs[0]->w;
    vp->height = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    /* NOTE(review): SDL_CreateYUVOverlay can return NULL (or an overlay
       with bogus pitches for oversized frames); the result is not checked
       here — queue_picture() only tests vp->bmp for non-NULL. Verify. */
    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    /* publish the new overlay under the queue lock and wake the waiter */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1332
/**
 * Append a decoded frame to the picture queue, converting it into the SDL
 * overlay of the current write slot.  Blocks while the queue is full or
 * while the main thread allocates/resizes the overlay.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while display keeps up -> decay the skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        /* keep a reference to the filter buffer for the overlay's lifetime */
        if(vp->picref)
            avfilter_unref_pic(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL YV12 stores planes as Y,V,U while AVPicture is Y,U,V, hence
           the swapped [1]/[2] indices below */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* no filter graph: colorspace-convert with swscale */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        /* target_clock must be set under the lock, before the refresh
           thread can observe the incremented pictq_size */
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1454
1455 /**
1456 * compute the exact PTS for the picture if it is omitted in the stream
1457 * @param pts1 the dts of the pkt / pts of the frame
1458 */
1459 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1460 {
1461 double frame_delay, pts;
1462
1463 pts = pts1;
1464
1465 if (pts != 0) {
1466 /* update video clock with pts, if present */
1467 is->video_clock = pts;
1468 } else {
1469 pts = is->video_clock;
1470 }
1471 /* update video clock for next frame */
1472 frame_delay = av_q2d(is->video_st->codec->time_base);
1473 /* for MPEG2, the frame can be repeated, so we update the
1474 clock accordingly */
1475 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1476 is->video_clock += frame_delay;
1477
1478 #if defined(DEBUG_SYNC) && 0
1479 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1480 av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1481 #endif
1482 return queue_picture(is, src_frame, pts, pos);
1483 }
1484
/* Pull one packet from the video queue and decode it.
   Returns -1 on abort, 1 when a frame was produced in *frame (with *pts
   chosen from reordered pts vs dts), 0 otherwise (no frame, flush packet,
   or frame dropped by the skip_frames mechanism). */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    /* seek flush: reset decoder and all per-stream timing state */
    if(pkt->data == flush_pkt.data){
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
            is->pictq[i].target_clock= 0;
        }
        /* wait until the refresh thread has drained every queued picture */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos= -1;
        SDL_UnlockMutex(is->pictq_mutex);

        is->last_dts_for_fault_detection=
        is->last_pts_for_fault_detection= INT64_MIN;
        is->frame_last_pts= AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames= 1;
        is->skip_frames_index= 0;
        return 0;
    }

    /* NOTE: ipts is the PTS of the _first_ picture beginning in
       this packet, if any */
    is->video_st->codec->reordered_opaque= pkt->pts;
    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    /* count non-monotonic dts/pts to decide later which timestamp
       source (container dts vs decoder-reordered pts) is trustworthy */
    if (got_picture) {
        if(pkt->dts != AV_NOPTS_VALUE){
            is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
            is->last_dts_for_fault_detection= pkt->dts;
        }
        if(frame->reordered_opaque != AV_NOPTS_VALUE){
            is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
            is->last_pts_for_fault_detection= frame->reordered_opaque;
        }
    }

    /* pick the timestamp: forced/heuristic reordered pts, else dts, else 0 */
    if(   (   decoder_reorder_pts==1
           || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
           || pkt->dts == AV_NOPTS_VALUE)
       && frame->reordered_opaque != AV_NOPTS_VALUE)
        *pts= frame->reordered_opaque;
    else if(pkt->dts != AV_NOPTS_VALUE)
        *pts= pkt->dts;
    else
        *pts= 0;

//            if (len1 < 0)
//                break;
    /* frame dropping: only pass every skip_frames-th picture downstream */
    if (got_picture){
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1556
#if CONFIG_AVFILTER
/* Private context of the "ffplay_input" source filter. */
typedef struct {
    VideoState *is;   // owning player state (provides the video decoder)
    AVFrame *frame;   // scratch frame reused for every decode call
    int use_dr1;      // non-zero: codec renders directly into filter buffers
} FilterPriv;
1563
/* get_buffer() callback installed on the video decoder when DR1 is usable:
   lets the codec decode straight into a buffer owned by the filter graph.
   The buffer is over-allocated by the codec's edge width on every side and
   pic->data[] is offset past that border. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterPicRef *ref;
    int perms = AV_PERM_WRITE;
    int w, h, stride[4];
    unsigned edge;

    /* translate codec buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    /* add the edge border on both sides of each dimension */
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    /* visible size excludes the edge border */
    ref->w = codec->width;
    ref->h = codec->height;
    for(int i = 0; i < 3; i ++) {
        /* chroma planes are subsampled: shift the edge offset accordingly */
        unsigned hshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_w;
        unsigned vshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_h;

        ref->data[i] += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        pic->data[i] = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age = INT_MAX;
    pic->type = FF_BUFFER_TYPE_USER;
    return 0;
}
1604
1605 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1606 {
1607 memset(pic->data, 0, sizeof(pic->data));
1608 avfilter_unref_pic(pic->opaque);
1609 }
1610
/* init callback of the "ffplay_input" source filter.
   opaque must be the player's VideoState; hooks the decoder up for direct
   rendering when the codec supports DR1 and allocates the scratch frame. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec = priv->is->video_st->codec;
    /* let the get/release_buffer callbacks find the filter context */
    codec->opaque = ctx;
    if(codec->codec->capabilities & CODEC_CAP_DR1) {
        priv->use_dr1 = 1;
        codec->get_buffer = input_get_buffer;
        codec->release_buffer = input_release_buffer;
    }

    priv->frame = avcodec_alloc_frame();

    return 0;
}
1630
1631 static void input_uninit(AVFilterContext *ctx)
1632 {
1633 FilterPriv *priv = ctx->priv;
1634 av_free(priv->frame);
1635 }
1636
/* request_frame callback: decodes until a video frame is produced, wraps
   it in an AVFilterPicRef (reusing the DR1 buffer when possible, copying
   otherwise) and pushes it downstream as a single full-height slice. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterPicRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop over dropped/flush packets (ret == 0) until a frame arrives */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* frame already lives in a filter buffer; just take a reference */
        picref = avfilter_ref_pic(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                        picref->pic->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    picref->pts = pts;
    /* NOTE(review): pkt.pos is read after av_free_packet(&pkt); this works
       because av_free_packet only releases the data buffer and leaves pos
       untouched, but it is fragile — consider saving pos beforehand. */
    picref->pos = pkt.pos;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1668
1669 static int input_query_formats(AVFilterContext *ctx)
1670 {
1671 FilterPriv *priv = ctx->priv;
1672 enum PixelFormat pix_fmts[] = {
1673 priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1674 };
1675
1676 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1677 return 0;
1678 }
1679
1680 static int input_config_props(AVFilterLink *link)
1681 {
1682 FilterPriv *priv = link->src->priv;
1683 AVCodecContext *c = priv->is->video_st->codec;
1684
1685 link->w = c->width;
1686 link->h = c->height;
1687
1688 return 0;
1689 }
1690
/* Source filter feeding decoded frames from the video thread into the
   user's filter chain: no inputs, one video output pad. */
static AVFilter input_filter =
{
    .name = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init = input_init,
    .uninit = input_uninit,

    .query_formats = input_query_formats,

    .inputs = (AVFilterPad[]) {{ .name = NULL }},
    .outputs = (AVFilterPad[]) {{ .name = "default",
                                  .type = AVMEDIA_TYPE_VIDEO,
                                  .request_frame = input_request_frame,
                                  .config_props = input_config_props, },
                                { .name = NULL }},
};
1709
/* Intentionally empty: frames are pulled synchronously via
   get_filtered_video_frame(), so nothing needs to happen when the
   upstream filter finishes a frame. */
static void output_end_frame(AVFilterLink *link)
{
}
1713
1714 static int output_query_formats(AVFilterContext *ctx)
1715 {
1716 enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1717
1718 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1719 return 0;
1720 }
1721
/* Pull one frame out of the filter graph through the sink's input link.
   On success returns 1 with frame borrowing the picref's planes (the ref
   itself is stashed in frame->opaque for later unref); -1 on failure. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, int64_t *pos)
{
    AVFilterPicRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_pic))
        return -1;
    /* take ownership of the reference away from the link */
    ctx->inputs[0]->cur_pic = NULL;

    frame->opaque = pic;
    *pts = pic->pts;
    *pos = pic->pos;

    /* shallow copy: frame aliases the picref's buffers, no pixel copy */
    memcpy(frame->data, pic->data, sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
1742
/* Sink filter terminating the chain: one video input pad, no outputs;
   frames are fetched from its input link by get_filtered_video_frame(). */
static AVFilter output_filter =
{
    .name = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs = (AVFilterPad[]) {{ .name = "default",
                                 .type = AVMEDIA_TYPE_VIDEO,
                                 .end_frame = output_end_frame,
                                 .min_perms = AV_PERM_READ, },
                               { .name = NULL }},
    .outputs = (AVFilterPad[]) {{ .name = NULL }},
};
#endif  /* CONFIG_AVFILTER */
1757
/* Video decoding thread: optionally builds the avfilter graph
   (src -> [user filters] -> out), then loops decoding frames and handing
   them to output_picture2() until abort; pauses playback after each frame
   in single-step mode. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    /* NOTE(review): av_mallocz/av_malloc results below are not checked
       before use — confirm whether OOM handling is wanted here. */
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    graph->scale_sws_opts = av_strdup("sws_flags=bilinear");

    if(!(filt_src = avfilter_open(&input_filter, "src"))) goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out"))) goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is)) goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame)) goto the_end;


    if(vfilters) {
        /* splice the user's filter description between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));

        outputs->name = av_strdup("in");
        outputs->filter = filt_src;
        outputs->pad_idx = 0;
        outputs->next = NULL;

        inputs->name = av_strdup("out");
        inputs->filter = filt_out;
        inputs->pad_idx = 0;
        inputs->next = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filters: connect source directly to sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0) goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL)) goto the_end;
    if(avfilter_graph_config_formats(graph, NULL)) goto the_end;
    if(avfilter_graph_config_links(graph, NULL)) goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* 0 means no displayable frame (dropped or flush) */
        if (!ret)
            continue;

        /* convert stream timebase units to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts, pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after emitting one frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1849
/* Subtitle decoding thread: pulls packets from the subtitle queue,
   decodes them, converts bitmap-subtitle palettes from RGBA to YUVA for
   blending onto the YUV overlay, and appends them to the subpicture
   queue (bounded by SUBPICTURE_QUEUE_SIZE). */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* seek flush: just reset the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 means bitmap subtitles with a paletted picture */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's RGBA palette entries to YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1924
1925 /* copy samples for viewing in editor window */
1926 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1927 {
1928 int size, len, channels;
1929
1930 channels = is->audio_st->codec->channels;
1931
1932 size = samples_size / sizeof(short);
1933 while (size > 0) {
1934 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1935 if (len > size)
1936 len = size;
1937 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1938 samples += len;
1939 is->sample_array_index += len;
1940 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1941 is->sample_array_index = 0;
1942 size -= len;
1943 }
1944 }
1945
1946 /* return the new audio buffer size (samples can be added or deleted
1947 to get better sync if video or external master clock) */
1948 static int synchronize_audio(VideoState *is, short *samples,
1949 int samples_size1, double pts)
1950 {
1951 int n, samples_size;
1952 double ref_clock;
1953
1954 n = 2 * is->audio_st->codec->channels;
1955 samples_size = samples_size1;
1956
1957 /* if not master, then we try to remove or add samples to correct the clock */
1958 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1959 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1960 double diff, avg_diff;
1961 int wanted_size, min_size, max_size, nb_samples;
1962
1963 ref_clock = get_master_clock(is);
1964 diff = get_audio_clock(is) - ref_clock;
1965
1966 if (diff < AV_NOSYNC_THRESHOLD) {
1967 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1968 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1969 /* not enough measures to have a correct estimate */
1970 is->audio_diff_avg_count++;
1971 } else {
1972 /* estimate the A-V difference */
1973 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1974
1975 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1976 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1977 nb_samples = samples_size / n;
1978
1979 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1980 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1981 if (wanted_size < min_size)
1982 wanted_size = min_size;
1983 else if (wanted_size > max_size)
1984 wanted_size = max_size;
1985
1986 /* add or remove samples to correction the synchro */
1987 if (wanted_size < samples_size) {
1988 /* remove samples */
1989 samples_size = wanted_size;
1990 } else if (wanted_size > samples_size) {
1991 uint8_t *samples_end, *q;
1992 int nb;
1993
1994 /* add samples */
1995 nb = (samples_size - wanted_size);
1996 samples_end = (uint8_t *)samples + samples_size - n;
1997 q = samples_end + n;
1998 while (nb > 0) {
1999 memcpy(q, samples_end, n);
2000 q += n;
2001 nb -= n;
2002 }
2003 samples_size = wanted_size;
2004 }
2005 }
2006 #if 0
2007 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2008 diff, avg_diff, samples_size - samples_size1,
2009 is->audio_clock, is->video_clock, is->audio_diff_threshold);
2010 #endif
2011 }
2012 } else {
2013 /* too big difference : may be initial PTS errors, so
2014 reset A-V filter */
2015 is->audio_diff_avg_count = 0;
2016 is->audio_diff_cum = 0;
2017 }
2018 }
2019
2020 return samples_size;
2021 }
2022
/* decode one audio frame and returns its uncompressed size */
/* Decodes the next chunk of audio into is->audio_buf (converting to S16
   via is->reformat_ctx when the decoder outputs another sample format),
   advances is->audio_clock, and stores the chunk's pts in *pts_ptr.
   Returns the byte size of the decoded data, or -1 on pause/abort. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;   /* cursor into the current packet */
    AVPacket *pkt = &is->audio_pkt;             /* owns the packet buffer */
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* decoder's output format changed: rebuild the S16 converter */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert interleaved samples from audio_buf1 to S16 in audio_buf2 */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of this chunk */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* seek flush: reset the decoder and fetch the next real packet */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2125
2126 /* get the current audio output buffer size, in samples. With SDL, we
2127 cannot have a precise information */
2128 static int audio_write_get_buf_size(VideoState *is)
2129 {
2130 return is->audio_buf_size - is->audio_buf_index;
2131 }
2132
2133
/* prepare a new audio buffer */
/* SDL audio callback (runs on SDL's audio thread): fills `stream` with
   `len` bytes, decoding further frames as the staging buffer drains and
   substituting silence on decode errors so playback never stalls. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* remember when this callback fired, used by the audio clock */
    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               /* stretch/shrink the chunk to track the master clock */
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much of the staged buffer as the device still wants */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2169
/* open a given stream. Return 0 if OK */
/* Opens the decoder for stream stream_index, applies the global decoding
   options, opens the SDL audio device for audio streams, and spawns the
   video/subtitle worker thread for those stream types. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder to downmix to stereo at most */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    /* apply the global command-line decoding options before opening */
    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        /* start the SDL callback; it pulls data via sdl_audio_callback */
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2269
/* Tear down everything attached to one open stream: stop and drain its
 * packet queue, join its worker thread (video/subtitle) or close the SDL
 * audio device (audio), close the codec and clear the per-type state in
 * the VideoState. Safe no-op for an out-of-range stream_index. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* abort first so the SDL audio callback stops pulling packets
           before the device is closed and the queue freed */
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop the demuxer from queuing further packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    /* mark the slot as closed so the rest of the player skips it */
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2341
2342 /* since we have only one decoding thread, we can use a global
2343 variable instead of a thread local variable */
2344 static VideoState *global_video_state;
2345
2346 static int decode_interrupt_cb(void)
2347 {
2348 return (global_video_state && global_video_state->abort_request);
2349 }
2350
/* this thread gets the stream from the disk or the network */
/* Demuxer thread: opens the input, picks the wanted streams, opens their
 * decoders, then loops reading packets and dispatching them to the audio,
 * video and subtitle queues until abort. Also services seek requests and
 * pause/resume for network streams. Always returns 0; failures are
 * reported to the main loop via an FF_QUIT_EVENT. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];          /* chosen stream per media type, -1 = none */
    int st_count[AVMEDIA_TYPE_NB]={0};      /* how many streams of each type seen so far */
    int st_best_packet_count[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    /* NOTE(review): result is not checked for NULL before use below —
       an OOM here would crash; confirm whether a check should be added */
    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let blocking I/O be interrupted by an abort request */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->prealloced_context = 1;     /* we pass our own ic to av_open_input_file */
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* -1 means "auto": seek by bytes for formats with discontinuous timestamps */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* choose one stream per media type: honour the -ast/-vst/-sst request,
       otherwise prefer the stream with the most packets seen during probing */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if(ret<0) {
        /* no video: fall back to the audio visualisation display */
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* on success, drop all queued packets and push a flush
                   marker so the decoders reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* feed an empty packet so the video decoder flushes its
               delayed frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* once everything is drained, either loop or exit */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* dispatch the packet to the queue of its stream, drop others */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* tell the main loop we are done so it can quit */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2604
2605 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2606 {
2607 VideoState *is;
2608
2609 is = av_mallocz(sizeof(VideoState));
2610 if (!is)
2611 return NULL;
2612 av_strlcpy(is->filename, filename, sizeof(is->filename));
2613 is->iformat = iformat;
2614 is->ytop = 0;
2615 is->xleft = 0;
2616
2617 /* start video display */
2618 is->pictq_mutex = SDL_CreateMutex();
2619 is->pictq_cond = SDL_CreateCond();
2620
2621 is->subpq_mutex = SDL_CreateMutex();
2622 is->subpq_cond = SDL_CreateCond();
2623
2624 is->av_sync_type = av_sync_type;
2625 is->parse_tid = SDL_CreateThread(decode_thread, is);
2626 if (!is->parse_tid) {
2627 av_free(is);
2628 return NULL;
2629 }
2630 return is;
2631 }
2632
/* Stop the demuxer/refresh threads, free all queued pictures and SDL
 * synchronisation objects, and release the VideoState itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_pic(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* the scaler context only exists in the non-avfilter build */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
2666
2667 static void stream_cycle_channel(VideoState *is, int codec_type)
2668 {
2669 AVFormatContext *ic = is->ic;
2670 int start_index, stream_index;
2671 AVStream *st;
2672
2673 if (codec_type == AVMEDIA_TYPE_VIDEO)
2674 start_index = is->video_stream;
2675 else if (codec_type == AVMEDIA_TYPE_AUDIO)
2676 start_index = is->audio_stream;
2677 else
2678 start_index = is->subtitle_stream;
2679 if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2680 return;
2681 stream_index = start_index;
2682 for(;;) {
2683 if (++stream_index >= is->ic->nb_streams)
2684 {
2685 if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2686 {
2687 stream_index = -1;
2688 goto the_end;
2689 } else
2690 stream_index = 0;
2691 }
2692 if (stream_index == start_index)
2693 return;
2694 st = ic->streams[stream_index];
2695 if (st->codec->codec_type == codec_type) {
2696 /* check that parameters are OK */
2697 switch(codec_type) {
2698 case AVMEDIA_TYPE_AUDIO:
2699 if (st->codec->sample_rate != 0 &&
2700 st->codec->channels != 0)
2701 goto the_end;
2702 break;
2703 case AVMEDIA_TYPE_VIDEO:
2704 case AVMEDIA_TYPE_SUBTITLE:
2705 goto the_end;
2706 default:
2707 break;
2708 }
2709 }
2710 }
2711 the_end:
2712 stream_component_close(is, start_index);
2713 stream_component_open(is, stream_index);
2714 }
2715
2716
2717 static void toggle_full_screen(void)
2718 {
2719 is_full_screen = !is_full_screen;
2720 if (!fs_screen_width) {
2721 /* use default SDL method */
2722 // SDL_WM_ToggleFullScreen(screen);
2723 }
2724 video_open(cur_stream);
2725 }
2726
2727 static void toggle_pause(void)
2728 {
2729 if (cur_stream)
2730 stream_pause(cur_stream);
2731 step = 0;
2732 }
2733
2734 static void step_to_next_frame(void)
2735 {
2736 if (cur_stream) {
2737 /* if the stream is paused unpause it, then step */
2738 if (cur_stream->paused)
2739 stream_pause(cur_stream);
2740 }
2741 step = 1;
2742 }
2743
/* Orderly shutdown: close the stream (joining its threads), free the
 * global option contexts, shut down SDL and exit the process. */
static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    for (i = 0; i < AVMEDIA_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    /* terminate the status line cleanly */
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}
2763
2764 static void toggle_audio_display(void)
2765 {
2766 if (cur_stream) {
2767 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2768 cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2769 fill_rectangle(screen,
2770 cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2771 bgcolor);
2772 SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2773 }
2774 }
2775
/* handle an event sent by the GUI */
/* Main SDL event loop: keyboard shortcuts, mouse seeking, window resize
 * and the custom FF_* events posted by the worker threads. Never returns
 * normally — quit paths call do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys share one seek handler; only the increment differs */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* translate the time increment into a byte offset
                           using the container bit rate when available */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            /* click or drag seeks to the fraction of the window width */
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size= url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* the video thread asks the main thread to (re)allocate the
               display, since SDL video calls must come from this thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2914
2915 static void opt_frame_size(const char *arg)
2916 {
2917 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2918 fprintf(stderr, "Incorrect frame size\n");
2919 exit(1);
2920 }
2921 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2922 fprintf(stderr, "Frame size must be a multiple of 2\n");
2923 exit(1);
2924 }
2925 }
2926
/* "-x": force the displayed width (pixels, must be >= 1). */
static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2932
/* "-y": force the displayed height (pixels, must be >= 1). */
static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2938
2939 static void opt_format(const char *arg)
2940 {
2941 file_iformat = av_find_input_format(arg);
2942 if (!file_iformat) {
2943 fprintf(stderr, "Unknown input format: %s\n", arg);
2944 exit(1);
2945 }
2946 }
2947
2948 static void opt_frame_pix_fmt(const char *arg)
2949 {
2950 frame_pix_fmt = av_get_pix_fmt(arg);
2951 }
2952
2953 static int opt_sync(const char *opt, const char *arg)
2954 {
2955 if (!strcmp(arg, "audio"))
2956 av_sync_type = AV_SYNC_AUDIO_MASTER;
2957 else if (!strcmp(arg, "video"))
2958 av_sync_type = AV_SYNC_VIDEO_MASTER;
2959 else if (!strcmp(arg, "ext"))
2960 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2961 else {
2962 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2963 exit(1);
2964 }
2965 return 0;
2966 }
2967
/* "-ss": start playback at the given time (parsed as a duration). */
static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}
2973
/* "-debug": enable codec debug flags; also raises the log level first
 * so the extra output is actually visible. */
static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}
2980
/* "-vismv": set the motion-vector visualisation mask. */
static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}
2986
/* "-threads": set the decoder thread count; warns when the build has no
 * real thread support and will emulate it. */
static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}
2995
/* Command-line option table; terminated by a NULL entry. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display options */
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* input / seeking */
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* decoder tuning / debugging */
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    /* playback behaviour */
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3039
/* Print the one-line program description and usage synopsis. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3046
3047 static void show_help(void)
3048 {
3049 show_usage();
3050 show_help_options(options, "Main options:\n",
3051 OPT_EXPERT, 0);
3052 show_help_options(options, "\nAdvanced options:\n",
3053 OPT_EXPERT, OPT_EXPERT);
3054 printf("\nWhile playing:\n"
3055 "q, ESC quit\n"
3056 "f toggle full screen\n"
3057 "p, SPC pause\n"
3058 "a cycle audio channel\n"
3059 "v cycle video channel\n"
3060 "t cycle subtitle channel\n"
3061 "w show audio waves\n"
3062 "left/right seek backward/forward 10 seconds\n"
3063 "down/up seek backward/forward 1 minute\n"
3064 "mouse click seek to percentage in file corresponding to fraction of width\n"
3065 );
3066 }
3067
3068 static void opt_input_file(const char *filename)
3069 {
3070 if (input_filename) {
3071 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3072 filename, input_filename);
3073 exit(1);
3074 }
3075 if (!strcmp(filename, "-"))
3076 filename = "pipe:";
3077 input_filename = filename;
3078 }
3079
/* Called from the main */
/* Program entry point: register codecs/formats, parse the command line,
 * initialise SDL, open the input stream and run the event loop (which
 * never returns; exit happens through do_exit()). */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    /* per-media-type option contexts used by set_context_opts() */
    for(i=0; i<AVMEDIA_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
#if !CONFIG_AVFILTER
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
#endif

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop resolution for fullscreen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event types the player does not handle */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet pushed into the queues after a seek so decoders
       know to flush their internal state */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}