Simplify some output messages in the v4l2 input device
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the FFmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/pixdesc.h"
28 #include "libavformat/avformat.h"
29 #include "libavdevice/avdevice.h"
30 #include "libswscale/swscale.h"
31 #include "libavcodec/audioconvert.h"
32 #include "libavcodec/colorspace.h"
33 #include "libavcodec/opt.h"
34 #include "libavcodec/avfft.h"
35
36 #if CONFIG_AVFILTER
37 # include "libavfilter/avfilter.h"
38 # include "libavfilter/avfiltergraph.h"
39 # include "libavfilter/graphparser.h"
40 #endif
41
42 #include "cmdutils.h"
43
44 #include <SDL.h>
45 #include <SDL_thread.h>
46
47 #ifdef __MINGW32__
48 #undef main /* We don't want SDL to override our main() */
49 #endif
50
51 #include <unistd.h>
52 #include <assert.h>
53
54 const char program_name[] = "FFplay";
55 const int program_birth_year = 2003;
56
57 //#define DEBUG_SYNC
58
59 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61 #define MIN_FRAMES 5
62
63 /* SDL audio buffer size, in samples. Should be small to have precise
64 A/V sync as SDL does not have hardware buffer fullness info. */
65 #define SDL_AUDIO_BUFFER_SIZE 1024
66
67 /* no AV sync correction is done if below the AV sync threshold */
68 #define AV_SYNC_THRESHOLD 0.01
69 /* no AV correction is done if too big error */
70 #define AV_NOSYNC_THRESHOLD 10.0
71
72 #define FRAME_SKIP_FACTOR 0.05
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78 #define AUDIO_DIFF_AVG_NB 20
79
80 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 #if !CONFIG_AVFILTER
84 static int sws_flags = SWS_BICUBIC;
85 #endif
86
/* Thread-safe FIFO of demuxed AVPackets, shared between the demux
   thread (producer) and the audio/video/subtitle decoder threads
   (consumers). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list head / tail */
    int nb_packets;      /* number of packets currently queued */
    int size;            /* queued payload bytes + per-node overhead */
    int abort_request;   /* when set, blocked readers return immediately */
    SDL_mutex *mutex;    /* protects every field above */
    SDL_cond *cond;      /* signalled on put and on abort */
} PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
/* One decoded picture waiting in the display queue, with its timing. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;      /* SDL YUV overlay holding the pixel data */
    int width, height; /* source height & width */
    int allocated;     /* non-zero once bmp has been allocated (by the event loop) */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterPicRef *picref;  /* filtered picture reference (owns the buffer) */
#endif
} VideoPicture;
112
/* One decoded subtitle plus the pts at which it becomes visible. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;  /* decoded subtitle rectangles */
} SubPicture;
117
/* Master-clock selection: which clock the other streams are slaved to. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
123
/* All per-file playback state: demuxer context, per-stream packet
   queues, the clocks used for A/V sync, and the display queues. */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demux (read) thread */
    SDL_Thread *video_tid;    /* video decode thread */
    SDL_Thread *refresh_tid;  /* thread posting FF_REFRESH_EVENTs */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;        /* tells all threads to exit */
    int paused;
    int last_paused;
    int seek_req;             /* a seek has been requested (see stream_seek) */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;         /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* audio clock and the running average of the A-V difference */
    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;          /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];  /* ring buffer feeding the visualisation */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;  /* FFT context for the spectrum display */
    int rdft_bits;
    int xpos;           /* current column of the scrolling spectrum */

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos; ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;

    /* counters used to detect streams with broken pts/dts */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter; ///<the last filter in the video chain
#endif

    float skip_frames;        /* frame-drop ratio (grows while behind) */
    float skip_frames_index;
    int refresh;              /* set while a refresh event is pending */
} VideoState;
220
221 static void show_help(void);
222 static int audio_write_get_buf_size(VideoState *is);
223
224 /* options specified by the user */
225 static AVInputFormat *file_iformat;
226 static const char *input_filename;
227 static const char *window_title;
228 static int fs_screen_width;
229 static int fs_screen_height;
230 static int screen_width = 0;
231 static int screen_height = 0;
232 static int frame_width = 0;
233 static int frame_height = 0;
234 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB]={
238 [AVMEDIA_TYPE_AUDIO]=-1,
239 [AVMEDIA_TYPE_VIDEO]=-1,
240 [AVMEDIA_TYPE_SUBTITLE]=-1,
241 };
242 static int seek_by_bytes=-1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int debug = 0;
249 static int debug_mv = 0;
250 static int step = 0;
251 static int thread_count = 1;
252 static int workaround_bugs = 1;
253 static int fast = 0;
254 static int genpts = 0;
255 static int lowres = 0;
256 static int idct = FF_IDCT_AUTO;
257 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
259 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
260 static int error_recognition = FF_ER_CAREFUL;
261 static int error_concealment = 3;
262 static int decoder_reorder_pts= -1;
263 static int autoexit;
264 static int loop=1;
265 static int framedrop=1;
266
267 static int rdftspeed=20;
268 #if CONFIG_AVFILTER
269 static char *vfilters = NULL;
270 #endif
271
272 /* current context */
273 static int is_full_screen;
274 static VideoState *cur_stream;
275 static int64_t audio_callback_time;
276
277 static AVPacket flush_pkt;
278
279 #define FF_ALLOC_EVENT (SDL_USEREVENT)
280 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
281 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
282
283 static SDL_Surface *screen;
284
285 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
286
287 /* packet queue handling */
288 static void packet_queue_init(PacketQueue *q)
289 {
290 memset(q, 0, sizeof(PacketQueue));
291 q->mutex = SDL_CreateMutex();
292 q->cond = SDL_CreateCond();
293 packet_queue_put(q, &flush_pkt);
294 }
295
296 static void packet_queue_flush(PacketQueue *q)
297 {
298 AVPacketList *pkt, *pkt1;
299
300 SDL_LockMutex(q->mutex);
301 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
302 pkt1 = pkt->next;
303 av_free_packet(&pkt->pkt);
304 av_freep(&pkt);
305 }
306 q->last_pkt = NULL;
307 q->first_pkt = NULL;
308 q->nb_packets = 0;
309 q->size = 0;
310 SDL_UnlockMutex(q->mutex);
311 }
312
/* Free all queued packets and destroy the queue's mutex and condvar.
   The queue must not be used afterwards. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
319
320 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
321 {
322 AVPacketList *pkt1;
323
324 /* duplicate the packet */
325 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
326 return -1;
327
328 pkt1 = av_malloc(sizeof(AVPacketList));
329 if (!pkt1)
330 return -1;
331 pkt1->pkt = *pkt;
332 pkt1->next = NULL;
333
334
335 SDL_LockMutex(q->mutex);
336
337 if (!q->last_pkt)
338
339 q->first_pkt = pkt1;
340 else
341 q->last_pkt->next = pkt1;
342 q->last_pkt = pkt1;
343 q->nb_packets++;
344 q->size += pkt1->pkt.size + sizeof(*pkt1);
345 /* XXX: should duplicate packet data in DV case */
346 SDL_CondSignal(q->cond);
347
348 SDL_UnlockMutex(q->mutex);
349 return 0;
350 }
351
/* Put the queue into abort mode: wake any thread blocked in
   packet_queue_get() and make further gets return -1 immediately. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
362
363 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
365 {
366 AVPacketList *pkt1;
367 int ret;
368
369 SDL_LockMutex(q->mutex);
370
371 for(;;) {
372 if (q->abort_request) {
373 ret = -1;
374 break;
375 }
376
377 pkt1 = q->first_pkt;
378 if (pkt1) {
379 q->first_pkt = pkt1->next;
380 if (!q->first_pkt)
381 q->last_pkt = NULL;
382 q->nb_packets--;
383 q->size -= pkt1->pkt.size + sizeof(*pkt1);
384 *pkt = pkt1->pkt;
385 av_free(pkt1);
386 ret = 1;
387 break;
388 } else if (!block) {
389 ret = 0;
390 break;
391 } else {
392 SDL_CondWait(q->cond, q->mutex);
393 }
394 }
395 SDL_UnlockMutex(q->mutex);
396 return ret;
397 }
398
399 static inline void fill_rectangle(SDL_Surface *screen,
400 int x, int y, int w, int h, int color)
401 {
402 SDL_Rect rect;
403 rect.x = x;
404 rect.y = y;
405 rect.w = w;
406 rect.h = h;
407 SDL_FillRect(screen, &rect, color);
408 }
409
#if 0
/* draw only the border of a rectangle */
/* NOTE(review): disabled dead code -- kept only as reference for the
   commented-out fill_border() call in video_image_display(). */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
447
/* Blend 'newp' over 'oldp' with alpha a (0..255).  's' is the number of
   accumulated source samples expressed as a shift: 0 = one sample,
   1 = sum of two, 2 = sum of four (see blend_subrect). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB word at 's' into r, g, b, a. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the index byte *s and unpack as AYUV. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y, u, v, a into a 32-bit AYUV word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
476
477 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
478 {
479 int wrap, wrap3, width2, skip2;
480 int y, u, v, a, u1, v1, a1, w, h;
481 uint8_t *lum, *cb, *cr;
482 const uint8_t *p;
483 const uint32_t *pal;
484 int dstx, dsty, dstw, dsth;
485
486 dstw = av_clip(rect->w, 0, imgw);
487 dsth = av_clip(rect->h, 0, imgh);
488 dstx = av_clip(rect->x, 0, imgw - dstw);
489 dsty = av_clip(rect->y, 0, imgh - dsth);
490 lum = dst->data[0] + dsty * dst->linesize[0];
491 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
492 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
493
494 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
495 skip2 = dstx >> 1;
496 wrap = dst->linesize[0];
497 wrap3 = rect->pict.linesize[0];
498 p = rect->pict.data[0];
499 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
500
501 if (dsty & 1) {
502 lum += dstx;
503 cb += skip2;
504 cr += skip2;
505
506 if (dstx & 1) {
507 YUVA_IN(y, u, v, a, p, pal);
508 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
510 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
511 cb++;
512 cr++;
513 lum++;
514 p += BPP;
515 }
516 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
517 YUVA_IN(y, u, v, a, p, pal);
518 u1 = u;
519 v1 = v;
520 a1 = a;
521 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522
523 YUVA_IN(y, u, v, a, p + BPP, pal);
524 u1 += u;
525 v1 += v;
526 a1 += a;
527 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
528 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530 cb++;
531 cr++;
532 p += 2 * BPP;
533 lum += 2;
534 }
535 if (w) {
536 YUVA_IN(y, u, v, a, p, pal);
537 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
539 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
540 p++;
541 lum++;
542 }
543 p += wrap3 - dstw * BPP;
544 lum += wrap - dstw - dstx;
545 cb += dst->linesize[1] - width2 - skip2;
546 cr += dst->linesize[2] - width2 - skip2;
547 }
548 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
549 lum += dstx;
550 cb += skip2;
551 cr += skip2;
552
553 if (dstx & 1) {
554 YUVA_IN(y, u, v, a, p, pal);
555 u1 = u;
556 v1 = v;
557 a1 = a;
558 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
559 p += wrap3;
560 lum += wrap;
561 YUVA_IN(y, u, v, a, p, pal);
562 u1 += u;
563 v1 += v;
564 a1 += a;
565 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
567 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
568 cb++;
569 cr++;
570 p += -wrap3 + BPP;
571 lum += -wrap + 1;
572 }
573 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
574 YUVA_IN(y, u, v, a, p, pal);
575 u1 = u;
576 v1 = v;
577 a1 = a;
578 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579
580 YUVA_IN(y, u, v, a, p + BPP, pal);
581 u1 += u;
582 v1 += v;
583 a1 += a;
584 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
585 p += wrap3;
586 lum += wrap;
587
588 YUVA_IN(y, u, v, a, p, pal);
589 u1 += u;
590 v1 += v;
591 a1 += a;
592 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593
594 YUVA_IN(y, u, v, a, p + BPP, pal);
595 u1 += u;
596 v1 += v;
597 a1 += a;
598 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
599
600 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
601 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
602
603 cb++;
604 cr++;
605 p += -wrap3 + 2 * BPP;
606 lum += -wrap + 2;
607 }
608 if (w) {
609 YUVA_IN(y, u, v, a, p, pal);
610 u1 = u;
611 v1 = v;
612 a1 = a;
613 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614 p += wrap3;
615 lum += wrap;
616 YUVA_IN(y, u, v, a, p, pal);
617 u1 += u;
618 v1 += v;
619 a1 += a;
620 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
622 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
623 cb++;
624 cr++;
625 p += -wrap3 + BPP;
626 lum += -wrap + 1;
627 }
628 p += wrap3 + (wrap3 - dstw * BPP);
629 lum += wrap + (wrap - dstw - dstx);
630 cb += dst->linesize[1] - width2 - skip2;
631 cr += dst->linesize[2] - width2 - skip2;
632 }
633 /* handle odd height */
634 if (h) {
635 lum += dstx;
636 cb += skip2;
637 cr += skip2;
638
639 if (dstx & 1) {
640 YUVA_IN(y, u, v, a, p, pal);
641 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
642 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
643 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
644 cb++;
645 cr++;
646 lum++;
647 p += BPP;
648 }
649 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
650 YUVA_IN(y, u, v, a, p, pal);
651 u1 = u;
652 v1 = v;
653 a1 = a;
654 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
655
656 YUVA_IN(y, u, v, a, p + BPP, pal);
657 u1 += u;
658 v1 += v;
659 a1 += a;
660 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
661 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
662 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
663 cb++;
664 cr++;
665 p += 2 * BPP;
666 lum += 2;
667 }
668 if (w) {
669 YUVA_IN(y, u, v, a, p, pal);
670 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
671 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
672 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
673 }
674 }
675 }
676
677 static void free_subpicture(SubPicture *sp)
678 {
679 int i;
680
681 for (i = 0; i < sp->sub.num_rects; i++)
682 {
683 av_freep(&sp->sub.rects[i]->pict.data[0]);
684 av_freep(&sp->sub.rects[i]->pict.data[1]);
685 av_freep(&sp->sub.rects[i]);
686 }
687
688 av_free(sp->sub.rects);
689
690 memset(&sp->sub, 0, sizeof(AVSubtitle));
691 }
692
/* Blit the current picture (head of the picture queue) to the screen:
   compute the display aspect ratio, blend any due subtitle into the
   overlay, letterbox the image into the window and show it. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
        if (vp->picref->pixel_aspect.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* sample (pixel) aspect ratio -> display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        /* blend any subtitle whose display time has arrived directly
           into the YUV overlay */
        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note the [1]/[2] swap: the overlay's plane order
                       differs from the AVPicture U/V order */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: largest even-sized rectangle with the right aspect
           ratio that fits the window, centered */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
814
/* Euclidean remainder: for b > 0 the result is always in [0, b),
   unlike C's % which is negative for negative a. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r < 0 ? r + b : r;
}
823
824 static void video_audio_display(VideoState *s)
825 {
826 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
827 int ch, channels, h, h2, bgcolor, fgcolor;
828 int16_t time_diff;
829 int rdft_bits, nb_freq;
830
831 for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
832 ;
833 nb_freq= 1<<(rdft_bits-1);
834
835 /* compute display index : center on currently output samples */
836 channels = s->audio_st->codec->channels;
837 nb_display_channels = channels;
838 if (!s->paused) {
839 int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
840 n = 2 * channels;
841 delay = audio_write_get_buf_size(s);
842 delay /= n;
843
844 /* to be more precise, we take into account the time spent since
845 the last buffer computation */
846 if (audio_callback_time) {
847 time_diff = av_gettime() - audio_callback_time;
848 delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
849 }
850
851 delay += 2*data_used;
852 if (delay < data_used)
853 delay = data_used;
854
855 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
856 if(s->show_audio==1){
857 h= INT_MIN;
858 for(i=0; i<1000; i+=channels){
859 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
860 int a= s->sample_array[idx];
861 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
862 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
863 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
864 int score= a-d;
865 if(h<score && (b^c)<0){
866 h= score;
867 i_start= idx;
868 }
869 }
870 }
871
872 s->last_i_start = i_start;
873 } else {
874 i_start = s->last_i_start;
875 }
876
877 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
878 if(s->show_audio==1){
879 fill_rectangle(screen,
880 s->xleft, s->ytop, s->width, s->height,
881 bgcolor);
882
883 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
884
885 /* total height for one channel */
886 h = s->height / nb_display_channels;
887 /* graph height / 2 */
888 h2 = (h * 9) / 20;
889 for(ch = 0;ch < nb_display_channels; ch++) {
890 i = i_start + ch;
891 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
892 for(x = 0; x < s->width; x++) {
893 y = (s->sample_array[i] * h2) >> 15;
894 if (y < 0) {
895 y = -y;
896 ys = y1 - y;
897 } else {
898 ys = y1;
899 }
900 fill_rectangle(screen,
901 s->xleft + x, ys, 1, y,
902 fgcolor);
903 i += channels;
904 if (i >= SAMPLE_ARRAY_SIZE)
905 i -= SAMPLE_ARRAY_SIZE;
906 }
907 }
908
909 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
910
911 for(ch = 1;ch < nb_display_channels; ch++) {
912 y = s->ytop + ch * h;
913 fill_rectangle(screen,
914 s->xleft, y, s->width, 1,
915 fgcolor);
916 }
917 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
918 }else{
919 nb_display_channels= FFMIN(nb_display_channels, 2);
920 if(rdft_bits != s->rdft_bits){
921 av_rdft_end(s->rdft);
922 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
923 s->rdft_bits= rdft_bits;
924 }
925 {
926 FFTSample data[2][2*nb_freq];
927 for(ch = 0;ch < nb_display_channels; ch++) {
928 i = i_start + ch;
929 for(x = 0; x < 2*nb_freq; x++) {
930 double w= (x-nb_freq)*(1.0/nb_freq);
931 data[ch][x]= s->sample_array[i]*(1.0-w*w);
932 i += channels;
933 if (i >= SAMPLE_ARRAY_SIZE)
934 i -= SAMPLE_ARRAY_SIZE;
935 }
936 av_rdft_calc(s->rdft, data[ch]);
937 }
938 //least efficient way to do this, we should of course directly access it but its more than fast enough
939 for(y=0; y<s->height; y++){
940 double w= 1/sqrt(nb_freq);
941 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
942 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
943 a= FFMIN(a,255);
944 b= FFMIN(b,255);
945 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
946
947 fill_rectangle(screen,
948 s->xpos, s->height-y, 1, 1,
949 fgcolor);
950 }
951 }
952 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
953 s->xpos++;
954 if(s->xpos >= s->width)
955 s->xpos= s->xleft;
956 }
957 }
958
/* (Re)create the SDL output surface.  Size priority: fullscreen size,
   user-requested -x/-y size, filter/codec output size, 640x480 fallback.
   Returns 0 on success (or if the surface already matches), -1 if SDL
   refuses the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1008
/* display the current picture, if any */
static void video_display(VideoState *is)
{
    /* lazily open the SDL window on first display */
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);   /* waveform / spectrum visualisation */
    else if (is->video_st)
        video_image_display(is);
}
1019
/* Thread that periodically posts FF_REFRESH_EVENT so the main thread
   redraws.  The is->refresh flag keeps at most one event in flight. */
static int refresh_thread(void *opaque)
{
    VideoState *is= opaque;
    while(!is->abort_request){
        SDL_Event event;
        event.type = FF_REFRESH_EVENT;
        event.user.data1 = opaque;
        if(!is->refresh){
            is->refresh=1;
            SDL_PushEvent(&event);
        }
        /* refresh faster while the audio visualisation is shown */
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
    }
    return 0;
}
1035
1036 /* get the current audio clock value */
1037 static double get_audio_clock(VideoState *is)
1038 {
1039 double pts;
1040 int hw_buf_size, bytes_per_sec;
1041 pts = is->audio_clock;
1042 hw_buf_size = audio_write_get_buf_size(is);
1043 bytes_per_sec = 0;
1044 if (is->audio_st) {
1045 bytes_per_sec = is->audio_st->codec->sample_rate *
1046 2 * is->audio_st->codec->channels;
1047 }
1048 if (bytes_per_sec)
1049 pts -= (double)hw_buf_size / bytes_per_sec;
1050 return pts;
1051 }
1052
1053 /* get the current video clock value */
1054 static double get_video_clock(VideoState *is)
1055 {
1056 if (is->paused) {
1057 return is->video_current_pts;
1058 } else {
1059 return is->video_current_pts_drift + av_gettime() / 1000000.0;
1060 }
1061 }
1062
1063 /* get the current external clock value */
1064 static double get_external_clock(VideoState *is)
1065 {
1066 int64_t ti;
1067 ti = av_gettime();
1068 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1069 }
1070
1071 /* get the current master clock value */
1072 static double get_master_clock(VideoState *is)
1073 {
1074 double val;
1075
1076 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1077 if (is->video_st)
1078 val = get_video_clock(is);
1079 else
1080 val = get_audio_clock(is);
1081 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1082 if (is->audio_st)
1083 val = get_audio_clock(is);
1084 else
1085 val = get_video_clock(is);
1086 } else {
1087 val = get_external_clock(is);
1088 }
1089 return val;
1090 }
1091
1092 /* seek in the stream */
1093 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1094 {
1095 if (!is->seek_req) {
1096 is->seek_pos = pos;
1097 is->seek_rel = rel;
1098 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1099 if (seek_by_bytes)
1100 is->seek_flags |= AVSEEK_FLAG_BYTE;
1101 is->seek_req = 1;
1102 }
1103 }
1104
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance the frame timer by the time spent paused so
           scheduling continues from "now" rather than from pause time */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift so get_video_clock() resumes smoothly */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1117
1118 static double compute_target_time(double frame_current_pts, VideoState *is)
1119 {
1120 double delay, sync_threshold, diff;
1121
1122 /* compute nominal delay */
1123 delay = frame_current_pts - is->frame_last_pts;
1124 if (delay <= 0 || delay >= 10.0) {
1125 /* if incorrect delay, use previous one */
1126 delay = is->frame_last_delay;
1127 } else {
1128 is->frame_last_delay = delay;
1129 }
1130 is->frame_last_pts = frame_current_pts;
1131
1132 /* update delay to follow master synchronisation source */
1133 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1134 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1135 /* if video is slave, we try to correct big delays by
1136 duplicating or deleting a frame */
1137 diff = get_video_clock(is) - get_master_clock(is);
1138
1139 /* skip or repeat frame. We take into account the
1140 delay to compute the threshold. I still don't know
1141 if it is the best guess */
1142 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1143 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1144 if (diff <= -sync_threshold)
1145 delay = 0;
1146 else if (diff >= sync_threshold)
1147 delay = 2 * delay;
1148 }
1149 }
1150 is->frame_timer += delay;
1151 #if defined(DEBUG_SYNC)
1152 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1153 delay, actual_delay, frame_current_pts, -diff);
1154 #endif
1155
1156 return is->frame_timer;
1157 }
1158
1159 /* called to display each frame */
/* Called to display each frame: pops pictures/subtitles whose display time
   has arrived, handles frame dropping, and periodically prints the status
   line. Runs in the main (event) thread via FF_REFRESH_EVENT. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet time to show this picture */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            if(is->pictq_size > 1){
                /* deadline of the following queued picture */
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                /* estimate the next deadline from the frame duration */
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* we are already late for the next picture: consider dropping
               this one and raising the decoder-side skip ratio */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* the subtitle stream was switched: flush the whole
                       subtitle queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the head subtitle once it has expired or the
                           next one should already be displayed */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh the status line at most every 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1297
1298 /* allocate a picture (needs to do that in main thread to avoid
1299 potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    /* (re)allocate the overlay for the picture currently being written */
    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    /* drop any filter picture reference still attached to this slot */
    if (vp->picref)
        avfilter_unref_pic(vp->picref);
    vp->picref = NULL;

    /* with filters, the output dimensions come from the filter graph */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    /* wake up queue_picture(), which is blocked waiting for the allocation */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1333
1334 /**
1335 *
1336 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1337 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue is full while display keeps up: relax the frame-skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        /* take ownership of the filter's picture reference */
        if(vp->picref)
            avfilter_unref_pic(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 overlay stores V before U, hence planes 1/2 swapped */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* convert whatever the decoder produced to YUV420P for the overlay */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1455
1456 /**
1457 * compute the exact PTS for the picture if it is omitted in the stream
1458 * @param pts1 the dts of the pkt / pts of the frame
1459 */
1460 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1461 {
1462 double frame_delay, pts;
1463
1464 pts = pts1;
1465
1466 if (pts != 0) {
1467 /* update video clock with pts, if present */
1468 is->video_clock = pts;
1469 } else {
1470 pts = is->video_clock;
1471 }
1472 /* update video clock for next frame */
1473 frame_delay = av_q2d(is->video_st->codec->time_base);
1474 /* for MPEG2, the frame can be repeated, so we update the
1475 clock accordingly */
1476 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1477 is->video_clock += frame_delay;
1478
1479 #if defined(DEBUG_SYNC) && 0
1480 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1481 av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1482 #endif
1483 return queue_picture(is, src_frame, pts, pos);
1484 }
1485
/* Pull one packet from the video queue and decode it. Returns 1 when a
   displayable frame was produced (pts filled in), 0 when no frame is ready
   (or the frame was skipped / the queue was flushed), -1 on abort. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if(pkt->data == flush_pkt.data){
        /* seek happened: reset decoder and all timing state */
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
            is->pictq[i].target_clock= 0;
        }
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos= -1;
        SDL_UnlockMutex(is->pictq_mutex);

        is->last_dts_for_fault_detection=
        is->last_pts_for_fault_detection= INT64_MIN;
        is->frame_last_pts= AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames= 1;
        is->skip_frames_index= 0;
        return 0;
    }

    /* NOTE: ipts is the PTS of the _first_ picture beginning in
       this packet, if any */
    is->video_st->codec->reordered_opaque= pkt->pts;
    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    if (got_picture) {
        /* count non-monotone dts/pts values to decide which timestamp
           source (dts vs reordered pts) is trustworthy below */
        if(pkt->dts != AV_NOPTS_VALUE){
            is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
            is->last_dts_for_fault_detection= pkt->dts;
        }
        if(frame->reordered_opaque != AV_NOPTS_VALUE){
            is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
            is->last_pts_for_fault_detection= frame->reordered_opaque;
        }
    }

    /* pick pts when forced (-1/1 option), when pts looks more reliable than
       dts, or when there simply is no dts; otherwise fall back to dts, then 0 */
    if(   (   decoder_reorder_pts==1
           || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
           || pkt->dts == AV_NOPTS_VALUE)
       && frame->reordered_opaque != AV_NOPTS_VALUE)
        *pts= frame->reordered_opaque;
    else if(pkt->dts != AV_NOPTS_VALUE)
        *pts= pkt->dts;
    else
        *pts= 0;

//            if (len1 < 0)
//                break;
    if (got_picture){
        /* fractional frame skipping driven by video_refresh_timer() */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1557
1558 #if CONFIG_AVFILTER
/* Private context of the "ffplay_input" source filter. */
typedef struct {
    VideoState *is;   // owning player state (supplies the video decoder)
    AVFrame *frame;   // scratch frame reused for each decoded picture
    int use_dr1;      // non-zero when decoding directly into filter buffers
} FilterPriv;
1564
/* get_buffer() callback installed on the decoder so that it renders
   directly into lavfi picture buffers (direct rendering, "DR1"). */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterPicRef  *ref;
    int perms = AV_PERM_WRITE;
    int w, h, stride[4];
    unsigned edge;

    /* translate lavc buffer hints into lavfi permission flags */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* request a buffer large enough for alignment plus the edge border
       required by the decoder (unless it emulates edges itself) */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->w = codec->width;
    ref->h = codec->height;
    for(int i = 0; i < 3; i ++) {
        unsigned hshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_w;
        unsigned vshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_h;

        /* skip past the edge border so data[] points at the visible area */
        ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;              /* remember the ref for release_buffer */
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    return 0;
}
1605
1606 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1607 {
1608 memset(pic->data, 0, sizeof(pic->data));
1609 avfilter_unref_pic(pic->opaque);
1610 }
1611
/* init callback of the "ffplay_input" filter; opaque is the VideoState.
   Enables direct rendering into filter buffers when the codec supports it. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    codec->opaque = ctx;
    if(codec->codec->capabilities & CODEC_CAP_DR1) {
        /* decode straight into lavfi buffers (see input_get_buffer) */
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
    }

    priv->frame = avcodec_alloc_frame();

    return 0;
}
1631
1632 static void input_uninit(AVFilterContext *ctx)
1633 {
1634 FilterPriv *priv = ctx->priv;
1635 av_free(priv->frame);
1636 }
1637
/* request_frame callback: decode the next video frame and push it into the
   filter graph as a single full-height slice. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterPicRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* keep decoding until a frame comes out (0 = no frame yet, <0 = abort) */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* frame already lives in a filter buffer; just add a reference */
        picref = avfilter_ref_pic(priv->frame->opaque, ~0);
    } else {
        /* copy the decoded frame into a freshly requested filter buffer */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                        picref->pic->format, link->w, link->h);
    }
    /* NOTE(review): pkt.pos is read below after av_free_packet(); this only
       works because av_free_packet frees the payload without clearing the
       pos field — confirm before reordering these lines */
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1669
1670 static int input_query_formats(AVFilterContext *ctx)
1671 {
1672 FilterPriv *priv = ctx->priv;
1673 enum PixelFormat pix_fmts[] = {
1674 priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1675 };
1676
1677 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1678 return 0;
1679 }
1680
1681 static int input_config_props(AVFilterLink *link)
1682 {
1683 FilterPriv *priv = link->src->priv;
1684 AVCodecContext *c = priv->is->video_st->codec;
1685
1686 link->w = c->width;
1687 link->h = c->height;
1688
1689 return 0;
1690 }
1691
/* Source filter feeding decoded frames from the player into the graph.
   It has no inputs and a single video output driven by request_frame. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1710
/* Intentionally empty: frames are pulled out of the sink by the video
   thread via get_filtered_video_frame(), not pushed onward from here. */
static void output_end_frame(AVFilterLink *link)
{
}
1714
1715 static int output_query_formats(AVFilterContext *ctx)
1716 {
1717 enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1718
1719 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1720 return 0;
1721 }
1722
/* Pull one frame out of the sink filter. On success the frame borrows the
   picture reference (stored in frame->opaque, freed by the consumer) and
   pts/pos are filled in; returns 1 on success, -1 on failure. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, int64_t *pos)
{
    AVFilterPicRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_pic))
        return -1;
    /* take the picture out of the link so it is not unreffed by lavfi */
    ctx->inputs[0]->cur_pic = NULL;

    frame->opaque = pic;
    *pts          = pic->pts;
    *pos          = pic->pos;

    /* expose the picture's planes through the AVFrame */
    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
1743
/* Sink filter: terminates the graph; frames are fetched from its input
   link by get_filtered_video_frame(). */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1757 #endif /* CONFIG_AVFILTER */
1758
/* Video decoding thread: builds the filter graph (when enabled), then
   loops decoding frames and queueing them for display until aborted. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    graph->scale_sws_opts = av_strdup("sws_flags=bilinear");

    /* create the player's source and sink filters */
    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* parse the user filter chain between our source ("in") and
           sink ("out") endpoints */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filters: connect source directly to sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        if (!ret)
            continue;

        /* convert the stream timestamp to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* in single-step mode, pause again after each frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1850
/* Subtitle decoding thread: decodes subtitle packets, converts bitmap
   subtitle palettes from RGBA to YUVA, and queues them for blending. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            /* seek happened: reset the subtitle decoder */
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subtitle queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 means bitmap subtitles (the only kind blended here) */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette from RGBA to YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1925
1926 /* copy samples for viewing in editor window */
1927 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1928 {
1929 int size, len, channels;
1930
1931 channels = is->audio_st->codec->channels;
1932
1933 size = samples_size / sizeof(short);
1934 while (size > 0) {
1935 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1936 if (len > size)
1937 len = size;
1938 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1939 samples += len;
1940 is->sample_array_index += len;
1941 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1942 is->sample_array_index = 0;
1943 size -= len;
1944 }
1945 }
1946
1947 /* return the new audio buffer size (samples can be added or deleted
1948 to get better sync if video or external master clock) */
1949 static int synchronize_audio(VideoState *is, short *samples,
1950 int samples_size1, double pts)
1951 {
1952 int n, samples_size;
1953 double ref_clock;
1954
1955 n = 2 * is->audio_st->codec->channels;
1956 samples_size = samples_size1;
1957
1958 /* if not master, then we try to remove or add samples to correct the clock */
1959 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1960 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1961 double diff, avg_diff;
1962 int wanted_size, min_size, max_size, nb_samples;
1963
1964 ref_clock = get_master_clock(is);
1965 diff = get_audio_clock(is) - ref_clock;
1966
1967 if (diff < AV_NOSYNC_THRESHOLD) {
1968 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1969 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1970 /* not enough measures to have a correct estimate */
1971 is->audio_diff_avg_count++;
1972 } else {
1973 /* estimate the A-V difference */
1974 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1975
1976 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1977 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1978 nb_samples = samples_size / n;
1979
1980 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1981 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1982 if (wanted_size < min_size)
1983 wanted_size = min_size;
1984 else if (wanted_size > max_size)
1985 wanted_size = max_size;
1986
1987 /* add or remove samples to correction the synchro */
1988 if (wanted_size < samples_size) {
1989 /* remove samples */
1990 samples_size = wanted_size;
1991 } else if (wanted_size > samples_size) {
1992 uint8_t *samples_end, *q;
1993 int nb;
1994
1995 /* add samples */
1996 nb = (samples_size - wanted_size);
1997 samples_end = (uint8_t *)samples + samples_size - n;
1998 q = samples_end + n;
1999 while (nb > 0) {
2000 memcpy(q, samples_end, n);
2001 q += n;
2002 nb -= n;
2003 }
2004 samples_size = wanted_size;
2005 }
2006 }
2007 #if 0
2008 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2009 diff, avg_diff, samples_size - samples_size1,
2010 is->audio_clock, is->video_clock, is->audio_diff_threshold);
2011 #endif
2012 }
2013 } else {
2014 /* too big difference : may be initial PTS errors, so
2015 reset A-V filter */
2016 is->audio_diff_avg_count = 0;
2017 is->audio_diff_cum = 0;
2018 }
2019 }
2020
2021 return samples_size;
2022 }
2023
2024 /* decode one audio frame and returns its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance the cursor past the consumed bytes */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* the decoder output format changed: rebuild the S16 converter */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert in-place-ish: audio_buf1 -> audio_buf2 as S16 */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of the decoded data */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            /* seek happened: reset the audio decoder */
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2126
2127 /* get the current audio output buffer size, in samples. With SDL, we
2128 cannot have a precise information */
2129 static int audio_write_get_buf_size(VideoState *is)
2130 {
2131 return is->audio_buf_size - is->audio_buf_index;
2132 }
2133
2134
2135 /* prepare a new audio buffer */
/* SDL audio callback: fill 'stream' with exactly 'len' bytes, decoding new
   frames on demand and outputting silence on decode failure. Runs on SDL's
   audio thread. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* remember when SDL asked for data; used by get_audio_clock() */
    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               /* shrink/grow the buffer to track the master clock */
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much buffered audio as fits into SDL's buffer */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2170
2171 /* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* downmix to at most 2 channels for SDL output */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* propagate the command-line decoding options to the codec context */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2270
/* Tear down the decoding side of one stream; counterpart of
 * stream_component_open().  Aborts the stream's packet queue so any
 * consumer blocked on it wakes up, stops the associated decoder
 * (audio device or decoding thread), then closes the codec and clears
 * the back-references in the VideoState. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* abort before SDL_CloseAudio so the audio callback cannot block
           forever waiting for packets */
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        /* free the sample-format conversion context, if one was created */
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        /* join the video decoding thread before freeing its queue */
        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop the demuxer from queuing further packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2342
2343 /* since we have only one decoding thread, we can use a global
2344 variable instead of a thread local variable */
2345 static VideoState *global_video_state;
2346
2347 static int decode_interrupt_cb(void)
2348 {
2349 return (global_video_state && global_video_state->abort_request);
2350 }
2351
/* this thread gets the stream from the disk or the network */
/* Demuxer thread.  Opens the input, picks the streams to play, opens
 * their decoding components, then loops: service pause/seek requests
 * coming from the GUI thread, keep the packet queues topped up from
 * av_read_frame(), and handle EOF/looping.  Always returns 0; on a
 * fatal error it posts FF_QUIT_EVENT so the main loop exits. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];            /* chosen stream per type, -1 = none */
    int st_count[AVMEDIA_TYPE_NB]={0};        /* streams of each type seen so far */
    int st_best_packet_count[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* install the abort callback for blocking I/O */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* demuxer hints, mainly used by raw/grab-style input formats */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* stream selection: honour -ast/-vst/-sst if given, otherwise prefer
       the stream of each type with the most frames seen during probing */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: fall back to the audio visualization display */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demuxing loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to the demuxer (needed e.g. for RTSP) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* discard stale packets and queue a flush marker so the
                   decoders reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if ( is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || ( (is->audioq .size > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* feed an empty packet so the video decoder can flush its
               delayed frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* all queues drained: loop from the start or exit */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2612
2613 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2614 {
2615 VideoState *is;
2616
2617 is = av_mallocz(sizeof(VideoState));
2618 if (!is)
2619 return NULL;
2620 av_strlcpy(is->filename, filename, sizeof(is->filename));
2621 is->iformat = iformat;
2622 is->ytop = 0;
2623 is->xleft = 0;
2624
2625 /* start video display */
2626 is->pictq_mutex = SDL_CreateMutex();
2627 is->pictq_cond = SDL_CreateCond();
2628
2629 is->subpq_mutex = SDL_CreateMutex();
2630 is->subpq_cond = SDL_CreateCond();
2631
2632 is->av_sync_type = av_sync_type;
2633 is->parse_tid = SDL_CreateThread(decode_thread, is);
2634 if (!is->parse_tid) {
2635 av_free(is);
2636 return NULL;
2637 }
2638 return is;
2639 }
2640
/* Release everything owned by a VideoState created with stream_open():
 * request abort, block until the parse and refresh threads have exited,
 * free all queued pictures and the synchronization primitives, then the
 * state itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_pic(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* the scaler context is only used when avfilter is disabled */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
2674
/* Switch playback to the next stream of the given media type.  Scans
 * forward from the currently open stream and wraps around; for
 * subtitles the scan may end at index -1, which turns the subtitle
 * stream off.  When a candidate is found the current component is
 * closed and the new one opened. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle when no stream of this type is open
       (subtitles may legitimately start from -1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came full circle without finding another candidate */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2723
2724
2725 static void toggle_full_screen(void)
2726 {
2727 is_full_screen = !is_full_screen;
2728 if (!fs_screen_width) {
2729 /* use default SDL method */
2730 // SDL_WM_ToggleFullScreen(screen);
2731 }
2732 video_open(cur_stream);
2733 }
2734
2735 static void toggle_pause(void)
2736 {
2737 if (cur_stream)
2738 stream_pause(cur_stream);
2739 step = 0;
2740 }
2741
2742 static void step_to_next_frame(void)
2743 {
2744 if (cur_stream) {
2745 /* if the stream is paused unpause it, then step */
2746 if (cur_stream->paused)
2747 stream_pause(cur_stream);
2748 }
2749 step = 1;
2750 }
2751
2752 static void do_exit(void)
2753 {
2754 int i;
2755 if (cur_stream) {
2756 stream_close(cur_stream);
2757 cur_stream = NULL;
2758 }
2759 for (i = 0; i < AVMEDIA_TYPE_NB; i++)
2760 av_free(avcodec_opts[i]);
2761 av_free(avformat_opts);
2762 av_free(sws_opts);
2763 #if CONFIG_AVFILTER
2764 avfilter_uninit();
2765 #endif
2766 if (show_status)
2767 printf("\n");
2768 SDL_Quit();
2769 exit(0);
2770 }
2771
2772 static void toggle_audio_display(void)
2773 {
2774 if (cur_stream) {
2775 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2776 cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2777 fill_rectangle(screen,
2778 cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2779 bgcolor);
2780 SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2781 }
2782 }
2783
/* handle an event sent by the GUI */
/* Main SDL event loop: dispatches keyboard, mouse, resize and the
 * application-defined FF_* events.  Runs on the main thread and only
 * leaves through do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek, increment in seconds */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte seeking: take the position of the last
                           decoded packet and convert the time increment
                           to bytes via the bit rate (or a fallback) */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time seeking relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            /* click or drag: seek to the fraction of the file width */
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size= url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread: (re)allocate the YUV overlay */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2922
2923 static void opt_frame_size(const char *arg)
2924 {
2925 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2926 fprintf(stderr, "Incorrect frame size\n");
2927 exit(1);
2928 }
2929 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2930 fprintf(stderr, "Frame size must be a multiple of 2\n");
2931 exit(1);
2932 }
2933 }
2934
2935 static int opt_width(const char *opt, const char *arg)
2936 {
2937 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2938 return 0;
2939 }
2940
2941 static int opt_height(const char *opt, const char *arg)
2942 {
2943 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2944 return 0;
2945 }
2946
2947 static void opt_format(const char *arg)
2948 {
2949 file_iformat = av_find_input_format(arg);
2950 if (!file_iformat) {
2951 fprintf(stderr, "Unknown input format: %s\n", arg);
2952 exit(1);
2953 }
2954 }
2955
2956 static void opt_frame_pix_fmt(const char *arg)
2957 {
2958 frame_pix_fmt = av_get_pix_fmt(arg);
2959 }
2960
2961 static int opt_sync(const char *opt, const char *arg)
2962 {
2963 if (!strcmp(arg, "audio"))
2964 av_sync_type = AV_SYNC_AUDIO_MASTER;
2965 else if (!strcmp(arg, "video"))
2966 av_sync_type = AV_SYNC_VIDEO_MASTER;
2967 else if (!strcmp(arg, "ext"))
2968 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2969 else {
2970 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2971 exit(1);
2972 }
2973 return 0;
2974 }
2975
2976 static int opt_seek(const char *opt, const char *arg)
2977 {
2978 start_time = parse_time_or_die(opt, arg, 1);
2979 return 0;
2980 }
2981
2982 static int opt_duration(const char *opt, const char *arg)
2983 {
2984 duration = parse_time_or_die(opt, arg, 1);
2985 return 0;
2986 }
2987
2988 static int opt_debug(const char *opt, const char *arg)
2989 {
2990 av_log_set_level(99);
2991 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2992 return 0;
2993 }
2994
2995 static int opt_vismv(const char *opt, const char *arg)
2996 {
2997 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2998 return 0;
2999 }
3000
3001 static int opt_thread_count(const char *opt, const char *arg)
3002 {
3003 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3004 #if !HAVE_THREADS
3005 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3006 #endif
3007 return 0;
3008 }
3009
/* Command line option table; the OptionDef flags are declared in
   cmdutils.h, and the entries shared with the other FF* tools come
   from cmdutils_common_opts.h. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3054
/* Print the one-line program synopsis to stdout. */
static void show_usage(void)
{
    fputs("Simple media player\n", stdout);
    fputs("usage: ffplay [options] input_file\n", stdout);
    fputs("\n", stdout);
}
3061
3062 static void show_help(void)
3063 {
3064 show_usage();
3065 show_help_options(options, "Main options:\n",
3066 OPT_EXPERT, 0);
3067 show_help_options(options, "\nAdvanced options:\n",
3068 OPT_EXPERT, OPT_EXPERT);
3069 printf("\nWhile playing:\n"
3070 "q, ESC quit\n"
3071 "f toggle full screen\n"
3072 "p, SPC pause\n"
3073 "a cycle audio channel\n"
3074 "v cycle video channel\n"
3075 "t cycle subtitle channel\n"
3076 "w show audio waves\n"
3077 "s activate frame-step mode\n"
3078 "left/right seek backward/forward 10 seconds\n"
3079 "down/up seek backward/forward 1 minute\n"
3080 "mouse click seek to percentage in file corresponding to fraction of width\n"
3081 );
3082 }
3083
3084 static void opt_input_file(const char *filename)
3085 {
3086 if (input_filename) {
3087 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3088 filename, input_filename);
3089 exit(1);
3090 }
3091 if (!strcmp(filename, "-"))
3092 filename = "pipe:";
3093 input_filename = filename;
3094 }
3095
/* Called from the main */
/* Entry point: register libav* components, allocate the global option
 * contexts, parse the command line, initialize SDL, then open the
 * input and enter the event loop (which never returns). */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    /* one option-holding codec context per media type, filled in by
       opt_default() and applied when a codec is opened */
    for(i=0; i<AVMEDIA_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
#if !CONFIG_AVFILTER
    /* dummy scaler context used only to collect -sws_flags style options */
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
#endif

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop resolution for full screen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop SDL event types ffplay does not handle */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet queued after a seek so decoders flush their state */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}