vqa: set time base to frame rate as coded in the header.
[libav.git] / avplay.c
1 /*
2 * avplay : Simple Media Player based on the Libav libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "avplay";
59 const int program_birth_year = 2003;
60
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66 A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if too big error */
72 #define AV_NOSYNC_THRESHOLD 10.0
73
74 #define FRAME_SKIP_FACTOR 0.05
75
76 /* maximum audio speed change to get correct sync */
77 #define SAMPLE_CORRECTION_PERCENT_MAX 10
78
79 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
80 #define AUDIO_DIFF_AVG_NB 20
81
82 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
83 #define SAMPLE_ARRAY_SIZE (2 * 65536)
84
85 static int sws_flags = SWS_BICUBIC;
86
/* Thread-safe FIFO of demuxed packets, exchanged between the read thread
   (producer) and the audio/video/subtitle decoder threads (consumers). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list: head / tail */
    int nb_packets;     /* number of packets currently queued */
    int size;           /* sum of payload + node sizes, used for queue limits */
    int abort_request;  /* when set, blocked consumers wake up and fail fast */
    SDL_mutex *mutex;   /* protects all fields above */
    SDL_cond *cond;     /* signalled on put and on abort */
} PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
/* One decoded picture queued for display in VideoState.pictq. */
typedef struct VideoPicture {
    double pts; ///< presentation time stamp for this picture
    double target_clock; ///< av_gettime() time at which this should be displayed ideally
    int64_t pos; ///< byte position in file
    SDL_Overlay *bmp;          /* SDL YUV overlay holding the pixels */
    int width, height; /* source height & width */
    int allocated;             /* nonzero once bmp exists */
    int reallocate;            /* request to re-create bmp (e.g. size change) */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref; /* filter-graph buffer backing this picture */
#endif
} VideoPicture;
113
/* One decoded subtitle queued for blending in VideoState.subpq. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;  /* decoded subtitle rectangles; freed via free_subpicture() */
} SubPicture;
118
/* Master clock selection for A/V synchronisation (VideoState.av_sync_type). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
124
/* Global player state: one instance per opened input, shared between the
   demuxing ("parse"), decoder, refresh and SDL audio callback threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demuxer / read thread */
    SDL_Thread *video_tid;    /* video decoder thread */
    SDL_Thread *refresh_tid;  /* refresh-event pump (see refresh_thread) */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;        /* set once to tear all threads down */
    int paused;
    int last_paused;
    int seek_req;             /* a seek request is pending (see stream_seek) */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;         /* one of the AV_SYNC_* enum values */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* --- audio decoding / playback state --- */
    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
    uint8_t *audio_buf;
    uint8_t *audio_buf1;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;
    AVFrame *frame;

    /* --- audio visualisation (waveform / spectrogram) --- */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* circular buffer of recently played samples */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;   /* current column of the scrolling spectrogram */

    /* --- subtitle state --- */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    /* --- video state --- */
    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///< pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///< current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift; ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos; ///< current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;  /* display geometry within the SDL surface */

    PtsCorrectionContext pts_ctx;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter; ///< the last filter in the video chain
#endif

    float skip_frames;        /* decoder-side frame-skip ratio, grows when late */
    float skip_frames_index;
    int refresh;              /* a refresh event is already pending */
} VideoState;
218
219 static void show_help(void);
220
221 /* options specified by the user */
222 static AVInputFormat *file_iformat;
223 static const char *input_filename;
224 static const char *window_title;
225 static int fs_screen_width;
226 static int fs_screen_height;
227 static int screen_width = 0;
228 static int screen_height = 0;
229 static int audio_disable;
230 static int video_disable;
231 static int wanted_stream[AVMEDIA_TYPE_NB] = {
232 [AVMEDIA_TYPE_AUDIO] = -1,
233 [AVMEDIA_TYPE_VIDEO] = -1,
234 [AVMEDIA_TYPE_SUBTITLE] = -1,
235 };
236 static int seek_by_bytes = -1;
237 static int display_disable;
238 static int show_status = 1;
239 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
240 static int64_t start_time = AV_NOPTS_VALUE;
241 static int64_t duration = AV_NOPTS_VALUE;
242 static int debug = 0;
243 static int debug_mv = 0;
244 static int step = 0;
245 static int thread_count = 1;
246 static int workaround_bugs = 1;
247 static int fast = 0;
248 static int genpts = 0;
249 static int lowres = 0;
250 static int idct = FF_IDCT_AUTO;
251 static enum AVDiscard skip_frame = AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_idct = AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
254 static int error_concealment = 3;
255 static int decoder_reorder_pts = -1;
256 static int autoexit;
257 static int exit_on_keydown;
258 static int exit_on_mousedown;
259 static int loop = 1;
260 static int framedrop = 1;
261
262 static int rdftspeed = 20;
263 #if CONFIG_AVFILTER
264 static char *vfilters = NULL;
265 #endif
266
267 /* current context */
268 static int is_full_screen;
269 static VideoState *cur_stream;
270 static int64_t audio_callback_time;
271
272 static AVPacket flush_pkt;
273
274 #define FF_ALLOC_EVENT (SDL_USEREVENT)
275 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
276 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
277
278 static SDL_Surface *screen;
279
/* Terminate the process with the given exit status (hook used by cmdutils). */
void exit_program(int ret)
{
    exit(ret);
}
284
285 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
286
287 /* packet queue handling */
/* Initialize a packet queue and prime it with the global flush packet so the
   consumer starts from a flushed decoder state. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
295
296 static void packet_queue_flush(PacketQueue *q)
297 {
298 AVPacketList *pkt, *pkt1;
299
300 SDL_LockMutex(q->mutex);
301 for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
302 pkt1 = pkt->next;
303 av_free_packet(&pkt->pkt);
304 av_freep(&pkt);
305 }
306 q->last_pkt = NULL;
307 q->first_pkt = NULL;
308 q->nb_packets = 0;
309 q->size = 0;
310 SDL_UnlockMutex(q->mutex);
311 }
312
/* Destroy a packet queue: drop all pending packets, then release the
   synchronization primitives.  The queue must not be used afterwards. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
319
/* Append a packet to the queue tail.
   The payload is duplicated (except for the special flush packet) so the
   queue owns its data.  Returns 0 on success, -1 on duplication or
   allocation failure.  Thread-safe; wakes one waiting consumer. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;

    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)
        q->first_pkt = pkt1;        /* queue was empty */
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
351
/* Wake any consumer blocked in packet_queue_get and make further get
   operations fail immediately (used during teardown/stream switch). */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
362
/* Pop the head packet into *pkt; ownership of the payload passes to the
   caller.  If `block` is nonzero, wait until a packet arrives or the queue
   is aborted.  Return < 0 if aborted, 0 if no packet and > 0 if packet. */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for (;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            /* unlink the head node and hand the packet to the caller */
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            /* wait for a producer signal or an abort request */
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}
398
399 static inline void fill_rectangle(SDL_Surface *screen,
400 int x, int y, int w, int h, int color)
401 {
402 SDL_Rect rect;
403 rect.x = x;
404 rect.y = y;
405 rect.w = w;
406 rect.h = h;
407 SDL_FillRect(screen, &rect, color);
408 }
409
410 #define ALPHA_BLEND(a, oldp, newp, s)\
411 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
412
413 #define RGBA_IN(r, g, b, a, s)\
414 {\
415 unsigned int v = ((const uint32_t *)(s))[0];\
416 a = (v >> 24) & 0xff;\
417 r = (v >> 16) & 0xff;\
418 g = (v >> 8) & 0xff;\
419 b = v & 0xff;\
420 }
421
422 #define YUVA_IN(y, u, v, a, s, pal)\
423 {\
424 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
425 a = (val >> 24) & 0xff;\
426 y = (val >> 16) & 0xff;\
427 u = (val >> 8) & 0xff;\
428 v = val & 0xff;\
429 }
430
431 #define YUVA_OUT(d, y, u, v, a)\
432 {\
433 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
434 }
435
436
437 #define BPP 1
438
439 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
440 {
441 int wrap, wrap3, width2, skip2;
442 int y, u, v, a, u1, v1, a1, w, h;
443 uint8_t *lum, *cb, *cr;
444 const uint8_t *p;
445 const uint32_t *pal;
446 int dstx, dsty, dstw, dsth;
447
448 dstw = av_clip(rect->w, 0, imgw);
449 dsth = av_clip(rect->h, 0, imgh);
450 dstx = av_clip(rect->x, 0, imgw - dstw);
451 dsty = av_clip(rect->y, 0, imgh - dsth);
452 lum = dst->data[0] + dsty * dst->linesize[0];
453 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
454 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
455
456 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
457 skip2 = dstx >> 1;
458 wrap = dst->linesize[0];
459 wrap3 = rect->pict.linesize[0];
460 p = rect->pict.data[0];
461 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
462
463 if (dsty & 1) {
464 lum += dstx;
465 cb += skip2;
466 cr += skip2;
467
468 if (dstx & 1) {
469 YUVA_IN(y, u, v, a, p, pal);
470 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
471 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
472 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
473 cb++;
474 cr++;
475 lum++;
476 p += BPP;
477 }
478 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
479 YUVA_IN(y, u, v, a, p, pal);
480 u1 = u;
481 v1 = v;
482 a1 = a;
483 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
484
485 YUVA_IN(y, u, v, a, p + BPP, pal);
486 u1 += u;
487 v1 += v;
488 a1 += a;
489 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
490 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
491 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
492 cb++;
493 cr++;
494 p += 2 * BPP;
495 lum += 2;
496 }
497 if (w) {
498 YUVA_IN(y, u, v, a, p, pal);
499 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
500 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
501 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
502 p++;
503 lum++;
504 }
505 p += wrap3 - dstw * BPP;
506 lum += wrap - dstw - dstx;
507 cb += dst->linesize[1] - width2 - skip2;
508 cr += dst->linesize[2] - width2 - skip2;
509 }
510 for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
511 lum += dstx;
512 cb += skip2;
513 cr += skip2;
514
515 if (dstx & 1) {
516 YUVA_IN(y, u, v, a, p, pal);
517 u1 = u;
518 v1 = v;
519 a1 = a;
520 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521 p += wrap3;
522 lum += wrap;
523 YUVA_IN(y, u, v, a, p, pal);
524 u1 += u;
525 v1 += v;
526 a1 += a;
527 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
528 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530 cb++;
531 cr++;
532 p += -wrap3 + BPP;
533 lum += -wrap + 1;
534 }
535 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
536 YUVA_IN(y, u, v, a, p, pal);
537 u1 = u;
538 v1 = v;
539 a1 = a;
540 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
541
542 YUVA_IN(y, u, v, a, p + BPP, pal);
543 u1 += u;
544 v1 += v;
545 a1 += a;
546 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
547 p += wrap3;
548 lum += wrap;
549
550 YUVA_IN(y, u, v, a, p, pal);
551 u1 += u;
552 v1 += v;
553 a1 += a;
554 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
555
556 YUVA_IN(y, u, v, a, p + BPP, pal);
557 u1 += u;
558 v1 += v;
559 a1 += a;
560 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
561
562 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
563 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
564
565 cb++;
566 cr++;
567 p += -wrap3 + 2 * BPP;
568 lum += -wrap + 2;
569 }
570 if (w) {
571 YUVA_IN(y, u, v, a, p, pal);
572 u1 = u;
573 v1 = v;
574 a1 = a;
575 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576 p += wrap3;
577 lum += wrap;
578 YUVA_IN(y, u, v, a, p, pal);
579 u1 += u;
580 v1 += v;
581 a1 += a;
582 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
583 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
584 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
585 cb++;
586 cr++;
587 p += -wrap3 + BPP;
588 lum += -wrap + 1;
589 }
590 p += wrap3 + (wrap3 - dstw * BPP);
591 lum += wrap + (wrap - dstw - dstx);
592 cb += dst->linesize[1] - width2 - skip2;
593 cr += dst->linesize[2] - width2 - skip2;
594 }
595 /* handle odd height */
596 if (h) {
597 lum += dstx;
598 cb += skip2;
599 cr += skip2;
600
601 if (dstx & 1) {
602 YUVA_IN(y, u, v, a, p, pal);
603 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
604 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
605 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
606 cb++;
607 cr++;
608 lum++;
609 p += BPP;
610 }
611 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
612 YUVA_IN(y, u, v, a, p, pal);
613 u1 = u;
614 v1 = v;
615 a1 = a;
616 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
617
618 YUVA_IN(y, u, v, a, p + BPP, pal);
619 u1 += u;
620 v1 += v;
621 a1 += a;
622 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
623 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
624 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
625 cb++;
626 cr++;
627 p += 2 * BPP;
628 lum += 2;
629 }
630 if (w) {
631 YUVA_IN(y, u, v, a, p, pal);
632 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
633 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
634 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
635 }
636 }
637 }
638
/* Release the decoded subtitle data owned by a queued SubPicture. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
643
/* Display the picture at the read index of the picture queue: blend any due
   subtitle into the overlay, then blit it centred and letter/pillar-boxed to
   preserve the source aspect ratio. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
        if (vp->picref->video->pixel_aspect.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        /* unknown/invalid sample aspect ratio: assume square pixels */
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend the subtitle only once its start time is reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* overlay planes are stored in Y/V/U order, hence the
                       swap of indices 1 and 2 to get YUV420P layout */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        is->no_background = 0;
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    }
}
720
/* get the current audio output buffer size, in bytes (the part of the last
   decoded chunk not yet handed to SDL). With SDL, we cannot have precise
   hardware buffer fullness information. */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
727
/* Positive modulo: map a into [0, b) even for negative a
   (C's % operator keeps the sign of the dividend). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r < 0 ? r + b : r;
}
736
737 static void video_audio_display(VideoState *s)
738 {
739 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
740 int ch, channels, h, h2, bgcolor, fgcolor;
741 int16_t time_diff;
742 int rdft_bits, nb_freq;
743
744 for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
745 ;
746 nb_freq = 1 << (rdft_bits - 1);
747
748 /* compute display index : center on currently output samples */
749 channels = s->audio_st->codec->channels;
750 nb_display_channels = channels;
751 if (!s->paused) {
752 int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
753 n = 2 * channels;
754 delay = audio_write_get_buf_size(s);
755 delay /= n;
756
757 /* to be more precise, we take into account the time spent since
758 the last buffer computation */
759 if (audio_callback_time) {
760 time_diff = av_gettime() - audio_callback_time;
761 delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
762 }
763
764 delay += 2 * data_used;
765 if (delay < data_used)
766 delay = data_used;
767
768 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
769 if (s->show_audio == 1) {
770 h = INT_MIN;
771 for (i = 0; i < 1000; i += channels) {
772 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
773 int a = s->sample_array[idx];
774 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
775 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
776 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
777 int score = a - d;
778 if (h < score && (b ^ c) < 0) {
779 h = score;
780 i_start = idx;
781 }
782 }
783 }
784
785 s->last_i_start = i_start;
786 } else {
787 i_start = s->last_i_start;
788 }
789
790 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
791 if (s->show_audio == 1) {
792 fill_rectangle(screen,
793 s->xleft, s->ytop, s->width, s->height,
794 bgcolor);
795
796 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
797
798 /* total height for one channel */
799 h = s->height / nb_display_channels;
800 /* graph height / 2 */
801 h2 = (h * 9) / 20;
802 for (ch = 0; ch < nb_display_channels; ch++) {
803 i = i_start + ch;
804 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
805 for (x = 0; x < s->width; x++) {
806 y = (s->sample_array[i] * h2) >> 15;
807 if (y < 0) {
808 y = -y;
809 ys = y1 - y;
810 } else {
811 ys = y1;
812 }
813 fill_rectangle(screen,
814 s->xleft + x, ys, 1, y,
815 fgcolor);
816 i += channels;
817 if (i >= SAMPLE_ARRAY_SIZE)
818 i -= SAMPLE_ARRAY_SIZE;
819 }
820 }
821
822 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
823
824 for (ch = 1; ch < nb_display_channels; ch++) {
825 y = s->ytop + ch * h;
826 fill_rectangle(screen,
827 s->xleft, y, s->width, 1,
828 fgcolor);
829 }
830 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
831 } else {
832 nb_display_channels= FFMIN(nb_display_channels, 2);
833 if (rdft_bits != s->rdft_bits) {
834 av_rdft_end(s->rdft);
835 av_free(s->rdft_data);
836 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
837 s->rdft_bits = rdft_bits;
838 s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
839 }
840 {
841 FFTSample *data[2];
842 for (ch = 0; ch < nb_display_channels; ch++) {
843 data[ch] = s->rdft_data + 2 * nb_freq * ch;
844 i = i_start + ch;
845 for (x = 0; x < 2 * nb_freq; x++) {
846 double w = (x-nb_freq) * (1.0 / nb_freq);
847 data[ch][x] = s->sample_array[i] * (1.0 - w * w);
848 i += channels;
849 if (i >= SAMPLE_ARRAY_SIZE)
850 i -= SAMPLE_ARRAY_SIZE;
851 }
852 av_rdft_calc(s->rdft, data[ch]);
853 }
854 // least efficient way to do this, we should of course directly access it but its more than fast enough
855 for (y = 0; y < s->height; y++) {
856 double w = 1 / sqrt(nb_freq);
857 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
858 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
859 + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
860 a = FFMIN(a, 255);
861 b = FFMIN(b, 255);
862 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
863
864 fill_rectangle(screen,
865 s->xpos, s->height-y, 1, 1,
866 fgcolor);
867 }
868 }
869 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
870 s->xpos++;
871 if (s->xpos >= s->width)
872 s->xpos= s->xleft;
873 }
874 }
875
/* (Re)open the SDL video surface.  Size is chosen in priority order: forced
   fullscreen geometry, forced window geometry, filter-graph output size,
   codec dimensions, then a 640x480 fallback.  Returns 0 on success (or when
   the surface already matches), -1 if the video mode could not be set. */
static int video_open(VideoState *is)
{
    int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
    int w,h;

    if (is_full_screen) flags |= SDL_FULLSCREEN;
    else                flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if (!is_full_screen && screen_width) {
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    } else if (is->video_st && is->video_st->codec->width) {
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the requested size */
    if (screen && is->width == screen->w && screen->w == w
        && is->height== screen->h && screen->h == h)
        return 0;

#if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#else
    screen = SDL_SetVideoMode(w, h, 0, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
926
/* display the current picture, if any: audio visualisation when enabled,
   otherwise the current video frame */
static void video_display(VideoState *is)
{
    /* lazily open the SDL window on first use */
    if (!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}
937
/* Refresh pump thread: periodically posts FF_REFRESH_EVENT to the main event
   loop.  The is->refresh flag avoids flooding the SDL event queue when the
   main loop has not consumed the previous event yet. */
static int refresh_thread(void *opaque)
{
    VideoState *is = opaque;
    while (!is->abort_request) {
        SDL_Event event;
        event.type = FF_REFRESH_EVENT;
        event.user.data1 = opaque;
        if (!is->refresh) {
            is->refresh = 1;
            SDL_PushEvent(&event);
        }
        /* refresh at rdftspeed ms in spectrogram mode, else every 5 ms */
        usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
    }
    return 0;
}
953
954 /* get the current audio clock value */
955 static double get_audio_clock(VideoState *is)
956 {
957 double pts;
958 int hw_buf_size, bytes_per_sec;
959 pts = is->audio_clock;
960 hw_buf_size = audio_write_get_buf_size(is);
961 bytes_per_sec = 0;
962 if (is->audio_st) {
963 bytes_per_sec = is->audio_st->codec->sample_rate *
964 2 * is->audio_st->codec->channels;
965 }
966 if (bytes_per_sec)
967 pts -= (double)hw_buf_size / bytes_per_sec;
968 return pts;
969 }
970
971 /* get the current video clock value */
972 static double get_video_clock(VideoState *is)
973 {
974 if (is->paused) {
975 return is->video_current_pts;
976 } else {
977 return is->video_current_pts_drift + av_gettime() / 1000000.0;
978 }
979 }
980
981 /* get the current external clock value */
982 static double get_external_clock(VideoState *is)
983 {
984 int64_t ti;
985 ti = av_gettime();
986 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
987 }
988
989 /* get the current master clock value */
990 static double get_master_clock(VideoState *is)
991 {
992 double val;
993
994 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
995 if (is->video_st)
996 val = get_video_clock(is);
997 else
998 val = get_audio_clock(is);
999 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1000 if (is->audio_st)
1001 val = get_audio_clock(is);
1002 else
1003 val = get_video_clock(is);
1004 } else {
1005 val = get_external_clock(is);
1006 }
1007 return val;
1008 }
1009
1010 /* seek in the stream */
1011 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1012 {
1013 if (!is->seek_req) {
1014 is->seek_pos = pos;
1015 is->seek_rel = rel;
1016 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1017 if (seek_by_bytes)
1018 is->seek_flags |= AVSEEK_FLAG_BYTE;
1019 is->seek_req = 1;
1020 }
1021 }
1022
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: re-anchor the frame timer and the video clock to "now"
           so the time spent paused does not count as playback time */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if (is->read_pause_return != AVERROR(ENOSYS)) {
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1035
1036 static double compute_target_time(double frame_current_pts, VideoState *is)
1037 {
1038 double delay, sync_threshold, diff;
1039
1040 /* compute nominal delay */
1041 delay = frame_current_pts - is->frame_last_pts;
1042 if (delay <= 0 || delay >= 10.0) {
1043 /* if incorrect delay, use previous one */
1044 delay = is->frame_last_delay;
1045 } else {
1046 is->frame_last_delay = delay;
1047 }
1048 is->frame_last_pts = frame_current_pts;
1049
1050 /* update delay to follow master synchronisation source */
1051 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1052 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1053 /* if video is slave, we try to correct big delays by
1054 duplicating or deleting a frame */
1055 diff = get_video_clock(is) - get_master_clock(is);
1056
1057 /* skip or repeat frame. We take into account the
1058 delay to compute the threshold. I still don't know
1059 if it is the best guess */
1060 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1061 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1062 if (diff <= -sync_threshold)
1063 delay = 0;
1064 else if (diff >= sync_threshold)
1065 delay = 2 * delay;
1066 }
1067 }
1068 is->frame_timer += delay;
1069
1070 av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1071 delay, frame_current_pts, -diff);
1072
1073 return is->frame_timer;
1074 }
1075
/* called to display each frame; runs in the main loop on FF_REFRESH_EVENT */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            // nothing to do, no picture to display in the que
        } else {
            double time = av_gettime() / 1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not due yet: leave it queued until a later refresh */
            if (time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            if (is->pictq_size > 1) {
                VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            } else {
                next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
            }
            /* already late for the next frame: raise the decoder-side skip
               ratio and possibly drop this picture without displaying it */
            if (framedrop && time > next_target) {
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if (is->pictq_size > 1 || time > next_target + 0.5) {
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if (is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switch: drop every queued subtitle */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the current subtitle once it has expired or
                           the next one is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                            || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* rate-limit the status line to one update every 30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
                   vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1217
/* Tear down a fully opened VideoState: stop the demux and refresh threads,
 * release every queued picture and the SDL synchronization primitives, then
 * free the state itself.  Must only be called once no other thread can
 * still touch "is". */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    /* both worker threads poll abort_request and exit on their own */
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        /* drop the filter buffer reference held by the queued picture */
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* the swscale context only exists in the non-filter build */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1251
/* Full application shutdown: close the active stream, release global
 * option/filter/network state, quit SDL and terminate the process.
 * This function does not return. */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    avformat_network_deinit();
    if (show_status)
        printf("\n");
    SDL_Quit();
    /* NOTE(review): empty message at QUIET level — presumably flushes the
     * logger state before exit; confirm against av_log semantics */
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1269
/* Allocate (or reallocate) the SDL YUV overlay for the picture slot
 * currently being written (pictq_windex).  This must run in the main
 * thread (it is triggered via an FF_ALLOC_EVENT from queue_picture())
 * to avoid potential SDL locking problems. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* geometry and format come from the filter graph output */
    vp->width = is->out_video_filter->inputs[0]->w;
    vp->height = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    /* no filter graph: use the decoder's geometry directly */
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* publish the new overlay and wake the video thread blocked in
     * queue_picture() waiting for vp->allocated */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1313
/**
 * Queue a decoded picture for display: copy/convert it into an SDL YUV
 * overlay and append it to the picture queue.  Blocks while the queue is
 * full; overlay (re)allocation is delegated to the main thread through an
 * FF_ALLOC_EVENT.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#else
    int dst_pix_fmt = PIX_FMT_YUV420P;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue is full and no forced refresh pending: raise the skip ratio so
       the decoder drops more frames and the queue can drain */
    if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp || vp->reallocate ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated  = 0;
        vp->reallocate = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated (alloc_picture() sets
           vp->allocated and signals the condition) */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if (vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        /* SDL YV12 stores planes as Y,V,U while libav produces Y,U,V:
           swap the two chroma plane pointers/pitches */
        memset(&pict, 0, sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        // FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* non-filter build: convert with swscale into the overlay */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock = compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1436
1437 /**
1438 * compute the exact PTS for the picture if it is omitted in the stream
1439 * @param pts1 the dts of the pkt / pts of the frame
1440 */
1441 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1442 {
1443 double frame_delay, pts;
1444
1445 pts = pts1;
1446
1447 if (pts != 0) {
1448 /* update video clock with pts, if present */
1449 is->video_clock = pts;
1450 } else {
1451 pts = is->video_clock;
1452 }
1453 /* update video clock for next frame */
1454 frame_delay = av_q2d(is->video_st->codec->time_base);
1455 /* for MPEG2, the frame can be repeated, so we update the
1456 clock accordingly */
1457 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1458 is->video_clock += frame_delay;
1459
1460 return queue_picture(is, src_frame, pts, pos);
1461 }
1462
/* Fetch the next packet from the video queue and decode it.
 * Returns -1 on queue abort, 1 when *frame and *pts hold a displayable
 * picture, 0 when no picture was produced (or the picture is dropped for
 * frame-skipping).  A flush packet resets the decoder and timing state. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if (pkt->data == flush_pkt.data) {
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        /* wait for the display side to consume all queued pictures */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        /* restart pts prediction, the frame timer and frame skipping
           after a seek/flush */
        init_pts_correction(&is->pts_ctx);
        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);

    if (got_picture) {
        /* pick pts or dts according to user preference; -1 means "guess
           the more plausible of the two" */
        if (decoder_reorder_pts == -1) {
            *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* frame skipping: only report one picture per skip_frames */
        is->skip_frames_index += 1;
        if (is->skip_frames_index >= is->skip_frames) {
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1517
1518 #if CONFIG_AVFILTER
/* Private data of the "avplay_input" source filter (see input_filter). */
typedef struct {
    VideoState *is;   // player state the filter pulls decoded frames from
    AVFrame *frame;   // decode target, allocated in input_init()
    int use_dr1;      // non-zero: decoder renders directly into filter buffers (CODEC_CAP_DR1)
} FilterPriv;
1524
/* get_buffer() callback for direct rendering: hand the decoder an
 * AVFilterBufferRef from the filter graph so decoded pictures land
 * directly in filter buffers (no extra copy in input_request_frame()). */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;
    int pixel_size;

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter permissions */
    if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* enlarge the request to the codec's alignment plus edge area */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
    /* the buffer ref reports only the visible area */
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for (i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        /* advance past the edge so data[i] points at the visible area,
           scaling the offset by the plane's chroma subsampling */
        if (ref->data[i]) {
            ref->data[i] += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    /* stash the ref for input_release_buffer()/input_request_frame() */
    pic->opaque = ref;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else            pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1574
/* release_buffer() callback matching input_get_buffer(): drop the filter
 * buffer reference stored in pic->opaque and clear the data pointers. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1580
/* reget_buffer() callback: reuse the existing filter buffer when the
 * picture properties are unchanged; fall back to a fresh get_buffer()
 * when the frame has no data yet, and fail on property changes. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else            pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1601
1602 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1603 {
1604 FilterPriv *priv = ctx->priv;
1605 AVCodecContext *codec;
1606 if (!opaque) return -1;
1607
1608 priv->is = opaque;
1609 codec = priv->is->video_st->codec;
1610 codec->opaque = ctx;
1611 if (codec->codec->capabilities & CODEC_CAP_DR1) {
1612 priv->use_dr1 = 1;
1613 codec->get_buffer = input_get_buffer;
1614 codec->release_buffer = input_release_buffer;
1615 codec->reget_buffer = input_reget_buffer;
1616 codec->thread_safe_callbacks = 1;
1617 }
1618
1619 priv->frame = avcodec_alloc_frame();
1620
1621 return 0;
1622 }
1623
1624 static void input_uninit(AVFilterContext *ctx)
1625 {
1626 FilterPriv *priv = ctx->priv;
1627 av_free(priv->frame);
1628 }
1629
/* request_frame() callback of the source filter: pull packets from the
 * video queue until a displayable frame is decoded, wrap (or copy) it
 * into a buffer reference and push it down the output link.
 * Returns 0 on success, -1 on queue abort. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* ret == 0 means "no picture from this packet": free it and retry */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if (priv->use_dr1) {
        /* DR1: the decoder already wrote into a filter buffer; just ref it */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        /* otherwise copy the decoded frame into a fresh filter buffer */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    avfilter_copy_frame_props(picref, priv->frame);
    picref->pts = pts;

    /* emit the frame as one full-height slice */
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1662
1663 static int input_query_formats(AVFilterContext *ctx)
1664 {
1665 FilterPriv *priv = ctx->priv;
1666 enum PixelFormat pix_fmts[] = {
1667 priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1668 };
1669
1670 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1671 return 0;
1672 }
1673
1674 static int input_config_props(AVFilterLink *link)
1675 {
1676 FilterPriv *priv = link->src->priv;
1677 AVCodecContext *c = priv->is->video_st->codec;
1678
1679 link->w = c->width;
1680 link->h = c->height;
1681 link->time_base = priv->is->video_st->time_base;
1682
1683 return 0;
1684 }
1685
/* Source filter that feeds decoded frames from the player into the
 * filter graph; it has no inputs and a single video output pad. */
static AVFilter input_filter =
{
    .name = "avplay_input",

    .priv_size = sizeof(FilterPriv),

    .init = input_init,
    .uninit = input_uninit,

    .query_formats = input_query_formats,

    /* pure source: the input pad list is empty */
    .inputs = (AVFilterPad[]) {{ .name = NULL }},
    .outputs = (AVFilterPad[]) {{ .name = "default",
                                  .type = AVMEDIA_TYPE_VIDEO,
                                  .request_frame = input_request_frame,
                                  .config_props = input_config_props, },
                                { .name = NULL }},
};
1704
1705 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1706 {
1707 char sws_flags_str[128];
1708 int ret;
1709 AVSinkContext avsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1710 AVFilterContext *filt_src = NULL, *filt_out = NULL;
1711 snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1712 graph->scale_sws_opts = av_strdup(sws_flags_str);
1713
1714 if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1715 NULL, is, graph)) < 0)
1716 return ret;
1717 if ((ret = avfilter_graph_create_filter(&filt_out, &avsink, "out",
1718 NULL, &avsink_ctx, graph)) < 0)
1719 return ret;
1720
1721 if (vfilters) {
1722 AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1723 AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
1724
1725 outputs->name = av_strdup("in");
1726 outputs->filter_ctx = filt_src;
1727 outputs->pad_idx = 0;
1728 outputs->next = NULL;
1729
1730 inputs->name = av_strdup("out");
1731 inputs->filter_ctx = filt_out;
1732 inputs->pad_idx = 0;
1733 inputs->next = NULL;
1734
1735 if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1736 return ret;
1737 av_freep(&vfilters);
1738 } else {
1739 if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1740 return ret;
1741 }
1742
1743 if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1744 return ret;
1745
1746 is->out_video_filter = filt_out;
1747
1748 return ret;
1749 }
1750
1751 #endif /* CONFIG_AVFILTER */
1752
/* Video decoding thread: pull packets (or, in the avfilter build,
 * already-filtered frames), convert their timestamps into seconds and
 * hand the pictures to the display queue via output_picture2(). */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame = avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;
    /* remembered geometry so the graph can be rebuilt on size changes */
    int last_w = is->video_st->codec->width;
    int last_h = is->video_st->codec->height;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for (;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        /* the coded size changed: tear the graph down and rebuild it */
        if ( last_w != is->video_st->codec->width
            || last_h != is->video_st->codec->height) {
            av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
                    is->video_st->codec->width, is->video_st->codec->height);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
                goto the_end;
            filt_out = is->out_video_filter;
            last_w = is->video_st->codec->width;
            last_h = is->video_st->codec->height;
        }
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            /* keep the ref alive; queue_picture() takes ownership */
            frame->opaque = picref;
        }

        /* rescale from the graph's time base to the stream time base */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0)
            goto the_end;

        /* ret == 0: no displayable picture (flush or skipped frame) */
        if (!ret)
            continue;

        pts = pts_int * av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts, pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1842
/* Subtitle decoding thread: pull packets from the subtitle queue, decode
 * bitmap subtitles, pre-convert their palettes from RGBA to YUVA and
 * append them to the subpicture queue for the display side. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for (;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if (pkt->data == flush_pkt.data) {
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            return 0;

        sp = &is->subpq[is->subpq_windex];

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;

        avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
                                 &got_subtitle, pkt);

        /* only bitmap subtitles (format == 0) are handled */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette entries from RGBA to YUVA so
               blending at display time needs no per-pixel conversion */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
    }
    return 0;
}
1911
1912 /* copy samples for viewing in editor window */
1913 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1914 {
1915 int size, len;
1916
1917 size = samples_size / sizeof(short);
1918 while (size > 0) {
1919 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1920 if (len > size)
1921 len = size;
1922 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1923 samples += len;
1924 is->sample_array_index += len;
1925 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1926 is->sample_array_index = 0;
1927 size -= len;
1928 }
1929 }
1930
1931 /* return the new audio buffer size (samples can be added or deleted
1932 to get better sync if video or external master clock) */
1933 static int synchronize_audio(VideoState *is, short *samples,
1934 int samples_size1, double pts)
1935 {
1936 int n, samples_size;
1937 double ref_clock;
1938
1939 n = 2 * is->audio_st->codec->channels;
1940 samples_size = samples_size1;
1941
1942 /* if not master, then we try to remove or add samples to correct the clock */
1943 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1944 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1945 double diff, avg_diff;
1946 int wanted_size, min_size, max_size, nb_samples;
1947
1948 ref_clock = get_master_clock(is);
1949 diff = get_audio_clock(is) - ref_clock;
1950
1951 if (diff < AV_NOSYNC_THRESHOLD) {
1952 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1953 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1954 /* not enough measures to have a correct estimate */
1955 is->audio_diff_avg_count++;
1956 } else {
1957 /* estimate the A-V difference */
1958 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1959
1960 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1961 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1962 nb_samples = samples_size / n;
1963
1964 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1965 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1966 if (wanted_size < min_size)
1967 wanted_size = min_size;
1968 else if (wanted_size > max_size)
1969 wanted_size = max_size;
1970
1971 /* add or remove samples to correction the synchro */
1972 if (wanted_size < samples_size) {
1973 /* remove samples */
1974 samples_size = wanted_size;
1975 } else if (wanted_size > samples_size) {
1976 uint8_t *samples_end, *q;
1977 int nb;
1978
1979 /* add samples */
1980 nb = (samples_size - wanted_size);
1981 samples_end = (uint8_t *)samples + samples_size - n;
1982 q = samples_end + n;
1983 while (nb > 0) {
1984 memcpy(q, samples_end, n);
1985 q += n;
1986 nb -= n;
1987 }
1988 samples_size = wanted_size;
1989 }
1990 }
1991 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1992 diff, avg_diff, samples_size - samples_size1,
1993 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1994 }
1995 } else {
1996 /* too big difference : may be initial PTS errors, so
1997 reset A-V filter */
1998 is->audio_diff_avg_count = 0;
1999 is->audio_diff_cum = 0;
2000 }
2001 }
2002
2003 return samples_size;
2004 }
2005
/* Decode one audio frame and return its uncompressed size in bytes; the
 * decoded (and, if necessary, s16-converted) samples are exposed through
 * is->audio_buf.  Updates is->audio_clock and *pts_ptr.  Returns a
 * negative value on pause/abort or allocation failure. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec = is->audio_st->codec;
    int n, len1, data_size, got_frame;
    double pts;
    int new_packet = 0;
    int flush_complete = 0;

    for (;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
            if (!is->frame) {
                if (!(is->frame = avcodec_alloc_frame()))
                    return AVERROR(ENOMEM);
            } else
                avcodec_get_frame_defaults(is->frame);

            /* the decoder signalled end of its delayed output: stop draining */
            if (flush_complete)
                break;
            new_packet = 0;
            len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance over the consumed bytes of the packet */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;

            if (!got_frame) {
                /* stop sending empty packets if the decoder is finished */
                if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
                    flush_complete = 1;
                continue;
            }
            data_size = av_samples_get_buffer_size(NULL, dec->channels,
                                                   is->frame->nb_samples,
                                                   dec->sample_fmt, 1);

            /* (re)create the sample format converter when the decoder's
               output format differs from what SDL expects (s16) */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                            av_get_sample_fmt_name(dec->sample_fmt),
                            av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                    break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert the decoded samples to s16 into audio_buf1 */
                const void *ibuf[6] = { is->frame->data[0] };
                void *obuf[6];
                int istride[6] = { av_get_bytes_per_sample(dec->sample_fmt) };
                int ostride[6] = { 2 };
                int len= data_size/istride[0];
                obuf[0] = av_realloc(is->audio_buf1, FFALIGN(len * ostride[0], 32));
                if (!obuf[0]) {
                    return AVERROR(ENOMEM);
                }
                is->audio_buf1 = obuf[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len) < 0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf = is->audio_buf1;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size = len * 2;
            } else {
                /* already s16: expose the decoder's buffer directly */
                is->audio_buf = is->frame->data[0];
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of this frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#ifdef DEBUG
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);
        memset(pkt_temp, 0, sizeof(*pkt_temp));

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
            return -1;

        if (pkt->data == flush_pkt.data)
            avcodec_flush_buffers(dec);

        /* pkt_temp tracks the unconsumed part of pkt across iterations */
        *pkt_temp = *pkt;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2128
/* SDL audio callback: fill "stream" with "len" bytes of decoded audio.
 * Decodes as many frames as needed, substitutes silence on decode errors,
 * and feeds the oscilloscope display when enabled.  Runs on SDL's audio
 * thread. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* timestamp used by the audio clock to account for callback latency */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* current buffer exhausted: decode the next chunk */
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is, &pts);
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf = is->silence_buf;
                is->audio_buf_size = sizeof(is->silence_buf);
            } else {
                if (is->show_audio)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                /* stretch/shrink the chunk to stay in sync with the master clock */
                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                               pts);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        /* copy as much of the current chunk as fits into the SDL buffer */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2163
2164 /* open a given stream. Return 0 if OK */
2165 static int stream_component_open(VideoState *is, int stream_index)
2166 {
2167 AVFormatContext *ic = is->ic;
2168 AVCodecContext *avctx;
2169 AVCodec *codec;
2170 SDL_AudioSpec wanted_spec, spec;
2171 AVDictionary *opts;
2172 AVDictionaryEntry *t = NULL;
2173
2174 if (stream_index < 0 || stream_index >= ic->nb_streams)
2175 return -1;
2176 avctx = ic->streams[stream_index]->codec;
2177
2178 opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2179
2180 codec = avcodec_find_decoder(avctx->codec_id);
2181 avctx->debug_mv = debug_mv;
2182 avctx->debug = debug;
2183 avctx->workaround_bugs = workaround_bugs;
2184 avctx->lowres = lowres;
2185 avctx->idct_algo = idct;
2186 avctx->skip_frame = skip_frame;
2187 avctx->skip_idct = skip_idct;
2188 avctx->skip_loop_filter = skip_loop_filter;
2189 avctx->error_concealment = error_concealment;
2190 avctx->thread_count = thread_count;
2191
2192 if (lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2193 if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2194
2195 if (!av_dict_get(opts, "threads", NULL, 0))
2196 av_dict_set(&opts, "threads", "auto", 0);
2197 if (!codec ||
2198 avcodec_open2(avctx, codec, &opts) < 0)
2199 return -1;
2200 if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2201 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2202 return AVERROR_OPTION_NOT_FOUND;
2203 }
2204
2205 /* prepare audio output */
2206 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2207 wanted_spec.freq = avctx->sample_rate;
2208 wanted_spec.format = AUDIO_S16SYS;
2209 wanted_spec.channels = avctx->channels;
2210 wanted_spec.silence = 0;
2211 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2212 wanted_spec.callback = sdl_audio_callback;
2213 wanted_spec.userdata = is;
2214 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2215 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2216 return -1;
2217 }
2218 is->audio_hw_buf_size = spec.size;
2219 is->audio_src_fmt = AV_SAMPLE_FMT_S16;
2220 }
2221
2222 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2223 switch (avctx->codec_type) {
2224 case AVMEDIA_TYPE_AUDIO:
2225 is->audio_stream = stream_index;
2226 is->audio_st = ic->streams[stream_index];
2227 is->audio_buf_size = 0;
2228 is->audio_buf_index = 0;
2229
2230 /* init averaging filter */
2231 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2232 is->audio_diff_avg_count = 0;
2233 /* since we do not have a precise anough audio fifo fullness,
2234 we correct audio sync only if larger than this threshold */
2235 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2236
2237 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2238 packet_queue_init(&is->audioq);
2239 SDL_PauseAudio(0);
2240 break;
2241 case AVMEDIA_TYPE_VIDEO:
2242 is->video_stream = stream_index;
2243 is->video_st = ic->streams[stream_index];
2244
2245 packet_queue_init(&is->videoq);
2246 is->video_tid = SDL_CreateThread(video_thread, is);
2247 break;
2248 case AVMEDIA_TYPE_SUBTITLE:
2249 is->subtitle_stream = stream_index;
2250 is->subtitle_st = ic->streams[stream_index];
2251 packet_queue_init(&is->subtitleq);
2252
2253 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2254 break;
2255 default:
2256 break;
2257 }
2258 return 0;
2259 }
2260
/* Close the stream with index stream_index and undo everything
 * stream_component_open() set up for it. The order is significant:
 * first abort the packet queue (wakes any blocked consumer), then stop
 * the consumer (audio device / decode thread), and only then free the
 * queue and per-stream buffers. Finally the codec is closed and the
 * per-type VideoState fields are reset. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* stop the SDL callback before tearing the queue down */
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        av_free_packet(&is->audio_pkt);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        av_freep(&is->audio_buf1);
        /* audio_buf may alias audio_buf1; just clear the pointer */
        is->audio_buf = NULL;
        av_freep(&is->frame);

        /* release the RDFT state used by the spectrum display, if any */
        if (is->rdft) {
            av_rdft_end(is->rdft);
            av_freep(&is->rdft_data);
            is->rdft = NULL;
            is->rdft_bits = 0;
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* tell the demuxer to drop further packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2343
2344 /* since we have only one decoding thread, we can use a global
2345 variable instead of a thread local variable */
2346 static VideoState *global_video_state;
2347
2348 static int decode_interrupt_cb(void *ctx)
2349 {
2350 return global_video_state && global_video_state->abort_request;
2351 }
2352
/* this thread gets the stream from the disk or the network */
/* Demuxing thread: opens the input, selects and opens the best
 * audio/video/subtitle streams, then loops reading packets and feeding
 * the per-stream packet queues until abort is requested. Also services
 * seek requests, pause/resume, queue back-pressure, EOF flushing and
 * looping. Always returns 0; errors are reported via FF_QUIT_EVENT. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic = NULL;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    int eof = 0;
    int pkt_in_play_range = 0;
    AVDictionaryEntry *t;
    AVDictionary **opts;
    int orig_nb_streams;

    /* -1 marks "no stream selected" for every media type */
    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* publish this state so decode_interrupt_cb() can see abort_request */
    global_video_state = is;

    ic = avformat_alloc_context();
    ic->interrupt_callback.callback = decode_interrupt_cb;
    err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    /* any leftover entry in format_opts was not recognized by the demuxer */
    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }
    is->ic = ic;

    if (genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    /* build one codec-options dict per stream for probing */
    opts = setup_find_stream_info_opts(ic, codec_opts);
    orig_nb_streams = ic->nb_streams;

    err = avformat_find_stream_info(ic, opts);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    for (i = 0; i < orig_nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);

    if (ic->pb)
        ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end

    /* -1 means "auto": seek by bytes only for formats with discontinuous
       timestamps (e.g. MPEG-TS) */
    if (seek_by_bytes < 0)
        seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything, then re-enable only the chosen streams */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    /* NOTE(review): subtitle selection is guarded by video_disable, not a
       dedicated subtitle flag — looks intentional (subs need video) but
       worth confirming */
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret = -1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: fall back to the audio visualization display */
    if (ret < 0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for (;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to network protocols (e.g. RTSP) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return = av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target = is->seek_pos;
            int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
            // FIXME the +-2 is due to rounding being not done in the correct direction in generation
            //      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            } else {
                /* drop all queued packets and push a flush marker so the
                   decoders reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof = 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if (eof) {
            /* send empty (NULL-data) packets to drain decoders that buffer
               frames internally */
            if (is->video_stream >= 0) {
                av_init_packet(pkt);
                pkt->data = NULL;
                pkt->size = 0;
                pkt->stream_index = is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            if (is->audio_stream >= 0 &&
                is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
                av_init_packet(pkt);
                pkt->data = NULL;
                pkt->size = 0;
                pkt->stream_index = is->audio_stream;
                packet_queue_put(&is->audioq, pkt);
            }
            SDL_Delay(10);
            /* once all queues have drained, either loop or exit */
            if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
                /* loop == 0 means loop forever; otherwise count down */
                if (loop != 1 && (!loop || --loop)) {
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                } else if (autoexit) {
                    ret = AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
                eof = 1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
                <= ((double)duration / 1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        avformat_close_input(&is->ic);
    }

    /* tell the event loop we are done (on error or autoexit) */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2612
2613 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2614 {
2615 VideoState *is;
2616
2617 is = av_mallocz(sizeof(VideoState));
2618 if (!is)
2619 return NULL;
2620 av_strlcpy(is->filename, filename, sizeof(is->filename));
2621 is->iformat = iformat;
2622 is->ytop = 0;
2623 is->xleft = 0;
2624
2625 /* start video display */
2626 is->pictq_mutex = SDL_CreateMutex();
2627 is->pictq_cond = SDL_CreateCond();
2628
2629 is->subpq_mutex = SDL_CreateMutex();
2630 is->subpq_cond = SDL_CreateCond();
2631
2632 is->av_sync_type = av_sync_type;
2633 is->parse_tid = SDL_CreateThread(decode_thread, is);
2634 if (!is->parse_tid) {
2635 av_free(is);
2636 return NULL;
2637 }
2638 return is;
2639 }
2640
/* Switch to the next usable stream of the given media type, scanning
 * circularly from the currently open stream. For subtitles, running past
 * the last stream selects "no subtitle" (stream_index == -1); for audio
 * and video, wrapping back to the starting stream is a no-op. The old
 * stream is closed before the new one is opened. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* subtitles may start from "none" (-1); audio/video need an open stream */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for (;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrap-around for subtitles means "disable subtitles" */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* full circle without a candidate: keep the current stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch (codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2689
2690
2691 static void toggle_full_screen(void)
2692 {
2693 is_full_screen = !is_full_screen;
2694 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2695 /* OS X needs to empty the picture_queue */
2696 for (int i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2697 cur_stream->pictq[i].reallocate = 1;
2698 }
2699 #endif
2700 video_open(cur_stream);
2701 }
2702
2703 static void toggle_pause(void)
2704 {
2705 if (cur_stream)
2706 stream_pause(cur_stream);
2707 step = 0;
2708 }
2709
2710 static void step_to_next_frame(void)
2711 {
2712 if (cur_stream) {
2713 /* if the stream is paused unpause it, then step */
2714 if (cur_stream->paused)
2715 stream_pause(cur_stream);
2716 }
2717 step = 1;
2718 }
2719
2720 static void toggle_audio_display(void)
2721 {
2722 if (cur_stream) {
2723 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2724 cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2725 fill_rectangle(screen,
2726 cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2727 bgcolor);
2728 SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2729 }
2730 }
2731
/* handle an event sent by the GUI */
/* Main SDL event loop: dispatches keyboard shortcuts (quit, pause, seek,
 * stream cycling), mouse seeking, window resize and the player's custom
 * FF_* events. Never returns except through do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for (;;) {
        double x;
        SDL_WaitEvent(&event);
        switch (event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch (event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: // S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek by +/-10 s or +/-60 s */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* best byte position we know: video pos, else audio
                           packet pos, else current file offset */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
                            pos = cur_stream->video_current_pos;
                        } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
                            pos = cur_stream->audio_pkt.pos;
                        } else
                            pos = avio_tell(cur_stream->ic->pb);
                        /* convert seconds to bytes via the bitrate (or a
                           180 kB/s guess when unknown) */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fall through: a click seeks just like a drag */
        case SDL_MOUSEMOTION:
            if (event.type == SDL_MOUSEBUTTONDOWN) {
                x = event.button.x;
            } else {
                if (event.motion.state != SDL_PRESSED)
                    break;
                x = event.motion.x;
            }
            if (cur_stream) {
                /* map the horizontal click position to a fraction of the
                   file (by size or by duration) */
                if (seek_by_bytes || cur_stream->ic->duration <= 0) {
                    uint64_t size = avio_size(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                } else {
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns  = cur_stream->ic->duration / 1000000LL;
                    thh  = tns / 3600;
                    tmm  = (tns % 3600) / 60;
                    tss  = (tns % 60);
                    frac = x / cur_stream->width;
                    ns   = frac * tns;
                    hh   = ns / 3600;
                    mm   = (ns % 3600) / 60;
                    ss   = (ns % 60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac * cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width  = cur_stream->width  = event.resize.w;
                screen_height = cur_stream->height = event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread: (re)allocate the picture */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh = 0;
            break;
        default:
            break;
        }
    }
}
2878
/* Stub for the removed -s option: report the removal and fail. */
static int opt_frame_size(const char *opt, const char *arg)
{
    av_log(NULL, AV_LOG_ERROR,
           "Option '%s' has been removed, use private format options instead\n", opt);
    return AVERROR(EINVAL);
}
2885
2886 static int opt_width(const char *opt, const char *arg)
2887 {
2888 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2889 return 0;
2890 }
2891
2892 static int opt_height(const char *opt, const char *arg)
2893 {
2894 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2895 return 0;
2896 }
2897
2898 static int opt_format(const char *opt, const char *arg)
2899 {
2900 file_iformat = av_find_input_format(arg);
2901 if (!file_iformat) {
2902 fprintf(stderr, "Unknown input format: %s\n", arg);
2903 return AVERROR(EINVAL);
2904 }
2905 return 0;
2906 }
2907
/* Stub for the removed -pix_fmt option: report the removal and fail. */
static int opt_frame_pix_fmt(const char *opt, const char *arg)
{
    av_log(NULL, AV_LOG_ERROR,
           "Option '%s' has been removed, use private format options instead\n", opt);
    return AVERROR(EINVAL);
}
2914
2915 static int opt_sync(const char *opt, const char *arg)
2916 {
2917 if (!strcmp(arg, "audio"))
2918 av_sync_type = AV_SYNC_AUDIO_MASTER;
2919 else if (!strcmp(arg, "video"))
2920 av_sync_type = AV_SYNC_VIDEO_MASTER;
2921 else if (!strcmp(arg, "ext"))
2922 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2923 else {
2924 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2925 exit(1);
2926 }
2927 return 0;
2928 }
2929
2930 static int opt_seek(const char *opt, const char *arg)
2931 {
2932 start_time = parse_time_or_die(opt, arg, 1);
2933 return 0;
2934 }
2935
2936 static int opt_duration(const char *opt, const char *arg)
2937 {
2938 duration = parse_time_or_die(opt, arg, 1);
2939 return 0;
2940 }
2941
2942 static int opt_debug(const char *opt, const char *arg)
2943 {
2944 av_log_set_level(99);
2945 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2946 return 0;
2947 }
2948
2949 static int opt_vismv(const char *opt, const char *arg)
2950 {
2951 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2952 return 0;
2953 }
2954
2955 static int opt_thread_count(const char *opt, const char *arg)
2956 {
2957 thread_count = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2958 #if !HAVE_THREADS
2959 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2960 #endif
2961 return 0;
2962 }
2963
/* Command-line option table; common options (-h, -loglevel, ...) are
 * pulled in from cmdutils. Entries marked OPT_EXPERT only appear in the
 * advanced section of the help output. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display / stream selection */
    { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
    { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
    { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
    { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
    /* playback range / seeking */
    { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, { (void*)&opt_duration }, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
    { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
    /* decoder tuning (expert) */
    { "debug", HAS_ARG | OPT_EXPERT, { (void*)opt_debug }, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, { (void*)opt_vismv }, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, { (void*)opt_thread_count }, "thread count", "count" },
    /* behaviour */
    { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
    { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
    { NULL, },
};
3010
3011 static void show_usage(void)
3012 {
3013 printf("Simple media player\n");
3014 printf("usage: %s [options] input_file\n", program_name);
3015 printf("\n");
3016 }
3017
/* Print the full help: usage line, main and expert option tables, the
 * decoding-related AVOptions of libavcodec/libavformat (and swscale when
 * built without libavfilter), and the interactive key bindings. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    /* mask OPT_EXPERT, value 0: non-expert options only */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
    show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
#if !CONFIG_AVFILTER
    show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3046
3047 static void opt_input_file(void *optctx, const char *filename)
3048 {
3049 if (input_filename) {
3050 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3051 filename, input_filename);
3052 exit(1);
3053 }
3054 if (!strcmp(filename, "-"))
3055 filename = "pipe:";
3056 input_filename = filename;
3057 }
3058
/* Called from the main */
/* Program entry point: register all libav components, parse the command
 * line, initialize SDL (video/audio/timer), open the input stream and
 * hand control to the SDL event loop (which never returns). */
int main(int argc, char **argv)
{
    int flags;

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    /* handle -loglevel before anything can emit log output */
    parse_loglevel(argc, argv, options);

    /* register all codecs, demux and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();
    avformat_network_init();

    init_opts();

    show_banner();

    parse_options(NULL, argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop resolution for fullscreen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop SDL event types the player does not use */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet pushed into the queues after a seek; decoders flush
       when they see its data pointer */
    av_init_packet(&flush_pkt);
    flush_pkt.data = "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}