Remove some hunks of unused and disabled code cruft.
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavdevice/avdevice.h"
27 #include "libswscale/swscale.h"
28 #include "libavcodec/audioconvert.h"
29 #include "libavcodec/colorspace.h"
30 #include "libavcodec/opt.h"
31
32 #include "cmdutils.h"
33
34 #include <SDL.h>
35 #include <SDL_thread.h>
36
37 #ifdef __MINGW32__
38 #undef main /* We don't want SDL to override our main() */
39 #endif
40
41 #undef exit
42
43 const char program_name[] = "FFplay";
44 const int program_birth_year = 2003;
45
46 //#define DEBUG_SYNC
47
48 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
49 #define MAX_AUDIOQ_SIZE (20 * 16 * 1024)
50 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
51
52 /* SDL audio buffer size, in samples. Should be small to have precise
53 A/V sync as SDL does not have hardware buffer fullness info. */
54 #define SDL_AUDIO_BUFFER_SIZE 1024
55
56 /* no AV sync correction is done if below the AV sync threshold */
57 #define AV_SYNC_THRESHOLD 0.01
58 /* no AV correction is done if too big error */
59 #define AV_NOSYNC_THRESHOLD 10.0
60
61 /* maximum audio speed change to get correct sync */
62 #define SAMPLE_CORRECTION_PERCENT_MAX 10
63
64 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
65 #define AUDIO_DIFF_AVG_NB 20
66
67 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
68 #define SAMPLE_ARRAY_SIZE (2*65536)
69
70 static int sws_flags = SWS_BICUBIC;
71
/* Thread-safe FIFO of demuxed AVPackets, shared between a producer
   thread and a consumer thread (see packet_queue_put/get below). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;  /* singly-linked list head/tail */
    int nb_packets;                      /* number of queued packets */
    int size;            /* total bytes queued (payload + list node overhead) */
    int abort_request;   /* set by packet_queue_abort() to unblock readers */
    SDL_mutex *mutex;    /* protects all fields above */
    SDL_cond *cond;      /* signalled on put and on abort */
} PacketQueue;
80
#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame, stored as an SDL YUV overlay. */
typedef struct VideoPicture {
    double pts; ///<presentation time stamp for this picture
    SDL_Overlay *bmp;  /* NULL until allocated in the main thread (alloc_picture) */
    int width, height; /* source height & width */
    int allocated;     /* set once the overlay has been (re)created */
} VideoPicture;

/* One decoded subtitle with its presentation time. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;
95
/* Which clock drives A/V synchronisation (see get_master_clock). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
101
/* All state for one open media file: demuxer context, per-stream decoder
   state, packet/picture/subtitle queues, and the clocks used for A/V
   sync. A single instance is shared between several threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;   /* presumably the demux/parse thread — created outside this chunk */
    SDL_Thread *video_tid;   /* presumably the video decode thread — created outside this chunk */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;            /* seek pending; set by stream_seek() */
    int seek_flags;          /* AVSEEK_FLAG_* for the pending seek */
    int64_t seek_pos;
    int64_t seek_rel;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;        /* one of the AV_SYNC_* enum values */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    uint8_t *audio_buf;          /* points at audio_buf1 or audio_buf2 — confirm in audio path */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];  /* ring buffer feeding the waveform display */
    int sample_array_index;
    int last_i_start;    /* cached waveform start index while paused */

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];   /* subtitle picture ring buffer */
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];  /* decoded picture ring buffer */
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
    struct SwsContext *img_convert_ctx;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;  /* display window geometry */
} VideoState;
177
/* forward declarations for functions defined later in this file */
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int wanted_subtitle_stream= -1;  /* -1: no explicit choice */
static int seek_by_bytes;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;  /* set elsewhere; read in video_audio_display — presumably av_gettime() at the last audio callback */

static AVPacket flush_pkt;  /* sentinel compared by address in packet_queue_put() */

/* custom SDL event codes posted to the main event loop */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;
229
230 /* packet queue handling */
231 static void packet_queue_init(PacketQueue *q)
232 {
233 memset(q, 0, sizeof(PacketQueue));
234 q->mutex = SDL_CreateMutex();
235 q->cond = SDL_CreateCond();
236 }
237
238 static void packet_queue_flush(PacketQueue *q)
239 {
240 AVPacketList *pkt, *pkt1;
241
242 SDL_LockMutex(q->mutex);
243 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
244 pkt1 = pkt->next;
245 av_free_packet(&pkt->pkt);
246 av_freep(&pkt);
247 }
248 q->last_pkt = NULL;
249 q->first_pkt = NULL;
250 q->nb_packets = 0;
251 q->size = 0;
252 SDL_UnlockMutex(q->mutex);
253 }
254
255 static void packet_queue_end(PacketQueue *q)
256 {
257 packet_queue_flush(q);
258 SDL_DestroyMutex(q->mutex);
259 SDL_DestroyCond(q->cond);
260 }
261
262 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
263 {
264 AVPacketList *pkt1;
265
266 /* duplicate the packet */
267 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
268 return -1;
269
270 pkt1 = av_malloc(sizeof(AVPacketList));
271 if (!pkt1)
272 return -1;
273 pkt1->pkt = *pkt;
274 pkt1->next = NULL;
275
276
277 SDL_LockMutex(q->mutex);
278
279 if (!q->last_pkt)
280
281 q->first_pkt = pkt1;
282 else
283 q->last_pkt->next = pkt1;
284 q->last_pkt = pkt1;
285 q->nb_packets++;
286 q->size += pkt1->pkt.size + sizeof(*pkt1);
287 /* XXX: should duplicate packet data in DV case */
288 SDL_CondSignal(q->cond);
289
290 SDL_UnlockMutex(q->mutex);
291 return 0;
292 }
293
294 static void packet_queue_abort(PacketQueue *q)
295 {
296 SDL_LockMutex(q->mutex);
297
298 q->abort_request = 1;
299
300 SDL_CondSignal(q->cond);
301
302 SDL_UnlockMutex(q->mutex);
303 }
304
305 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
306 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
307 {
308 AVPacketList *pkt1;
309 int ret;
310
311 SDL_LockMutex(q->mutex);
312
313 for(;;) {
314 if (q->abort_request) {
315 ret = -1;
316 break;
317 }
318
319 pkt1 = q->first_pkt;
320 if (pkt1) {
321 q->first_pkt = pkt1->next;
322 if (!q->first_pkt)
323 q->last_pkt = NULL;
324 q->nb_packets--;
325 q->size -= pkt1->pkt.size + sizeof(*pkt1);
326 *pkt = pkt1->pkt;
327 av_free(pkt1);
328 ret = 1;
329 break;
330 } else if (!block) {
331 ret = 0;
332 break;
333 } else {
334 SDL_CondWait(q->cond, q->mutex);
335 }
336 }
337 SDL_UnlockMutex(q->mutex);
338 return ret;
339 }
340
341 static inline void fill_rectangle(SDL_Surface *screen,
342 int x, int y, int w, int h, int color)
343 {
344 SDL_Rect rect;
345 rect.x = x;
346 rect.y = y;
347 rect.w = w;
348 rect.h = h;
349 SDL_FillRect(screen, &rect, color);
350 }
351
#if 0
/* draw only the border of a rectangle */
/* NOTE: compiled out; the only caller is the commented-out line in
   video_image_display(). Kept for reference. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
389
/* Blend newp over oldp with alpha a; s is the extra shift applied when
   newp is the sum of 2 (s=1) or 4 (s=2) accumulated samples. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB word at s into r, g, b, a. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up palette entry for the index byte at s and unpack it as AYUV. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack AYUV components into the 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per palette index in the subtitle bitmap */
#define BPP 1
418
419 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
420 {
421 int wrap, wrap3, width2, skip2;
422 int y, u, v, a, u1, v1, a1, w, h;
423 uint8_t *lum, *cb, *cr;
424 const uint8_t *p;
425 const uint32_t *pal;
426 int dstx, dsty, dstw, dsth;
427
428 dstw = av_clip(rect->w, 0, imgw);
429 dsth = av_clip(rect->h, 0, imgh);
430 dstx = av_clip(rect->x, 0, imgw - dstw);
431 dsty = av_clip(rect->y, 0, imgh - dsth);
432 lum = dst->data[0] + dsty * dst->linesize[0];
433 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
434 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
435
436 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
437 skip2 = dstx >> 1;
438 wrap = dst->linesize[0];
439 wrap3 = rect->pict.linesize[0];
440 p = rect->pict.data[0];
441 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
442
443 if (dsty & 1) {
444 lum += dstx;
445 cb += skip2;
446 cr += skip2;
447
448 if (dstx & 1) {
449 YUVA_IN(y, u, v, a, p, pal);
450 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
451 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
452 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
453 cb++;
454 cr++;
455 lum++;
456 p += BPP;
457 }
458 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
459 YUVA_IN(y, u, v, a, p, pal);
460 u1 = u;
461 v1 = v;
462 a1 = a;
463 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
464
465 YUVA_IN(y, u, v, a, p + BPP, pal);
466 u1 += u;
467 v1 += v;
468 a1 += a;
469 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
470 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
471 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
472 cb++;
473 cr++;
474 p += 2 * BPP;
475 lum += 2;
476 }
477 if (w) {
478 YUVA_IN(y, u, v, a, p, pal);
479 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
480 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
481 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
482 p++;
483 lum++;
484 }
485 p += wrap3 - dstw * BPP;
486 lum += wrap - dstw - dstx;
487 cb += dst->linesize[1] - width2 - skip2;
488 cr += dst->linesize[2] - width2 - skip2;
489 }
490 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
491 lum += dstx;
492 cb += skip2;
493 cr += skip2;
494
495 if (dstx & 1) {
496 YUVA_IN(y, u, v, a, p, pal);
497 u1 = u;
498 v1 = v;
499 a1 = a;
500 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
501 p += wrap3;
502 lum += wrap;
503 YUVA_IN(y, u, v, a, p, pal);
504 u1 += u;
505 v1 += v;
506 a1 += a;
507 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
508 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
509 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
510 cb++;
511 cr++;
512 p += -wrap3 + BPP;
513 lum += -wrap + 1;
514 }
515 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
516 YUVA_IN(y, u, v, a, p, pal);
517 u1 = u;
518 v1 = v;
519 a1 = a;
520 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521
522 YUVA_IN(y, u, v, a, p + BPP, pal);
523 u1 += u;
524 v1 += v;
525 a1 += a;
526 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
527 p += wrap3;
528 lum += wrap;
529
530 YUVA_IN(y, u, v, a, p, pal);
531 u1 += u;
532 v1 += v;
533 a1 += a;
534 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535
536 YUVA_IN(y, u, v, a, p + BPP, pal);
537 u1 += u;
538 v1 += v;
539 a1 += a;
540 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
541
542 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
543 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
544
545 cb++;
546 cr++;
547 p += -wrap3 + 2 * BPP;
548 lum += -wrap + 2;
549 }
550 if (w) {
551 YUVA_IN(y, u, v, a, p, pal);
552 u1 = u;
553 v1 = v;
554 a1 = a;
555 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
556 p += wrap3;
557 lum += wrap;
558 YUVA_IN(y, u, v, a, p, pal);
559 u1 += u;
560 v1 += v;
561 a1 += a;
562 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
563 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
564 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
565 cb++;
566 cr++;
567 p += -wrap3 + BPP;
568 lum += -wrap + 1;
569 }
570 p += wrap3 + (wrap3 - dstw * BPP);
571 lum += wrap + (wrap - dstw - dstx);
572 cb += dst->linesize[1] - width2 - skip2;
573 cr += dst->linesize[2] - width2 - skip2;
574 }
575 /* handle odd height */
576 if (h) {
577 lum += dstx;
578 cb += skip2;
579 cr += skip2;
580
581 if (dstx & 1) {
582 YUVA_IN(y, u, v, a, p, pal);
583 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
584 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
585 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
586 cb++;
587 cr++;
588 lum++;
589 p += BPP;
590 }
591 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
592 YUVA_IN(y, u, v, a, p, pal);
593 u1 = u;
594 v1 = v;
595 a1 = a;
596 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597
598 YUVA_IN(y, u, v, a, p + BPP, pal);
599 u1 += u;
600 v1 += v;
601 a1 += a;
602 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
603 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
604 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
605 cb++;
606 cr++;
607 p += 2 * BPP;
608 lum += 2;
609 }
610 if (w) {
611 YUVA_IN(y, u, v, a, p, pal);
612 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
613 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
614 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
615 }
616 }
617 }
618
619 static void free_subpicture(SubPicture *sp)
620 {
621 int i;
622
623 for (i = 0; i < sp->sub.num_rects; i++)
624 {
625 av_freep(&sp->sub.rects[i]->pict.data[0]);
626 av_freep(&sp->sub.rects[i]->pict.data[1]);
627 av_freep(&sp->sub.rects[i]);
628 }
629
630 av_free(sp->sub.rects);
631
632 memset(&sp->sub, 0, sizeof(AVSubtitle));
633 }
634
/* Show the picture at the read index of the picture queue: blend any
   due subtitle into its overlay, compute the letterboxed destination
   rectangle from the aspect ratio, and display the overlay. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        /* prefer the stream-level sample aspect ratio, then the codec's;
           0 (unset) falls back to square pixels below */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend the subtitle into the overlay once its start
                   display time has been reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* plane indices 1 and 2 are swapped to match the
                       overlay's internal plane order */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the aspect-corrected picture inside the window,
           keeping dimensions even (& ~1) for the YUV overlay */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
748
/* Euclidean modulo: like a % b but the result is always in [0, b). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
757
758 static void video_audio_display(VideoState *s)
759 {
760 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
761 int ch, channels, h, h2, bgcolor, fgcolor;
762 int16_t time_diff;
763
764 /* compute display index : center on currently output samples */
765 channels = s->audio_st->codec->channels;
766 nb_display_channels = channels;
767 if (!s->paused) {
768 n = 2 * channels;
769 delay = audio_write_get_buf_size(s);
770 delay /= n;
771
772 /* to be more precise, we take into account the time spent since
773 the last buffer computation */
774 if (audio_callback_time) {
775 time_diff = av_gettime() - audio_callback_time;
776 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
777 }
778
779 delay -= s->width / 2;
780 if (delay < s->width)
781 delay = s->width;
782
783 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
784
785 h= INT_MIN;
786 for(i=0; i<1000; i+=channels){
787 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
788 int a= s->sample_array[idx];
789 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
790 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
791 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
792 int score= a-d;
793 if(h<score && (b^c)<0){
794 h= score;
795 i_start= idx;
796 }
797 }
798
799 s->last_i_start = i_start;
800 } else {
801 i_start = s->last_i_start;
802 }
803
804 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
805 fill_rectangle(screen,
806 s->xleft, s->ytop, s->width, s->height,
807 bgcolor);
808
809 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
810
811 /* total height for one channel */
812 h = s->height / nb_display_channels;
813 /* graph height / 2 */
814 h2 = (h * 9) / 20;
815 for(ch = 0;ch < nb_display_channels; ch++) {
816 i = i_start + ch;
817 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
818 for(x = 0; x < s->width; x++) {
819 y = (s->sample_array[i] * h2) >> 15;
820 if (y < 0) {
821 y = -y;
822 ys = y1 - y;
823 } else {
824 ys = y1;
825 }
826 fill_rectangle(screen,
827 s->xleft + x, ys, 1, y,
828 fgcolor);
829 i += channels;
830 if (i >= SAMPLE_ARRAY_SIZE)
831 i -= SAMPLE_ARRAY_SIZE;
832 }
833 }
834
835 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
836
837 for(ch = 1;ch < nb_display_channels; ch++) {
838 y = s->ytop + ch * h;
839 fill_rectangle(screen,
840 s->xleft, y, s->width, 1,
841 fgcolor);
842 }
843 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
844 }
845
846 static int video_open(VideoState *is){
847 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
848 int w,h;
849
850 if(is_full_screen) flags |= SDL_FULLSCREEN;
851 else flags |= SDL_RESIZABLE;
852
853 if (is_full_screen && fs_screen_width) {
854 w = fs_screen_width;
855 h = fs_screen_height;
856 } else if(!is_full_screen && screen_width){
857 w = screen_width;
858 h = screen_height;
859 }else if (is->video_st && is->video_st->codec->width){
860 w = is->video_st->codec->width;
861 h = is->video_st->codec->height;
862 } else {
863 w = 640;
864 h = 480;
865 }
866 #ifndef __APPLE__
867 screen = SDL_SetVideoMode(w, h, 0, flags);
868 #else
869 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
870 screen = SDL_SetVideoMode(w, h, 24, flags);
871 #endif
872 if (!screen) {
873 fprintf(stderr, "SDL: could not set video mode - exiting\n");
874 return -1;
875 }
876 SDL_WM_SetCaption("FFplay", "FFplay");
877
878 is->width = screen->w;
879 is->height = screen->h;
880
881 return 0;
882 }
883
884 /* display the current picture, if any */
885 static void video_display(VideoState *is)
886 {
887 if(!screen)
888 video_open(cur_stream);
889 if (is->audio_st && is->show_audio)
890 video_audio_display(is);
891 else if (is->video_st)
892 video_image_display(is);
893 }
894
895 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
896 {
897 SDL_Event event;
898 event.type = FF_REFRESH_EVENT;
899 event.user.data1 = opaque;
900 SDL_PushEvent(&event);
901 return 0; /* 0 means stop timer */
902 }
903
904 /* schedule a video refresh in 'delay' ms */
905 static void schedule_refresh(VideoState *is, int delay)
906 {
907 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
908 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
909 }
910
911 /* get the current audio clock value */
912 static double get_audio_clock(VideoState *is)
913 {
914 double pts;
915 int hw_buf_size, bytes_per_sec;
916 pts = is->audio_clock;
917 hw_buf_size = audio_write_get_buf_size(is);
918 bytes_per_sec = 0;
919 if (is->audio_st) {
920 bytes_per_sec = is->audio_st->codec->sample_rate *
921 2 * is->audio_st->codec->channels;
922 }
923 if (bytes_per_sec)
924 pts -= (double)hw_buf_size / bytes_per_sec;
925 return pts;
926 }
927
928 /* get the current video clock value */
929 static double get_video_clock(VideoState *is)
930 {
931 double delta;
932 if (is->paused) {
933 delta = 0;
934 } else {
935 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
936 }
937 return is->video_current_pts + delta;
938 }
939
940 /* get the current external clock value */
941 static double get_external_clock(VideoState *is)
942 {
943 int64_t ti;
944 ti = av_gettime();
945 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
946 }
947
948 /* get the current master clock value */
949 static double get_master_clock(VideoState *is)
950 {
951 double val;
952
953 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
954 if (is->video_st)
955 val = get_video_clock(is);
956 else
957 val = get_audio_clock(is);
958 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
959 if (is->audio_st)
960 val = get_audio_clock(is);
961 else
962 val = get_video_clock(is);
963 } else {
964 val = get_external_clock(is);
965 }
966 return val;
967 }
968
969 /* seek in the stream */
970 static void stream_seek(VideoState *is, int64_t pos, int64_t rel)
971 {
972 if (!is->seek_req) {
973 is->seek_pos = pos;
974 is->seek_rel = rel;
975 if (seek_by_bytes)
976 is->seek_flags |= AVSEEK_FLAG_BYTE;
977 is->seek_req = 1;
978 }
979 }
980
981 /* pause or resume the video */
982 static void stream_pause(VideoState *is)
983 {
984 is->paused = !is->paused;
985 if (!is->paused) {
986 is->video_current_pts = get_video_clock(is);
987 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
988 }
989 }
990
991 static double compute_frame_delay(double frame_current_pts, VideoState *is)
992 {
993 double actual_delay, delay, sync_threshold, ref_clock, diff;
994
995 /* compute nominal delay */
996 delay = frame_current_pts - is->frame_last_pts;
997 if (delay <= 0 || delay >= 10.0) {
998 /* if incorrect delay, use previous one */
999 delay = is->frame_last_delay;
1000 } else {
1001 is->frame_last_delay = delay;
1002 }
1003 is->frame_last_pts = frame_current_pts;
1004
1005 /* update delay to follow master synchronisation source */
1006 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1007 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1008 /* if video is slave, we try to correct big delays by
1009 duplicating or deleting a frame */
1010 ref_clock = get_master_clock(is);
1011 diff = frame_current_pts - ref_clock;
1012
1013 /* skip or repeat frame. We take into account the
1014 delay to compute the threshold. I still don't know
1015 if it is the best guess */
1016 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1017 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1018 if (diff <= -sync_threshold)
1019 delay = 0;
1020 else if (diff >= sync_threshold)
1021 delay = 2 * delay;
1022 }
1023 }
1024
1025 is->frame_timer += delay;
1026 /* compute the REAL delay (we need to do that to avoid
1027 long term errors */
1028 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1029 if (actual_delay < 0.010) {
1030 /* XXX: should skip picture */
1031 actual_delay = 0.010;
1032 }
1033
1034 #if defined(DEBUG_SYNC)
1035 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1036 delay, actual_delay, frame_current_pts, -diff);
1037 #endif
1038
1039 return actual_delay;
1040 }
1041
/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* launch timer for next picture */
            schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switched: drop every queued subpicture and
                       wake the subtitle thread waiting on subpq_cond */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        /* sp2 = the following subpicture, if queued */
                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the current subpicture when its display
                           window has ended or the next one is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* rate-limit the status line to one update per 30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1161
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* release any previous overlay before creating a new one */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

    /* NOTE(review): SDL_CreateYUVOverlay can return NULL on failure;
       downstream code appears to rely on vp->bmp checks — confirm */
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    /* publish the allocation under the lock and wake the decoder thread
       waiting in queue_picture() */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1186
1187 /**
1188 *
1189 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1190 */
1191 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1192 {
1193 VideoPicture *vp;
1194 int dst_pix_fmt;
1195
1196 /* wait until we have space to put a new picture */
1197 SDL_LockMutex(is->pictq_mutex);
1198 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1199 !is->videoq.abort_request) {
1200 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1201 }
1202 SDL_UnlockMutex(is->pictq_mutex);
1203
1204 if (is->videoq.abort_request)
1205 return -1;
1206
1207 vp = &is->pictq[is->pictq_windex];
1208
1209 /* alloc or resize hardware picture buffer */
1210 if (!vp->bmp ||
1211 vp->width != is->video_st->codec->width ||
1212 vp->height != is->video_st->codec->height) {
1213 SDL_Event event;
1214
1215 vp->allocated = 0;
1216
1217 /* the allocation must be done in the main thread to avoid
1218 locking problems */
1219 event.type = FF_ALLOC_EVENT;
1220 event.user.data1 = is;
1221 SDL_PushEvent(&event);
1222
1223 /* wait until the picture is allocated */
1224 SDL_LockMutex(is->pictq_mutex);
1225 while (!vp->allocated && !is->videoq.abort_request) {
1226 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1227 }
1228 SDL_UnlockMutex(is->pictq_mutex);
1229
1230 if (is->videoq.abort_request)
1231 return -1;
1232 }
1233
1234 /* if the frame is not skipped, then display it */
1235 if (vp->bmp) {
1236 AVPicture pict;
1237
1238 /* get a pointer on the bitmap */
1239 SDL_LockYUVOverlay (vp->bmp);
1240
1241 dst_pix_fmt = PIX_FMT_YUV420P;
1242 memset(&pict,0,sizeof(AVPicture));
1243 pict.data[0] = vp->bmp->pixels[0];
1244 pict.data[1] = vp->bmp->pixels[2];
1245 pict.data[2] = vp->bmp->pixels[1];
1246
1247 pict.linesize[0] = vp->bmp->pitches[0];
1248 pict.linesize[1] = vp->bmp->pitches[2];
1249 pict.linesize[2] = vp->bmp->pitches[1];
1250 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1251 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1252 is->video_st->codec->width, is->video_st->codec->height,
1253 is->video_st->codec->pix_fmt,
1254 is->video_st->codec->width, is->video_st->codec->height,
1255 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1256 if (is->img_convert_ctx == NULL) {
1257 fprintf(stderr, "Cannot initialize the conversion context\n");
1258 exit(1);
1259 }
1260 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1261 0, is->video_st->codec->height, pict.data, pict.linesize);
1262 /* update the bitmap content */
1263 SDL_UnlockYUVOverlay(vp->bmp);
1264
1265 vp->pts = pts;
1266
1267 /* now we can update the picture count */
1268 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1269 is->pictq_windex = 0;
1270 SDL_LockMutex(is->pictq_mutex);
1271 is->pictq_size++;
1272 SDL_UnlockMutex(is->pictq_mutex);
1273 }
1274 return 0;
1275 }
1276
1277 /**
1278 * compute the exact PTS for the picture if it is omitted in the stream
1279 * @param pts1 the dts of the pkt / pts of the frame
1280 */
1281 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1282 {
1283 double frame_delay, pts;
1284
1285 pts = pts1;
1286
1287 if (pts != 0) {
1288 /* update video clock with pts, if present */
1289 is->video_clock = pts;
1290 } else {
1291 pts = is->video_clock;
1292 }
1293 /* update video clock for next frame */
1294 frame_delay = av_q2d(is->video_st->codec->time_base);
1295 /* for MPEG2, the frame can be repeated, so we update the
1296 clock accordingly */
1297 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1298 is->video_clock += frame_delay;
1299
1300 #if defined(DEBUG_SYNC) && 0
1301 {
1302 int ftype;
1303 if (src_frame->pict_type == FF_B_TYPE)
1304 ftype = 'B';
1305 else if (src_frame->pict_type == FF_I_TYPE)
1306 ftype = 'I';
1307 else
1308 ftype = 'P';
1309 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1310 ftype, pts, pts1);
1311 }
1312 #endif
1313 return queue_picture(is, src_frame, pts);
1314 }
1315
1316 static int video_thread(void *arg)
1317 {
1318 VideoState *is = arg;
1319 AVPacket pkt1, *pkt = &pkt1;
1320 int len1, got_picture;
1321 AVFrame *frame= avcodec_alloc_frame();
1322 double pts;
1323
1324 for(;;) {
1325 while (is->paused && !is->videoq.abort_request) {
1326 SDL_Delay(10);
1327 }
1328 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1329 break;
1330
1331 if(pkt->data == flush_pkt.data){
1332 avcodec_flush_buffers(is->video_st->codec);
1333 continue;
1334 }
1335
1336 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1337 this packet, if any */
1338 is->video_st->codec->reordered_opaque= pkt->pts;
1339 len1 = avcodec_decode_video2(is->video_st->codec,
1340 frame, &got_picture,
1341 pkt);
1342
1343 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1344 && frame->reordered_opaque != AV_NOPTS_VALUE)
1345 pts= frame->reordered_opaque;
1346 else if(pkt->dts != AV_NOPTS_VALUE)
1347 pts= pkt->dts;
1348 else
1349 pts= 0;
1350 pts *= av_q2d(is->video_st->time_base);
1351
1352 // if (len1 < 0)
1353 // break;
1354 if (got_picture) {
1355 if (output_picture2(is, frame, pts) < 0)
1356 goto the_end;
1357 }
1358 av_free_packet(pkt);
1359 if (step)
1360 if (cur_stream)
1361 stream_pause(cur_stream);
1362 }
1363 the_end:
1364 av_free(frame);
1365 return 0;
1366 }
1367
1368 static int subtitle_thread(void *arg)
1369 {
1370 VideoState *is = arg;
1371 SubPicture *sp;
1372 AVPacket pkt1, *pkt = &pkt1;
1373 int len1, got_subtitle;
1374 double pts;
1375 int i, j;
1376 int r, g, b, y, u, v, a;
1377
1378 for(;;) {
1379 while (is->paused && !is->subtitleq.abort_request) {
1380 SDL_Delay(10);
1381 }
1382 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1383 break;
1384
1385 if(pkt->data == flush_pkt.data){
1386 avcodec_flush_buffers(is->subtitle_st->codec);
1387 continue;
1388 }
1389 SDL_LockMutex(is->subpq_mutex);
1390 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1391 !is->subtitleq.abort_request) {
1392 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1393 }
1394 SDL_UnlockMutex(is->subpq_mutex);
1395
1396 if (is->subtitleq.abort_request)
1397 goto the_end;
1398
1399 sp = &is->subpq[is->subpq_windex];
1400
1401 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1402 this packet, if any */
1403 pts = 0;
1404 if (pkt->pts != AV_NOPTS_VALUE)
1405 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1406
1407 len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1408 &sp->sub, &got_subtitle,
1409 pkt);
1410 // if (len1 < 0)
1411 // break;
1412 if (got_subtitle && sp->sub.format == 0) {
1413 sp->pts = pts;
1414
1415 for (i = 0; i < sp->sub.num_rects; i++)
1416 {
1417 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1418 {
1419 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1420 y = RGB_TO_Y_CCIR(r, g, b);
1421 u = RGB_TO_U_CCIR(r, g, b, 0);
1422 v = RGB_TO_V_CCIR(r, g, b, 0);
1423 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1424 }
1425 }
1426
1427 /* now we can update the picture count */
1428 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1429 is->subpq_windex = 0;
1430 SDL_LockMutex(is->subpq_mutex);
1431 is->subpq_size++;
1432 SDL_UnlockMutex(is->subpq_mutex);
1433 }
1434 av_free_packet(pkt);
1435 // if (step)
1436 // if (cur_stream)
1437 // stream_pause(cur_stream);
1438 }
1439 the_end:
1440 return 0;
1441 }
1442
1443 /* copy samples for viewing in editor window */
1444 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1445 {
1446 int size, len, channels;
1447
1448 channels = is->audio_st->codec->channels;
1449
1450 size = samples_size / sizeof(short);
1451 while (size > 0) {
1452 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1453 if (len > size)
1454 len = size;
1455 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1456 samples += len;
1457 is->sample_array_index += len;
1458 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1459 is->sample_array_index = 0;
1460 size -= len;
1461 }
1462 }
1463
1464 /* return the new audio buffer size (samples can be added or deleted
1465 to get better sync if video or external master clock) */
1466 static int synchronize_audio(VideoState *is, short *samples,
1467 int samples_size1, double pts)
1468 {
1469 int n, samples_size;
1470 double ref_clock;
1471
1472 n = 2 * is->audio_st->codec->channels;
1473 samples_size = samples_size1;
1474
1475 /* if not master, then we try to remove or add samples to correct the clock */
1476 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1477 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1478 double diff, avg_diff;
1479 int wanted_size, min_size, max_size, nb_samples;
1480
1481 ref_clock = get_master_clock(is);
1482 diff = get_audio_clock(is) - ref_clock;
1483
1484 if (diff < AV_NOSYNC_THRESHOLD) {
1485 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1486 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1487 /* not enough measures to have a correct estimate */
1488 is->audio_diff_avg_count++;
1489 } else {
1490 /* estimate the A-V difference */
1491 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1492
1493 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1494 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1495 nb_samples = samples_size / n;
1496
1497 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1498 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1499 if (wanted_size < min_size)
1500 wanted_size = min_size;
1501 else if (wanted_size > max_size)
1502 wanted_size = max_size;
1503
1504 /* add or remove samples to correction the synchro */
1505 if (wanted_size < samples_size) {
1506 /* remove samples */
1507 samples_size = wanted_size;
1508 } else if (wanted_size > samples_size) {
1509 uint8_t *samples_end, *q;
1510 int nb;
1511
1512 /* add samples */
1513 nb = (samples_size - wanted_size);
1514 samples_end = (uint8_t *)samples + samples_size - n;
1515 q = samples_end + n;
1516 while (nb > 0) {
1517 memcpy(q, samples_end, n);
1518 q += n;
1519 nb -= n;
1520 }
1521 samples_size = wanted_size;
1522 }
1523 }
1524 #if 0
1525 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1526 diff, avg_diff, samples_size - samples_size1,
1527 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1528 #endif
1529 }
1530 } else {
1531 /* too big difference : may be initial PTS errors, so
1532 reset A-V filter */
1533 is->audio_diff_avg_count = 0;
1534 is->audio_diff_cum = 0;
1535 }
1536 }
1537
1538 return samples_size;
1539 }
1540
/* decode one audio frame and returns its uncompressed size in bytes.
   Sets is->audio_buf to the decoded (and, if needed, S16-converted)
   data and stores the frame's presentation time in *pts_ptr.
   Returns -1 when paused or when the audio queue aborts. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                         pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the bytes the decoder consumed */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the sample-format converter when the decoder
               output format differs from the last one seen */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 into S16 samples in audio_buf2 */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of this frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet (sent after a seek) resets the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
1643
1644 /* get the current audio output buffer size, in samples. With SDL, we
1645 cannot have a precise information */
1646 static int audio_write_get_buf_size(VideoState *is)
1647 {
1648 return is->audio_buf_size - is->audio_buf_index;
1649 }
1650
1651
1652 /* prepare a new audio buffer */
1653 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1654 {
1655 VideoState *is = opaque;
1656 int audio_size, len1;
1657 double pts;
1658
1659 audio_callback_time = av_gettime();
1660
1661 while (len > 0) {
1662 if (is->audio_buf_index >= is->audio_buf_size) {
1663 audio_size = audio_decode_frame(is, &pts);
1664 if (audio_size < 0) {
1665 /* if error, just output silence */
1666 is->audio_buf = is->audio_buf1;
1667 is->audio_buf_size = 1024;
1668 memset(is->audio_buf, 0, is->audio_buf_size);
1669 } else {
1670 if (is->show_audio)
1671 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1672 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1673 pts);
1674 is->audio_buf_size = audio_size;
1675 }
1676 is->audio_buf_index = 0;
1677 }
1678 len1 = is->audio_buf_size - is->audio_buf_index;
1679 if (len1 > len)
1680 len1 = len;
1681 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1682 len -= len1;
1683 stream += len1;
1684 is->audio_buf_index += len1;
1685 }
1686 }
1687
/* open a given stream. Return 0 if OK. Applies all user-selected codec
   options, opens the decoder and, per stream type, starts SDL audio or
   spawns the video/subtitle decoding thread. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output: request downmix to at most 2 channels
       before the decoder is opened */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(enc->codec_id);
    /* forward the command-line debugging / speed options to the codec */
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;

    set_context_opts(enc, avcodec_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    /* re-enable delivery of packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
1791
/* close a stream component: abort its packet queue, unblock and join
   the associated decoding thread, then release the codec. Inverse of
   stream_component_open(). */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback, so audio_buf accesses are safe below */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop delivery of packets for this stream and free the decoder */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
1862
1863 static void dump_stream_info(const AVFormatContext *s)
1864 {
1865 AVMetadataTag *tag = NULL;
1866 while ((tag=av_metadata_get(s->metadata,"",tag,AV_METADATA_IGNORE_SUFFIX)))
1867 fprintf(stderr, "%s: %s\n", tag->key, tag->value);
1868 }
1869
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* I/O interrupt callback for libavformat: returns non-zero when the
   currently playing stream has requested an abort, so blocking network
   or disk reads can bail out early */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
1878
/* this thread gets the stream from the disk or the network: it opens
   the input, selects and opens the wanted streams, then demuxes packets
   into the audio/video/subtitle queues, handling pause, seek requests
   and end-of-file until an abort is requested */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, subtitle_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    video_index = -1;
    audio_index = -1;
    subtitle_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow decode_interrupt_cb() to abort blocking I/O for this stream */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* hints for formats (e.g. raw video / grab devices) that need them */
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick the wanted stream of each type; all streams start discarded
       and are re-enabled by stream_component_open() */
    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (wanted_audio_stream-- >= 0 && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if (wanted_video_stream-- >= 0 && !video_disable)
                video_index = i;
            break;
        case CODEC_TYPE_SUBTITLE:
            if (wanted_subtitle_stream-- >= 0 && !video_disable)
                subtitle_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        /* no video: fall back to the audio waveform display */
        if (!display_disable)
            is->show_audio = 1;
    }

    if (subtitle_index >= 0) {
        stream_component_open(is, subtitle_index);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demuxing loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush queues and inject flush packets so the decoder
                   threads reset their codec state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* at EOF, feed an empty packet so the video decoder can
               flush its delayed frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* dispatch the packet to the queue of its stream */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* on failure, ask the main loop to quit */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2104
2105 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2106 {
2107 VideoState *is;
2108
2109 is = av_mallocz(sizeof(VideoState));
2110 if (!is)
2111 return NULL;
2112 av_strlcpy(is->filename, filename, sizeof(is->filename));
2113 is->iformat = iformat;
2114 is->ytop = 0;
2115 is->xleft = 0;
2116
2117 /* start video display */
2118 is->pictq_mutex = SDL_CreateMutex();
2119 is->pictq_cond = SDL_CreateCond();
2120
2121 is->subpq_mutex = SDL_CreateMutex();
2122 is->subpq_cond = SDL_CreateCond();
2123
2124 /* add the refresh timer to draw the picture */
2125 schedule_refresh(is, 40);
2126
2127 is->av_sync_type = av_sync_type;
2128 is->parse_tid = SDL_CreateThread(decode_thread, is);
2129 if (!is->parse_tid) {
2130 av_free(is);
2131 return NULL;
2132 }
2133 return is;
2134 }
2135
2136 static void stream_close(VideoState *is)
2137 {
2138 VideoPicture *vp;
2139 int i;
2140 /* XXX: use a special url_shutdown call to abort parse cleanly */
2141 is->abort_request = 1;
2142 SDL_WaitThread(is->parse_tid, NULL);
2143
2144 /* free all pictures */
2145 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2146 vp = &is->pictq[i];
2147 if (vp->bmp) {
2148 SDL_FreeYUVOverlay(vp->bmp);
2149 vp->bmp = NULL;
2150 }
2151 }
2152 SDL_DestroyMutex(is->pictq_mutex);
2153 SDL_DestroyCond(is->pictq_cond);
2154 SDL_DestroyMutex(is->subpq_mutex);
2155 SDL_DestroyCond(is->subpq_cond);
2156 if (is->img_convert_ctx)
2157 sws_freeContext(is->img_convert_ctx);
2158 av_free(is);
2159 }
2160
/* switch to the next usable stream of the given codec type, wrapping
   around; for subtitles, cycling past the last stream disables them
   (stream_index becomes -1) */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle if no stream of this type is active
       (subtitles may legitimately start from the "disabled" state) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came back to where we started: no alternative stream found */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2209
2210
2211 static void toggle_full_screen(void)
2212 {
2213 is_full_screen = !is_full_screen;
2214 if (!fs_screen_width) {
2215 /* use default SDL method */
2216 // SDL_WM_ToggleFullScreen(screen);
2217 }
2218 video_open(cur_stream);
2219 }
2220
2221 static void toggle_pause(void)
2222 {
2223 if (cur_stream)
2224 stream_pause(cur_stream);
2225 step = 0;
2226 }
2227
2228 static void step_to_next_frame(void)
2229 {
2230 if (cur_stream) {
2231 /* if the stream is paused unpause it, then step */
2232 if (cur_stream->paused)
2233 stream_pause(cur_stream);
2234 }
2235 step = 1;
2236 }
2237
2238 static void do_exit(void)
2239 {
2240 int i;
2241 if (cur_stream) {
2242 stream_close(cur_stream);
2243 cur_stream = NULL;
2244 }
2245 for (i = 0; i < CODEC_TYPE_NB; i++)
2246 av_free(avcodec_opts[i]);
2247 av_free(avformat_opts);
2248 av_free(sws_opts);
2249 if (show_status)
2250 printf("\n");
2251 SDL_Quit();
2252 exit(0);
2253 }
2254
2255 static void toggle_audio_display(void)
2256 {
2257 if (cur_stream) {
2258 cur_stream->show_audio = !cur_stream->show_audio;
2259 }
2260 }
2261
/* handle an event sent by the GUI: main-thread SDL event loop that
   dispatches keyboard shortcuts, mouse seeks, window resizes and the
   internal FF_* events; never returns (exits via do_exit()) */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek of +-10s / +-60s */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* convert the time increment to a byte offset
                           using the stream bitrate when known */
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE));
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            /* click on the window seeks to the proportional position */
            if (cur_stream) {
                int64_t ts;
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                ts = frac*cur_stream->ic->duration;
                if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                    ts += cur_stream->ic->start_time;
                stream_seek(cur_stream, ts, 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* overlay allocation requested by queue_picture(); must
               happen here, in the main thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
2380
2381 static void opt_frame_size(const char *arg)
2382 {
2383 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2384 fprintf(stderr, "Incorrect frame size\n");
2385 exit(1);
2386 }
2387 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2388 fprintf(stderr, "Frame size must be a multiple of 2\n");
2389 exit(1);
2390 }
2391 }
2392
2393 static int opt_width(const char *opt, const char *arg)
2394 {
2395 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2396 return 0;
2397 }
2398
2399 static int opt_height(const char *opt, const char *arg)
2400 {
2401 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2402 return 0;
2403 }
2404
2405 static void opt_format(const char *arg)
2406 {
2407 file_iformat = av_find_input_format(arg);
2408 if (!file_iformat) {
2409 fprintf(stderr, "Unknown input format: %s\n", arg);
2410 exit(1);
2411 }
2412 }
2413
2414 static void opt_frame_pix_fmt(const char *arg)
2415 {
2416 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2417 }
2418
2419 static int opt_sync(const char *opt, const char *arg)
2420 {
2421 if (!strcmp(arg, "audio"))
2422 av_sync_type = AV_SYNC_AUDIO_MASTER;
2423 else if (!strcmp(arg, "video"))
2424 av_sync_type = AV_SYNC_VIDEO_MASTER;
2425 else if (!strcmp(arg, "ext"))
2426 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2427 else {
2428 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2429 exit(1);
2430 }
2431 return 0;
2432 }
2433
2434 static int opt_seek(const char *opt, const char *arg)
2435 {
2436 start_time = parse_time_or_die(opt, arg, 1);
2437 return 0;
2438 }
2439
2440 static int opt_debug(const char *opt, const char *arg)
2441 {
2442 av_log_set_level(99);
2443 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2444 return 0;
2445 }
2446
2447 static int opt_vismv(const char *opt, const char *arg)
2448 {
2449 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2450 return 0;
2451 }
2452
2453 static int opt_thread_count(const char *opt, const char *arg)
2454 {
2455 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2456 #if !HAVE_THREADS
2457 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2458 #endif
2459 return 0;
2460 }
2461
/* Command line option table consumed by cmdutils' parse_options(). */
static const OptionDef options[] = {
    /* informational options: print and exit */
    { "h", OPT_EXIT, {(void*)show_help}, "show help" },
    { "version", OPT_EXIT, {(void*)show_version}, "show version" },
    { "L", OPT_EXIT, {(void*)show_license}, "show license" },
    { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
    /* display options */
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
    /* input/playback control */
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* debugging / decoder tuning (expert) */
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    /* catch-all: forwarded to the AVCodecContext/AVFormatContext/SwsContext
       option system by cmdutils */
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
2500
2501 static void show_help(void)
2502 {
2503 printf("usage: ffplay [options] input_file\n"
2504 "Simple media player\n");
2505 printf("\n");
2506 show_help_options(options, "Main options:\n",
2507 OPT_EXPERT, 0);
2508 show_help_options(options, "\nAdvanced options:\n",
2509 OPT_EXPERT, OPT_EXPERT);
2510 printf("\nWhile playing:\n"
2511 "q, ESC quit\n"
2512 "f toggle full screen\n"
2513 "p, SPC pause\n"
2514 "a cycle audio channel\n"
2515 "v cycle video channel\n"
2516 "t cycle subtitle channel\n"
2517 "w show audio waves\n"
2518 "left/right seek backward/forward 10 seconds\n"
2519 "down/up seek backward/forward 1 minute\n"
2520 "mouse click seek to percentage in file corresponding to fraction of width\n"
2521 );
2522 }
2523
2524 static void opt_input_file(const char *filename)
2525 {
2526 if (!strcmp(filename, "-"))
2527 filename = "pipe:";
2528 input_filename = filename;
2529 }
2530
/* Called from the main */
/* Entry point: registers all FFmpeg components, parses command line
   options, initializes SDL and hands control to event_loop(), which
   never returns (the process exits via do_exit()). */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    /* allocate the per-codec-type option contexts that the "-default"
       catch-all option writes into */
    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
    /* dummy 16x16 context only used as an option holder for sws flags */
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        fprintf(stderr, "An input file must be specified\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop resolution for full-screen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event types we never handle to keep the queue small */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet queued on seek to tell decoders to flush */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}