/* ffplay.c — source listing from the libav.git tree */
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/rtsp.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
29 #include "libavcodec/audioconvert.h"
30 #include "libavcodec/opt.h"
31
32 #include "cmdutils.h"
33
34 #include <SDL.h>
35 #include <SDL_thread.h>
36
37 #ifdef __MINGW32__
38 #undef main /* We don't want SDL to override our main() */
39 #endif
40
41 #undef exit
42
43 const char program_name[] = "FFplay";
44 const int program_birth_year = 2003;
45
46 //#define DEBUG_SYNC
47
48 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
49 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
50 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
51
52 /* SDL audio buffer size, in samples. Should be small to have precise
53 A/V sync as SDL does not have hardware buffer fullness info. */
54 #define SDL_AUDIO_BUFFER_SIZE 1024
55
56 /* no AV sync correction is done if below the AV sync threshold */
57 #define AV_SYNC_THRESHOLD 0.01
58 /* no AV correction is done if too big error */
59 #define AV_NOSYNC_THRESHOLD 10.0
60
61 /* maximum audio speed change to get correct sync */
62 #define SAMPLE_CORRECTION_PERCENT_MAX 10
63
64 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
65 #define AUDIO_DIFF_AVG_NB 20
66
67 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
68 #define SAMPLE_ARRAY_SIZE (2*65536)
69
70 static int sws_flags = SWS_BICUBIC;
71
/* Thread-safe FIFO of demuxed packets, shared between the demux thread
   (producer) and the audio/video/subtitle decoder threads (consumers). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly-linked list head and tail */
    int nb_packets;      /* number of packets currently queued */
    int size;            /* total queued payload bytes (used for the MAX_*Q_SIZE limits) */
    int abort_request;   /* when set, wakes waiters and makes gets fail */
    SDL_mutex *mutex;    /* protects every field above */
    SDL_cond *cond;      /* signalled on put and on abort */
} PacketQueue;
80
81 #define VIDEO_PICTURE_QUEUE_SIZE 1
82 #define SUBPICTURE_QUEUE_SIZE 4
83
/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts;         ///<presentation time stamp for this picture
    SDL_Overlay *bmp;   /* SDL YUV overlay holding the frame's pixels */
    int width, height;  /* source height & width */
    int allocated;      /* set once bmp has been (re)created in the main thread */
} VideoPicture;
90
/* One decoded subtitle waiting to be blended onto the video. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles and display times */
} SubPicture;
95
/* Master clock selection: the stream the other streams synchronise to. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
101
/* Complete state of one open media file: demuxer, per-stream decoder
   threads, packet/picture/subtitle queues and the A/V sync clocks. */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demux ("parse") thread */
    SDL_Thread *video_tid;    /* video decoding thread */
    AVInputFormat *iformat;   /* forced input format, if any */
    int no_background;
    int abort_request;        /* tells all threads to terminate */
    int paused;
    int last_paused;
    int seek_req;             /* a seek is pending */
    int seek_flags;           /* AVSEEK_FLAG_* for the pending seek */
    int64_t seek_pos;         /* target position of the pending seek */
    AVFormatContext *ic;      /* demuxer context */
    int dtg_active_format;

    int audio_stream;         /* audio stream index */

    int av_sync_type;         /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;       /* pts of the most recently decoded audio data */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    uint8_t *audio_buf;          /* presumably points into audio_buf1/audio_buf2 — set outside this view */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;
    uint8_t *audio_pkt_data;     /* read cursor into audio_pkt */
    int audio_pkt_size;          /* bytes remaining in audio_pkt */
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer feeding the waveform display */
    int sample_array_index;
    int last_i_start;        /* display start index used while paused */

    /* subtitle stream state */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;      /* wall-clock time the next frame is due (see video_refresh_timer) */
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* output window geometry */
} VideoState;
176
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int seek_by_bytes;
static int display_disable;
static int show_status;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time; /* compared against av_gettime() to extrapolate the waveform position */

static AVPacket flush_pkt; /* sentinel packet: packet_queue_put() never duplicates it */

/* custom SDL event types used to hand work back to the main loop */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen; /* the SDL output surface */
227
228 /* packet queue handling */
229 static void packet_queue_init(PacketQueue *q)
230 {
231 memset(q, 0, sizeof(PacketQueue));
232 q->mutex = SDL_CreateMutex();
233 q->cond = SDL_CreateCond();
234 }
235
236 static void packet_queue_flush(PacketQueue *q)
237 {
238 AVPacketList *pkt, *pkt1;
239
240 SDL_LockMutex(q->mutex);
241 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
242 pkt1 = pkt->next;
243 av_free_packet(&pkt->pkt);
244 av_freep(&pkt);
245 }
246 q->last_pkt = NULL;
247 q->first_pkt = NULL;
248 q->nb_packets = 0;
249 q->size = 0;
250 SDL_UnlockMutex(q->mutex);
251 }
252
253 static void packet_queue_end(PacketQueue *q)
254 {
255 packet_queue_flush(q);
256 SDL_DestroyMutex(q->mutex);
257 SDL_DestroyCond(q->cond);
258 }
259
260 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
261 {
262 AVPacketList *pkt1;
263
264 /* duplicate the packet */
265 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
266 return -1;
267
268 pkt1 = av_malloc(sizeof(AVPacketList));
269 if (!pkt1)
270 return -1;
271 pkt1->pkt = *pkt;
272 pkt1->next = NULL;
273
274
275 SDL_LockMutex(q->mutex);
276
277 if (!q->last_pkt)
278
279 q->first_pkt = pkt1;
280 else
281 q->last_pkt->next = pkt1;
282 q->last_pkt = pkt1;
283 q->nb_packets++;
284 q->size += pkt1->pkt.size;
285 /* XXX: should duplicate packet data in DV case */
286 SDL_CondSignal(q->cond);
287
288 SDL_UnlockMutex(q->mutex);
289 return 0;
290 }
291
292 static void packet_queue_abort(PacketQueue *q)
293 {
294 SDL_LockMutex(q->mutex);
295
296 q->abort_request = 1;
297
298 SDL_CondSignal(q->cond);
299
300 SDL_UnlockMutex(q->mutex);
301 }
302
303 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
304 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
305 {
306 AVPacketList *pkt1;
307 int ret;
308
309 SDL_LockMutex(q->mutex);
310
311 for(;;) {
312 if (q->abort_request) {
313 ret = -1;
314 break;
315 }
316
317 pkt1 = q->first_pkt;
318 if (pkt1) {
319 q->first_pkt = pkt1->next;
320 if (!q->first_pkt)
321 q->last_pkt = NULL;
322 q->nb_packets--;
323 q->size -= pkt1->pkt.size;
324 *pkt = pkt1->pkt;
325 av_free(pkt1);
326 ret = 1;
327 break;
328 } else if (!block) {
329 ret = 0;
330 break;
331 } else {
332 SDL_CondWait(q->cond, q->mutex);
333 }
334 }
335 SDL_UnlockMutex(q->mutex);
336 return ret;
337 }
338
339 static inline void fill_rectangle(SDL_Surface *screen,
340 int x, int y, int w, int h, int color)
341 {
342 SDL_Rect rect;
343 rect.x = x;
344 rect.y = y;
345 rect.w = w;
346 rect.h = h;
347 SDL_FillRect(screen, &rect, color);
348 }
349
#if 0
/* Dead code (compiled out): draw only the border of a rectangle,
   i.e. clear the four bands of background around the video area. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background: compute the band widths/heights, clamped to 0 */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    /* left band */
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    /* right band */
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    /* top band */
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    /* bottom band */
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
387
388
389
/* fixed-point scale used by the colour conversion macros below */
#define SCALEBITS 10
#define ONE_HALF  (1 << (SCALEBITS - 1))
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))

/* RGB -> Y'CbCr, CCIR/ITU-R 601 limited range (Y 16..235, C centred on 128) */
#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* blend newp over oldp with alpha 'a'; 's' is the number of extra
   fractional bits carried when oldp/newp are sums of several pixels */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* unpack a packed 32-bit ARGB pixel */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* look up the palette index at *s and unpack the AYUV palette entry */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* pack an AYUV pixel */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per subtitle bitmap pixel (palettized: one index byte) */
#define BPP 1
434
435 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
436 {
437 int wrap, wrap3, width2, skip2;
438 int y, u, v, a, u1, v1, a1, w, h;
439 uint8_t *lum, *cb, *cr;
440 const uint8_t *p;
441 const uint32_t *pal;
442 int dstx, dsty, dstw, dsth;
443
444 dstw = av_clip(rect->w, 0, imgw);
445 dsth = av_clip(rect->h, 0, imgh);
446 dstx = av_clip(rect->x, 0, imgw - dstw);
447 dsty = av_clip(rect->y, 0, imgh - dsth);
448 lum = dst->data[0] + dsty * dst->linesize[0];
449 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
450 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
451
452 width2 = (dstw + 1) >> 1;
453 skip2 = dstx >> 1;
454 wrap = dst->linesize[0];
455 wrap3 = rect->pict.linesize[0];
456 p = rect->pict.data[0];
457 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
458
459 if (dsty & 1) {
460 lum += dstx;
461 cb += skip2;
462 cr += skip2;
463
464 if (dstx & 1) {
465 YUVA_IN(y, u, v, a, p, pal);
466 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
467 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
468 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
469 cb++;
470 cr++;
471 lum++;
472 p += BPP;
473 }
474 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
475 YUVA_IN(y, u, v, a, p, pal);
476 u1 = u;
477 v1 = v;
478 a1 = a;
479 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
480
481 YUVA_IN(y, u, v, a, p + BPP, pal);
482 u1 += u;
483 v1 += v;
484 a1 += a;
485 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
486 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
487 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
488 cb++;
489 cr++;
490 p += 2 * BPP;
491 lum += 2;
492 }
493 if (w) {
494 YUVA_IN(y, u, v, a, p, pal);
495 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
496 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
497 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
498 p++;
499 lum++;
500 }
501 p += wrap3 - dstw * BPP;
502 lum += wrap - dstw - dstx;
503 cb += dst->linesize[1] - width2 - skip2;
504 cr += dst->linesize[2] - width2 - skip2;
505 }
506 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
507 lum += dstx;
508 cb += skip2;
509 cr += skip2;
510
511 if (dstx & 1) {
512 YUVA_IN(y, u, v, a, p, pal);
513 u1 = u;
514 v1 = v;
515 a1 = a;
516 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
517 p += wrap3;
518 lum += wrap;
519 YUVA_IN(y, u, v, a, p, pal);
520 u1 += u;
521 v1 += v;
522 a1 += a;
523 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
524 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
525 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
526 cb++;
527 cr++;
528 p += -wrap3 + BPP;
529 lum += -wrap + 1;
530 }
531 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
532 YUVA_IN(y, u, v, a, p, pal);
533 u1 = u;
534 v1 = v;
535 a1 = a;
536 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
537
538 YUVA_IN(y, u, v, a, p + BPP, pal);
539 u1 += u;
540 v1 += v;
541 a1 += a;
542 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
543 p += wrap3;
544 lum += wrap;
545
546 YUVA_IN(y, u, v, a, p, pal);
547 u1 += u;
548 v1 += v;
549 a1 += a;
550 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
551
552 YUVA_IN(y, u, v, a, p + BPP, pal);
553 u1 += u;
554 v1 += v;
555 a1 += a;
556 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
557
558 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
559 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
560
561 cb++;
562 cr++;
563 p += -wrap3 + 2 * BPP;
564 lum += -wrap + 2;
565 }
566 if (w) {
567 YUVA_IN(y, u, v, a, p, pal);
568 u1 = u;
569 v1 = v;
570 a1 = a;
571 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
572 p += wrap3;
573 lum += wrap;
574 YUVA_IN(y, u, v, a, p, pal);
575 u1 += u;
576 v1 += v;
577 a1 += a;
578 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
580 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
581 cb++;
582 cr++;
583 p += -wrap3 + BPP;
584 lum += -wrap + 1;
585 }
586 p += wrap3 + (wrap3 - dstw * BPP);
587 lum += wrap + (wrap - dstw - dstx);
588 cb += dst->linesize[1] - width2 - skip2;
589 cr += dst->linesize[2] - width2 - skip2;
590 }
591 /* handle odd height */
592 if (h) {
593 lum += dstx;
594 cb += skip2;
595 cr += skip2;
596
597 if (dstx & 1) {
598 YUVA_IN(y, u, v, a, p, pal);
599 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
600 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
601 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
602 cb++;
603 cr++;
604 lum++;
605 p += BPP;
606 }
607 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
608 YUVA_IN(y, u, v, a, p, pal);
609 u1 = u;
610 v1 = v;
611 a1 = a;
612 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
613
614 YUVA_IN(y, u, v, a, p + BPP, pal);
615 u1 += u;
616 v1 += v;
617 a1 += a;
618 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
619 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
620 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
621 cb++;
622 cr++;
623 p += 2 * BPP;
624 lum += 2;
625 }
626 if (w) {
627 YUVA_IN(y, u, v, a, p, pal);
628 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
629 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
630 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
631 }
632 }
633 }
634
/* Free every rectangle owned by a decoded subtitle (bitmap, palette and
   the rect itself) and reset the AVSubtitle to an empty state. */
static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_freep(&sp->sub.rects[i]->pict.data[0]); /* bitmap */
        av_freep(&sp->sub.rects[i]->pict.data[1]); /* palette */
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}
650
/* Blit the current video picture to the window, letterboxed to preserve
   the aspect ratio, after blending in any subtitle whose display time
   has been reached. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        /* sample aspect ratio: prefer the stream value over the codec's */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend the subtitle only once its start time is reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* planes 1 and 2 are swapped: the overlay is YV12
                       (Cr plane stored before Cb) */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the picture into the window, keeping the aspect ratio and
           even dimensions */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
764
/* Mathematical modulo: for b > 0 the result is always in [0, b),
   unlike C's '%' operator which can yield negatives. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
773
774 static void video_audio_display(VideoState *s)
775 {
776 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
777 int ch, channels, h, h2, bgcolor, fgcolor;
778 int16_t time_diff;
779
780 /* compute display index : center on currently output samples */
781 channels = s->audio_st->codec->channels;
782 nb_display_channels = channels;
783 if (!s->paused) {
784 n = 2 * channels;
785 delay = audio_write_get_buf_size(s);
786 delay /= n;
787
788 /* to be more precise, we take into account the time spent since
789 the last buffer computation */
790 if (audio_callback_time) {
791 time_diff = av_gettime() - audio_callback_time;
792 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
793 }
794
795 delay -= s->width / 2;
796 if (delay < s->width)
797 delay = s->width;
798
799 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
800
801 h= INT_MIN;
802 for(i=0; i<1000; i+=channels){
803 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
804 int a= s->sample_array[idx];
805 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
806 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
807 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
808 int score= a-d;
809 if(h<score && (b^c)<0){
810 h= score;
811 i_start= idx;
812 }
813 }
814
815 s->last_i_start = i_start;
816 } else {
817 i_start = s->last_i_start;
818 }
819
820 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
821 fill_rectangle(screen,
822 s->xleft, s->ytop, s->width, s->height,
823 bgcolor);
824
825 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
826
827 /* total height for one channel */
828 h = s->height / nb_display_channels;
829 /* graph height / 2 */
830 h2 = (h * 9) / 20;
831 for(ch = 0;ch < nb_display_channels; ch++) {
832 i = i_start + ch;
833 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
834 for(x = 0; x < s->width; x++) {
835 y = (s->sample_array[i] * h2) >> 15;
836 if (y < 0) {
837 y = -y;
838 ys = y1 - y;
839 } else {
840 ys = y1;
841 }
842 fill_rectangle(screen,
843 s->xleft + x, ys, 1, y,
844 fgcolor);
845 i += channels;
846 if (i >= SAMPLE_ARRAY_SIZE)
847 i -= SAMPLE_ARRAY_SIZE;
848 }
849 }
850
851 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
852
853 for(ch = 1;ch < nb_display_channels; ch++) {
854 y = s->ytop + ch * h;
855 fill_rectangle(screen,
856 s->xleft, y, s->width, 1,
857 fgcolor);
858 }
859 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
860 }
861
/* (Re)open the SDL output surface. The size is chosen, in priority
   order, from: the fullscreen dimensions, a user-forced size
   (screen_width/screen_height), the codec's frame size, or a 640x480
   fallback. Returns 0 on success, -1 on failure. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    /* remember the actual surface size for later layout computations */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
899
900 /* display the current picture, if any */
901 static void video_display(VideoState *is)
902 {
903 if(!screen)
904 video_open(cur_stream);
905 if (is->audio_st && is->show_audio)
906 video_audio_display(is);
907 else if (is->video_st)
908 video_image_display(is);
909 }
910
911 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
912 {
913 SDL_Event event;
914 event.type = FF_REFRESH_EVENT;
915 event.user.data1 = opaque;
916 SDL_PushEvent(&event);
917 return 0; /* 0 means stop timer */
918 }
919
920 /* schedule a video refresh in 'delay' ms */
921 static void schedule_refresh(VideoState *is, int delay)
922 {
923 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
924 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
925 }
926
927 /* get the current audio clock value */
928 static double get_audio_clock(VideoState *is)
929 {
930 double pts;
931 int hw_buf_size, bytes_per_sec;
932 pts = is->audio_clock;
933 hw_buf_size = audio_write_get_buf_size(is);
934 bytes_per_sec = 0;
935 if (is->audio_st) {
936 bytes_per_sec = is->audio_st->codec->sample_rate *
937 2 * is->audio_st->codec->channels;
938 }
939 if (bytes_per_sec)
940 pts -= (double)hw_buf_size / bytes_per_sec;
941 return pts;
942 }
943
944 /* get the current video clock value */
945 static double get_video_clock(VideoState *is)
946 {
947 double delta;
948 if (is->paused) {
949 delta = 0;
950 } else {
951 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
952 }
953 return is->video_current_pts + delta;
954 }
955
956 /* get the current external clock value */
957 static double get_external_clock(VideoState *is)
958 {
959 int64_t ti;
960 ti = av_gettime();
961 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
962 }
963
964 /* get the current master clock value */
965 static double get_master_clock(VideoState *is)
966 {
967 double val;
968
969 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
970 if (is->video_st)
971 val = get_video_clock(is);
972 else
973 val = get_audio_clock(is);
974 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
975 if (is->audio_st)
976 val = get_audio_clock(is);
977 else
978 val = get_video_clock(is);
979 } else {
980 val = get_external_clock(is);
981 }
982 return val;
983 }
984
985 /* seek in the stream */
986 static void stream_seek(VideoState *is, int64_t pos, int rel)
987 {
988 if (!is->seek_req) {
989 is->seek_pos = pos;
990 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
991 if (seek_by_bytes)
992 is->seek_flags |= AVSEEK_FLAG_BYTE;
993 is->seek_req = 1;
994 }
995 }
996
997 /* pause or resume the video */
998 static void stream_pause(VideoState *is)
999 {
1000 is->paused = !is->paused;
1001 if (!is->paused) {
1002 is->video_current_pts = get_video_clock(is);
1003 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
1004 }
1005 }
1006
/* called to display each frame: shows the picture at the queue head,
   derives the delay to the next refresh from the master clock, frees
   subtitles whose display time has expired, and re-arms the timer. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* compute nominal delay */
            delay = vp->pts - is->frame_last_pts;
            if (delay <= 0 || delay >= 10.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to follow master synchronisation source */
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
                /* if video is slave, we try to correct big delays by
                   duplicating or deleting a frame */
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;

                /* skip or repeat frame. We take into account the
                   delay to compute the threshold. I still don't know
                   if it is the best guess */
                sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if (diff <= -sync_threshold)
                        delay = 0;            /* video is late: show ASAP */
                    else if (diff >= sync_threshold)
                        delay = 2 * delay;    /* video is early: hold frame longer */
                }
            }

            is->frame_timer += delay;
            /* compute the REAL delay (we need to do that to avoid
               long term errors */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if (actual_delay < 0.010) {
                /* XXX: should skip picture */
                actual_delay = 0.010;
            }
            /* launch timer for next picture */
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

#if defined(DEBUG_SYNC)
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
                   delay, actual_delay, vp->pts, -diff);
#endif

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switched: drop the whole subtitle queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the current subtitle once it has expired or
                           the next one is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print the status line at most every 500 ms */
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1169
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems); runs in response to FF_ALLOC_EVENT:
   (re)creates the YUV overlay for the picture at the queue write index
   and wakes the video thread waiting on pictq_cond. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previously allocated overlay */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if 0
    /* XXX: use generic function */
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
    switch(is->video_st->codec->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUYV422:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        is_yuv = 1;
        break;
    default:
        is_yuv = 0;
        break;
    }
#endif
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    /* tell the waiting video thread the overlay is ready */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1211
/**
 * Convert a decoded frame to YV12 and append it to the picture queue.
 * Blocks until a queue slot is free and (if needed) until the main
 * thread has (re)allocated the SDL overlay for this slot.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;
    AVPicture pict;
    /* cached scaler context; safe as a function-static because only the
       single video thread calls this function */
    static struct SwsContext *img_convert_ctx;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems: post FF_ALLOC_EVENT so the event loop
           runs alloc_picture() for us */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        /* SDL's YV12 overlay stores planes as Y,V,U while the scaler
           emits Y,U,V — hence the 1/2 swap below */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        /* re-read sws_flags each frame so command-line scaler options apply */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        img_convert_ctx = sws_getCachedContext(img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1300
1301 /**
1302 * compute the exact PTS for the picture if it is omitted in the stream
1303 * @param pts1 the dts of the pkt / pts of the frame
1304 */
1305 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1306 {
1307 double frame_delay, pts;
1308
1309 pts = pts1;
1310
1311 if (pts != 0) {
1312 /* update video clock with pts, if present */
1313 is->video_clock = pts;
1314 } else {
1315 pts = is->video_clock;
1316 }
1317 /* update video clock for next frame */
1318 frame_delay = av_q2d(is->video_st->codec->time_base);
1319 /* for MPEG2, the frame can be repeated, so we update the
1320 clock accordingly */
1321 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1322 is->video_clock += frame_delay;
1323
1324 #if defined(DEBUG_SYNC) && 0
1325 {
1326 int ftype;
1327 if (src_frame->pict_type == FF_B_TYPE)
1328 ftype = 'B';
1329 else if (src_frame->pict_type == FF_I_TYPE)
1330 ftype = 'I';
1331 else
1332 ftype = 'P';
1333 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1334 ftype, pts, pts1);
1335 }
1336 #endif
1337 return queue_picture(is, src_frame, pts);
1338 }
1339
1340 static int video_thread(void *arg)
1341 {
1342 VideoState *is = arg;
1343 AVPacket pkt1, *pkt = &pkt1;
1344 int len1, got_picture;
1345 AVFrame *frame= avcodec_alloc_frame();
1346 double pts;
1347
1348 for(;;) {
1349 while (is->paused && !is->videoq.abort_request) {
1350 SDL_Delay(10);
1351 }
1352 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1353 break;
1354
1355 if(pkt->data == flush_pkt.data){
1356 avcodec_flush_buffers(is->video_st->codec);
1357 continue;
1358 }
1359
1360 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1361 this packet, if any */
1362 is->video_st->codec->reordered_opaque= pkt->pts;
1363 len1 = avcodec_decode_video(is->video_st->codec,
1364 frame, &got_picture,
1365 pkt->data, pkt->size);
1366
1367 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1368 && frame->reordered_opaque != AV_NOPTS_VALUE)
1369 pts= frame->reordered_opaque;
1370 else if(pkt->dts != AV_NOPTS_VALUE)
1371 pts= pkt->dts;
1372 else
1373 pts= 0;
1374 pts *= av_q2d(is->video_st->time_base);
1375
1376 // if (len1 < 0)
1377 // break;
1378 if (got_picture) {
1379 if (output_picture2(is, frame, pts) < 0)
1380 goto the_end;
1381 }
1382 av_free_packet(pkt);
1383 if (step)
1384 if (cur_stream)
1385 stream_pause(cur_stream);
1386 }
1387 the_end:
1388 av_free(frame);
1389 return 0;
1390 }
1391
/* Subtitle decoding thread: pull packets off the subtitle queue,
   decode them, convert the palette from RGBA to YUVA and append the
   result to the subtitle picture queue. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* coarse wait while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet marks a seek: reset the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait until the subtitle picture queue has a free slot */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt->data, pkt->size);
        /* decode errors are tolerated (len1 deliberately unchecked) */
        /* format == 0 means bitmap subtitles with an RGBA palette */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette in place from RGBA to YUVA
               so blending onto the YUV overlay is direct */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
    }
 the_end:
    return 0;
}
1466
1467 /* copy samples for viewing in editor window */
1468 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1469 {
1470 int size, len, channels;
1471
1472 channels = is->audio_st->codec->channels;
1473
1474 size = samples_size / sizeof(short);
1475 while (size > 0) {
1476 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1477 if (len > size)
1478 len = size;
1479 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1480 samples += len;
1481 is->sample_array_index += len;
1482 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1483 is->sample_array_index = 0;
1484 size -= len;
1485 }
1486 }
1487
1488 /* return the new audio buffer size (samples can be added or deleted
1489 to get better sync if video or external master clock) */
1490 static int synchronize_audio(VideoState *is, short *samples,
1491 int samples_size1, double pts)
1492 {
1493 int n, samples_size;
1494 double ref_clock;
1495
1496 n = 2 * is->audio_st->codec->channels;
1497 samples_size = samples_size1;
1498
1499 /* if not master, then we try to remove or add samples to correct the clock */
1500 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1501 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1502 double diff, avg_diff;
1503 int wanted_size, min_size, max_size, nb_samples;
1504
1505 ref_clock = get_master_clock(is);
1506 diff = get_audio_clock(is) - ref_clock;
1507
1508 if (diff < AV_NOSYNC_THRESHOLD) {
1509 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1510 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1511 /* not enough measures to have a correct estimate */
1512 is->audio_diff_avg_count++;
1513 } else {
1514 /* estimate the A-V difference */
1515 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1516
1517 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1518 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1519 nb_samples = samples_size / n;
1520
1521 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1522 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1523 if (wanted_size < min_size)
1524 wanted_size = min_size;
1525 else if (wanted_size > max_size)
1526 wanted_size = max_size;
1527
1528 /* add or remove samples to correction the synchro */
1529 if (wanted_size < samples_size) {
1530 /* remove samples */
1531 samples_size = wanted_size;
1532 } else if (wanted_size > samples_size) {
1533 uint8_t *samples_end, *q;
1534 int nb;
1535
1536 /* add samples */
1537 nb = (samples_size - wanted_size);
1538 samples_end = (uint8_t *)samples + samples_size - n;
1539 q = samples_end + n;
1540 while (nb > 0) {
1541 memcpy(q, samples_end, n);
1542 q += n;
1543 nb -= n;
1544 }
1545 samples_size = wanted_size;
1546 }
1547 }
1548 #if 0
1549 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1550 diff, avg_diff, samples_size - samples_size1,
1551 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1552 #endif
1553 }
1554 } else {
1555 /* too big difference : may be initial PTS errors, so
1556 reset A-V filter */
1557 is->audio_diff_avg_count = 0;
1558 is->audio_diff_cum = 0;
1559 }
1560 }
1561
1562 return samples_size;
1563 }
1564
/* decode one audio frame and returns its uncompressed size.
   Loops over the packet queue, decoding successive frames from the
   current packet (a packet can hold several frames) and converting
   them to S16 via is->reformat_ctx when the decoder emits another
   sample format.  On return, is->audio_buf points at the decoded
   data and *pts_ptr holds its presentation time.  Returns -1 when
   paused or aborted. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (is->audio_pkt_size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio2(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        is->audio_pkt_data, is->audio_pkt_size);
            if (len1 < 0) {
                /* if error, we skip the frame */
                is->audio_pkt_size = 0;
                break;
            }

            /* advance past the bytes the decoder consumed */
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the S16 converter when the decoder's output
               format changes */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (interleaved S16) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it: report the clock value for
               this frame, then advance the clock by its duration */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet marks a seek: reset the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
1666
1667 /* get the current audio output buffer size, in samples. With SDL, we
1668 cannot have a precise information */
1669 static int audio_write_get_buf_size(VideoState *is)
1670 {
1671 return is->audio_buf_size - is->audio_buf_index;
1672 }
1673
1674
1675 /* prepare a new audio buffer */
1676 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1677 {
1678 VideoState *is = opaque;
1679 int audio_size, len1;
1680 double pts;
1681
1682 audio_callback_time = av_gettime();
1683
1684 while (len > 0) {
1685 if (is->audio_buf_index >= is->audio_buf_size) {
1686 audio_size = audio_decode_frame(is, &pts);
1687 if (audio_size < 0) {
1688 /* if error, just output silence */
1689 is->audio_buf = is->audio_buf1;
1690 is->audio_buf_size = 1024;
1691 memset(is->audio_buf, 0, is->audio_buf_size);
1692 } else {
1693 if (is->show_audio)
1694 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1695 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1696 pts);
1697 is->audio_buf_size = audio_size;
1698 }
1699 is->audio_buf_index = 0;
1700 }
1701 len1 = is->audio_buf_size - is->audio_buf_index;
1702 if (len1 > len)
1703 len1 = len;
1704 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1705 len -= len1;
1706 stream += len1;
1707 is->audio_buf_index += len1;
1708 }
1709 }
1710
/* open a given stream. Return 0 if OK.
   Finds and opens a decoder for ic->streams[stream_index], applies
   the global command-line options, sets up SDL audio output for
   audio streams, and spawns the video/subtitle worker threads. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output: ask the decoder for at most 2 channels
       (downmix) before it is opened */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    /* apply the global command-line decoding options */
    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;

    set_context_opts(enc, avctx_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

    /* prepare audio output: open the SDL audio device; the callback
       starts pulling data once SDL_PauseAudio(0) is called below */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        /* seed the frame timer/clock before the thread starts */
        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
1814
/* Close one stream component: abort its packet queue, stop its
   worker thread (or the SDL audio device), release per-stream
   resources and finally close the codec. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        /* abort first so the SDL audio callback stops blocking on the queue */
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop the demuxer from feeding this stream, then close the codec */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
1885
1886 static void dump_stream_info(const AVFormatContext *s)
1887 {
1888 if (s->track != 0)
1889 fprintf(stderr, "Track: %d\n", s->track);
1890 if (s->title[0] != '\0')
1891 fprintf(stderr, "Title: %s\n", s->title);
1892 if (s->author[0] != '\0')
1893 fprintf(stderr, "Author: %s\n", s->author);
1894 if (s->copyright[0] != '\0')
1895 fprintf(stderr, "Copyright: %s\n", s->copyright);
1896 if (s->comment[0] != '\0')
1897 fprintf(stderr, "Comment: %s\n", s->comment);
1898 if (s->album[0] != '\0')
1899 fprintf(stderr, "Album: %s\n", s->album);
1900 if (s->year != 0)
1901 fprintf(stderr, "Year: %d\n", s->year);
1902 if (s->genre[0] != '\0')
1903 fprintf(stderr, "Genre: %s\n", s->genre);
1904 }
1905
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* I/O interrupt callback for libavformat: returns non-zero when the
   current stream has been asked to abort, so blocking network/disk
   reads give up promptly */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
1914
/* this thread gets the stream from the disk or the network:
   opens the input, picks the streams to play, then loops reading
   packets and dispatching them to the audio/video/subtitle queues,
   handling pause and seek requests along the way. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    /* forced input parameters from the command line */
    memset(ap, 0, sizeof(*ap));

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* choose one audio and one video stream; the wanted_*_stream
       counters let the user skip over the first matches */
    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
                video_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        /* audio-only: show the waveform display instead of video */
        if (!display_disable)
            is->show_audio = 1;
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int stream_index= -1;
            int64_t seek_target= is->seek_pos;

            if     (is-> video_stream >= 0) stream_index= is-> video_stream;
            else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
            else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;

            if(stream_index>=0){
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
            }

            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* drop queued packets and inject flush markers so the
                   decoder threads reset their codec state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queue are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb)) {
            /* at EOF: feed an empty packet to flush delayed video frames */
            av_init_packet(pkt);
            pkt->data=NULL;
            pkt->size=0;
            pkt->stream_index= is->video_stream;
            packet_queue_put(&is->videoq, pkt);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (url_ferror(ic->pb) == 0) {
                SDL_Delay(100); /* wait for user event */
                continue;
            } else
                break;
        }
        /* dispatch the packet to the queue of its stream */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* on error, tell the main event loop to quit */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2130
2131 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2132 {
2133 VideoState *is;
2134
2135 is = av_mallocz(sizeof(VideoState));
2136 if (!is)
2137 return NULL;
2138 av_strlcpy(is->filename, filename, sizeof(is->filename));
2139 is->iformat = iformat;
2140 is->ytop = 0;
2141 is->xleft = 0;
2142
2143 /* start video display */
2144 is->pictq_mutex = SDL_CreateMutex();
2145 is->pictq_cond = SDL_CreateCond();
2146
2147 is->subpq_mutex = SDL_CreateMutex();
2148 is->subpq_cond = SDL_CreateCond();
2149
2150 /* add the refresh timer to draw the picture */
2151 schedule_refresh(is, 40);
2152
2153 is->av_sync_type = av_sync_type;
2154 is->parse_tid = SDL_CreateThread(decode_thread, is);
2155 if (!is->parse_tid) {
2156 av_free(is);
2157 return NULL;
2158 }
2159 return is;
2160 }
2161
2162 static void stream_close(VideoState *is)
2163 {
2164 VideoPicture *vp;
2165 int i;
2166 /* XXX: use a special url_shutdown call to abort parse cleanly */
2167 is->abort_request = 1;
2168 SDL_WaitThread(is->parse_tid, NULL);
2169
2170 /* free all pictures */
2171 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2172 vp = &is->pictq[i];
2173 if (vp->bmp) {
2174 SDL_FreeYUVOverlay(vp->bmp);
2175 vp->bmp = NULL;
2176 }
2177 }
2178 SDL_DestroyMutex(is->pictq_mutex);
2179 SDL_DestroyCond(is->pictq_cond);
2180 SDL_DestroyMutex(is->subpq_mutex);
2181 SDL_DestroyCond(is->subpq_cond);
2182 }
2183
/* Switch to the next usable stream of the given type, wrapping around
   the stream list.  For subtitles, wrapping past the end selects -1
   ("no subtitle") before cycling back to the first stream. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle from (subtitles may legitimately start at -1) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                /* -1 means "subtitles off" */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came all the way around without finding another stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2232
2233
2234 static void toggle_full_screen(void)
2235 {
2236 is_full_screen = !is_full_screen;
2237 if (!fs_screen_width) {
2238 /* use default SDL method */
2239 // SDL_WM_ToggleFullScreen(screen);
2240 }
2241 video_open(cur_stream);
2242 }
2243
2244 static void toggle_pause(void)
2245 {
2246 if (cur_stream)
2247 stream_pause(cur_stream);
2248 step = 0;
2249 }
2250
2251 static void step_to_next_frame(void)
2252 {
2253 if (cur_stream) {
2254 /* if the stream is paused unpause it, then step */
2255 if (cur_stream->paused)
2256 stream_pause(cur_stream);
2257 }
2258 step = 1;
2259 }
2260
2261 static void do_exit(void)
2262 {
2263 if (cur_stream) {
2264 stream_close(cur_stream);
2265 cur_stream = NULL;
2266 }
2267 if (show_status)
2268 printf("\n");
2269 SDL_Quit();
2270 exit(0);
2271 }
2272
2273 static void toggle_audio_display(void)
2274 {
2275 if (cur_stream) {
2276 cur_stream->show_audio = !cur_stream->show_audio;
2277 }
2278 }
2279
2280 /* handle an event sent by the GUI */
2281 static void event_loop(void)
2282 {
2283 SDL_Event event;
2284 double incr, pos, frac;
2285
2286 for(;;) {
2287 SDL_WaitEvent(&event);
2288 switch(event.type) {
2289 case SDL_KEYDOWN:
2290 switch(event.key.keysym.sym) {
2291 case SDLK_ESCAPE:
2292 case SDLK_q:
2293 do_exit();
2294 break;
2295 case SDLK_f:
2296 toggle_full_screen();
2297 break;
2298 case SDLK_p:
2299 case SDLK_SPACE:
2300 toggle_pause();
2301 break;
2302 case SDLK_s: //S: Step to next frame
2303 step_to_next_frame();
2304 break;
2305 case SDLK_a:
2306 if (cur_stream)
2307 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2308 break;
2309 case SDLK_v:
2310 if (cur_stream)
2311 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2312 break;
2313 case SDLK_t:
2314 if (cur_stream)
2315 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2316 break;
2317 case SDLK_w:
2318 toggle_audio_display();
2319 break;
2320 case SDLK_LEFT:
2321 incr = -10.0;
2322 goto do_seek;
2323 case SDLK_RIGHT:
2324 incr = 10.0;
2325 goto do_seek;
2326 case SDLK_UP:
2327 incr = 60.0;
2328 goto do_seek;
2329 case SDLK_DOWN:
2330 incr = -60.0;
2331 do_seek:
2332 if (cur_stream) {
2333 if (seek_by_bytes) {
2334 pos = url_ftell(cur_stream->ic->pb);
2335 if (cur_stream->ic->bit_rate)
2336 incr *= cur_stream->ic->bit_rate / 60.0;
2337 else
2338 incr *= 180000.0;
2339 pos += incr;
2340 stream_seek(cur_stream, pos, incr);
2341 } else {
2342 pos = get_master_clock(cur_stream);
2343 pos += incr;
2344 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2345 }
2346 }
2347 break;
2348 default:
2349 break;
2350 }
2351 break;
2352 case SDL_MOUSEBUTTONDOWN:
2353 if (cur_stream) {
2354 int ns, hh, mm, ss;
2355 int tns, thh, tmm, tss;
2356 tns = cur_stream->ic->duration/1000000LL;
2357 thh = tns/3600;
2358 tmm = (tns%3600)/60;
2359 tss = (tns%60);
2360 frac = (double)event.button.x/(double)cur_stream->width;
2361 ns = frac*tns;
2362 hh = ns/3600;
2363 mm = (ns%3600)/60;
2364 ss = (ns%60);
2365 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2366 hh, mm, ss, thh, tmm, tss);
2367 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2368 }
2369 break;
2370 case SDL_VIDEORESIZE:
2371 if (cur_stream) {
2372 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2373 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2374 screen_width = cur_stream->width = event.resize.w;
2375 screen_height= cur_stream->height= event.resize.h;
2376 }
2377 break;
2378 case SDL_QUIT:
2379 case FF_QUIT_EVENT:
2380 do_exit();
2381 break;
2382 case FF_ALLOC_EVENT:
2383 video_open(event.user.data1);
2384 alloc_picture(event.user.data1);
2385 break;
2386 case FF_REFRESH_EVENT:
2387 video_refresh_timer(event.user.data1);
2388 break;
2389 default:
2390 break;
2391 }
2392 }
2393 }
2394
2395 static void opt_frame_size(const char *arg)
2396 {
2397 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2398 fprintf(stderr, "Incorrect frame size\n");
2399 exit(1);
2400 }
2401 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2402 fprintf(stderr, "Frame size must be a multiple of 2\n");
2403 exit(1);
2404 }
2405 }
2406
/* "-x": force the displayed window width in pixels (>= 1);
 * parse_number_or_die() exits on invalid input. */
static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2412
/* "-y": force the displayed window height in pixels (>= 1);
 * parse_number_or_die() exits on invalid input. */
static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2418
2419 static void opt_format(const char *arg)
2420 {
2421 file_iformat = av_find_input_format(arg);
2422 if (!file_iformat) {
2423 fprintf(stderr, "Unknown input format: %s\n", arg);
2424 exit(1);
2425 }
2426 }
2427
2428 static void opt_frame_pix_fmt(const char *arg)
2429 {
2430 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2431 }
2432
2433 static int opt_sync(const char *opt, const char *arg)
2434 {
2435 if (!strcmp(arg, "audio"))
2436 av_sync_type = AV_SYNC_AUDIO_MASTER;
2437 else if (!strcmp(arg, "video"))
2438 av_sync_type = AV_SYNC_VIDEO_MASTER;
2439 else if (!strcmp(arg, "ext"))
2440 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2441 else {
2442 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2443 exit(1);
2444 }
2445 return 0;
2446 }
2447
/* "-ss": set the start position; parse_time_or_die() presumably
 * accepts the usual ffmpeg time syntax and exits on bad input --
 * see cmdutils for the exact formats. */
static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}
2453
/* "-debug": store the numeric debug flags and raise the log level to
 * maximum so debug output is actually emitted. */
static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}
2460
/* "-vismv": set the motion-vector visualization flags (any int,
 * negative values allowed). */
static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}
2466
/* "-threads": set the decoder thread count; warns when the binary was
 * built without real thread support. */
static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}
2475
2476 static const OptionDef options[] = {
2477 { "h", OPT_EXIT, {(void*)show_help}, "show help" },
2478 { "version", OPT_EXIT, {(void*)show_version}, "show version" },
2479 { "L", OPT_EXIT, {(void*)show_license}, "show license" },
2480 { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
2481 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2482 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2483 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2484 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2485 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2486 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2487 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2488 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
2489 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2490 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2491 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2492 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2493 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2494 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2495 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2496 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2497 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2498 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2499 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2500 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2501 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2502 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2503 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2504 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2505 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2506 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
2507 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2508 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2509 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2510 { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2511 { NULL, },
2512 };
2513
2514 static void show_help(void)
2515 {
2516 printf("usage: ffplay [options] input_file\n"
2517 "Simple media player\n");
2518 printf("\n");
2519 show_help_options(options, "Main options:\n",
2520 OPT_EXPERT, 0);
2521 show_help_options(options, "\nAdvanced options:\n",
2522 OPT_EXPERT, OPT_EXPERT);
2523 printf("\nWhile playing:\n"
2524 "q, ESC quit\n"
2525 "f toggle full screen\n"
2526 "p, SPC pause\n"
2527 "a cycle audio channel\n"
2528 "v cycle video channel\n"
2529 "t cycle subtitle channel\n"
2530 "w show audio waves\n"
2531 "left/right seek backward/forward 10 seconds\n"
2532 "down/up seek backward/forward 1 minute\n"
2533 "mouse click seek to percentage in file corresponding to fraction of width\n"
2534 );
2535 }
2536
2537 static void opt_input_file(const char *filename)
2538 {
2539 if (!strcmp(filename, "-"))
2540 filename = "pipe:";
2541 input_filename = filename;
2542 }
2543
2544 /* Called from the main */
2545 int main(int argc, char **argv)
2546 {
2547 int flags, i;
2548
2549 /* register all codecs, demux and protocols */
2550 avcodec_register_all();
2551 avdevice_register_all();
2552 av_register_all();
2553
2554 for(i=0; i<CODEC_TYPE_NB; i++){
2555 avctx_opts[i]= avcodec_alloc_context2(i);
2556 }
2557 avformat_opts = avformat_alloc_context();
2558 sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
2559
2560 show_banner();
2561
2562 parse_options(argc, argv, options, opt_input_file);
2563
2564 if (!input_filename) {
2565 fprintf(stderr, "An input file must be specified\n");
2566 exit(1);
2567 }
2568
2569 if (display_disable) {
2570 video_disable = 1;
2571 }
2572 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2573 #if !defined(__MINGW32__) && !defined(__APPLE__)
2574 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2575 #endif
2576 if (SDL_Init (flags)) {
2577 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2578 exit(1);
2579 }
2580
2581 if (!display_disable) {
2582 #if HAVE_SDL_VIDEO_SIZE
2583 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2584 fs_screen_width = vi->current_w;
2585 fs_screen_height = vi->current_h;
2586 #endif
2587 }
2588
2589 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2590 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2591 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2592 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2593
2594 av_init_packet(&flush_pkt);
2595 flush_pkt.data= "FLUSH";
2596
2597 cur_stream = stream_open(input_filename, file_iformat);
2598
2599 event_loop();
2600
2601 /* never returns */
2602
2603 return 0;
2604 }