HAVE_3DNOW --> HAVE_AMD3DNOW to sync with latest configure changes.
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/rtsp.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
29 #include "libavcodec/audioconvert.h"
30 #include "libavcodec/opt.h"
31
32 #include "cmdutils.h"
33
34 #include <SDL.h>
35 #include <SDL_thread.h>
36
37 #ifdef __MINGW32__
38 #undef main /* We don't want SDL to override our main() */
39 #endif
40
41 #undef exit
42
const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

/* upper bounds on demuxed-but-undecoded bytes buffered per stream type */
#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* scaler quality used when converting decoded frames for display */
static int sws_flags = SWS_BICUBIC;
71
/* Thread-safe FIFO of demuxed packets, shared between the demux thread
   (producer) and one decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list: head / tail */
    int nb_packets;    /* number of queued packets */
    int size;          /* total payload bytes queued (for high-water checks) */
    int abort_request; /* when set, blocked readers return immediately */
    SDL_mutex *mutex;  /* guards all fields above */
    SDL_cond *cond;    /* signalled on put and on abort */
} PacketQueue;
80
#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame, already uploaded into an SDL YUV overlay. */
typedef struct VideoPicture {
    double pts;        ///<presentation time stamp for this picture
    SDL_Overlay *bmp;  /* overlay holding the frame's pixels; NULL until allocated */
    int width, height; /* source height & width */
    int allocated;     /* set by the main thread once the overlay exists */
} VideoPicture;
90
/* One decoded subtitle together with its presentation time. */
typedef struct SubPicture {
    double pts;    /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rects (palettized bitmaps) */
} SubPicture;
95
/* which clock the other streams are slaved to for A/V synchronisation */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
101
/* All state for one open media file: demuxer context, per-stream decoder
   queues, clocks, and the display/seek bookkeeping shared between the
   parse, video, subtitle, audio-callback and main (event) threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;   /* demux thread */
    SDL_Thread *video_tid;   /* video decode thread */
    AVInputFormat *iformat;  /* forced input format, if any */
    int no_background;
    int abort_request;       /* tells all threads to shut down */
    int paused;
    int last_paused;         /* previous pause state, to detect transitions */
    int seek_req;            /* a seek is pending (consumed by the demux thread) */
    int seek_flags;
    int64_t seek_pos;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;        /* index of the audio stream, or -1 */

    int av_sync_type;        /* one of AV_SYNC_* above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;      /* pts of the last decoded audio, in seconds */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;   /* size of the SDL audio buffer, in bytes */
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    uint8_t *audio_buf;          /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;
    uint8_t *audio_pkt_data;     /* read cursor into audio_pkt */
    int audio_pkt_size;
    enum SampleFormat audio_src_fmt; /* decoder output format, for reformatting */
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring of recent samples for the scope view */
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;    /* subtitle decode thread */
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE]; /* decoded subtitle ring buffer */
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;      /* absolute time the next frame should be shown */
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE]; /* decoded picture ring buffer */
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* display area inside the SDL surface */
} VideoState;
176
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int seek_by_bytes;
static int display_disable;
static int show_status;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time; /* av_gettime() at the start of the last SDL audio callback */

static AVPacket flush_pkt; /* sentinel packet queued on seek to flush decoder state */

/* custom SDL event codes used to hand work between threads */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen; /* the SDL output window surface */
227
228 /* packet queue handling */
229 static void packet_queue_init(PacketQueue *q)
230 {
231 memset(q, 0, sizeof(PacketQueue));
232 q->mutex = SDL_CreateMutex();
233 q->cond = SDL_CreateCond();
234 }
235
236 static void packet_queue_flush(PacketQueue *q)
237 {
238 AVPacketList *pkt, *pkt1;
239
240 SDL_LockMutex(q->mutex);
241 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
242 pkt1 = pkt->next;
243 av_free_packet(&pkt->pkt);
244 av_freep(&pkt);
245 }
246 q->last_pkt = NULL;
247 q->first_pkt = NULL;
248 q->nb_packets = 0;
249 q->size = 0;
250 SDL_UnlockMutex(q->mutex);
251 }
252
253 static void packet_queue_end(PacketQueue *q)
254 {
255 packet_queue_flush(q);
256 SDL_DestroyMutex(q->mutex);
257 SDL_DestroyCond(q->cond);
258 }
259
260 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
261 {
262 AVPacketList *pkt1;
263
264 /* duplicate the packet */
265 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
266 return -1;
267
268 pkt1 = av_malloc(sizeof(AVPacketList));
269 if (!pkt1)
270 return -1;
271 pkt1->pkt = *pkt;
272 pkt1->next = NULL;
273
274
275 SDL_LockMutex(q->mutex);
276
277 if (!q->last_pkt)
278
279 q->first_pkt = pkt1;
280 else
281 q->last_pkt->next = pkt1;
282 q->last_pkt = pkt1;
283 q->nb_packets++;
284 q->size += pkt1->pkt.size;
285 /* XXX: should duplicate packet data in DV case */
286 SDL_CondSignal(q->cond);
287
288 SDL_UnlockMutex(q->mutex);
289 return 0;
290 }
291
292 static void packet_queue_abort(PacketQueue *q)
293 {
294 SDL_LockMutex(q->mutex);
295
296 q->abort_request = 1;
297
298 SDL_CondSignal(q->cond);
299
300 SDL_UnlockMutex(q->mutex);
301 }
302
303 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
304 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
305 {
306 AVPacketList *pkt1;
307 int ret;
308
309 SDL_LockMutex(q->mutex);
310
311 for(;;) {
312 if (q->abort_request) {
313 ret = -1;
314 break;
315 }
316
317 pkt1 = q->first_pkt;
318 if (pkt1) {
319 q->first_pkt = pkt1->next;
320 if (!q->first_pkt)
321 q->last_pkt = NULL;
322 q->nb_packets--;
323 q->size -= pkt1->pkt.size;
324 *pkt = pkt1->pkt;
325 av_free(pkt1);
326 ret = 1;
327 break;
328 } else if (!block) {
329 ret = 0;
330 break;
331 } else {
332 SDL_CondWait(q->cond, q->mutex);
333 }
334 }
335 SDL_UnlockMutex(q->mutex);
336 return ret;
337 }
338
339 static inline void fill_rectangle(SDL_Surface *screen,
340 int x, int y, int w, int h, int color)
341 {
342 SDL_Rect rect;
343 rect.x = x;
344 rect.y = y;
345 rect.w = w;
346 rect.h = h;
347 SDL_FillRect(screen, &rect, color);
348 }
349
#if 0 /* disabled: the only caller (in video_image_display) is commented out */
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    /* left band */
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    /* right band */
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    /* top band */
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    /* bottom band */
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
387
388
389
/* Fixed-point (SCALEBITS fractional bits) RGB -> YCbCr CCIR 601
   "studio swing" conversion helpers (Y in [16..235], Cb/Cr around 128).
   All macro arguments are now parenthesized, and the multi-statement
   macros are wrapped in do { } while (0), so expression arguments and
   use in unbraced if/else bodies are safe. */
#define SCALEBITS 10
#define ONE_HALF  (1 << (SCALEBITS - 1))
#define FIX(x)    ((int) ((x) * (1 << SCALEBITS) + 0.5))

#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * (r1) - FIX(0.33126*224.0/255.0) * (g1) + \
     FIX(0.50000*224.0/255.0) * (b1) + (ONE_HALF << (shift)) - 1) >> (SCALEBITS + (shift))) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * (r1) - FIX(0.41869*224.0/255.0) * (g1) - \
   FIX(0.08131*224.0/255.0) * (b1) + (ONE_HALF << (shift)) - 1) >> (SCALEBITS + (shift))) + 128)

/* Blend newp over oldp with alpha a (0..255); oldp/newp carry s extra
   fractional bits (used to average 2 or 4 chroma samples in one go). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp) << (s)) * (255 - (a)) + (newp) * (a)) / (255 << (s)))

/* Split one packed 32-bit ARGB word at s into its components. */
#define RGBA_IN(r, g, b, a, s)\
do {\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
} while (0)

/* Look up palette index *s in pal (packed AYUV words) and split it. */
#define YUVA_IN(y, u, v, a, s, pal)\
do {\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
} while (0)

/* Pack y/u/v/a back into one 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
do {\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
} while (0)

/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
434
/* Alpha-blend one palettized subtitle rectangle onto a YUV 4:2:0 picture.
   The rect is clipped to the image; luma is blended per pixel, chroma is
   blended on 2x2 blocks (pairs of columns over pairs of rows), with the
   odd leading/trailing row and column handled separately.
   pal holds the rect's palette as packed AYUV words. */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clip the destination rectangle to the picture */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = (dstw + 1) >> 1;   /* chroma width */
    skip2 = dstx >> 1;          /* chroma x offset */
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* odd first row: chroma blended from a single luma row */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* average of the 2 samples, hence 1 extra fractional bit */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main body: process two luma rows per iteration */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        /* odd leading column: chroma from a 1x2 luma block */
        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        /* 2x2 blocks: chroma averaged over 4 luma samples */
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            /* NOTE(review): reads the same pixel p again -- the other
               2-wide loops read p + BPP here; looks like a copy/paste
               slip, confirm against upstream before changing */
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            /* NOTE(review): same here -- p + BPP expected? */
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        /* odd trailing column */
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* NOTE(review): the 2-sample sums u1/v1 seem intended here
               (as in the other 2-wide loops), not the single samples u/v;
               confirm before changing */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
632
633 static void free_subpicture(SubPicture *sp)
634 {
635 int i;
636
637 for (i = 0; i < sp->sub.num_rects; i++)
638 {
639 av_freep(&sp->sub.rects[i]->pict.data[0]);
640 av_freep(&sp->sub.rects[i]->pict.data[1]);
641 av_freep(&sp->sub.rects[i]);
642 }
643
644 av_free(sp->sub.rects);
645
646 memset(&sp->sub, 0, sizeof(AVSubtitle));
647 }
648
/* Show the picture at the read index of the picture queue: compute the
   display aspect ratio, blend any due subtitle into the overlay, letterbox
   the result inside the window, and hand it to SDL. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        /* pixel aspect: prefer the stream value, fall back to the codec's */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert pixel aspect to display aspect */
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend the subtitle only once its start time has arrived */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* the YV12 overlay stores the chroma planes in V,U
                       order, hence planes 1 and 2 are swapped here */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: largest even-sized rectangle with the right aspect */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
762
/* Mathematical modulo: like a % b but the result is always in [0, b)
   for positive b, even when a is negative. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r < 0 ? r + b : r;
}
771
772 static void video_audio_display(VideoState *s)
773 {
774 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
775 int ch, channels, h, h2, bgcolor, fgcolor;
776 int16_t time_diff;
777
778 /* compute display index : center on currently output samples */
779 channels = s->audio_st->codec->channels;
780 nb_display_channels = channels;
781 if (!s->paused) {
782 n = 2 * channels;
783 delay = audio_write_get_buf_size(s);
784 delay /= n;
785
786 /* to be more precise, we take into account the time spent since
787 the last buffer computation */
788 if (audio_callback_time) {
789 time_diff = av_gettime() - audio_callback_time;
790 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
791 }
792
793 delay -= s->width / 2;
794 if (delay < s->width)
795 delay = s->width;
796
797 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
798
799 h= INT_MIN;
800 for(i=0; i<1000; i+=channels){
801 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
802 int a= s->sample_array[idx];
803 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
804 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
805 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
806 int score= a-d;
807 if(h<score && (b^c)<0){
808 h= score;
809 i_start= idx;
810 }
811 }
812
813 s->last_i_start = i_start;
814 } else {
815 i_start = s->last_i_start;
816 }
817
818 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
819 fill_rectangle(screen,
820 s->xleft, s->ytop, s->width, s->height,
821 bgcolor);
822
823 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
824
825 /* total height for one channel */
826 h = s->height / nb_display_channels;
827 /* graph height / 2 */
828 h2 = (h * 9) / 20;
829 for(ch = 0;ch < nb_display_channels; ch++) {
830 i = i_start + ch;
831 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
832 for(x = 0; x < s->width; x++) {
833 y = (s->sample_array[i] * h2) >> 15;
834 if (y < 0) {
835 y = -y;
836 ys = y1 - y;
837 } else {
838 ys = y1;
839 }
840 fill_rectangle(screen,
841 s->xleft + x, ys, 1, y,
842 fgcolor);
843 i += channels;
844 if (i >= SAMPLE_ARRAY_SIZE)
845 i -= SAMPLE_ARRAY_SIZE;
846 }
847 }
848
849 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
850
851 for(ch = 1;ch < nb_display_channels; ch++) {
852 y = s->ytop + ch * h;
853 fill_rectangle(screen,
854 s->xleft, y, s->width, 1,
855 fgcolor);
856 }
857 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
858 }
859
860 static int video_open(VideoState *is){
861 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
862 int w,h;
863
864 if(is_full_screen) flags |= SDL_FULLSCREEN;
865 else flags |= SDL_RESIZABLE;
866
867 if (is_full_screen && fs_screen_width) {
868 w = fs_screen_width;
869 h = fs_screen_height;
870 } else if(!is_full_screen && screen_width){
871 w = screen_width;
872 h = screen_height;
873 }else if (is->video_st && is->video_st->codec->width){
874 w = is->video_st->codec->width;
875 h = is->video_st->codec->height;
876 } else {
877 w = 640;
878 h = 480;
879 }
880 #ifndef __APPLE__
881 screen = SDL_SetVideoMode(w, h, 0, flags);
882 #else
883 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
884 screen = SDL_SetVideoMode(w, h, 24, flags);
885 #endif
886 if (!screen) {
887 fprintf(stderr, "SDL: could not set video mode - exiting\n");
888 return -1;
889 }
890 SDL_WM_SetCaption("FFplay", "FFplay");
891
892 is->width = screen->w;
893 is->height = screen->h;
894
895 return 0;
896 }
897
898 /* display the current picture, if any */
899 static void video_display(VideoState *is)
900 {
901 if(!screen)
902 video_open(cur_stream);
903 if (is->audio_st && is->show_audio)
904 video_audio_display(is);
905 else if (is->video_st)
906 video_image_display(is);
907 }
908
909 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
910 {
911 SDL_Event event;
912 event.type = FF_REFRESH_EVENT;
913 event.user.data1 = opaque;
914 SDL_PushEvent(&event);
915 return 0; /* 0 means stop timer */
916 }
917
918 /* schedule a video refresh in 'delay' ms */
919 static void schedule_refresh(VideoState *is, int delay)
920 {
921 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
922 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
923 }
924
925 /* get the current audio clock value */
926 static double get_audio_clock(VideoState *is)
927 {
928 double pts;
929 int hw_buf_size, bytes_per_sec;
930 pts = is->audio_clock;
931 hw_buf_size = audio_write_get_buf_size(is);
932 bytes_per_sec = 0;
933 if (is->audio_st) {
934 bytes_per_sec = is->audio_st->codec->sample_rate *
935 2 * is->audio_st->codec->channels;
936 }
937 if (bytes_per_sec)
938 pts -= (double)hw_buf_size / bytes_per_sec;
939 return pts;
940 }
941
942 /* get the current video clock value */
943 static double get_video_clock(VideoState *is)
944 {
945 double delta;
946 if (is->paused) {
947 delta = 0;
948 } else {
949 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
950 }
951 return is->video_current_pts + delta;
952 }
953
954 /* get the current external clock value */
955 static double get_external_clock(VideoState *is)
956 {
957 int64_t ti;
958 ti = av_gettime();
959 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
960 }
961
962 /* get the current master clock value */
963 static double get_master_clock(VideoState *is)
964 {
965 double val;
966
967 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
968 if (is->video_st)
969 val = get_video_clock(is);
970 else
971 val = get_audio_clock(is);
972 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
973 if (is->audio_st)
974 val = get_audio_clock(is);
975 else
976 val = get_video_clock(is);
977 } else {
978 val = get_external_clock(is);
979 }
980 return val;
981 }
982
983 /* seek in the stream */
984 static void stream_seek(VideoState *is, int64_t pos, int rel)
985 {
986 if (!is->seek_req) {
987 is->seek_pos = pos;
988 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
989 if (seek_by_bytes)
990 is->seek_flags |= AVSEEK_FLAG_BYTE;
991 is->seek_req = 1;
992 }
993 }
994
995 /* pause or resume the video */
996 static void stream_pause(VideoState *is)
997 {
998 is->paused = !is->paused;
999 if (!is->paused) {
1000 is->video_current_pts = get_video_clock(is);
1001 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
1002 }
1003 }
1004
1005 /* called to display each frame */
/* called to display each frame */
/* Runs on the main thread for each FF_REFRESH_EVENT: shows the next
   queued picture (adjusting its delay against the master clock), expires
   or flushes subtitles, reschedules itself, and prints the status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* compute nominal delay */
            delay = vp->pts - is->frame_last_pts;
            if (delay <= 0 || delay >= 10.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to follow master synchronisation source */
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
                /* if video is slave, we try to correct big delays by
                   duplicating or deleting a frame */
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;

                /* skip or repeat frame. We take into account the
                   delay to compute the threshold. I still don't know
                   if it is the best guess */
                sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if (diff <= -sync_threshold)
                        delay = 0;        /* video late: show immediately */
                    else if (diff >= sync_threshold)
                        delay = 2 * delay; /* video early: hold the frame */
                }
            }

            is->frame_timer += delay;
            /* compute the REAL delay (we need to do that to avoid
               long term errors */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if (actual_delay < 0.010) {
                /* XXX: should skip picture */
                actual_delay = 0.010;
            }
            /* launch timer for next picture */
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

#if defined(DEBUG_SYNC)
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
                   delay, actual_delay, vp->pts, -diff);
#endif

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: drop every queued
                       subpicture and wake the subtitle thread */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the head subpicture once its display time
                           has ended or the next one is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time; /* throttle: print at most every 500 ms */
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1167
1168 /* allocate a picture (needs to do that in main thread to avoid
1169 potential locking problems */
1170 static void alloc_picture(void *opaque)
1171 {
1172 VideoState *is = opaque;
1173 VideoPicture *vp;
1174
1175 vp = &is->pictq[is->pictq_windex];
1176
1177 if (vp->bmp)
1178 SDL_FreeYUVOverlay(vp->bmp);
1179
1180 #if 0
1181 /* XXX: use generic function */
1182 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1183 switch(is->video_st->codec->pix_fmt) {
1184 case PIX_FMT_YUV420P:
1185 case PIX_FMT_YUV422P:
1186 case PIX_FMT_YUV444P:
1187 case PIX_FMT_YUYV422:
1188 case PIX_FMT_YUV410P:
1189 case PIX_FMT_YUV411P:
1190 is_yuv = 1;
1191 break;
1192 default:
1193 is_yuv = 0;
1194 break;
1195 }
1196 #endif
1197 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1198 is->video_st->codec->height,
1199 SDL_YV12_OVERLAY,
1200 screen);
1201 vp->width = is->video_st->codec->width;
1202 vp->height = is->video_st->codec->height;
1203
1204 SDL_LockMutex(is->pictq_mutex);
1205 vp->allocated = 1;
1206 SDL_CondSignal(is->pictq_cond);
1207 SDL_UnlockMutex(is->pictq_mutex);
1208 }
1209
1210 /**
1211 *
1212 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1213 */
1214 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1215 {
1216 VideoPicture *vp;
1217 int dst_pix_fmt;
1218 AVPicture pict;
1219 static struct SwsContext *img_convert_ctx;
1220
1221 /* wait until we have space to put a new picture */
1222 SDL_LockMutex(is->pictq_mutex);
1223 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1224 !is->videoq.abort_request) {
1225 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1226 }
1227 SDL_UnlockMutex(is->pictq_mutex);
1228
1229 if (is->videoq.abort_request)
1230 return -1;
1231
1232 vp = &is->pictq[is->pictq_windex];
1233
1234 /* alloc or resize hardware picture buffer */
1235 if (!vp->bmp ||
1236 vp->width != is->video_st->codec->width ||
1237 vp->height != is->video_st->codec->height) {
1238 SDL_Event event;
1239
1240 vp->allocated = 0;
1241
1242 /* the allocation must be done in the main thread to avoid
1243 locking problems */
1244 event.type = FF_ALLOC_EVENT;
1245 event.user.data1 = is;
1246 SDL_PushEvent(&event);
1247
1248 /* wait until the picture is allocated */
1249 SDL_LockMutex(is->pictq_mutex);
1250 while (!vp->allocated && !is->videoq.abort_request) {
1251 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1252 }
1253 SDL_UnlockMutex(is->pictq_mutex);
1254
1255 if (is->videoq.abort_request)
1256 return -1;
1257 }
1258
1259 /* if the frame is not skipped, then display it */
1260 if (vp->bmp) {
1261 /* get a pointer on the bitmap */
1262 SDL_LockYUVOverlay (vp->bmp);
1263
1264 dst_pix_fmt = PIX_FMT_YUV420P;
1265 pict.data[0] = vp->bmp->pixels[0];
1266 pict.data[1] = vp->bmp->pixels[2];
1267 pict.data[2] = vp->bmp->pixels[1];
1268
1269 pict.linesize[0] = vp->bmp->pitches[0];
1270 pict.linesize[1] = vp->bmp->pitches[2];
1271 pict.linesize[2] = vp->bmp->pitches[1];
1272 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1273 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1274 is->video_st->codec->width, is->video_st->codec->height,
1275 is->video_st->codec->pix_fmt,
1276 is->video_st->codec->width, is->video_st->codec->height,
1277 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1278 if (img_convert_ctx == NULL) {
1279 fprintf(stderr, "Cannot initialize the conversion context\n");
1280 exit(1);
1281 }
1282 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1283 0, is->video_st->codec->height, pict.data, pict.linesize);
1284 /* update the bitmap content */
1285 SDL_UnlockYUVOverlay(vp->bmp);
1286
1287 vp->pts = pts;
1288
1289 /* now we can update the picture count */
1290 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1291 is->pictq_windex = 0;
1292 SDL_LockMutex(is->pictq_mutex);
1293 is->pictq_size++;
1294 SDL_UnlockMutex(is->pictq_mutex);
1295 }
1296 return 0;
1297 }
1298
1299 /**
1300 * compute the exact PTS for the picture if it is omitted in the stream
1301 * @param pts1 the dts of the pkt / pts of the frame
1302 */
1303 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1304 {
1305 double frame_delay, pts;
1306
1307 pts = pts1;
1308
1309 if (pts != 0) {
1310 /* update video clock with pts, if present */
1311 is->video_clock = pts;
1312 } else {
1313 pts = is->video_clock;
1314 }
1315 /* update video clock for next frame */
1316 frame_delay = av_q2d(is->video_st->codec->time_base);
1317 /* for MPEG2, the frame can be repeated, so we update the
1318 clock accordingly */
1319 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1320 is->video_clock += frame_delay;
1321
1322 #if defined(DEBUG_SYNC) && 0
1323 {
1324 int ftype;
1325 if (src_frame->pict_type == FF_B_TYPE)
1326 ftype = 'B';
1327 else if (src_frame->pict_type == FF_I_TYPE)
1328 ftype = 'I';
1329 else
1330 ftype = 'P';
1331 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1332 ftype, pts, pts1);
1333 }
1334 #endif
1335 return queue_picture(is, src_frame, pts);
1336 }
1337
/* video decoding thread: pulls packets from the video packet queue,
   decodes them and hands the frames to the picture queue via
   output_picture2() */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        /* poll (10 ms steps) while playback is paused */
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        /* a flush packet marks a seek: drop the decoder's buffered state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt->data, pkt->size);

        /* timestamp selection: prefer the pts carried through the
           decoder's frame reordering (reordered_opaque) when available,
           fall back to the packet dts, else 0 so output_picture2()
           extrapolates from the video clock */
        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        /* single-step mode: halt playback again after this packet
           (see step_to_next_frame) */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}
1389
/* subtitle decoding thread: pulls packets from the subtitle queue,
   decodes bitmap subtitles and stores them in the subpicture queue
   after converting their palette from RGBA to YUVA */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* poll (10 ms steps) while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet marks a seek: drop the decoder's buffered state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt->data, pkt->size);
//            if (len1 < 0)
//                break;
        /* format == 0 selects bitmap (paletted) subtitles */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette entries from RGBA to YUVA
               in place, so blending can be done in YUV space */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1464
1465 /* copy samples for viewing in editor window */
1466 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1467 {
1468 int size, len, channels;
1469
1470 channels = is->audio_st->codec->channels;
1471
1472 size = samples_size / sizeof(short);
1473 while (size > 0) {
1474 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1475 if (len > size)
1476 len = size;
1477 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1478 samples += len;
1479 is->sample_array_index += len;
1480 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1481 is->sample_array_index = 0;
1482 size -= len;
1483 }
1484 }
1485
1486 /* return the new audio buffer size (samples can be added or deleted
1487 to get better sync if video or external master clock) */
1488 static int synchronize_audio(VideoState *is, short *samples,
1489 int samples_size1, double pts)
1490 {
1491 int n, samples_size;
1492 double ref_clock;
1493
1494 n = 2 * is->audio_st->codec->channels;
1495 samples_size = samples_size1;
1496
1497 /* if not master, then we try to remove or add samples to correct the clock */
1498 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1499 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1500 double diff, avg_diff;
1501 int wanted_size, min_size, max_size, nb_samples;
1502
1503 ref_clock = get_master_clock(is);
1504 diff = get_audio_clock(is) - ref_clock;
1505
1506 if (diff < AV_NOSYNC_THRESHOLD) {
1507 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1508 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1509 /* not enough measures to have a correct estimate */
1510 is->audio_diff_avg_count++;
1511 } else {
1512 /* estimate the A-V difference */
1513 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1514
1515 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1516 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1517 nb_samples = samples_size / n;
1518
1519 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1520 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1521 if (wanted_size < min_size)
1522 wanted_size = min_size;
1523 else if (wanted_size > max_size)
1524 wanted_size = max_size;
1525
1526 /* add or remove samples to correction the synchro */
1527 if (wanted_size < samples_size) {
1528 /* remove samples */
1529 samples_size = wanted_size;
1530 } else if (wanted_size > samples_size) {
1531 uint8_t *samples_end, *q;
1532 int nb;
1533
1534 /* add samples */
1535 nb = (samples_size - wanted_size);
1536 samples_end = (uint8_t *)samples + samples_size - n;
1537 q = samples_end + n;
1538 while (nb > 0) {
1539 memcpy(q, samples_end, n);
1540 q += n;
1541 nb -= n;
1542 }
1543 samples_size = wanted_size;
1544 }
1545 }
1546 #if 0
1547 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1548 diff, avg_diff, samples_size - samples_size1,
1549 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1550 #endif
1551 }
1552 } else {
1553 /* too big difference : may be initial PTS errors, so
1554 reset A-V filter */
1555 is->audio_diff_avg_count = 0;
1556 is->audio_diff_cum = 0;
1557 }
1558 }
1559
1560 return samples_size;
1561 }
1562
/* decode one audio frame and returns its uncompressed size in bytes.
   Keeps per-call state in is->audio_pkt / audio_pkt_data / audio_pkt_size
   because one packet can yield several frames across calls.  The decoded
   data is left in is->audio_buf (S16 interleaved), converting from the
   decoder's sample format if needed.  The audio clock pts for the
   returned data is stored in *pts_ptr.  Returns -1 when paused or when
   the audio queue is aborted. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (is->audio_pkt_size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio2(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        is->audio_pkt_data, is->audio_pkt_size);
            if (len1 < 0) {
                /* if error, we skip the frame */
                is->audio_pkt_size = 0;
                break;
            }

            /* advance past the bytes the decoder consumed */
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the sample-format converter whenever the
               decoder's output format differs from the S16 format the
               SDL audio device was opened with */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (S16, 2 bytes/sample) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* advance the audio clock by the duration of this chunk:
               data_size bytes / (bytes-per-frame * sample_rate) */
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet marks a seek: reset the decoder's state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
1664
1665 /* get the current audio output buffer size, in samples. With SDL, we
1666 cannot have a precise information */
1667 static int audio_write_get_buf_size(VideoState *is)
1668 {
1669 return is->audio_buf_size - is->audio_buf_index;
1670 }
1671
1672
/* prepare a new audio buffer: SDL invokes this from its audio thread to
   obtain `len` bytes of playable samples in `stream` */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* record when SDL asked for data; consumed by the sync bookkeeping
       elsewhere (audio_callback_time is a global) */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* refill is->audio_buf once the previous chunk is fully consumed */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               /* may shrink/grow the chunk when audio is not the master clock */
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much of the current chunk as fits into SDL's buffer */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
1708
/* open a given stream. Return 0 if OK.
   Finds and opens the decoder for ic->streams[stream_index], applies the
   user's codec options (lowres, skip_*, error handling, threads), and
   starts the matching consumer: the SDL audio device for audio, the
   video/subtitle decoding threads otherwise. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output: ask the decoder for at most 2 channels */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    /* apply the command-line decoding options before opening the codec */
    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;

    set_context_opts(enc, avctx_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

    /* prepare audio output: open the SDL audio device with the stream's
       parameters; sdl_audio_callback() will pull the decoded data */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    /* re-enable delivery of this stream's packets (decode_thread sets
       all streams to AVDISCARD_ALL initially) */
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        /* seed the frame timing state before the decode thread starts */
        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
1812
/* close one open stream component: abort its packet queue, unblock and
   join the consumer thread, then close the codec and clear the
   corresponding VideoState fields */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL audio thread, so sdl_audio_callback will not run
           again after this returns */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop the demuxer from queuing further packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
1883
1884 static void dump_stream_info(const AVFormatContext *s)
1885 {
1886 if (s->track != 0)
1887 fprintf(stderr, "Track: %d\n", s->track);
1888 if (s->title[0] != '\0')
1889 fprintf(stderr, "Title: %s\n", s->title);
1890 if (s->author[0] != '\0')
1891 fprintf(stderr, "Author: %s\n", s->author);
1892 if (s->copyright[0] != '\0')
1893 fprintf(stderr, "Copyright: %s\n", s->copyright);
1894 if (s->comment[0] != '\0')
1895 fprintf(stderr, "Comment: %s\n", s->comment);
1896 if (s->album[0] != '\0')
1897 fprintf(stderr, "Album: %s\n", s->album);
1898 if (s->year != 0)
1899 fprintf(stderr, "Year: %d\n", s->year);
1900 if (s->genre[0] != '\0')
1901 fprintf(stderr, "Genre: %s\n", s->genre);
1902 }
1903
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* interrupt callback installed via url_set_interrupt_cb() in
   decode_thread(); a non-zero return aborts blocking I/O so that
   quitting is not stalled by a hung network read */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
1912
/* this thread gets the stream from the disk or the network:
   opens the input, selects and opens the audio/video streams, then
   loops reading packets and dispatching them to the per-stream packet
   queues, handling pause, seek and end-of-file along the way */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow blocking I/O below to be interrupted by abort_request */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* hints for raw/headerless demuxers, taken from the command line */
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick the audio/video streams to play; the wanted_*_stream
       counters let the user select the Nth stream of each type.
       Everything starts discarded until a component is opened. */
    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
                video_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        /* no video: fall back to the audio waveform display */
        if (!display_disable)
            is->show_audio = 1;
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to the demuxer (relevant for network
           protocols) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int stream_index= -1;
            int64_t seek_target= is->seek_pos;

            if     (is-> video_stream >= 0) stream_index= is-> video_stream;
            else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
            else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;

            /* seek_pos is in AV_TIME_BASE units; convert to the chosen
               stream's time base for av_seek_frame() */
            if(stream_index>=0){
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
            }

            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* discard everything queued before the seek and push a
                   flush packet so the decoders reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queue are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        /* at EOF, feed an empty packet to drain the video decoder */
        if(url_feof(ic->pb)) {
            av_init_packet(pkt);
            pkt->data=NULL;
            pkt->size=0;
            pkt->stream_index= is->video_stream;
            packet_queue_put(&is->videoq, pkt);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (url_ferror(ic->pb) == 0) {
                SDL_Delay(100); /* wait for user event */
                continue;
            } else
                break;
        }
        /* dispatch the packet to its stream's queue, or drop it */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* on error, tell the main loop to quit */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2128
2129 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2130 {
2131 VideoState *is;
2132
2133 is = av_mallocz(sizeof(VideoState));
2134 if (!is)
2135 return NULL;
2136 av_strlcpy(is->filename, filename, sizeof(is->filename));
2137 is->iformat = iformat;
2138 is->ytop = 0;
2139 is->xleft = 0;
2140
2141 /* start video display */
2142 is->pictq_mutex = SDL_CreateMutex();
2143 is->pictq_cond = SDL_CreateCond();
2144
2145 is->subpq_mutex = SDL_CreateMutex();
2146 is->subpq_cond = SDL_CreateCond();
2147
2148 /* add the refresh timer to draw the picture */
2149 schedule_refresh(is, 40);
2150
2151 is->av_sync_type = av_sync_type;
2152 is->parse_tid = SDL_CreateThread(decode_thread, is);
2153 if (!is->parse_tid) {
2154 av_free(is);
2155 return NULL;
2156 }
2157 return is;
2158 }
2159
2160 static void stream_close(VideoState *is)
2161 {
2162 VideoPicture *vp;
2163 int i;
2164 /* XXX: use a special url_shutdown call to abort parse cleanly */
2165 is->abort_request = 1;
2166 SDL_WaitThread(is->parse_tid, NULL);
2167
2168 /* free all pictures */
2169 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2170 vp = &is->pictq[i];
2171 if (vp->bmp) {
2172 SDL_FreeYUVOverlay(vp->bmp);
2173 vp->bmp = NULL;
2174 }
2175 }
2176 SDL_DestroyMutex(is->pictq_mutex);
2177 SDL_DestroyCond(is->pictq_cond);
2178 SDL_DestroyMutex(is->subpq_mutex);
2179 SDL_DestroyCond(is->subpq_cond);
2180 }
2181
/* switch to the next usable stream of the given type, wrapping around
   the stream list.  For subtitles the cycle includes an "off" state
   (stream_index == -1); for audio/video it returns without change if no
   stream of that type is currently open. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* subtitles may start from -1 (disabled); audio/video must be open */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                /* wrap into the "subtitles off" state */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* went all the way around without finding another candidate */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2230
2231
2232 static void toggle_full_screen(void)
2233 {
2234 is_full_screen = !is_full_screen;
2235 if (!fs_screen_width) {
2236 /* use default SDL method */
2237 // SDL_WM_ToggleFullScreen(screen);
2238 }
2239 video_open(cur_stream);
2240 }
2241
2242 static void toggle_pause(void)
2243 {
2244 if (cur_stream)
2245 stream_pause(cur_stream);
2246 step = 0;
2247 }
2248
2249 static void step_to_next_frame(void)
2250 {
2251 if (cur_stream) {
2252 /* if the stream is paused unpause it, then step */
2253 if (cur_stream->paused)
2254 stream_pause(cur_stream);
2255 }
2256 step = 1;
2257 }
2258
2259 static void do_exit(void)
2260 {
2261 if (cur_stream) {
2262 stream_close(cur_stream);
2263 cur_stream = NULL;
2264 }
2265 if (show_status)
2266 printf("\n");
2267 SDL_Quit();
2268 exit(0);
2269 }
2270
2271 static void toggle_audio_display(void)
2272 {
2273 if (cur_stream) {
2274 cur_stream->show_audio = !cur_stream->show_audio;
2275 }
2276 }
2277
/* handle an event sent by the GUI */
/* Main interactive loop: blocks on SDL events and dispatches key
   presses, mouse seeks, window resizes and the custom FF_* events
   posted by the decoder threads. Only returns via do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        /* block until SDL delivers the next event */
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seeks; incr is in seconds and is
               converted to bytes below when seeking by bytes */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* translate the time increment into a byte offset
                           relative to the current file position */
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            /* NOTE(review): seconds -> bytes would normally
                               be bit_rate / 8.0; the / 60.0 factor looks
                               suspicious -- confirm the intended scaling */
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0; /* fallback rate when bit_rate is unknown */
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        /* time-based seek around the master clock position */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            /* click position as a fraction of the window width maps to
               the same fraction of the total stream duration */
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL; /* total duration in seconds */
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns; /* target position in seconds */
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                /* NOTE(review): start_time may be AV_NOPTS_VALUE for some
                   containers -- verify before relying on this offset */
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            /* recreate the screen surface at the new size and remember it */
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* decoder thread asked for a picture buffer on the main thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            /* timer-driven request to display the next video frame */
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
2392
2393 static void opt_frame_size(const char *arg)
2394 {
2395 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2396 fprintf(stderr, "Incorrect frame size\n");
2397 exit(1);
2398 }
2399 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2400 fprintf(stderr, "Frame size must be a multiple of 2\n");
2401 exit(1);
2402 }
2403 }
2404
2405 static int opt_width(const char *opt, const char *arg)
2406 {
2407 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2408 return 0;
2409 }
2410
2411 static int opt_height(const char *opt, const char *arg)
2412 {
2413 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2414 return 0;
2415 }
2416
2417 static void opt_format(const char *arg)
2418 {
2419 file_iformat = av_find_input_format(arg);
2420 if (!file_iformat) {
2421 fprintf(stderr, "Unknown input format: %s\n", arg);
2422 exit(1);
2423 }
2424 }
2425
2426 static void opt_frame_pix_fmt(const char *arg)
2427 {
2428 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2429 }
2430
2431 static int opt_sync(const char *opt, const char *arg)
2432 {
2433 if (!strcmp(arg, "audio"))
2434 av_sync_type = AV_SYNC_AUDIO_MASTER;
2435 else if (!strcmp(arg, "video"))
2436 av_sync_type = AV_SYNC_VIDEO_MASTER;
2437 else if (!strcmp(arg, "ext"))
2438 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2439 else {
2440 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2441 exit(1);
2442 }
2443 return 0;
2444 }
2445
2446 static int opt_seek(const char *opt, const char *arg)
2447 {
2448 start_time = parse_time_or_die(opt, arg, 1);
2449 return 0;
2450 }
2451
2452 static int opt_debug(const char *opt, const char *arg)
2453 {
2454 av_log_set_level(99);
2455 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2456 return 0;
2457 }
2458
2459 static int opt_vismv(const char *opt, const char *arg)
2460 {
2461 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2462 return 0;
2463 }
2464
2465 static int opt_thread_count(const char *opt, const char *arg)
2466 {
2467 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2468 #if !HAVE_THREADS
2469 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2470 #endif
2471 return 0;
2472 }
2473
/* Command line option table; parsed by parse_options() from cmdutils.
   OPT_FUNC2 entries dispatch to the opt_* handlers above, OPT_BOOL/OPT_INT
   entries write directly into the referenced global. */
static const OptionDef options[] = {
    /* informational options (print and exit) */
    { "h", OPT_EXIT, {(void*)show_help}, "show help" },
    { "version", OPT_EXIT, {(void*)show_version}, "show version" },
    { "L", OPT_EXIT, {(void*)show_license}, "show license" },
    { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
    /* display and stream selection */
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
    /* input and seeking */
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    /* debugging and expert decoder options */
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    /* fallback: forwards unknown options to the libav* option system */
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
2511
2512 static void show_help(void)
2513 {
2514 printf("usage: ffplay [options] input_file\n"
2515 "Simple media player\n");
2516 printf("\n");
2517 show_help_options(options, "Main options:\n",
2518 OPT_EXPERT, 0);
2519 show_help_options(options, "\nAdvanced options:\n",
2520 OPT_EXPERT, OPT_EXPERT);
2521 printf("\nWhile playing:\n"
2522 "q, ESC quit\n"
2523 "f toggle full screen\n"
2524 "p, SPC pause\n"
2525 "a cycle audio channel\n"
2526 "v cycle video channel\n"
2527 "t cycle subtitle channel\n"
2528 "w show audio waves\n"
2529 "left/right seek backward/forward 10 seconds\n"
2530 "down/up seek backward/forward 1 minute\n"
2531 "mouse click seek to percentage in file corresponding to fraction of width\n"
2532 );
2533 }
2534
2535 static void opt_input_file(const char *filename)
2536 {
2537 if (!strcmp(filename, "-"))
2538 filename = "pipe:";
2539 input_filename = filename;
2540 }
2541
/* Called from the main */
/* Program entry point: registers all libav* components, parses the
   command line, initializes SDL and hands control to event_loop().
   The call sequence is order-dependent (registration before option
   parsing, SDL_Init before any SDL call). */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    /* per-media-type codec contexts used to collect AVOption defaults */
    for(i=0; i<CODEC_TYPE_NB; i++){
        avctx_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = av_alloc_format_context();
    /* dummy 16x16 context so swscale options can be set via -default */
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        fprintf(stderr, "An input file must be specified\n");
        exit(1);
    }

    /* -nodisp implies no video decoding at all */
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    /* remember the desktop size for full-screen mode, when SDL can report it */
    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event types the player never handles */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet pushed into the queues to signal a flush after seeks;
       only its data pointer identity matters, the bytes are never parsed */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}