1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #define HAVE_AV_CONFIG_H
22 #include "avformat.h"
23 #include "swscale.h"
24
25 #include "version.h"
26 #include "cmdutils.h"
27
28 #include <SDL.h>
29 #include <SDL_thread.h>
30
31 #ifdef __MINGW32__
32 #undef main /* We don't want SDL to override our main() */
33 #endif
34
35 #ifdef CONFIG_OS2
36 #define INCL_DOS
37 #include <os2.h>
38 #include <stdio.h>
39
40 void MorphToPM()
41 {
42 PPIB pib;
43 PTIB tib;
44
45 DosGetInfoBlocks(&tib, &pib);
46
47 // Change flag from VIO to PM:
48 if (pib->pib_ultype==2) pib->pib_ultype = 3;
49 }
50 #endif
51
52 //#define DEBUG_SYNC
53
54 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
55 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
56 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
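/* These limits are upper bounds, in bytes of compressed data, on each packet
   queue; once a queue exceeds its bound, the demuxer thread stops reading and
   simply waits (see decode_thread()). */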
57
58 /* SDL audio buffer size, in samples. Should be small to have precise
59 A/V sync as SDL does not have hardware buffer fullness info. */
60 #define SDL_AUDIO_BUFFER_SIZE 1024
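/* For example, at a 44.1 kHz sample rate a 1024-sample buffer means the SDL
   audio callback fires roughly every 1024 / 44100 s, i.e. about 23 ms, which
   bounds the granularity of the audio clock used for A/V sync. */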
61
62 /* no AV sync correction is done if below the AV sync threshold */
63 #define AV_SYNC_THRESHOLD 0.01
64 /* no AV correction is done if too big error */
65 #define AV_NOSYNC_THRESHOLD 10.0
66
67 /* maximum audio speed change to get correct sync */
68 #define SAMPLE_CORRECTION_PERCENT_MAX 10
69
70 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
71 #define AUDIO_DIFF_AVG_NB 20
72
73 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
74 #define SAMPLE_ARRAY_SIZE (2*65536)
75
76 static int sws_flags = SWS_BICUBIC;
77
78 typedef struct PacketQueue {
79 AVPacketList *first_pkt, *last_pkt;
80 int nb_packets;
81 int size;
82 int abort_request;
83 SDL_mutex *mutex;
84 SDL_cond *cond;
85 } PacketQueue;
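/* A PacketQueue is a thread-safe FIFO of demuxed packets: the demuxer thread
   appends with packet_queue_put(), the decoders drain it with packet_queue_get()
   (optionally blocking), and packet_queue_abort() sets abort_request and wakes
   any blocked reader so threads can shut down cleanly. */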
86
87 #define VIDEO_PICTURE_QUEUE_SIZE 1
88 #define SUBPICTURE_QUEUE_SIZE 4
89
90 typedef struct VideoPicture {
91 double pts; ///<presentation time stamp for this picture
92 SDL_Overlay *bmp;
93 int width, height; /* source height & width */
94 int allocated;
95 } VideoPicture;
96
97 typedef struct SubPicture {
98 double pts; /* presentation time stamp for this picture */
99 AVSubtitle sub;
100 } SubPicture;
101
102 enum {
103 AV_SYNC_AUDIO_MASTER, /* default choice */
104 AV_SYNC_VIDEO_MASTER,
105 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
106 };
107
108 typedef struct VideoState {
109 SDL_Thread *parse_tid;
110 SDL_Thread *video_tid;
111 AVInputFormat *iformat;
112 int no_background;
113 int abort_request;
114 int paused;
115 int last_paused;
116 int seek_req;
117 int seek_flags;
118 int64_t seek_pos;
119 AVFormatContext *ic;
120 int dtg_active_format;
121
122 int audio_stream;
123
124 int av_sync_type;
125 double external_clock; /* external clock base */
126 int64_t external_clock_time;
127
128 double audio_clock;
129 double audio_diff_cum; /* used for AV difference average computation */
130 double audio_diff_avg_coef;
131 double audio_diff_threshold;
132 int audio_diff_avg_count;
133 AVStream *audio_st;
134 PacketQueue audioq;
135 int audio_hw_buf_size;
136 /* samples output by the codec. we reserve more space for avsync
137 compensation */
138 DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
139 unsigned int audio_buf_size; /* in bytes */
140 int audio_buf_index; /* in bytes */
141 AVPacket audio_pkt;
142 uint8_t *audio_pkt_data;
143 int audio_pkt_size;
144
145 int show_audio; /* if true, display audio samples */
146 int16_t sample_array[SAMPLE_ARRAY_SIZE];
147 int sample_array_index;
148 int last_i_start;
149
150 SDL_Thread *subtitle_tid;
151 int subtitle_stream;
152 int subtitle_stream_changed;
153 AVStream *subtitle_st;
154 PacketQueue subtitleq;
155 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
156 int subpq_size, subpq_rindex, subpq_windex;
157 SDL_mutex *subpq_mutex;
158 SDL_cond *subpq_cond;
159
160 double frame_timer;
161 double frame_last_pts;
162 double frame_last_delay;
163 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
164 int video_stream;
165 AVStream *video_st;
166 PacketQueue videoq;
167 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
168 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
169 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
170 int pictq_size, pictq_rindex, pictq_windex;
171 SDL_mutex *pictq_mutex;
172 SDL_cond *pictq_cond;
173
174 // QETimer *video_timer;
175 char filename[1024];
176 int width, height, xleft, ytop;
177 } VideoState;
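/* Rough threading model: decode_thread() demuxes the input and feeds the three
   packet queues; video_thread() and subtitle_thread() decode into pictq[] and
   subpq[]; the SDL audio callback drains audioq; and the main thread owns the
   SDL surface, handling FF_ALLOC/FF_REFRESH events to allocate and display
   pictures. */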
178
179 void show_help(void);
180 static int audio_write_get_buf_size(VideoState *is);
181
182 /* options specified by the user */
183 static AVInputFormat *file_iformat;
184 static const char *input_filename;
185 static int fs_screen_width;
186 static int fs_screen_height;
187 static int screen_width = 0;
188 static int screen_height = 0;
189 static int audio_disable;
190 static int video_disable;
191 static int wanted_audio_stream= 0;
192 static int seek_by_bytes;
193 static int display_disable;
194 static int show_status;
195 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
196 static int64_t start_time = AV_NOPTS_VALUE;
197 static int debug = 0;
198 static int debug_mv = 0;
199 static int step = 0;
200 static int thread_count = 1;
201 static int workaround_bugs = 1;
202 static int fast = 0;
203 static int genpts = 0;
204 static int lowres = 0;
205 static int idct = FF_IDCT_AUTO;
206 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
207 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
208 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
209 static int error_resilience = FF_ER_CAREFUL;
210 static int error_concealment = 3;
211
212 /* current context */
213 static int is_full_screen;
214 static VideoState *cur_stream;
215 static int64_t audio_callback_time;
216
217 AVPacket flush_pkt;
218
219 #define FF_ALLOC_EVENT (SDL_USEREVENT)
220 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
221 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
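/* Custom SDL user events posted to the main event loop: FF_ALLOC_EVENT asks the
   main thread to (re)allocate the YUV overlay, FF_REFRESH_EVENT drives
   video_refresh_timer(), and FF_QUIT_EVENT reports a fatal error from the
   demuxer thread. */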
222
223 SDL_Surface *screen;
224
225 /* packet queue handling */
226 static void packet_queue_init(PacketQueue *q)
227 {
228 memset(q, 0, sizeof(PacketQueue));
229 q->mutex = SDL_CreateMutex();
230 q->cond = SDL_CreateCond();
231 }
232
233 static void packet_queue_flush(PacketQueue *q)
234 {
235 AVPacketList *pkt, *pkt1;
236
237 SDL_LockMutex(q->mutex);
238 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
239 pkt1 = pkt->next;
240 av_free_packet(&pkt->pkt);
241 av_freep(&pkt);
242 }
243 q->last_pkt = NULL;
244 q->first_pkt = NULL;
245 q->nb_packets = 0;
246 q->size = 0;
247 SDL_UnlockMutex(q->mutex);
248 }
249
250 static void packet_queue_end(PacketQueue *q)
251 {
252 packet_queue_flush(q);
253 SDL_DestroyMutex(q->mutex);
254 SDL_DestroyCond(q->cond);
255 }
256
257 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
258 {
259 AVPacketList *pkt1;
260
261 /* duplicate the packet */
262 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
263 return -1;
264
265 pkt1 = av_malloc(sizeof(AVPacketList));
266 if (!pkt1)
267 return -1;
268 pkt1->pkt = *pkt;
269 pkt1->next = NULL;
270
271
272 SDL_LockMutex(q->mutex);
273
274 if (!q->last_pkt)
275
276 q->first_pkt = pkt1;
277 else
278 q->last_pkt->next = pkt1;
279 q->last_pkt = pkt1;
280 q->nb_packets++;
281 q->size += pkt1->pkt.size;
282 /* XXX: should duplicate packet data in DV case */
283 SDL_CondSignal(q->cond);
284
285 SDL_UnlockMutex(q->mutex);
286 return 0;
287 }
288
289 static void packet_queue_abort(PacketQueue *q)
290 {
291 SDL_LockMutex(q->mutex);
292
293 q->abort_request = 1;
294
295 SDL_CondSignal(q->cond);
296
297 SDL_UnlockMutex(q->mutex);
298 }
299
300 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
301 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
302 {
303 AVPacketList *pkt1;
304 int ret;
305
306 SDL_LockMutex(q->mutex);
307
308 for(;;) {
309 if (q->abort_request) {
310 ret = -1;
311 break;
312 }
313
314 pkt1 = q->first_pkt;
315 if (pkt1) {
316 q->first_pkt = pkt1->next;
317 if (!q->first_pkt)
318 q->last_pkt = NULL;
319 q->nb_packets--;
320 q->size -= pkt1->pkt.size;
321 *pkt = pkt1->pkt;
322 av_free(pkt1);
323 ret = 1;
324 break;
325 } else if (!block) {
326 ret = 0;
327 break;
328 } else {
329 SDL_CondWait(q->cond, q->mutex);
330 }
331 }
332 SDL_UnlockMutex(q->mutex);
333 return ret;
334 }
335
336 static inline void fill_rectangle(SDL_Surface *screen,
337 int x, int y, int w, int h, int color)
338 {
339 SDL_Rect rect;
340 rect.x = x;
341 rect.y = y;
342 rect.w = w;
343 rect.h = h;
344 SDL_FillRect(screen, &rect, color);
345 }
346
347 #if 0
348 /* draw only the border of a rectangle */
349 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
350 {
351 int w1, w2, h1, h2;
352
353 /* fill the background */
354 w1 = x;
355 if (w1 < 0)
356 w1 = 0;
357 w2 = s->width - (x + w);
358 if (w2 < 0)
359 w2 = 0;
360 h1 = y;
361 if (h1 < 0)
362 h1 = 0;
363 h2 = s->height - (y + h);
364 if (h2 < 0)
365 h2 = 0;
366 fill_rectangle(screen,
367 s->xleft, s->ytop,
368 w1, s->height,
369 color);
370 fill_rectangle(screen,
371 s->xleft + s->width - w2, s->ytop,
372 w2, s->height,
373 color);
374 fill_rectangle(screen,
375 s->xleft + w1, s->ytop,
376 s->width - w1 - w2, h1,
377 color);
378 fill_rectangle(screen,
379 s->xleft + w1, s->ytop + s->height - h2,
380 s->width - w1 - w2, h2,
381 color);
382 }
383 #endif
384
385
386
387 #define SCALEBITS 10
388 #define ONE_HALF (1 << (SCALEBITS - 1))
389 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
390
391 #define RGB_TO_Y_CCIR(r, g, b) \
392 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
393 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
394
395 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
396 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
397 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
398
399 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
400 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
401 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
402
403 #define ALPHA_BLEND(a, oldp, newp, s)\
404 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
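/* ALPHA_BLEND with s = 0 is the usual blend (oldp*(255 - a) + newp*a) / 255;
   a nonzero s lets 'newp' be a sum of 2^s samples (e.g. summed chroma values),
   blended against a single 'oldp' sample without a separate division. */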
405
406 #define RGBA_IN(r, g, b, a, s)\
407 {\
408 unsigned int v = ((const uint32_t *)(s))[0];\
409 a = (v >> 24) & 0xff;\
410 r = (v >> 16) & 0xff;\
411 g = (v >> 8) & 0xff;\
412 b = v & 0xff;\
413 }
414
415 #define YUVA_IN(y, u, v, a, s, pal)\
416 {\
417 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
418 a = (val >> 24) & 0xff;\
419 y = (val >> 16) & 0xff;\
420 u = (val >> 8) & 0xff;\
421 v = val & 0xff;\
422 }
423
424 #define YUVA_OUT(d, y, u, v, a)\
425 {\
426 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
427 }
428
429
430 #define BPP 1
431
432 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
433 {
434 int wrap, wrap3, width2, skip2;
435 int y, u, v, a, u1, v1, a1, w, h;
436 uint8_t *lum, *cb, *cr;
437 const uint8_t *p;
438 const uint32_t *pal;
439
440 lum = dst->data[0] + rect->y * dst->linesize[0];
441 cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
442 cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];
443
444 width2 = (rect->w + 1) >> 1;
445 skip2 = rect->x >> 1;
446 wrap = dst->linesize[0];
447 wrap3 = rect->linesize;
448 p = rect->bitmap;
449 pal = rect->rgba_palette; /* Now in YCrCb! */
450
451 if (rect->y & 1) {
452 lum += rect->x;
453 cb += skip2;
454 cr += skip2;
455
456 if (rect->x & 1) {
457 YUVA_IN(y, u, v, a, p, pal);
458 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
459 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
460 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
461 cb++;
462 cr++;
463 lum++;
464 p += BPP;
465 }
466 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
467 YUVA_IN(y, u, v, a, p, pal);
468 u1 = u;
469 v1 = v;
470 a1 = a;
471 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
472
473 YUVA_IN(y, u, v, a, p + BPP, pal);
474 u1 += u;
475 v1 += v;
476 a1 += a;
477 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
478 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
479 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
480 cb++;
481 cr++;
482 p += 2 * BPP;
483 lum += 2;
484 }
485 if (w) {
486 YUVA_IN(y, u, v, a, p, pal);
487 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
488 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
489 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
490 }
491 p += wrap3 + (wrap3 - rect->w * BPP);
492 lum += wrap + (wrap - rect->w - rect->x);
493 cb += dst->linesize[1] - width2 - skip2;
494 cr += dst->linesize[2] - width2 - skip2;
495 }
496 for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
497 lum += rect->x;
498 cb += skip2;
499 cr += skip2;
500
501 if (rect->x & 1) {
502 YUVA_IN(y, u, v, a, p, pal);
503 u1 = u;
504 v1 = v;
505 a1 = a;
506 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
507 p += wrap3;
508 lum += wrap;
509 YUVA_IN(y, u, v, a, p, pal);
510 u1 += u;
511 v1 += v;
512 a1 += a;
513 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
515 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
516 cb++;
517 cr++;
518 p += -wrap3 + BPP;
519 lum += -wrap + 1;
520 }
521 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
522 YUVA_IN(y, u, v, a, p, pal);
523 u1 = u;
524 v1 = v;
525 a1 = a;
526 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
527
528 YUVA_IN(y, u, v, a, p, pal);
529 u1 += u;
530 v1 += v;
531 a1 += a;
532 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
533 p += wrap3;
534 lum += wrap;
535
536 YUVA_IN(y, u, v, a, p, pal);
537 u1 += u;
538 v1 += v;
539 a1 += a;
540 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
541
542 YUVA_IN(y, u, v, a, p, pal);
543 u1 += u;
544 v1 += v;
545 a1 += a;
546 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
547
548 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
549 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
550
551 cb++;
552 cr++;
553 p += -wrap3 + 2 * BPP;
554 lum += -wrap + 2;
555 }
556 if (w) {
557 YUVA_IN(y, u, v, a, p, pal);
558 u1 = u;
559 v1 = v;
560 a1 = a;
561 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
562 p += wrap3;
563 lum += wrap;
564 YUVA_IN(y, u, v, a, p, pal);
565 u1 += u;
566 v1 += v;
567 a1 += a;
568 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
570 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
571 cb++;
572 cr++;
573 p += -wrap3 + BPP;
574 lum += -wrap + 1;
575 }
576 p += wrap3 + (wrap3 - rect->w * BPP);
577 lum += wrap + (wrap - rect->w - rect->x);
578 cb += dst->linesize[1] - width2 - skip2;
579 cr += dst->linesize[2] - width2 - skip2;
580 }
581 /* handle odd height */
582 if (h) {
583 lum += rect->x;
584 cb += skip2;
585 cr += skip2;
586
587 if (rect->x & 1) {
588 YUVA_IN(y, u, v, a, p, pal);
589 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
590 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
591 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
592 cb++;
593 cr++;
594 lum++;
595 p += BPP;
596 }
597 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
598 YUVA_IN(y, u, v, a, p, pal);
599 u1 = u;
600 v1 = v;
601 a1 = a;
602 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
603
604 YUVA_IN(y, u, v, a, p + BPP, pal);
605 u1 += u;
606 v1 += v;
607 a1 += a;
608 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
609 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
610 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
611 cb++;
612 cr++;
613 p += 2 * BPP;
614 lum += 2;
615 }
616 if (w) {
617 YUVA_IN(y, u, v, a, p, pal);
618 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
620 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
621 }
622 }
623 }
624
625 static void free_subpicture(SubPicture *sp)
626 {
627 int i;
628
629 for (i = 0; i < sp->sub.num_rects; i++)
630 {
631 av_free(sp->sub.rects[i].bitmap);
632 av_free(sp->sub.rects[i].rgba_palette);
633 }
634
635 av_free(sp->sub.rects);
636
637 memset(&sp->sub, 0, sizeof(AVSubtitle));
638 }
639
640 static void video_image_display(VideoState *is)
641 {
642 VideoPicture *vp;
643 SubPicture *sp;
644 AVPicture pict;
645 float aspect_ratio;
646 int width, height, x, y;
647 SDL_Rect rect;
648 int i;
649
650 vp = &is->pictq[is->pictq_rindex];
651 if (vp->bmp) {
652 /* XXX: use variable in the frame */
653 if (is->video_st->codec->sample_aspect_ratio.num == 0)
654 aspect_ratio = 0;
655 else
656 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
657 * is->video_st->codec->width / is->video_st->codec->height;
658 if (aspect_ratio <= 0.0)
659 aspect_ratio = (float)is->video_st->codec->width /
660 (float)is->video_st->codec->height;
661 /* if an active format is indicated, then it overrides the
662 mpeg format */
663 #if 0
664 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
665 is->dtg_active_format = is->video_st->codec->dtg_active_format;
666 printf("dtg_active_format=%d\n", is->dtg_active_format);
667 }
668 #endif
669 #if 0
670 switch(is->video_st->codec->dtg_active_format) {
671 case FF_DTG_AFD_SAME:
672 default:
673 /* nothing to do */
674 break;
675 case FF_DTG_AFD_4_3:
676 aspect_ratio = 4.0 / 3.0;
677 break;
678 case FF_DTG_AFD_16_9:
679 aspect_ratio = 16.0 / 9.0;
680 break;
681 case FF_DTG_AFD_14_9:
682 aspect_ratio = 14.0 / 9.0;
683 break;
684 case FF_DTG_AFD_4_3_SP_14_9:
685 aspect_ratio = 14.0 / 9.0;
686 break;
687 case FF_DTG_AFD_16_9_SP_14_9:
688 aspect_ratio = 14.0 / 9.0;
689 break;
690 case FF_DTG_AFD_SP_4_3:
691 aspect_ratio = 4.0 / 3.0;
692 break;
693 }
694 #endif
695
696 if (is->subtitle_st)
697 {
698 if (is->subpq_size > 0)
699 {
700 sp = &is->subpq[is->subpq_rindex];
701
702 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
703 {
704 SDL_LockYUVOverlay (vp->bmp);
705
706 pict.data[0] = vp->bmp->pixels[0];
707 pict.data[1] = vp->bmp->pixels[2];
708 pict.data[2] = vp->bmp->pixels[1];
709
710 pict.linesize[0] = vp->bmp->pitches[0];
711 pict.linesize[1] = vp->bmp->pitches[2];
712 pict.linesize[2] = vp->bmp->pitches[1];
713
714 for (i = 0; i < sp->sub.num_rects; i++)
715 blend_subrect(&pict, &sp->sub.rects[i]);
716
717 SDL_UnlockYUVOverlay (vp->bmp);
718 }
719 }
720 }
721
722
723 /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
724 height = is->height;
725 width = ((int)rint(height * aspect_ratio)) & -3;
726 if (width > is->width) {
727 width = is->width;
728 height = ((int)rint(width / aspect_ratio)) & -3;
729 }
730 x = (is->width - width) / 2;
731 y = (is->height - height) / 2;
732 if (!is->no_background) {
733 /* fill the background */
734 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
735 } else {
736 is->no_background = 0;
737 }
738 rect.x = is->xleft + x;
739 rect.y = is->ytop + y;
740 rect.w = width;
741 rect.h = height;
742 SDL_DisplayYUVOverlay(vp->bmp, &rect);
743 } else {
744 #if 0
745 fill_rectangle(screen,
746 is->xleft, is->ytop, is->width, is->height,
747 QERGB(0x00, 0x00, 0x00));
748 #endif
749 }
750 }
751
752 static inline int compute_mod(int a, int b)
753 {
754 a = a % b;
755 if (a >= 0)
756 return a;
757 else
758 return a + b;
759 }
760
761 static void video_audio_display(VideoState *s)
762 {
763 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
764 int ch, channels, h, h2, bgcolor, fgcolor;
765 int64_t time_diff;
766
767 /* compute display index : center on currently output samples */
768 channels = s->audio_st->codec->channels;
769 nb_display_channels = channels;
770 if (!s->paused) {
771 n = 2 * channels;
772 delay = audio_write_get_buf_size(s);
773 delay /= n;
774
775 /* to be more precise, we take into account the time spent since
776 the last buffer computation */
777 if (audio_callback_time) {
778 time_diff = av_gettime() - audio_callback_time;
779 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
780 }
781
782 delay -= s->width / 2;
783 if (delay < s->width)
784 delay = s->width;
785
786 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
787
788 h= INT_MIN;
789 for(i=0; i<1000; i+=channels){
790 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
791 int a= s->sample_array[idx];
792 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
793 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
794 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
795 int score= a-d;
796 if(h<score && (b^c)<0){
797 h= score;
798 i_start= idx;
799 }
800 }
801
802 s->last_i_start = i_start;
803 } else {
804 i_start = s->last_i_start;
805 }
806
807 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
808 fill_rectangle(screen,
809 s->xleft, s->ytop, s->width, s->height,
810 bgcolor);
811
812 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
813
814 /* total height for one channel */
815 h = s->height / nb_display_channels;
816 /* graph height / 2 */
817 h2 = (h * 9) / 20;
818 for(ch = 0;ch < nb_display_channels; ch++) {
819 i = i_start + ch;
820 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
821 for(x = 0; x < s->width; x++) {
822 y = (s->sample_array[i] * h2) >> 15;
823 if (y < 0) {
824 y = -y;
825 ys = y1 - y;
826 } else {
827 ys = y1;
828 }
829 fill_rectangle(screen,
830 s->xleft + x, ys, 1, y,
831 fgcolor);
832 i += channels;
833 if (i >= SAMPLE_ARRAY_SIZE)
834 i -= SAMPLE_ARRAY_SIZE;
835 }
836 }
837
838 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
839
840 for(ch = 1;ch < nb_display_channels; ch++) {
841 y = s->ytop + ch * h;
842 fill_rectangle(screen,
843 s->xleft, y, s->width, 1,
844 fgcolor);
845 }
846 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
847 }
848
849 static int video_open(VideoState *is){
850 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
851 int w,h;
852
853 if(is_full_screen) flags |= SDL_FULLSCREEN;
854 else flags |= SDL_RESIZABLE;
855
856 if (is_full_screen && fs_screen_width) {
857 w = fs_screen_width;
858 h = fs_screen_height;
859 } else if(!is_full_screen && screen_width){
860 w = screen_width;
861 h = screen_height;
862 }else if (is->video_st && is->video_st->codec->width){
863 w = is->video_st->codec->width;
864 h = is->video_st->codec->height;
865 } else {
866 w = 640;
867 h = 480;
868 }
869 #ifndef CONFIG_DARWIN
870 screen = SDL_SetVideoMode(w, h, 0, flags);
871 #else
872 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
873 screen = SDL_SetVideoMode(w, h, 24, flags);
874 #endif
875 if (!screen) {
876 fprintf(stderr, "SDL: could not set video mode - exiting\n");
877 return -1;
878 }
879 SDL_WM_SetCaption("FFplay", "FFplay");
880
881 is->width = screen->w;
882 is->height = screen->h;
883
884 return 0;
885 }
886
887 /* display the current picture, if any */
888 static void video_display(VideoState *is)
889 {
890 if(!screen)
891 video_open(cur_stream);
892 if (is->audio_st && is->show_audio)
893 video_audio_display(is);
894 else if (is->video_st)
895 video_image_display(is);
896 }
897
898 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
899 {
900 SDL_Event event;
901 event.type = FF_REFRESH_EVENT;
902 event.user.data1 = opaque;
903 SDL_PushEvent(&event);
904 return 0; /* 0 means stop timer */
905 }
906
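/* NOTE: the timer callback above runs in SDL's timer thread, which is why it
   only posts an FF_REFRESH_EVENT and leaves the actual drawing to the main
   event loop. */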
907 /* schedule a video refresh in 'delay' ms */
908 static void schedule_refresh(VideoState *is, int delay)
909 {
910 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
911 }
912
913 /* get the current audio clock value */
914 static double get_audio_clock(VideoState *is)
915 {
916 double pts;
917 int hw_buf_size, bytes_per_sec;
918 pts = is->audio_clock;
919 hw_buf_size = audio_write_get_buf_size(is);
920 bytes_per_sec = 0;
921 if (is->audio_st) {
922 bytes_per_sec = is->audio_st->codec->sample_rate *
923 2 * is->audio_st->codec->channels;
924 }
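/* audio_clock holds the pts at the *end* of the last decoded chunk; subtracting
   the data still waiting in the output buffer (assuming 16-bit samples, hence
   the factor of 2 above) gives the pts of the sample currently being played. */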
925 if (bytes_per_sec)
926 pts -= (double)hw_buf_size / bytes_per_sec;
927 return pts;
928 }
929
930 /* get the current video clock value */
931 static double get_video_clock(VideoState *is)
932 {
933 double delta;
934 if (is->paused) {
935 delta = 0;
936 } else {
937 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
938 }
939 return is->video_current_pts + delta;
940 }
941
942 /* get the current external clock value */
943 static double get_external_clock(VideoState *is)
944 {
945 int64_t ti;
946 ti = av_gettime();
947 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
948 }
949
950 /* get the current master clock value */
951 static double get_master_clock(VideoState *is)
952 {
953 double val;
954
955 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
956 if (is->video_st)
957 val = get_video_clock(is);
958 else
959 val = get_audio_clock(is);
960 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
961 if (is->audio_st)
962 val = get_audio_clock(is);
963 else
964 val = get_video_clock(is);
965 } else {
966 val = get_external_clock(is);
967 }
968 return val;
969 }
970
971 /* seek in the stream */
972 static void stream_seek(VideoState *is, int64_t pos, int rel)
973 {
974 if (!is->seek_req) {
975 is->seek_pos = pos;
976 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
977 if (seek_by_bytes)
978 is->seek_flags |= AVSEEK_FLAG_BYTE;
979 is->seek_req = 1;
980 }
981 }
982
983 /* pause or resume the video */
984 static void stream_pause(VideoState *is)
985 {
986 is->paused = !is->paused;
987 if (!is->paused) {
988 is->video_current_pts = get_video_clock(is);
989 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
990 }
991 }
992
993 /* called to display each frame */
994 static void video_refresh_timer(void *opaque)
995 {
996 VideoState *is = opaque;
997 VideoPicture *vp;
998 double actual_delay, delay, sync_threshold, ref_clock, diff;
999
1000 SubPicture *sp, *sp2;
1001
1002 if (is->video_st) {
1003 if (is->pictq_size == 0) {
1004 /* if no picture, need to wait */
1005 schedule_refresh(is, 1);
1006 } else {
1007 /* dequeue the picture */
1008 vp = &is->pictq[is->pictq_rindex];
1009
1010 /* update current video pts */
1011 is->video_current_pts = vp->pts;
1012 is->video_current_pts_time = av_gettime();
1013
1014 /* compute nominal delay */
1015 delay = vp->pts - is->frame_last_pts;
1016 if (delay <= 0 || delay >= 1.0) {
1017 /* if incorrect delay, use previous one */
1018 delay = is->frame_last_delay;
1019 }
1020 is->frame_last_delay = delay;
1021 is->frame_last_pts = vp->pts;
1022
1023 /* update delay to follow master synchronisation source */
1024 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1025 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1026 /* if video is slave, we try to correct big delays by
1027 duplicating or deleting a frame */
1028 ref_clock = get_master_clock(is);
1029 diff = vp->pts - ref_clock;
1030
1031 /* skip or repeat frame. We take into account the
1032 delay to compute the threshold. I still don't know
1033 if it is the best guess */
1034 sync_threshold = AV_SYNC_THRESHOLD;
1035 if (delay > sync_threshold)
1036 sync_threshold = delay;
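/* diff > 0: the frame is ahead of the master clock, so hold it longer (the
   delay is doubled below); diff < 0: video is late, so a zero delay is used to
   catch up. No correction is attempted once |diff| exceeds AV_NOSYNC_THRESHOLD,
   presumably because the clocks can no longer be compared meaningfully. */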
1037 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1038 if (diff <= -sync_threshold)
1039 delay = 0;
1040 else if (diff >= sync_threshold)
1041 delay = 2 * delay;
1042 }
1043 }
1044
1045 is->frame_timer += delay;
1046 /* compute the REAL delay (we need to do that to avoid
1047 long term errors) */
1048 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1049 if (actual_delay < 0.010) {
1050 /* XXX: should skip picture */
1051 actual_delay = 0.010;
1052 }
1053 /* launch timer for next picture */
1054 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1055
1056 #if defined(DEBUG_SYNC)
1057 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1058 delay, actual_delay, vp->pts, -diff);
1059 #endif
1060
1061 if(is->subtitle_st) {
1062 if (is->subtitle_stream_changed) {
1063 SDL_LockMutex(is->subpq_mutex);
1064
1065 while (is->subpq_size) {
1066 free_subpicture(&is->subpq[is->subpq_rindex]);
1067
1068 /* update queue size and signal for next picture */
1069 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1070 is->subpq_rindex = 0;
1071
1072 is->subpq_size--;
1073 }
1074 is->subtitle_stream_changed = 0;
1075
1076 SDL_CondSignal(is->subpq_cond);
1077 SDL_UnlockMutex(is->subpq_mutex);
1078 } else {
1079 if (is->subpq_size > 0) {
1080 sp = &is->subpq[is->subpq_rindex];
1081
1082 if (is->subpq_size > 1)
1083 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1084 else
1085 sp2 = NULL;
1086
1087 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1088 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1089 {
1090 free_subpicture(sp);
1091
1092 /* update queue size and signal for next picture */
1093 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1094 is->subpq_rindex = 0;
1095
1096 SDL_LockMutex(is->subpq_mutex);
1097 is->subpq_size--;
1098 SDL_CondSignal(is->subpq_cond);
1099 SDL_UnlockMutex(is->subpq_mutex);
1100 }
1101 }
1102 }
1103 }
1104
1105 /* display picture */
1106 video_display(is);
1107
1108 /* update queue size and signal for next picture */
1109 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1110 is->pictq_rindex = 0;
1111
1112 SDL_LockMutex(is->pictq_mutex);
1113 is->pictq_size--;
1114 SDL_CondSignal(is->pictq_cond);
1115 SDL_UnlockMutex(is->pictq_mutex);
1116 }
1117 } else if (is->audio_st) {
1118 /* draw the next audio frame */
1119
1120 schedule_refresh(is, 40);
1121
1122 /* if only audio stream, then display the audio bars (better
1123 than nothing, just to test the implementation) */
1124
1125 /* display picture */
1126 video_display(is);
1127 } else {
1128 schedule_refresh(is, 100);
1129 }
1130 if (show_status) {
1131 static int64_t last_time;
1132 int64_t cur_time;
1133 int aqsize, vqsize, sqsize;
1134 double av_diff;
1135
1136 cur_time = av_gettime();
1137 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1138 aqsize = 0;
1139 vqsize = 0;
1140 sqsize = 0;
1141 if (is->audio_st)
1142 aqsize = is->audioq.size;
1143 if (is->video_st)
1144 vqsize = is->videoq.size;
1145 if (is->subtitle_st)
1146 sqsize = is->subtitleq.size;
1147 av_diff = 0;
1148 if (is->audio_st && is->video_st)
1149 av_diff = get_audio_clock(is) - get_video_clock(is);
1150 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1151 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1152 fflush(stdout);
1153 last_time = cur_time;
1154 }
1155 }
1156 }
1157
1158 /* allocate a picture (needs to be done in the main thread to avoid
1159 potential locking problems) */
1160 static void alloc_picture(void *opaque)
1161 {
1162 VideoState *is = opaque;
1163 VideoPicture *vp;
1164
1165 vp = &is->pictq[is->pictq_windex];
1166
1167 if (vp->bmp)
1168 SDL_FreeYUVOverlay(vp->bmp);
1169
1170 #if 0
1171 /* XXX: use generic function */
1172 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1173 switch(is->video_st->codec->pix_fmt) {
1174 case PIX_FMT_YUV420P:
1175 case PIX_FMT_YUV422P:
1176 case PIX_FMT_YUV444P:
1177 case PIX_FMT_YUYV422:
1178 case PIX_FMT_YUV410P:
1179 case PIX_FMT_YUV411P:
1180 is_yuv = 1;
1181 break;
1182 default:
1183 is_yuv = 0;
1184 break;
1185 }
1186 #endif
1187 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1188 is->video_st->codec->height,
1189 SDL_YV12_OVERLAY,
1190 screen);
1191 vp->width = is->video_st->codec->width;
1192 vp->height = is->video_st->codec->height;
1193
1194 SDL_LockMutex(is->pictq_mutex);
1195 vp->allocated = 1;
1196 SDL_CondSignal(is->pictq_cond);
1197 SDL_UnlockMutex(is->pictq_mutex);
1198 }
1199
1200 /**
1201 * queue a decoded frame into the picture queue for later display
1202 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1203 */
1204 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1205 {
1206 VideoPicture *vp;
1207 int dst_pix_fmt;
1208 AVPicture pict;
1209 static struct SwsContext *img_convert_ctx;
1210
1211 /* wait until we have space to put a new picture */
1212 SDL_LockMutex(is->pictq_mutex);
1213 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1214 !is->videoq.abort_request) {
1215 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1216 }
1217 SDL_UnlockMutex(is->pictq_mutex);
1218
1219 if (is->videoq.abort_request)
1220 return -1;
1221
1222 vp = &is->pictq[is->pictq_windex];
1223
1224 /* alloc or resize hardware picture buffer */
1225 if (!vp->bmp ||
1226 vp->width != is->video_st->codec->width ||
1227 vp->height != is->video_st->codec->height) {
1228 SDL_Event event;
1229
1230 vp->allocated = 0;
1231
1232 /* the allocation must be done in the main thread to avoid
1233 locking problems */
1234 event.type = FF_ALLOC_EVENT;
1235 event.user.data1 = is;
1236 SDL_PushEvent(&event);
1237
1238 /* wait until the picture is allocated */
1239 SDL_LockMutex(is->pictq_mutex);
1240 while (!vp->allocated && !is->videoq.abort_request) {
1241 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1242 }
1243 SDL_UnlockMutex(is->pictq_mutex);
1244
1245 if (is->videoq.abort_request)
1246 return -1;
1247 }
1248
1249 /* if the frame is not skipped, then display it */
1250 if (vp->bmp) {
1251 /* get a pointer on the bitmap */
1252 SDL_LockYUVOverlay (vp->bmp);
1253
1254 dst_pix_fmt = PIX_FMT_YUV420P;
1255 pict.data[0] = vp->bmp->pixels[0];
1256 pict.data[1] = vp->bmp->pixels[2];
1257 pict.data[2] = vp->bmp->pixels[1];
1258
1259 pict.linesize[0] = vp->bmp->pitches[0];
1260 pict.linesize[1] = vp->bmp->pitches[2];
1261 pict.linesize[2] = vp->bmp->pitches[1];
1262 if (img_convert_ctx == NULL) {
1263 img_convert_ctx = sws_getContext(is->video_st->codec->width,
1264 is->video_st->codec->height, is->video_st->codec->pix_fmt,
1265 is->video_st->codec->width, is->video_st->codec->height,
1266 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1267 if (img_convert_ctx == NULL) {
1268 fprintf(stderr, "Cannot initialize the conversion context\n");
1269 exit(1);
1270 }
1271 }
1272 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1273 0, is->video_st->codec->height, pict.data, pict.linesize);
1274 /* update the bitmap content */
1275 SDL_UnlockYUVOverlay(vp->bmp);
1276
1277 vp->pts = pts;
1278
1279 /* now we can update the picture count */
1280 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1281 is->pictq_windex = 0;
1282 SDL_LockMutex(is->pictq_mutex);
1283 is->pictq_size++;
1284 SDL_UnlockMutex(is->pictq_mutex);
1285 }
1286 return 0;
1287 }
1288
1289 /**
1290 * compute the exact PTS for the picture if it is omitted in the stream
1291 * @param pts1 the dts of the pkt / pts of the frame
1292 */
1293 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1294 {
1295 double frame_delay, pts;
1296
1297 pts = pts1;
1298
1299 if (pts != 0) {
1300 /* update video clock with pts, if present */
1301 is->video_clock = pts;
1302 } else {
1303 pts = is->video_clock;
1304 }
1305 /* update video clock for next frame */
1306 frame_delay = av_q2d(is->video_st->codec->time_base);
1307 /* for MPEG2, the frame can be repeated, so we update the
1308 clock accordingly */
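/* repeat_pict is expressed in half-frame (field) units, hence the 0.5 factor
   in the extra delay below */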
1309 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1310 is->video_clock += frame_delay;
1311
1312 #if defined(DEBUG_SYNC) && 0
1313 {
1314 int ftype;
1315 if (src_frame->pict_type == FF_B_TYPE)
1316 ftype = 'B';
1317 else if (src_frame->pict_type == FF_I_TYPE)
1318 ftype = 'I';
1319 else
1320 ftype = 'P';
1321 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1322 ftype, pts, pts1);
1323 }
1324 #endif
1325 return queue_picture(is, src_frame, pts);
1326 }
1327
1328 static int video_thread(void *arg)
1329 {
1330 VideoState *is = arg;
1331 AVPacket pkt1, *pkt = &pkt1;
1332 int len1, got_picture;
1333 AVFrame *frame= avcodec_alloc_frame();
1334 double pts;
1335
1336 for(;;) {
1337 while (is->paused && !is->videoq.abort_request) {
1338 SDL_Delay(10);
1339 }
1340 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1341 break;
1342
1343 if(pkt->data == flush_pkt.data){
1344 avcodec_flush_buffers(is->video_st->codec);
1345 continue;
1346 }
1347
1348 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1349 this packet, if any */
1350 pts = 0;
1351 if (pkt->dts != AV_NOPTS_VALUE)
1352 pts = av_q2d(is->video_st->time_base)*pkt->dts;
1353
1354 len1 = avcodec_decode_video(is->video_st->codec,
1355 frame, &got_picture,
1356 pkt->data, pkt->size);
1357 // if (len1 < 0)
1358 // break;
1359 if (got_picture) {
1360 if (output_picture2(is, frame, pts) < 0)
1361 goto the_end;
1362 }
1363 av_free_packet(pkt);
1364 if (step)
1365 if (cur_stream)
1366 stream_pause(cur_stream);
1367 }
1368 the_end:
1369 av_free(frame);
1370 return 0;
1371 }
1372
1373 static int subtitle_thread(void *arg)
1374 {
1375 VideoState *is = arg;
1376 SubPicture *sp;
1377 AVPacket pkt1, *pkt = &pkt1;
1378 int len1, got_subtitle;
1379 double pts;
1380 int i, j;
1381 int r, g, b, y, u, v, a;
1382
1383 for(;;) {
1384 while (is->paused && !is->subtitleq.abort_request) {
1385 SDL_Delay(10);
1386 }
1387 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1388 break;
1389
1390 if(pkt->data == flush_pkt.data){
1391 avcodec_flush_buffers(is->subtitle_st->codec);
1392 continue;
1393 }
1394 SDL_LockMutex(is->subpq_mutex);
1395 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1396 !is->subtitleq.abort_request) {
1397 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1398 }
1399 SDL_UnlockMutex(is->subpq_mutex);
1400
1401 if (is->subtitleq.abort_request)
1402 goto the_end;
1403
1404 sp = &is->subpq[is->subpq_windex];
1405
1406 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1407 this packet, if any */
1408 pts = 0;
1409 if (pkt->pts != AV_NOPTS_VALUE)
1410 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1411
1412 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1413 &sp->sub, &got_subtitle,
1414 pkt->data, pkt->size);
1415 // if (len1 < 0)
1416 // break;
1417 if (got_subtitle && sp->sub.format == 0) {
1418 sp->pts = pts;
1419
1420 for (i = 0; i < sp->sub.num_rects; i++)
1421 {
1422 for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1423 {
1424 RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1425 y = RGB_TO_Y_CCIR(r, g, b);
1426 u = RGB_TO_U_CCIR(r, g, b, 0);
1427 v = RGB_TO_V_CCIR(r, g, b, 0);
1428 YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1429 }
1430 }
1431
1432 /* now we can update the picture count */
1433 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1434 is->subpq_windex = 0;
1435 SDL_LockMutex(is->subpq_mutex);
1436 is->subpq_size++;
1437 SDL_UnlockMutex(is->subpq_mutex);
1438 }
1439 av_free_packet(pkt);
1440 // if (step)
1441 // if (cur_stream)
1442 // stream_pause(cur_stream);
1443 }
1444 the_end:
1445 return 0;
1446 }
1447
1448 /* copy samples for viewing in editor window */
1449 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1450 {
1451 int size, len, channels;
1452
1453 channels = is->audio_st->codec->channels;
1454
1455 size = samples_size / sizeof(short);
1456 while (size > 0) {
1457 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1458 if (len > size)
1459 len = size;
1460 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1461 samples += len;
1462 is->sample_array_index += len;
1463 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1464 is->sample_array_index = 0;
1465 size -= len;
1466 }
1467 }
1468
1469 /* return the new audio buffer size (samples can be added or deleted
1470 to get better sync if the video or external clock is the master) */
1471 static int synchronize_audio(VideoState *is, short *samples,
1472 int samples_size1, double pts)
1473 {
1474 int n, samples_size;
1475 double ref_clock;
1476
1477 n = 2 * is->audio_st->codec->channels;
1478 samples_size = samples_size1;
1479
1480 /* if not master, then we try to remove or add samples to correct the clock */
1481 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1482 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1483 double diff, avg_diff;
1484 int wanted_size, min_size, max_size, nb_samples;
1485
1486 ref_clock = get_master_clock(is);
1487 diff = get_audio_clock(is) - ref_clock;
1488
1489 if (diff < AV_NOSYNC_THRESHOLD) {
1490 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1491 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1492 /* not enough measures to have a correct estimate */
1493 is->audio_diff_avg_count++;
1494 } else {
1495 /* estimate the A-V difference */
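/* audio_diff_cum is a geometrically weighted sum of past differences (the k-th
   most recent one has weight coef^k); since the weights sum to about
   1 / (1 - coef), multiplying by (1 - coef) turns it into a weighted average */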
1496 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1497
1498 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1499 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1500 nb_samples = samples_size / n;
1501
1502 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1503 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1504 if (wanted_size < min_size)
1505 wanted_size = min_size;
1506 else if (wanted_size > max_size)
1507 wanted_size = max_size;
1508
1509 /* add or remove samples to correct the sync */
1510 if (wanted_size < samples_size) {
1511 /* remove samples */
1512 samples_size = wanted_size;
1513 } else if (wanted_size > samples_size) {
1514 uint8_t *samples_end, *q;
1515 int nb;
1516
1517 /* add samples */
1518 nb = (wanted_size - samples_size);
1519 samples_end = (uint8_t *)samples + samples_size - n;
1520 q = samples_end + n;
1521 while (nb > 0) {
1522 memcpy(q, samples_end, n);
1523 q += n;
1524 nb -= n;
1525 }
1526 samples_size = wanted_size;
1527 }
1528 }
1529 #if 0
1530 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1531 diff, avg_diff, samples_size - samples_size1,
1532 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1533 #endif
1534 }
1535 } else {
1536 /* too big difference : may be initial PTS errors, so
1537 reset A-V filter */
1538 is->audio_diff_avg_count = 0;
1539 is->audio_diff_cum = 0;
1540 }
1541 }
1542
1543 return samples_size;
1544 }
1545
1546 /* decode one audio frame and return its uncompressed size */
1547 static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, double *pts_ptr)
1548 {
1549 AVPacket *pkt = &is->audio_pkt;
1550 int n, len1, data_size;
1551 double pts;
1552
1553 for(;;) {
1554 /* NOTE: the audio packet can contain several frames */
1555 while (is->audio_pkt_size > 0) {
1556 len1 = avcodec_decode_audio(is->audio_st->codec,
1557 (int16_t *)audio_buf, &data_size,
1558 is->audio_pkt_data, is->audio_pkt_size);
1559 if (len1 < 0) {
1560 /* if error, we skip the frame */
1561 is->audio_pkt_size = 0;
1562 break;
1563 }
1564
1565 is->audio_pkt_data += len1;
1566 is->audio_pkt_size -= len1;
1567 if (data_size <= 0)
1568 continue;
1569 /* if no pts, then compute it */
1570 pts = is->audio_clock;
1571 *pts_ptr = pts;
1572 n = 2 * is->audio_st->codec->channels;
1573 is->audio_clock += (double)data_size /
1574 (double)(n * is->audio_st->codec->sample_rate);
1575 #if defined(DEBUG_SYNC)
1576 {
1577 static double last_clock;
1578 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1579 is->audio_clock - last_clock,
1580 is->audio_clock, pts);
1581 last_clock = is->audio_clock;
1582 }
1583 #endif
1584 return data_size;
1585 }
1586
1587 /* free the current packet */
1588 if (pkt->data)
1589 av_free_packet(pkt);
1590
1591 if (is->paused || is->audioq.abort_request) {
1592 return -1;
1593 }
1594
1595 /* read next packet */
1596 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1597 return -1;
1598 if(pkt->data == flush_pkt.data){
1599 avcodec_flush_buffers(is->audio_st->codec);
1600 continue;
1601 }
1602
1603 is->audio_pkt_data = pkt->data;
1604 is->audio_pkt_size = pkt->size;
1605
1606 /* if a pts is present, update the audio clock with it */
1607 if (pkt->pts != AV_NOPTS_VALUE) {
1608 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1609 }
1610 }
1611 }
1612
1613 /* get the amount of data, in bytes, remaining in the audio output buffer. With SDL, we
1614 cannot have precise information */
1615 static int audio_write_get_buf_size(VideoState *is)
1616 {
1617 return is->audio_hw_buf_size - is->audio_buf_index;
1618 }
1619
1620
1621 /* prepare a new audio buffer */
1622 void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1623 {
1624 VideoState *is = opaque;
1625 int audio_size, len1;
1626 double pts;
1627
1628 audio_callback_time = av_gettime();
1629
1630 while (len > 0) {
1631 if (is->audio_buf_index >= is->audio_buf_size) {
1632 audio_size = audio_decode_frame(is, is->audio_buf, &pts);
1633 if (audio_size < 0) {
1634 /* if error, just output silence */
1635 is->audio_buf_size = 1024;
1636 memset(is->audio_buf, 0, is->audio_buf_size);
1637 } else {
1638 if (is->show_audio)
1639 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1640 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1641 pts);
1642 is->audio_buf_size = audio_size;
1643 }
1644 is->audio_buf_index = 0;
1645 }
1646 len1 = is->audio_buf_size - is->audio_buf_index;
1647 if (len1 > len)
1648 len1 = len;
1649 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1650 len -= len1;
1651 stream += len1;
1652 is->audio_buf_index += len1;
1653 }
1654 }
1655
1656 /* open a given stream. Return 0 if OK */
1657 static int stream_component_open(VideoState *is, int stream_index)
1658 {
1659 AVFormatContext *ic = is->ic;
1660 AVCodecContext *enc;
1661 AVCodec *codec;
1662 SDL_AudioSpec wanted_spec, spec;
1663
1664 if (stream_index < 0 || stream_index >= ic->nb_streams)
1665 return -1;
1666 enc = ic->streams[stream_index]->codec;
1667
1668 /* prepare audio output */
1669 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1670 wanted_spec.freq = enc->sample_rate;
1671 wanted_spec.format = AUDIO_S16SYS;
1672 /* hack for AC3 (force downmix to stereo). XXX: remove this */
1673 if (enc->channels > 2)
1674 enc->channels = 2;
1675 wanted_spec.channels = enc->channels;
1676 wanted_spec.silence = 0;
1677 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1678 wanted_spec.callback = sdl_audio_callback;
1679 wanted_spec.userdata = is;
1680 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1681 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1682 return -1;
1683 }
1684 is->audio_hw_buf_size = spec.size;
1685 }
1686
1687 codec = avcodec_find_decoder(enc->codec_id);
1688 enc->debug_mv = debug_mv;
1689 enc->debug = debug;
1690 enc->workaround_bugs = workaround_bugs;
1691 enc->lowres = lowres;
1692 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1693 enc->idct_algo= idct;
1694 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1695 enc->skip_frame= skip_frame;
1696 enc->skip_idct= skip_idct;
1697 enc->skip_loop_filter= skip_loop_filter;
1698 enc->error_resilience= error_resilience;
1699 enc->error_concealment= error_concealment;
1700 if (!codec ||
1701 avcodec_open(enc, codec) < 0)
1702 return -1;
1703 #if defined(HAVE_THREADS)
1704 if(thread_count>1)
1705 avcodec_thread_init(enc, thread_count);
1706 #endif
1707 enc->thread_count= thread_count;
1708 switch(enc->codec_type) {
1709 case CODEC_TYPE_AUDIO:
1710 is->audio_stream = stream_index;
1711 is->audio_st = ic->streams[stream_index];
1712 is->audio_buf_size = 0;
1713 is->audio_buf_index = 0;
1714
1715 /* init averaging filter */
1716 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
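/* with this coefficient, the weight of a past A-V difference decays to about
   1% after AUDIO_DIFF_AVG_NB newer measurements (coef^AUDIO_DIFF_AVG_NB = 0.01) */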
1717 is->audio_diff_avg_count = 0;
1718 /* since we do not have precise enough audio FIFO fullness information,
1719 we only correct audio sync when the error is larger than this threshold */
1720 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1721
1722 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1723 packet_queue_init(&is->audioq);
1724 SDL_PauseAudio(0);
1725 break;
1726 case CODEC_TYPE_VIDEO:
1727 is->video_stream = stream_index;
1728 is->video_st = ic->streams[stream_index];
1729
1730 is->frame_last_delay = 40e-3;
1731 is->frame_timer = (double)av_gettime() / 1000000.0;
1732 is->video_current_pts_time = av_gettime();
1733
1734 packet_queue_init(&is->videoq);
1735 is->video_tid = SDL_CreateThread(video_thread, is);
1736 break;
1737 case CODEC_TYPE_SUBTITLE:
1738 is->subtitle_stream = stream_index;
1739 is->subtitle_st = ic->streams[stream_index];
1740 packet_queue_init(&is->subtitleq);
1741
1742 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1743 break;
1744 default:
1745 break;
1746 }
1747 return 0;
1748 }
1749
1750 static void stream_component_close(VideoState *is, int stream_index)
1751 {
1752 AVFormatContext *ic = is->ic;
1753 AVCodecContext *enc;
1754
1755 if (stream_index < 0 || stream_index >= ic->nb_streams)
1756 return;
1757 enc = ic->streams[stream_index]->codec;
1758
1759 switch(enc->codec_type) {
1760 case CODEC_TYPE_AUDIO:
1761 packet_queue_abort(&is->audioq);
1762
1763 SDL_CloseAudio();
1764
1765 packet_queue_end(&is->audioq);
1766 break;
1767 case CODEC_TYPE_VIDEO:
1768 packet_queue_abort(&is->videoq);
1769
1770 /* note: we also signal this condition to make sure we unblock the
1771 video thread in all cases */
1772 SDL_LockMutex(is->pictq_mutex);
1773 SDL_CondSignal(is->pictq_cond);
1774 SDL_UnlockMutex(is->pictq_mutex);
1775
1776 SDL_WaitThread(is->video_tid, NULL);
1777
1778 packet_queue_end(&is->videoq);
1779 break;
1780 case CODEC_TYPE_SUBTITLE:
1781 packet_queue_abort(&is->subtitleq);
1782
1783 /* note: we also signal this condition to make sure we unblock the
1784 subtitle thread in all cases */
1785 SDL_LockMutex(is->subpq_mutex);
1786 is->subtitle_stream_changed = 1;
1787
1788 SDL_CondSignal(is->subpq_cond);
1789 SDL_UnlockMutex(is->subpq_mutex);
1790
1791 SDL_WaitThread(is->subtitle_tid, NULL);
1792
1793 packet_queue_end(&is->subtitleq);
1794 break;
1795 default:
1796 break;
1797 }
1798
1799 avcodec_close(enc);
1800 switch(enc->codec_type) {
1801 case CODEC_TYPE_AUDIO:
1802 is->audio_st = NULL;
1803 is->audio_stream = -1;
1804 break;
1805 case CODEC_TYPE_VIDEO:
1806 is->video_st = NULL;
1807 is->video_stream = -1;
1808 break;
1809 case CODEC_TYPE_SUBTITLE:
1810 is->subtitle_st = NULL;
1811 is->subtitle_stream = -1;
1812 break;
1813 default:
1814 break;
1815 }
1816 }
1817
1818 static void dump_stream_info(const AVFormatContext *s)
1819 {
1820 if (s->track != 0)
1821 fprintf(stderr, "Track: %d\n", s->track);
1822 if (s->title[0] != '\0')
1823 fprintf(stderr, "Title: %s\n", s->title);
1824 if (s->author[0] != '\0')
1825 fprintf(stderr, "Author: %s\n", s->author);
1826 if (s->copyright[0] != '\0')
1827 fprintf(stderr, "Copyright: %s\n", s->copyright);
1828 if (s->comment[0] != '\0')
1829 fprintf(stderr, "Comment: %s\n", s->comment);
1830 if (s->album[0] != '\0')
1831 fprintf(stderr, "Album: %s\n", s->album);
1832 if (s->year != 0)
1833 fprintf(stderr, "Year: %d\n", s->year);
1834 if (s->genre[0] != '\0')
1835 fprintf(stderr, "Genre: %s\n", s->genre);
1836 }
1837
1838 /* since we have only one decoding thread, we can use a global
1839 variable instead of a thread local variable */
1840 static VideoState *global_video_state;
1841
1842 static int decode_interrupt_cb(void)
1843 {
1844 return (global_video_state && global_video_state->abort_request);
1845 }
1846
1847 /* this thread gets the stream from the disk or the network */
1848 static int decode_thread(void *arg)
1849 {
1850 VideoState *is = arg;
1851 AVFormatContext *ic;
1852 int err, i, ret, video_index, audio_index, use_play;
1853 AVPacket pkt1, *pkt = &pkt1;
1854 AVFormatParameters params, *ap = &params;
1855
1856 video_index = -1;
1857 audio_index = -1;
1858 is->video_stream = -1;
1859 is->audio_stream = -1;
1860 is->subtitle_stream = -1;
1861
1862 global_video_state = is;
1863 url_set_interrupt_cb(decode_interrupt_cb);
1864
1865 memset(ap, 0, sizeof(*ap));
1866 ap->initial_pause = 1; /* we force a pause when starting an RTSP
1867 stream */
1868
1869 ap->width = screen_width;
1870 ap->height= screen_height;
1871 ap->time_base= (AVRational){1, 25};
1872
1873 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1874 if (err < 0) {
1875 print_error(is->filename, err);
1876 ret = -1;
1877 goto fail;
1878 }
1879 is->ic = ic;
1880 #ifdef CONFIG_NETWORK
1881 use_play = (ic->iformat == &rtsp_demuxer);
1882 #else
1883 use_play = 0;
1884 #endif
1885
1886 if(genpts)
1887 ic->flags |= AVFMT_FLAG_GENPTS;
1888
1889 if (!use_play) {
1890 err = av_find_stream_info(ic);
1891 if (err < 0) {
1892 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1893 ret = -1;
1894 goto fail;
1895 }
1896 ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe shouldn't use url_feof() to test for the end
1897 }
1898
1899 /* if seeking requested, we execute it */
1900 if (start_time != AV_NOPTS_VALUE) {
1901 int64_t timestamp;
1902
1903 timestamp = start_time;
1904 /* add the stream start time */
1905 if (ic->start_time != AV_NOPTS_VALUE)
1906 timestamp += ic->start_time;
1907 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1908 if (ret < 0) {
1909 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1910 is->filename, (double)timestamp / AV_TIME_BASE);
1911 }
1912 }
1913
1914 /* now we can begin to play (RTSP stream only) */
1915 av_read_play(ic);
1916
1917 if (use_play) {
1918 err = av_find_stream_info(ic);
1919 if (err < 0) {
1920 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1921 ret = -1;
1922 goto fail;
1923 }
1924 }
1925
1926 for(i = 0; i < ic->nb_streams; i++) {
1927 AVCodecContext *enc = ic->streams[i]->codec;
1928 switch(enc->codec_type) {
1929 case CODEC_TYPE_AUDIO:
1930 if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1931 audio_index = i;
1932 break;
1933 case CODEC_TYPE_VIDEO:
1934 if (video_index < 0 && !video_disable)
1935 video_index = i;
1936 break;
1937 default:
1938 break;
1939 }
1940 }
1941 if (show_status) {
1942 dump_format(ic, 0, is->filename, 0);
1943 dump_stream_info(ic);
1944 }
1945
1946 /* open the streams */
1947 if (audio_index >= 0) {
1948 stream_component_open(is, audio_index);
1949 }
1950
1951 if (video_index >= 0) {
1952 stream_component_open(is, video_index);
1953 } else {
1954 if (!display_disable)
1955 is->show_audio = 1;
1956 }
1957
1958 if (is->video_stream < 0 && is->audio_stream < 0) {
1959 fprintf(stderr, "%s: could not open codecs\n", is->filename);
1960 ret = -1;
1961 goto fail;
1962 }
1963
1964 for(;;) {
1965 if (is->abort_request)
1966 break;
1967 #ifdef CONFIG_NETWORK
1968 if (is->paused != is->last_paused) {
1969 is->last_paused = is->paused;
1970 if (is->paused)
1971 av_read_pause(ic);
1972 else
1973 av_read_play(ic);
1974 }
1975 if (is->paused && ic->iformat == &rtsp_demuxer) {
1976 /* wait 10 ms to avoid trying to get another packet */
1977 /* XXX: horrible */
1978 SDL_Delay(10);
1979 continue;
1980 }
1981 #endif
1982 if (is->seek_req) {
1983 int stream_index= -1;
1984 int64_t seek_target= is->seek_pos;
1985
1986 if (is-> video_stream >= 0) stream_index= is-> video_stream;
1987 else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
1988 else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
1989
1990 if(stream_index>=0){
1991 seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
1992 }
1993
1994 ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
1995 if (ret < 0) {
1996 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
1997 }else{
1998 if (is->audio_stream >= 0) {
1999 packet_queue_flush(&is->audioq);
2000 packet_queue_put(&is->audioq, &flush_pkt);
2001 }
2002 if (is->subtitle_stream >= 0) {
2003 packet_queue_flush(&is->subtitleq);
2004 packet_queue_put(&is->subtitleq, &flush_pkt);
2005 }
2006 if (is->video_stream >= 0) {
2007 packet_queue_flush(&is->videoq);
2008 packet_queue_put(&is->videoq, &flush_pkt);
2009 }
2010 }
2011 is->seek_req = 0;
2012 }
2013
2014 /* if the queues are full, no need to read more */
2015 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2016 is->videoq.size > MAX_VIDEOQ_SIZE ||
2017 is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
2018 url_feof(&ic->pb)) {
2019 /* wait 10 ms */
2020 SDL_Delay(10);
2021 continue;
2022 }
2023 ret = av_read_frame(ic, pkt);
2024 if (ret < 0) {
2025 if (url_ferror(&ic->pb) == 0) {
2026 SDL_Delay(100); /* wait for user event */
2027 continue;
2028 } else
2029 break;
2030 }
2031 if (pkt->stream_index == is->audio_stream) {
2032 packet_queue_put(&is->audioq, pkt);
2033 } else if (pkt->stream_index == is->video_stream) {
2034 packet_queue_put(&is->videoq, pkt);
2035 } else if (pkt->stream_index == is->subtitle_stream) {
2036 packet_queue_put(&is->subtitleq, pkt);
2037 } else {
2038 av_free_packet(pkt);
2039 }
2040 }
2041     /* read loop finished: wait here until an abort is requested */
2042 while (!is->abort_request) {
2043 SDL_Delay(100);
2044 }
2045
2046 ret = 0;
2047 fail:
2048 /* disable interrupting */
2049 global_video_state = NULL;
2050
2051 /* close each stream */
2052 if (is->audio_stream >= 0)
2053 stream_component_close(is, is->audio_stream);
2054 if (is->video_stream >= 0)
2055 stream_component_close(is, is->video_stream);
2056 if (is->subtitle_stream >= 0)
2057 stream_component_close(is, is->subtitle_stream);
2058 if (is->ic) {
2059 av_close_input_file(is->ic);
2060 is->ic = NULL; /* safety */
2061 }
2062 url_set_interrupt_cb(NULL);
2063
2064 if (ret != 0) {
2065 SDL_Event event;
2066
2067 event.type = FF_QUIT_EVENT;
2068 event.user.data1 = is;
2069 SDL_PushEvent(&event);
2070 }
2071 return 0;
2072 }
2073
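/* allocate and initialize a VideoState for the given file: create the
   picture/subpicture mutexes and condition variables, schedule the first
   video refresh and spawn the decode thread; returns NULL on failure */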
2074 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2075 {
2076 VideoState *is;
2077
2078 is = av_mallocz(sizeof(VideoState));
2079 if (!is)
2080 return NULL;
2081 pstrcpy(is->filename, sizeof(is->filename), filename);
2082 is->iformat = iformat;
2083 is->ytop = 0;
2084 is->xleft = 0;
2085
2086 /* start video display */
2087 is->pictq_mutex = SDL_CreateMutex();
2088 is->pictq_cond = SDL_CreateCond();
2089
2090 is->subpq_mutex = SDL_CreateMutex();
2091 is->subpq_cond = SDL_CreateCond();
2092
2093 /* add the refresh timer to draw the picture */
2094 schedule_refresh(is, 40);
2095
2096 is->av_sync_type = av_sync_type;
2097 is->parse_tid = SDL_CreateThread(decode_thread, is);
2098 if (!is->parse_tid) {
2099 av_free(is);
2100 return NULL;
2101 }
2102 return is;
2103 }
2104
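/* ask the decode thread to abort and wait for it, then free the video
   overlays and destroy the synchronization primitives */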
2105 static void stream_close(VideoState *is)
2106 {
2107 VideoPicture *vp;
2108 int i;
2109 /* XXX: use a special url_shutdown call to abort parse cleanly */
2110 is->abort_request = 1;
2111 SDL_WaitThread(is->parse_tid, NULL);
2112
2113 /* free all pictures */
2114 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2115 vp = &is->pictq[i];
2116 if (vp->bmp) {
2117 SDL_FreeYUVOverlay(vp->bmp);
2118 vp->bmp = NULL;
2119 }
2120 }
2121 SDL_DestroyMutex(is->pictq_mutex);
2122 SDL_DestroyCond(is->pictq_cond);
2123 SDL_DestroyMutex(is->subpq_mutex);
2124 SDL_DestroyCond(is->subpq_cond);
2125 }
2126
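/* switch to the next usable stream of the given codec type: search the
   streams after the current one (wrapping around) and reopen the component
   on the first match; for subtitles the index may wrap to -1, i.e. no
   subtitle stream */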
2127 static void stream_cycle_channel(VideoState *is, int codec_type)
2128 {
2129 AVFormatContext *ic = is->ic;
2130 int start_index, stream_index;
2131 AVStream *st;
2132
2133 if (codec_type == CODEC_TYPE_VIDEO)
2134 start_index = is->video_stream;
2135 else if (codec_type == CODEC_TYPE_AUDIO)
2136 start_index = is->audio_stream;
2137 else
2138 start_index = is->subtitle_stream;
2139 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2140 return;
2141 stream_index = start_index;
2142 for(;;) {
2143 if (++stream_index >= is->ic->nb_streams)
2144 {
2145 if (codec_type == CODEC_TYPE_SUBTITLE)
2146 {
2147 stream_index = -1;
2148 goto the_end;
2149 } else
2150 stream_index = 0;
2151 }
2152 if (stream_index == start_index)
2153 return;
2154 st = ic->streams[stream_index];
2155 if (st->codec->codec_type == codec_type) {
2156 /* check that parameters are OK */
2157 switch(codec_type) {
2158 case CODEC_TYPE_AUDIO:
2159 if (st->codec->sample_rate != 0 &&
2160 st->codec->channels != 0)
2161 goto the_end;
2162 break;
2163 case CODEC_TYPE_VIDEO:
2164 case CODEC_TYPE_SUBTITLE:
2165 goto the_end;
2166 default:
2167 break;
2168 }
2169 }
2170 }
2171 the_end:
2172 stream_component_close(is, start_index);
2173 stream_component_open(is, stream_index);
2174 }
2175
2176
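/* toggle the fullscreen flag and reopen the video surface so the change
   takes effect */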
2177 static void toggle_full_screen(void)
2178 {
2179 is_full_screen = !is_full_screen;
2180 if (!fs_screen_width) {
2181 /* use default SDL method */
2182 // SDL_WM_ToggleFullScreen(screen);
2183 }
2184 video_open(cur_stream);
2185 }
2186
2187 static void toggle_pause(void)
2188 {
2189 if (cur_stream)
2190 stream_pause(cur_stream);
2191 step = 0;
2192 }
2193
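/* frame stepping: resume playback if paused, resync the current video pts
   from the video clock and raise the single-step flag */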
2194 static void step_to_next_frame(void)
2195 {
2196 if (cur_stream) {
2197 if (cur_stream->paused)
2198 cur_stream->paused=0;
2199 cur_stream->video_current_pts = get_video_clock(cur_stream);
2200 }
2201 step = 1;
2202 }
2203
2204 static void do_exit(void)
2205 {
2206 if (cur_stream) {
2207 stream_close(cur_stream);
2208 cur_stream = NULL;
2209 }
2210 if (show_status)
2211 printf("\n");
2212 SDL_Quit();
2213 exit(0);
2214 }
2215
2216 static void toggle_audio_display(void)
2217 {
2218 if (cur_stream) {
2219 cur_stream->show_audio = !cur_stream->show_audio;
2220 }
2221 }
2222
2223 /* main event loop: handle keyboard, mouse and window events from SDL as well as the events posted by the other threads */
2224 static void event_loop(void)
2225 {
2226 SDL_Event event;
2227 double incr, pos, frac;
2228
2229 for(;;) {
2230 SDL_WaitEvent(&event);
2231 switch(event.type) {
2232 case SDL_KEYDOWN:
2233 switch(event.key.keysym.sym) {
2234 case SDLK_ESCAPE:
2235 case SDLK_q:
2236 do_exit();
2237 break;
2238 case SDLK_f:
2239 toggle_full_screen();
2240 break;
2241 case SDLK_p:
2242 case SDLK_SPACE:
2243 toggle_pause();
2244 break;
2245 case SDLK_s: //S: Step to next frame
2246 step_to_next_frame();
2247 break;
2248 case SDLK_a:
2249 if (cur_stream)
2250 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2251 break;
2252 case SDLK_v:
2253 if (cur_stream)
2254 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2255 break;
2256 case SDLK_t:
2257 if (cur_stream)
2258 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2259 break;
2260 case SDLK_w:
2261 toggle_audio_display();
2262 break;
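            /* arrow keys seek relative to the current position, either in
               time or, with -bytes, in bytes estimated from the bit rate */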
2263 case SDLK_LEFT:
2264 incr = -10.0;
2265 goto do_seek;
2266 case SDLK_RIGHT:
2267 incr = 10.0;
2268 goto do_seek;
2269 case SDLK_UP:
2270 incr = 60.0;
2271 goto do_seek;
2272 case SDLK_DOWN:
2273 incr = -60.0;
2274 do_seek:
2275 if (cur_stream) {
2276 if (seek_by_bytes) {
2277 pos = url_ftell(&cur_stream->ic->pb);
2278 if (cur_stream->ic->bit_rate)
2279 incr *= cur_stream->ic->bit_rate / 60.0;
2280 else
2281 incr *= 180000.0;
2282 pos += incr;
2283 stream_seek(cur_stream, pos, incr);
2284 } else {
2285 pos = get_master_clock(cur_stream);
2286 pos += incr;
2287 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2288 }
2289 }
2290 break;
2291 default:
2292 break;
2293 }
2294 break;
2295 case SDL_MOUSEBUTTONDOWN:
2296 if (cur_stream) {
2297 int ns, hh, mm, ss;
2298 int tns, thh, tmm, tss;
2299 tns = cur_stream->ic->duration/1000000LL;
2300 thh = tns/3600;
2301 tmm = (tns%3600)/60;
2302 tss = (tns%60);
2303 frac = (double)event.button.x/(double)cur_stream->width;
2304 ns = frac*tns;
2305 hh = ns/3600;
2306 mm = (ns%3600)/60;
2307 ss = (ns%60);
2308 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2309 hh, mm, ss, thh, tmm, tss);
2310 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2311 }
2312 break;
2313 case SDL_VIDEORESIZE:
2314 if (cur_stream) {
2315 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2316 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2317 screen_width = cur_stream->width = event.resize.w;
2318 screen_height= cur_stream->height= event.resize.h;
2319 }
2320 break;
2321 case SDL_QUIT:
2322 case FF_QUIT_EVENT:
2323 do_exit();
2324 break;
2325 case FF_ALLOC_EVENT:
2326 video_open(event.user.data1);
2327 alloc_picture(event.user.data1);
2328 break;
2329 case FF_REFRESH_EVENT:
2330 video_refresh_timer(event.user.data1);
2331 break;
2332 default:
2333 break;
2334 }
2335 }
2336 }
2337
2338 void opt_width(const char *arg)
2339 {
2340 screen_width = atoi(arg);
2341 if(screen_width<=0){
2342 fprintf(stderr, "invalid width\n");
2343 exit(1);
2344 }
2345 }
2346
2347 void opt_height(const char *arg)
2348 {
2349 screen_height = atoi(arg);
2350 if(screen_height<=0){
2351 fprintf(stderr, "invalid height\n");
2352 exit(1);
2353 }
2354 }
2355
2356 static void opt_format(const char *arg)
2357 {
2358 file_iformat = av_find_input_format(arg);
2359 if (!file_iformat) {
2360 fprintf(stderr, "Unknown input format: %s\n", arg);
2361 exit(1);
2362 }
2363 }
2364
2365 #ifdef CONFIG_NETWORK
2366 void opt_rtp_tcp(void)
2367 {
2368 /* only tcp protocol */
2369 rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
2370 }
2371 #endif
2372
2373 void opt_sync(const char *arg)
2374 {
2375 if (!strcmp(arg, "audio"))
2376 av_sync_type = AV_SYNC_AUDIO_MASTER;
2377 else if (!strcmp(arg, "video"))
2378 av_sync_type = AV_SYNC_VIDEO_MASTER;
2379 else if (!strcmp(arg, "ext"))
2380 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2381 else
2382 show_help();
2383 }
2384
2385 void opt_seek(const char *arg)
2386 {
2387 start_time = parse_date(arg, 1);
2388 }
2389
2390 static void opt_debug(const char *arg)
2391 {
2392 av_log_level = 99;
2393 debug = atoi(arg);
2394 }
2395
2396 static void opt_vismv(const char *arg)
2397 {
2398 debug_mv = atoi(arg);
2399 }
2400
2401 static void opt_thread_count(const char *arg)
2402 {
2403 thread_count= atoi(arg);
2404 #if !defined(HAVE_THREADS)
2405 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2406 #endif
2407 }
2408
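/* command line options, handled by parse_options() and show_help_options() */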
2409 const OptionDef options[] = {
2410 { "h", 0, {(void*)show_help}, "show help" },
2411 { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2412 { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2413 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2414 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2415 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2416     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
2417 { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2418 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2419 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2420 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2421 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2422 { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2423 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2424 { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2425 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2426 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2427 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2428 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2429 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2430 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2431 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2432 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2433 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2434 #ifdef CONFIG_NETWORK
2435 { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
2436 #endif
2437 { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2438 { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2439 { NULL, },
2440 };
2441
2442 void show_help(void)
2443 {
2444 printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003-2006 Fabrice Bellard, et al.\n"
2445 "usage: ffplay [options] input_file\n"
2446 "Simple media player\n");
2447 printf("\n");
2448 show_help_options(options, "Main options:\n",
2449 OPT_EXPERT, 0);
2450 show_help_options(options, "\nAdvanced options:\n",
2451 OPT_EXPERT, OPT_EXPERT);
2452 printf("\nWhile playing:\n"
2453 "q, ESC quit\n"
2454 "f toggle full screen\n"
2455            "p, SPC              pause\n"
                "s                   step to next frame\n"
2456 "a cycle audio channel\n"
2457 "v cycle video channel\n"
2458 "t cycle subtitle channel\n"
2459 "w show audio waves\n"
2460 "left/right seek backward/forward 10 seconds\n"
2461 "down/up seek backward/forward 1 minute\n"
2462            "mouse click         seek to the position in the file corresponding to the fraction of the window width\n"
2463 );
2464 exit(1);
2465 }
2466
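/* a file name of "-" means reading from standard input ("pipe:") */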
2467 void parse_arg_file(const char *filename)
2468 {
2469 if (!strcmp(filename, "-"))
2470 filename = "pipe:";
2471 input_filename = filename;
2472 }
2473
2474 /* program entry point */
2475 int main(int argc, char **argv)
2476 {
2477 int flags;
2478
2479 /* register all codecs, demux and protocols */
2480 av_register_all();
2481
2482 #ifdef CONFIG_OS2
2483 MorphToPM(); // Morph the VIO application to a PM one to be able to use Win* functions
2484
2485 // Make stdout and stderr unbuffered
2486 setbuf( stdout, NULL );
2487 setbuf( stderr, NULL );
2488 #endif
2489
2490 parse_options(argc, argv, options);
2491
2492 if (!input_filename)
2493 show_help();
2494
2495 if (display_disable) {
2496 video_disable = 1;
2497 }
2498 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2499 #if !defined(__MINGW32__) && !defined(CONFIG_DARWIN)
2500 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 or darwin */
2501 #endif
2502 if (SDL_Init (flags)) {
2503 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2504 exit(1);
2505 }
2506
2507 if (!display_disable) {
2508 #ifdef HAVE_SDL_VIDEO_SIZE
2509 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2510 fs_screen_width = vi->current_w;
2511 fs_screen_height = vi->current_h;
2512 #endif
2513 }
2514
2515 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2516 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2517 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2518 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2519
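    /* dummy packet queued after a seek (see decode_thread) to mark the
       discontinuity in the packet queues */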
2520 av_init_packet(&flush_pkt);
2521 flush_pkt.data= "FLUSH";
2522
2523 cur_stream = stream_open(input_filename, file_iformat);
2524
2525 event_loop();
2526
2527     /* never reached: event_loop() does not return */
2528
2529 return 0;
2530 }