1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19 #define HAVE_AV_CONFIG_H
20 #include "avformat.h"
21
22 #include "cmdutils.h"
23
24 #include <SDL.h>
25 #include <SDL_thread.h>
26
27 #ifdef CONFIG_WIN32
28 #undef main /* We don't want SDL to override our main() */
29 #endif
30
31 #ifdef CONFIG_OS2
32 #define INCL_DOS
33 #include <os2.h>
34 #include <stdio.h>
35
36 void MorphToPM()
37 {
38 PPIB pib;
39 PTIB tib;
40
41 DosGetInfoBlocks(&tib, &pib);
42
43 // Change flag from VIO to PM:
44 if (pib->pib_ultype==2) pib->pib_ultype = 3;
45 }
46 #endif
47
48 //#define DEBUG_SYNC
49
50 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
51 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
52 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
53
54 /* SDL audio buffer size, in samples. Should be small to have precise
55 A/V sync as SDL does not have hardware buffer fullness info. */
56 #define SDL_AUDIO_BUFFER_SIZE 1024
57
58 /* no AV sync correction is done if below the AV sync threshold */
59 #define AV_SYNC_THRESHOLD 0.01
60 /* no AV correction is done if too big error */
61 #define AV_NOSYNC_THRESHOLD 10.0
62
63 /* maximum audio speed change to get correct sync */
64 #define SAMPLE_CORRECTION_PERCENT_MAX 10
65
66 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
67 #define AUDIO_DIFF_AVG_NB 20
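
/* Illustrative note (not part of the original source): the A-V difference is
   smoothed with an exponential filter, audio_diff_cum = diff + coef *
   audio_diff_cum, where coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) ~= 0.79, so
   the weight of a measurement decays to 1% after AUDIO_DIFF_AVG_NB callbacks
   and the running average is read back as audio_diff_cum * (1 - coef) in
   synchronize_audio(). */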
68
69 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
70 #define SAMPLE_ARRAY_SIZE (2*65536)
71
72 typedef struct PacketQueue {
73 AVPacketList *first_pkt, *last_pkt;
74 int nb_packets;
75 int size;
76 int abort_request;
77 SDL_mutex *mutex;
78 SDL_cond *cond;
79 } PacketQueue;
80
81 #define VIDEO_PICTURE_QUEUE_SIZE 1
82 #define SUBPICTURE_QUEUE_SIZE 4
83
84 typedef struct VideoPicture {
85 double pts; ///<presentation time stamp for this picture
86 SDL_Overlay *bmp;
87 int width, height; /* source height & width */
88 int allocated;
89 } VideoPicture;
90
91 typedef struct SubPicture {
92 double pts; /* presentation time stamp for this picture */
93 AVSubtitle sub;
94 } SubPicture;
95
96 enum {
97 AV_SYNC_AUDIO_MASTER, /* default choice */
98 AV_SYNC_VIDEO_MASTER,
99 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
100 };
101
102 typedef struct VideoState {
103 SDL_Thread *parse_tid;
104 SDL_Thread *video_tid;
105 AVInputFormat *iformat;
106 int no_background;
107 int abort_request;
108 int paused;
109 int last_paused;
110 int seek_req;
111 int seek_flags;
112 int64_t seek_pos;
113 AVFormatContext *ic;
114 int dtg_active_format;
115
116 int audio_stream;
117
118 int av_sync_type;
119 double external_clock; /* external clock base */
120 int64_t external_clock_time;
121
122 double audio_clock;
123 double audio_diff_cum; /* used for AV difference average computation */
124 double audio_diff_avg_coef;
125 double audio_diff_threshold;
126 int audio_diff_avg_count;
127 AVStream *audio_st;
128 PacketQueue audioq;
129 int audio_hw_buf_size;
130 /* samples output by the codec. we reserve more space for avsync
131 compensation */
132 uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
133 unsigned int audio_buf_size; /* in bytes */
134 int audio_buf_index; /* in bytes */
135 AVPacket audio_pkt;
136 uint8_t *audio_pkt_data;
137 int audio_pkt_size;
138
139 int show_audio; /* if true, display audio samples */
140 int16_t sample_array[SAMPLE_ARRAY_SIZE];
141 int sample_array_index;
142 int last_i_start;
143
144 SDL_Thread *subtitle_tid;
145 int subtitle_stream;
146 int subtitle_stream_changed;
147 AVStream *subtitle_st;
148 PacketQueue subtitleq;
149 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
150 int subpq_size, subpq_rindex, subpq_windex;
151 SDL_mutex *subpq_mutex;
152 SDL_cond *subpq_cond;
153
154 double frame_timer;
155 double frame_last_pts;
156 double frame_last_delay;
157 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
158 int video_stream;
159 AVStream *video_st;
160 PacketQueue videoq;
161 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
162 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
163 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
164 int pictq_size, pictq_rindex, pictq_windex;
165 SDL_mutex *pictq_mutex;
166 SDL_cond *pictq_cond;
167
168 SDL_mutex *video_decoder_mutex;
169 SDL_mutex *audio_decoder_mutex;
170 SDL_mutex *subtitle_decoder_mutex;
171
172 // QETimer *video_timer;
173 char filename[1024];
174 int width, height, xleft, ytop;
175 } VideoState;
176
177 void show_help(void);
178 static int audio_write_get_buf_size(VideoState *is);
179
180 /* options specified by the user */
181 static AVInputFormat *file_iformat;
182 static AVImageFormat *image_format;
183 static const char *input_filename;
184 static int fs_screen_width;
185 static int fs_screen_height;
186 static int screen_width = 640;
187 static int screen_height = 480;
188 static int audio_disable;
189 static int video_disable;
190 static int display_disable;
191 static int show_status;
192 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
193 static int64_t start_time = AV_NOPTS_VALUE;
194 static int debug = 0;
195 static int debug_mv = 0;
196 static int step = 0;
197 static int thread_count = 1;
198 static int workaround_bugs = 1;
199 static int fast = 0;
200 static int genpts = 0;
201 static int lowres = 0;
202 static int idct = FF_IDCT_AUTO;
203 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
204 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
205 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
206 static int error_resilience = FF_ER_CAREFUL;
207 static int error_concealment = 3;
208
209 /* current context */
210 static int is_full_screen;
211 static VideoState *cur_stream;
212 static int64_t audio_callback_time;
213
214 #define FF_ALLOC_EVENT (SDL_USEREVENT)
215 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
216 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
217
218 SDL_Surface *screen;
219
220 /* packet queue handling */
221 static void packet_queue_init(PacketQueue *q)
222 {
223 memset(q, 0, sizeof(PacketQueue));
224 q->mutex = SDL_CreateMutex();
225 q->cond = SDL_CreateCond();
226 }
227
228 static void packet_queue_flush(PacketQueue *q)
229 {
230 AVPacketList *pkt, *pkt1;
231
232 SDL_LockMutex(q->mutex);
233 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
234 pkt1 = pkt->next;
235 av_free_packet(&pkt->pkt);
236 av_freep(&pkt);
237 }
238 q->last_pkt = NULL;
239 q->first_pkt = NULL;
240 q->nb_packets = 0;
241 q->size = 0;
242 SDL_UnlockMutex(q->mutex);
243 }
244
245 static void packet_queue_end(PacketQueue *q)
246 {
247 packet_queue_flush(q);
248 SDL_DestroyMutex(q->mutex);
249 SDL_DestroyCond(q->cond);
250 }
251
252 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
253 {
254 AVPacketList *pkt1;
255
256 /* duplicate the packet */
257 if (av_dup_packet(pkt) < 0)
258 return -1;
259
260 pkt1 = av_malloc(sizeof(AVPacketList));
261 if (!pkt1)
262 return -1;
263 pkt1->pkt = *pkt;
264 pkt1->next = NULL;
265
266
267 SDL_LockMutex(q->mutex);
268
269 if (!q->last_pkt)
270
271 q->first_pkt = pkt1;
272 else
273 q->last_pkt->next = pkt1;
274 q->last_pkt = pkt1;
275 q->nb_packets++;
276 q->size += pkt1->pkt.size;
277 /* XXX: should duplicate packet data in DV case */
278 SDL_CondSignal(q->cond);
279
280 SDL_UnlockMutex(q->mutex);
281 return 0;
282 }
283
284 static void packet_queue_abort(PacketQueue *q)
285 {
286 SDL_LockMutex(q->mutex);
287
288 q->abort_request = 1;
289
290 SDL_CondSignal(q->cond);
291
292 SDL_UnlockMutex(q->mutex);
293 }
294
295 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
296 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
297 {
298 AVPacketList *pkt1;
299 int ret;
300
301 SDL_LockMutex(q->mutex);
302
303 for(;;) {
304 if (q->abort_request) {
305 ret = -1;
306 break;
307 }
308
309 pkt1 = q->first_pkt;
310 if (pkt1) {
311 q->first_pkt = pkt1->next;
312 if (!q->first_pkt)
313 q->last_pkt = NULL;
314 q->nb_packets--;
315 q->size -= pkt1->pkt.size;
316 *pkt = pkt1->pkt;
317 av_free(pkt1);
318 ret = 1;
319 break;
320 } else if (!block) {
321 ret = 0;
322 break;
323 } else {
324 SDL_CondWait(q->cond, q->mutex);
325 }
326 }
327 SDL_UnlockMutex(q->mutex);
328 return ret;
329 }
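
/* Illustrative sketch (not part of the original source) of how the packet
   queue is meant to be used: the demuxer thread is the producer and each
   decoder thread is a blocking consumer. 'q' is assumed to have been set up
   with packet_queue_init(). */
#if 0
static void packet_queue_usage_example(PacketQueue *q, AVFormatContext *ic)
{
    AVPacket pkt;

    /* producer side (see decode_thread below) */
    while (av_read_frame(ic, &pkt) >= 0)
        packet_queue_put(q, &pkt);

    /* consumer side (see video_thread / audio_decode_frame below) */
    for (;;) {
        if (packet_queue_get(q, &pkt, 1) < 0)
            break; /* queue was aborted */
        /* ... decode pkt ... */
        av_free_packet(&pkt);
    }
}
#endif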
330
331 static inline void fill_rectangle(SDL_Surface *screen,
332 int x, int y, int w, int h, int color)
333 {
334 SDL_Rect rect;
335 rect.x = x;
336 rect.y = y;
337 rect.w = w;
338 rect.h = h;
339 SDL_FillRect(screen, &rect, color);
340 }
341
342 #if 0
343 /* draw only the border of a rectangle */
344 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
345 {
346 int w1, w2, h1, h2;
347
348 /* fill the background */
349 w1 = x;
350 if (w1 < 0)
351 w1 = 0;
352 w2 = s->width - (x + w);
353 if (w2 < 0)
354 w2 = 0;
355 h1 = y;
356 if (h1 < 0)
357 h1 = 0;
358 h2 = s->height - (y + h);
359 if (h2 < 0)
360 h2 = 0;
361 fill_rectangle(screen,
362 s->xleft, s->ytop,
363 w1, s->height,
364 color);
365 fill_rectangle(screen,
366 s->xleft + s->width - w2, s->ytop,
367 w2, s->height,
368 color);
369 fill_rectangle(screen,
370 s->xleft + w1, s->ytop,
371 s->width - w1 - w2, h1,
372 color);
373 fill_rectangle(screen,
374 s->xleft + w1, s->ytop + s->height - h2,
375 s->width - w1 - w2, h2,
376 color);
377 }
378 #endif
379
380
381
382 #define SCALEBITS 10
383 #define ONE_HALF (1 << (SCALEBITS - 1))
384 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
385
386 #define RGB_TO_Y_CCIR(r, g, b) \
387 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
388 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
389
390 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
391 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
392 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
393
394 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
395 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
396 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
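
/* Illustrative check (not part of the original source): these macros produce
   CCIR601 limited-range values; for example pure white maps to Y = 235 and
   the chroma of any grey stays at the neutral value 128. */
#if 0
static void ccir_macros_example(void)
{
    int y = RGB_TO_Y_CCIR(255, 255, 255);    /* 235 */
    int u = RGB_TO_U_CCIR(255, 255, 255, 0); /* 128 */
    int v = RGB_TO_V_CCIR(255, 255, 255, 0); /* 128 */
    (void)y; (void)u; (void)v;
}
#endif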
397
398 #define ALPHA_BLEND(a, oldp, newp, s)\
399 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
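
/* Illustrative note (not part of the original source): ALPHA_BLEND is a fixed
   point weighted average, roughly oldp*(255-a)/255 + newp*a/255; for example
   ALPHA_BLEND(255, oldp, newp, 0) == newp and ALPHA_BLEND(0, oldp, newp, 0) ==
   oldp. The shift s lets newp be the sum of 1<<s subsampled values, with oldp
   scaled up by 1<<s to match, which is how the chroma planes are blended in
   blend_subrect() below. */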
400
401 #define RGBA_IN(r, g, b, a, s)\
402 {\
403 unsigned int v = ((const uint32_t *)(s))[0];\
404 a = (v >> 24) & 0xff;\
405 r = (v >> 16) & 0xff;\
406 g = (v >> 8) & 0xff;\
407 b = v & 0xff;\
408 }
409
410 #define YUVA_IN(y, u, v, a, s, pal)\
411 {\
412 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
413 a = (val >> 24) & 0xff;\
414 y = (val >> 16) & 0xff;\
415 u = (val >> 8) & 0xff;\
416 v = val & 0xff;\
417 }
418
419 #define YUVA_OUT(d, y, u, v, a)\
420 {\
421 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
422 }
423
424
425 #define BPP 1
426
427 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
428 {
429 int wrap, wrap3, width2, skip2;
430 int y, u, v, a, u1, v1, a1, w, h;
431 uint8_t *lum, *cb, *cr;
432 const uint8_t *p;
433 const uint32_t *pal;
434
435 lum = dst->data[0] + rect->y * dst->linesize[0];
436 cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
437 cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];
438
439 width2 = (rect->w + 1) >> 1;
440 skip2 = rect->x >> 1;
441 wrap = dst->linesize[0];
442 wrap3 = rect->linesize;
443 p = rect->bitmap;
444 pal = rect->rgba_palette; /* Now in YCrCb! */
445
446 if (rect->y & 1) {
447 lum += rect->x;
448 cb += skip2;
449 cr += skip2;
450
451 if (rect->x & 1) {
452 YUVA_IN(y, u, v, a, p, pal);
453 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
454 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
455 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
456 cb++;
457 cr++;
458 lum++;
459 p += BPP;
460 }
461 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
462 YUVA_IN(y, u, v, a, p, pal);
463 u1 = u;
464 v1 = v;
465 a1 = a;
466 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
467
468 YUVA_IN(y, u, v, a, p + BPP, pal);
469 u1 += u;
470 v1 += v;
471 a1 += a;
472 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
473 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
474 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
475 cb++;
476 cr++;
477 p += 2 * BPP;
478 lum += 2;
479 }
480 if (w) {
481 YUVA_IN(y, u, v, a, p, pal);
482 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
483 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
484 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
485 }
486 p += wrap3 + (wrap3 - rect->w * BPP);
487 lum += wrap + (wrap - rect->w - rect->x);
488 cb += dst->linesize[1] - width2 - skip2;
489 cr += dst->linesize[2] - width2 - skip2;
490 }
491 for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
492 lum += rect->x;
493 cb += skip2;
494 cr += skip2;
495
496 if (rect->x & 1) {
497 YUVA_IN(y, u, v, a, p, pal);
498 u1 = u;
499 v1 = v;
500 a1 = a;
501 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
502 p += wrap3;
503 lum += wrap;
504 YUVA_IN(y, u, v, a, p, pal);
505 u1 += u;
506 v1 += v;
507 a1 += a;
508 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
510 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
511 cb++;
512 cr++;
513 p += -wrap3 + BPP;
514 lum += -wrap + 1;
515 }
516 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
517 YUVA_IN(y, u, v, a, p, pal);
518 u1 = u;
519 v1 = v;
520 a1 = a;
521 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522
523 YUVA_IN(y, u, v, a, p, pal);
524 u1 += u;
525 v1 += v;
526 a1 += a;
527 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
528 p += wrap3;
529 lum += wrap;
530
531 YUVA_IN(y, u, v, a, p, pal);
532 u1 += u;
533 v1 += v;
534 a1 += a;
535 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
536
537 YUVA_IN(y, u, v, a, p, pal);
538 u1 += u;
539 v1 += v;
540 a1 += a;
541 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
542
543 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
544 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
545
546 cb++;
547 cr++;
548 p += -wrap3 + 2 * BPP;
549 lum += -wrap + 2;
550 }
551 if (w) {
552 YUVA_IN(y, u, v, a, p, pal);
553 u1 = u;
554 v1 = v;
555 a1 = a;
556 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
557 p += wrap3;
558 lum += wrap;
559 YUVA_IN(y, u, v, a, p, pal);
560 u1 += u;
561 v1 += v;
562 a1 += a;
563 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
564 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
565 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
566 cb++;
567 cr++;
568 p += -wrap3 + BPP;
569 lum += -wrap + 1;
570 }
571 p += wrap3 + (wrap3 - rect->w * BPP);
572 lum += wrap + (wrap - rect->w - rect->x);
573 cb += dst->linesize[1] - width2 - skip2;
574 cr += dst->linesize[2] - width2 - skip2;
575 }
576 /* handle odd height */
577 if (h) {
578 lum += rect->x;
579 cb += skip2;
580 cr += skip2;
581
582 if (rect->x & 1) {
583 YUVA_IN(y, u, v, a, p, pal);
584 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
585 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
586 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
587 cb++;
588 cr++;
589 lum++;
590 p += BPP;
591 }
592 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
593 YUVA_IN(y, u, v, a, p, pal);
594 u1 = u;
595 v1 = v;
596 a1 = a;
597 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
598
599 YUVA_IN(y, u, v, a, p + BPP, pal);
600 u1 += u;
601 v1 += v;
602 a1 += a;
603 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
604 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
605 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
606 cb++;
607 cr++;
608 p += 2 * BPP;
609 lum += 2;
610 }
611 if (w) {
612 YUVA_IN(y, u, v, a, p, pal);
613 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
615 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
616 }
617 }
618 }
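
/* Illustrative note (not part of the original source): blend_subrect() blends
   a palettized subtitle rectangle onto a YUV420P picture. The palette has
   already been converted from RGBA to YUVA (see subtitle_thread() below),
   rows are processed two at a time to match the 2x2 chroma subsampling, and
   each chroma sample is blended against the sum of its 2 or 4 covering
   subtitle pixels via the shift argument of ALPHA_BLEND. */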
619
620 static void free_subpicture(SubPicture *sp)
621 {
622 int i;
623
624 for (i = 0; i < sp->sub.num_rects; i++)
625 {
626 av_free(sp->sub.rects[i].bitmap);
627 av_free(sp->sub.rects[i].rgba_palette);
628 }
629
630 av_free(sp->sub.rects);
631
632 memset(&sp->sub, 0, sizeof(AVSubtitle));
633 }
634
635 static void video_image_display(VideoState *is)
636 {
637 VideoPicture *vp;
638 SubPicture *sp;
639 AVPicture pict;
640 float aspect_ratio;
641 int width, height, x, y;
642 SDL_Rect rect;
643 int i;
644
645 vp = &is->pictq[is->pictq_rindex];
646 if (vp->bmp) {
647 /* XXX: use variable in the frame */
648 if (is->video_st->codec->sample_aspect_ratio.num == 0)
649 aspect_ratio = 0;
650 else
651 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
652 * is->video_st->codec->width / is->video_st->codec->height;
653 if (aspect_ratio <= 0.0)
654 aspect_ratio = (float)is->video_st->codec->width /
655 (float)is->video_st->codec->height;
656 /* if an active format is indicated, then it overrides the
657 mpeg format */
658 #if 0
659 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
660 is->dtg_active_format = is->video_st->codec->dtg_active_format;
661 printf("dtg_active_format=%d\n", is->dtg_active_format);
662 }
663 #endif
664 #if 0
665 switch(is->video_st->codec->dtg_active_format) {
666 case FF_DTG_AFD_SAME:
667 default:
668 /* nothing to do */
669 break;
670 case FF_DTG_AFD_4_3:
671 aspect_ratio = 4.0 / 3.0;
672 break;
673 case FF_DTG_AFD_16_9:
674 aspect_ratio = 16.0 / 9.0;
675 break;
676 case FF_DTG_AFD_14_9:
677 aspect_ratio = 14.0 / 9.0;
678 break;
679 case FF_DTG_AFD_4_3_SP_14_9:
680 aspect_ratio = 14.0 / 9.0;
681 break;
682 case FF_DTG_AFD_16_9_SP_14_9:
683 aspect_ratio = 14.0 / 9.0;
684 break;
685 case FF_DTG_AFD_SP_4_3:
686 aspect_ratio = 4.0 / 3.0;
687 break;
688 }
689 #endif
690
691 if (is->subtitle_st)
692 {
693 if (is->subpq_size > 0)
694 {
695 sp = &is->subpq[is->subpq_rindex];
696
697 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
698 {
699 SDL_LockYUVOverlay (vp->bmp);
700
701 pict.data[0] = vp->bmp->pixels[0];
702 pict.data[1] = vp->bmp->pixels[2];
703 pict.data[2] = vp->bmp->pixels[1];
704
705 pict.linesize[0] = vp->bmp->pitches[0];
706 pict.linesize[1] = vp->bmp->pitches[2];
707 pict.linesize[2] = vp->bmp->pitches[1];
708
709 for (i = 0; i < sp->sub.num_rects; i++)
710 blend_subrect(&pict, &sp->sub.rects[i]);
711
712 SDL_UnlockYUVOverlay (vp->bmp);
713 }
714 }
715 }
716
717
718 /* XXX: we suppose the screen has a 1.0 pixel ratio */
719 height = is->height;
720 width = ((int)rint(height * aspect_ratio)) & -3;
721 if (width > is->width) {
722 width = is->width;
723 height = ((int)rint(width / aspect_ratio)) & -3;
724 }
725 x = (is->width - width) / 2;
726 y = (is->height - height) / 2;
727 if (!is->no_background) {
728 /* fill the background */
729 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
730 } else {
731 is->no_background = 0;
732 }
733 rect.x = is->xleft + x;
734 rect.y = is->ytop + y;
735 rect.w = width;
736 rect.h = height;
737 SDL_DisplayYUVOverlay(vp->bmp, &rect);
738 } else {
739 #if 0
740 fill_rectangle(screen,
741 is->xleft, is->ytop, is->width, is->height,
742 QERGB(0x00, 0x00, 0x00));
743 #endif
744 }
745 }
746
747 static inline int compute_mod(int a, int b)
748 {
749 a = a % b;
750 if (a >= 0)
751 return a;
752 else
753 return a + b;
754 }
755
756 static void video_audio_display(VideoState *s)
757 {
758 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
759 int ch, channels, h, h2, bgcolor, fgcolor;
760 int64_t time_diff;
761
762 /* compute display index: center on the currently output samples */
763 channels = s->audio_st->codec->channels;
764 nb_display_channels = channels;
765 if (!s->paused) {
766 n = 2 * channels;
767 delay = audio_write_get_buf_size(s);
768 delay /= n;
769
770 /* to be more precise, we take into account the time spent since
771 the last buffer computation */
772 if (audio_callback_time) {
773 time_diff = av_gettime() - audio_callback_time;
774 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
775 }
776
777 delay -= s->width / 2;
778 if (delay < s->width)
779 delay = s->width;
780 i_start = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
781 s->last_i_start = i_start;
782 } else {
783 i_start = s->last_i_start;
784 }
785
786 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
787 fill_rectangle(screen,
788 s->xleft, s->ytop, s->width, s->height,
789 bgcolor);
790
791 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
792
793 /* total height for one channel */
794 h = s->height / nb_display_channels;
795 /* graph height / 2 */
796 h2 = (h * 9) / 20;
797 for(ch = 0;ch < nb_display_channels; ch++) {
798 i = i_start + ch;
799 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
800 for(x = 0; x < s->width; x++) {
801 y = (s->sample_array[i] * h2) >> 15;
802 if (y < 0) {
803 y = -y;
804 ys = y1 - y;
805 } else {
806 ys = y1;
807 }
808 fill_rectangle(screen,
809 s->xleft + x, ys, 1, y,
810 fgcolor);
811 i += channels;
812 if (i >= SAMPLE_ARRAY_SIZE)
813 i -= SAMPLE_ARRAY_SIZE;
814 }
815 }
816
817 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
818
819 for(ch = 1;ch < nb_display_channels; ch++) {
820 y = s->ytop + ch * h;
821 fill_rectangle(screen,
822 s->xleft, y, s->width, 1,
823 fgcolor);
824 }
825 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
826 }
827
828 /* display the current picture, if any */
829 static void video_display(VideoState *is)
830 {
831 if (is->audio_st && is->show_audio)
832 video_audio_display(is);
833 else if (is->video_st)
834 video_image_display(is);
835 }
836
837 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
838 {
839 SDL_Event event;
840 event.type = FF_REFRESH_EVENT;
841 event.user.data1 = opaque;
842 SDL_PushEvent(&event);
843 return 0; /* 0 means stop timer */
844 }
845
846 /* schedule a video refresh in 'delay' ms */
847 static void schedule_refresh(VideoState *is, int delay)
848 {
849 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
850 }
851
852 /* get the current audio clock value */
853 static double get_audio_clock(VideoState *is)
854 {
855 double pts;
856 int hw_buf_size, bytes_per_sec;
857 pts = is->audio_clock;
858 hw_buf_size = audio_write_get_buf_size(is);
859 bytes_per_sec = 0;
860 if (is->audio_st) {
861 bytes_per_sec = is->audio_st->codec->sample_rate *
862 2 * is->audio_st->codec->channels;
863 }
864 if (bytes_per_sec)
865 pts -= (double)hw_buf_size / bytes_per_sec;
866 return pts;
867 }
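
/* Illustrative example (not part of the original source), assuming 16-bit
   stereo at 44100 Hz: bytes_per_sec = 44100 * 2 * 2 = 176400, so if 8192
   bytes are still waiting in the SDL buffer the returned clock is pulled
   back by 8192 / 176400 ~= 46 ms relative to is->audio_clock. */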
868
869 /* get the current video clock value */
870 static double get_video_clock(VideoState *is)
871 {
872 double delta;
873 if (is->paused) {
874 delta = 0;
875 } else {
876 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
877 }
878 return is->video_current_pts + delta;
879 }
880
881 /* get the current external clock value */
882 static double get_external_clock(VideoState *is)
883 {
884 int64_t ti;
885 ti = av_gettime();
886 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
887 }
888
889 /* get the current master clock value */
890 static double get_master_clock(VideoState *is)
891 {
892 double val;
893
894 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
895 if (is->video_st)
896 val = get_video_clock(is);
897 else
898 val = get_audio_clock(is);
899 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
900 if (is->audio_st)
901 val = get_audio_clock(is);
902 else
903 val = get_video_clock(is);
904 } else {
905 val = get_external_clock(is);
906 }
907 return val;
908 }
909
910 /* seek in the stream */
911 static void stream_seek(VideoState *is, int64_t pos, int rel)
912 {
913 if (!is->seek_req) {
914 is->seek_pos = pos;
915 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
916 is->seek_req = 1;
917 }
918 }
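
/* Illustrative note (not part of the original source): only the request is
   recorded here; decode_thread() below performs the actual av_seek_frame()
   call (stream_index -1, so 'pos' is expected in AV_TIME_BASE units) and
   flushes the packet queues before clearing seek_req. */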
919
920 /* pause or resume the video */
921 static void stream_pause(VideoState *is)
922 {
923 is->paused = !is->paused;
924 if (is->paused) {
925 is->video_current_pts = get_video_clock(is);
926 }
927 }
928
929 /* called to display each frame */
930 static void video_refresh_timer(void *opaque)
931 {
932 VideoState *is = opaque;
933 VideoPicture *vp;
934 double actual_delay, delay, sync_threshold, ref_clock, diff;
935
936 SubPicture *sp, *sp2;
937
938 if (is->video_st) {
939 if (is->pictq_size == 0) {
940 /* if no picture, need to wait */
941 schedule_refresh(is, 1);
942 } else {
943 /* dequeue the picture */
944 vp = &is->pictq[is->pictq_rindex];
945
946 /* update current video pts */
947 is->video_current_pts = vp->pts;
948 is->video_current_pts_time = av_gettime();
949
950 /* compute nominal delay */
951 delay = vp->pts - is->frame_last_pts;
952 if (delay <= 0 || delay >= 1.0) {
953 /* if incorrect delay, use previous one */
954 delay = is->frame_last_delay;
955 }
956 is->frame_last_delay = delay;
957 is->frame_last_pts = vp->pts;
958
959 /* update delay to follow master synchronisation source */
960 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
961 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
962 /* if video is slave, we try to correct big delays by
963 duplicating or deleting a frame */
964 ref_clock = get_master_clock(is);
965 diff = vp->pts - ref_clock;
966
967 /* skip or repeat frame. We take into account the
968 delay to compute the threshold. I still don't know
969 if it is the best guess */
970 sync_threshold = AV_SYNC_THRESHOLD;
971 if (delay > sync_threshold)
972 sync_threshold = delay;
973 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
974 if (diff <= -sync_threshold)
975 delay = 0;
976 else if (diff >= sync_threshold)
977 delay = 2 * delay;
978 }
979 }
980
981 is->frame_timer += delay;
982 /* compute the REAL delay (we need to do that to avoid
983 long term errors) */
984 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
985 if (actual_delay < 0.010) {
986 /* XXX: should skip picture */
987 actual_delay = 0.010;
988 }
989 /* launch timer for next picture */
990 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
991
992 #if defined(DEBUG_SYNC)
993 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
994 delay, actual_delay, vp->pts, -diff);
995 #endif
996
997 if(is->subtitle_st) {
998 if (is->subtitle_stream_changed) {
999 SDL_LockMutex(is->subpq_mutex);
1000
1001 while (is->subpq_size) {
1002 free_subpicture(&is->subpq[is->subpq_rindex]);
1003
1004 /* update queue size and signal for next picture */
1005 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1006 is->subpq_rindex = 0;
1007
1008 is->subpq_size--;
1009 }
1010 is->subtitle_stream_changed = 0;
1011
1012 SDL_CondSignal(is->subpq_cond);
1013 SDL_UnlockMutex(is->subpq_mutex);
1014 } else {
1015 if (is->subpq_size > 0) {
1016 sp = &is->subpq[is->subpq_rindex];
1017
1018 if (is->subpq_size > 1)
1019 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1020 else
1021 sp2 = NULL;
1022
1023 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1024 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1025 {
1026 free_subpicture(sp);
1027
1028 /* update queue size and signal for next picture */
1029 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1030 is->subpq_rindex = 0;
1031
1032 SDL_LockMutex(is->subpq_mutex);
1033 is->subpq_size--;
1034 SDL_CondSignal(is->subpq_cond);
1035 SDL_UnlockMutex(is->subpq_mutex);
1036 }
1037 }
1038 }
1039 }
1040
1041 /* display picture */
1042 video_display(is);
1043
1044 /* update queue size and signal for next picture */
1045 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1046 is->pictq_rindex = 0;
1047
1048 SDL_LockMutex(is->pictq_mutex);
1049 is->pictq_size--;
1050 SDL_CondSignal(is->pictq_cond);
1051 SDL_UnlockMutex(is->pictq_mutex);
1052 }
1053 } else if (is->audio_st) {
1054 /* draw the next audio frame */
1055
1056 schedule_refresh(is, 40);
1057
1058 /* if only audio stream, then display the audio bars (better
1059 than nothing, just to test the implementation) */
1060
1061 /* display picture */
1062 video_display(is);
1063 } else {
1064 schedule_refresh(is, 100);
1065 }
1066 if (show_status) {
1067 static int64_t last_time;
1068 int64_t cur_time;
1069 int aqsize, vqsize, sqsize;
1070 double av_diff;
1071
1072 cur_time = av_gettime();
1073 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1074 aqsize = 0;
1075 vqsize = 0;
1076 sqsize = 0;
1077 if (is->audio_st)
1078 aqsize = is->audioq.size;
1079 if (is->video_st)
1080 vqsize = is->videoq.size;
1081 if (is->subtitle_st)
1082 sqsize = is->subtitleq.size;
1083 av_diff = 0;
1084 if (is->audio_st && is->video_st)
1085 av_diff = get_audio_clock(is) - get_video_clock(is);
1086 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1087 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1088 fflush(stdout);
1089 last_time = cur_time;
1090 }
1091 }
1092 }
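
/* Illustrative example (not part of the original source) of the skip/repeat
   logic above, assuming a nominal 25 fps delay of 0.040 s (so sync_threshold
   becomes 0.040): if the frame is 80 ms late (diff = -0.08) the delay is
   forced to 0 and the picture is shown immediately to catch up; if it is
   80 ms early (diff = +0.08) the delay is doubled to 0.08 s to hold it back.
   Differences beyond AV_NOSYNC_THRESHOLD are left uncorrected. */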
1093
1094 /* allocate a picture (this needs to be done in the main thread to avoid
1095 potential locking problems) */
1096 static void alloc_picture(void *opaque)
1097 {
1098 VideoState *is = opaque;
1099 VideoPicture *vp;
1100
1101 vp = &is->pictq[is->pictq_windex];
1102
1103 if (vp->bmp)
1104 SDL_FreeYUVOverlay(vp->bmp);
1105
1106 #if 0
1107 /* XXX: use generic function */
1108 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1109 switch(is->video_st->codec->pix_fmt) {
1110 case PIX_FMT_YUV420P:
1111 case PIX_FMT_YUV422P:
1112 case PIX_FMT_YUV444P:
1113 case PIX_FMT_YUV422:
1114 case PIX_FMT_YUV410P:
1115 case PIX_FMT_YUV411P:
1116 is_yuv = 1;
1117 break;
1118 default:
1119 is_yuv = 0;
1120 break;
1121 }
1122 #endif
1123 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1124 is->video_st->codec->height,
1125 SDL_YV12_OVERLAY,
1126 screen);
1127 vp->width = is->video_st->codec->width;
1128 vp->height = is->video_st->codec->height;
1129
1130 SDL_LockMutex(is->pictq_mutex);
1131 vp->allocated = 1;
1132 SDL_CondSignal(is->pictq_cond);
1133 SDL_UnlockMutex(is->pictq_mutex);
1134 }
1135
1136 /**
1137 * queue a decoded picture for later display
1138 * @param pts the dts of the packet / the pts of the frame, guessed if not known
1139 */
1140 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1141 {
1142 VideoPicture *vp;
1143 int dst_pix_fmt;
1144 AVPicture pict;
1145
1146 /* wait until we have space to put a new picture */
1147 SDL_LockMutex(is->pictq_mutex);
1148 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1149 !is->videoq.abort_request) {
1150 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1151 }
1152 SDL_UnlockMutex(is->pictq_mutex);
1153
1154 if (is->videoq.abort_request)
1155 return -1;
1156
1157 vp = &is->pictq[is->pictq_windex];
1158
1159 /* alloc or resize hardware picture buffer */
1160 if (!vp->bmp ||
1161 vp->width != is->video_st->codec->width ||
1162 vp->height != is->video_st->codec->height) {
1163 SDL_Event event;
1164
1165 vp->allocated = 0;
1166
1167 /* the allocation must be done in the main thread to avoid
1168 locking problems */
1169 event.type = FF_ALLOC_EVENT;
1170 event.user.data1 = is;
1171 SDL_PushEvent(&event);
1172
1173 /* wait until the picture is allocated */
1174 SDL_LockMutex(is->pictq_mutex);
1175 while (!vp->allocated && !is->videoq.abort_request) {
1176 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1177 }
1178 SDL_UnlockMutex(is->pictq_mutex);
1179
1180 if (is->videoq.abort_request)
1181 return -1;
1182 }
1183
1184 /* if the frame is not skipped, then display it */
1185 if (vp->bmp) {
1186 /* get a pointer on the bitmap */
1187 SDL_LockYUVOverlay (vp->bmp);
1188
1189 dst_pix_fmt = PIX_FMT_YUV420P;
1190 pict.data[0] = vp->bmp->pixels[0];
1191 pict.data[1] = vp->bmp->pixels[2];
1192 pict.data[2] = vp->bmp->pixels[1];
1193
1194 pict.linesize[0] = vp->bmp->pitches[0];
1195 pict.linesize[1] = vp->bmp->pitches[2];
1196 pict.linesize[2] = vp->bmp->pitches[1];
1197 img_convert(&pict, dst_pix_fmt,
1198 (AVPicture *)src_frame, is->video_st->codec->pix_fmt,
1199 is->video_st->codec->width, is->video_st->codec->height);
1200 /* update the bitmap content */
1201 SDL_UnlockYUVOverlay(vp->bmp);
1202
1203 vp->pts = pts;
1204
1205 /* now we can update the picture count */
1206 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1207 is->pictq_windex = 0;
1208 SDL_LockMutex(is->pictq_mutex);
1209 is->pictq_size++;
1210 SDL_UnlockMutex(is->pictq_mutex);
1211 }
1212 return 0;
1213 }
1214
1215 /**
1216 * compute the exact PTS for the picture if it is omitted in the stream
1217 * @param pts1 the dts of the pkt / pts of the frame
1218 */
1219 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1220 {
1221 double frame_delay, pts;
1222
1223 pts = pts1;
1224
1225 if (pts != 0) {
1226 /* update video clock with pts, if present */
1227 is->video_clock = pts;
1228 } else {
1229 pts = is->video_clock;
1230 }
1231 /* update video clock for next frame */
1232 frame_delay = av_q2d(is->video_st->codec->time_base);
1233 /* for MPEG2, the frame can be repeated, so we update the
1234 clock accordingly */
1235 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1236 is->video_clock += frame_delay;
1237
1238 #if defined(DEBUG_SYNC) && 0
1239 {
1240 int ftype;
1241 if (src_frame->pict_type == FF_B_TYPE)
1242 ftype = 'B';
1243 else if (src_frame->pict_type == FF_I_TYPE)
1244 ftype = 'I';
1245 else
1246 ftype = 'P';
1247 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1248 ftype, pts, pts1);
1249 }
1250 #endif
1251 return queue_picture(is, src_frame, pts);
1252 }
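
/* Illustrative example (not part of the original source): with a 1/25 time
   base, frame_delay starts at 0.040 s; a frame carrying repeat_pict = 1
   (e.g. a repeated field from pulldown material) advances video_clock by
   0.040 + 1 * 0.020 = 0.060 s instead. */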
1253
1254 static int video_thread(void *arg)
1255 {
1256 VideoState *is = arg;
1257 AVPacket pkt1, *pkt = &pkt1;
1258 int len1, got_picture;
1259 AVFrame *frame= avcodec_alloc_frame();
1260 double pts;
1261
1262 for(;;) {
1263 while (is->paused && !is->videoq.abort_request) {
1264 SDL_Delay(10);
1265 }
1266 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1267 break;
1268 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1269 this packet, if any */
1270 pts = 0;
1271 if (pkt->dts != AV_NOPTS_VALUE)
1272 pts = av_q2d(is->video_st->time_base)*pkt->dts;
1273
1274 SDL_LockMutex(is->video_decoder_mutex);
1275 len1 = avcodec_decode_video(is->video_st->codec,
1276 frame, &got_picture,
1277 pkt->data, pkt->size);
1278 SDL_UnlockMutex(is->video_decoder_mutex);
1279 // if (len1 < 0)
1280 // break;
1281 if (got_picture) {
1282 if (output_picture2(is, frame, pts) < 0)
1283 goto the_end;
1284 }
1285 av_free_packet(pkt);
1286 if (step)
1287 if (cur_stream)
1288 stream_pause(cur_stream);
1289 }
1290 the_end:
1291 av_free(frame);
1292 return 0;
1293 }
1294
1295 static int subtitle_thread(void *arg)
1296 {
1297 VideoState *is = arg;
1298 SubPicture *sp;
1299 AVPacket pkt1, *pkt = &pkt1;
1300 int len1, got_subtitle;
1301 double pts;
1302 int i, j;
1303 int r, g, b, y, u, v, a;
1304
1305 for(;;) {
1306 while (is->paused && !is->subtitleq.abort_request) {
1307 SDL_Delay(10);
1308 }
1309 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1310 break;
1311
1312 SDL_LockMutex(is->subpq_mutex);
1313 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1314 !is->subtitleq.abort_request) {
1315 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1316 }
1317 SDL_UnlockMutex(is->subpq_mutex);
1318
1319 if (is->subtitleq.abort_request)
1320 goto the_end;
1321
1322 sp = &is->subpq[is->subpq_windex];
1323
1324 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1325 this packet, if any */
1326 pts = 0;
1327 if (pkt->pts != AV_NOPTS_VALUE)
1328 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1329
1330 SDL_LockMutex(is->subtitle_decoder_mutex);
1331 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1332 &sp->sub, &got_subtitle,
1333 pkt->data, pkt->size);
1334 SDL_UnlockMutex(is->subtitle_decoder_mutex);
1335 // if (len1 < 0)
1336 // break;
1337 if (got_subtitle && sp->sub.format == 0) {
1338 sp->pts = pts;
1339
1340 for (i = 0; i < sp->sub.num_rects; i++)
1341 {
1342 for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1343 {
1344 RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1345 y = RGB_TO_Y_CCIR(r, g, b);
1346 u = RGB_TO_U_CCIR(r, g, b, 0);
1347 v = RGB_TO_V_CCIR(r, g, b, 0);
1348 YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1349 }
1350 }
1351
1352 /* now we can update the picture count */
1353 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1354 is->subpq_windex = 0;
1355 SDL_LockMutex(is->subpq_mutex);
1356 is->subpq_size++;
1357 SDL_UnlockMutex(is->subpq_mutex);
1358 }
1359 av_free_packet(pkt);
1360 // if (step)
1361 // if (cur_stream)
1362 // stream_pause(cur_stream);
1363 }
1364 the_end:
1365 return 0;
1366 }
1367
1368 /* copy samples for viewing in the audio waveform display */
1369 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1370 {
1371 int size, len, channels;
1372
1373 channels = is->audio_st->codec->channels;
1374
1375 size = samples_size / sizeof(short);
1376 while (size > 0) {
1377 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1378 if (len > size)
1379 len = size;
1380 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1381 samples += len;
1382 is->sample_array_index += len;
1383 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1384 is->sample_array_index = 0;
1385 size -= len;
1386 }
1387 }
1388
1389 /* return the new audio buffer size (samples can be added or deleted
1390 to get better sync if the video or external clock is the master) */
1391 static int synchronize_audio(VideoState *is, short *samples,
1392 int samples_size1, double pts)
1393 {
1394 int n, samples_size;
1395 double ref_clock;
1396
1397 n = 2 * is->audio_st->codec->channels;
1398 samples_size = samples_size1;
1399
1400 /* if not master, then we try to remove or add samples to correct the clock */
1401 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1402 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1403 double diff, avg_diff;
1404 int wanted_size, min_size, max_size, nb_samples;
1405
1406 ref_clock = get_master_clock(is);
1407 diff = get_audio_clock(is) - ref_clock;
1408
1409 if (diff < AV_NOSYNC_THRESHOLD) {
1410 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1411 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1412 /* not enough measures to have a correct estimate */
1413 is->audio_diff_avg_count++;
1414 } else {
1415 /* estimate the A-V difference */
1416 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1417
1418 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1419 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1420 nb_samples = samples_size / n;
1421
1422 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1423 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1424 if (wanted_size < min_size)
1425 wanted_size = min_size;
1426 else if (wanted_size > max_size)
1427 wanted_size = max_size;
1428
1429 /* add or remove samples to correct the sync */
1430 if (wanted_size < samples_size) {
1431 /* remove samples */
1432 samples_size = wanted_size;
1433 } else if (wanted_size > samples_size) {
1434 uint8_t *samples_end, *q;
1435 int nb;
1436
1437 /* add samples */
1438 nb = wanted_size - samples_size;
1439 samples_end = (uint8_t *)samples + samples_size - n;
1440 q = samples_end + n;
1441 while (nb > 0) {
1442 memcpy(q, samples_end, n);
1443 q += n;
1444 nb -= n;
1445 }
1446 samples_size = wanted_size;
1447 }
1448 }
1449 #if 0
1450 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1451 diff, avg_diff, samples_size - samples_size1,
1452 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1453 #endif
1454 }
1455 } else {
1456 /* too big a difference: probably initial PTS errors, so
1457 reset the A-V filter */
1458 is->audio_diff_avg_count = 0;
1459 is->audio_diff_cum = 0;
1460 }
1461 }
1462
1463 return samples_size;
1464 }
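
/* Illustrative example (not part of the original source), assuming 16-bit
   stereo at 44100 Hz (n = 4) and a 4096 byte callback buffer: if the audio
   clock leads the master clock by a steady 0.1 s, wanted_size = 4096 +
   (int)(0.1 * 44100) * 4 = 21736 bytes, which the +/-10% clamp reduces to
   max_size = ((1024 * 110) / 100) * 4 = 4504 bytes, so at most ~10% of the
   buffer is duplicated or dropped per callback. */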
1465
1466 /* decode one audio frame and return its uncompressed size */
1467 static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, double *pts_ptr)
1468 {
1469 AVPacket *pkt = &is->audio_pkt;
1470 int n, len1, data_size;
1471 double pts;
1472
1473 for(;;) {
1474 /* NOTE: the audio packet can contain several frames */
1475 while (is->audio_pkt_size > 0) {
1476 SDL_LockMutex(is->audio_decoder_mutex);
1477 len1 = avcodec_decode_audio(is->audio_st->codec,
1478 (int16_t *)audio_buf, &data_size,
1479 is->audio_pkt_data, is->audio_pkt_size);
1480 SDL_UnlockMutex(is->audio_decoder_mutex);
1481 if (len1 < 0) {
1482 /* if error, we skip the frame */
1483 is->audio_pkt_size = 0;
1484 break;
1485 }
1486
1487 is->audio_pkt_data += len1;
1488 is->audio_pkt_size -= len1;
1489 if (data_size <= 0)
1490 continue;
1491 /* if no pts, then compute it */
1492 pts = is->audio_clock;
1493 *pts_ptr = pts;
1494 n = 2 * is->audio_st->codec->channels;
1495 is->audio_clock += (double)data_size /
1496 (double)(n * is->audio_st->codec->sample_rate);
1497 #if defined(DEBUG_SYNC)
1498 {
1499 static double last_clock;
1500 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1501 is->audio_clock - last_clock,
1502 is->audio_clock, pts);
1503 last_clock = is->audio_clock;
1504 }
1505 #endif
1506 return data_size;
1507 }
1508
1509 /* free the current packet */
1510 if (pkt->data)
1511 av_free_packet(pkt);
1512
1513 if (is->paused || is->audioq.abort_request) {
1514 return -1;
1515 }
1516
1517 /* read next packet */
1518 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1519 return -1;
1520 is->audio_pkt_data = pkt->data;
1521 is->audio_pkt_size = pkt->size;
1522
1523 /* update the audio clock with the pts, if present */
1524 if (pkt->pts != AV_NOPTS_VALUE) {
1525 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1526 }
1527 }
1528 }
1529
1530 /* get the current audio output buffer size, in bytes. With SDL, we
1531 cannot have precise information */
1532 static int audio_write_get_buf_size(VideoState *is)
1533 {
1534 return is->audio_hw_buf_size - is->audio_buf_index;
1535 }
1536
1537
1538 /* prepare a new audio buffer */
1539 void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1540 {
1541 VideoState *is = opaque;
1542 int audio_size, len1;
1543 double pts;
1544
1545 audio_callback_time = av_gettime();
1546
1547 while (len > 0) {
1548 if (is->audio_buf_index >= is->audio_buf_size) {
1549 audio_size = audio_decode_frame(is, is->audio_buf, &pts);
1550 if (audio_size < 0) {
1551 /* if error, just output silence */
1552 is->audio_buf_size = 1024;
1553 memset(is->audio_buf, 0, is->audio_buf_size);
1554 } else {
1555 if (is->show_audio)
1556 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1557 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1558 pts);
1559 is->audio_buf_size = audio_size;
1560 }
1561 is->audio_buf_index = 0;
1562 }
1563 len1 = is->audio_buf_size - is->audio_buf_index;
1564 if (len1 > len)
1565 len1 = len;
1566 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1567 len -= len1;
1568 stream += len1;
1569 is->audio_buf_index += len1;
1570 }
1571 }
1572
1573
1574 /* open a given stream. Return 0 if OK */
1575 static int stream_component_open(VideoState *is, int stream_index)
1576 {
1577 AVFormatContext *ic = is->ic;
1578 AVCodecContext *enc;
1579 AVCodec *codec;
1580 SDL_AudioSpec wanted_spec, spec;
1581
1582 if (stream_index < 0 || stream_index >= ic->nb_streams)
1583 return -1;
1584 enc = ic->streams[stream_index]->codec;
1585
1586 /* prepare audio output */
1587 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1588 wanted_spec.freq = enc->sample_rate;
1589 wanted_spec.format = AUDIO_S16SYS;
1590 /* hack for AC3. XXX: remove this */
1591 if (enc->channels > 2)
1592 enc->channels = 2;
1593 wanted_spec.channels = enc->channels;
1594 wanted_spec.silence = 0;
1595 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1596 wanted_spec.callback = sdl_audio_callback;
1597 wanted_spec.userdata = is;
1598 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1599 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1600 return -1;
1601 }
1602 is->audio_hw_buf_size = spec.size;
1603 }
1604
1605 codec = avcodec_find_decoder(enc->codec_id);
1606 enc->debug_mv = debug_mv;
1607 enc->debug = debug;
1608 if(debug)
1609 av_log_set_level(AV_LOG_DEBUG);
1610 enc->workaround_bugs = workaround_bugs;
1611 enc->lowres = lowres;
1612 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1613 enc->idct_algo= idct;
1614 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1615 enc->skip_frame= skip_frame;
1616 enc->skip_idct= skip_idct;
1617 enc->skip_loop_filter= skip_loop_filter;
1618 enc->error_resilience= error_resilience;
1619 enc->error_concealment= error_concealment;
1620 if (!codec ||
1621 avcodec_open(enc, codec) < 0)
1622 return -1;
1623 #if defined(HAVE_THREADS)
1624 if(thread_count>1)
1625 avcodec_thread_init(enc, thread_count);
1626 #endif
1627 enc->thread_count= thread_count;
1628 switch(enc->codec_type) {
1629 case CODEC_TYPE_AUDIO:
1630 is->audio_stream = stream_index;
1631 is->audio_st = ic->streams[stream_index];
1632 is->audio_buf_size = 0;
1633 is->audio_buf_index = 0;
1634
1635 /* init averaging filter */
1636 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1637 is->audio_diff_avg_count = 0;
1638 /* since we do not have precise enough audio fifo fullness information,
1639 we correct audio sync only if the error is larger than this threshold */
1640 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1641
1642 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1643 packet_queue_init(&is->audioq);
1644 SDL_PauseAudio(0);
1645 break;
1646 case CODEC_TYPE_VIDEO:
1647 is->video_stream = stream_index;
1648 is->video_st = ic->streams[stream_index];
1649
1650 is->frame_last_delay = 40e-3;
1651 is->frame_timer = (double)av_gettime() / 1000000.0;
1652 is->video_current_pts_time = av_gettime();
1653
1654 packet_queue_init(&is->videoq);
1655 is->video_tid = SDL_CreateThread(video_thread, is);
1656 break;
1657 case CODEC_TYPE_SUBTITLE:
1658 is->subtitle_stream = stream_index;
1659 is->subtitle_st = ic->streams[stream_index];
1660 packet_queue_init(&is->subtitleq);
1661
1662 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1663 break;
1664 default:
1665 break;
1666 }
1667 return 0;
1668 }
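
/* Illustrative note (not part of the original source): for the audio case
   above, SDL reports the negotiated buffer size in spec.size; with the
   requested 1024 samples of 16-bit stereo that is typically 1024 * 2 * 2 =
   4096 bytes. For a 44100 Hz stream the resulting audio_diff_threshold of
   2.0 * 1024 / 44100 ~= 46 ms is the minimum average A-V error before
   synchronize_audio() starts adding or dropping samples. */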
1669
1670 static void stream_component_close(VideoState *is, int stream_index)
1671 {
1672 AVFormatContext *ic = is->ic;
1673 AVCodecContext *enc;
1674
1675 if (stream_index < 0 || stream_index >= ic->nb_streams)
1676 return;
1677 enc = ic->streams[stream_index]->codec;
1678
1679 switch(enc->codec_type) {
1680 case CODEC_TYPE_AUDIO:
1681 packet_queue_abort(&is->audioq);
1682
1683 SDL_CloseAudio();
1684
1685 packet_queue_end(&is->audioq);
1686 break;
1687 case CODEC_TYPE_VIDEO:
1688 packet_queue_abort(&is->videoq);
1689
1690 /* note: we also signal this mutex to make sure we unblock the
1691 video thread in all cases */
1692 SDL_LockMutex(is->pictq_mutex);
1693 SDL_CondSignal(is->pictq_cond);
1694 SDL_UnlockMutex(is->pictq_mutex);
1695
1696 SDL_WaitThread(is->video_tid, NULL);
1697
1698 packet_queue_end(&is->videoq);
1699 break;
1700 case CODEC_TYPE_SUBTITLE:
1701 packet_queue_abort(&is->subtitleq);
1702
1703 /* note: we also signal this mutex to make sure we unblock the
1704 subtitle thread in all cases */
1705 SDL_LockMutex(is->subpq_mutex);
1706 is->subtitle_stream_changed = 1;
1707
1708 SDL_CondSignal(is->subpq_cond);
1709 SDL_UnlockMutex(is->subpq_mutex);
1710
1711 SDL_WaitThread(is->subtitle_tid, NULL);
1712
1713 packet_queue_end(&is->subtitleq);
1714 break;
1715 default:
1716 break;
1717 }
1718
1719 avcodec_close(enc);
1720 switch(enc->codec_type) {
1721 case CODEC_TYPE_AUDIO:
1722 is->audio_st = NULL;
1723 is->audio_stream = -1;
1724 break;
1725 case CODEC_TYPE_VIDEO:
1726 is->video_st = NULL;
1727 is->video_stream = -1;
1728 break;
1729 case CODEC_TYPE_SUBTITLE:
1730 is->subtitle_st = NULL;
1731 is->subtitle_stream = -1;
1732 break;
1733 default:
1734 break;
1735 }
1736 }
1737
1738 void dump_stream_info(AVFormatContext *s)
1739 {
1740 if (s->track != 0)
1741 fprintf(stderr, "Track: %d\n", s->track);
1742 if (s->title[0] != '\0')
1743 fprintf(stderr, "Title: %s\n", s->title);
1744 if (s->author[0] != '\0')
1745 fprintf(stderr, "Author: %s\n", s->author);
1746 if (s->album[0] != '\0')
1747 fprintf(stderr, "Album: %s\n", s->album);
1748 if (s->year != 0)
1749 fprintf(stderr, "Year: %d\n", s->year);
1750 if (s->genre[0] != '\0')
1751 fprintf(stderr, "Genre: %s\n", s->genre);
1752 }
1753
1754 /* since we have only one decoding thread, we can use a global
1755 variable instead of a thread local variable */
1756 static VideoState *global_video_state;
1757
1758 static int decode_interrupt_cb(void)
1759 {
1760 return (global_video_state && global_video_state->abort_request);
1761 }
1762
1763 /* this thread gets the stream from the disk or the network */
1764 static int decode_thread(void *arg)
1765 {
1766 VideoState *is = arg;
1767 AVFormatContext *ic;
1768 int err, i, ret, video_index, audio_index, use_play;
1769 AVPacket pkt1, *pkt = &pkt1;
1770 AVFormatParameters params, *ap = &params;
1771
1772 video_index = -1;
1773 audio_index = -1;
1774 is->video_stream = -1;
1775 is->audio_stream = -1;
1776 is->subtitle_stream = -1;
1777
1778 global_video_state = is;
1779 url_set_interrupt_cb(decode_interrupt_cb);
1780
1781 memset(ap, 0, sizeof(*ap));
1782 ap->image_format = image_format;
1783 ap->initial_pause = 1; /* we force a pause when starting an RTSP
1784 stream */
1785
1786 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1787 if (err < 0) {
1788 print_error(is->filename, err);
1789 ret = -1;
1790 goto fail;
1791 }
1792 is->ic = ic;
1793 #ifdef CONFIG_NETWORK
1794 use_play = (ic->iformat == &rtsp_demux);
1795 #else
1796 use_play = 0;
1797 #endif
1798
1799 if(genpts)
1800 ic->flags |= AVFMT_FLAG_GENPTS;
1801
1802 if (!use_play) {
1803 err = av_find_stream_info(ic);
1804 if (err < 0) {
1805 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1806 ret = -1;
1807 goto fail;
1808 }
1809 ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe shouldn't use url_feof() to test for the end
1810 }
1811
1812 /* if seeking requested, we execute it */
1813 if (start_time != AV_NOPTS_VALUE) {
1814 int64_t timestamp;
1815
1816 timestamp = start_time;
1817 /* add the stream start time */
1818 if (ic->start_time != AV_NOPTS_VALUE)
1819 timestamp += ic->start_time;
1820 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1821 if (ret < 0) {
1822 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1823 is->filename, (double)timestamp / AV_TIME_BASE);
1824 }
1825 }
1826
1827 /* now we can begin to play (RTSP stream only) */
1828 av_read_play(ic);
1829
1830 if (use_play) {
1831 err = av_find_stream_info(ic);
1832 if (err < 0) {
1833 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1834 ret = -1;
1835 goto fail;
1836 }
1837 }
1838
1839 for(i = 0; i < ic->nb_streams; i++) {
1840 AVCodecContext *enc = ic->streams[i]->codec;
1841 switch(enc->codec_type) {
1842 case CODEC_TYPE_AUDIO:
1843 if (audio_index < 0 && !audio_disable)
1844 audio_index = i;
1845 break;
1846 case CODEC_TYPE_VIDEO:
1847 if (video_index < 0 && !video_disable)
1848 video_index = i;
1849 break;
1850 default:
1851 break;
1852 }
1853 }
1854 if (show_status) {
1855 dump_format(ic, 0, is->filename, 0);
1856 dump_stream_info(ic);
1857 }
1858
1859 /* open the streams */
1860 if (audio_index >= 0) {
1861 stream_component_open(is, audio_index);
1862 }
1863
1864 if (video_index >= 0) {
1865 stream_component_open(is, video_index);
1866 } else {
1867 if (!display_disable)
1868 is->show_audio = 1;
1869 }
1870
1871 if (is->video_stream < 0 && is->audio_stream < 0) {
1872 fprintf(stderr, "%s: could not open codecs\n", is->filename);
1873 ret = -1;
1874 goto fail;
1875 }
1876
1877 for(;;) {
1878 if (is->abort_request)
1879 break;
1880 #ifdef CONFIG_NETWORK
1881 if (is->paused != is->last_paused) {
1882 is->last_paused = is->paused;
1883 if (is->paused)
1884 av_read_pause(ic);
1885 else
1886 av_read_play(ic);
1887 }
1888 if (is->paused && ic->iformat == &rtsp_demux) {
1889 /* wait 10 ms to avoid trying to get another packet */
1890 /* XXX: horrible */
1891 SDL_Delay(10);
1892 continue;
1893 }
1894 #endif
1895 if (is->seek_req) {
1896 /* XXX: must lock decoder threads */
1897 SDL_LockMutex(is->video_decoder_mutex);
1898 SDL_LockMutex(is->audio_decoder_mutex);
1899 SDL_LockMutex(is->subtitle_decoder_mutex);
1900 ret = av_seek_frame(is->ic, -1, is->seek_pos, is->seek_flags);
1901 if (ret < 0) {
1902 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
1903 }else{
1904 if (is->audio_stream >= 0) {
1905 packet_queue_flush(&is->audioq);
1906 }
1907 if (is->subtitle_stream >= 0) {
1908 packet_queue_flush(&is->subtitleq);
1909 }
1910 if (is->video_stream >= 0) {
1911 packet_queue_flush(&is->videoq);
1912 avcodec_flush_buffers(ic->streams[video_index]->codec);
1913 }
1914 }
1915 SDL_UnlockMutex(is->subtitle_decoder_mutex);
1916 SDL_UnlockMutex(is->audio_decoder_mutex);
1917 SDL_UnlockMutex(is->video_decoder_mutex);
1918 is->seek_req = 0;
1919 }
1920
1921 /* if the queues are full, no need to read more */
1922 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
1923 is->videoq.size > MAX_VIDEOQ_SIZE ||
1924 is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
1925 url_feof(&ic->pb)) {
1926 /* wait 10 ms */
1927 SDL_Delay(10);
1928 continue;
1929 }
1930 ret = av_read_frame(ic, pkt);
1931 if (ret < 0) {
1932 if (url_ferror(&ic->pb) == 0) {
1933 SDL_Delay(100); /* wait for user event */
1934 continue;
1935 } else
1936 break;
1937 }
1938 if (pkt->stream_index == is->audio_stream) {
1939 packet_queue_put(&is->audioq, pkt);
1940 } else if (pkt->stream_index == is->video_stream) {
1941 packet_queue_put(&is->videoq, pkt);
1942 } else if (pkt->stream_index == is->subtitle_stream) {
1943 packet_queue_put(&is->subtitleq, pkt);
1944 } else {
1945 av_free_packet(pkt);
1946 }
1947 }
1948 /* wait until the end */
1949 while (!is->abort_request) {
1950 SDL_Delay(100);
1951 }
1952
1953 ret = 0;
1954 fail:
1955 /* disable interrupting */
1956 global_video_state = NULL;
1957
1958 /* close each stream */
1959 if (is->audio_stream >= 0)
1960 stream_component_close(is, is->audio_stream);
1961 if (is->video_stream >= 0)
1962 stream_component_close(is, is->video_stream);
1963 if (is->subtitle_stream >= 0)
1964 stream_component_close(is, is->subtitle_stream);
1965 if (is->ic) {
1966 av_close_input_file(is->ic);
1967 is->ic = NULL; /* safety */
1968 }
1969 url_set_interrupt_cb(NULL);
1970
1971 if (ret != 0) {
1972 SDL_Event event;
1973
1974 event.type = FF_QUIT_EVENT;
1975 event.user.data1 = is;
1976 SDL_PushEvent(&event);
1977 }
1978 return 0;
1979 }
1980
1981 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
1982 {
1983 VideoState *is;
1984
1985 is = av_mallocz(sizeof(VideoState));
1986 if (!is)
1987 return NULL;
1988 pstrcpy(is->filename, sizeof(is->filename), filename);
1989 is->iformat = iformat;
1990 if (screen) {
1991 is->width = screen->w;
1992 is->height = screen->h;
1993 }
1994 is->ytop = 0;
1995 is->xleft = 0;
1996
1997 /* start video display */
1998 is->pictq_mutex = SDL_CreateMutex();
1999 is->pictq_cond = SDL_CreateCond();
2000
2001 is->subpq_mutex = SDL_CreateMutex();
2002 is->subpq_cond = SDL_CreateCond();
2003
2004 is->subtitle_decoder_mutex = SDL_CreateMutex();
2005 is->audio_decoder_mutex = SDL_CreateMutex();
2006 is->video_decoder_mutex = SDL_CreateMutex();
2007
2008 /* add the refresh timer to draw the picture */
2009 schedule_refresh(is, 40);
2010
2011 is->av_sync_type = av_sync_type;
2012 is->parse_tid = SDL_CreateThread(decode_thread, is);
2013 if (!is->parse_tid) {
2014 av_free(is);
2015 return NULL;
2016 }
2017 return is;
2018 }
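
/* Illustrative sketch (not part of the original source): how the main program
   (not shown in this excerpt) is expected to drive these helpers once SDL and
   the demuxers have been initialized. */
#if 0
static void play_file_example(const char *filename)
{
    cur_stream = stream_open(filename, file_iformat);
    if (cur_stream)
        event_loop(); /* processes SDL events until do_exit() is called */
}
#endif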
2019
2020 static void stream_close(VideoState *is)
2021 {
2022 VideoPicture *vp;
2023 int i;
2024 /* XXX: use a special url_shutdown call to abort parse cleanly */
2025 is->abort_request = 1;
2026 SDL_WaitThread(is->parse_tid, NULL);
2027
2028 /* free all pictures */
2029 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2030 vp = &is->pictq[i];
2031 if (vp->bmp) {
2032 SDL_FreeYUVOverlay(vp->bmp);
2033 vp->bmp = NULL;
2034 }
2035 }
2036 SDL_DestroyMutex(is->pictq_mutex);
2037 SDL_DestroyCond(is->pictq_cond);
2038 SDL_DestroyMutex(is->subpq_mutex);
2039 SDL_DestroyCond(is->subpq_cond);
2040 SDL_DestroyMutex(is->subtitle_decoder_mutex);
2041 SDL_DestroyMutex(is->audio_decoder_mutex);
2042 SDL_DestroyMutex(is->video_decoder_mutex);
2043 }
2044
2045 void stream_cycle_channel(VideoState *is, int codec_type)
2046 {
2047 AVFormatContext *ic = is->ic;
2048 int start_index, stream_index;
2049 AVStream *st;
2050
2051 if (codec_type == CODEC_TYPE_VIDEO)
2052 start_index = is->video_stream;
2053 else if (codec_type == CODEC_TYPE_AUDIO)
2054 start_index = is->audio_stream;
2055 else
2056 start_index = is->subtitle_stream;
2057 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2058 return;
2059 stream_index = start_index;
2060 for(;;) {
2061 if (++stream_index >= is->ic->nb_streams)
2062 {
2063 if (codec_type == CODEC_TYPE_SUBTITLE)
2064 {
2065 stream_index = -1;
2066 goto the_end;
2067 } else
2068 stream_index = 0;
2069 }
2070 if (stream_index == start_index)
2071 return;
2072 st = ic->streams[stream_index];
2073 if (st->codec->codec_type == codec_type) {
2074 /* check that parameters are OK */
2075 switch(codec_type) {
2076 case CODEC_TYPE_AUDIO:
2077 if (st->codec->sample_rate != 0 &&
2078 st->codec->channels != 0)
2079 goto the_end;
2080 break;
2081 case CODEC_TYPE_VIDEO:
2082 case CODEC_TYPE_SUBTITLE:
2083 goto the_end;
2084 default:
2085 break;
2086 }
2087 }
2088 }
2089 the_end:
2090 stream_component_close(is, start_index);
2091 stream_component_open(is, stream_index);
2092 }
2093
2094
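/* Toggle between windowed and full screen display. If the full screen
   resolution was recorded at startup, recreate the SDL surface at that
   size; otherwise fall back to SDL's own full screen toggle. */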
2095 void toggle_full_screen(void)
2096 {
2097 int w, h, flags;
2098 is_full_screen = !is_full_screen;
2099 if (!fs_screen_width) {
2100 /* use default SDL method */
2101 SDL_WM_ToggleFullScreen(screen);
2102 } else {
2103 /* use the full screen resolution recorded at startup */
2104 flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
2105 if (is_full_screen) {
2106 w = fs_screen_width;
2107 h = fs_screen_height;
2108 flags |= SDL_FULLSCREEN;
2109 } else {
2110 w = screen_width;
2111 h = screen_height;
2112 flags |= SDL_RESIZABLE;
2113 }
2114 screen = SDL_SetVideoMode(w, h, 0, flags);
2115 cur_stream->width = w;
2116 cur_stream->height = h;
2117 }
2118 }
2119
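/* Pause or resume playback of the current stream. */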
2120 void toggle_pause(void)
2121 {
2122 if (cur_stream)
2123 stream_pause(cur_stream);
2124 step = 0;
2125 }
2126
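/* Resume playback just long enough to advance to the next video frame. */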
2127 void step_to_next_frame(void)
2128 {
2129 if (cur_stream) {
2130 if (cur_stream->paused)
2131 cur_stream->paused=0;
2132 cur_stream->video_current_pts = get_video_clock(cur_stream);
2133 }
2134 step = 1;
2135 }
2136
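/* Close the current stream, shut down SDL and terminate the program. */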
2137 void do_exit(void)
2138 {
2139 if (cur_stream) {
2140 stream_close(cur_stream);
2141 cur_stream = NULL;
2142 }
2143 if (show_status)
2144 printf("\n");
2145 SDL_Quit();
2146 exit(0);
2147 }
2148
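/* Switch between video display and audio waveform display. */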
2149 void toggle_audio_display(void)
2150 {
2151 if (cur_stream) {
2152 cur_stream->show_audio = !cur_stream->show_audio;
2153 }
2154 }
2155
2156 /* handle an event sent by the GUI */
2157 void event_loop(void)
2158 {
2159 SDL_Event event;
2160 double incr, pos, frac;
2161
2162 for(;;) {
2163 SDL_WaitEvent(&event);
2164 switch(event.type) {
2165 case SDL_KEYDOWN:
2166 switch(event.key.keysym.sym) {
2167 case SDLK_ESCAPE:
2168 case SDLK_q:
2169 do_exit();
2170 break;
2171 case SDLK_f:
2172 toggle_full_screen();
2173 break;
2174 case SDLK_p:
2175 case SDLK_SPACE:
2176 toggle_pause();
2177 break;
2178 case SDLK_s: //S: Step to next frame
2179 step_to_next_frame();
2180 break;
2181 case SDLK_a:
2182 if (cur_stream)
2183 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2184 break;
2185 case SDLK_v:
2186 if (cur_stream)
2187 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2188 break;
2189 case SDLK_t:
2190 if (cur_stream)
2191 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2192 break;
2193 case SDLK_w:
2194 toggle_audio_display();
2195 break;
2196 case SDLK_LEFT:
2197 incr = -10.0;
2198 goto do_seek;
2199 case SDLK_RIGHT:
2200 incr = 10.0;
2201 goto do_seek;
2202 case SDLK_UP:
2203 incr = 60.0;
2204 goto do_seek;
2205 case SDLK_DOWN:
2206 incr = -60.0;
2207 do_seek:
2208 if (cur_stream) {
2209 pos = get_master_clock(cur_stream);
2210 pos += incr;
2211 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2212 }
2213 break;
2214 default:
2215 break;
2216 }
2217 break;
2218 case SDL_MOUSEBUTTONDOWN:
2219 if (cur_stream) {
2220 int ns, hh, mm, ss;
2221 int tns, thh, tmm, tss;
2222 tns = cur_stream->ic->duration/1000000LL;
2223 thh = tns/3600;
2224 tmm = (tns%3600)/60;
2225 tss = (tns%60);
2226 frac = (double)event.button.x/(double)cur_stream->width;
2227 ns = frac*tns;
2228 hh = ns/3600;
2229 mm = (ns%3600)/60;
2230 ss = (ns%60);
2231 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2232 hh, mm, ss, thh, tmm, tss);
2233 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2234 }
2235 break;
2236 case SDL_VIDEORESIZE:
2237 if (cur_stream) {
2238 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2239 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2240 cur_stream->width = event.resize.w;
2241 cur_stream->height = event.resize.h;
2242 }
2243 break;
2244 case SDL_QUIT:
2245 case FF_QUIT_EVENT:
2246 do_exit();
2247 break;
2248 case FF_ALLOC_EVENT:
2249 alloc_picture(event.user.data1);
2250 break;
2251 case FF_REFRESH_EVENT:
2252 video_refresh_timer(event.user.data1);
2253 break;
2254 default:
2255 break;
2256 }
2257 }
2258 }
2259
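/* command line option callbacks */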
2260 void opt_width(const char *arg)
2261 {
2262 screen_width = atoi(arg);
2263 }
2264
2265 void opt_height(const char *arg)
2266 {
2267 screen_height = atoi(arg);
2268 }
2269
2270 static void opt_format(const char *arg)
2271 {
2272 file_iformat = av_find_input_format(arg);
2273 if (!file_iformat) {
2274 fprintf(stderr, "Unknown input format: %s\n", arg);
2275 exit(1);
2276 }
2277 }
2278
2279 static void opt_image_format(const char *arg)
2280 {
2281 AVImageFormat *f;
2282
2283 for(f = first_image_format; f != NULL; f = f->next) {
2284 if (!strcmp(arg, f->name))
2285 break;
2286 }
2287 if (!f) {
2288 fprintf(stderr, "Unknown image format: '%s'\n", arg);
2289 exit(1);
2290 }
2291 image_format = f;
2292 }
2293
2294 #ifdef CONFIG_NETWORK
2295 void opt_rtp_tcp(void)
2296 {
2297 /* use only the TCP transport for RTP */
2298 rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
2299 }
2300 #endif
2301
2302 void opt_sync(const char *arg)
2303 {
2304 if (!strcmp(arg, "audio"))
2305 av_sync_type = AV_SYNC_AUDIO_MASTER;
2306 else if (!strcmp(arg, "video"))
2307 av_sync_type = AV_SYNC_VIDEO_MASTER;
2308 else if (!strcmp(arg, "ext"))
2309 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2310 else
2311 show_help();
2312 }
2313
2314 void opt_seek(const char *arg)
2315 {
2316 start_time = parse_date(arg, 1);
2317 }
2318
2319 static void opt_debug(const char *arg)
2320 {
2321 debug = atoi(arg);
2322 }
2323
2324 static void opt_vismv(const char *arg)
2325 {
2326 debug_mv = atoi(arg);
2327 }
2328
2329 static void opt_thread_count(const char *arg)
2330 {
2331 thread_count= atoi(arg);
2332 #if !defined(HAVE_THREADS)
2333 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2334 #endif
2335 }
2336
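/* command line options understood by ffplay, parsed with parse_options() */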
2337 const OptionDef options[] = {
2338 { "h", 0, {(void*)show_help}, "show help" },
2339 { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2340 { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2341 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2342 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2343 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2344 { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2345 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2346 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2347 { "img", HAS_ARG, {(void*)opt_image_format}, "force image format", "img_fmt" },
2348 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2349 { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2350 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2351 { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2352 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non-spec-compliant optimizations", "" },
2353 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2354 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "decode at reduced resolution", "" },
2355 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "skip loop filtering for selected frames", "" },
2356 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "skip decoding of selected frames", "" },
2357 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "skip the IDCT for selected frames", "" },
2358 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2359 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2360 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2361 #ifdef CONFIG_NETWORK
2362 { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
2363 #endif
2364 { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync type (type=audio/video/ext)", "type" },
2365 { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2366 { NULL, },
2367 };
2368
2369 void show_help(void)
2370 {
2371 printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003 Fabrice Bellard\n"
2372 "usage: ffplay [options] input_file\n"
2373 "Simple media player\n");
2374 printf("\n");
2375 show_help_options(options, "Main options:\n",
2376 OPT_EXPERT, 0);
2377 show_help_options(options, "\nAdvanced options:\n",
2378 OPT_EXPERT, OPT_EXPERT);
2379 printf("\nWhile playing:\n"
2380 "q, ESC quit\n"
2381 "f toggle full screen\n"
2382 "p, SPC pause\n"
2383 "a cycle audio channel\n"
2384 "v cycle video channel\n"
2385 "t cycle subtitle channel\n"
2386 "w show audio waves\n"
2387 "left/right seek backward/forward 10 seconds\n"
2388 "down/up seek backward/forward 1 minute\n"
2389 "mouse click seek to percentage in file corresponding to fraction of width\n"
2390 );
2391 exit(1);
2392 }
2393
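/* a non-option argument is the input file name; "-" selects standard input via the "pipe:" protocol */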
2394 void parse_arg_file(const char *filename)
2395 {
2396 if (!strcmp(filename, "-"))
2397 filename = "pipe:";
2398 input_filename = filename;
2399 }
2400
2401 /* program entry point */
2402 int main(int argc, char **argv)
2403 {
2404 int flags, w, h;
2405
2406 /* register all codecs, demuxers and protocols */
2407 av_register_all();
2408
2409 #ifdef CONFIG_OS2
2410 MorphToPM(); // Morph the VIO application to a PM one to be able to use Win* functions
2411
2412 // Make stdout and stderr unbuffered
2413 setbuf( stdout, NULL );
2414 setbuf( stderr, NULL );
2415 #endif
2416
2417 parse_options(argc, argv, options);
2418
2419 if (!input_filename)
2420 show_help();
2421
2422 if (display_disable) {
2423 video_disable = 1;
2424 }
2425 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2426 #if !defined(CONFIG_WIN32) && !defined(CONFIG_DARWIN)
2427 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 or darwin */
2428 #endif
2429 if (SDL_Init (flags)) {
2430 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2431 exit(1);
2432 }
2433
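    /* create the SDL output surface; full screen uses the desktop size
       when SDL can report it, otherwise the configured window size */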
2434 if (!display_disable) {
2435 #ifdef HAVE_SDL_VIDEO_SIZE
2436 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2437 fs_screen_width = vi->current_w;
2438 fs_screen_height = vi->current_h;
2439 #endif
2440 flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
2441 if (is_full_screen && fs_screen_width) {
2442 w = fs_screen_width;
2443 h = fs_screen_height;
2444 flags |= SDL_FULLSCREEN;
2445 } else {
2446 w = screen_width;
2447 h = screen_height;
2448 flags |= SDL_RESIZABLE;
2449 }
2450 #ifndef CONFIG_DARWIN
2451 screen = SDL_SetVideoMode(w, h, 0, flags);
2452 #else
2453 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
2454 screen = SDL_SetVideoMode(w, h, 24, flags);
2455 #endif
2456 if (!screen) {
2457 fprintf(stderr, "SDL: could not set video mode - exiting\n");
2458 exit(1);
2459 }
2460 SDL_WM_SetCaption("FFplay", "FFplay");
2461 }
2462
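    /* disable SDL event types that would otherwise flood the event queue */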
2463 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2464 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2465 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2466 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2467
2468 cur_stream = stream_open(input_filename, file_iformat);
2469
2470 event_loop();
2471
2472 /* never reached: event_loop() only exits the program via do_exit() */
2473
2474 return 0;
2475 }