Disable v4l2 if videodev2.h is not "sanitized"
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19 #define HAVE_AV_CONFIG_H
20 #include "avformat.h"
21
22 #include "cmdutils.h"
23
24 #include <SDL.h>
25 #include <SDL_thread.h>
26
27 #ifdef CONFIG_WIN32
28 #undef main /* We don't want SDL to override our main() */
29 #endif
30
31 #ifdef CONFIG_OS2
32 #define INCL_DOS
33 #include <os2.h>
34 #include <stdio.h>
35
/* OS/2 only: switch this process's session type from VIO (text mode, 2)
   to PM (Presentation Manager, 3) so a windowed UI can be created. */
void MorphToPM()
{
    PPIB pib;
    PTIB tib;

    DosGetInfoBlocks(&tib, &pib);

    // Change flag from VIO to PM:
    if (pib->pib_ultype==2) pib->pib_ultype = 3;
}
46 #endif
47
48 #if defined(__linux__)
49 #define HAVE_X11
50 #endif
51
52 #ifdef HAVE_X11
53 #include <X11/Xlib.h>
54 #endif
55
//#define DEBUG_SYNC

/* upper bounds (in bytes of queued packet payload) for each packet queue;
   the demux thread pauses once a queue is full */
#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)
79
/* Thread-safe FIFO of demuxed AVPackets, filled by the demux thread and
   drained by a decoder thread. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly-linked list head / tail */
    int nb_packets;      /* number of packets currently queued */
    int size;            /* total payload bytes of the queued packets */
    int abort_request;   /* when set, blocked readers wake up and fail */
    SDL_mutex *mutex;    /* protects every field above */
    SDL_cond *cond;      /* signalled on put and on abort */
} PacketQueue;
88
/* depth of the decoded picture / subtitle FIFOs */
#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame stored in an SDL YUV overlay, ready to display. */
typedef struct VideoPicture {
    double pts; ///<presentation time stamp for this picture
    SDL_Overlay *bmp;  /* overlay holding the converted frame (NULL until allocated) */
    int width, height; /* source height & width */
    int allocated;     /* set once bmp has been (re)created in the main thread */
} VideoPicture;

/* One decoded subtitle together with the pts at which it was queued. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;  /* decoded subtitle rectangles and display times */
} SubPicture;

/* Choice of master clock that the other streams are slaved to. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
109
/* Complete player state for one open media file: threads, per-stream
   decoder state, packet queues, decoded picture/subtitle queues and the
   clocks used for A/V synchronisation. */
typedef struct VideoState {
    SDL_Thread *parse_tid;   /* demux (parser) thread */
    SDL_Thread *video_tid;   /* video decoder thread */
    AVInputFormat *iformat;  /* input format (may be forced by the user) */
    int no_background;       /* skip repainting the window background once */
    int abort_request;       /* tells the threads to exit */
    int paused;              /* playback currently paused */
    int last_paused;         /* previous 'paused' value, to detect toggles */
    int seek_req;            /* a seek has been requested */
    int seek_flags;          /* AVSEEK_FLAG_* for the pending seek */
    int64_t seek_pos;        /* target position of the pending seek */
    AVFormatContext *ic;     /* demuxer context */
    int dtg_active_format;   /* last seen DTG active-format descriptor */

    int audio_stream;        /* index of the selected audio stream */

    int av_sync_type;        /* one of the AV_SYNC_* constants above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;  /* av_gettime() when external_clock was set */

    double audio_clock;      /* pts of the most recently decoded audio data */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;      /* audio stream, or NULL if none */
    PacketQueue audioq;      /* demuxed audio packets */
    int audio_hw_buf_size;   /* size of the SDL audio hardware buffer, bytes */
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;      /* packet currently being decoded */
    uint8_t *audio_pkt_data; /* read cursor inside audio_pkt */
    int audio_pkt_size;      /* bytes left to decode in audio_pkt */

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];  /* ring buffer of recent samples for the waveform view */
    int sample_array_index;  /* write index into sample_array */
    int last_i_start;        /* last waveform start index (used while paused) */

    SDL_Thread *subtitle_tid;     /* subtitle decoder thread */
    int subtitle_stream;          /* index of the selected subtitle stream */
    int subtitle_stream_changed;  /* set when the subtitle stream is switched */
    AVStream *subtitle_st;        /* subtitle stream, or NULL if none */
    PacketQueue subtitleq;        /* demuxed subtitle packets */
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];  /* decoded subtitle FIFO */
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;       /* protects the subtitle FIFO fields */
    SDL_cond *subpq_cond;

    double frame_timer;      /* accumulated target display time of the next frame */
    double frame_last_pts;   /* pts of the previously displayed frame */
    double frame_last_delay; /* delay used for the previous frame */
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;        /* index of the selected video stream */
    AVStream *video_st;      /* video stream, or NULL if none */
    PacketQueue videoq;      /* demuxed video packets */
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];  /* decoded picture FIFO */
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;  /* protects the picture FIFO fields */
    SDL_cond *pictq_cond;

    SDL_mutex *video_decoder_mutex;
    SDL_mutex *audio_decoder_mutex;
    SDL_mutex *subtitle_decoder_mutex;

    //    QETimer *video_timer;
    char filename[1024];     /* name of the input file */
    int width, height, xleft, ytop;  /* display size and top-left corner of the video area */
} VideoState;
184
/* forward declarations */
void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;     /* forced input format */
static AVImageFormat *image_format;
static const char *input_filename;
static int fs_screen_width;             /* fullscreen display size */
static int fs_screen_height;
static int screen_width = 640;          /* default window size */
static int screen_height = 480;
static int audio_disable;
static int video_disable;
static int display_disable;
static int show_status;                 /* periodically print A-V / queue status */
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;                    /* single-frame stepping mode */
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_resilience = FF_ER_CAREFUL;
static int error_concealment = 3;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;          /* stream currently being played */
static int64_t audio_callback_time;     /* av_gettime() captured for audio/waveform sync */

/* custom SDL events posted to the main event loop */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

SDL_Surface *screen;
227
228 /* packet queue handling */
229 static void packet_queue_init(PacketQueue *q)
230 {
231 memset(q, 0, sizeof(PacketQueue));
232 q->mutex = SDL_CreateMutex();
233 q->cond = SDL_CreateCond();
234 }
235
236 static void packet_queue_flush(PacketQueue *q)
237 {
238 AVPacketList *pkt, *pkt1;
239
240 SDL_LockMutex(q->mutex);
241 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
242 pkt1 = pkt->next;
243 av_free_packet(&pkt->pkt);
244 av_freep(&pkt);
245 }
246 q->last_pkt = NULL;
247 q->first_pkt = NULL;
248 q->nb_packets = 0;
249 q->size = 0;
250 SDL_UnlockMutex(q->mutex);
251 }
252
253 static void packet_queue_end(PacketQueue *q)
254 {
255 packet_queue_flush(q);
256 SDL_DestroyMutex(q->mutex);
257 SDL_DestroyCond(q->cond);
258 }
259
260 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
261 {
262 AVPacketList *pkt1;
263
264 /* duplicate the packet */
265 if (av_dup_packet(pkt) < 0)
266 return -1;
267
268 pkt1 = av_malloc(sizeof(AVPacketList));
269 if (!pkt1)
270 return -1;
271 pkt1->pkt = *pkt;
272 pkt1->next = NULL;
273
274
275 SDL_LockMutex(q->mutex);
276
277 if (!q->last_pkt)
278
279 q->first_pkt = pkt1;
280 else
281 q->last_pkt->next = pkt1;
282 q->last_pkt = pkt1;
283 q->nb_packets++;
284 q->size += pkt1->pkt.size;
285 /* XXX: should duplicate packet data in DV case */
286 SDL_CondSignal(q->cond);
287
288 SDL_UnlockMutex(q->mutex);
289 return 0;
290 }
291
292 static void packet_queue_abort(PacketQueue *q)
293 {
294 SDL_LockMutex(q->mutex);
295
296 q->abort_request = 1;
297
298 SDL_CondSignal(q->cond);
299
300 SDL_UnlockMutex(q->mutex);
301 }
302
303 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
304 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
305 {
306 AVPacketList *pkt1;
307 int ret;
308
309 SDL_LockMutex(q->mutex);
310
311 for(;;) {
312 if (q->abort_request) {
313 ret = -1;
314 break;
315 }
316
317 pkt1 = q->first_pkt;
318 if (pkt1) {
319 q->first_pkt = pkt1->next;
320 if (!q->first_pkt)
321 q->last_pkt = NULL;
322 q->nb_packets--;
323 q->size -= pkt1->pkt.size;
324 *pkt = pkt1->pkt;
325 av_free(pkt1);
326 ret = 1;
327 break;
328 } else if (!block) {
329 ret = 0;
330 break;
331 } else {
332 SDL_CondWait(q->cond, q->mutex);
333 }
334 }
335 SDL_UnlockMutex(q->mutex);
336 return ret;
337 }
338
339 static inline void fill_rectangle(SDL_Surface *screen,
340 int x, int y, int w, int h, int color)
341 {
342 SDL_Rect rect;
343 rect.x = x;
344 rect.y = y;
345 rect.w = w;
346 rect.h = h;
347 SDL_FillRect(screen, &rect, color);
348 }
349
#if 0
/* draw only the border of a rectangle */
/* Currently disabled: paints the four bands surrounding the video area
   (left, right, top, bottom) with 'color', clamping each band so it
   stays inside the window. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
387
388
389
390 #define SCALEBITS 10
391 #define ONE_HALF (1 << (SCALEBITS - 1))
392 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
393
394 #define RGB_TO_Y_CCIR(r, g, b) \
395 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
396 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
397
398 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
399 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
400 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
401
402 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
403 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
404 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
405
406 #define ALPHA_BLEND(a, oldp, newp, s)\
407 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
408
409 #define RGBA_IN(r, g, b, a, s)\
410 {\
411 unsigned int v = ((const uint32_t *)(s))[0];\
412 a = (v >> 24) & 0xff;\
413 r = (v >> 16) & 0xff;\
414 g = (v >> 8) & 0xff;\
415 b = v & 0xff;\
416 }
417
418 #define YUVA_IN(y, u, v, a, s, pal)\
419 {\
420 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
421 a = (val >> 24) & 0xff;\
422 y = (val >> 16) & 0xff;\
423 u = (val >> 8) & 0xff;\
424 v = val & 0xff;\
425 }
426
427 #define YUVA_OUT(d, y, u, v, a)\
428 {\
429 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
430 }
431
432
433 #define BPP 1
434
435 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
436 {
437 int wrap, wrap3, width2, skip2;
438 int y, u, v, a, u1, v1, a1, w, h;
439 uint8_t *lum, *cb, *cr;
440 const uint8_t *p;
441 const uint32_t *pal;
442
443 lum = dst->data[0] + rect->y * dst->linesize[0];
444 cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
445 cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];
446
447 width2 = (rect->w + 1) >> 1;
448 skip2 = rect->x >> 1;
449 wrap = dst->linesize[0];
450 wrap3 = rect->linesize;
451 p = rect->bitmap;
452 pal = rect->rgba_palette; /* Now in YCrCb! */
453
454 if (rect->y & 1) {
455 lum += rect->x;
456 cb += skip2;
457 cr += skip2;
458
459 if (rect->x & 1) {
460 YUVA_IN(y, u, v, a, p, pal);
461 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
462 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
463 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
464 cb++;
465 cr++;
466 lum++;
467 p += BPP;
468 }
469 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
470 YUVA_IN(y, u, v, a, p, pal);
471 u1 = u;
472 v1 = v;
473 a1 = a;
474 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
475
476 YUVA_IN(y, u, v, a, p + BPP, pal);
477 u1 += u;
478 v1 += v;
479 a1 += a;
480 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
481 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
482 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
483 cb++;
484 cr++;
485 p += 2 * BPP;
486 lum += 2;
487 }
488 if (w) {
489 YUVA_IN(y, u, v, a, p, pal);
490 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
491 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
492 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
493 }
494 p += wrap3 + (wrap3 - rect->w * BPP);
495 lum += wrap + (wrap - rect->w - rect->x);
496 cb += dst->linesize[1] - width2 - skip2;
497 cr += dst->linesize[2] - width2 - skip2;
498 }
499 for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
500 lum += rect->x;
501 cb += skip2;
502 cr += skip2;
503
504 if (rect->x & 1) {
505 YUVA_IN(y, u, v, a, p, pal);
506 u1 = u;
507 v1 = v;
508 a1 = a;
509 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
510 p += wrap3;
511 lum += wrap;
512 YUVA_IN(y, u, v, a, p, pal);
513 u1 += u;
514 v1 += v;
515 a1 += a;
516 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
517 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
518 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
519 cb++;
520 cr++;
521 p += -wrap3 + BPP;
522 lum += -wrap + 1;
523 }
524 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
525 YUVA_IN(y, u, v, a, p, pal);
526 u1 = u;
527 v1 = v;
528 a1 = a;
529 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
530
531 YUVA_IN(y, u, v, a, p, pal);
532 u1 += u;
533 v1 += v;
534 a1 += a;
535 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
536 p += wrap3;
537 lum += wrap;
538
539 YUVA_IN(y, u, v, a, p, pal);
540 u1 += u;
541 v1 += v;
542 a1 += a;
543 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
544
545 YUVA_IN(y, u, v, a, p, pal);
546 u1 += u;
547 v1 += v;
548 a1 += a;
549 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
550
551 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
552 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
553
554 cb++;
555 cr++;
556 p += -wrap3 + 2 * BPP;
557 lum += -wrap + 2;
558 }
559 if (w) {
560 YUVA_IN(y, u, v, a, p, pal);
561 u1 = u;
562 v1 = v;
563 a1 = a;
564 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
565 p += wrap3;
566 lum += wrap;
567 YUVA_IN(y, u, v, a, p, pal);
568 u1 += u;
569 v1 += v;
570 a1 += a;
571 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
572 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
573 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
574 cb++;
575 cr++;
576 p += -wrap3 + BPP;
577 lum += -wrap + 1;
578 }
579 p += wrap3 + (wrap3 - rect->w * BPP);
580 lum += wrap + (wrap - rect->w - rect->x);
581 cb += dst->linesize[1] - width2 - skip2;
582 cr += dst->linesize[2] - width2 - skip2;
583 }
584 /* handle odd height */
585 if (h) {
586 lum += rect->x;
587 cb += skip2;
588 cr += skip2;
589
590 if (rect->x & 1) {
591 YUVA_IN(y, u, v, a, p, pal);
592 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
594 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
595 cb++;
596 cr++;
597 lum++;
598 p += BPP;
599 }
600 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
601 YUVA_IN(y, u, v, a, p, pal);
602 u1 = u;
603 v1 = v;
604 a1 = a;
605 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
606
607 YUVA_IN(y, u, v, a, p + BPP, pal);
608 u1 += u;
609 v1 += v;
610 a1 += a;
611 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
612 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
613 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
614 cb++;
615 cr++;
616 p += 2 * BPP;
617 lum += 2;
618 }
619 if (w) {
620 YUVA_IN(y, u, v, a, p, pal);
621 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
623 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
624 }
625 }
626 }
627
628 static void free_subpicture(SubPicture *sp)
629 {
630 int i;
631
632 for (i = 0; i < sp->sub.num_rects; i++)
633 {
634 av_free(sp->sub.rects[i].bitmap);
635 av_free(sp->sub.rects[i].rgba_palette);
636 }
637
638 av_free(sp->sub.rects);
639
640 memset(&sp->sub, 0, sizeof(AVSubtitle));
641 }
642
643 static void video_image_display(VideoState *is)
644 {
645 VideoPicture *vp;
646 SubPicture *sp;
647 AVPicture pict;
648 float aspect_ratio;
649 int width, height, x, y;
650 SDL_Rect rect;
651 int i;
652
653 vp = &is->pictq[is->pictq_rindex];
654 if (vp->bmp) {
655 /* XXX: use variable in the frame */
656 if (is->video_st->codec->sample_aspect_ratio.num == 0)
657 aspect_ratio = 0;
658 else
659 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
660 * is->video_st->codec->width / is->video_st->codec->height;;
661 if (aspect_ratio <= 0.0)
662 aspect_ratio = (float)is->video_st->codec->width /
663 (float)is->video_st->codec->height;
664 /* if an active format is indicated, then it overrides the
665 mpeg format */
666 #if 0
667 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
668 is->dtg_active_format = is->video_st->codec->dtg_active_format;
669 printf("dtg_active_format=%d\n", is->dtg_active_format);
670 }
671 #endif
672 #if 0
673 switch(is->video_st->codec->dtg_active_format) {
674 case FF_DTG_AFD_SAME:
675 default:
676 /* nothing to do */
677 break;
678 case FF_DTG_AFD_4_3:
679 aspect_ratio = 4.0 / 3.0;
680 break;
681 case FF_DTG_AFD_16_9:
682 aspect_ratio = 16.0 / 9.0;
683 break;
684 case FF_DTG_AFD_14_9:
685 aspect_ratio = 14.0 / 9.0;
686 break;
687 case FF_DTG_AFD_4_3_SP_14_9:
688 aspect_ratio = 14.0 / 9.0;
689 break;
690 case FF_DTG_AFD_16_9_SP_14_9:
691 aspect_ratio = 14.0 / 9.0;
692 break;
693 case FF_DTG_AFD_SP_4_3:
694 aspect_ratio = 4.0 / 3.0;
695 break;
696 }
697 #endif
698
699 if (is->subtitle_st)
700 {
701 if (is->subpq_size > 0)
702 {
703 sp = &is->subpq[is->subpq_rindex];
704
705 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
706 {
707 SDL_LockYUVOverlay (vp->bmp);
708
709 pict.data[0] = vp->bmp->pixels[0];
710 pict.data[1] = vp->bmp->pixels[2];
711 pict.data[2] = vp->bmp->pixels[1];
712
713 pict.linesize[0] = vp->bmp->pitches[0];
714 pict.linesize[1] = vp->bmp->pitches[2];
715 pict.linesize[2] = vp->bmp->pitches[1];
716
717 for (i = 0; i < sp->sub.num_rects; i++)
718 blend_subrect(&pict, &sp->sub.rects[i]);
719
720 SDL_UnlockYUVOverlay (vp->bmp);
721 }
722 }
723 }
724
725
726 /* XXX: we suppose the screen has a 1.0 pixel ratio */
727 height = is->height;
728 width = ((int)rint(height * aspect_ratio)) & -3;
729 if (width > is->width) {
730 width = is->width;
731 height = ((int)rint(width / aspect_ratio)) & -3;
732 }
733 x = (is->width - width) / 2;
734 y = (is->height - height) / 2;
735 if (!is->no_background) {
736 /* fill the background */
737 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
738 } else {
739 is->no_background = 0;
740 }
741 rect.x = is->xleft + x;
742 rect.y = is->xleft + y;
743 rect.w = width;
744 rect.h = height;
745 SDL_DisplayYUVOverlay(vp->bmp, &rect);
746 } else {
747 #if 0
748 fill_rectangle(screen,
749 is->xleft, is->ytop, is->width, is->height,
750 QERGB(0x00, 0x00, 0x00));
751 #endif
752 }
753 }
754
/* Mathematical modulo: like a % b but the result is always mapped into
   [0, b) for positive b, even when a is negative. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
763
764 static void video_audio_display(VideoState *s)
765 {
766 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
767 int ch, channels, h, h2, bgcolor, fgcolor;
768 int16_t time_diff;
769
770 /* compute display index : center on currently output samples */
771 channels = s->audio_st->codec->channels;
772 nb_display_channels = channels;
773 if (!s->paused) {
774 n = 2 * channels;
775 delay = audio_write_get_buf_size(s);
776 delay /= n;
777
778 /* to be more precise, we take into account the time spent since
779 the last buffer computation */
780 if (audio_callback_time) {
781 time_diff = av_gettime() - audio_callback_time;
782 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
783 }
784
785 delay -= s->width / 2;
786 if (delay < s->width)
787 delay = s->width;
788 i_start = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
789 s->last_i_start = i_start;
790 } else {
791 i_start = s->last_i_start;
792 }
793
794 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
795 fill_rectangle(screen,
796 s->xleft, s->ytop, s->width, s->height,
797 bgcolor);
798
799 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
800
801 /* total height for one channel */
802 h = s->height / nb_display_channels;
803 /* graph height / 2 */
804 h2 = (h * 9) / 20;
805 for(ch = 0;ch < nb_display_channels; ch++) {
806 i = i_start + ch;
807 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
808 for(x = 0; x < s->width; x++) {
809 y = (s->sample_array[i] * h2) >> 15;
810 if (y < 0) {
811 y = -y;
812 ys = y1 - y;
813 } else {
814 ys = y1;
815 }
816 fill_rectangle(screen,
817 s->xleft + x, ys, 1, y,
818 fgcolor);
819 i += channels;
820 if (i >= SAMPLE_ARRAY_SIZE)
821 i -= SAMPLE_ARRAY_SIZE;
822 }
823 }
824
825 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
826
827 for(ch = 1;ch < nb_display_channels; ch++) {
828 y = s->ytop + ch * h;
829 fill_rectangle(screen,
830 s->xleft, y, s->width, 1,
831 fgcolor);
832 }
833 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
834 }
835
836 /* display the current picture, if any */
837 static void video_display(VideoState *is)
838 {
839 if (is->audio_st && is->show_audio)
840 video_audio_display(is);
841 else if (is->video_st)
842 video_image_display(is);
843 }
844
845 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
846 {
847 SDL_Event event;
848 event.type = FF_REFRESH_EVENT;
849 event.user.data1 = opaque;
850 SDL_PushEvent(&event);
851 return 0; /* 0 means stop timer */
852 }
853
/* schedule a video refresh in 'delay' ms: arms a one-shot SDL timer
   whose callback posts FF_REFRESH_EVENT for this stream */
static void schedule_refresh(VideoState *is, int delay)
{
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}
859
860 /* get the current audio clock value */
861 static double get_audio_clock(VideoState *is)
862 {
863 double pts;
864 int hw_buf_size, bytes_per_sec;
865 pts = is->audio_clock;
866 hw_buf_size = audio_write_get_buf_size(is);
867 bytes_per_sec = 0;
868 if (is->audio_st) {
869 bytes_per_sec = is->audio_st->codec->sample_rate *
870 2 * is->audio_st->codec->channels;
871 }
872 if (bytes_per_sec)
873 pts -= (double)hw_buf_size / bytes_per_sec;
874 return pts;
875 }
876
877 /* get the current video clock value */
878 static double get_video_clock(VideoState *is)
879 {
880 double delta;
881 if (is->paused) {
882 delta = 0;
883 } else {
884 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
885 }
886 return is->video_current_pts + delta;
887 }
888
889 /* get the current external clock value */
890 static double get_external_clock(VideoState *is)
891 {
892 int64_t ti;
893 ti = av_gettime();
894 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
895 }
896
897 /* get the current master clock value */
898 static double get_master_clock(VideoState *is)
899 {
900 double val;
901
902 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
903 if (is->video_st)
904 val = get_video_clock(is);
905 else
906 val = get_audio_clock(is);
907 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
908 if (is->audio_st)
909 val = get_audio_clock(is);
910 else
911 val = get_video_clock(is);
912 } else {
913 val = get_external_clock(is);
914 }
915 return val;
916 }
917
918 /* seek in the stream */
919 static void stream_seek(VideoState *is, int64_t pos, int rel)
920 {
921 if (!is->seek_req) {
922 is->seek_pos = pos;
923 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
924 is->seek_req = 1;
925 }
926 }
927
928 /* pause or resume the video */
929 static void stream_pause(VideoState *is)
930 {
931 is->paused = !is->paused;
932 if (is->paused) {
933 is->video_current_pts = get_video_clock(is);
934 }
935 }
936
/* called to display each frame: dequeues and shows the next video
   picture (adjusting its display delay against the master clock),
   retires expired subtitles, reschedules itself, and optionally prints
   a status line. Runs in the main thread via FF_REFRESH_EVENT. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* compute nominal delay */
            delay = vp->pts - is->frame_last_pts;
            if (delay <= 0 || delay >= 1.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to follow master synchronisation source */
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
                /* if video is slave, we try to correct big delays by
                   duplicating or deleting a frame */
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;

                /* skip or repeat frame. We take into account the
                   delay to compute the threshold. I still don't know
                   if it is the best guess */
                sync_threshold = AV_SYNC_THRESHOLD;
                if (delay > sync_threshold)
                    sync_threshold = delay;
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if (diff <= -sync_threshold)
                        delay = 0;          /* video is late: show immediately */
                    else if (diff >= sync_threshold)
                        delay = 2 * delay;  /* video is early: hold the frame longer */
                }
            }

            is->frame_timer += delay;
            /* compute the REAL delay (we need to do that to avoid
               long term errors */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if (actual_delay < 0.010) {
                /* XXX: should skip picture */
                actual_delay = 0.010;
            }
            /* launch timer for next picture */
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

#if defined(DEBUG_SYNC)
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
                   delay, actual_delay, vp->pts, -diff);
#endif

            if (is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switch: drop everything queued */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the head subtitle once it has expired,
                           or once the next one is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                            || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh the status line at most every 500 ms */
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1101
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previous overlay (e.g. after a size change) */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if 0
    /* XXX: use generic function */
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
    switch(is->video_st->codec->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV422:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        is_yuv = 1;
        break;
    default:
        is_yuv = 0;
        break;
    }
#endif
    /* create a YV12 overlay matching the codec frame size */
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    /* mark the slot allocated and wake queue_picture(), which is
       blocked waiting for this flag */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1143
/**
 * Queue a decoded frame for display: wait for a free slot in the picture
 * queue, have the main thread (re)allocate the overlay if needed, then
 * convert the frame to YUV420P into the overlay and stamp it with pts.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video packet queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;
    AVPicture pict;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        /* SDL YV12 stores the V plane before U, hence planes 1 and 2
           are swapped here */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        img_convert(&pict, dst_pix_fmt,
                    (AVPicture *)src_frame, is->video_st->codec->pix_fmt,
                    is->video_st->codec->width, is->video_st->codec->height);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1222
1223 /**
1224 * compute the exact PTS for the picture if it is omitted in the stream
1225 * @param pts1 the dts of the pkt / pts of the frame
1226 */
1227 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1228 {
1229 double frame_delay, pts;
1230
1231 pts = pts1;
1232
1233 if (pts != 0) {
1234 /* update video clock with pts, if present */
1235 is->video_clock = pts;
1236 } else {
1237 pts = is->video_clock;
1238 }
1239 /* update video clock for next frame */
1240 frame_delay = av_q2d(is->video_st->codec->time_base);
1241 /* for MPEG2, the frame can be repeated, so we update the
1242 clock accordingly */
1243 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1244 is->video_clock += frame_delay;
1245
1246 #if defined(DEBUG_SYNC) && 0
1247 {
1248 int ftype;
1249 if (src_frame->pict_type == FF_B_TYPE)
1250 ftype = 'B';
1251 else if (src_frame->pict_type == FF_I_TYPE)
1252 ftype = 'I';
1253 else
1254 ftype = 'P';
1255 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1256 ftype, pts, pts1);
1257 }
1258 #endif
1259 return queue_picture(is, src_frame, pts);
1260 }
1261
/* Video decoding thread: pulls packets from is->videoq, decodes them
   and pushes finished frames onto the picture queue via
   output_picture2().  Exits when the queue is aborted. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        /* poll (10 ms granularity) while the stream is paused */
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        /* blocking read; negative return means the queue was aborted */
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;
        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->dts != AV_NOPTS_VALUE)
            pts = av_q2d(is->video_st->time_base)*pkt->dts;

        /* serialize codec access against the seek code in decode_thread */
        SDL_LockMutex(is->video_decoder_mutex);
        len1 = avcodec_decode_video(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt->data, pkt->size);
        SDL_UnlockMutex(is->video_decoder_mutex);
//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        /* frame-step mode: re-pause after each decoded packet */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}
1302
/* Subtitle decoding thread: pulls packets from is->subtitleq, decodes
   them into is->subpq and converts each bitmap palette from RGBA to
   YUVA so blending onto the YUV overlay is cheap at display time. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* poll (10 ms granularity) while the stream is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* wait for a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        /* serialize codec access against the seek code in decode_thread */
        SDL_LockMutex(is->subtitle_decoder_mutex);
        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
                                       &sp->sub, &got_subtitle,
                                       pkt->data, pkt->size);
        SDL_UnlockMutex(is->subtitle_decoder_mutex);
//            if (len1 < 0)
//                break;
        /* format == 0 means a bitmap (paletted) subtitle */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette from RGBA to YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1375
1376 /* copy samples for viewing in editor window */
1377 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1378 {
1379 int size, len, channels;
1380
1381 channels = is->audio_st->codec->channels;
1382
1383 size = samples_size / sizeof(short);
1384 while (size > 0) {
1385 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1386 if (len > size)
1387 len = size;
1388 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1389 samples += len;
1390 is->sample_array_index += len;
1391 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1392 is->sample_array_index = 0;
1393 size -= len;
1394 }
1395 }
1396
1397 /* return the new audio buffer size (samples can be added or deleted
1398 to get better sync if video or external master clock) */
1399 static int synchronize_audio(VideoState *is, short *samples,
1400 int samples_size1, double pts)
1401 {
1402 int n, samples_size;
1403 double ref_clock;
1404
1405 n = 2 * is->audio_st->codec->channels;
1406 samples_size = samples_size1;
1407
1408 /* if not master, then we try to remove or add samples to correct the clock */
1409 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1410 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1411 double diff, avg_diff;
1412 int wanted_size, min_size, max_size, nb_samples;
1413
1414 ref_clock = get_master_clock(is);
1415 diff = get_audio_clock(is) - ref_clock;
1416
1417 if (diff < AV_NOSYNC_THRESHOLD) {
1418 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1419 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1420 /* not enough measures to have a correct estimate */
1421 is->audio_diff_avg_count++;
1422 } else {
1423 /* estimate the A-V difference */
1424 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1425
1426 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1427 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1428 nb_samples = samples_size / n;
1429
1430 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1431 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1432 if (wanted_size < min_size)
1433 wanted_size = min_size;
1434 else if (wanted_size > max_size)
1435 wanted_size = max_size;
1436
1437 /* add or remove samples to correction the synchro */
1438 if (wanted_size < samples_size) {
1439 /* remove samples */
1440 samples_size = wanted_size;
1441 } else if (wanted_size > samples_size) {
1442 uint8_t *samples_end, *q;
1443 int nb;
1444
1445 /* add samples */
1446 nb = (samples_size - wanted_size);
1447 samples_end = (uint8_t *)samples + samples_size - n;
1448 q = samples_end + n;
1449 while (nb > 0) {
1450 memcpy(q, samples_end, n);
1451 q += n;
1452 nb -= n;
1453 }
1454 samples_size = wanted_size;
1455 }
1456 }
1457 #if 0
1458 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1459 diff, avg_diff, samples_size - samples_size1,
1460 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1461 #endif
1462 }
1463 } else {
1464 /* too big difference : may be initial PTS errors, so
1465 reset A-V filter */
1466 is->audio_diff_avg_count = 0;
1467 is->audio_diff_cum = 0;
1468 }
1469 }
1470
1471 return samples_size;
1472 }
1473
/* decode one audio frame and returns its uncompressed size */
/* Drains the current packet frame by frame; pulls the next packet from
   is->audioq when exhausted.  Maintains is->audio_clock (seconds) and
   stores the pts of the returned frame in *pts_ptr.
   Returns the decoded size in bytes, or -1 when paused/aborted. */
static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, double *pts_ptr)
{
    AVPacket *pkt = &is->audio_pkt;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (is->audio_pkt_size > 0) {
            /* serialize codec access against the seek code in decode_thread */
            SDL_LockMutex(is->audio_decoder_mutex);
            len1 = avcodec_decode_audio(is->audio_st->codec,
                                        (int16_t *)audio_buf, &data_size,
                                        is->audio_pkt_data, is->audio_pkt_size);
            SDL_UnlockMutex(is->audio_decoder_mutex);
            if (len1 < 0) {
                /* if error, we skip the frame */
                is->audio_pkt_size = 0;
                break;
            }

            /* advance past the consumed bytes of the packet */
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0)
                continue;
            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* 2 bytes per s16 sample per channel */
            n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * is->audio_st->codec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
1537
1538 /* get the current audio output buffer size, in samples. With SDL, we
1539 cannot have a precise information */
1540 static int audio_write_get_buf_size(VideoState *is)
1541 {
1542 return is->audio_hw_buf_size - is->audio_buf_index;
1543 }
1544
1545
/* prepare a new audio buffer */
/* SDL audio callback: runs on SDL's audio thread.  Fills 'stream' with
   exactly 'len' bytes, decoding new frames as needed and substituting
   silence on decode failure so playback never underruns. */
void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* timestamp of this callback, used for audio clock estimation */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* refill is->audio_buf once the previous contents are consumed */
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is, is->audio_buf, &pts);
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                if (is->show_audio)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                /* may shrink/grow the buffer for A-V sync */
                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                               pts);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        /* copy as much as fits into the SDL-provided stream buffer */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
1580
1581
/* open a given stream. Return 0 if OK */
/* Opens the codec for ic->streams[stream_index], applies the global
   command-line decoder options, initializes the matching packet queue
   and spawns the decoding thread (video/subtitle) or starts SDL audio
   output (audio).  Returns 0 on success, -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        /* hack for AC3. XXX: suppress that */
        if (enc->channels > 2)
            enc->channels = 2;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
    }

    codec = avcodec_find_decoder(enc->codec_id);
    /* propagate the global command-line decoding options */
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    if(debug)
        av_log_set_level(AV_LOG_DEBUG);
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    /* lowres decoding produces frames without padded edges */
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_resilience= error_resilience;
    enc->error_concealment= error_concealment;
    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;
#if defined(HAVE_THREADS)
    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
#endif
    enc->thread_count= thread_count;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        /* unpause SDL audio: callbacks start immediately */
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        /* 40 ms initial guess until real frame deltas are measured */
        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
1677
/* Counterpart of stream_component_open(): aborts the stream's packet
   queue, joins/stops the worker (decoder thread or SDL audio), frees
   the queue and closes the codec.  Safe to call with an out-of-range
   index (returns immediately). */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the callback thread before the queue is torn down */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    avcodec_close(enc);
    /* clear the per-type bookkeeping only after the codec is closed */
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
1745
1746 void dump_stream_info(AVFormatContext *s)
1747 {
1748 if (s->track != 0)
1749 fprintf(stderr, "Track: %d\n", s->track);
1750 if (s->title[0] != '\0')
1751 fprintf(stderr, "Title: %s\n", s->title);
1752 if (s->author[0] != '\0')
1753 fprintf(stderr, "Author: %s\n", s->author);
1754 if (s->album[0] != '\0')
1755 fprintf(stderr, "Album: %s\n", s->album);
1756 if (s->year != 0)
1757 fprintf(stderr, "Year: %d\n", s->year);
1758 if (s->genre[0] != '\0')
1759 fprintf(stderr, "Genre: %s\n", s->genre);
1760 }
1761
1762 /* since we have only one decoding thread, we can use a global
1763 variable instead of a thread local variable */
1764 static VideoState *global_video_state;
1765
1766 static int decode_interrupt_cb(void)
1767 {
1768 return (global_video_state && global_video_state->abort_request);
1769 }
1770
1771 /* this thread gets the stream from the disk or the network */
1772 static int decode_thread(void *arg)
1773 {
1774 VideoState *is = arg;
1775 AVFormatContext *ic;
1776 int err, i, ret, video_index, audio_index, use_play;
1777 AVPacket pkt1, *pkt = &pkt1;
1778 AVFormatParameters params, *ap = &params;
1779
1780 video_index = -1;
1781 audio_index = -1;
1782 is->video_stream = -1;
1783 is->audio_stream = -1;
1784 is->subtitle_stream = -1;
1785
1786 global_video_state = is;
1787 url_set_interrupt_cb(decode_interrupt_cb);
1788
1789 memset(ap, 0, sizeof(*ap));
1790 ap->image_format = image_format;
1791 ap->initial_pause = 1; /* we force a pause when starting an RTSP
1792 stream */
1793
1794 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1795 if (err < 0) {
1796 print_error(is->filename, err);
1797 ret = -1;
1798 goto fail;
1799 }
1800 is->ic = ic;
1801 #ifdef CONFIG_NETWORK
1802 use_play = (ic->iformat == &rtsp_demux);
1803 #else
1804 use_play = 0;
1805 #endif
1806
1807 if(genpts)
1808 ic->flags |= AVFMT_FLAG_GENPTS;
1809
1810 if (!use_play) {
1811 err = av_find_stream_info(ic);
1812 if (err < 0) {
1813 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1814 ret = -1;
1815 goto fail;
1816 }
1817 ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe shouldnt use url_feof() to test for the end
1818 }
1819
1820 /* if seeking requested, we execute it */
1821 if (start_time != AV_NOPTS_VALUE) {
1822 int64_t timestamp;
1823
1824 timestamp = start_time;
1825 /* add the stream start time */
1826 if (ic->start_time != AV_NOPTS_VALUE)
1827 timestamp += ic->start_time;
1828 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1829 if (ret < 0) {
1830 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1831 is->filename, (double)timestamp / AV_TIME_BASE);
1832 }
1833 }
1834
1835 /* now we can begin to play (RTSP stream only) */
1836 av_read_play(ic);
1837
1838 if (use_play) {
1839 err = av_find_stream_info(ic);
1840 if (err < 0) {
1841 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1842 ret = -1;
1843 goto fail;
1844 }
1845 }
1846
1847 for(i = 0; i < ic->nb_streams; i++) {
1848 AVCodecContext *enc = ic->streams[i]->codec;
1849 switch(enc->codec_type) {
1850 case CODEC_TYPE_AUDIO:
1851 if (audio_index < 0 && !audio_disable)
1852 audio_index = i;
1853 break;
1854 case CODEC_TYPE_VIDEO:
1855 if (video_index < 0 && !video_disable)
1856 video_index = i;
1857 break;
1858 default:
1859 break;
1860 }
1861 }
1862 if (show_status) {
1863 dump_format(ic, 0, is->filename, 0);
1864 dump_stream_info(ic);
1865 }
1866
1867 /* open the streams */
1868 if (audio_index >= 0) {
1869 stream_component_open(is, audio_index);
1870 }
1871
1872 if (video_index >= 0) {
1873 stream_component_open(is, video_index);
1874 } else {
1875 if (!display_disable)
1876 is->show_audio = 1;
1877 }
1878
1879 if (is->video_stream < 0 && is->audio_stream < 0) {
1880 fprintf(stderr, "%s: could not open codecs\n", is->filename);
1881 ret = -1;
1882 goto fail;
1883 }
1884
1885 for(;;) {
1886 if (is->abort_request)
1887 break;
1888 #ifdef CONFIG_NETWORK
1889 if (is->paused != is->last_paused) {
1890 is->last_paused = is->paused;
1891 if (is->paused)
1892 av_read_pause(ic);
1893 else
1894 av_read_play(ic);
1895 }
1896 if (is->paused && ic->iformat == &rtsp_demux) {
1897 /* wait 10 ms to avoid trying to get another packet */
1898 /* XXX: horrible */
1899 SDL_Delay(10);
1900 continue;
1901 }
1902 #endif
1903 if (is->seek_req) {
1904 /* XXX: must lock decoder threads */
1905 SDL_LockMutex(is->video_decoder_mutex);
1906 SDL_LockMutex(is->audio_decoder_mutex);
1907 SDL_LockMutex(is->subtitle_decoder_mutex);
1908 ret = av_seek_frame(is->ic, -1, is->seek_pos, is->seek_flags);
1909 if (ret < 0) {
1910 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
1911 }else{
1912 if (is->audio_stream >= 0) {
1913 packet_queue_flush(&is->audioq);
1914 }
1915 if (is->subtitle_stream >= 0) {
1916 packet_queue_flush(&is->subtitleq);
1917 }
1918 if (is->video_stream >= 0) {
1919 packet_queue_flush(&is->videoq);
1920 avcodec_flush_buffers(ic->streams[video_index]->codec);
1921 }
1922 }
1923 SDL_UnlockMutex(is->subtitle_decoder_mutex);
1924 SDL_UnlockMutex(is->audio_decoder_mutex);
1925 SDL_UnlockMutex(is->video_decoder_mutex);
1926 is->seek_req = 0;
1927 }
1928
1929 /* if the queue are full, no need to read more */
1930 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
1931 is->videoq.size > MAX_VIDEOQ_SIZE ||
1932 is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
1933 url_feof(&ic->pb)) {
1934 /* wait 10 ms */
1935 SDL_Delay(10);
1936 continue;
1937 }
1938 ret = av_read_frame(ic, pkt);
1939 if (ret < 0) {
1940 if (url_ferror(&ic->pb) == 0) {
1941 SDL_Delay(100); /* wait for user event */
1942 continue;
1943 } else
1944 break;
1945 }
1946 if (pkt->stream_index == is->audio_stream) {
1947 packet_queue_put(&is->audioq, pkt);
1948 } else if (pkt->stream_index == is->video_stream) {
1949 packet_queue_put(&is->videoq, pkt);
1950 } else if (pkt->stream_index == is->subtitle_stream) {
1951 packet_queue_put(&is->subtitleq, pkt);
1952 } else {
1953 av_free_packet(pkt);
1954 }
1955 }
1956 /* wait until the end */
1957 while (!is->abort_request) {
1958 SDL_Delay(100);
1959 }
1960
1961 ret = 0;
1962 fail:
1963 /* disable interrupting */
1964 global_video_state = NULL;
1965
1966 /* close each stream */
1967 if (is->audio_stream >= 0)
1968 stream_component_close(is, is->audio_stream);
1969 if (is->video_stream >= 0)
1970 stream_component_close(is, is->video_stream);
1971 if (is->subtitle_stream >= 0)
1972 stream_component_close(is, is->subtitle_stream);
1973 if (is->ic) {
1974 av_close_input_file(is->ic);
1975 is->ic = NULL; /* safety */
1976 }
1977 url_set_interrupt_cb(NULL);
1978
1979 if (ret != 0) {
1980 SDL_Event event;
1981
1982 event.type = FF_QUIT_EVENT;
1983 event.user.data1 = is;
1984 SDL_PushEvent(&event);
1985 }
1986 return 0;
1987 }
1988
1989 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
1990 {
1991 VideoState *is;
1992
1993 is = av_mallocz(sizeof(VideoState));
1994 if (!is)
1995 return NULL;
1996 pstrcpy(is->filename, sizeof(is->filename), filename);
1997 is->iformat = iformat;
1998 if (screen) {
1999 is->width = screen->w;
2000 is->height = screen->h;
2001 }
2002 is->ytop = 0;
2003 is->xleft = 0;
2004
2005 /* start video display */
2006 is->pictq_mutex = SDL_CreateMutex();
2007 is->pictq_cond = SDL_CreateCond();
2008
2009 is->subpq_mutex = SDL_CreateMutex();
2010 is->subpq_cond = SDL_CreateCond();
2011
2012 is->subtitle_decoder_mutex = SDL_CreateMutex();
2013 is->audio_decoder_mutex = SDL_CreateMutex();
2014 is->video_decoder_mutex = SDL_CreateMutex();
2015
2016 /* add the refresh timer to draw the picture */
2017 schedule_refresh(is, 40);
2018
2019 is->av_sync_type = av_sync_type;
2020 is->parse_tid = SDL_CreateThread(decode_thread, is);
2021 if (!is->parse_tid) {
2022 av_free(is);
2023 return NULL;
2024 }
2025 return is;
2026 }
2027
2028 static void stream_close(VideoState *is)
2029 {
2030 VideoPicture *vp;
2031 int i;
2032 /* XXX: use a special url_shutdown call to abort parse cleanly */
2033 is->abort_request = 1;
2034 SDL_WaitThread(is->parse_tid, NULL);
2035
2036 /* free all pictures */
2037 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2038 vp = &is->pictq[i];
2039 if (vp->bmp) {
2040 SDL_FreeYUVOverlay(vp->bmp);
2041 vp->bmp = NULL;
2042 }
2043 }
2044 SDL_DestroyMutex(is->pictq_mutex);
2045 SDL_DestroyCond(is->pictq_cond);
2046 SDL_DestroyMutex(is->subpq_mutex);
2047 SDL_DestroyCond(is->subpq_cond);
2048 SDL_DestroyMutex(is->subtitle_decoder_mutex);
2049 SDL_DestroyMutex(is->audio_decoder_mutex);
2050 SDL_DestroyMutex(is->video_decoder_mutex);
2051 }
2052
/* Switch to the next usable stream of the given type, wrapping around.
   For subtitles, wrapping past the last stream disables subtitles
   (stream_index becomes -1 and only the old stream is closed --
   stream_component_close/open both tolerate a -1 index). */
void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing open to cycle from (subtitles may legitimately be -1) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                /* wrapped past the end: turn subtitles off */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came all the way around without finding an alternative */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2101
2102
2103 void toggle_full_screen(void)
2104 {
2105 int w, h, flags;
2106 is_full_screen = !is_full_screen;
2107 if (!fs_screen_width) {
2108 /* use default SDL method */
2109 SDL_WM_ToggleFullScreen(screen);
2110 } else {
2111 /* use the recorded resolution */
2112 flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
2113 if (is_full_screen) {
2114 w = fs_screen_width;
2115 h = fs_screen_height;
2116 flags |= SDL_FULLSCREEN;
2117 } else {
2118 w = screen_width;
2119 h = screen_height;
2120 flags |= SDL_RESIZABLE;
2121 }
2122 screen = SDL_SetVideoMode(w, h, 0, flags);
2123 cur_stream->width = w;
2124 cur_stream->height = h;
2125 }
2126 }
2127
2128 void toggle_pause(void)
2129 {
2130 if (cur_stream)
2131 stream_pause(cur_stream);
2132 step = 0;
2133 }
2134
2135 void step_to_next_frame(void)
2136 {
2137 if (cur_stream) {
2138 if (cur_stream->paused)
2139 cur_stream->paused=0;
2140 cur_stream->video_current_pts = get_video_clock(cur_stream);
2141 }
2142 step = 1;
2143 }
2144
2145 void do_exit(void)
2146 {
2147 if (cur_stream) {
2148 stream_close(cur_stream);
2149 cur_stream = NULL;
2150 }
2151 if (show_status)
2152 printf("\n");
2153 SDL_Quit();
2154 exit(0);
2155 }
2156
2157 void toggle_audio_display(void)
2158 {
2159 if (cur_stream) {
2160 cur_stream->show_audio = !cur_stream->show_audio;
2161 }
2162 }
2163
/* handle an event sent by the GUI */
/* Main-thread event loop: dispatches keyboard shortcuts, mouse seeks,
   window resizes and the custom FF_* events pushed by worker threads.
   Never returns (do_exit() calls exit()). */
void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seeks of +-10 s / +-60 s */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    pos = get_master_clock(cur_stream);
                    pos += incr;
                    stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            /* click on the window seeks to the clicked fraction of the
               total duration */
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                cur_stream->width = event.resize.w;
                cur_stream->height = event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* pushed by queue_picture(): overlay must be created here */
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
2267
2268 void opt_width(const char *arg)
2269 {
2270 screen_width = atoi(arg);
2271 }
2272
2273 void opt_height(const char *arg)
2274 {
2275 screen_height = atoi(arg);
2276 }
2277
2278 static void opt_format(const char *arg)
2279 {
2280 file_iformat = av_find_input_format(arg);
2281 if (!file_iformat) {
2282 fprintf(stderr, "Unknown input format: %s\n", arg);
2283 exit(1);
2284 }
2285 }
2286
2287 static void opt_image_format(const char *arg)
2288 {
2289 AVImageFormat *f;
2290
2291 for(f = first_image_format; f != NULL; f = f->next) {
2292 if (!strcmp(arg, f->name))
2293 break;
2294 }
2295 if (!f) {
2296 fprintf(stderr, "Unknown image format: '%s'\n", arg);
2297 exit(1);
2298 }
2299 image_format = f;
2300 }
2301
#ifdef CONFIG_NETWORK
/* -rtp_tcp option: restrict RTSP transport to RTP over TCP only. */
void opt_rtp_tcp(void)
{
    rtsp_default_protocols = 1 << RTSP_PROTOCOL_RTP_TCP;
}
#endif
2309
2310 void opt_sync(const char *arg)
2311 {
2312 if (!strcmp(arg, "audio"))
2313 av_sync_type = AV_SYNC_AUDIO_MASTER;
2314 else if (!strcmp(arg, "video"))
2315 av_sync_type = AV_SYNC_VIDEO_MASTER;
2316 else if (!strcmp(arg, "ext"))
2317 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2318 else
2319 show_help();
2320 }
2321
2322 void opt_seek(const char *arg)
2323 {
2324 start_time = parse_date(arg, 1);
2325 }
2326
2327 static void opt_debug(const char *arg)
2328 {
2329 debug = atoi(arg);
2330 }
2331
2332 static void opt_vismv(const char *arg)
2333 {
2334 debug_mv = atoi(arg);
2335 }
2336
2337 static void opt_thread_count(const char *arg)
2338 {
2339 thread_count= atoi(arg);
2340 #if !defined(HAVE_THREADS)
2341 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2342 #endif
2343 }
2344
2345 const OptionDef options[] = {
2346 { "h", 0, {(void*)show_help}, "show help" },
2347 { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2348 { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2349 #if 0
2350 /* disabled as SDL/X11 does not support it correctly on application launch */
2351 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2352 #endif
2353 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2354 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2355 { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2356 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2357 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2358 { "img", HAS_ARG, {(void*)opt_image_format}, "force image format", "img_fmt" },
2359 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2360 { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2361 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2362 { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2363 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2364 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2365 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2366 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2367 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2368 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2369 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2370 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2371 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2372 #ifdef CONFIG_NETWORK
2373 { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
2374 #endif
2375 { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2376 { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2377 { NULL, },
2378 };
2379
2380 void show_help(void)
2381 {
2382 printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003 Fabrice Bellard\n"
2383 "usage: ffplay [options] input_file\n"
2384 "Simple media player\n");
2385 printf("\n");
2386 show_help_options(options, "Main options:\n",
2387 OPT_EXPERT, 0);
2388 show_help_options(options, "\nAdvanced options:\n",
2389 OPT_EXPERT, OPT_EXPERT);
2390 printf("\nWhile playing:\n"
2391 "q, ESC quit\n"
2392 "f toggle full screen\n"
2393 "p, SPC pause\n"
2394 "a cycle audio channel\n"
2395 "v cycle video channel\n"
2396 "t cycle subtitle channel\n"
2397 "w show audio waves\n"
2398 "left/right seek backward/forward 10 seconds\n"
2399 "down/up seek backward/forward 1 minute\n"
2400 "mouse click seek to percentage in file corresponding to fraction of width\n"
2401 );
2402 exit(1);
2403 }
2404
2405 void parse_arg_file(const char *filename)
2406 {
2407 if (!strcmp(filename, "-"))
2408 filename = "pipe:";
2409 input_filename = filename;
2410 }
2411
/* Entry point: parse the command line, initialize SDL (and the video
   surface unless -nodisp), then open the stream and run the event loop. */
int main(int argc, char **argv)
{
    int flags, w, h;

    /* register all codecs, demux and protocols */
    av_register_all();

#ifdef CONFIG_OS2
    MorphToPM(); // Morph the VIO application to a PM one to be able to use Win* functions

    // Make stdout and stderr unbuffered
    setbuf( stdout, NULL );
    setbuf( stderr, NULL );
#endif

    parse_options(argc, argv, options);

    /* an input file is mandatory; show_help() prints usage and exits */
    if (!input_filename)
        show_help();

    /* -nodisp implies no video decoding at all */
    if (display_disable) {
        video_disable = 1;
    }
    /* NOTE(review): SDL_INIT_VIDEO is requested even with -nodisp;
       presumably SDL_Init can then fail on displayless systems — confirm */
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#ifndef CONFIG_WIN32
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#ifdef HAVE_X11
        /* save the screen resolution... SDL should allow full screen
           by resizing the window */
        {
            Display *dpy;
            dpy = XOpenDisplay(NULL);
            if (dpy) {
                fs_screen_width = DisplayWidth(dpy, DefaultScreen(dpy));
                fs_screen_height = DisplayHeight(dpy, DefaultScreen(dpy));
                XCloseDisplay(dpy);
            }
        }
#endif
        flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
        /* full screen only if the X11 query above yielded a resolution */
        if (is_full_screen && fs_screen_width) {
            w = fs_screen_width;
            h = fs_screen_height;
            flags |= SDL_FULLSCREEN;
        } else {
            w = screen_width;
            h = screen_height;
            flags |= SDL_RESIZABLE;
        }
        /* bpp 0: keep the current display depth */
        screen = SDL_SetVideoMode(w, h, 0, flags);
        if (!screen) {
            fprintf(stderr, "SDL: could not set video mode - exiting\n");
            exit(1);
        }
        SDL_WM_SetCaption("FFplay", "FFplay");
    }

    /* ignore event classes the player does not handle */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}