add support for more pixel formats (yuv422p, yuv444p, etc.) instead of yuv420p only.
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #define HAVE_AV_CONFIG_H
22 #include "avformat.h"
23 #include "swscale.h"
24
25 #include "version.h"
26 #include "cmdutils.h"
27
28 #include <SDL.h>
29 #include <SDL_thread.h>
30
31 #ifdef __MINGW32__
32 #undef main /* We don't want SDL to override our main() */
33 #endif
34
35 #ifdef CONFIG_OS2
36 #define INCL_DOS
37 #include <os2.h>
38 #include <stdio.h>
39
40 void MorphToPM()
41 {
42 PPIB pib;
43 PTIB tib;
44
45 DosGetInfoBlocks(&tib, &pib);
46
47 // Change flag from VIO to PM:
48 if (pib->pib_ultype==2) pib->pib_ultype = 3;
49 }
50 #endif
51
52 //#define DEBUG_SYNC
53
54 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
55 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
56 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
57
58 /* SDL audio buffer size, in samples. Should be small to have precise
59 A/V sync as SDL does not have hardware buffer fullness info. */
60 #define SDL_AUDIO_BUFFER_SIZE 1024
61
62 /* no AV sync correction is done if below the AV sync threshold */
63 #define AV_SYNC_THRESHOLD 0.01
64 /* no AV correction is done if too big error */
65 #define AV_NOSYNC_THRESHOLD 10.0
66
67 /* maximum audio speed change to get correct sync */
68 #define SAMPLE_CORRECTION_PERCENT_MAX 10
69
70 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
71 #define AUDIO_DIFF_AVG_NB 20
72
73 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
74 #define SAMPLE_ARRAY_SIZE (2*65536)
75
76 static int sws_flags = SWS_BICUBIC;
77
78 typedef struct PacketQueue {
79 AVPacketList *first_pkt, *last_pkt;
80 int nb_packets;
81 int size;
82 int abort_request;
83 SDL_mutex *mutex;
84 SDL_cond *cond;
85 } PacketQueue;
86
87 #define VIDEO_PICTURE_QUEUE_SIZE 1
88 #define SUBPICTURE_QUEUE_SIZE 4
89
90 typedef struct VideoPicture {
91 double pts; ///<presentation time stamp for this picture
92 SDL_Overlay *bmp;
93 int width, height; /* source height & width */
94 int allocated;
95 } VideoPicture;
96
97 typedef struct SubPicture {
98 double pts; /* presentation time stamp for this picture */
99 AVSubtitle sub;
100 } SubPicture;
101
102 enum {
103 AV_SYNC_AUDIO_MASTER, /* default choice */
104 AV_SYNC_VIDEO_MASTER,
105 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
106 };
107
108 typedef struct VideoState {
109 SDL_Thread *parse_tid;
110 SDL_Thread *video_tid;
111 AVInputFormat *iformat;
112 int no_background;
113 int abort_request;
114 int paused;
115 int last_paused;
116 int seek_req;
117 int seek_flags;
118 int64_t seek_pos;
119 AVFormatContext *ic;
120 int dtg_active_format;
121
122 int audio_stream;
123
124 int av_sync_type;
125 double external_clock; /* external clock base */
126 int64_t external_clock_time;
127
128 double audio_clock;
129 double audio_diff_cum; /* used for AV difference average computation */
130 double audio_diff_avg_coef;
131 double audio_diff_threshold;
132 int audio_diff_avg_count;
133 AVStream *audio_st;
134 PacketQueue audioq;
135 int audio_hw_buf_size;
136 /* samples output by the codec. we reserve more space for avsync
137 compensation */
138 DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
139 unsigned int audio_buf_size; /* in bytes */
140 int audio_buf_index; /* in bytes */
141 AVPacket audio_pkt;
142 uint8_t *audio_pkt_data;
143 int audio_pkt_size;
144
145 int show_audio; /* if true, display audio samples */
146 int16_t sample_array[SAMPLE_ARRAY_SIZE];
147 int sample_array_index;
148 int last_i_start;
149
150 SDL_Thread *subtitle_tid;
151 int subtitle_stream;
152 int subtitle_stream_changed;
153 AVStream *subtitle_st;
154 PacketQueue subtitleq;
155 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
156 int subpq_size, subpq_rindex, subpq_windex;
157 SDL_mutex *subpq_mutex;
158 SDL_cond *subpq_cond;
159
160 double frame_timer;
161 double frame_last_pts;
162 double frame_last_delay;
163 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
164 int video_stream;
165 AVStream *video_st;
166 PacketQueue videoq;
167 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
168 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
169 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
170 int pictq_size, pictq_rindex, pictq_windex;
171 SDL_mutex *pictq_mutex;
172 SDL_cond *pictq_cond;
173
174 // QETimer *video_timer;
175 char filename[1024];
176 int width, height, xleft, ytop;
177 } VideoState;
178
179 void show_help(void);
180 static int audio_write_get_buf_size(VideoState *is);
181
182 /* options specified by the user */
183 static AVInputFormat *file_iformat;
184 static const char *input_filename;
185 static int fs_screen_width;
186 static int fs_screen_height;
187 static int screen_width = 0;
188 static int screen_height = 0;
189 static int frame_width = 0;
190 static int frame_height = 0;
191 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
192 static int audio_disable;
193 static int video_disable;
194 static int wanted_audio_stream= 0;
195 static int seek_by_bytes;
196 static int display_disable;
197 static int show_status;
198 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
199 static int64_t start_time = AV_NOPTS_VALUE;
200 static int debug = 0;
201 static int debug_mv = 0;
202 static int step = 0;
203 static int thread_count = 1;
204 static int workaround_bugs = 1;
205 static int fast = 0;
206 static int genpts = 0;
207 static int lowres = 0;
208 static int idct = FF_IDCT_AUTO;
209 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
210 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
211 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
212 static int error_resilience = FF_ER_CAREFUL;
213 static int error_concealment = 3;
214
215 /* current context */
216 static int is_full_screen;
217 static VideoState *cur_stream;
218 static int64_t audio_callback_time;
219
220 AVPacket flush_pkt;
221
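/* custom SDL user events used to hand work back to the main event loop:
   overlay allocation requests, display refreshes and quit notification */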
222 #define FF_ALLOC_EVENT (SDL_USEREVENT)
223 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
224 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
225
226 SDL_Surface *screen;
227
228 /* packet queue handling */
229 static void packet_queue_init(PacketQueue *q)
230 {
231 memset(q, 0, sizeof(PacketQueue));
232 q->mutex = SDL_CreateMutex();
233 q->cond = SDL_CreateCond();
234 }
235
236 static void packet_queue_flush(PacketQueue *q)
237 {
238 AVPacketList *pkt, *pkt1;
239
240 SDL_LockMutex(q->mutex);
241 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
242 pkt1 = pkt->next;
243 av_free_packet(&pkt->pkt);
244 av_freep(&pkt);
245 }
246 q->last_pkt = NULL;
247 q->first_pkt = NULL;
248 q->nb_packets = 0;
249 q->size = 0;
250 SDL_UnlockMutex(q->mutex);
251 }
252
253 static void packet_queue_end(PacketQueue *q)
254 {
255 packet_queue_flush(q);
256 SDL_DestroyMutex(q->mutex);
257 SDL_DestroyCond(q->cond);
258 }
259
260 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
261 {
262 AVPacketList *pkt1;
263
264 /* duplicate the packet */
265 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
266 return -1;
267
268 pkt1 = av_malloc(sizeof(AVPacketList));
269 if (!pkt1)
270 return -1;
271 pkt1->pkt = *pkt;
272 pkt1->next = NULL;
273
274
275 SDL_LockMutex(q->mutex);
276
277 if (!q->last_pkt)
278
279 q->first_pkt = pkt1;
280 else
281 q->last_pkt->next = pkt1;
282 q->last_pkt = pkt1;
283 q->nb_packets++;
284 q->size += pkt1->pkt.size;
285 /* XXX: should duplicate packet data in DV case */
286 SDL_CondSignal(q->cond);
287
288 SDL_UnlockMutex(q->mutex);
289 return 0;
290 }
291
292 static void packet_queue_abort(PacketQueue *q)
293 {
294 SDL_LockMutex(q->mutex);
295
296 q->abort_request = 1;
297
298 SDL_CondSignal(q->cond);
299
300 SDL_UnlockMutex(q->mutex);
301 }
302
303 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
304 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
305 {
306 AVPacketList *pkt1;
307 int ret;
308
309 SDL_LockMutex(q->mutex);
310
311 for(;;) {
312 if (q->abort_request) {
313 ret = -1;
314 break;
315 }
316
317 pkt1 = q->first_pkt;
318 if (pkt1) {
319 q->first_pkt = pkt1->next;
320 if (!q->first_pkt)
321 q->last_pkt = NULL;
322 q->nb_packets--;
323 q->size -= pkt1->pkt.size;
324 *pkt = pkt1->pkt;
325 av_free(pkt1);
326 ret = 1;
327 break;
328 } else if (!block) {
329 ret = 0;
330 break;
331 } else {
332 SDL_CondWait(q->cond, q->mutex);
333 }
334 }
335 SDL_UnlockMutex(q->mutex);
336 return ret;
337 }
338
339 static inline void fill_rectangle(SDL_Surface *screen,
340 int x, int y, int w, int h, int color)
341 {
342 SDL_Rect rect;
343 rect.x = x;
344 rect.y = y;
345 rect.w = w;
346 rect.h = h;
347 SDL_FillRect(screen, &rect, color);
348 }
349
350 #if 0
351 /* draw only the border of a rectangle */
352 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
353 {
354 int w1, w2, h1, h2;
355
356 /* fill the background */
357 w1 = x;
358 if (w1 < 0)
359 w1 = 0;
360 w2 = s->width - (x + w);
361 if (w2 < 0)
362 w2 = 0;
363 h1 = y;
364 if (h1 < 0)
365 h1 = 0;
366 h2 = s->height - (y + h);
367 if (h2 < 0)
368 h2 = 0;
369 fill_rectangle(screen,
370 s->xleft, s->ytop,
371 w1, s->height,
372 color);
373 fill_rectangle(screen,
374 s->xleft + s->width - w2, s->ytop,
375 w2, s->height,
376 color);
377 fill_rectangle(screen,
378 s->xleft + w1, s->ytop,
379 s->width - w1 - w2, h1,
380 color);
381 fill_rectangle(screen,
382 s->xleft + w1, s->ytop + s->height - h2,
383 s->width - w1 - w2, h2,
384 color);
385 }
386 #endif
387
388
389
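/* fixed-point RGB -> YCbCr conversion macros (CCIR 601, limited range),
   used below to convert subtitle palettes to YUV */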
390 #define SCALEBITS 10
391 #define ONE_HALF (1 << (SCALEBITS - 1))
392 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
393
394 #define RGB_TO_Y_CCIR(r, g, b) \
395 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
396 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
397
398 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
399 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
400 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
401
402 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
403 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
404 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
405
406 #define ALPHA_BLEND(a, oldp, newp, s)\
407 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
408
409 #define RGBA_IN(r, g, b, a, s)\
410 {\
411 unsigned int v = ((const uint32_t *)(s))[0];\
412 a = (v >> 24) & 0xff;\
413 r = (v >> 16) & 0xff;\
414 g = (v >> 8) & 0xff;\
415 b = v & 0xff;\
416 }
417
418 #define YUVA_IN(y, u, v, a, s, pal)\
419 {\
420 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
421 a = (val >> 24) & 0xff;\
422 y = (val >> 16) & 0xff;\
423 u = (val >> 8) & 0xff;\
424 v = val & 0xff;\
425 }
426
427 #define YUVA_OUT(d, y, u, v, a)\
428 {\
429 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
430 }
431
432
433 #define BPP 1
434
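/* Blend a paletted subtitle rectangle onto a YUV420P picture. The palette has
   already been converted to YUVA; chroma is subsampled 2x2, hence the special
   handling of odd start rows/columns and odd widths/heights. */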
435 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
436 {
437 int wrap, wrap3, width2, skip2;
438 int y, u, v, a, u1, v1, a1, w, h;
439 uint8_t *lum, *cb, *cr;
440 const uint8_t *p;
441 const uint32_t *pal;
442
443 lum = dst->data[0] + rect->y * dst->linesize[0];
444 cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
445 cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];
446
447 width2 = (rect->w + 1) >> 1;
448 skip2 = rect->x >> 1;
449 wrap = dst->linesize[0];
450 wrap3 = rect->linesize;
451 p = rect->bitmap;
452 pal = rect->rgba_palette; /* Now in YCrCb! */
453
454 if (rect->y & 1) {
455 lum += rect->x;
456 cb += skip2;
457 cr += skip2;
458
459 if (rect->x & 1) {
460 YUVA_IN(y, u, v, a, p, pal);
461 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
462 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
463 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
464 cb++;
465 cr++;
466 lum++;
467 p += BPP;
468 }
469 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
470 YUVA_IN(y, u, v, a, p, pal);
471 u1 = u;
472 v1 = v;
473 a1 = a;
474 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
475
476 YUVA_IN(y, u, v, a, p + BPP, pal);
477 u1 += u;
478 v1 += v;
479 a1 += a;
480 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
481 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
482 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
483 cb++;
484 cr++;
485 p += 2 * BPP;
486 lum += 2;
487 }
488 if (w) {
489 YUVA_IN(y, u, v, a, p, pal);
490 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
491 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
492 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
493 }
494 p += wrap3 + (wrap3 - rect->w * BPP);
495 lum += wrap + (wrap - rect->w - rect->x);
496 cb += dst->linesize[1] - width2 - skip2;
497 cr += dst->linesize[2] - width2 - skip2;
498 }
499 for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
500 lum += rect->x;
501 cb += skip2;
502 cr += skip2;
503
504 if (rect->x & 1) {
505 YUVA_IN(y, u, v, a, p, pal);
506 u1 = u;
507 v1 = v;
508 a1 = a;
509 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
510 p += wrap3;
511 lum += wrap;
512 YUVA_IN(y, u, v, a, p, pal);
513 u1 += u;
514 v1 += v;
515 a1 += a;
516 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
517 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
518 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
519 cb++;
520 cr++;
521 p += -wrap3 + BPP;
522 lum += -wrap + 1;
523 }
524 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
525 YUVA_IN(y, u, v, a, p, pal);
526 u1 = u;
527 v1 = v;
528 a1 = a;
529 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
530
531 YUVA_IN(y, u, v, a, p, pal);
532 u1 += u;
533 v1 += v;
534 a1 += a;
535 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
536 p += wrap3;
537 lum += wrap;
538
539 YUVA_IN(y, u, v, a, p, pal);
540 u1 += u;
541 v1 += v;
542 a1 += a;
543 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
544
545 YUVA_IN(y, u, v, a, p, pal);
546 u1 += u;
547 v1 += v;
548 a1 += a;
549 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
550
551 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
552 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
553
554 cb++;
555 cr++;
556 p += -wrap3 + 2 * BPP;
557 lum += -wrap + 2;
558 }
559 if (w) {
560 YUVA_IN(y, u, v, a, p, pal);
561 u1 = u;
562 v1 = v;
563 a1 = a;
564 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
565 p += wrap3;
566 lum += wrap;
567 YUVA_IN(y, u, v, a, p, pal);
568 u1 += u;
569 v1 += v;
570 a1 += a;
571 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
572 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
573 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
574 cb++;
575 cr++;
576 p += -wrap3 + BPP;
577 lum += -wrap + 1;
578 }
579 p += wrap3 + (wrap3 - rect->w * BPP);
580 lum += wrap + (wrap - rect->w - rect->x);
581 cb += dst->linesize[1] - width2 - skip2;
582 cr += dst->linesize[2] - width2 - skip2;
583 }
584 /* handle odd height */
585 if (h) {
586 lum += rect->x;
587 cb += skip2;
588 cr += skip2;
589
590 if (rect->x & 1) {
591 YUVA_IN(y, u, v, a, p, pal);
592 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
594 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
595 cb++;
596 cr++;
597 lum++;
598 p += BPP;
599 }
600 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
601 YUVA_IN(y, u, v, a, p, pal);
602 u1 = u;
603 v1 = v;
604 a1 = a;
605 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
606
607 YUVA_IN(y, u, v, a, p + BPP, pal);
608 u1 += u;
609 v1 += v;
610 a1 += a;
611 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
612 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1); /* use the accumulated chroma, as in the main loop */
613 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
614 cb++;
615 cr++;
616 p += 2 * BPP;
617 lum += 2;
618 }
619 if (w) {
620 YUVA_IN(y, u, v, a, p, pal);
621 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
623 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
624 }
625 }
626 }
627
628 static void free_subpicture(SubPicture *sp)
629 {
630 int i;
631
632 for (i = 0; i < sp->sub.num_rects; i++)
633 {
634 av_free(sp->sub.rects[i].bitmap);
635 av_free(sp->sub.rects[i].rgba_palette);
636 }
637
638 av_free(sp->sub.rects);
639
640 memset(&sp->sub, 0, sizeof(AVSubtitle));
641 }
642
643 static void video_image_display(VideoState *is)
644 {
645 VideoPicture *vp;
646 SubPicture *sp;
647 AVPicture pict;
648 float aspect_ratio;
649 int width, height, x, y;
650 SDL_Rect rect;
651 int i;
652
653 vp = &is->pictq[is->pictq_rindex];
654 if (vp->bmp) {
655 /* XXX: use variable in the frame */
656 if (is->video_st->codec->sample_aspect_ratio.num == 0)
657 aspect_ratio = 0;
658 else
659 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
660 * is->video_st->codec->width / is->video_st->codec->height;
661 if (aspect_ratio <= 0.0)
662 aspect_ratio = (float)is->video_st->codec->width /
663 (float)is->video_st->codec->height;
664 /* if an active format is indicated, then it overrides the
665 mpeg format */
666 #if 0
667 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
668 is->dtg_active_format = is->video_st->codec->dtg_active_format;
669 printf("dtg_active_format=%d\n", is->dtg_active_format);
670 }
671 #endif
672 #if 0
673 switch(is->video_st->codec->dtg_active_format) {
674 case FF_DTG_AFD_SAME:
675 default:
676 /* nothing to do */
677 break;
678 case FF_DTG_AFD_4_3:
679 aspect_ratio = 4.0 / 3.0;
680 break;
681 case FF_DTG_AFD_16_9:
682 aspect_ratio = 16.0 / 9.0;
683 break;
684 case FF_DTG_AFD_14_9:
685 aspect_ratio = 14.0 / 9.0;
686 break;
687 case FF_DTG_AFD_4_3_SP_14_9:
688 aspect_ratio = 14.0 / 9.0;
689 break;
690 case FF_DTG_AFD_16_9_SP_14_9:
691 aspect_ratio = 14.0 / 9.0;
692 break;
693 case FF_DTG_AFD_SP_4_3:
694 aspect_ratio = 4.0 / 3.0;
695 break;
696 }
697 #endif
698
699 if (is->subtitle_st)
700 {
701 if (is->subpq_size > 0)
702 {
703 sp = &is->subpq[is->subpq_rindex];
704
705 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
706 {
707 SDL_LockYUVOverlay (vp->bmp);
708
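/* SDL's YV12 overlay stores its planes as Y, V, U, while the AVPicture
   layout for YUV420P is Y, U, V, so planes 1 and 2 are swapped here */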
709 pict.data[0] = vp->bmp->pixels[0];
710 pict.data[1] = vp->bmp->pixels[2];
711 pict.data[2] = vp->bmp->pixels[1];
712
713 pict.linesize[0] = vp->bmp->pitches[0];
714 pict.linesize[1] = vp->bmp->pitches[2];
715 pict.linesize[2] = vp->bmp->pitches[1];
716
717 for (i = 0; i < sp->sub.num_rects; i++)
718 blend_subrect(&pict, &sp->sub.rects[i]);
719
720 SDL_UnlockYUVOverlay (vp->bmp);
721 }
722 }
723 }
724
725
726 /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
727 height = is->height;
728 width = ((int)rint(height * aspect_ratio)) & -3;
729 if (width > is->width) {
730 width = is->width;
731 height = ((int)rint(width / aspect_ratio)) & -3;
732 }
733 x = (is->width - width) / 2;
734 y = (is->height - height) / 2;
735 if (!is->no_background) {
736 /* fill the background */
737 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
738 } else {
739 is->no_background = 0;
740 }
741 rect.x = is->xleft + x;
742 rect.y = is->ytop + y; /* vertical offset uses ytop, not xleft */
743 rect.w = width;
744 rect.h = height;
745 SDL_DisplayYUVOverlay(vp->bmp, &rect);
746 } else {
747 #if 0
748 fill_rectangle(screen,
749 is->xleft, is->ytop, is->width, is->height,
750 QERGB(0x00, 0x00, 0x00));
751 #endif
752 }
753 }
754
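/* positive modulo: always returns a value in [0, b) */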
755 static inline int compute_mod(int a, int b)
756 {
757 a = a % b;
758 if (a >= 0)
759 return a;
760 else
761 return a + b;
762 }
763
764 static void video_audio_display(VideoState *s)
765 {
766 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
767 int ch, channels, h, h2, bgcolor, fgcolor;
768 int64_t time_diff; /* microseconds elapsed since the last audio callback */
769
770 /* compute display index : center on currently output samples */
771 channels = s->audio_st->codec->channels;
772 nb_display_channels = channels;
773 if (!s->paused) {
774 n = 2 * channels;
775 delay = audio_write_get_buf_size(s);
776 delay /= n;
777
778 /* to be more precise, we take into account the time spent since
779 the last buffer computation */
780 if (audio_callback_time) {
781 time_diff = av_gettime() - audio_callback_time;
782 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
783 }
784
785 delay -= s->width / 2;
786 if (delay < s->width)
787 delay = s->width;
788
789 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
790
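/* scan the most recent samples for a falling zero crossing with the largest
   amplitude swing, so the displayed waveform stays roughly phase-aligned
   between refreshes */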
791 h= INT_MIN;
792 for(i=0; i<1000; i+=channels){
793 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
794 int a= s->sample_array[idx];
795 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
796 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
797 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
798 int score= a-d;
799 if(h<score && (b^c)<0){
800 h= score;
801 i_start= idx;
802 }
803 }
804
805 s->last_i_start = i_start;
806 } else {
807 i_start = s->last_i_start;
808 }
809
810 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
811 fill_rectangle(screen,
812 s->xleft, s->ytop, s->width, s->height,
813 bgcolor);
814
815 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
816
817 /* total height for one channel */
818 h = s->height / nb_display_channels;
819 /* graph height / 2 */
820 h2 = (h * 9) / 20;
821 for(ch = 0;ch < nb_display_channels; ch++) {
822 i = i_start + ch;
823 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
824 for(x = 0; x < s->width; x++) {
825 y = (s->sample_array[i] * h2) >> 15;
826 if (y < 0) {
827 y = -y;
828 ys = y1 - y;
829 } else {
830 ys = y1;
831 }
832 fill_rectangle(screen,
833 s->xleft + x, ys, 1, y,
834 fgcolor);
835 i += channels;
836 if (i >= SAMPLE_ARRAY_SIZE)
837 i -= SAMPLE_ARRAY_SIZE;
838 }
839 }
840
841 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
842
843 for(ch = 1;ch < nb_display_channels; ch++) {
844 y = s->ytop + ch * h;
845 fill_rectangle(screen,
846 s->xleft, y, s->width, 1,
847 fgcolor);
848 }
849 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
850 }
851
852 static int video_open(VideoState *is){
853 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
854 int w,h;
855
856 if(is_full_screen) flags |= SDL_FULLSCREEN;
857 else flags |= SDL_RESIZABLE;
858
859 if (is_full_screen && fs_screen_width) {
860 w = fs_screen_width;
861 h = fs_screen_height;
862 } else if(!is_full_screen && screen_width){
863 w = screen_width;
864 h = screen_height;
865 }else if (is->video_st && is->video_st->codec->width){
866 w = is->video_st->codec->width;
867 h = is->video_st->codec->height;
868 } else {
869 w = 640;
870 h = 480;
871 }
872 #ifndef CONFIG_DARWIN
873 screen = SDL_SetVideoMode(w, h, 0, flags);
874 #else
875 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
876 screen = SDL_SetVideoMode(w, h, 24, flags);
877 #endif
878 if (!screen) {
879 fprintf(stderr, "SDL: could not set video mode - exiting\n");
880 return -1;
881 }
882 SDL_WM_SetCaption("FFplay", "FFplay");
883
884 is->width = screen->w;
885 is->height = screen->h;
886
887 return 0;
888 }
889
890 /* display the current picture, if any */
891 static void video_display(VideoState *is)
892 {
893 if(!screen)
894 video_open(cur_stream);
895 if (is->audio_st && is->show_audio)
896 video_audio_display(is);
897 else if (is->video_st)
898 video_image_display(is);
899 }
900
901 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
902 {
903 SDL_Event event;
904 event.type = FF_REFRESH_EVENT;
905 event.user.data1 = opaque;
906 SDL_PushEvent(&event);
907 return 0; /* 0 means stop timer */
908 }
909
910 /* schedule a video refresh in 'delay' ms */
911 static void schedule_refresh(VideoState *is, int delay)
912 {
913 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
914 }
915
916 /* get the current audio clock value */
917 static double get_audio_clock(VideoState *is)
918 {
919 double pts;
920 int hw_buf_size, bytes_per_sec;
921 pts = is->audio_clock;
922 hw_buf_size = audio_write_get_buf_size(is);
923 bytes_per_sec = 0;
924 if (is->audio_st) {
925 bytes_per_sec = is->audio_st->codec->sample_rate *
926 2 * is->audio_st->codec->channels;
927 }
928 if (bytes_per_sec)
929 pts -= (double)hw_buf_size / bytes_per_sec;
930 return pts;
931 }
932
933 /* get the current video clock value */
934 static double get_video_clock(VideoState *is)
935 {
936 double delta;
937 if (is->paused) {
938 delta = 0;
939 } else {
940 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
941 }
942 return is->video_current_pts + delta;
943 }
944
945 /* get the current external clock value */
946 static double get_external_clock(VideoState *is)
947 {
948 int64_t ti;
949 ti = av_gettime();
950 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
951 }
952
953 /* get the current master clock value */
954 static double get_master_clock(VideoState *is)
955 {
956 double val;
957
958 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
959 if (is->video_st)
960 val = get_video_clock(is);
961 else
962 val = get_audio_clock(is);
963 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
964 if (is->audio_st)
965 val = get_audio_clock(is);
966 else
967 val = get_video_clock(is);
968 } else {
969 val = get_external_clock(is);
970 }
971 return val;
972 }
973
974 /* seek in the stream */
975 static void stream_seek(VideoState *is, int64_t pos, int rel)
976 {
977 if (!is->seek_req) {
978 is->seek_pos = pos;
979 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
980 if (seek_by_bytes)
981 is->seek_flags |= AVSEEK_FLAG_BYTE;
982 is->seek_req = 1;
983 }
984 }
985
986 /* pause or resume the video */
987 static void stream_pause(VideoState *is)
988 {
989 is->paused = !is->paused;
990 if (!is->paused) {
991 is->video_current_pts = get_video_clock(is);
992 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
993 }
994 }
995
996 /* called to display each frame */
997 static void video_refresh_timer(void *opaque)
998 {
999 VideoState *is = opaque;
1000 VideoPicture *vp;
1001 double actual_delay, delay, sync_threshold, ref_clock, diff;
1002
1003 SubPicture *sp, *sp2;
1004
1005 if (is->video_st) {
1006 if (is->pictq_size == 0) {
1007 /* if no picture, need to wait */
1008 schedule_refresh(is, 1);
1009 } else {
1010 /* dequeue the picture */
1011 vp = &is->pictq[is->pictq_rindex];
1012
1013 /* update current video pts */
1014 is->video_current_pts = vp->pts;
1015 is->video_current_pts_time = av_gettime();
1016
1017 /* compute nominal delay */
1018 delay = vp->pts - is->frame_last_pts;
1019 if (delay <= 0 || delay >= 1.0) {
1020 /* if incorrect delay, use previous one */
1021 delay = is->frame_last_delay;
1022 }
1023 is->frame_last_delay = delay;
1024 is->frame_last_pts = vp->pts;
1025
1026 /* update delay to follow master synchronisation source */
1027 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1028 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1029 /* if video is slave, we try to correct big delays by
1030 duplicating or deleting a frame */
1031 ref_clock = get_master_clock(is);
1032 diff = vp->pts - ref_clock;
1033
1034 /* skip or repeat frame. We take into account the
1035 delay to compute the threshold. I still don't know
1036 if it is the best guess */
1037 sync_threshold = AV_SYNC_THRESHOLD;
1038 if (delay > sync_threshold)
1039 sync_threshold = delay;
1040 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1041 if (diff <= -sync_threshold)
1042 delay = 0;
1043 else if (diff >= sync_threshold)
1044 delay = 2 * delay;
1045 }
1046 }
1047
1048 is->frame_timer += delay;
1049 /* compute the REAL delay (we need to do that to avoid
1050 long term errors) */
1051 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1052 if (actual_delay < 0.010) {
1053 /* XXX: should skip picture */
1054 actual_delay = 0.010;
1055 }
1056 /* launch timer for next picture */
1057 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1058
1059 #if defined(DEBUG_SYNC)
1060 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1061 delay, actual_delay, vp->pts, -diff);
1062 #endif
1063
1064 if(is->subtitle_st) {
1065 if (is->subtitle_stream_changed) {
1066 SDL_LockMutex(is->subpq_mutex);
1067
1068 while (is->subpq_size) {
1069 free_subpicture(&is->subpq[is->subpq_rindex]);
1070
1071 /* update queue size and signal for next picture */
1072 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1073 is->subpq_rindex = 0;
1074
1075 is->subpq_size--;
1076 }
1077 is->subtitle_stream_changed = 0;
1078
1079 SDL_CondSignal(is->subpq_cond);
1080 SDL_UnlockMutex(is->subpq_mutex);
1081 } else {
1082 if (is->subpq_size > 0) {
1083 sp = &is->subpq[is->subpq_rindex];
1084
1085 if (is->subpq_size > 1)
1086 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1087 else
1088 sp2 = NULL;
1089
1090 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1091 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1092 {
1093 free_subpicture(sp);
1094
1095 /* update queue size and signal for next picture */
1096 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1097 is->subpq_rindex = 0;
1098
1099 SDL_LockMutex(is->subpq_mutex);
1100 is->subpq_size--;
1101 SDL_CondSignal(is->subpq_cond);
1102 SDL_UnlockMutex(is->subpq_mutex);
1103 }
1104 }
1105 }
1106 }
1107
1108 /* display picture */
1109 video_display(is);
1110
1111 /* update queue size and signal for next picture */
1112 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1113 is->pictq_rindex = 0;
1114
1115 SDL_LockMutex(is->pictq_mutex);
1116 is->pictq_size--;
1117 SDL_CondSignal(is->pictq_cond);
1118 SDL_UnlockMutex(is->pictq_mutex);
1119 }
1120 } else if (is->audio_st) {
1121 /* draw the next audio frame */
1122
1123 schedule_refresh(is, 40);
1124
1125 /* if only audio stream, then display the audio bars (better
1126 than nothing, just to test the implementation) */
1127
1128 /* display picture */
1129 video_display(is);
1130 } else {
1131 schedule_refresh(is, 100);
1132 }
1133 if (show_status) {
1134 static int64_t last_time;
1135 int64_t cur_time;
1136 int aqsize, vqsize, sqsize;
1137 double av_diff;
1138
1139 cur_time = av_gettime();
1140 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1141 aqsize = 0;
1142 vqsize = 0;
1143 sqsize = 0;
1144 if (is->audio_st)
1145 aqsize = is->audioq.size;
1146 if (is->video_st)
1147 vqsize = is->videoq.size;
1148 if (is->subtitle_st)
1149 sqsize = is->subtitleq.size;
1150 av_diff = 0;
1151 if (is->audio_st && is->video_st)
1152 av_diff = get_audio_clock(is) - get_video_clock(is);
1153 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1154 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1155 fflush(stdout);
1156 last_time = cur_time;
1157 }
1158 }
1159 }
1160
1161 /* allocate a picture (this must be done in the main thread to avoid
1162 potential locking problems) */
1163 static void alloc_picture(void *opaque)
1164 {
1165 VideoState *is = opaque;
1166 VideoPicture *vp;
1167
1168 vp = &is->pictq[is->pictq_windex];
1169
1170 if (vp->bmp)
1171 SDL_FreeYUVOverlay(vp->bmp);
1172
1173 #if 0
1174 /* XXX: use generic function */
1175 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1176 switch(is->video_st->codec->pix_fmt) {
1177 case PIX_FMT_YUV420P:
1178 case PIX_FMT_YUV422P:
1179 case PIX_FMT_YUV444P:
1180 case PIX_FMT_YUYV422:
1181 case PIX_FMT_YUV410P:
1182 case PIX_FMT_YUV411P:
1183 is_yuv = 1;
1184 break;
1185 default:
1186 is_yuv = 0;
1187 break;
1188 }
1189 #endif
1190 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1191 is->video_st->codec->height,
1192 SDL_YV12_OVERLAY,
1193 screen);
1194 vp->width = is->video_st->codec->width;
1195 vp->height = is->video_st->codec->height;
1196
1197 SDL_LockMutex(is->pictq_mutex);
1198 vp->allocated = 1;
1199 SDL_CondSignal(is->pictq_cond);
1200 SDL_UnlockMutex(is->pictq_mutex);
1201 }
1202
1203 /**
1204 * Queue a decoded frame for display.
1205 * @param pts the pts of the frame, or the dts of the packet used as a guess when the pts is not known
1206 */
1207 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1208 {
1209 VideoPicture *vp;
1210 int dst_pix_fmt;
1211 AVPicture pict;
1212 static struct SwsContext *img_convert_ctx;
1213
1214 /* wait until we have space to put a new picture */
1215 SDL_LockMutex(is->pictq_mutex);
1216 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1217 !is->videoq.abort_request) {
1218 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1219 }
1220 SDL_UnlockMutex(is->pictq_mutex);
1221
1222 if (is->videoq.abort_request)
1223 return -1;
1224
1225 vp = &is->pictq[is->pictq_windex];
1226
1227 /* alloc or resize hardware picture buffer */
1228 if (!vp->bmp ||
1229 vp->width != is->video_st->codec->width ||
1230 vp->height != is->video_st->codec->height) {
1231 SDL_Event event;
1232
1233 vp->allocated = 0;
1234
1235 /* the allocation must be done in the main thread to avoid
1236 locking problems */
1237 event.type = FF_ALLOC_EVENT;
1238 event.user.data1 = is;
1239 SDL_PushEvent(&event);
1240
1241 /* wait until the picture is allocated */
1242 SDL_LockMutex(is->pictq_mutex);
1243 while (!vp->allocated && !is->videoq.abort_request) {
1244 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1245 }
1246 SDL_UnlockMutex(is->pictq_mutex);
1247
1248 if (is->videoq.abort_request)
1249 return -1;
1250 }
1251
1252 /* if the frame is not skipped, then display it */
1253 if (vp->bmp) {
1254 /* get a pointer on the bitmap */
1255 SDL_LockYUVOverlay (vp->bmp);
1256
1257 dst_pix_fmt = PIX_FMT_YUV420P;
1258 pict.data[0] = vp->bmp->pixels[0];
1259 pict.data[1] = vp->bmp->pixels[2];
1260 pict.data[2] = vp->bmp->pixels[1];
1261
1262 pict.linesize[0] = vp->bmp->pitches[0];
1263 pict.linesize[1] = vp->bmp->pitches[2];
1264 pict.linesize[2] = vp->bmp->pitches[1];
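/* convert whatever pixel format the decoder produced (yuv422p, yuv444p,
   rgb, ...) to YUV420P, which is what the SDL YV12 overlay expects */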
1265 if (img_convert_ctx == NULL) {
1266 img_convert_ctx = sws_getContext(is->video_st->codec->width,
1267 is->video_st->codec->height, is->video_st->codec->pix_fmt,
1268 is->video_st->codec->width, is->video_st->codec->height,
1269 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1270 if (img_convert_ctx == NULL) {
1271 fprintf(stderr, "Cannot initialize the conversion context\n");
1272 exit(1);
1273 }
1274 }
1275 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1276 0, is->video_st->codec->height, pict.data, pict.linesize);
1277 /* update the bitmap content */
1278 SDL_UnlockYUVOverlay(vp->bmp);
1279
1280 vp->pts = pts;
1281
1282 /* now we can update the picture count */
1283 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1284 is->pictq_windex = 0;
1285 SDL_LockMutex(is->pictq_mutex);
1286 is->pictq_size++;
1287 SDL_UnlockMutex(is->pictq_mutex);
1288 }
1289 return 0;
1290 }
1291
1292 /**
1293 * compute the exact PTS for the picture if it is omitted in the stream
1294 * @param pts1 the dts of the pkt / pts of the frame
1295 */
1296 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1297 {
1298 double frame_delay, pts;
1299
1300 pts = pts1;
1301
1302 if (pts != 0) {
1303 /* update video clock with pts, if present */
1304 is->video_clock = pts;
1305 } else {
1306 pts = is->video_clock;
1307 }
1308 /* update video clock for next frame */
1309 frame_delay = av_q2d(is->video_st->codec->time_base);
1310 /* for MPEG2, the frame can be repeated, so we update the
1311 clock accordingly */
1312 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1313 is->video_clock += frame_delay;
1314
1315 #if defined(DEBUG_SYNC) && 0
1316 {
1317 int ftype;
1318 if (src_frame->pict_type == FF_B_TYPE)
1319 ftype = 'B';
1320 else if (src_frame->pict_type == FF_I_TYPE)
1321 ftype = 'I';
1322 else
1323 ftype = 'P';
1324 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1325 ftype, pts, pts1);
1326 }
1327 #endif
1328 return queue_picture(is, src_frame, pts);
1329 }
1330
1331 static int video_thread(void *arg)
1332 {
1333 VideoState *is = arg;
1334 AVPacket pkt1, *pkt = &pkt1;
1335 int len1, got_picture;
1336 AVFrame *frame= avcodec_alloc_frame();
1337 double pts;
1338
1339 for(;;) {
1340 while (is->paused && !is->videoq.abort_request) {
1341 SDL_Delay(10);
1342 }
1343 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1344 break;
1345
1346 if(pkt->data == flush_pkt.data){
1347 avcodec_flush_buffers(is->video_st->codec);
1348 continue;
1349 }
1350
1351 /* NOTE: pts is the PTS of the _first_ picture beginning in
1352 this packet, if any */
1353 pts = 0;
1354 if (pkt->dts != AV_NOPTS_VALUE)
1355 pts = av_q2d(is->video_st->time_base)*pkt->dts;
1356
1357 len1 = avcodec_decode_video(is->video_st->codec,
1358 frame, &got_picture,
1359 pkt->data, pkt->size);
1360 // if (len1 < 0)
1361 // break;
1362 if (got_picture) {
1363 if (output_picture2(is, frame, pts) < 0)
1364 goto the_end;
1365 }
1366 av_free_packet(pkt);
1367 if (step)
1368 if (cur_stream)
1369 stream_pause(cur_stream);
1370 }
1371 the_end:
1372 av_free(frame);
1373 return 0;
1374 }
1375
1376 static int subtitle_thread(void *arg)
1377 {
1378 VideoState *is = arg;
1379 SubPicture *sp;
1380 AVPacket pkt1, *pkt = &pkt1;
1381 int len1, got_subtitle;
1382 double pts;
1383 int i, j;
1384 int r, g, b, y, u, v, a;
1385
1386 for(;;) {
1387 while (is->paused && !is->subtitleq.abort_request) {
1388 SDL_Delay(10);
1389 }
1390 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1391 break;
1392
1393 if(pkt->data == flush_pkt.data){
1394 avcodec_flush_buffers(is->subtitle_st->codec);
1395 continue;
1396 }
1397 SDL_LockMutex(is->subpq_mutex);
1398 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1399 !is->subtitleq.abort_request) {
1400 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1401 }
1402 SDL_UnlockMutex(is->subpq_mutex);
1403
1404 if (is->subtitleq.abort_request)
1405 goto the_end;
1406
1407 sp = &is->subpq[is->subpq_windex];
1408
1409 /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1410 this packet, if any */
1411 pts = 0;
1412 if (pkt->pts != AV_NOPTS_VALUE)
1413 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1414
1415 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1416 &sp->sub, &got_subtitle,
1417 pkt->data, pkt->size);
1418 // if (len1 < 0)
1419 // break;
1420 if (got_subtitle && sp->sub.format == 0) {
1421 sp->pts = pts;
1422
1423 for (i = 0; i < sp->sub.num_rects; i++)
1424 {
1425 for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1426 {
1427 RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1428 y = RGB_TO_Y_CCIR(r, g, b);
1429 u = RGB_TO_U_CCIR(r, g, b, 0);
1430 v = RGB_TO_V_CCIR(r, g, b, 0);
1431 YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1432 }
1433 }
1434
1435 /* now we can update the picture count */
1436 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1437 is->subpq_windex = 0;
1438 SDL_LockMutex(is->subpq_mutex);
1439 is->subpq_size++;
1440 SDL_UnlockMutex(is->subpq_mutex);
1441 }
1442 av_free_packet(pkt);
1443 // if (step)
1444 // if (cur_stream)
1445 // stream_pause(cur_stream);
1446 }
1447 the_end:
1448 return 0;
1449 }
1450
1451 /* copy samples into the ring buffer used by the audio waveform display */
1452 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1453 {
1454 int size, len, channels;
1455
1456 channels = is->audio_st->codec->channels;
1457
1458 size = samples_size / sizeof(short);
1459 while (size > 0) {
1460 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1461 if (len > size)
1462 len = size;
1463 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1464 samples += len;
1465 is->sample_array_index += len;
1466 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1467 is->sample_array_index = 0;
1468 size -= len;
1469 }
1470 }
1471
1472 /* return the new audio buffer size (samples can be added or deleted
1473 to get better sync when the video or external clock is the master) */
1474 static int synchronize_audio(VideoState *is, short *samples,
1475 int samples_size1, double pts)
1476 {
1477 int n, samples_size;
1478 double ref_clock;
1479
1480 n = 2 * is->audio_st->codec->channels;
1481 samples_size = samples_size1;
1482
1483 /* if not master, then we try to remove or add samples to correct the clock */
1484 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1485 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1486 double diff, avg_diff;
1487 int wanted_size, min_size, max_size, nb_samples;
1488
1489 ref_clock = get_master_clock(is);
1490 diff = get_audio_clock(is) - ref_clock;
1491
1492 if (diff < AV_NOSYNC_THRESHOLD) {
1493 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1494 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1495 /* not enough measures to have a correct estimate */
1496 is->audio_diff_avg_count++;
1497 } else {
1498 /* estimate the A-V difference */
1499 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1500
1501 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1502 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1503 nb_samples = samples_size / n;
1504
1505 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1506 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1507 if (wanted_size < min_size)
1508 wanted_size = min_size;
1509 else if (wanted_size > max_size)
1510 wanted_size = max_size;
1511
1512 /* add or remove samples to correct the synchronisation */
1513 if (wanted_size < samples_size) {
1514 /* remove samples */
1515 samples_size = wanted_size;
1516 } else if (wanted_size > samples_size) {
1517 uint8_t *samples_end, *q;
1518 int nb;
1519
1520 /* add samples */
1521 nb = (wanted_size - samples_size); /* number of bytes to append by repeating the last sample */
1522 samples_end = (uint8_t *)samples + samples_size - n;
1523 q = samples_end + n;
1524 while (nb > 0) {
1525 memcpy(q, samples_end, n);
1526 q += n;
1527 nb -= n;
1528 }
1529 samples_size = wanted_size;
1530 }
1531 }
1532 #if 0
1533 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1534 diff, avg_diff, samples_size - samples_size1,
1535 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1536 #endif
1537 }
1538 } else {
1539 /* the difference is too big: it may be due to initial PTS errors, so
1540 reset the A-V filter */
1541 is->audio_diff_avg_count = 0;
1542 is->audio_diff_cum = 0;
1543 }
1544 }
1545
1546 return samples_size;
1547 }
1548
1549 /* decode one audio frame and return its uncompressed size */
1550 static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, double *pts_ptr)
1551 {
1552 AVPacket *pkt = &is->audio_pkt;
1553 int n, len1, data_size;
1554 double pts;
1555
1556 for(;;) {
1557 /* NOTE: the audio packet can contain several frames */
1558 while (is->audio_pkt_size > 0) {
1559 len1 = avcodec_decode_audio(is->audio_st->codec,
1560 (int16_t *)audio_buf, &data_size,
1561 is->audio_pkt_data, is->audio_pkt_size);
1562 if (len1 < 0) {
1563 /* if error, we skip the frame */
1564 is->audio_pkt_size = 0;
1565 break;
1566 }
1567
1568 is->audio_pkt_data += len1;
1569 is->audio_pkt_size -= len1;
1570 if (data_size <= 0)
1571 continue;
1572 /* if no pts, then compute it */
1573 pts = is->audio_clock;
1574 *pts_ptr = pts;
1575 n = 2 * is->audio_st->codec->channels;
1576 is->audio_clock += (double)data_size /
1577 (double)(n * is->audio_st->codec->sample_rate);
1578 #if defined(DEBUG_SYNC)
1579 {
1580 static double last_clock;
1581 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1582 is->audio_clock - last_clock,
1583 is->audio_clock, pts);
1584 last_clock = is->audio_clock;
1585 }
1586 #endif
1587 return data_size;
1588 }
1589
1590 /* free the current packet */
1591 if (pkt->data)
1592 av_free_packet(pkt);
1593
1594 if (is->paused || is->audioq.abort_request) {
1595 return -1;
1596 }
1597
1598 /* read next packet */
1599 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1600 return -1;
1601 if(pkt->data == flush_pkt.data){
1602 avcodec_flush_buffers(is->audio_st->codec);
1603 continue;
1604 }
1605
1606 is->audio_pkt_data = pkt->data;
1607 is->audio_pkt_size = pkt->size;
1608
1609 /* update the audio clock with the pts, if available */
1610 if (pkt->pts != AV_NOPTS_VALUE) {
1611 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1612 }
1613 }
1614 }
1615
1616 /* get the amount of data left in the audio output buffer, in bytes. With SDL,
1617 we cannot get precise buffer fullness information */
1618 static int audio_write_get_buf_size(VideoState *is)
1619 {
1620 return is->audio_hw_buf_size - is->audio_buf_index;
1621 }
1622
1623
1624 /* prepare a new audio buffer */
1625 void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1626 {
1627 VideoState *is = opaque;
1628 int audio_size, len1;
1629 double pts;
1630
1631 audio_callback_time = av_gettime();
1632
1633 while (len > 0) {
1634 if (is->audio_buf_index >= is->audio_buf_size) {
1635 audio_size = audio_decode_frame(is, is->audio_buf, &pts);
1636 if (audio_size < 0) {
1637 /* if error, just output silence */
1638 is->audio_buf_size = 1024;
1639 memset(is->audio_buf, 0, is->audio_buf_size);
1640 } else {
1641 if (is->show_audio)
1642 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1643 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1644 pts);
1645 is->audio_buf_size = audio_size;
1646 }
1647 is->audio_buf_index = 0;
1648 }
1649 len1 = is->audio_buf_size - is->audio_buf_index;
1650 if (len1 > len)
1651 len1 = len;
1652 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1653 len -= len1;
1654 stream += len1;
1655 is->audio_buf_index += len1;
1656 }
1657 }
1658
1659 /* open a given stream. Return 0 if OK */
1660 static int stream_component_open(VideoState *is, int stream_index)
1661 {
1662 AVFormatContext *ic = is->ic;
1663 AVCodecContext *enc;
1664 AVCodec *codec;
1665 SDL_AudioSpec wanted_spec, spec;
1666
1667 if (stream_index < 0 || stream_index >= ic->nb_streams)
1668 return -1;
1669 enc = ic->streams[stream_index]->codec;
1670
1671 /* prepare audio output */
1672 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1673 wanted_spec.freq = enc->sample_rate;
1674 wanted_spec.format = AUDIO_S16SYS;
1675 /* hack for AC3: force a stereo downmix. XXX: remove this */
1676 if (enc->channels > 2)
1677 enc->channels = 2;
1678 wanted_spec.channels = enc->channels;
1679 wanted_spec.silence = 0;
1680 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1681 wanted_spec.callback = sdl_audio_callback;
1682 wanted_spec.userdata = is;
1683 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1684 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1685 return -1;
1686 }
1687 is->audio_hw_buf_size = spec.size;
1688 }
1689
1690 codec = avcodec_find_decoder(enc->codec_id);
1691 enc->debug_mv = debug_mv;
1692 enc->debug = debug;
1693 enc->workaround_bugs = workaround_bugs;
1694 enc->lowres = lowres;
1695 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1696 enc->idct_algo= idct;
1697 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1698 enc->skip_frame= skip_frame;
1699 enc->skip_idct= skip_idct;
1700 enc->skip_loop_filter= skip_loop_filter;
1701 enc->error_resilience= error_resilience;
1702 enc->error_concealment= error_concealment;
1703 if (!codec ||
1704 avcodec_open(enc, codec) < 0)
1705 return -1;
1706 #if defined(HAVE_THREADS)
1707 if(thread_count>1)
1708 avcodec_thread_init(enc, thread_count);
1709 #endif
1710 enc->thread_count= thread_count;
1711 switch(enc->codec_type) {
1712 case CODEC_TYPE_AUDIO:
1713 is->audio_stream = stream_index;
1714 is->audio_st = ic->streams[stream_index];
1715 is->audio_buf_size = 0;
1716 is->audio_buf_index = 0;
1717
1718 /* init averaging filter */
1719 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1720 is->audio_diff_avg_count = 0;
1721 /* since we do not have precise enough audio FIFO fullness information,
1722 we correct audio sync only if the error is larger than this threshold */
1723 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1724
1725 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1726 packet_queue_init(&is->audioq);
1727 SDL_PauseAudio(0);
1728 break;
1729 case CODEC_TYPE_VIDEO:
1730 is->video_stream = stream_index;
1731 is->video_st = ic->streams[stream_index];
1732
1733 is->frame_last_delay = 40e-3;
1734 is->frame_timer = (double)av_gettime() / 1000000.0;
1735 is->video_current_pts_time = av_gettime();
1736
1737 packet_queue_init(&is->videoq);
1738 is->video_tid = SDL_CreateThread(video_thread, is);
1739 break;
1740 case CODEC_TYPE_SUBTITLE:
1741 is->subtitle_stream = stream_index;
1742 is->subtitle_st = ic->streams[stream_index];
1743 packet_queue_init(&is->subtitleq);
1744
1745 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1746 break;
1747 default:
1748 break;
1749 }
1750 return 0;
1751 }
1752
1753 static void stream_component_close(VideoState *is, int stream_index)
1754 {
1755 AVFormatContext *ic = is->ic;
1756 AVCodecContext *enc;
1757
1758 if (stream_index < 0 || stream_index >= ic->nb_streams)
1759 return;
1760 enc = ic->streams[stream_index]->codec;
1761
1762 switch(enc->codec_type) {
1763 case CODEC_TYPE_AUDIO:
1764 packet_queue_abort(&is->audioq);
1765
1766 SDL_CloseAudio();
1767
1768 packet_queue_end(&is->audioq);
1769 break;
1770 case CODEC_TYPE_VIDEO:
1771 packet_queue_abort(&is->videoq);
1772
1773 /* note: we also signal this mutex to make sure we unblock the
1774 video thread in all cases */
1775 SDL_LockMutex(is->pictq_mutex);
1776 SDL_CondSignal(is->pictq_cond);
1777 SDL_UnlockMutex(is->pictq_mutex);
1778
1779 SDL_WaitThread(is->video_tid, NULL);
1780
1781 packet_queue_end(&is->videoq);
1782 break;
1783 case CODEC_TYPE_SUBTITLE:
1784 packet_queue_abort(&is->subtitleq);
1785
1786 /* note: we also signal this mutex to make sure we unblock the
1787 subtitle thread in all cases */
1788 SDL_LockMutex(is->subpq_mutex);
1789 is->subtitle_stream_changed = 1;
1790
1791 SDL_CondSignal(is->subpq_cond);
1792 SDL_UnlockMutex(is->subpq_mutex);
1793
1794 SDL_WaitThread(is->subtitle_tid, NULL);
1795
1796 packet_queue_end(&is->subtitleq);
1797 break;
1798 default:
1799 break;
1800 }
1801
1802 avcodec_close(enc);
1803 switch(enc->codec_type) {
1804 case CODEC_TYPE_AUDIO:
1805 is->audio_st = NULL;
1806 is->audio_stream = -1;
1807 break;
1808 case CODEC_TYPE_VIDEO:
1809 is->video_st = NULL;
1810 is->video_stream = -1;
1811 break;
1812 case CODEC_TYPE_SUBTITLE:
1813 is->subtitle_st = NULL;
1814 is->subtitle_stream = -1;
1815 break;
1816 default:
1817 break;
1818 }
1819 }
1820
1821 static void dump_stream_info(const AVFormatContext *s)
1822 {
1823 if (s->track != 0)
1824 fprintf(stderr, "Track: %d\n", s->track);
1825 if (s->title[0] != '\0')
1826 fprintf(stderr, "Title: %s\n", s->title);
1827 if (s->author[0] != '\0')
1828 fprintf(stderr, "Author: %s\n", s->author);
1829 if (s->copyright[0] != '\0')
1830 fprintf(stderr, "Copyright: %s\n", s->copyright);
1831 if (s->comment[0] != '\0')
1832 fprintf(stderr, "Comment: %s\n", s->comment);
1833 if (s->album[0] != '\0')
1834 fprintf(stderr, "Album: %s\n", s->album);
1835 if (s->year != 0)
1836 fprintf(stderr, "Year: %d\n", s->year);
1837 if (s->genre[0] != '\0')
1838 fprintf(stderr, "Genre: %s\n", s->genre);
1839 }
1840
1841 /* since we have only one decoding thread, we can use a global
1842 variable instead of a thread local variable */
1843 static VideoState *global_video_state;
1844
1845 static int decode_interrupt_cb(void)
1846 {
1847 return (global_video_state && global_video_state->abort_request);
1848 }
1849
1850 /* this thread gets the stream from the disk or the network */
1851 static int decode_thread(void *arg)
1852 {
1853 VideoState *is = arg;
1854 AVFormatContext *ic;
1855 int err, i, ret, video_index, audio_index, use_play;
1856 AVPacket pkt1, *pkt = &pkt1;
1857 AVFormatParameters params, *ap = &params;
1858
1859 video_index = -1;
1860 audio_index = -1;
1861 is->video_stream = -1;
1862 is->audio_stream = -1;
1863 is->subtitle_stream = -1;
1864
1865 global_video_state = is;
1866 url_set_interrupt_cb(decode_interrupt_cb);
1867
1868 memset(ap, 0, sizeof(*ap));
1869 ap->initial_pause = 1; /* we force a pause when starting an RTSP
1870 stream */
1871
1872 ap->width = frame_width;
1873 ap->height= frame_height;
1874 ap->time_base= (AVRational){1, 25};
1875 ap->pix_fmt = frame_pix_fmt;
1876
1877 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1878 if (err < 0) {
1879 print_error(is->filename, err);
1880 ret = -1;
1881 goto fail;
1882 }
1883 is->ic = ic;
1884 #ifdef CONFIG_NETWORK
1885 use_play = (ic->iformat == &rtsp_demuxer);
1886 #else
1887 use_play = 0;
1888 #endif
1889
1890 if(genpts)
1891 ic->flags |= AVFMT_FLAG_GENPTS;
1892
1893 if (!use_play) {
1894 err = av_find_stream_info(ic);
1895 if (err < 0) {
1896 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1897 ret = -1;
1898 goto fail;
1899 }
1900 ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe shouldn't use url_feof() to test for the end
1901 }
1902
1903 /* if a start position was requested, seek to it */
1904 if (start_time != AV_NOPTS_VALUE) {
1905 int64_t timestamp;
1906
1907 timestamp = start_time;
1908 /* add the stream start time */
1909 if (ic->start_time != AV_NOPTS_VALUE)
1910 timestamp += ic->start_time;
1911 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1912 if (ret < 0) {
1913 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1914 is->filename, (double)timestamp / AV_TIME_BASE);
1915 }
1916 }
1917
1918 /* now we can begin to play (RTSP stream only) */
1919 av_read_play(ic);
1920
1921 if (use_play) {
1922 err = av_find_stream_info(ic);
1923 if (err < 0) {
1924 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1925 ret = -1;
1926 goto fail;
1927 }
1928 }
1929
1930 for(i = 0; i < ic->nb_streams; i++) {
1931 AVCodecContext *enc = ic->streams[i]->codec;
1932 switch(enc->codec_type) {
1933 case CODEC_TYPE_AUDIO:
1934 if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1935 audio_index = i;
1936 break;
1937 case CODEC_TYPE_VIDEO:
1938 if (video_index < 0 && !video_disable)
1939 video_index = i;
1940 break;
1941 default:
1942 break;
1943 }
1944 }
1945 if (show_status) {
1946 dump_format(ic, 0, is->filename, 0);
1947 dump_stream_info(ic);
1948 }
1949
1950 /* open the streams */
1951 if (audio_index >= 0) {
1952 stream_component_open(is, audio_index);
1953 }
1954
1955 if (video_index >= 0) {
1956 stream_component_open(is, video_index);
1957 } else {
1958 if (!display_disable)
1959 is->show_audio = 1;
1960 }
1961
1962 if (is->video_stream < 0 && is->audio_stream < 0) {
1963 fprintf(stderr, "%s: could not open codecs\n", is->filename);
1964 ret = -1;
1965 goto fail;
1966 }
1967
1968 for(;;) {
1969 if (is->abort_request)
1970 break;
1971 #ifdef CONFIG_NETWORK
1972 if (is->paused != is->last_paused) {
1973 is->last_paused = is->paused;
1974 if (is->paused)
1975 av_read_pause(ic);
1976 else
1977 av_read_play(ic);
1978 }
1979 if (is->paused && ic->iformat == &rtsp_demuxer) {
1980 /* wait 10 ms to avoid trying to get another packet */
1981 /* XXX: horrible */
1982 SDL_Delay(10);
1983 continue;
1984 }
1985 #endif
1986 if (is->seek_req) {
1987 int stream_index= -1;
1988 int64_t seek_target= is->seek_pos;
1989
1990 if (is-> video_stream >= 0) stream_index= is-> video_stream;
1991 else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
1992 else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
1993
1994 if(stream_index>=0){
1995 seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
1996 }
1997
1998 ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
1999 if (ret < 0) {
2000 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2001 }else{
2002 if (is->audio_stream >= 0) {
2003 packet_queue_flush(&is->audioq);
2004 packet_queue_put(&is->audioq, &flush_pkt);
2005 }
2006 if (is->subtitle_stream >= 0) {
2007 packet_queue_flush(&is->subtitleq);
2008 packet_queue_put(&is->subtitleq, &flush_pkt);
2009 }
2010 if (is->video_stream >= 0) {
2011 packet_queue_flush(&is->videoq);
2012 packet_queue_put(&is->videoq, &flush_pkt);
2013 }
2014 }
2015 is->seek_req = 0;
2016 }
2017
2018 /* if the queues are full, no need to read more */
2019 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2020 is->videoq.size > MAX_VIDEOQ_SIZE ||
2021 is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
2022 url_feof(&ic->pb)) {
2023 /* wait 10 ms */
2024 SDL_Delay(10);
2025 continue;
2026 }
2027 ret = av_read_frame(ic, pkt);
2028 if (ret < 0) {
2029 if (url_ferror(&ic->pb) == 0) {
2030 SDL_Delay(100); /* wait for user event */
2031 continue;
2032 } else
2033 break;
2034 }
2035 if (pkt->stream_index == is->audio_stream) {
2036 packet_queue_put(&is->audioq, pkt);
2037 } else if (pkt->stream_index == is->video_stream) {
2038 packet_queue_put(&is->videoq, pkt);
2039 } else if (pkt->stream_index == is->subtitle_stream) {
2040 packet_queue_put(&is->subtitleq, pkt);
2041 } else {
2042 av_free_packet(pkt);
2043 }
2044 }
2045 /* wait until the end */
2046 while (!is->abort_request) {
2047 SDL_Delay(100);
2048 }
2049
2050 ret = 0;
2051 fail:
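    /* both the normal and the error paths end up here: close any opened
       stream components, close the input file and, on error, notify the
       event loop with FF_QUIT_EVENT */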
2052 /* disable interrupting */
2053 global_video_state = NULL;
2054
2055 /* close each stream */
2056 if (is->audio_stream >= 0)
2057 stream_component_close(is, is->audio_stream);
2058 if (is->video_stream >= 0)
2059 stream_component_close(is, is->video_stream);
2060 if (is->subtitle_stream >= 0)
2061 stream_component_close(is, is->subtitle_stream);
2062 if (is->ic) {
2063 av_close_input_file(is->ic);
2064 is->ic = NULL; /* safety */
2065 }
2066 url_set_interrupt_cb(NULL);
2067
2068 if (ret != 0) {
2069 SDL_Event event;
2070
2071 event.type = FF_QUIT_EVENT;
2072 event.user.data1 = is;
2073 SDL_PushEvent(&event);
2074 }
2075 return 0;
2076 }
2077
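/* allocate a VideoState, create the picture/subtitle queue mutexes and
   condition variables, schedule the first refresh and spawn the demuxer
   thread (decode_thread); returns NULL if the thread cannot be created */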
2078 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2079 {
2080 VideoState *is;
2081
2082 is = av_mallocz(sizeof(VideoState));
2083 if (!is)
2084 return NULL;
2085 pstrcpy(is->filename, sizeof(is->filename), filename);
2086 is->iformat = iformat;
2087 is->ytop = 0;
2088 is->xleft = 0;
2089
2090 /* start video display */
2091 is->pictq_mutex = SDL_CreateMutex();
2092 is->pictq_cond = SDL_CreateCond();
2093
2094 is->subpq_mutex = SDL_CreateMutex();
2095 is->subpq_cond = SDL_CreateCond();
2096
2097 /* add the refresh timer to draw the picture */
2098 schedule_refresh(is, 40);
2099
2100 is->av_sync_type = av_sync_type;
2101 is->parse_tid = SDL_CreateThread(decode_thread, is);
2102 if (!is->parse_tid) {
2103 av_free(is);
2104 return NULL;
2105 }
2106 return is;
2107 }
2108
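/* stop the demuxer thread and free the SDL overlays and synchronization
   primitives owned by the VideoState; the structure itself is not freed
   here - do_exit() terminates the process right after calling this */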
2109 static void stream_close(VideoState *is)
2110 {
2111 VideoPicture *vp;
2112 int i;
2113 /* XXX: use a special url_shutdown call to abort parse cleanly */
2114 is->abort_request = 1;
2115 SDL_WaitThread(is->parse_tid, NULL);
2116
2117 /* free all pictures */
2118 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2119 vp = &is->pictq[i];
2120 if (vp->bmp) {
2121 SDL_FreeYUVOverlay(vp->bmp);
2122 vp->bmp = NULL;
2123 }
2124 }
2125 SDL_DestroyMutex(is->pictq_mutex);
2126 SDL_DestroyCond(is->pictq_cond);
2127 SDL_DestroyMutex(is->subpq_mutex);
2128 SDL_DestroyCond(is->subpq_cond);
2129 }
2130
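/* switch to the next stream of the given type: scan forward from the
   current index (wrapping around) and open the next stream of that type;
   audio candidates must have a valid sample rate and channel count, and
   the subtitle search may also cycle back to "no subtitle" (index -1) */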
2131 static void stream_cycle_channel(VideoState *is, int codec_type)
2132 {
2133 AVFormatContext *ic = is->ic;
2134 int start_index, stream_index;
2135 AVStream *st;
2136
2137 if (codec_type == CODEC_TYPE_VIDEO)
2138 start_index = is->video_stream;
2139 else if (codec_type == CODEC_TYPE_AUDIO)
2140 start_index = is->audio_stream;
2141 else
2142 start_index = is->subtitle_stream;
2143 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2144 return;
2145 stream_index = start_index;
2146 for(;;) {
2147 if (++stream_index >= is->ic->nb_streams)
2148 {
2149 if (codec_type == CODEC_TYPE_SUBTITLE)
2150 {
2151 stream_index = -1;
2152 goto the_end;
2153 } else
2154 stream_index = 0;
2155 }
2156 if (stream_index == start_index)
2157 return;
2158 st = ic->streams[stream_index];
2159 if (st->codec->codec_type == codec_type) {
2160 /* check that parameters are OK */
2161 switch(codec_type) {
2162 case CODEC_TYPE_AUDIO:
2163 if (st->codec->sample_rate != 0 &&
2164 st->codec->channels != 0)
2165 goto the_end;
2166 break;
2167 case CODEC_TYPE_VIDEO:
2168 case CODEC_TYPE_SUBTITLE:
2169 goto the_end;
2170 default:
2171 break;
2172 }
2173 }
2174 }
2175 the_end:
2176 stream_component_close(is, start_index);
2177 stream_component_open(is, stream_index);
2178 }
2179
2180
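/* full screen is toggled by re-opening the video surface with the new
   flags in video_open(); SDL_WM_ToggleFullScreen() is left disabled since
   it does not work with every SDL video driver */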
2181 static void toggle_full_screen(void)
2182 {
2183 is_full_screen = !is_full_screen;
2184 if (!fs_screen_width) {
2185 /* use default SDL method */
2186 // SDL_WM_ToggleFullScreen(screen);
2187 }
2188 video_open(cur_stream);
2189 }
2190
2191 static void toggle_pause(void)
2192 {
2193 if (cur_stream)
2194 stream_pause(cur_stream);
2195 step = 0;
2196 }
2197
2198 static void step_to_next_frame(void)
2199 {
2200 if (cur_stream) {
2201 if (cur_stream->paused)
2202 cur_stream->paused=0;
2203 cur_stream->video_current_pts = get_video_clock(cur_stream);
2204 }
2205 step = 1;
2206 }
2207
2208 static void do_exit(void)
2209 {
2210 if (cur_stream) {
2211 stream_close(cur_stream);
2212 cur_stream = NULL;
2213 }
2214 if (show_status)
2215 printf("\n");
2216 SDL_Quit();
2217 exit(0);
2218 }
2219
2220 static void toggle_audio_display(void)
2221 {
2222 if (cur_stream) {
2223 cur_stream->show_audio = !cur_stream->show_audio;
2224 }
2225 }
2226
2227 /* handle an event sent by the GUI */
2228 static void event_loop(void)
2229 {
2230 SDL_Event event;
2231 double incr, pos, frac;
2232
2233 for(;;) {
2234 SDL_WaitEvent(&event);
2235 switch(event.type) {
2236 case SDL_KEYDOWN:
2237 switch(event.key.keysym.sym) {
2238 case SDLK_ESCAPE:
2239 case SDLK_q:
2240 do_exit();
2241 break;
2242 case SDLK_f:
2243 toggle_full_screen();
2244 break;
2245 case SDLK_p:
2246 case SDLK_SPACE:
2247 toggle_pause();
2248 break;
2249 case SDLK_s: //S: Step to next frame
2250 step_to_next_frame();
2251 break;
2252 case SDLK_a:
2253 if (cur_stream)
2254 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2255 break;
2256 case SDLK_v:
2257 if (cur_stream)
2258 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2259 break;
2260 case SDLK_t:
2261 if (cur_stream)
2262 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2263 break;
2264 case SDLK_w:
2265 toggle_audio_display();
2266 break;
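            /* arrow keys seek relative to the current position: 10 seconds
               backward/forward (left/right) or 60 seconds backward/forward
               (down/up); with -bytes the increment is converted to a byte
               offset instead */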
2267 case SDLK_LEFT:
2268 incr = -10.0;
2269 goto do_seek;
2270 case SDLK_RIGHT:
2271 incr = 10.0;
2272 goto do_seek;
2273 case SDLK_UP:
2274 incr = 60.0;
2275 goto do_seek;
2276 case SDLK_DOWN:
2277 incr = -60.0;
2278 do_seek:
2279 if (cur_stream) {
2280 if (seek_by_bytes) {
2281 pos = url_ftell(&cur_stream->ic->pb);
2282 if (cur_stream->ic->bit_rate)
2283 incr *= cur_stream->ic->bit_rate / 8.0; /* seconds -> bytes */
2284 else
2285 incr *= 180000.0;
2286 pos += incr;
2287 stream_seek(cur_stream, pos, incr);
2288 } else {
2289 pos = get_master_clock(cur_stream);
2290 pos += incr;
2291 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2292 }
2293 }
2294 break;
2295 default:
2296 break;
2297 }
2298 break;
2299 case SDL_MOUSEBUTTONDOWN:
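            /* a mouse click seeks to the fraction of the total duration
               corresponding to the click's x position in the window */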
2300 if (cur_stream) {
2301 int ns, hh, mm, ss;
2302 int tns, thh, tmm, tss;
2303 tns = cur_stream->ic->duration/1000000LL;
2304 thh = tns/3600;
2305 tmm = (tns%3600)/60;
2306 tss = (tns%60);
2307 frac = (double)event.button.x/(double)cur_stream->width;
2308 ns = frac*tns;
2309 hh = ns/3600;
2310 mm = (ns%3600)/60;
2311 ss = (ns%60);
2312 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2313 hh, mm, ss, thh, tmm, tss);
2314 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2315 }
2316 break;
2317 case SDL_VIDEORESIZE:
2318 if (cur_stream) {
2319 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2320 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2321 screen_width = cur_stream->width = event.resize.w;
2322 screen_height= cur_stream->height= event.resize.h;
2323 }
2324 break;
2325 case SDL_QUIT:
2326 case FF_QUIT_EVENT:
2327 do_exit();
2328 break;
2329 case FF_ALLOC_EVENT:
2330 video_open(event.user.data1);
2331 alloc_picture(event.user.data1);
2332 break;
2333 case FF_REFRESH_EVENT:
2334 video_refresh_timer(event.user.data1);
2335 break;
2336 default:
2337 break;
2338 }
2339 }
2340 }
2341
2342 static void opt_frame_size(const char *arg)
2343 {
2344 if (parse_image_size(&frame_width, &frame_height, arg) < 0) {
2345 fprintf(stderr, "Incorrect frame size\n");
2346 exit(1);
2347 }
2348 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2349 fprintf(stderr, "Frame size must be a multiple of 2\n");
2350 exit(1);
2351 }
2352 }
2353
2354 void opt_width(const char *arg)
2355 {
2356 screen_width = atoi(arg);
2357 if(screen_width<=0){
2358 fprintf(stderr, "invalid width\n");
2359 exit(1);
2360 }
2361 }
2362
2363 void opt_height(const char *arg)
2364 {
2365 screen_height = atoi(arg);
2366 if(screen_height<=0){
2367 fprintf(stderr, "invalid height\n");
2368 exit(1);
2369 }
2370 }
2371
2372 static void opt_format(const char *arg)
2373 {
2374 file_iformat = av_find_input_format(arg);
2375 if (!file_iformat) {
2376 fprintf(stderr, "Unknown input format: %s\n", arg);
2377 exit(1);
2378 }
2379 }
2380
2381 static void opt_frame_pix_fmt(const char *arg)
2382 {
2383 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2384 }
2385
2386 #ifdef CONFIG_NETWORK
2387 void opt_rtp_tcp(void)
2388 {
2389 /* only tcp protocol */
2390 rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
2391 }
2392 #endif
2393
2394 void opt_sync(const char *arg)
2395 {
2396 if (!strcmp(arg, "audio"))
2397 av_sync_type = AV_SYNC_AUDIO_MASTER;
2398 else if (!strcmp(arg, "video"))
2399 av_sync_type = AV_SYNC_VIDEO_MASTER;
2400 else if (!strcmp(arg, "ext"))
2401 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2402 else
2403 show_help();
2404 }
2405
2406 void opt_seek(const char *arg)
2407 {
2408 start_time = parse_date(arg, 1);
2409 }
2410
2411 static void opt_debug(const char *arg)
2412 {
2413 av_log_level = 99;
2414 debug = atoi(arg);
2415 }
2416
2417 static void opt_vismv(const char *arg)
2418 {
2419 debug_mv = atoi(arg);
2420 }
2421
2422 static void opt_thread_count(const char *arg)
2423 {
2424 thread_count= atoi(arg);
2425 #if !defined(HAVE_THREADS)
2426 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2427 #endif
2428 }
2429
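/* table of command line options, parsed by parse_options() in main() */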
2430 const OptionDef options[] = {
2431 { "h", 0, {(void*)show_help}, "show help" },
2432 { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2433 { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2434 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2435 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2436 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2437 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2438 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2439 { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2440 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2441 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2442 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2443 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2444 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2445 { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2446 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2447 { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2448 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2449 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2450 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2451 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2452 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2453 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2454 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2455 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2456 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2457 #ifdef CONFIG_NETWORK
2458 { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
2459 #endif
2460 { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2461 { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2462 { NULL, },
2463 };
2464
2465 void show_help(void)
2466 {
2467 printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003-2006 Fabrice Bellard, et al.\n"
2468 "usage: ffplay [options] input_file\n"
2469 "Simple media player\n");
2470 printf("\n");
2471 show_help_options(options, "Main options:\n",
2472 OPT_EXPERT, 0);
2473 show_help_options(options, "\nAdvanced options:\n",
2474 OPT_EXPERT, OPT_EXPERT);
2475 printf("\nWhile playing:\n"
2476 "q, ESC quit\n"
2477 "f toggle full screen\n"
2478 "p, SPC pause\n"
2479 "a cycle audio channel\n"
2480 "v cycle video channel\n"
2481 "t cycle subtitle channel\n"
2482 "w show audio waves\n"
2483 "left/right seek backward/forward 10 seconds\n"
2484 "down/up seek backward/forward 1 minute\n"
2485 "mouse click seek to percentage in file corresponding to fraction of width\n"
2486 );
2487 exit(1);
2488 }
2489
2490 void parse_arg_file(const char *filename)
2491 {
2492 if (!strcmp(filename, "-"))
2493 filename = "pipe:";
2494 input_filename = filename;
2495 }
2496
2497 /* program entry point */
2498 int main(int argc, char **argv)
2499 {
2500 int flags;
2501
2502 /* register all codecs, demuxers and protocols */
2503 av_register_all();
2504
2505 #ifdef CONFIG_OS2
2506 MorphToPM(); // Morph the VIO application to a PM one to be able to use Win* functions
2507
2508 // Make stdout and stderr unbuffered
2509 setbuf( stdout, NULL );
2510 setbuf( stderr, NULL );
2511 #endif
2512
2513 parse_options(argc, argv, options);
2514
2515 if (!input_filename)
2516 show_help();
2517
2518 if (display_disable) {
2519 video_disable = 1;
2520 }
2521 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2522 #if !defined(__MINGW32__) && !defined(CONFIG_DARWIN)
2523 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 or darwin */
2524 #endif
2525 if (SDL_Init (flags)) {
2526 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2527 exit(1);
2528 }
2529
2530 if (!display_disable) {
2531 #ifdef HAVE_SDL_VIDEO_SIZE
2532 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2533 fs_screen_width = vi->current_w;
2534 fs_screen_height = vi->current_h;
2535 #endif
2536 }
2537
2538 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2539 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2540 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2541 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2542
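    /* flush_pkt is a sentinel packet: after a seek it is queued right after
       packet_queue_flush() (see decode_thread) so the decoders can detect
       the discontinuity and reset their state */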
2543 av_init_packet(&flush_pkt);
2544 flush_pkt.data= "FLUSH";
2545
2546 cur_stream = stream_open(input_filename, file_iformat);
2547
2548 event_loop();
2549
2550 /* not reached: event_loop() runs until do_exit() terminates the process */
2551
2552 return 0;
2553 }