Remove the inclusion of rtsp.h and rtp.h from avformat.h, and
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include <math.h>
23 #include <limits.h>
24 #include "avformat.h"
25 #include "rtsp.h"
26 #include "swscale.h"
27 #include "avstring.h"
28
29 #include "version.h"
30 #include "cmdutils.h"
31
32 #include <SDL.h>
33 #include <SDL_thread.h>
34
35 #ifdef __MINGW32__
36 #undef main /* We don't want SDL to override our main() */
37 #endif
38
39 #undef exit
40
41 static const char program_name[] = "FFplay";
42 static const int program_birth_year = 2003;
43
44 //#define DEBUG_SYNC
45
46 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
47 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
48 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
49
50 /* SDL audio buffer size, in samples. Should be small to have precise
51 A/V sync as SDL does not have hardware buffer fullness info. */
52 #define SDL_AUDIO_BUFFER_SIZE 1024
53
54 /* no AV sync correction is done if below the AV sync threshold */
55 #define AV_SYNC_THRESHOLD 0.01
56 /* no AV correction is done if too big error */
57 #define AV_NOSYNC_THRESHOLD 10.0
58
59 /* maximum audio speed change to get correct sync */
60 #define SAMPLE_CORRECTION_PERCENT_MAX 10
61
62 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
63 #define AUDIO_DIFF_AVG_NB 20
64
65 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
66 #define SAMPLE_ARRAY_SIZE (2*65536)
67
68 static int sws_flags = SWS_BICUBIC;
69
/* Thread-safe FIFO of demuxed AVPackets, shared between the demuxer
   thread (producer) and the audio/video/subtitle decoder threads
   (consumers). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;  /* singly linked list head/tail */
    int nb_packets;      /* number of packets currently queued */
    int size;            /* total queued payload bytes (checked against MAX_*Q_SIZE) */
    int abort_request;   /* when set, queue operations fail fast with -1 */
    SDL_mutex *mutex;    /* protects all fields above */
    SDL_cond *cond;      /* signalled when a packet is queued or on abort */
} PacketQueue;
78
79 #define VIDEO_PICTURE_QUEUE_SIZE 1
80 #define SUBPICTURE_QUEUE_SIZE 4
81
/* One decoded video frame, held as an SDL YUV overlay ready for display. */
typedef struct VideoPicture {
    double pts;          ///<presentation time stamp for this picture
    SDL_Overlay *bmp;    /* overlay holding the pixel data (created in main thread) */
    int width, height;   /* source height & width */
    int allocated;       /* set once the overlay has been (re)created */
} VideoPicture;
88
/* One decoded subtitle together with the video pts it is anchored to. */
typedef struct SubPicture {
    double pts;     /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles (owned; freed by free_subpicture) */
} SubPicture;
93
/* Which clock the other streams are synchronised against
   (see get_master_clock()). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
99
/* Complete player state: demuxer context, per-media-type packet queues and
   decoder threads, the clocks used for A/V synchronisation, and the display
   window geometry. One instance exists per opened stream (cur_stream). */
typedef struct VideoState {
    SDL_Thread *parse_tid;      /* demuxer (parse) thread */
    SDL_Thread *video_tid;      /* video decoder thread */
    AVInputFormat *iformat;     /* forced input format, if any */
    int no_background;
    int abort_request;          /* set to tear down all threads */
    int paused;
    int last_paused;            /* previous 'paused' value, to detect toggles */
    int seek_req;               /* a seek is pending */
    int seek_flags;             /* AVSEEK_FLAG_* for the pending seek */
    int64_t seek_pos;           /* target position of the pending seek */
    AVFormatContext *ic;        /* demuxer context */
    int dtg_active_format;

    int audio_stream;           /* index of the audio stream in ic */

    int av_sync_type;           /* master clock selection (AV_SYNC_*) */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;         /* pts of the last decoded audio data */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;      /* size of the SDL audio hardware buffer, bytes */
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;          /* packet currently being decoded */
    uint8_t *audio_pkt_data;     /* read cursor into audio_pkt */
    int audio_pkt_size;          /* bytes left to decode in audio_pkt */

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];  /* ring buffer of recent samples for the waveform view */
    int sample_array_index;      /* write index into sample_array */
    int last_i_start;            /* last waveform start index (reused while paused) */

    SDL_Thread *subtitle_tid;    /* subtitle decoder thread */
    int subtitle_stream;         /* index of the subtitle stream in ic */
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];  /* decoded subtitle ring buffer */
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;      /* protects subpq_* fields */
    SDL_cond *subpq_cond;

    double frame_timer;          /* accumulated target display time of the next frame */
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];  /* decoded picture ring buffer */
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;      /* protects pictq_* fields */
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];         /* input file/URL name */
    int width, height, xleft, ytop;  /* display window geometry */
} VideoState;
170
void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int seek_by_bytes;
static int display_disable;
static int show_status;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_resilience = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;  /* av_gettime() at the last SDL audio callback */

/* sentinel packet queued on seek; packet_queue_put() does not duplicate it */
AVPacket flush_pkt;

/* custom SDL user events used to hand work back to the main event loop */
#define FF_ALLOC_EVENT (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT (SDL_USEREVENT + 2)

SDL_Surface *screen;
221
222 /* packet queue handling */
223 static void packet_queue_init(PacketQueue *q)
224 {
225 memset(q, 0, sizeof(PacketQueue));
226 q->mutex = SDL_CreateMutex();
227 q->cond = SDL_CreateCond();
228 }
229
230 static void packet_queue_flush(PacketQueue *q)
231 {
232 AVPacketList *pkt, *pkt1;
233
234 SDL_LockMutex(q->mutex);
235 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
236 pkt1 = pkt->next;
237 av_free_packet(&pkt->pkt);
238 av_freep(&pkt);
239 }
240 q->last_pkt = NULL;
241 q->first_pkt = NULL;
242 q->nb_packets = 0;
243 q->size = 0;
244 SDL_UnlockMutex(q->mutex);
245 }
246
247 static void packet_queue_end(PacketQueue *q)
248 {
249 packet_queue_flush(q);
250 SDL_DestroyMutex(q->mutex);
251 SDL_DestroyCond(q->cond);
252 }
253
254 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
255 {
256 AVPacketList *pkt1;
257
258 /* duplicate the packet */
259 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
260 return -1;
261
262 pkt1 = av_malloc(sizeof(AVPacketList));
263 if (!pkt1)
264 return -1;
265 pkt1->pkt = *pkt;
266 pkt1->next = NULL;
267
268
269 SDL_LockMutex(q->mutex);
270
271 if (!q->last_pkt)
272
273 q->first_pkt = pkt1;
274 else
275 q->last_pkt->next = pkt1;
276 q->last_pkt = pkt1;
277 q->nb_packets++;
278 q->size += pkt1->pkt.size;
279 /* XXX: should duplicate packet data in DV case */
280 SDL_CondSignal(q->cond);
281
282 SDL_UnlockMutex(q->mutex);
283 return 0;
284 }
285
286 static void packet_queue_abort(PacketQueue *q)
287 {
288 SDL_LockMutex(q->mutex);
289
290 q->abort_request = 1;
291
292 SDL_CondSignal(q->cond);
293
294 SDL_UnlockMutex(q->mutex);
295 }
296
297 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
298 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
299 {
300 AVPacketList *pkt1;
301 int ret;
302
303 SDL_LockMutex(q->mutex);
304
305 for(;;) {
306 if (q->abort_request) {
307 ret = -1;
308 break;
309 }
310
311 pkt1 = q->first_pkt;
312 if (pkt1) {
313 q->first_pkt = pkt1->next;
314 if (!q->first_pkt)
315 q->last_pkt = NULL;
316 q->nb_packets--;
317 q->size -= pkt1->pkt.size;
318 *pkt = pkt1->pkt;
319 av_free(pkt1);
320 ret = 1;
321 break;
322 } else if (!block) {
323 ret = 0;
324 break;
325 } else {
326 SDL_CondWait(q->cond, q->mutex);
327 }
328 }
329 SDL_UnlockMutex(q->mutex);
330 return ret;
331 }
332
333 static inline void fill_rectangle(SDL_Surface *screen,
334 int x, int y, int w, int h, int color)
335 {
336 SDL_Rect rect;
337 rect.x = x;
338 rect.y = y;
339 rect.w = w;
340 rect.h = h;
341 SDL_FillRect(screen, &rect, color);
342 }
343
344 #if 0
345 /* draw only the border of a rectangle */
346 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
347 {
348 int w1, w2, h1, h2;
349
350 /* fill the background */
351 w1 = x;
352 if (w1 < 0)
353 w1 = 0;
354 w2 = s->width - (x + w);
355 if (w2 < 0)
356 w2 = 0;
357 h1 = y;
358 if (h1 < 0)
359 h1 = 0;
360 h2 = s->height - (y + h);
361 if (h2 < 0)
362 h2 = 0;
363 fill_rectangle(screen,
364 s->xleft, s->ytop,
365 w1, s->height,
366 color);
367 fill_rectangle(screen,
368 s->xleft + s->width - w2, s->ytop,
369 w2, s->height,
370 color);
371 fill_rectangle(screen,
372 s->xleft + w1, s->ytop,
373 s->width - w1 - w2, h1,
374 color);
375 fill_rectangle(screen,
376 s->xleft + w1, s->ytop + s->height - h2,
377 s->width - w1 - w2, h2,
378 color);
379 }
380 #endif
381
382
383
/* Fixed-point scale used by the RGB -> YCbCr conversion macros below. */
#define SCALEBITS 10
#define ONE_HALF (1 << (SCALEBITS - 1))
#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))

/* RGB -> CCIR 601 "studio swing" luma: Y in [16..235]. */
#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

/* RGB -> CCIR 601 chroma; 'shift' lets callers feed pre-summed samples. */
#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* Alpha-blend newp over oldp; 's' is the number of samples summed into
   'newp' expressed as a shift (0 -> 1 sample, 1 -> 2, 2 -> 4). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB pixel from 's'. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the paletted pixel *s in 'pal' and unpack it as A|Y|U|V. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y/u/v/a back into a 32-bit word at 'd'. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel of the subtitle bitmap (palette indices) */
#define BPP 1

428
429 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
430 {
431 int wrap, wrap3, width2, skip2;
432 int y, u, v, a, u1, v1, a1, w, h;
433 uint8_t *lum, *cb, *cr;
434 const uint8_t *p;
435 const uint32_t *pal;
436 int dstx, dsty, dstw, dsth;
437
438 dstx = FFMIN(FFMAX(rect->x, 0), imgw);
439 dstw = FFMIN(FFMAX(rect->w, 0), imgw - dstx);
440 dsty = FFMIN(FFMAX(rect->y, 0), imgh);
441 dsth = FFMIN(FFMAX(rect->h, 0), imgh - dsty);
442 lum = dst->data[0] + dsty * dst->linesize[0];
443 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
444 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
445
446 width2 = (dstw + 1) >> 1;
447 skip2 = dstx >> 1;
448 wrap = dst->linesize[0];
449 wrap3 = rect->linesize;
450 p = rect->bitmap;
451 pal = rect->rgba_palette; /* Now in YCrCb! */
452
453 if (dsty & 1) {
454 lum += dstx;
455 cb += skip2;
456 cr += skip2;
457
458 if (dstx & 1) {
459 YUVA_IN(y, u, v, a, p, pal);
460 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
461 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
462 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
463 cb++;
464 cr++;
465 lum++;
466 p += BPP;
467 }
468 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
469 YUVA_IN(y, u, v, a, p, pal);
470 u1 = u;
471 v1 = v;
472 a1 = a;
473 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
474
475 YUVA_IN(y, u, v, a, p + BPP, pal);
476 u1 += u;
477 v1 += v;
478 a1 += a;
479 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
480 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
481 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
482 cb++;
483 cr++;
484 p += 2 * BPP;
485 lum += 2;
486 }
487 if (w) {
488 YUVA_IN(y, u, v, a, p, pal);
489 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
490 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
491 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
492 }
493 p += wrap3 + (wrap3 - dstw * BPP);
494 lum += wrap + (wrap - dstw - dstx);
495 cb += dst->linesize[1] - width2 - skip2;
496 cr += dst->linesize[2] - width2 - skip2;
497 }
498 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
499 lum += dstx;
500 cb += skip2;
501 cr += skip2;
502
503 if (dstx & 1) {
504 YUVA_IN(y, u, v, a, p, pal);
505 u1 = u;
506 v1 = v;
507 a1 = a;
508 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509 p += wrap3;
510 lum += wrap;
511 YUVA_IN(y, u, v, a, p, pal);
512 u1 += u;
513 v1 += v;
514 a1 += a;
515 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
516 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
517 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
518 cb++;
519 cr++;
520 p += -wrap3 + BPP;
521 lum += -wrap + 1;
522 }
523 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
524 YUVA_IN(y, u, v, a, p, pal);
525 u1 = u;
526 v1 = v;
527 a1 = a;
528 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
529
530 YUVA_IN(y, u, v, a, p, pal);
531 u1 += u;
532 v1 += v;
533 a1 += a;
534 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
535 p += wrap3;
536 lum += wrap;
537
538 YUVA_IN(y, u, v, a, p, pal);
539 u1 += u;
540 v1 += v;
541 a1 += a;
542 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
543
544 YUVA_IN(y, u, v, a, p, pal);
545 u1 += u;
546 v1 += v;
547 a1 += a;
548 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
549
550 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
551 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
552
553 cb++;
554 cr++;
555 p += -wrap3 + 2 * BPP;
556 lum += -wrap + 2;
557 }
558 if (w) {
559 YUVA_IN(y, u, v, a, p, pal);
560 u1 = u;
561 v1 = v;
562 a1 = a;
563 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
564 p += wrap3;
565 lum += wrap;
566 YUVA_IN(y, u, v, a, p, pal);
567 u1 += u;
568 v1 += v;
569 a1 += a;
570 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
571 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
572 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
573 cb++;
574 cr++;
575 p += -wrap3 + BPP;
576 lum += -wrap + 1;
577 }
578 p += wrap3 + (wrap3 - dstw * BPP);
579 lum += wrap + (wrap - dstw - dstx);
580 cb += dst->linesize[1] - width2 - skip2;
581 cr += dst->linesize[2] - width2 - skip2;
582 }
583 /* handle odd height */
584 if (h) {
585 lum += dstx;
586 cb += skip2;
587 cr += skip2;
588
589 if (dstx & 1) {
590 YUVA_IN(y, u, v, a, p, pal);
591 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
592 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
593 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
594 cb++;
595 cr++;
596 lum++;
597 p += BPP;
598 }
599 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
600 YUVA_IN(y, u, v, a, p, pal);
601 u1 = u;
602 v1 = v;
603 a1 = a;
604 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
605
606 YUVA_IN(y, u, v, a, p + BPP, pal);
607 u1 += u;
608 v1 += v;
609 a1 += a;
610 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
611 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
612 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
613 cb++;
614 cr++;
615 p += 2 * BPP;
616 lum += 2;
617 }
618 if (w) {
619 YUVA_IN(y, u, v, a, p, pal);
620 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
622 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
623 }
624 }
625 }
626
627 static void free_subpicture(SubPicture *sp)
628 {
629 int i;
630
631 for (i = 0; i < sp->sub.num_rects; i++)
632 {
633 av_free(sp->sub.rects[i].bitmap);
634 av_free(sp->sub.rects[i].rgba_palette);
635 }
636
637 av_free(sp->sub.rects);
638
639 memset(&sp->sub, 0, sizeof(AVSubtitle));
640 }
641
642 static void video_image_display(VideoState *is)
643 {
644 VideoPicture *vp;
645 SubPicture *sp;
646 AVPicture pict;
647 float aspect_ratio;
648 int width, height, x, y;
649 SDL_Rect rect;
650 int i;
651
652 vp = &is->pictq[is->pictq_rindex];
653 if (vp->bmp) {
654 /* XXX: use variable in the frame */
655 if (is->video_st->codec->sample_aspect_ratio.num == 0)
656 aspect_ratio = 0;
657 else
658 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
659 * is->video_st->codec->width / is->video_st->codec->height;;
660 if (aspect_ratio <= 0.0)
661 aspect_ratio = (float)is->video_st->codec->width /
662 (float)is->video_st->codec->height;
663 /* if an active format is indicated, then it overrides the
664 mpeg format */
665 #if 0
666 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
667 is->dtg_active_format = is->video_st->codec->dtg_active_format;
668 printf("dtg_active_format=%d\n", is->dtg_active_format);
669 }
670 #endif
671 #if 0
672 switch(is->video_st->codec->dtg_active_format) {
673 case FF_DTG_AFD_SAME:
674 default:
675 /* nothing to do */
676 break;
677 case FF_DTG_AFD_4_3:
678 aspect_ratio = 4.0 / 3.0;
679 break;
680 case FF_DTG_AFD_16_9:
681 aspect_ratio = 16.0 / 9.0;
682 break;
683 case FF_DTG_AFD_14_9:
684 aspect_ratio = 14.0 / 9.0;
685 break;
686 case FF_DTG_AFD_4_3_SP_14_9:
687 aspect_ratio = 14.0 / 9.0;
688 break;
689 case FF_DTG_AFD_16_9_SP_14_9:
690 aspect_ratio = 14.0 / 9.0;
691 break;
692 case FF_DTG_AFD_SP_4_3:
693 aspect_ratio = 4.0 / 3.0;
694 break;
695 }
696 #endif
697
698 if (is->subtitle_st)
699 {
700 if (is->subpq_size > 0)
701 {
702 sp = &is->subpq[is->subpq_rindex];
703
704 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
705 {
706 SDL_LockYUVOverlay (vp->bmp);
707
708 pict.data[0] = vp->bmp->pixels[0];
709 pict.data[1] = vp->bmp->pixels[2];
710 pict.data[2] = vp->bmp->pixels[1];
711
712 pict.linesize[0] = vp->bmp->pitches[0];
713 pict.linesize[1] = vp->bmp->pitches[2];
714 pict.linesize[2] = vp->bmp->pitches[1];
715
716 for (i = 0; i < sp->sub.num_rects; i++)
717 blend_subrect(&pict, &sp->sub.rects[i],
718 vp->bmp->w, vp->bmp->h);
719
720 SDL_UnlockYUVOverlay (vp->bmp);
721 }
722 }
723 }
724
725
726 /* XXX: we suppose the screen has a 1.0 pixel ratio */
727 height = is->height;
728 width = ((int)rint(height * aspect_ratio)) & -3;
729 if (width > is->width) {
730 width = is->width;
731 height = ((int)rint(width / aspect_ratio)) & -3;
732 }
733 x = (is->width - width) / 2;
734 y = (is->height - height) / 2;
735 if (!is->no_background) {
736 /* fill the background */
737 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
738 } else {
739 is->no_background = 0;
740 }
741 rect.x = is->xleft + x;
742 rect.y = is->ytop + y;
743 rect.w = width;
744 rect.h = height;
745 SDL_DisplayYUVOverlay(vp->bmp, &rect);
746 } else {
747 #if 0
748 fill_rectangle(screen,
749 is->xleft, is->ytop, is->width, is->height,
750 QERGB(0x00, 0x00, 0x00));
751 #endif
752 }
753 }
754
/* Mathematical (always non-negative) modulo: result is in [0, b) for b > 0,
   unlike C's '%' which follows the sign of the dividend. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r < 0 ? r + b : r;
}
763
764 static void video_audio_display(VideoState *s)
765 {
766 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
767 int ch, channels, h, h2, bgcolor, fgcolor;
768 int16_t time_diff;
769
770 /* compute display index : center on currently output samples */
771 channels = s->audio_st->codec->channels;
772 nb_display_channels = channels;
773 if (!s->paused) {
774 n = 2 * channels;
775 delay = audio_write_get_buf_size(s);
776 delay /= n;
777
778 /* to be more precise, we take into account the time spent since
779 the last buffer computation */
780 if (audio_callback_time) {
781 time_diff = av_gettime() - audio_callback_time;
782 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
783 }
784
785 delay -= s->width / 2;
786 if (delay < s->width)
787 delay = s->width;
788
789 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
790
791 h= INT_MIN;
792 for(i=0; i<1000; i+=channels){
793 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
794 int a= s->sample_array[idx];
795 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
796 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
797 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
798 int score= a-d;
799 if(h<score && (b^c)<0){
800 h= score;
801 i_start= idx;
802 }
803 }
804
805 s->last_i_start = i_start;
806 } else {
807 i_start = s->last_i_start;
808 }
809
810 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
811 fill_rectangle(screen,
812 s->xleft, s->ytop, s->width, s->height,
813 bgcolor);
814
815 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
816
817 /* total height for one channel */
818 h = s->height / nb_display_channels;
819 /* graph height / 2 */
820 h2 = (h * 9) / 20;
821 for(ch = 0;ch < nb_display_channels; ch++) {
822 i = i_start + ch;
823 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
824 for(x = 0; x < s->width; x++) {
825 y = (s->sample_array[i] * h2) >> 15;
826 if (y < 0) {
827 y = -y;
828 ys = y1 - y;
829 } else {
830 ys = y1;
831 }
832 fill_rectangle(screen,
833 s->xleft + x, ys, 1, y,
834 fgcolor);
835 i += channels;
836 if (i >= SAMPLE_ARRAY_SIZE)
837 i -= SAMPLE_ARRAY_SIZE;
838 }
839 }
840
841 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
842
843 for(ch = 1;ch < nb_display_channels; ch++) {
844 y = s->ytop + ch * h;
845 fill_rectangle(screen,
846 s->xleft, y, s->width, 1,
847 fgcolor);
848 }
849 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
850 }
851
/* (Re)create the SDL output surface. The size comes from, in order of
   preference: the full-screen resolution, the user-requested size, the
   video stream dimensions, or a 640x480 fallback.
   Returns 0 on success, -1 if SDL cannot set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    /* record the size SDL actually gave us */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
889
890 /* display the current picture, if any */
891 static void video_display(VideoState *is)
892 {
893 if(!screen)
894 video_open(cur_stream);
895 if (is->audio_st && is->show_audio)
896 video_audio_display(is);
897 else if (is->video_st)
898 video_image_display(is);
899 }
900
901 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
902 {
903 SDL_Event event;
904 event.type = FF_REFRESH_EVENT;
905 event.user.data1 = opaque;
906 SDL_PushEvent(&event);
907 return 0; /* 0 means stop timer */
908 }
909
/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay)
{
    /* one-shot timer; the callback posts FF_REFRESH_EVENT to the event loop */
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}
915
916 /* get the current audio clock value */
917 static double get_audio_clock(VideoState *is)
918 {
919 double pts;
920 int hw_buf_size, bytes_per_sec;
921 pts = is->audio_clock;
922 hw_buf_size = audio_write_get_buf_size(is);
923 bytes_per_sec = 0;
924 if (is->audio_st) {
925 bytes_per_sec = is->audio_st->codec->sample_rate *
926 2 * is->audio_st->codec->channels;
927 }
928 if (bytes_per_sec)
929 pts -= (double)hw_buf_size / bytes_per_sec;
930 return pts;
931 }
932
933 /* get the current video clock value */
934 static double get_video_clock(VideoState *is)
935 {
936 double delta;
937 if (is->paused) {
938 delta = 0;
939 } else {
940 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
941 }
942 return is->video_current_pts + delta;
943 }
944
945 /* get the current external clock value */
946 static double get_external_clock(VideoState *is)
947 {
948 int64_t ti;
949 ti = av_gettime();
950 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
951 }
952
953 /* get the current master clock value */
954 static double get_master_clock(VideoState *is)
955 {
956 double val;
957
958 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
959 if (is->video_st)
960 val = get_video_clock(is);
961 else
962 val = get_audio_clock(is);
963 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
964 if (is->audio_st)
965 val = get_audio_clock(is);
966 else
967 val = get_video_clock(is);
968 } else {
969 val = get_external_clock(is);
970 }
971 return val;
972 }
973
974 /* seek in the stream */
975 static void stream_seek(VideoState *is, int64_t pos, int rel)
976 {
977 if (!is->seek_req) {
978 is->seek_pos = pos;
979 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
980 if (seek_by_bytes)
981 is->seek_flags |= AVSEEK_FLAG_BYTE;
982 is->seek_req = 1;
983 }
984 }
985
986 /* pause or resume the video */
987 static void stream_pause(VideoState *is)
988 {
989 is->paused = !is->paused;
990 if (!is->paused) {
991 is->video_current_pts = get_video_clock(is);
992 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
993 }
994 }
995
/* called to display each frame; runs in response to FF_REFRESH_EVENT and
   re-arms itself via schedule_refresh(), so it doubles as the display
   scheduler that keeps video locked to the master clock. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* compute nominal delay */
            delay = vp->pts - is->frame_last_pts;
            if (delay <= 0 || delay >= 2.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to follow master synchronisation source */
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
                /* if video is slave, we try to correct big delays by
                   duplicating or deleting a frame */
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;

                /* skip or repeat frame. We take into account the
                   delay to compute the threshold. I still don't know
                   if it is the best guess */
                sync_threshold = AV_SYNC_THRESHOLD;
                if (delay > sync_threshold)
                    sync_threshold = delay;
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if (diff <= -sync_threshold)
                        delay = 0;          /* video is late: show at once */
                    else if (diff >= sync_threshold)
                        delay = 2 * delay;  /* video is early: hold longer */
                }
            }

            is->frame_timer += delay;
            /* compute the REAL delay (we need to do that to avoid
               long term errors */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if (actual_delay < 0.010) {
                /* XXX: should skip picture */
                actual_delay = 0.010;
            }
            /* launch timer for next picture */
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

#if defined(DEBUG_SYNC)
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
                   delay, actual_delay, vp->pts, -diff);
#endif

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: drop everything queued and
                       wake the subtitle thread waiting for queue space */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the head subtitle once it has expired or the
                           next queued one is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh the status line at most every 500 ms */
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1160
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* release the previous overlay before (re)allocating at the new size */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if 0
    /* XXX: use generic function */
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
    switch(is->video_st->codec->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUYV422:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        is_yuv = 1;
        break;
    default:
        is_yuv = 0;
        break;
    }
#endif
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    /* notify queue_picture() (video thread) that the overlay is ready */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1202
1203 /**
1204 *
1205 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1206 */
1207 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1208 {
1209 VideoPicture *vp;
1210 int dst_pix_fmt;
1211 AVPicture pict;
1212 static struct SwsContext *img_convert_ctx;
1213
1214 /* wait until we have space to put a new picture */
1215 SDL_LockMutex(is->pictq_mutex);
1216 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1217 !is->videoq.abort_request) {
1218 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1219 }
1220 SDL_UnlockMutex(is->pictq_mutex);
1221
1222 if (is->videoq.abort_request)
1223 return -1;
1224
1225 vp = &is->pictq[is->pictq_windex];
1226
1227 /* alloc or resize hardware picture buffer */
1228 if (!vp->bmp ||
1229 vp->width != is->video_st->codec->width ||
1230 vp->height != is->video_st->codec->height) {
1231 SDL_Event event;
1232
1233 vp->allocated = 0;
1234
1235 /* the allocation must be done in the main thread to avoid
1236 locking problems */
1237 event.type = FF_ALLOC_EVENT;
1238 event.user.data1 = is;
1239 SDL_PushEvent(&event);
1240
1241 /* wait until the picture is allocated */
1242 SDL_LockMutex(is->pictq_mutex);
1243 while (!vp->allocated && !is->videoq.abort_request) {
1244 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1245 }
1246 SDL_UnlockMutex(is->pictq_mutex);
1247
1248 if (is->videoq.abort_request)
1249 return -1;
1250 }
1251
1252 /* if the frame is not skipped, then display it */
1253 if (vp->bmp) {
1254 /* get a pointer on the bitmap */
1255 SDL_LockYUVOverlay (vp->bmp);
1256
1257 dst_pix_fmt = PIX_FMT_YUV420P;
1258 pict.data[0] = vp->bmp->pixels[0];
1259 pict.data[1] = vp->bmp->pixels[2];
1260 pict.data[2] = vp->bmp->pixels[1];
1261
1262 pict.linesize[0] = vp->bmp->pitches[0];
1263 pict.linesize[1] = vp->bmp->pitches[2];
1264 pict.linesize[2] = vp->bmp->pitches[1];
1265 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1266 is->video_st->codec->width, is->video_st->codec->height,
1267 is->video_st->codec->pix_fmt,
1268 is->video_st->codec->width, is->video_st->codec->height,
1269 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1270 if (img_convert_ctx == NULL) {
1271 fprintf(stderr, "Cannot initialize the conversion context\n");
1272 exit(1);
1273 }
1274 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1275 0, is->video_st->codec->height, pict.data, pict.linesize);
1276 /* update the bitmap content */
1277 SDL_UnlockYUVOverlay(vp->bmp);
1278
1279 vp->pts = pts;
1280
1281 /* now we can update the picture count */
1282 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1283 is->pictq_windex = 0;
1284 SDL_LockMutex(is->pictq_mutex);
1285 is->pictq_size++;
1286 SDL_UnlockMutex(is->pictq_mutex);
1287 }
1288 return 0;
1289 }
1290
1291 /**
1292 * compute the exact PTS for the picture if it is omitted in the stream
1293 * @param pts1 the dts of the pkt / pts of the frame
1294 */
1295 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1296 {
1297 double frame_delay, pts;
1298
1299 pts = pts1;
1300
1301 if (pts != 0) {
1302 /* update video clock with pts, if present */
1303 is->video_clock = pts;
1304 } else {
1305 pts = is->video_clock;
1306 }
1307 /* update video clock for next frame */
1308 frame_delay = av_q2d(is->video_st->codec->time_base);
1309 /* for MPEG2, the frame can be repeated, so we update the
1310 clock accordingly */
1311 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1312 is->video_clock += frame_delay;
1313
1314 #if defined(DEBUG_SYNC) && 0
1315 {
1316 int ftype;
1317 if (src_frame->pict_type == FF_B_TYPE)
1318 ftype = 'B';
1319 else if (src_frame->pict_type == FF_I_TYPE)
1320 ftype = 'I';
1321 else
1322 ftype = 'P';
1323 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1324 ftype, pts, pts1);
1325 }
1326 #endif
1327 return queue_picture(is, src_frame, pts);
1328 }
1329
1330 static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;
1331
1332 static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
1333 int ret= avcodec_default_get_buffer(c, pic);
1334 uint64_t *pts= av_malloc(sizeof(uint64_t));
1335 *pts= global_video_pkt_pts;
1336 pic->opaque= pts;
1337 return ret;
1338 }
1339
1340 static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
1341 if(pic) av_freep(&pic->opaque);
1342 avcodec_default_release_buffer(c, pic);
1343 }
1344
/* Video decoder thread: pull packets from videoq, decode them, pick a
   presentation timestamp and push the frames through output_picture2()
   into the picture queue. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        /* busy-wait (10 ms steps) while playback is paused */
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        /* a flush packet (queued after a seek) resets decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        /* stash the packet pts so my_get_buffer() can attach it to the
           frame the decoder allocates for this packet */
        global_video_pkt_pts= pkt->pts;
        len1 = avcodec_decode_video(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt->data, pkt->size);

        /* timestamp selection: prefer the pts smuggled through
           frame->opaque (correct across B-frame reordering), then the
           packet dts, else 0 (output_picture2 extrapolates) */
        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
            pts= *(uint64_t*)frame->opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        /* convert from stream time base to seconds */
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        /* in single-step mode, re-pause after each decoded packet */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}
1396
/* Subtitle decoder thread: pull packets from subtitleq, decode them and
   queue bitmap subtitles (format 0) after converting their palettes
   from RGBA to YUVA for overlay blending. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* busy-wait (10 ms steps) while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet (queued after a seek) resets decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        /* NOTE(review): on this abort path the packet obtained above is
           not freed -- looks like a small leak; confirm */
        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt->data, pkt->size);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each palette entry from RGBA to YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1471
1472 /* copy samples for viewing in editor window */
1473 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1474 {
1475 int size, len, channels;
1476
1477 channels = is->audio_st->codec->channels;
1478
1479 size = samples_size / sizeof(short);
1480 while (size > 0) {
1481 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1482 if (len > size)
1483 len = size;
1484 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1485 samples += len;
1486 is->sample_array_index += len;
1487 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1488 is->sample_array_index = 0;
1489 size -= len;
1490 }
1491 }
1492
1493 /* return the new audio buffer size (samples can be added or deleted
1494 to get better sync if video or external master clock) */
1495 static int synchronize_audio(VideoState *is, short *samples,
1496 int samples_size1, double pts)
1497 {
1498 int n, samples_size;
1499 double ref_clock;
1500
1501 n = 2 * is->audio_st->codec->channels;
1502 samples_size = samples_size1;
1503
1504 /* if not master, then we try to remove or add samples to correct the clock */
1505 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1506 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1507 double diff, avg_diff;
1508 int wanted_size, min_size, max_size, nb_samples;
1509
1510 ref_clock = get_master_clock(is);
1511 diff = get_audio_clock(is) - ref_clock;
1512
1513 if (diff < AV_NOSYNC_THRESHOLD) {
1514 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1515 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1516 /* not enough measures to have a correct estimate */
1517 is->audio_diff_avg_count++;
1518 } else {
1519 /* estimate the A-V difference */
1520 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1521
1522 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1523 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1524 nb_samples = samples_size / n;
1525
1526 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1527 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1528 if (wanted_size < min_size)
1529 wanted_size = min_size;
1530 else if (wanted_size > max_size)
1531 wanted_size = max_size;
1532
1533 /* add or remove samples to correction the synchro */
1534 if (wanted_size < samples_size) {
1535 /* remove samples */
1536 samples_size = wanted_size;
1537 } else if (wanted_size > samples_size) {
1538 uint8_t *samples_end, *q;
1539 int nb;
1540
1541 /* add samples */
1542 nb = (samples_size - wanted_size);
1543 samples_end = (uint8_t *)samples + samples_size - n;
1544 q = samples_end + n;
1545 while (nb > 0) {
1546 memcpy(q, samples_end, n);
1547 q += n;
1548 nb -= n;
1549 }
1550 samples_size = wanted_size;
1551 }
1552 }
1553 #if 0
1554 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1555 diff, avg_diff, samples_size - samples_size1,
1556 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1557 #endif
1558 }
1559 } else {
1560 /* too big difference : may be initial PTS errors, so
1561 reset A-V filter */
1562 is->audio_diff_avg_count = 0;
1563 is->audio_diff_cum = 0;
1564 }
1565 }
1566
1567 return samples_size;
1568 }
1569
/* decode one audio frame and returns its uncompressed size (bytes
   written to audio_buf).  Keeps partial-packet state in
   is->audio_pkt_data/audio_pkt_size across calls, since one packet may
   contain several frames.  Returns -1 on abort/pause or queue error.
   *pts_ptr receives the presentation time of the decoded data. */
static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)
{
    AVPacket *pkt = &is->audio_pkt;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (is->audio_pkt_size > 0) {
            data_size = buf_size;
            len1 = avcodec_decode_audio2(is->audio_st->codec,
                                        (int16_t *)audio_buf, &data_size,
                                        is->audio_pkt_data, is->audio_pkt_size);
            if (len1 < 0) {
                /* if error, we skip the frame */
                is->audio_pkt_size = 0;
                break;
            }

            /* advance within the packet past the consumed bytes */
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0)
                continue;
            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* advance the audio clock by the duration of the decoded
               data (S16: 2 bytes per sample per channel) */
            n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * is->audio_st->codec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet (queued after a seek) resets decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->audio_st->codec);
            continue;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
1637
/* get the current audio output buffer size, in samples. With SDL, we
   cannot have a precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    /* data decoded but not yet handed to SDL.  NOTE(review): despite
       the comment above, this value appears to be in bytes, not
       samples -- confirm against the callers. */
    return is->audio_buf_size - is->audio_buf_index;
}
1644
1645
1646 /* prepare a new audio buffer */
1647 void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1648 {
1649 VideoState *is = opaque;
1650 int audio_size, len1;
1651 double pts;
1652
1653 audio_callback_time = av_gettime();
1654
1655 while (len > 0) {
1656 if (is->audio_buf_index >= is->audio_buf_size) {
1657 audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
1658 if (audio_size < 0) {
1659 /* if error, just output silence */
1660 is->audio_buf_size = 1024;
1661 memset(is->audio_buf, 0, is->audio_buf_size);
1662 } else {
1663 if (is->show_audio)
1664 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1665 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1666 pts);
1667 is->audio_buf_size = audio_size;
1668 }
1669 is->audio_buf_index = 0;
1670 }
1671 len1 = is->audio_buf_size - is->audio_buf_index;
1672 if (len1 > len)
1673 len1 = len;
1674 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1675 len -= len1;
1676 stream += len1;
1677 is->audio_buf_index += len1;
1678 }
1679 }
1680
/* open a given stream. Return 0 if OK.  For audio the SDL device is
   opened *before* the codec so that the obtained hardware spec is
   known; the decoder thread (video/subtitle) is spawned at the end. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        /* hack for AC3. XXX: suppress that */
        if (enc->channels > 2)
            enc->channels = 2;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
    }

    /* configure the decoder from the command line options before
       opening it */
    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_resilience= error_resilience;
    enc->error_concealment= error_concealment;
    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;
    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    /* per-type initialization: record the stream, init its queue and
       start the decoding machinery */
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        /* audio decoding is driven by the SDL callback; just unpause */
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);

        /* install pts-tracking buffer callbacks (see my_get_buffer) */
        enc-> get_buffer= my_get_buffer;
        enc->release_buffer= my_release_buffer;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
1775
/* Close one stream: abort its packet queue, unblock and join its
   decoder thread, then close the codec and clear the per-stream state. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback from running before the queue dies */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    avcodec_close(enc);
    /* finally mark the stream as inactive */
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
1843
1844 static void dump_stream_info(const AVFormatContext *s)
1845 {
1846 if (s->track != 0)
1847 fprintf(stderr, "Track: %d\n", s->track);
1848 if (s->title[0] != '\0')
1849 fprintf(stderr, "Title: %s\n", s->title);
1850 if (s->author[0] != '\0')
1851 fprintf(stderr, "Author: %s\n", s->author);
1852 if (s->copyright[0] != '\0')
1853 fprintf(stderr, "Copyright: %s\n", s->copyright);
1854 if (s->comment[0] != '\0')
1855 fprintf(stderr, "Comment: %s\n", s->comment);
1856 if (s->album[0] != '\0')
1857 fprintf(stderr, "Album: %s\n", s->album);
1858 if (s->year != 0)
1859 fprintf(stderr, "Year: %d\n", s->year);
1860 if (s->genre[0] != '\0')
1861 fprintf(stderr, "Genre: %s\n", s->genre);
1862 }
1863
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* polled by the I/O layer (installed via url_set_interrupt_cb);
   non-zero tells a blocking network/disk read to abort */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
1872
/* this thread gets the stream from the disk or the network.  It opens
   the input, selects and opens the streams, then loops demuxing
   packets into the per-stream queues, handling pause and seek requests
   on the way.  On exit (error or abort) it tears everything down and,
   on error, posts FF_QUIT_EVENT to the main loop. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow blocking I/O to be interrupted by abort_request */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* hints for raw/grab-style demuxers that need them */
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick the audio/video streams to play, honouring the -ast/-vst
       options (the wanted_*_stream counters) and -an/-vn disables */
    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
                video_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        /* audio-only: show the waveform display instead of video */
        if (!display_disable)
            is->show_audio = 1;
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause state changes to the demuxer (needed for
           network streams) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#ifdef CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int stream_index= -1;
            int64_t seek_target= is->seek_pos;

            if     (is-> video_stream >= 0) stream_index= is-> video_stream;
            else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
            else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;

            /* seek_pos is in AV_TIME_BASE units; convert to the chosen
               stream's time base */
            if(stream_index>=0){
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
            }

            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* drop queued packets and tell each decoder to flush */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queue are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
            url_feof(&ic->pb)) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (url_ferror(&ic->pb) == 0) {
                SDL_Delay(100); /* wait for user event */
                continue;
            } else
                break;
        }
        /* dispatch the packet to the queue of its stream */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2079
2080 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2081 {
2082 VideoState *is;
2083
2084 is = av_mallocz(sizeof(VideoState));
2085 if (!is)
2086 return NULL;
2087 av_strlcpy(is->filename, filename, sizeof(is->filename));
2088 is->iformat = iformat;
2089 is->ytop = 0;
2090 is->xleft = 0;
2091
2092 /* start video display */
2093 is->pictq_mutex = SDL_CreateMutex();
2094 is->pictq_cond = SDL_CreateCond();
2095
2096 is->subpq_mutex = SDL_CreateMutex();
2097 is->subpq_cond = SDL_CreateCond();
2098
2099 /* add the refresh timer to draw the picture */
2100 schedule_refresh(is, 40);
2101
2102 is->av_sync_type = av_sync_type;
2103 is->parse_tid = SDL_CreateThread(decode_thread, is);
2104 if (!is->parse_tid) {
2105 av_free(is);
2106 return NULL;
2107 }
2108 return is;
2109 }
2110
2111 static void stream_close(VideoState *is)
2112 {
2113 VideoPicture *vp;
2114 int i;
2115 /* XXX: use a special url_shutdown call to abort parse cleanly */
2116 is->abort_request = 1;
2117 SDL_WaitThread(is->parse_tid, NULL);
2118
2119 /* free all pictures */
2120 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2121 vp = &is->pictq[i];
2122 if (vp->bmp) {
2123 SDL_FreeYUVOverlay(vp->bmp);
2124 vp->bmp = NULL;
2125 }
2126 }
2127 SDL_DestroyMutex(is->pictq_mutex);
2128 SDL_DestroyCond(is->pictq_cond);
2129 SDL_DestroyMutex(is->subpq_mutex);
2130 SDL_DestroyCond(is->subpq_cond);
2131 }
2132
/* Switch to the next usable stream of the given type ('a'/'v'/'t'
   hotkeys).  Scans forward from the current stream, wrapping around;
   for subtitles, falling off the end selects "no subtitle" (-1). */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle from (subtitles may legitimately start at -1) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                /* wrap to "subtitles off" */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* full circle: no alternative stream found */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    /* stream_component_close() ignores negative indices */
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2181
2182
2183 static void toggle_full_screen(void)
2184 {
2185 is_full_screen = !is_full_screen;
2186 if (!fs_screen_width) {
2187 /* use default SDL method */
2188 // SDL_WM_ToggleFullScreen(screen);
2189 }
2190 video_open(cur_stream);
2191 }
2192
2193 static void toggle_pause(void)
2194 {
2195 if (cur_stream)
2196 stream_pause(cur_stream);
2197 step = 0;
2198 }
2199
2200 static void step_to_next_frame(void)
2201 {
2202 if (cur_stream) {
2203 /* if the stream is paused unpause it, then step */
2204 if (cur_stream->paused)
2205 stream_pause(cur_stream);
2206 }
2207 step = 1;
2208 }
2209
2210 static void do_exit(void)
2211 {
2212 if (cur_stream) {
2213 stream_close(cur_stream);
2214 cur_stream = NULL;
2215 }
2216 if (show_status)
2217 printf("\n");
2218 SDL_Quit();
2219 exit(0);
2220 }
2221
2222 static void toggle_audio_display(void)
2223 {
2224 if (cur_stream) {
2225 cur_stream->show_audio = !cur_stream->show_audio;
2226 }
2227 }
2228
/* handle an event sent by the GUI.  Runs on the main thread forever:
   keyboard shortcuts, mouse seeking, window resize, and the custom
   FF_* events posted by the worker threads (overlay allocation must
   happen here, see alloc_picture). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek by +/-10 s or +/-60 s */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* convert the time increment to a byte offset,
                           estimating from the bit rate when known */
                        pos = url_ftell(&cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            /* click position maps linearly to a fraction of the total
               duration */
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by queue_picture(); the overlay must be created on
               this (main) thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
2343
2344 static void opt_frame_size(const char *arg)
2345 {
2346 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2347 fprintf(stderr, "Incorrect frame size\n");
2348 exit(1);
2349 }
2350 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2351 fprintf(stderr, "Frame size must be a multiple of 2\n");
2352 exit(1);
2353 }
2354 }
2355
2356 static void opt_width(const char *arg)
2357 {
2358 screen_width = atoi(arg);
2359 if(screen_width<=0){
2360 fprintf(stderr, "invalid width\n");
2361 exit(1);
2362 }
2363 }
2364
2365 static void opt_height(const char *arg)
2366 {
2367 screen_height = atoi(arg);
2368 if(screen_height<=0){
2369 fprintf(stderr, "invalid height\n");
2370 exit(1);
2371 }
2372 }
2373
2374 static void opt_format(const char *arg)
2375 {
2376 file_iformat = av_find_input_format(arg);
2377 if (!file_iformat) {
2378 fprintf(stderr, "Unknown input format: %s\n", arg);
2379 exit(1);
2380 }
2381 }
2382
2383 static void opt_frame_pix_fmt(const char *arg)
2384 {
2385 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2386 }
2387
#ifdef CONFIG_RTSP_DEMUXER
/* "-rtp_tcp" handler: restrict RTSP transport to RTP-over-TCP only,
 * useful when UDP traffic is blocked by a firewall or NAT. */
static void opt_rtp_tcp(void)
{
    /* only tcp protocol */
    rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
}
#endif
2395
2396 static void opt_sync(const char *arg)
2397 {
2398 if (!strcmp(arg, "audio"))
2399 av_sync_type = AV_SYNC_AUDIO_MASTER;
2400 else if (!strcmp(arg, "video"))
2401 av_sync_type = AV_SYNC_VIDEO_MASTER;
2402 else if (!strcmp(arg, "ext"))
2403 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2404 else {
2405 show_help();
2406 exit(1);
2407 }
2408 }
2409
2410 static void opt_seek(const char *arg)
2411 {
2412 start_time = parse_date(arg, 1);
2413 if (start_time == INT64_MIN) {
2414 fprintf(stderr, "Invalid duration specification: %s\n", arg);
2415 exit(1);
2416 }
2417 }
2418
/* "-debug" handler: raise the libav* log level to maximum and set the
 * codec debug flag bit mask from the argument. */
static void opt_debug(const char *arg)
{
    av_log_level = 99;
    debug = atoi(arg);
}
2424
/* "-vismv" handler: set the motion-vector visualization flags passed
 * to the decoder via debug_mv. */
static void opt_vismv(const char *arg)
{
    debug_mv = atoi(arg);
}
2429
/* "-threads" handler: set the number of decoding threads; warns when
 * the binary was built without real thread support. */
static void opt_thread_count(const char *arg)
{
    thread_count= atoi(arg);
#if !defined(HAVE_THREADS)
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
}
2437
/* "-h" handler: print the usage text and terminate successfully. */
static void opt_show_help(void)
{
    show_help();
    exit(0);
}
2443
2444 const OptionDef options[] = {
2445 { "h", 0, {(void*)opt_show_help}, "show help" },
2446 { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2447 { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2448 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2449 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2450 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2451 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2452 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2453 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
2454 { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2455 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2456 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2457 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2458 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2459 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2460 { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2461 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2462 { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2463 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2464 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2465 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2466 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2467 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2468 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2469 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2470 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2471 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2472 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2473 #ifdef CONFIG_RTSP_DEMUXER
2474 { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
2475 #endif
2476 { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2477 { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2478 { NULL, },
2479 };
2480
2481 void show_help(void)
2482 {
2483 printf("usage: ffplay [options] input_file\n"
2484 "Simple media player\n");
2485 printf("\n");
2486 show_help_options(options, "Main options:\n",
2487 OPT_EXPERT, 0);
2488 show_help_options(options, "\nAdvanced options:\n",
2489 OPT_EXPERT, OPT_EXPERT);
2490 printf("\nWhile playing:\n"
2491 "q, ESC quit\n"
2492 "f toggle full screen\n"
2493 "p, SPC pause\n"
2494 "a cycle audio channel\n"
2495 "v cycle video channel\n"
2496 "t cycle subtitle channel\n"
2497 "w show audio waves\n"
2498 "left/right seek backward/forward 10 seconds\n"
2499 "down/up seek backward/forward 1 minute\n"
2500 "mouse click seek to percentage in file corresponding to fraction of width\n"
2501 );
2502 }
2503
2504 void opt_input_file(const char *filename)
2505 {
2506 if (!strcmp(filename, "-"))
2507 filename = "pipe:";
2508 input_filename = filename;
2509 }
2510
/* Called from the main */
/* Program entry point: parse options, initialize SDL, open the input
 * stream and hand control to the (never-returning) event loop. */
int main(int argc, char **argv)
{
    int flags; /* SDL subsystem init flags */

    /* register all codecs, demux and protocols */
    av_register_all();

    show_banner(program_name, program_birth_year);

    parse_options(argc, argv, options, opt_input_file);

    /* an input file name (or URL) is mandatory */
    if (!input_filename) {
        show_help();
        exit(1);
    }

    /* with no display there is nothing to show video on */
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#ifdef HAVE_SDL_VIDEO_SIZE
        /* record the desktop resolution for later full-screen switching */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* ignore event classes the event loop never handles, so they do not
       pile up in the SDL queue */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* NOTE(review): flush_pkt looks like a sentinel packet used to
       request a decoder flush after seeks — presumably recognized by
       the queue consumers; confirm against the packet queue code */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}