Fix get_str/get_str8() to also work if the target string is not long enough to
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include <math.h>
23 #include <limits.h>
24 #include "avformat.h"
25 #include "swscale.h"
26 #include "avstring.h"
27
28 #include "version.h"
29 #include "cmdutils.h"
30
31 #include <SDL.h>
32 #include <SDL_thread.h>
33
34 #ifdef __MINGW32__
35 #undef main /* We don't want SDL to override our main() */
36 #endif
37
38 #undef exit
39
40 //#define DEBUG_SYNC
41
42 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
43 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
44 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
45
46 /* SDL audio buffer size, in samples. Should be small to have precise
47 A/V sync as SDL does not have hardware buffer fullness info. */
48 #define SDL_AUDIO_BUFFER_SIZE 1024
49
50 /* no AV sync correction is done if below the AV sync threshold */
51 #define AV_SYNC_THRESHOLD 0.01
52 /* no AV correction is done if too big error */
53 #define AV_NOSYNC_THRESHOLD 10.0
54
55 /* maximum audio speed change to get correct sync */
56 #define SAMPLE_CORRECTION_PERCENT_MAX 10
57
58 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
59 #define AUDIO_DIFF_AVG_NB 20
60
61 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
62 #define SAMPLE_ARRAY_SIZE (2*65536)
63
64 static int sws_flags = SWS_BICUBIC;
65
/* Thread-safe FIFO of demuxed packets, shared between the demux (producer)
   thread and a decoder (consumer) thread. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list head and tail */
    int nb_packets;                     /* number of packets queued */
    int size;                           /* sum of queued packet payload sizes, in bytes */
    int abort_request;                  /* set by packet_queue_abort() to unblock readers */
    SDL_mutex *mutex;                   /* protects all fields above */
    SDL_cond *cond;                     /* signalled on put and on abort */
} PacketQueue;
74
75 #define VIDEO_PICTURE_QUEUE_SIZE 1
76 #define SUBPICTURE_QUEUE_SIZE 4
77
/* One slot of the decoded-picture display queue. */
typedef struct VideoPicture {
    double pts; ///<presentation time stamp for this picture
    SDL_Overlay *bmp;  /* SDL YUV overlay holding the frame, or NULL if not yet allocated */
    int width, height; /* source height & width */
    int allocated;     /* set by alloc_picture() once bmp is (re)created */
} VideoPicture;
84
/* One slot of the decoded-subtitle display queue. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle; rectangles freed via free_subpicture() */
} SubPicture;
89
/* Which clock drives A/V synchronisation (see get_master_clock()). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
95
/* All state for one open media file: thread handles, per-stream decode
   state, the three packet queues, and the picture/subpicture display
   queues with their synchronisation primitives. */
typedef struct VideoState {
    SDL_Thread *parse_tid;   /* demux (read) thread */
    SDL_Thread *video_tid;   /* video decode thread */
    AVInputFormat *iformat;  /* forced input format, if any */
    int no_background;
    int abort_request;       /* set to tear everything down */
    int paused;
    int last_paused;
    int seek_req;            /* a seek is pending (see stream_seek()) */
    int seek_flags;
    int64_t seek_pos;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;        /* audio stream index */

    int av_sync_type;        /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;      /* pts reference used by get_audio_clock() */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;
    uint8_t *audio_pkt_data;
    int audio_pkt_size;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer feeding the waveform view */
    int sample_array_index;
    int last_i_start;    /* waveform window start kept while paused */

    SDL_Thread *subtitle_tid; /* subtitle decode thread */
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex; /* count, read index, write index */
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;      /* predicted display time; advanced by 'delay' each refresh */
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex; /* count, read index, write index */
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* display window geometry */
} VideoState;
166
void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int seek_by_bytes;
static int display_disable;
static int show_status;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_resilience = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
/* av_gettime() timestamp taken when the audio callback started; used by
   video_audio_display() to refine the waveform position. */
static int64_t audio_callback_time;

/* sentinel packet: packet_queue_put() skips av_dup_packet() for it;
   presumably queued on seek to flush the decoders — confirm in the demux
   thread (not visible in this chunk). */
AVPacket flush_pkt;

/* custom SDL events pushed to the main event loop */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

SDL_Surface *screen;
217
218 /* packet queue handling */
219 static void packet_queue_init(PacketQueue *q)
220 {
221 memset(q, 0, sizeof(PacketQueue));
222 q->mutex = SDL_CreateMutex();
223 q->cond = SDL_CreateCond();
224 }
225
226 static void packet_queue_flush(PacketQueue *q)
227 {
228 AVPacketList *pkt, *pkt1;
229
230 SDL_LockMutex(q->mutex);
231 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
232 pkt1 = pkt->next;
233 av_free_packet(&pkt->pkt);
234 av_freep(&pkt);
235 }
236 q->last_pkt = NULL;
237 q->first_pkt = NULL;
238 q->nb_packets = 0;
239 q->size = 0;
240 SDL_UnlockMutex(q->mutex);
241 }
242
243 static void packet_queue_end(PacketQueue *q)
244 {
245 packet_queue_flush(q);
246 SDL_DestroyMutex(q->mutex);
247 SDL_DestroyCond(q->cond);
248 }
249
250 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
251 {
252 AVPacketList *pkt1;
253
254 /* duplicate the packet */
255 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
256 return -1;
257
258 pkt1 = av_malloc(sizeof(AVPacketList));
259 if (!pkt1)
260 return -1;
261 pkt1->pkt = *pkt;
262 pkt1->next = NULL;
263
264
265 SDL_LockMutex(q->mutex);
266
267 if (!q->last_pkt)
268
269 q->first_pkt = pkt1;
270 else
271 q->last_pkt->next = pkt1;
272 q->last_pkt = pkt1;
273 q->nb_packets++;
274 q->size += pkt1->pkt.size;
275 /* XXX: should duplicate packet data in DV case */
276 SDL_CondSignal(q->cond);
277
278 SDL_UnlockMutex(q->mutex);
279 return 0;
280 }
281
282 static void packet_queue_abort(PacketQueue *q)
283 {
284 SDL_LockMutex(q->mutex);
285
286 q->abort_request = 1;
287
288 SDL_CondSignal(q->cond);
289
290 SDL_UnlockMutex(q->mutex);
291 }
292
293 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
294 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
295 {
296 AVPacketList *pkt1;
297 int ret;
298
299 SDL_LockMutex(q->mutex);
300
301 for(;;) {
302 if (q->abort_request) {
303 ret = -1;
304 break;
305 }
306
307 pkt1 = q->first_pkt;
308 if (pkt1) {
309 q->first_pkt = pkt1->next;
310 if (!q->first_pkt)
311 q->last_pkt = NULL;
312 q->nb_packets--;
313 q->size -= pkt1->pkt.size;
314 *pkt = pkt1->pkt;
315 av_free(pkt1);
316 ret = 1;
317 break;
318 } else if (!block) {
319 ret = 0;
320 break;
321 } else {
322 SDL_CondWait(q->cond, q->mutex);
323 }
324 }
325 SDL_UnlockMutex(q->mutex);
326 return ret;
327 }
328
329 static inline void fill_rectangle(SDL_Surface *screen,
330 int x, int y, int w, int h, int color)
331 {
332 SDL_Rect rect;
333 rect.x = x;
334 rect.y = y;
335 rect.w = w;
336 rect.h = h;
337 SDL_FillRect(screen, &rect, color);
338 }
339
#if 0
/* draw only the border of a rectangle */
/* NOTE: compiled out; video_image_display() keeps a commented-out call
   site.  Fills the four strips of the window that lie outside the
   (x, y, w, h) picture area with 'color'. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;          /* width of the left strip */
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);   /* width of the right strip */
    if (w2 < 0)
        w2 = 0;
    h1 = y;          /* height of the top strip */
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);  /* height of the bottom strip */
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
377
378
379
/* Fixed-point RGB -> CCIR-601 (video range) YUV conversion with SCALEBITS
   fractional bits of precision. */
#define SCALEBITS 10
#define ONE_HALF (1 << (SCALEBITS - 1))
#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))

#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* Blend newp over oldp with alpha a (0..255).  's' is the number of samples
   (as a shift) already summed into oldp/newp, so averaged chroma can be
   blended in one step. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB pixel. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up a palettized pixel: *s indexes 'pal', which holds AYUV entries. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack an AYUV pixel into 32 bits. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

/* bytes per source bitmap pixel (palette index) */
#define BPP 1
424
425 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
426 {
427 int wrap, wrap3, width2, skip2;
428 int y, u, v, a, u1, v1, a1, w, h;
429 uint8_t *lum, *cb, *cr;
430 const uint8_t *p;
431 const uint32_t *pal;
432
433 lum = dst->data[0] + rect->y * dst->linesize[0];
434 cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
435 cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];
436
437 width2 = (rect->w + 1) >> 1;
438 skip2 = rect->x >> 1;
439 wrap = dst->linesize[0];
440 wrap3 = rect->linesize;
441 p = rect->bitmap;
442 pal = rect->rgba_palette; /* Now in YCrCb! */
443
444 if (rect->y & 1) {
445 lum += rect->x;
446 cb += skip2;
447 cr += skip2;
448
449 if (rect->x & 1) {
450 YUVA_IN(y, u, v, a, p, pal);
451 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
452 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
453 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
454 cb++;
455 cr++;
456 lum++;
457 p += BPP;
458 }
459 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
460 YUVA_IN(y, u, v, a, p, pal);
461 u1 = u;
462 v1 = v;
463 a1 = a;
464 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
465
466 YUVA_IN(y, u, v, a, p + BPP, pal);
467 u1 += u;
468 v1 += v;
469 a1 += a;
470 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
471 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
472 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
473 cb++;
474 cr++;
475 p += 2 * BPP;
476 lum += 2;
477 }
478 if (w) {
479 YUVA_IN(y, u, v, a, p, pal);
480 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
482 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
483 }
484 p += wrap3 + (wrap3 - rect->w * BPP);
485 lum += wrap + (wrap - rect->w - rect->x);
486 cb += dst->linesize[1] - width2 - skip2;
487 cr += dst->linesize[2] - width2 - skip2;
488 }
489 for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
490 lum += rect->x;
491 cb += skip2;
492 cr += skip2;
493
494 if (rect->x & 1) {
495 YUVA_IN(y, u, v, a, p, pal);
496 u1 = u;
497 v1 = v;
498 a1 = a;
499 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
500 p += wrap3;
501 lum += wrap;
502 YUVA_IN(y, u, v, a, p, pal);
503 u1 += u;
504 v1 += v;
505 a1 += a;
506 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
507 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
508 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
509 cb++;
510 cr++;
511 p += -wrap3 + BPP;
512 lum += -wrap + 1;
513 }
514 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
515 YUVA_IN(y, u, v, a, p, pal);
516 u1 = u;
517 v1 = v;
518 a1 = a;
519 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
520
521 YUVA_IN(y, u, v, a, p, pal);
522 u1 += u;
523 v1 += v;
524 a1 += a;
525 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
526 p += wrap3;
527 lum += wrap;
528
529 YUVA_IN(y, u, v, a, p, pal);
530 u1 += u;
531 v1 += v;
532 a1 += a;
533 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
534
535 YUVA_IN(y, u, v, a, p, pal);
536 u1 += u;
537 v1 += v;
538 a1 += a;
539 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
540
541 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
542 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
543
544 cb++;
545 cr++;
546 p += -wrap3 + 2 * BPP;
547 lum += -wrap + 2;
548 }
549 if (w) {
550 YUVA_IN(y, u, v, a, p, pal);
551 u1 = u;
552 v1 = v;
553 a1 = a;
554 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
555 p += wrap3;
556 lum += wrap;
557 YUVA_IN(y, u, v, a, p, pal);
558 u1 += u;
559 v1 += v;
560 a1 += a;
561 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
562 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
563 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
564 cb++;
565 cr++;
566 p += -wrap3 + BPP;
567 lum += -wrap + 1;
568 }
569 p += wrap3 + (wrap3 - rect->w * BPP);
570 lum += wrap + (wrap - rect->w - rect->x);
571 cb += dst->linesize[1] - width2 - skip2;
572 cr += dst->linesize[2] - width2 - skip2;
573 }
574 /* handle odd height */
575 if (h) {
576 lum += rect->x;
577 cb += skip2;
578 cr += skip2;
579
580 if (rect->x & 1) {
581 YUVA_IN(y, u, v, a, p, pal);
582 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
583 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
584 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
585 cb++;
586 cr++;
587 lum++;
588 p += BPP;
589 }
590 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
591 YUVA_IN(y, u, v, a, p, pal);
592 u1 = u;
593 v1 = v;
594 a1 = a;
595 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
596
597 YUVA_IN(y, u, v, a, p + BPP, pal);
598 u1 += u;
599 v1 += v;
600 a1 += a;
601 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
602 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
603 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
604 cb++;
605 cr++;
606 p += 2 * BPP;
607 lum += 2;
608 }
609 if (w) {
610 YUVA_IN(y, u, v, a, p, pal);
611 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
613 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
614 }
615 }
616 }
617
618 static void free_subpicture(SubPicture *sp)
619 {
620 int i;
621
622 for (i = 0; i < sp->sub.num_rects; i++)
623 {
624 av_free(sp->sub.rects[i].bitmap);
625 av_free(sp->sub.rects[i].rgba_palette);
626 }
627
628 av_free(sp->sub.rects);
629
630 memset(&sp->sub, 0, sizeof(AVSubtitle));
631 }
632
633 static void video_image_display(VideoState *is)
634 {
635 VideoPicture *vp;
636 SubPicture *sp;
637 AVPicture pict;
638 float aspect_ratio;
639 int width, height, x, y;
640 SDL_Rect rect;
641 int i;
642
643 vp = &is->pictq[is->pictq_rindex];
644 if (vp->bmp) {
645 /* XXX: use variable in the frame */
646 if (is->video_st->codec->sample_aspect_ratio.num == 0)
647 aspect_ratio = 0;
648 else
649 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
650 * is->video_st->codec->width / is->video_st->codec->height;;
651 if (aspect_ratio <= 0.0)
652 aspect_ratio = (float)is->video_st->codec->width /
653 (float)is->video_st->codec->height;
654 /* if an active format is indicated, then it overrides the
655 mpeg format */
656 #if 0
657 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
658 is->dtg_active_format = is->video_st->codec->dtg_active_format;
659 printf("dtg_active_format=%d\n", is->dtg_active_format);
660 }
661 #endif
662 #if 0
663 switch(is->video_st->codec->dtg_active_format) {
664 case FF_DTG_AFD_SAME:
665 default:
666 /* nothing to do */
667 break;
668 case FF_DTG_AFD_4_3:
669 aspect_ratio = 4.0 / 3.0;
670 break;
671 case FF_DTG_AFD_16_9:
672 aspect_ratio = 16.0 / 9.0;
673 break;
674 case FF_DTG_AFD_14_9:
675 aspect_ratio = 14.0 / 9.0;
676 break;
677 case FF_DTG_AFD_4_3_SP_14_9:
678 aspect_ratio = 14.0 / 9.0;
679 break;
680 case FF_DTG_AFD_16_9_SP_14_9:
681 aspect_ratio = 14.0 / 9.0;
682 break;
683 case FF_DTG_AFD_SP_4_3:
684 aspect_ratio = 4.0 / 3.0;
685 break;
686 }
687 #endif
688
689 if (is->subtitle_st)
690 {
691 if (is->subpq_size > 0)
692 {
693 sp = &is->subpq[is->subpq_rindex];
694
695 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
696 {
697 SDL_LockYUVOverlay (vp->bmp);
698
699 pict.data[0] = vp->bmp->pixels[0];
700 pict.data[1] = vp->bmp->pixels[2];
701 pict.data[2] = vp->bmp->pixels[1];
702
703 pict.linesize[0] = vp->bmp->pitches[0];
704 pict.linesize[1] = vp->bmp->pitches[2];
705 pict.linesize[2] = vp->bmp->pitches[1];
706
707 for (i = 0; i < sp->sub.num_rects; i++)
708 blend_subrect(&pict, &sp->sub.rects[i]);
709
710 SDL_UnlockYUVOverlay (vp->bmp);
711 }
712 }
713 }
714
715
716 /* XXX: we suppose the screen has a 1.0 pixel ratio */
717 height = is->height;
718 width = ((int)rint(height * aspect_ratio)) & -3;
719 if (width > is->width) {
720 width = is->width;
721 height = ((int)rint(width / aspect_ratio)) & -3;
722 }
723 x = (is->width - width) / 2;
724 y = (is->height - height) / 2;
725 if (!is->no_background) {
726 /* fill the background */
727 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
728 } else {
729 is->no_background = 0;
730 }
731 rect.x = is->xleft + x;
732 rect.y = is->ytop + y;
733 rect.w = width;
734 rect.h = height;
735 SDL_DisplayYUVOverlay(vp->bmp, &rect);
736 } else {
737 #if 0
738 fill_rectangle(screen,
739 is->xleft, is->ytop, is->width, is->height,
740 QERGB(0x00, 0x00, 0x00));
741 #endif
742 }
743 }
744
/* Mathematical modulo: result is always non-negative for positive b. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return (r >= 0) ? r : r + b;
}
753
754 static void video_audio_display(VideoState *s)
755 {
756 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
757 int ch, channels, h, h2, bgcolor, fgcolor;
758 int16_t time_diff;
759
760 /* compute display index : center on currently output samples */
761 channels = s->audio_st->codec->channels;
762 nb_display_channels = channels;
763 if (!s->paused) {
764 n = 2 * channels;
765 delay = audio_write_get_buf_size(s);
766 delay /= n;
767
768 /* to be more precise, we take into account the time spent since
769 the last buffer computation */
770 if (audio_callback_time) {
771 time_diff = av_gettime() - audio_callback_time;
772 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
773 }
774
775 delay -= s->width / 2;
776 if (delay < s->width)
777 delay = s->width;
778
779 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
780
781 h= INT_MIN;
782 for(i=0; i<1000; i+=channels){
783 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
784 int a= s->sample_array[idx];
785 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
786 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
787 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
788 int score= a-d;
789 if(h<score && (b^c)<0){
790 h= score;
791 i_start= idx;
792 }
793 }
794
795 s->last_i_start = i_start;
796 } else {
797 i_start = s->last_i_start;
798 }
799
800 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
801 fill_rectangle(screen,
802 s->xleft, s->ytop, s->width, s->height,
803 bgcolor);
804
805 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
806
807 /* total height for one channel */
808 h = s->height / nb_display_channels;
809 /* graph height / 2 */
810 h2 = (h * 9) / 20;
811 for(ch = 0;ch < nb_display_channels; ch++) {
812 i = i_start + ch;
813 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
814 for(x = 0; x < s->width; x++) {
815 y = (s->sample_array[i] * h2) >> 15;
816 if (y < 0) {
817 y = -y;
818 ys = y1 - y;
819 } else {
820 ys = y1;
821 }
822 fill_rectangle(screen,
823 s->xleft + x, ys, 1, y,
824 fgcolor);
825 i += channels;
826 if (i >= SAMPLE_ARRAY_SIZE)
827 i -= SAMPLE_ARRAY_SIZE;
828 }
829 }
830
831 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
832
833 for(ch = 1;ch < nb_display_channels; ch++) {
834 y = s->ytop + ch * h;
835 fill_rectangle(screen,
836 s->xleft, y, s->width, 1,
837 fgcolor);
838 }
839 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
840 }
841
/* (Re)open the SDL output window.  Size is chosen in order of preference:
   full-screen dimensions, the user-forced screen size, the codec's picture
   size, then a 640x480 fallback.  Returns 0 on success, -1 if SDL cannot
   set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef CONFIG_DARWIN
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    /* remember the size SDL actually gave us */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
879
880 /* display the current picture, if any */
881 static void video_display(VideoState *is)
882 {
883 if(!screen)
884 video_open(cur_stream);
885 if (is->audio_st && is->show_audio)
886 video_audio_display(is);
887 else if (is->video_st)
888 video_image_display(is);
889 }
890
891 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
892 {
893 SDL_Event event;
894 event.type = FF_REFRESH_EVENT;
895 event.user.data1 = opaque;
896 SDL_PushEvent(&event);
897 return 0; /* 0 means stop timer */
898 }
899
900 /* schedule a video refresh in 'delay' ms */
901 static void schedule_refresh(VideoState *is, int delay)
902 {
903 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
904 }
905
906 /* get the current audio clock value */
907 static double get_audio_clock(VideoState *is)
908 {
909 double pts;
910 int hw_buf_size, bytes_per_sec;
911 pts = is->audio_clock;
912 hw_buf_size = audio_write_get_buf_size(is);
913 bytes_per_sec = 0;
914 if (is->audio_st) {
915 bytes_per_sec = is->audio_st->codec->sample_rate *
916 2 * is->audio_st->codec->channels;
917 }
918 if (bytes_per_sec)
919 pts -= (double)hw_buf_size / bytes_per_sec;
920 return pts;
921 }
922
923 /* get the current video clock value */
924 static double get_video_clock(VideoState *is)
925 {
926 double delta;
927 if (is->paused) {
928 delta = 0;
929 } else {
930 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
931 }
932 return is->video_current_pts + delta;
933 }
934
935 /* get the current external clock value */
936 static double get_external_clock(VideoState *is)
937 {
938 int64_t ti;
939 ti = av_gettime();
940 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
941 }
942
943 /* get the current master clock value */
944 static double get_master_clock(VideoState *is)
945 {
946 double val;
947
948 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
949 if (is->video_st)
950 val = get_video_clock(is);
951 else
952 val = get_audio_clock(is);
953 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
954 if (is->audio_st)
955 val = get_audio_clock(is);
956 else
957 val = get_video_clock(is);
958 } else {
959 val = get_external_clock(is);
960 }
961 return val;
962 }
963
964 /* seek in the stream */
965 static void stream_seek(VideoState *is, int64_t pos, int rel)
966 {
967 if (!is->seek_req) {
968 is->seek_pos = pos;
969 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
970 if (seek_by_bytes)
971 is->seek_flags |= AVSEEK_FLAG_BYTE;
972 is->seek_req = 1;
973 }
974 }
975
976 /* pause or resume the video */
977 static void stream_pause(VideoState *is)
978 {
979 is->paused = !is->paused;
980 if (!is->paused) {
981 is->video_current_pts = get_video_clock(is);
982 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
983 }
984 }
985
/* Called for each FF_REFRESH_EVENT: display the next queued picture (and
   retire expired subtitles), compute when the following refresh is due from
   pts deltas and the master clock, and re-arm the one-shot timer.  Also
   prints the periodic status line when -stats is enabled. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* compute nominal delay */
            delay = vp->pts - is->frame_last_pts;
            if (delay <= 0 || delay >= 1.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to follow master synchronisation source */
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
                /* if video is slave, we try to correct big delays by
                   duplicating or deleting a frame */
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;

                /* skip or repeat frame. We take into account the
                   delay to compute the threshold. I still don't know
                   if it is the best guess */
                sync_threshold = AV_SYNC_THRESHOLD;
                if (delay > sync_threshold)
                    sync_threshold = delay;
                /* only correct within the no-sync window; beyond it the
                   clocks are resynced elsewhere */
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if (diff <= -sync_threshold)
                        delay = 0;
                    else if (diff >= sync_threshold)
                        delay = 2 * delay;
                }
            }

            is->frame_timer += delay;
            /* compute the REAL delay (we need to do that to avoid
               long term errors */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if (actual_delay < 0.010) {
                /* XXX: should skip picture */
                actual_delay = 0.010;
            }
            /* launch timer for next picture */
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

#if defined(DEBUG_SYNC)
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
                   delay, actual_delay, vp->pts, -diff);
#endif

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: drop the whole queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the head subpicture when its end time has
                           passed or the next one is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh the status line at most every 500ms */
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1150
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
/* Triggered via FF_ALLOC_EVENT from queue_picture(): (re)create the SDL YUV
   overlay for the queue's write slot at the codec's current size, then set
   vp->allocated and signal pictq_cond so the waiting decode thread
   continues. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if 0
    /* XXX: use generic function */
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
    switch(is->video_st->codec->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUYV422:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        is_yuv = 1;
        break;
    default:
        is_yuv = 0;
        break;
    }
#endif
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    /* hand the slot back to the decode thread blocked in queue_picture() */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1192
/**
 * Queue a decoded frame for display: convert it into the SDL YUV overlay
 * of the next write slot and advance the picture queue. Blocks until a
 * slot is free; hands overlay (re)allocation off to the main thread when
 * the frame size changed.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;
    AVPicture pict;
    static struct SwsContext *img_convert_ctx;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        /* SDL YV12 stores planes in Y,V,U order while AVPicture expects
           Y,U,V -- hence the swapped [1]/[2] indices below */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        /* lazily create the software scaler context; NOTE(review): it is
           created once and never resized, so a mid-stream resolution change
           would reuse a stale context -- confirm size is fixed per stream */
        if (img_convert_ctx == NULL) {
            img_convert_ctx = sws_getContext(is->video_st->codec->width,
                    is->video_st->codec->height, is->video_st->codec->pix_fmt,
                    is->video_st->codec->width, is->video_st->codec->height,
                    dst_pix_fmt, sws_flags, NULL, NULL, NULL);
            if (img_convert_ctx == NULL) {
                fprintf(stderr, "Cannot initialize the conversion context\n");
                exit(1);
            }
        }
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1281
1282 /**
1283 * compute the exact PTS for the picture if it is omitted in the stream
1284 * @param pts1 the dts of the pkt / pts of the frame
1285 */
1286 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1287 {
1288 double frame_delay, pts;
1289
1290 pts = pts1;
1291
1292 if (pts != 0) {
1293 /* update video clock with pts, if present */
1294 is->video_clock = pts;
1295 } else {
1296 pts = is->video_clock;
1297 }
1298 /* update video clock for next frame */
1299 frame_delay = av_q2d(is->video_st->codec->time_base);
1300 /* for MPEG2, the frame can be repeated, so we update the
1301 clock accordingly */
1302 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1303 is->video_clock += frame_delay;
1304
1305 #if defined(DEBUG_SYNC) && 0
1306 {
1307 int ftype;
1308 if (src_frame->pict_type == FF_B_TYPE)
1309 ftype = 'B';
1310 else if (src_frame->pict_type == FF_I_TYPE)
1311 ftype = 'I';
1312 else
1313 ftype = 'P';
1314 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1315 ftype, pts, pts1);
1316 }
1317 #endif
1318 return queue_picture(is, src_frame, pts);
1319 }
1320
1321 static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;
1322
1323 static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
1324 int ret= avcodec_default_get_buffer(c, pic);
1325 uint64_t *pts= av_malloc(sizeof(uint64_t));
1326 *pts= global_video_pkt_pts;
1327 pic->opaque= pts;
1328 return ret;
1329 }
1330
1331 static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
1332 if(pic) av_freep(&pic->opaque);
1333 avcodec_default_release_buffer(c, pic);
1334 }
1335
1336 static int video_thread(void *arg)
1337 {
1338 VideoState *is = arg;
1339 AVPacket pkt1, *pkt = &pkt1;
1340 int len1, got_picture;
1341 AVFrame *frame= avcodec_alloc_frame();
1342 double pts;
1343
1344 for(;;) {
1345 while (is->paused && !is->videoq.abort_request) {
1346 SDL_Delay(10);
1347 }
1348 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1349 break;
1350
1351 if(pkt->data == flush_pkt.data){
1352 avcodec_flush_buffers(is->video_st->codec);
1353 continue;
1354 }
1355
1356 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1357 this packet, if any */
1358 global_video_pkt_pts= pkt->pts;
1359 len1 = avcodec_decode_video(is->video_st->codec,
1360 frame, &got_picture,
1361 pkt->data, pkt->size);
1362
1363 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1364 && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
1365 pts= *(uint64_t*)frame->opaque;
1366 else if(pkt->dts != AV_NOPTS_VALUE)
1367 pts= pkt->dts;
1368 else
1369 pts= 0;
1370 pts *= av_q2d(is->video_st->time_base);
1371
1372 // if (len1 < 0)
1373 // break;
1374 if (got_picture) {
1375 if (output_picture2(is, frame, pts) < 0)
1376 goto the_end;
1377 }
1378 av_free_packet(pkt);
1379 if (step)
1380 if (cur_stream)
1381 stream_pause(cur_stream);
1382 }
1383 the_end:
1384 av_free(frame);
1385 return 0;
1386 }
1387
1388 static int subtitle_thread(void *arg)
1389 {
1390 VideoState *is = arg;
1391 SubPicture *sp;
1392 AVPacket pkt1, *pkt = &pkt1;
1393 int len1, got_subtitle;
1394 double pts;
1395 int i, j;
1396 int r, g, b, y, u, v, a;
1397
1398 for(;;) {
1399 while (is->paused && !is->subtitleq.abort_request) {
1400 SDL_Delay(10);
1401 }
1402 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1403 break;
1404
1405 if(pkt->data == flush_pkt.data){
1406 avcodec_flush_buffers(is->subtitle_st->codec);
1407 continue;
1408 }
1409 SDL_LockMutex(is->subpq_mutex);
1410 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1411 !is->subtitleq.abort_request) {
1412 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1413 }
1414 SDL_UnlockMutex(is->subpq_mutex);
1415
1416 if (is->subtitleq.abort_request)
1417 goto the_end;
1418
1419 sp = &is->subpq[is->subpq_windex];
1420
1421 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1422 this packet, if any */
1423 pts = 0;
1424 if (pkt->pts != AV_NOPTS_VALUE)
1425 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1426
1427 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1428 &sp->sub, &got_subtitle,
1429 pkt->data, pkt->size);
1430 // if (len1 < 0)
1431 // break;
1432 if (got_subtitle && sp->sub.format == 0) {
1433 sp->pts = pts;
1434
1435 for (i = 0; i < sp->sub.num_rects; i++)
1436 {
1437 for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1438 {
1439 RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1440 y = RGB_TO_Y_CCIR(r, g, b);
1441 u = RGB_TO_U_CCIR(r, g, b, 0);
1442 v = RGB_TO_V_CCIR(r, g, b, 0);
1443 YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1444 }
1445 }
1446
1447 /* now we can update the picture count */
1448 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1449 is->subpq_windex = 0;
1450 SDL_LockMutex(is->subpq_mutex);
1451 is->subpq_size++;
1452 SDL_UnlockMutex(is->subpq_mutex);
1453 }
1454 av_free_packet(pkt);
1455 // if (step)
1456 // if (cur_stream)
1457 // stream_pause(cur_stream);
1458 }
1459 the_end:
1460 return 0;
1461 }
1462
1463 /* copy samples for viewing in editor window */
1464 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1465 {
1466 int size, len, channels;
1467
1468 channels = is->audio_st->codec->channels;
1469
1470 size = samples_size / sizeof(short);
1471 while (size > 0) {
1472 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1473 if (len > size)
1474 len = size;
1475 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1476 samples += len;
1477 is->sample_array_index += len;
1478 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1479 is->sample_array_index = 0;
1480 size -= len;
1481 }
1482 }
1483
1484 /* return the new audio buffer size (samples can be added or deleted
1485 to get better sync if video or external master clock) */
1486 static int synchronize_audio(VideoState *is, short *samples,
1487 int samples_size1, double pts)
1488 {
1489 int n, samples_size;
1490 double ref_clock;
1491
1492 n = 2 * is->audio_st->codec->channels;
1493 samples_size = samples_size1;
1494
1495 /* if not master, then we try to remove or add samples to correct the clock */
1496 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1497 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1498 double diff, avg_diff;
1499 int wanted_size, min_size, max_size, nb_samples;
1500
1501 ref_clock = get_master_clock(is);
1502 diff = get_audio_clock(is) - ref_clock;
1503
1504 if (diff < AV_NOSYNC_THRESHOLD) {
1505 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1506 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1507 /* not enough measures to have a correct estimate */
1508 is->audio_diff_avg_count++;
1509 } else {
1510 /* estimate the A-V difference */
1511 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1512
1513 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1514 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1515 nb_samples = samples_size / n;
1516
1517 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1518 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1519 if (wanted_size < min_size)
1520 wanted_size = min_size;
1521 else if (wanted_size > max_size)
1522 wanted_size = max_size;
1523
1524 /* add or remove samples to correction the synchro */
1525 if (wanted_size < samples_size) {
1526 /* remove samples */
1527 samples_size = wanted_size;
1528 } else if (wanted_size > samples_size) {
1529 uint8_t *samples_end, *q;
1530 int nb;
1531
1532 /* add samples */
1533 nb = (samples_size - wanted_size);
1534 samples_end = (uint8_t *)samples + samples_size - n;
1535 q = samples_end + n;
1536 while (nb > 0) {
1537 memcpy(q, samples_end, n);
1538 q += n;
1539 nb -= n;
1540 }
1541 samples_size = wanted_size;
1542 }
1543 }
1544 #if 0
1545 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1546 diff, avg_diff, samples_size - samples_size1,
1547 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1548 #endif
1549 }
1550 } else {
1551 /* too big difference : may be initial PTS errors, so
1552 reset A-V filter */
1553 is->audio_diff_avg_count = 0;
1554 is->audio_diff_cum = 0;
1555 }
1556 }
1557
1558 return samples_size;
1559 }
1560
/* decode one audio frame and returns its uncompressed size.
   Keeps decoding state in is->audio_pkt_data / audio_pkt_size so one
   compressed packet can yield several calls; also advances the running
   audio clock. Returns -1 on pause or abort. */
static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)
{
    AVPacket *pkt = &is->audio_pkt;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (is->audio_pkt_size > 0) {
            data_size = buf_size;
            len1 = avcodec_decode_audio2(is->audio_st->codec,
                                        (int16_t *)audio_buf, &data_size,
                                        is->audio_pkt_data, is->audio_pkt_size);
            if (len1 < 0) {
                /* if error, we skip the frame */
                is->audio_pkt_size = 0;
                break;
            }

            /* consume the decoded bytes from the packet remainder */
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0)
                continue;
            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* advance the clock by the duration of the decoded samples
               (2 bytes per sample times channel count) */
            n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * is->audio_st->codec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet signals a seek: reset the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->audio_st->codec);
            continue;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
1628
1629 /* get the current audio output buffer size, in samples. With SDL, we
1630 cannot have a precise information */
1631 static int audio_write_get_buf_size(VideoState *is)
1632 {
1633 return is->audio_buf_size - is->audio_buf_index;
1634 }
1635
1636
1637 /* prepare a new audio buffer */
1638 void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1639 {
1640 VideoState *is = opaque;
1641 int audio_size, len1;
1642 double pts;
1643
1644 audio_callback_time = av_gettime();
1645
1646 while (len > 0) {
1647 if (is->audio_buf_index >= is->audio_buf_size) {
1648 audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
1649 if (audio_size < 0) {
1650 /* if error, just output silence */
1651 is->audio_buf_size = 1024;
1652 memset(is->audio_buf, 0, is->audio_buf_size);
1653 } else {
1654 if (is->show_audio)
1655 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1656 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1657 pts);
1658 is->audio_buf_size = audio_size;
1659 }
1660 is->audio_buf_index = 0;
1661 }
1662 len1 = is->audio_buf_size - is->audio_buf_index;
1663 if (len1 > len)
1664 len1 = len;
1665 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1666 len -= len1;
1667 stream += len1;
1668 is->audio_buf_index += len1;
1669 }
1670 }
1671
/* open a given stream. Return 0 if OK.
   For audio streams this also opens the SDL audio device and starts
   playback; for video/subtitle streams it spawns the decode thread. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        /* hack for AC3. XXX: suppress that */
        if (enc->channels > 2)
            enc->channels = 2;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
    }

    codec = avcodec_find_decoder(enc->codec_id);
    /* propagate the global debug / speed-hack command line options */
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_resilience= error_resilience;
    enc->error_concealment= error_concealment;
    /* NOTE(review): if this fails for an audio stream, the SDL audio
       device opened above stays open -- confirm that is intended */
    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;
    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);

        /* install PTS-tagging buffer callbacks (see my_get_buffer) */
        enc-> get_buffer= my_get_buffer;
        enc->release_buffer= my_release_buffer;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
1766
/* shut down one open stream: abort and drain its packet queue, join its
   decode thread (video/subtitle), close the codec and clear the
   per-stream state in VideoState */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    avcodec_close(enc);
    /* mark the stream slot as unused */
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
1834
1835 static void dump_stream_info(const AVFormatContext *s)
1836 {
1837 if (s->track != 0)
1838 fprintf(stderr, "Track: %d\n", s->track);
1839 if (s->title[0] != '\0')
1840 fprintf(stderr, "Title: %s\n", s->title);
1841 if (s->author[0] != '\0')
1842 fprintf(stderr, "Author: %s\n", s->author);
1843 if (s->copyright[0] != '\0')
1844 fprintf(stderr, "Copyright: %s\n", s->copyright);
1845 if (s->comment[0] != '\0')
1846 fprintf(stderr, "Comment: %s\n", s->comment);
1847 if (s->album[0] != '\0')
1848 fprintf(stderr, "Album: %s\n", s->album);
1849 if (s->year != 0)
1850 fprintf(stderr, "Year: %d\n", s->year);
1851 if (s->genre[0] != '\0')
1852 fprintf(stderr, "Genre: %s\n", s->genre);
1853 }
1854
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable; read by
   decode_interrupt_cb() to abort blocking I/O during shutdown */
static VideoState *global_video_state;
1858
1859 static int decode_interrupt_cb(void)
1860 {
1861 return (global_video_state && global_video_state->abort_request);
1862 }
1863
/* this thread gets the stream from the disk or the network.
   It opens the input, picks and opens the audio/video streams, then runs
   the demux loop: handle pause/seek requests and dispatch packets to the
   per-stream queues until abort is requested. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, use_play;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow decode_interrupt_cb() to abort blocking I/O on this state */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));
    ap->initial_pause = 1; /* we force a pause when starting an RTSP
                              stream */

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;
#ifdef CONFIG_RTSP_DEMUXER
    use_play = (ic->iformat == &rtsp_demuxer);
#else
    use_play = 0;
#endif

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    /* for non-RTSP inputs, probe codec parameters before starting */
    if (!use_play) {
        err = av_find_stream_info(ic);
        if (err < 0) {
            fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
            ret = -1;
            goto fail;
        }
        ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
    }

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* now we can begin to play (RTSP stream only) */
    av_read_play(ic);

    /* RTSP inputs can only be probed once playing */
    if (use_play) {
        err = av_find_stream_info(ic);
        if (err < 0) {
            fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
            ret = -1;
            goto fail;
        }
    }

    /* select the audio and video streams to play, honoring the
       -ast/-vst style stream choices and the disable flags */
    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
                video_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        /* no video: fall back to the audio waveform display */
        if (!display_disable)
            is->show_audio = 1;
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause state changes to the demuxer */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#ifdef CONFIG_RTSP_DEMUXER
        if (is->paused && ic->iformat == &rtsp_demuxer) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        /* execute a pending seek, then flush all packet queues and push
           flush packets so the decoders reset their state */
        if (is->seek_req) {
            int stream_index= -1;
            int64_t seek_target= is->seek_pos;

            if (is-> video_stream >= 0) stream_index= is-> video_stream;
            else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
            else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;

            if(stream_index>=0){
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
            }

            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queue are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
            url_feof(&ic->pb)) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            /* read failure without an I/O error is treated as EOF:
               idle and keep serving user events */
            if (url_ferror(&ic->pb) == 0) {
                SDL_Delay(100); /* wait for user event */
                continue;
            } else
                break;
        }
        /* dispatch the packet to the owning stream's queue */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* tell the main loop we failed so it can quit */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2091
2092 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2093 {
2094 VideoState *is;
2095
2096 is = av_mallocz(sizeof(VideoState));
2097 if (!is)
2098 return NULL;
2099 av_strlcpy(is->filename, filename, sizeof(is->filename));
2100 is->iformat = iformat;
2101 is->ytop = 0;
2102 is->xleft = 0;
2103
2104 /* start video display */
2105 is->pictq_mutex = SDL_CreateMutex();
2106 is->pictq_cond = SDL_CreateCond();
2107
2108 is->subpq_mutex = SDL_CreateMutex();
2109 is->subpq_cond = SDL_CreateCond();
2110
2111 /* add the refresh timer to draw the picture */
2112 schedule_refresh(is, 40);
2113
2114 is->av_sync_type = av_sync_type;
2115 is->parse_tid = SDL_CreateThread(decode_thread, is);
2116 if (!is->parse_tid) {
2117 av_free(is);
2118 return NULL;
2119 }
2120 return is;
2121 }
2122
2123 static void stream_close(VideoState *is)
2124 {
2125 VideoPicture *vp;
2126 int i;
2127 /* XXX: use a special url_shutdown call to abort parse cleanly */
2128 is->abort_request = 1;
2129 SDL_WaitThread(is->parse_tid, NULL);
2130
2131 /* free all pictures */
2132 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2133 vp = &is->pictq[i];
2134 if (vp->bmp) {
2135 SDL_FreeYUVOverlay(vp->bmp);
2136 vp->bmp = NULL;
2137 }
2138 }
2139 SDL_DestroyMutex(is->pictq_mutex);
2140 SDL_DestroyCond(is->pictq_cond);
2141 SDL_DestroyMutex(is->subpq_mutex);
2142 SDL_DestroyCond(is->subpq_cond);
2143 }
2144
/* switch to the next stream of the given type; for subtitles, cycling
   past the last stream disables subtitles (index -1) */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* subtitles may legitimately start from the "disabled" index (-1) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                /* wrapped past the last subtitle stream: disable subtitles */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came full circle without a usable candidate */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2193
2194
2195 static void toggle_full_screen(void)
2196 {
2197 is_full_screen = !is_full_screen;
2198 if (!fs_screen_width) {
2199 /* use default SDL method */
2200 // SDL_WM_ToggleFullScreen(screen);
2201 }
2202 video_open(cur_stream);
2203 }
2204
2205 static void toggle_pause(void)
2206 {
2207 if (cur_stream)
2208 stream_pause(cur_stream);
2209 step = 0;
2210 }
2211
2212 static void step_to_next_frame(void)
2213 {
2214 if (cur_stream) {
2215 if (cur_stream->paused)
2216 cur_stream->paused=0;
2217 cur_stream->video_current_pts = get_video_clock(cur_stream);
2218 }
2219 step = 1;
2220 }
2221
2222 static void do_exit(void)
2223 {
2224 if (cur_stream) {
2225 stream_close(cur_stream);
2226 cur_stream = NULL;
2227 }
2228 if (show_status)
2229 printf("\n");
2230 SDL_Quit();
2231 exit(0);
2232 }
2233
2234 static void toggle_audio_display(void)
2235 {
2236 if (cur_stream) {
2237 cur_stream->show_audio = !cur_stream->show_audio;
2238 }
2239 }
2240
/* handle an event sent by the GUI: runs forever dispatching keyboard,
   mouse, resize and internal FF_* events */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                /* cycle audio channel */
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                /* cycle video channel */
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                /* cycle subtitle channel */
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys seek relative to the current position; 'incr'
               is in seconds (or converted to bytes when seek_by_bytes) */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* approximate a byte offset from the bit rate */
                        pos = url_ftell(&cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            /* click position maps proportionally to a position in the file */
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* overlay allocation request from queue_picture() */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
2355
2356 static void opt_frame_size(const char *arg)
2357 {
2358 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2359 fprintf(stderr, "Incorrect frame size\n");
2360 exit(1);
2361 }
2362 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2363 fprintf(stderr, "Frame size must be a multiple of 2\n");
2364 exit(1);
2365 }
2366 }
2367
2368 static void opt_width(const char *arg)
2369 {
2370 screen_width = atoi(arg);
2371 if(screen_width<=0){
2372 fprintf(stderr, "invalid width\n");
2373 exit(1);
2374 }
2375 }
2376
2377 static void opt_height(const char *arg)
2378 {
2379 screen_height = atoi(arg);
2380 if(screen_height<=0){
2381 fprintf(stderr, "invalid height\n");
2382 exit(1);
2383 }
2384 }
2385
2386 static void opt_format(const char *arg)
2387 {
2388 file_iformat = av_find_input_format(arg);
2389 if (!file_iformat) {
2390 fprintf(stderr, "Unknown input format: %s\n", arg);
2391 exit(1);
2392 }
2393 }
2394
2395 static void opt_frame_pix_fmt(const char *arg)
2396 {
2397 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2398 }
2399
#ifdef CONFIG_RTSP_DEMUXER
/* "-rtp_tcp": restrict the RTSP transport protocol mask to RTP over TCP. */
static void opt_rtp_tcp(void)
{
    /* only tcp protocol */
    rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
}
#endif
2407
2408 static void opt_sync(const char *arg)
2409 {
2410 if (!strcmp(arg, "audio"))
2411 av_sync_type = AV_SYNC_AUDIO_MASTER;
2412 else if (!strcmp(arg, "video"))
2413 av_sync_type = AV_SYNC_VIDEO_MASTER;
2414 else if (!strcmp(arg, "ext"))
2415 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2416 else
2417 show_help();
2418 }
2419
/* "-ss": set the start position, parsed by parse_date()
   (second argument 1 => treat the string as a duration). */
static void opt_seek(const char *arg)
{
    start_time = parse_date(arg, 1);
}
2424
/* "-debug": raise the libav log level to maximum and store the numeric
   debug flag value given on the command line. */
static void opt_debug(const char *arg)
{
    av_log_level = 99;
    debug = atoi(arg);
}
2430
/* "-vismv": store the motion-vector visualization flags from the
   command line (numeric bit mask). */
static void opt_vismv(const char *arg)
{
    debug_mv = atoi(arg);
}
2435
/* "-threads": set the decoder thread count; warn when the binary was
   built without real thread support. */
static void opt_thread_count(const char *arg)
{
    thread_count= atoi(arg);
#if !defined(HAVE_THREADS)
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
}
2443
2444 const OptionDef options[] = {
2445 { "h", 0, {(void*)show_help}, "show help" },
2446 { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2447 { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2448 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2449 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2450 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2451 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2452 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2453 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
2454 { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2455 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2456 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2457 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2458 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2459 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2460 { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2461 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2462 { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2463 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2464 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2465 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2466 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2467 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2468 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2469 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2470 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2471 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2472 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2473 #ifdef CONFIG_RTSP_DEMUXER
2474 { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
2475 #endif
2476 { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2477 { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2478 { NULL, },
2479 };
2480
2481 void show_help(void)
2482 {
2483 printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003-2007 Fabrice Bellard, et al.\n"
2484 "usage: ffplay [options] input_file\n"
2485 "Simple media player\n");
2486 printf("\n");
2487 show_help_options(options, "Main options:\n",
2488 OPT_EXPERT, 0);
2489 show_help_options(options, "\nAdvanced options:\n",
2490 OPT_EXPERT, OPT_EXPERT);
2491 printf("\nWhile playing:\n"
2492 "q, ESC quit\n"
2493 "f toggle full screen\n"
2494 "p, SPC pause\n"
2495 "a cycle audio channel\n"
2496 "v cycle video channel\n"
2497 "t cycle subtitle channel\n"
2498 "w show audio waves\n"
2499 "left/right seek backward/forward 10 seconds\n"
2500 "down/up seek backward/forward 1 minute\n"
2501 "mouse click seek to percentage in file corresponding to fraction of width\n"
2502 );
2503 exit(1);
2504 }
2505
2506 void parse_arg_file(const char *filename)
2507 {
2508 if (!strcmp(filename, "-"))
2509 filename = "pipe:";
2510 input_filename = filename;
2511 }
2512
/* Called from the main */
/* Program entry point: parse options, initialize SDL, open the stream and
   hand control to event_loop(), which never returns. */
int main(int argc, char **argv)
{
    int flags;

    /* register all codecs, demux and protocols */
    av_register_all();

    parse_options(argc, argv, options);

    /* an input file is mandatory; show_help() exits */
    if (!input_filename)
        show_help();

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(CONFIG_DARWIN)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 or darwin */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#ifdef HAVE_SDL_VIDEO_SIZE
        /* remember the desktop resolution for full-screen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event types the player does not handle */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet used to flush the decoder queues on seek */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}