Move output_example.c and ffplay.c to the swscale interface
[libav.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19 #define HAVE_AV_CONFIG_H
20 #include "avformat.h"
21 #include "swscale.h"
22
23 #include "version.h"
24 #include "cmdutils.h"
25
26 #include <SDL.h>
27 #include <SDL_thread.h>
28
29 #ifdef __MINGW32__
30 #undef main /* We don't want SDL to override our main() */
31 #endif
32
33 #ifdef CONFIG_OS2
34 #define INCL_DOS
35 #include <os2.h>
36 #include <stdio.h>
37
38 void MorphToPM()
39 {
40 PPIB pib;
41 PTIB tib;
42
43 DosGetInfoBlocks(&tib, &pib);
44
45 // Change flag from VIO to PM:
46 if (pib->pib_ultype==2) pib->pib_ultype = 3;
47 }
48 #endif
49
50 //#define DEBUG_SYNC
51
52 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
53 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
54 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
55
56 /* SDL audio buffer size, in samples. Should be small to have precise
57 A/V sync as SDL does not have hardware buffer fullness info. */
58 #define SDL_AUDIO_BUFFER_SIZE 1024
59
60 /* no AV sync correction is done if below the AV sync threshold */
61 #define AV_SYNC_THRESHOLD 0.01
62 /* no AV correction is done if too big error */
63 #define AV_NOSYNC_THRESHOLD 10.0
64
65 /* maximum audio speed change to get correct sync */
66 #define SAMPLE_CORRECTION_PERCENT_MAX 10
67
68 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
69 #define AUDIO_DIFF_AVG_NB 20
70
71 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
72 #define SAMPLE_ARRAY_SIZE (2*65536)
73
74 static int sws_flags = SWS_BICUBIC;
75
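/* A PacketQueue is a thread-safe FIFO of demuxed AVPackets shared between the
   demuxer thread and one decoder thread. 'size' counts the queued payload bytes
   (checked against MAX_*Q_SIZE to throttle reading) and 'abort_request' wakes
   up and stops any blocked consumer. */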
76 typedef struct PacketQueue {
77 AVPacketList *first_pkt, *last_pkt;
78 int nb_packets;
79 int size;
80 int abort_request;
81 SDL_mutex *mutex;
82 SDL_cond *cond;
83 } PacketQueue;
84
85 #define VIDEO_PICTURE_QUEUE_SIZE 1
86 #define SUBPICTURE_QUEUE_SIZE 4
87
88 typedef struct VideoPicture {
89 double pts; ///<presentation time stamp for this picture
90 SDL_Overlay *bmp;
91 int width, height; /* source height & width */
92 int allocated;
93 } VideoPicture;
94
95 typedef struct SubPicture {
96 double pts; /* presentation time stamp for this picture */
97 AVSubtitle sub;
98 } SubPicture;
99
100 enum {
101 AV_SYNC_AUDIO_MASTER, /* default choice */
102 AV_SYNC_VIDEO_MASTER,
103 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
104 };
105
106 typedef struct VideoState {
107 SDL_Thread *parse_tid;
108 SDL_Thread *video_tid;
109 AVInputFormat *iformat;
110 int no_background;
111 int abort_request;
112 int paused;
113 int last_paused;
114 int seek_req;
115 int seek_flags;
116 int64_t seek_pos;
117 AVFormatContext *ic;
118 int dtg_active_format;
119
120 int audio_stream;
121
122 int av_sync_type;
123 double external_clock; /* external clock base */
124 int64_t external_clock_time;
125
126 double audio_clock;
127 double audio_diff_cum; /* used for AV difference average computation */
128 double audio_diff_avg_coef;
129 double audio_diff_threshold;
130 int audio_diff_avg_count;
131 AVStream *audio_st;
132 PacketQueue audioq;
133 int audio_hw_buf_size;
134 /* samples output by the codec. we reserve more space for avsync
135 compensation */
136 uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
137 unsigned int audio_buf_size; /* in bytes */
138 int audio_buf_index; /* in bytes */
139 AVPacket audio_pkt;
140 uint8_t *audio_pkt_data;
141 int audio_pkt_size;
142
143 int show_audio; /* if true, display audio samples */
144 int16_t sample_array[SAMPLE_ARRAY_SIZE];
145 int sample_array_index;
146 int last_i_start;
147
148 SDL_Thread *subtitle_tid;
149 int subtitle_stream;
150 int subtitle_stream_changed;
151 AVStream *subtitle_st;
152 PacketQueue subtitleq;
153 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
154 int subpq_size, subpq_rindex, subpq_windex;
155 SDL_mutex *subpq_mutex;
156 SDL_cond *subpq_cond;
157
158 double frame_timer;
159 double frame_last_pts;
160 double frame_last_delay;
161 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
162 int video_stream;
163 AVStream *video_st;
164 PacketQueue videoq;
165 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
166 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
167 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
168 int pictq_size, pictq_rindex, pictq_windex;
169 SDL_mutex *pictq_mutex;
170 SDL_cond *pictq_cond;
171
172 SDL_mutex *video_decoder_mutex;
173 SDL_mutex *audio_decoder_mutex;
174 SDL_mutex *subtitle_decoder_mutex;
175
176 // QETimer *video_timer;
177 char filename[1024];
178 int width, height, xleft, ytop;
179 } VideoState;
180
181 void show_help(void);
182 static int audio_write_get_buf_size(VideoState *is);
183
184 /* options specified by the user */
185 static AVInputFormat *file_iformat;
186 static AVImageFormat *image_format;
187 static const char *input_filename;
188 static int fs_screen_width;
189 static int fs_screen_height;
190 static int screen_width = 640;
191 static int screen_height = 480;
192 static int audio_disable;
193 static int video_disable;
194 static int display_disable;
195 static int show_status;
196 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
197 static int64_t start_time = AV_NOPTS_VALUE;
198 static int debug = 0;
199 static int debug_mv = 0;
200 static int step = 0;
201 static int thread_count = 1;
202 static int workaround_bugs = 1;
203 static int fast = 0;
204 static int genpts = 0;
205 static int lowres = 0;
206 static int idct = FF_IDCT_AUTO;
207 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
208 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
209 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
210 static int error_resilience = FF_ER_CAREFUL;
211 static int error_concealment = 3;
212
213 /* current context */
214 static int is_full_screen;
215 static VideoState *cur_stream;
216 static int64_t audio_callback_time;
217
218 #define FF_ALLOC_EVENT (SDL_USEREVENT)
219 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
220 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
221
222 SDL_Surface *screen;
223
224 /* packet queue handling */
225 static void packet_queue_init(PacketQueue *q)
226 {
227 memset(q, 0, sizeof(PacketQueue));
228 q->mutex = SDL_CreateMutex();
229 q->cond = SDL_CreateCond();
230 }
231
232 static void packet_queue_flush(PacketQueue *q)
233 {
234 AVPacketList *pkt, *pkt1;
235
236 SDL_LockMutex(q->mutex);
237 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
238 pkt1 = pkt->next;
239 av_free_packet(&pkt->pkt);
240 av_freep(&pkt);
241 }
242 q->last_pkt = NULL;
243 q->first_pkt = NULL;
244 q->nb_packets = 0;
245 q->size = 0;
246 SDL_UnlockMutex(q->mutex);
247 }
248
249 static void packet_queue_end(PacketQueue *q)
250 {
251 packet_queue_flush(q);
252 SDL_DestroyMutex(q->mutex);
253 SDL_DestroyCond(q->cond);
254 }
255
256 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
257 {
258 AVPacketList *pkt1;
259
260 /* duplicate the packet */
261 if (av_dup_packet(pkt) < 0)
262 return -1;
263
264 pkt1 = av_malloc(sizeof(AVPacketList));
265 if (!pkt1)
266 return -1;
267 pkt1->pkt = *pkt;
268 pkt1->next = NULL;
269
270
271 SDL_LockMutex(q->mutex);
272
273 if (!q->last_pkt)
274
275 q->first_pkt = pkt1;
276 else
277 q->last_pkt->next = pkt1;
278 q->last_pkt = pkt1;
279 q->nb_packets++;
280 q->size += pkt1->pkt.size;
281 /* XXX: should duplicate packet data in DV case */
282 SDL_CondSignal(q->cond);
283
284 SDL_UnlockMutex(q->mutex);
285 return 0;
286 }
287
288 static void packet_queue_abort(PacketQueue *q)
289 {
290 SDL_LockMutex(q->mutex);
291
292 q->abort_request = 1;
293
294 SDL_CondSignal(q->cond);
295
296 SDL_UnlockMutex(q->mutex);
297 }
298
299 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
300 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
301 {
302 AVPacketList *pkt1;
303 int ret;
304
305 SDL_LockMutex(q->mutex);
306
307 for(;;) {
308 if (q->abort_request) {
309 ret = -1;
310 break;
311 }
312
313 pkt1 = q->first_pkt;
314 if (pkt1) {
315 q->first_pkt = pkt1->next;
316 if (!q->first_pkt)
317 q->last_pkt = NULL;
318 q->nb_packets--;
319 q->size -= pkt1->pkt.size;
320 *pkt = pkt1->pkt;
321 av_free(pkt1);
322 ret = 1;
323 break;
324 } else if (!block) {
325 ret = 0;
326 break;
327 } else {
328 SDL_CondWait(q->cond, q->mutex);
329 }
330 }
331 SDL_UnlockMutex(q->mutex);
332 return ret;
333 }
334
335 static inline void fill_rectangle(SDL_Surface *screen,
336 int x, int y, int w, int h, int color)
337 {
338 SDL_Rect rect;
339 rect.x = x;
340 rect.y = y;
341 rect.w = w;
342 rect.h = h;
343 SDL_FillRect(screen, &rect, color);
344 }
345
346 #if 0
347 /* draw only the border of a rectangle */
348 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
349 {
350 int w1, w2, h1, h2;
351
352 /* fill the background */
353 w1 = x;
354 if (w1 < 0)
355 w1 = 0;
356 w2 = s->width - (x + w);
357 if (w2 < 0)
358 w2 = 0;
359 h1 = y;
360 if (h1 < 0)
361 h1 = 0;
362 h2 = s->height - (y + h);
363 if (h2 < 0)
364 h2 = 0;
365 fill_rectangle(screen,
366 s->xleft, s->ytop,
367 w1, s->height,
368 color);
369 fill_rectangle(screen,
370 s->xleft + s->width - w2, s->ytop,
371 w2, s->height,
372 color);
373 fill_rectangle(screen,
374 s->xleft + w1, s->ytop,
375 s->width - w1 - w2, h1,
376 color);
377 fill_rectangle(screen,
378 s->xleft + w1, s->ytop + s->height - h2,
379 s->width - w1 - w2, h2,
380 color);
381 }
382 #endif
383
384
385
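/* Fixed-point helpers (SCALEBITS fractional bits) converting RGB values to
   CCIR-601 limited-range YCbCr; they are used below to turn the RGBA palette
   of bitmap subtitles into the colorspace of the YUV overlay. */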
386 #define SCALEBITS 10
387 #define ONE_HALF (1 << (SCALEBITS - 1))
388 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
389
390 #define RGB_TO_Y_CCIR(r, g, b) \
391 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
392 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
393
394 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
395 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
396 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
397
398 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
399 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
400 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
401
402 #define ALPHA_BLEND(a, oldp, newp, s)\
403 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
404
405 #define RGBA_IN(r, g, b, a, s)\
406 {\
407 unsigned int v = ((const uint32_t *)(s))[0];\
408 a = (v >> 24) & 0xff;\
409 r = (v >> 16) & 0xff;\
410 g = (v >> 8) & 0xff;\
411 b = v & 0xff;\
412 }
413
414 #define YUVA_IN(y, u, v, a, s, pal)\
415 {\
416 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
417 a = (val >> 24) & 0xff;\
418 y = (val >> 16) & 0xff;\
419 u = (val >> 8) & 0xff;\
420 v = val & 0xff;\
421 }
422
423 #define YUVA_OUT(d, y, u, v, a)\
424 {\
425 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
426 }
427
428
429 #define BPP 1
430
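/* Alpha-blend one palettized subtitle rectangle into a YUV420P picture.
   Luma is blended pixel by pixel; chroma is blended once per 2x2 block from
   the accumulated u1/v1/a1 sums (hence the >> 2 on alpha and the shift passed
   to ALPHA_BLEND). Odd start row/column and an odd trailing row are handled
   by the separate code paths around the main loop. */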
431 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
432 {
433 int wrap, wrap3, width2, skip2;
434 int y, u, v, a, u1, v1, a1, w, h;
435 uint8_t *lum, *cb, *cr;
436 const uint8_t *p;
437 const uint32_t *pal;
438
439 lum = dst->data[0] + rect->y * dst->linesize[0];
440 cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
441 cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];
442
443 width2 = (rect->w + 1) >> 1;
444 skip2 = rect->x >> 1;
445 wrap = dst->linesize[0];
446 wrap3 = rect->linesize;
447 p = rect->bitmap;
448 pal = rect->rgba_palette; /* Now in YCrCb! */
449
450 if (rect->y & 1) {
451 lum += rect->x;
452 cb += skip2;
453 cr += skip2;
454
455 if (rect->x & 1) {
456 YUVA_IN(y, u, v, a, p, pal);
457 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
458 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
459 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
460 cb++;
461 cr++;
462 lum++;
463 p += BPP;
464 }
465 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
466 YUVA_IN(y, u, v, a, p, pal);
467 u1 = u;
468 v1 = v;
469 a1 = a;
470 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
471
472 YUVA_IN(y, u, v, a, p + BPP, pal);
473 u1 += u;
474 v1 += v;
475 a1 += a;
476 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
477 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
478 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
479 cb++;
480 cr++;
481 p += 2 * BPP;
482 lum += 2;
483 }
484 if (w) {
485 YUVA_IN(y, u, v, a, p, pal);
486 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
487 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
488 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
489 }
490 p += wrap3 + (wrap3 - rect->w * BPP);
491 lum += wrap + (wrap - rect->w - rect->x);
492 cb += dst->linesize[1] - width2 - skip2;
493 cr += dst->linesize[2] - width2 - skip2;
494 }
495 for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
496 lum += rect->x;
497 cb += skip2;
498 cr += skip2;
499
500 if (rect->x & 1) {
501 YUVA_IN(y, u, v, a, p, pal);
502 u1 = u;
503 v1 = v;
504 a1 = a;
505 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
506 p += wrap3;
507 lum += wrap;
508 YUVA_IN(y, u, v, a, p, pal);
509 u1 += u;
510 v1 += v;
511 a1 += a;
512 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
513 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
514 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
515 cb++;
516 cr++;
517 p += -wrap3 + BPP;
518 lum += -wrap + 1;
519 }
520 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
521 YUVA_IN(y, u, v, a, p, pal);
522 u1 = u;
523 v1 = v;
524 a1 = a;
525 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
526
527 YUVA_IN(y, u, v, a, p + BPP, pal);
528 u1 += u;
529 v1 += v;
530 a1 += a;
531 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
532 p += wrap3;
533 lum += wrap;
534
535 YUVA_IN(y, u, v, a, p, pal);
536 u1 += u;
537 v1 += v;
538 a1 += a;
539 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540
541 YUVA_IN(y, u, v, a, p + BPP, pal);
542 u1 += u;
543 v1 += v;
544 a1 += a;
545 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
546
547 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
548 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
549
550 cb++;
551 cr++;
552 p += -wrap3 + 2 * BPP;
553 lum += -wrap + 2;
554 }
555 if (w) {
556 YUVA_IN(y, u, v, a, p, pal);
557 u1 = u;
558 v1 = v;
559 a1 = a;
560 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
561 p += wrap3;
562 lum += wrap;
563 YUVA_IN(y, u, v, a, p, pal);
564 u1 += u;
565 v1 += v;
566 a1 += a;
567 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
568 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
569 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
570 cb++;
571 cr++;
572 p += -wrap3 + BPP;
573 lum += -wrap + 1;
574 }
575 p += wrap3 + (wrap3 - rect->w * BPP);
576 lum += wrap + (wrap - rect->w - rect->x);
577 cb += dst->linesize[1] - width2 - skip2;
578 cr += dst->linesize[2] - width2 - skip2;
579 }
580 /* handle odd height */
581 if (h) {
582 lum += rect->x;
583 cb += skip2;
584 cr += skip2;
585
586 if (rect->x & 1) {
587 YUVA_IN(y, u, v, a, p, pal);
588 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
589 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
590 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
591 cb++;
592 cr++;
593 lum++;
594 p += BPP;
595 }
596 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
597 YUVA_IN(y, u, v, a, p, pal);
598 u1 = u;
599 v1 = v;
600 a1 = a;
601 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
602
603 YUVA_IN(y, u, v, a, p + BPP, pal);
604 u1 += u;
605 v1 += v;
606 a1 += a;
607 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
608 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
609 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
610 cb++;
611 cr++;
612 p += 2 * BPP;
613 lum += 2;
614 }
615 if (w) {
616 YUVA_IN(y, u, v, a, p, pal);
617 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
618 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
619 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
620 }
621 }
622 }
623
624 static void free_subpicture(SubPicture *sp)
625 {
626 int i;
627
628 for (i = 0; i < sp->sub.num_rects; i++)
629 {
630 av_free(sp->sub.rects[i].bitmap);
631 av_free(sp->sub.rects[i].rgba_palette);
632 }
633
634 av_free(sp->sub.rects);
635
636 memset(&sp->sub, 0, sizeof(AVSubtitle));
637 }
638
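/* Show the picture at the read index of the picture queue: derive the display
   aspect ratio from the codec's sample_aspect_ratio (falling back to
   width/height), blend any pending subtitle into the overlay, then center and
   letterbox the overlay in the window with SDL_DisplayYUVOverlay(). */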
639 static void video_image_display(VideoState *is)
640 {
641 VideoPicture *vp;
642 SubPicture *sp;
643 AVPicture pict;
644 float aspect_ratio;
645 int width, height, x, y;
646 SDL_Rect rect;
647 int i;
648
649 vp = &is->pictq[is->pictq_rindex];
650 if (vp->bmp) {
651 /* XXX: use variable in the frame */
652 if (is->video_st->codec->sample_aspect_ratio.num == 0)
653 aspect_ratio = 0;
654 else
655 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
656 * is->video_st->codec->width / is->video_st->codec->height;
657 if (aspect_ratio <= 0.0)
658 aspect_ratio = (float)is->video_st->codec->width /
659 (float)is->video_st->codec->height;
660 /* if an active format is indicated, then it overrides the
661 mpeg format */
662 #if 0
663 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
664 is->dtg_active_format = is->video_st->codec->dtg_active_format;
665 printf("dtg_active_format=%d\n", is->dtg_active_format);
666 }
667 #endif
668 #if 0
669 switch(is->video_st->codec->dtg_active_format) {
670 case FF_DTG_AFD_SAME:
671 default:
672 /* nothing to do */
673 break;
674 case FF_DTG_AFD_4_3:
675 aspect_ratio = 4.0 / 3.0;
676 break;
677 case FF_DTG_AFD_16_9:
678 aspect_ratio = 16.0 / 9.0;
679 break;
680 case FF_DTG_AFD_14_9:
681 aspect_ratio = 14.0 / 9.0;
682 break;
683 case FF_DTG_AFD_4_3_SP_14_9:
684 aspect_ratio = 14.0 / 9.0;
685 break;
686 case FF_DTG_AFD_16_9_SP_14_9:
687 aspect_ratio = 14.0 / 9.0;
688 break;
689 case FF_DTG_AFD_SP_4_3:
690 aspect_ratio = 4.0 / 3.0;
691 break;
692 }
693 #endif
694
695 if (is->subtitle_st)
696 {
697 if (is->subpq_size > 0)
698 {
699 sp = &is->subpq[is->subpq_rindex];
700
701 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
702 {
703 SDL_LockYUVOverlay (vp->bmp);
704
705 pict.data[0] = vp->bmp->pixels[0];
706 pict.data[1] = vp->bmp->pixels[2];
707 pict.data[2] = vp->bmp->pixels[1];
708
709 pict.linesize[0] = vp->bmp->pitches[0];
710 pict.linesize[1] = vp->bmp->pitches[2];
711 pict.linesize[2] = vp->bmp->pitches[1];
712
713 for (i = 0; i < sp->sub.num_rects; i++)
714 blend_subrect(&pict, &sp->sub.rects[i]);
715
716 SDL_UnlockYUVOverlay (vp->bmp);
717 }
718 }
719 }
720
721
722 /* XXX: we suppose the screen has a 1.0 pixel ratio */
723 height = is->height;
724 width = ((int)rint(height * aspect_ratio)) & -3;
725 if (width > is->width) {
726 width = is->width;
727 height = ((int)rint(width / aspect_ratio)) & -3;
728 }
729 x = (is->width - width) / 2;
730 y = (is->height - height) / 2;
731 if (!is->no_background) {
732 /* fill the background */
733 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
734 } else {
735 is->no_background = 0;
736 }
737 rect.x = is->xleft + x;
738 rect.y = is->ytop + y;
739 rect.w = width;
740 rect.h = height;
741 SDL_DisplayYUVOverlay(vp->bmp, &rect);
742 } else {
743 #if 0
744 fill_rectangle(screen,
745 is->xleft, is->ytop, is->width, is->height,
746 QERGB(0x00, 0x00, 0x00));
747 #endif
748 }
749 }
750
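/* modulo that always returns a non-negative result, used to wrap indexes
   into the circular sample_array */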
751 static inline int compute_mod(int a, int b)
752 {
753 a = a % b;
754 if (a >= 0)
755 return a;
756 else
757 return a + b;
758 }
759
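/* Audio-only visualization: draw an oscilloscope-style waveform of the most
   recently played samples, one band per channel. The starting index is
   centered on the samples currently being output, estimated from the buffer
   fullness and the time elapsed since the last audio callback. */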
760 static void video_audio_display(VideoState *s)
761 {
762 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
763 int ch, channels, h, h2, bgcolor, fgcolor;
764 int64_t time_diff;
765
766 /* compute display index : center on currently output samples */
767 channels = s->audio_st->codec->channels;
768 nb_display_channels = channels;
769 if (!s->paused) {
770 n = 2 * channels;
771 delay = audio_write_get_buf_size(s);
772 delay /= n;
773
774 /* to be more precise, we take into account the time spent since
775 the last buffer computation */
776 if (audio_callback_time) {
777 time_diff = av_gettime() - audio_callback_time;
778 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
779 }
780
781 delay -= s->width / 2;
782 if (delay < s->width)
783 delay = s->width;
784 i_start = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
785 s->last_i_start = i_start;
786 } else {
787 i_start = s->last_i_start;
788 }
789
790 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
791 fill_rectangle(screen,
792 s->xleft, s->ytop, s->width, s->height,
793 bgcolor);
794
795 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
796
797 /* total height for one channel */
798 h = s->height / nb_display_channels;
799 /* graph height / 2 */
800 h2 = (h * 9) / 20;
801 for(ch = 0;ch < nb_display_channels; ch++) {
802 i = i_start + ch;
803 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
804 for(x = 0; x < s->width; x++) {
805 y = (s->sample_array[i] * h2) >> 15;
806 if (y < 0) {
807 y = -y;
808 ys = y1 - y;
809 } else {
810 ys = y1;
811 }
812 fill_rectangle(screen,
813 s->xleft + x, ys, 1, y,
814 fgcolor);
815 i += channels;
816 if (i >= SAMPLE_ARRAY_SIZE)
817 i -= SAMPLE_ARRAY_SIZE;
818 }
819 }
820
821 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
822
823 for(ch = 1;ch < nb_display_channels; ch++) {
824 y = s->ytop + ch * h;
825 fill_rectangle(screen,
826 s->xleft, y, s->width, 1,
827 fgcolor);
828 }
829 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
830 }
831
832 /* display the current picture, if any */
833 static void video_display(VideoState *is)
834 {
835 if (is->audio_st && is->show_audio)
836 video_audio_display(is);
837 else if (is->video_st)
838 video_image_display(is);
839 }
840
841 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
842 {
843 SDL_Event event;
844 event.type = FF_REFRESH_EVENT;
845 event.user.data1 = opaque;
846 SDL_PushEvent(&event);
847 return 0; /* 0 means stop timer */
848 }
849
850 /* schedule a video refresh in 'delay' ms */
851 static void schedule_refresh(VideoState *is, int delay)
852 {
853 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
854 }
855
856 /* get the current audio clock value */
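/* i.e. the pts of the last decoded audio data minus what is still waiting in
   the output buffer, converted to seconds assuming 2 bytes per sample per
   channel */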
857 static double get_audio_clock(VideoState *is)
858 {
859 double pts;
860 int hw_buf_size, bytes_per_sec;
861 pts = is->audio_clock;
862 hw_buf_size = audio_write_get_buf_size(is);
863 bytes_per_sec = 0;
864 if (is->audio_st) {
865 bytes_per_sec = is->audio_st->codec->sample_rate *
866 2 * is->audio_st->codec->channels;
867 }
868 if (bytes_per_sec)
869 pts -= (double)hw_buf_size / bytes_per_sec;
870 return pts;
871 }
872
873 /* get the current video clock value */
874 static double get_video_clock(VideoState *is)
875 {
876 double delta;
877 if (is->paused) {
878 delta = 0;
879 } else {
880 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
881 }
882 return is->video_current_pts + delta;
883 }
884
885 /* get the current external clock value */
886 static double get_external_clock(VideoState *is)
887 {
888 int64_t ti;
889 ti = av_gettime();
890 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
891 }
892
893 /* get the current master clock value */
894 static double get_master_clock(VideoState *is)
895 {
896 double val;
897
898 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
899 if (is->video_st)
900 val = get_video_clock(is);
901 else
902 val = get_audio_clock(is);
903 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
904 if (is->audio_st)
905 val = get_audio_clock(is);
906 else
907 val = get_video_clock(is);
908 } else {
909 val = get_external_clock(is);
910 }
911 return val;
912 }
913
914 /* seek in the stream */
915 static void stream_seek(VideoState *is, int64_t pos, int rel)
916 {
917 if (!is->seek_req) {
918 is->seek_pos = pos;
919 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
920 is->seek_req = 1;
921 }
922 }
923
924 /* pause or resume the video */
925 static void stream_pause(VideoState *is)
926 {
927 is->paused = !is->paused;
928 if (is->paused) {
929 is->video_current_pts = get_video_clock(is);
930 }
931 }
932
933 /* called to display each frame */
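/* The nominal delay is the pts difference between the current and the previous
   picture; when video is not the master clock, the delay is forced to 0 if the
   picture is late or doubled if it is early, and the next refresh is scheduled
   from frame_timer rather than the current time to avoid accumulating drift. */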
934 static void video_refresh_timer(void *opaque)
935 {
936 VideoState *is = opaque;
937 VideoPicture *vp;
938 double actual_delay, delay, sync_threshold, ref_clock, diff;
939
940 SubPicture *sp, *sp2;
941
942 if (is->video_st) {
943 if (is->pictq_size == 0) {
944 /* if no picture, need to wait */
945 schedule_refresh(is, 1);
946 } else {
947 /* dequeue the picture */
948 vp = &is->pictq[is->pictq_rindex];
949
950 /* update current video pts */
951 is->video_current_pts = vp->pts;
952 is->video_current_pts_time = av_gettime();
953
954 /* compute nominal delay */
955 delay = vp->pts - is->frame_last_pts;
956 if (delay <= 0 || delay >= 1.0) {
957 /* if incorrect delay, use previous one */
958 delay = is->frame_last_delay;
959 }
960 is->frame_last_delay = delay;
961 is->frame_last_pts = vp->pts;
962
963 /* update delay to follow master synchronisation source */
964 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
965 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
966 /* if video is slave, we try to correct big delays by
967 duplicating or deleting a frame */
968 ref_clock = get_master_clock(is);
969 diff = vp->pts - ref_clock;
970
971 /* skip or repeat frame. We take into account the
972 delay to compute the threshold. I still don't know
973 if it is the best guess */
974 sync_threshold = AV_SYNC_THRESHOLD;
975 if (delay > sync_threshold)
976 sync_threshold = delay;
977 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
978 if (diff <= -sync_threshold)
979 delay = 0;
980 else if (diff >= sync_threshold)
981 delay = 2 * delay;
982 }
983 }
984
985 is->frame_timer += delay;
986 /* compute the REAL delay (we need to do that to avoid
987 long term errors) */
988 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
989 if (actual_delay < 0.010) {
990 /* XXX: should skip picture */
991 actual_delay = 0.010;
992 }
993 /* launch timer for next picture */
994 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
995
996 #if defined(DEBUG_SYNC)
997 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
998 delay, actual_delay, vp->pts, -diff);
999 #endif
1000
1001 if(is->subtitle_st) {
1002 if (is->subtitle_stream_changed) {
1003 SDL_LockMutex(is->subpq_mutex);
1004
1005 while (is->subpq_size) {
1006 free_subpicture(&is->subpq[is->subpq_rindex]);
1007
1008 /* update queue size and signal for next picture */
1009 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1010 is->subpq_rindex = 0;
1011
1012 is->subpq_size--;
1013 }
1014 is->subtitle_stream_changed = 0;
1015
1016 SDL_CondSignal(is->subpq_cond);
1017 SDL_UnlockMutex(is->subpq_mutex);
1018 } else {
1019 if (is->subpq_size > 0) {
1020 sp = &is->subpq[is->subpq_rindex];
1021
1022 if (is->subpq_size > 1)
1023 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1024 else
1025 sp2 = NULL;
1026
1027 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1028 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1029 {
1030 free_subpicture(sp);
1031
1032 /* update queue size and signal for next picture */
1033 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1034 is->subpq_rindex = 0;
1035
1036 SDL_LockMutex(is->subpq_mutex);
1037 is->subpq_size--;
1038 SDL_CondSignal(is->subpq_cond);
1039 SDL_UnlockMutex(is->subpq_mutex);
1040 }
1041 }
1042 }
1043 }
1044
1045 /* display picture */
1046 video_display(is);
1047
1048 /* update queue size and signal for next picture */
1049 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1050 is->pictq_rindex = 0;
1051
1052 SDL_LockMutex(is->pictq_mutex);
1053 is->pictq_size--;
1054 SDL_CondSignal(is->pictq_cond);
1055 SDL_UnlockMutex(is->pictq_mutex);
1056 }
1057 } else if (is->audio_st) {
1058 /* draw the next audio frame */
1059
1060 schedule_refresh(is, 40);
1061
1062 /* if only audio stream, then display the audio bars (better
1063 than nothing, just to test the implementation) */
1064
1065 /* display picture */
1066 video_display(is);
1067 } else {
1068 schedule_refresh(is, 100);
1069 }
1070 if (show_status) {
1071 static int64_t last_time;
1072 int64_t cur_time;
1073 int aqsize, vqsize, sqsize;
1074 double av_diff;
1075
1076 cur_time = av_gettime();
1077 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1078 aqsize = 0;
1079 vqsize = 0;
1080 sqsize = 0;
1081 if (is->audio_st)
1082 aqsize = is->audioq.size;
1083 if (is->video_st)
1084 vqsize = is->videoq.size;
1085 if (is->subtitle_st)
1086 sqsize = is->subtitleq.size;
1087 av_diff = 0;
1088 if (is->audio_st && is->video_st)
1089 av_diff = get_audio_clock(is) - get_video_clock(is);
1090 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1091 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1092 fflush(stdout);
1093 last_time = cur_time;
1094 }
1095 }
1096 }
1097
1098 /* allocate a picture (needs to do that in main thread to avoid
1099 potential locking problems) */
1100 static void alloc_picture(void *opaque)
1101 {
1102 VideoState *is = opaque;
1103 VideoPicture *vp;
1104
1105 vp = &is->pictq[is->pictq_windex];
1106
1107 if (vp->bmp)
1108 SDL_FreeYUVOverlay(vp->bmp);
1109
1110 #if 0
1111 /* XXX: use generic function */
1112 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1113 switch(is->video_st->codec->pix_fmt) {
1114 case PIX_FMT_YUV420P:
1115 case PIX_FMT_YUV422P:
1116 case PIX_FMT_YUV444P:
1117 case PIX_FMT_YUV422:
1118 case PIX_FMT_YUV410P:
1119 case PIX_FMT_YUV411P:
1120 is_yuv = 1;
1121 break;
1122 default:
1123 is_yuv = 0;
1124 break;
1125 }
1126 #endif
1127 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1128 is->video_st->codec->height,
1129 SDL_YV12_OVERLAY,
1130 screen);
1131 vp->width = is->video_st->codec->width;
1132 vp->height = is->video_st->codec->height;
1133
1134 SDL_LockMutex(is->pictq_mutex);
1135 vp->allocated = 1;
1136 SDL_CondSignal(is->pictq_cond);
1137 SDL_UnlockMutex(is->pictq_mutex);
1138 }
1139
1140 /**
1141 *
1142 * @param pts the dts of the pkt / pts of the frame and guessed if not known
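 * Waits for a free slot in the picture queue, asks the main thread to
 * (re)allocate the SDL overlay if the frame size changed, then converts the
 * decoded frame into the YV12 overlay through the swscale context.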
1143 */
1144 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1145 {
1146 VideoPicture *vp;
1147 int dst_pix_fmt;
1148 AVPicture pict;
1149 static struct SwsContext *img_convert_ctx;
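/* scaling/colorspace conversion context, created lazily on the first frame;
   pixel format conversion now goes through the swscale interface
   (sws_getContext/sws_scale) */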
1150
1151 /* wait until we have space to put a new picture */
1152 SDL_LockMutex(is->pictq_mutex);
1153 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1154 !is->videoq.abort_request) {
1155 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1156 }
1157 SDL_UnlockMutex(is->pictq_mutex);
1158
1159 if (is->videoq.abort_request)
1160 return -1;
1161
1162 vp = &is->pictq[is->pictq_windex];
1163
1164 /* alloc or resize hardware picture buffer */
1165 if (!vp->bmp ||
1166 vp->width != is->video_st->codec->width ||
1167 vp->height != is->video_st->codec->height) {
1168 SDL_Event event;
1169
1170 vp->allocated = 0;
1171
1172 /* the allocation must be done in the main thread to avoid
1173 locking problems */
1174 event.type = FF_ALLOC_EVENT;
1175 event.user.data1 = is;
1176 SDL_PushEvent(&event);
1177
1178 /* wait until the picture is allocated */
1179 SDL_LockMutex(is->pictq_mutex);
1180 while (!vp->allocated && !is->videoq.abort_request) {
1181 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1182 }
1183 SDL_UnlockMutex(is->pictq_mutex);
1184
1185 if (is->videoq.abort_request)
1186 return -1;
1187 }
1188
1189 /* if the frame is not skipped, then display it */
1190 if (vp->bmp) {
1191 /* get a pointer on the bitmap */
1192 SDL_LockYUVOverlay (vp->bmp);
1193
1194 dst_pix_fmt = PIX_FMT_YUV420P;
1195 pict.data[0] = vp->bmp->pixels[0];
1196 pict.data[1] = vp->bmp->pixels[2];
1197 pict.data[2] = vp->bmp->pixels[1];
1198
1199 pict.linesize[0] = vp->bmp->pitches[0];
1200 pict.linesize[1] = vp->bmp->pitches[2];
1201 pict.linesize[2] = vp->bmp->pitches[1];
1202 if (img_convert_ctx == NULL) {
1203 img_convert_ctx = sws_getContext(is->video_st->codec->width,
1204 is->video_st->codec->height, is->video_st->codec->pix_fmt,
1205 is->video_st->codec->width, is->video_st->codec->height,
1206 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1207 if (img_convert_ctx == NULL) {
1208 fprintf(stderr, "Cannot initialize the conversion context\n");
1209 exit(1);
1210 }
1211 }
1212 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1213 0, is->video_st->codec->height, pict.data, pict.linesize);
1214 /* update the bitmap content */
1215 SDL_UnlockYUVOverlay(vp->bmp);
1216
1217 vp->pts = pts;
1218
1219 /* now we can update the picture count */
1220 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1221 is->pictq_windex = 0;
1222 SDL_LockMutex(is->pictq_mutex);
1223 is->pictq_size++;
1224 SDL_UnlockMutex(is->pictq_mutex);
1225 }
1226 return 0;
1227 }
1228
1229 /**
1230 * compute the exact PTS for the picture if it is omitted in the stream
1231 * @param pts1 the dts of the pkt / pts of the frame
1232 */
1233 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1234 {
1235 double frame_delay, pts;
1236
1237 pts = pts1;
1238
1239 if (pts != 0) {
1240 /* update video clock with pts, if present */
1241 is->video_clock = pts;
1242 } else {
1243 pts = is->video_clock;
1244 }
1245 /* update video clock for next frame */
1246 frame_delay = av_q2d(is->video_st->codec->time_base);
1247 /* for MPEG2, the frame can be repeated, so we update the
1248 clock accordingly */
1249 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1250 is->video_clock += frame_delay;
1251
1252 #if defined(DEBUG_SYNC) && 0
1253 {
1254 int ftype;
1255 if (src_frame->pict_type == FF_B_TYPE)
1256 ftype = 'B';
1257 else if (src_frame->pict_type == FF_I_TYPE)
1258 ftype = 'I';
1259 else
1260 ftype = 'P';
1261 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1262 ftype, pts, pts1);
1263 }
1264 #endif
1265 return queue_picture(is, src_frame, pts);
1266 }
1267
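/* video decoding thread: pull packets from videoq, decode them, derive a pts
   from the packet dts and hand finished frames to output_picture2() */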
1268 static int video_thread(void *arg)
1269 {
1270 VideoState *is = arg;
1271 AVPacket pkt1, *pkt = &pkt1;
1272 int len1, got_picture;
1273 AVFrame *frame= avcodec_alloc_frame();
1274 double pts;
1275
1276 for(;;) {
1277 while (is->paused && !is->videoq.abort_request) {
1278 SDL_Delay(10);
1279 }
1280 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1281 break;
1282 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1283 this packet, if any */
1284 pts = 0;
1285 if (pkt->dts != AV_NOPTS_VALUE)
1286 pts = av_q2d(is->video_st->time_base)*pkt->dts;
1287
1288 SDL_LockMutex(is->video_decoder_mutex);
1289 len1 = avcodec_decode_video(is->video_st->codec,
1290 frame, &got_picture,
1291 pkt->data, pkt->size);
1292 SDL_UnlockMutex(is->video_decoder_mutex);
1293 // if (len1 < 0)
1294 // break;
1295 if (got_picture) {
1296 if (output_picture2(is, frame, pts) < 0)
1297 goto the_end;
1298 }
1299 av_free_packet(pkt);
1300 if (step)
1301 if (cur_stream)
1302 stream_pause(cur_stream);
1303 }
1304 the_end:
1305 av_free(frame);
1306 return 0;
1307 }
1308
1309 static int subtitle_thread(void *arg)
1310 {
1311 VideoState *is = arg;
1312 SubPicture *sp;
1313 AVPacket pkt1, *pkt = &pkt1;
1314 int len1, got_subtitle;
1315 double pts;
1316 int i, j;
1317 int r, g, b, y, u, v, a;
1318
1319 for(;;) {
1320 while (is->paused && !is->subtitleq.abort_request) {
1321 SDL_Delay(10);
1322 }
1323 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1324 break;
1325
1326 SDL_LockMutex(is->subpq_mutex);
1327 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1328 !is->subtitleq.abort_request) {
1329 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1330 }
1331 SDL_UnlockMutex(is->subpq_mutex);
1332
1333 if (is->subtitleq.abort_request)
1334 goto the_end;
1335
1336 sp = &is->subpq[is->subpq_windex];
1337
1338 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1339 this packet, if any */
1340 pts = 0;
1341 if (pkt->pts != AV_NOPTS_VALUE)
1342 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1343
1344 SDL_LockMutex(is->subtitle_decoder_mutex);
1345 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1346 &sp->sub, &got_subtitle,
1347 pkt->data, pkt->size);
1348 SDL_UnlockMutex(is->subtitle_decoder_mutex);
1349 // if (len1 < 0)
1350 // break;
1351 if (got_subtitle && sp->sub.format == 0) {
1352 sp->pts = pts;
1353
1354 for (i = 0; i < sp->sub.num_rects; i++)
1355 {
1356 for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1357 {
1358 RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1359 y = RGB_TO_Y_CCIR(r, g, b);
1360 u = RGB_TO_U_CCIR(r, g, b, 0);
1361 v = RGB_TO_V_CCIR(r, g, b, 0);
1362 YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1363 }
1364 }
1365
1366 /* now we can update the picture count */
1367 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1368 is->subpq_windex = 0;
1369 SDL_LockMutex(is->subpq_mutex);
1370 is->subpq_size++;
1371 SDL_UnlockMutex(is->subpq_mutex);
1372 }
1373 av_free_packet(pkt);
1374 // if (step)
1375 // if (cur_stream)
1376 // stream_pause(cur_stream);
1377 }
1378 the_end:
1379 return 0;
1380 }
1381
1382 /* copy samples for viewing in editor window */
1383 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1384 {
1385 int size, len, channels;
1386
1387 channels = is->audio_st->codec->channels;
1388
1389 size = samples_size / sizeof(short);
1390 while (size > 0) {
1391 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1392 if (len > size)
1393 len = size;
1394 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1395 samples += len;
1396 is->sample_array_index += len;
1397 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1398 is->sample_array_index = 0;
1399 size -= len;
1400 }
1401 }
1402
1403 /* return the new audio buffer size (samples can be added or deleted
1404 to get better sync if video or external master clock) */
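/* audio_diff_cum holds an exponentially weighted sum of recent A-V clock
   differences; once AUDIO_DIFF_AVG_NB measurements are in, the average is
   compared with audio_diff_threshold and the buffer is shrunk or grown by at
   most SAMPLE_CORRECTION_PERCENT_MAX percent of its size. */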
1405 static int synchronize_audio(VideoState *is, short *samples,
1406 int samples_size1, double pts)
1407 {
1408 int n, samples_size;
1409 double ref_clock;
1410
1411 n = 2 * is->audio_st->codec->channels;
1412 samples_size = samples_size1;
1413
1414 /* if not master, then we try to remove or add samples to correct the clock */
1415 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1416 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1417 double diff, avg_diff;
1418 int wanted_size, min_size, max_size, nb_samples;
1419
1420 ref_clock = get_master_clock(is);
1421 diff = get_audio_clock(is) - ref_clock;
1422
1423 if (diff < AV_NOSYNC_THRESHOLD) {
1424 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1425 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1426 /* not enough measures to have a correct estimate */
1427 is->audio_diff_avg_count++;
1428 } else {
1429 /* estimate the A-V difference */
1430 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1431
1432 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1433 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1434 nb_samples = samples_size / n;
1435
1436 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1437 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1438 if (wanted_size < min_size)
1439 wanted_size = min_size;
1440 else if (wanted_size > max_size)
1441 wanted_size = max_size;
1442
1443 /* add or remove samples to correct the synchro */
1444 if (wanted_size < samples_size) {
1445 /* remove samples */
1446 samples_size = wanted_size;
1447 } else if (wanted_size > samples_size) {
1448 uint8_t *samples_end, *q;
1449 int nb;
1450
1451 /* add samples */
1452 nb = (wanted_size - samples_size);
1453 samples_end = (uint8_t *)samples + samples_size - n;
1454 q = samples_end + n;
1455 while (nb > 0) {
1456 memcpy(q, samples_end, n);
1457 q += n;
1458 nb -= n;
1459 }
1460 samples_size = wanted_size;
1461 }
1462 }
1463 #if 0
1464 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1465 diff, avg_diff, samples_size - samples_size1,
1466 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1467 #endif
1468 }
1469 } else {
1470 /* too big difference : may be initial PTS errors, so
1471 reset A-V filter */
1472 is->audio_diff_avg_count = 0;
1473 is->audio_diff_cum = 0;
1474 }
1475 }
1476
1477 return samples_size;
1478 }
1479
1480 /* decode one audio frame and return its uncompressed size */
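/* one packet may contain several audio frames, so the current packet is
   consumed incrementally through audio_pkt_data/audio_pkt_size; the audio
   clock advances by decoded_bytes / bytes_per_second after each frame */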
1481 static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, double *pts_ptr)
1482 {
1483 AVPacket *pkt = &is->audio_pkt;
1484 int n, len1, data_size;
1485 double pts;
1486
1487 for(;;) {
1488 /* NOTE: the audio packet can contain several frames */
1489 while (is->audio_pkt_size > 0) {
1490 SDL_LockMutex(is->audio_decoder_mutex);
1491 len1 = avcodec_decode_audio(is->audio_st->codec,
1492 (int16_t *)audio_buf, &data_size,
1493 is->audio_pkt_data, is->audio_pkt_size);
1494 SDL_UnlockMutex(is->audio_decoder_mutex);
1495 if (len1 < 0) {
1496 /* if error, we skip the frame */
1497 is->audio_pkt_size = 0;
1498 break;
1499 }
1500
1501 is->audio_pkt_data += len1;
1502 is->audio_pkt_size -= len1;
1503 if (data_size <= 0)
1504 continue;
1505 /* if no pts, then compute it */
1506 pts = is->audio_clock;
1507 *pts_ptr = pts;
1508 n = 2 * is->audio_st->codec->channels;
1509 is->audio_clock += (double)data_size /
1510 (double)(n * is->audio_st->codec->sample_rate);
1511 #if defined(DEBUG_SYNC)
1512 {
1513 static double last_clock;
1514 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1515 is->audio_clock - last_clock,
1516 is->audio_clock, pts);
1517 last_clock = is->audio_clock;
1518 }
1519 #endif
1520 return data_size;
1521 }
1522
1523 /* free the current packet */
1524 if (pkt->data)
1525 av_free_packet(pkt);
1526
1527 if (is->paused || is->audioq.abort_request) {
1528 return -1;
1529 }
1530
1531 /* read next packet */
1532 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1533 return -1;
1534 is->audio_pkt_data = pkt->data;
1535 is->audio_pkt_size = pkt->size;
1536
1537 /* if the packet has a pts, use it to update the audio clock */
1538 if (pkt->pts != AV_NOPTS_VALUE) {
1539 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1540 }
1541 }
1542 }
1543
1544 /* get the current audio output buffer size, in bytes. With SDL, we
1545 cannot have precise information */
1546 static int audio_write_get_buf_size(VideoState *is)
1547 {
1548 return is->audio_hw_buf_size - is->audio_buf_index;
1549 }
1550
1551
1552 /* prepare a new audio buffer */
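/* called by SDL from its own audio thread: keep refilling 'stream' with
   decoded and sync-adjusted samples; if decoding fails, output silence so the
   device keeps being fed */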
1553 void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1554 {
1555 VideoState *is = opaque;
1556 int audio_size, len1;
1557 double pts;
1558
1559 audio_callback_time = av_gettime();
1560
1561 while (len > 0) {
1562 if (is->audio_buf_index >= is->audio_buf_size) {
1563 audio_size = audio_decode_frame(is, is->audio_buf, &pts);
1564 if (audio_size < 0) {
1565 /* if error, just output silence */
1566 is->audio_buf_size = 1024;
1567 memset(is->audio_buf, 0, is->audio_buf_size);
1568 } else {
1569 if (is->show_audio)
1570 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1571 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1572 pts);
1573 is->audio_buf_size = audio_size;
1574 }
1575 is->audio_buf_index = 0;
1576 }
1577 len1 = is->audio_buf_size - is->audio_buf_index;
1578 if (len1 > len)
1579 len1 = len;
1580 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1581 len -= len1;
1582 stream += len1;
1583 is->audio_buf_index += len1;
1584 }
1585 }
1586
1587
1588 /* open a given stream. Return 0 if OK */
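/* applies the command-line decoder options, opens the SDL audio device for
   audio streams, and spawns the video or subtitle decoding thread */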
1589 static int stream_component_open(VideoState *is, int stream_index)
1590 {
1591 AVFormatContext *ic = is->ic;
1592 AVCodecContext *enc;
1593 AVCodec *codec;
1594 SDL_AudioSpec wanted_spec, spec;
1595
1596 if (stream_index < 0 || stream_index >= ic->nb_streams)
1597 return -1;
1598 enc = ic->streams[stream_index]->codec;
1599
1600 /* prepare audio output */
1601 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1602 wanted_spec.freq = enc->sample_rate;
1603 wanted_spec.format = AUDIO_S16SYS;
1604 /* hack for AC3. XXX: suppress that */
1605 if (enc->channels > 2)
1606 enc->channels = 2;
1607 wanted_spec.channels = enc->channels;
1608 wanted_spec.silence = 0;
1609 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1610 wanted_spec.callback = sdl_audio_callback;
1611 wanted_spec.userdata = is;
1612 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1613 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1614 return -1;
1615 }
1616 is->audio_hw_buf_size = spec.size;
1617 }
1618
1619 codec = avcodec_find_decoder(enc->codec_id);
1620 enc->debug_mv = debug_mv;
1621 enc->debug = debug;
1622 enc->workaround_bugs = workaround_bugs;
1623 enc->lowres = lowres;
1624 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1625 enc->idct_algo= idct;
1626 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1627 enc->skip_frame= skip_frame;
1628 enc->skip_idct= skip_idct;
1629 enc->skip_loop_filter= skip_loop_filter;
1630 enc->error_resilience= error_resilience;
1631 enc->error_concealment= error_concealment;
1632 if (!codec ||
1633 avcodec_open(enc, codec) < 0)
1634 return -1;
1635 #if defined(HAVE_THREADS)
1636 if(thread_count>1)
1637 avcodec_thread_init(enc, thread_count);
1638 #endif
1639 enc->thread_count= thread_count;
1640 switch(enc->codec_type) {
1641 case CODEC_TYPE_AUDIO:
1642 is->audio_stream = stream_index;
1643 is->audio_st = ic->streams[stream_index];
1644 is->audio_buf_size = 0;
1645 is->audio_buf_index = 0;
1646
1647 /* init averaging filter */
1648 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1649 is->audio_diff_avg_count = 0;
1650 /* since we do not have a precise enough audio fifo fullness,
1651 we correct audio sync only if larger than this threshold */
1652 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1653
1654 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1655 packet_queue_init(&is->audioq);
1656 SDL_PauseAudio(0);
1657 break;
1658 case CODEC_TYPE_VIDEO:
1659 is->video_stream = stream_index;
1660 is->video_st = ic->streams[stream_index];
1661
1662 is->frame_last_delay = 40e-3;
1663 is->frame_timer = (double)av_gettime() / 1000000.0;
1664 is->video_current_pts_time = av_gettime();
1665
1666 packet_queue_init(&is->videoq);
1667 is->video_tid = SDL_CreateThread(video_thread, is);
1668 break;
1669 case CODEC_TYPE_SUBTITLE:
1670 is->subtitle_stream = stream_index;
1671 is->subtitle_st = ic->streams[stream_index];
1672 packet_queue_init(&is->subtitleq);
1673
1674 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1675 break;
1676 default:
1677 break;
1678 }
1679 return 0;
1680 }
1681
1682 static void stream_component_close(VideoState *is, int stream_index)
1683 {
1684 AVFormatContext *ic = is->ic;
1685 AVCodecContext *enc;
1686
1687 if (stream_index < 0 || stream_index >= ic->nb_streams)
1688 return;
1689 enc = ic->streams[stream_index]->codec;
1690
1691 switch(enc->codec_type) {
1692 case CODEC_TYPE_AUDIO:
1693 packet_queue_abort(&is->audioq);
1694
1695 SDL_CloseAudio();
1696
1697 packet_queue_end(&is->audioq);
1698 break;
1699 case CODEC_TYPE_VIDEO:
1700 packet_queue_abort(&is->videoq);
1701
1702 /* note: we also signal this mutex to make sure we deblock the
1703 video thread in all cases */
1704 SDL_LockMutex(is->pictq_mutex);
1705 SDL_CondSignal(is->pictq_cond);
1706 SDL_UnlockMutex(is->pictq_mutex);
1707
1708 SDL_WaitThread(is->video_tid, NULL);
1709
1710 packet_queue_end(&is->videoq);
1711 break;
1712 case CODEC_TYPE_SUBTITLE:
1713 packet_queue_abort(&is->subtitleq);
1714
1715 /* note: we also signal this mutex to make sure we deblock the
1716 subtitle thread in all cases */
1717 SDL_LockMutex(is->subpq_mutex);
1718 is->subtitle_stream_changed = 1;
1719
1720 SDL_CondSignal(is->subpq_cond);
1721 SDL_UnlockMutex(is->subpq_mutex);
1722
1723 SDL_WaitThread(is->subtitle_tid, NULL);
1724
1725 packet_queue_end(&is->subtitleq);
1726 break;
1727 default:
1728 break;
1729 }
1730
1731 avcodec_close(enc);
1732 switch(enc->codec_type) {
1733 case CODEC_TYPE_AUDIO:
1734 is->audio_st = NULL;
1735 is->audio_stream = -1;
1736 break;
1737 case CODEC_TYPE_VIDEO:
1738 is->video_st = NULL;
1739 is->video_stream = -1;
1740 break;
1741 case CODEC_TYPE_SUBTITLE:
1742 is->subtitle_st = NULL;
1743 is->subtitle_stream = -1;
1744 break;
1745 default:
1746 break;
1747 }
1748 }
1749
1750 static void dump_stream_info(const AVFormatContext *s)
1751 {
1752 if (s->track != 0)
1753 fprintf(stderr, "Track: %d\n", s->track);
1754 if (s->title[0] != '\0')
1755 fprintf(stderr, "Title: %s\n", s->title);
1756 if (s->author[0] != '\0')
1757 fprintf(stderr, "Author: %s\n", s->author);
1758 if (s->album[0] != '\0')
1759 fprintf(stderr, "Album: %s\n", s->album);
1760 if (s->year != 0)
1761 fprintf(stderr, "Year: %d\n", s->year);
1762 if (s->genre[0] != '\0')
1763 fprintf(stderr, "Genre: %s\n", s->genre);
1764 }
1765
1766 /* since we have only one decoding thread, we can use a global
1767 variable instead of a thread local variable */
1768 static VideoState *global_video_state;
1769
1770 static int decode_interrupt_cb(void)
1771 {
1772 return (global_video_state && global_video_state->abort_request);
1773 }
1774
1775 /* this thread gets the stream from the disk or the network */
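/* demuxer loop: open the input, pick the first usable audio and video streams,
   then keep reading packets and dispatching them to the per-stream queues,
   waiting when the queues are full and flushing them on seek requests */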
1776 static int decode_thread(void *arg)
1777 {
1778 VideoState *is = arg;
1779 AVFormatContext *ic;
1780 int err, i, ret, video_index, audio_index, use_play;
1781 AVPacket pkt1, *pkt = &pkt1;
1782 AVFormatParameters params, *ap = &params;
1783
1784 video_index = -1;
1785 audio_index = -1;
1786 is->video_stream = -1;
1787 is->audio_stream = -1;
1788 is->subtitle_stream = -1;
1789
1790 global_video_state = is;
1791 url_set_interrupt_cb(decode_interrupt_cb);
1792
1793 memset(ap, 0, sizeof(*ap));
1794 ap->image_format = image_format;
1795 ap->initial_pause = 1; /* we force a pause when starting an RTSP
1796 stream */
1797
1798 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1799 if (err < 0) {
1800 print_error(is->filename, err);
1801 ret = -1;
1802 goto fail;
1803 }
1804 is->ic = ic;
1805 #ifdef CONFIG_NETWORK
1806 use_play = (ic->iformat == &rtsp_demuxer);
1807 #else
1808 use_play = 0;
1809 #endif
1810
1811 if(genpts)
1812 ic->flags |= AVFMT_FLAG_GENPTS;
1813
1814 if (!use_play) {
1815 err = av_find_stream_info(ic);
1816 if (err < 0) {
1817 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1818 ret = -1;
1819 goto fail;
1820 }
1821 ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe shouldn't use url_feof() to test for the end
1822 }
1823
1824 /* if seeking requested, we execute it */
1825 if (start_time != AV_NOPTS_VALUE) {
1826 int64_t timestamp;
1827
1828 timestamp = start_time;
1829 /* add the stream start time */
1830 if (ic->start_time != AV_NOPTS_VALUE)
1831 timestamp += ic->start_time;
1832 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1833 if (ret < 0) {
1834 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1835 is->filename, (double)timestamp / AV_TIME_BASE);
1836 }
1837 }
1838
1839 /* now we can begin to play (RTSP stream only) */
1840 av_read_play(ic);
1841
1842 if (use_play) {
1843 err = av_find_stream_info(ic);
1844 if (err < 0) {
1845 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1846 ret = -1;
1847 goto fail;
1848 }
1849 }
1850
1851 for(i = 0; i < ic->nb_streams; i++) {
1852 AVCodecContext *enc = ic->streams[i]->codec;
1853 switch(enc->codec_type) {
1854 case CODEC_TYPE_AUDIO:
1855 if (audio_index < 0 && !audio_disable)
1856 audio_index = i;
1857 break;
1858 case CODEC_TYPE_VIDEO:
1859 if (video_index < 0 && !video_disable)
1860 video_index = i;
1861 break;
1862 default:
1863 break;
1864 }
1865 }
1866 if (show_status) {
1867 dump_format(ic, 0, is->filename, 0);
1868 dump_stream_info(ic);
1869 }
1870
1871 /* open the streams */
1872 if (audio_index >= 0) {
1873 stream_component_open(is, audio_index);
1874 }
1875
1876 if (video_index >= 0) {
1877 stream_component_open(is, video_index);
1878 } else {
1879 if (!display_disable)
1880 is->show_audio = 1;
1881 }
1882
1883 if (is->video_stream < 0 && is->audio_stream < 0) {
1884 fprintf(stderr, "%s: could not open codecs\n", is->filename);
1885 ret = -1;
1886 goto fail;
1887 }
1888
1889 for(;;) {
1890 if (is->abort_request)
1891 break;
1892 #ifdef CONFIG_NETWORK
1893 if (is->paused != is->last_paused) {
1894 is->last_paused = is->paused;
1895 if (is->paused)
1896 av_read_pause(ic);
1897 else
1898 av_read_play(ic);
1899 }
1900 if (is->paused && ic->iformat == &rtsp_demuxer) {
1901 /* wait 10 ms to avoid trying to get another packet */
1902 /* XXX: horrible */
1903 SDL_Delay(10);
1904 continue;
1905 }
1906 #endif
1907 if (is->seek_req) {
1908 /* XXX: must lock decoder threads */
1909 SDL_LockMutex(is->video_decoder_mutex);
1910 SDL_LockMutex(is->audio_decoder_mutex);
1911 SDL_LockMutex(is->subtitle_decoder_mutex);
1912 ret = av_seek_frame(is->ic, -1, is->seek_pos, is->seek_flags);
1913 if (ret < 0) {
1914 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
1915 }else{
1916 if (is->audio_stream >= 0) {
1917 packet_queue_flush(&is->audioq);
1918 }
1919 if (is->subtitle_stream >= 0) {
1920 packet_queue_flush(&is->subtitleq);
1921 }
1922 if (is->video_stream >= 0) {
1923 packet_queue_flush(&is->videoq);
1924 avcodec_flush_buffers(ic->streams[video_index]->codec);
1925 }
1926 }
1927 SDL_UnlockMutex(is->subtitle_decoder_mutex);
1928 SDL_UnlockMutex(is->audio_decoder_mutex);
1929 SDL_UnlockMutex(is->video_decoder_mutex);
1930 is->seek_req = 0;
1931 }
1932
1933 /* if the queues are full, no need to read more */
1934 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
1935 is->videoq.size > MAX_VIDEOQ_SIZE ||
1936 is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
1937 url_feof(&ic->pb)) {
1938 /* wait 10 ms */
1939 SDL_Delay(10);
1940 continue;
1941 }
1942 ret = av_read_frame(ic, pkt);
1943 if (ret < 0) {
1944 if (url_ferror(&ic->pb) == 0) {
1945 SDL_Delay(100); /* wait for user event */
1946 continue;
1947 } else
1948 break;
1949 }
1950 if (pkt->stream_index == is->audio_stream) {
1951 packet_queue_put(&is->audioq, pkt);
1952 } else if (pkt->stream_index == is->video_stream) {
1953 packet_queue_put(&is->videoq, pkt);
1954 } else if (pkt->stream_index == is->subtitle_stream) {
1955 packet_queue_put(&is->subtitleq, pkt);
1956 } else {
1957 av_free_packet(pkt);
1958 }
1959 }
1960 /* wait until the end */
1961 while (!is->abort_request) {
1962 SDL_Delay(100);
1963 }
1964
1965 ret = 0;
1966 fail:
1967 /* disable interrupting */
1968 global_video_state = NULL;
1969
1970 /* close each stream */
1971 if (is->audio_stream >= 0)
1972 stream_component_close(is, is->audio_stream);
1973 if (is->video_stream >= 0)
1974 stream_component_close(is, is->video_stream);
1975 if (is->subtitle_stream >= 0)
1976 stream_component_close(is, is->subtitle_stream);
1977 if (is->ic) {
1978 av_close_input_file(is->ic);
1979 is->ic = NULL; /* safety */
1980 }
1981 url_set_interrupt_cb(NULL);
1982
1983 if (ret != 0) {
1984 SDL_Event event;
1985
1986 event.type = FF_QUIT_EVENT;
1987 event.user.data1 = is;
1988 SDL_PushEvent(&event);
1989 }
1990 return 0;
1991 }
1992
1993 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
1994 {
1995 VideoState *is;
1996
1997 is = av_mallocz(sizeof(VideoState));
1998 if (!is)
1999 return NULL;
2000 pstrcpy(is->filename, sizeof(is->filename), filename);
2001 is->iformat = iformat;
2002 if (screen) {
2003 is->width = screen->w;
2004 is->height = screen->h;
2005 }
2006 is->ytop = 0;
2007 is->xleft = 0;
2008
2009 /* start video display */
2010 is->pictq_mutex = SDL_CreateMutex();
2011 is->pictq_cond = SDL_CreateCond();
2012
2013 is->subpq_mutex = SDL_CreateMutex();
2014 is->subpq_cond = SDL_CreateCond();
2015
2016 is->subtitle_decoder_mutex = SDL_CreateMutex();
2017 is->audio_decoder_mutex = SDL_CreateMutex();
2018 is->video_decoder_mutex = SDL_CreateMutex();
2019
2020 /* add the refresh timer to draw the picture */
2021 schedule_refresh(is, 40);
2022
2023 is->av_sync_type = av_sync_type;
2024 is->parse_tid = SDL_CreateThread(decode_thread, is);
2025 if (!is->parse_tid) {
2026 av_free(is);
2027 return NULL;
2028 }
2029 return is;
2030 }
2031
2032 static void stream_close(VideoState *is)
2033 {
2034 VideoPicture *vp;
2035 int i;
2036 /* XXX: use a special url_shutdown call to abort parse cleanly */
2037 is->abort_request = 1;
2038 SDL_WaitThread(is->parse_tid, NULL);
2039
2040 /* free all pictures */
2041 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2042 vp = &is->pictq[i];
2043 if (vp->bmp) {
2044 SDL_FreeYUVOverlay(vp->bmp);
2045 vp->bmp = NULL;
2046 }
2047 }
2048 SDL_DestroyMutex(is->pictq_mutex);
2049 SDL_DestroyCond(is->pictq_cond);
2050 SDL_DestroyMutex(is->subpq_mutex);
2051 SDL_DestroyCond(is->subpq_cond);
2052 SDL_DestroyMutex(is->subtitle_decoder_mutex);
2053 SDL_DestroyMutex(is->audio_decoder_mutex);
2054 SDL_DestroyMutex(is->video_decoder_mutex);
2055 }
2056
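/* Switch to the next stream of the given codec type (audio, video or subtitle).
   Subtitle streams may cycle back to "none" (stream_index = -1); audio streams
   are only selected if they have a valid sample rate and channel count. */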
2057 static void stream_cycle_channel(VideoState *is, int codec_type)
2058 {
2059 AVFormatContext *ic = is->ic;
2060 int start_index, stream_index;
2061 AVStream *st;
2062
2063 if (codec_type == CODEC_TYPE_VIDEO)
2064 start_index = is->video_stream;
2065 else if (codec_type == CODEC_TYPE_AUDIO)
2066 start_index = is->audio_stream;
2067 else
2068 start_index = is->subtitle_stream;
2069 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2070 return;
2071 stream_index = start_index;
2072 for(;;) {
2073 if (++stream_index >= is->ic->nb_streams)
2074 {
2075 if (codec_type == CODEC_TYPE_SUBTITLE)
2076 {
2077 stream_index = -1;
2078 goto the_end;
2079 } else
2080 stream_index = 0;
2081 }
2082 if (stream_index == start_index)
2083 return;
2084 st = ic->streams[stream_index];
2085 if (st->codec->codec_type == codec_type) {
2086 /* check that parameters are OK */
2087 switch(codec_type) {
2088 case CODEC_TYPE_AUDIO:
2089 if (st->codec->sample_rate != 0 &&
2090 st->codec->channels != 0)
2091 goto the_end;
2092 break;
2093 case CODEC_TYPE_VIDEO:
2094 case CODEC_TYPE_SUBTITLE:
2095 goto the_end;
2096 default:
2097 break;
2098 }
2099 }
2100 }
2101 the_end:
2102 stream_component_close(is, start_index);
2103 stream_component_open(is, stream_index);
2104 }
2105
2106
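/* Toggle between windowed and full-screen display. If a full-screen resolution
   was recorded, recreate the SDL surface at that size; otherwise fall back to
   SDL_WM_ToggleFullScreen(). */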
2107 static void toggle_full_screen(void)
2108 {
2109 int w, h, flags;
2110 is_full_screen = !is_full_screen;
2111 if (!fs_screen_width) {
2112 /* use default SDL method */
2113 SDL_WM_ToggleFullScreen(screen);
2114 } else {
2115 /* use the recorded resolution */
2116 flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
2117 if (is_full_screen) {
2118 w = fs_screen_width;
2119 h = fs_screen_height;
2120 flags |= SDL_FULLSCREEN;
2121 } else {
2122 w = screen_width;
2123 h = screen_height;
2124 flags |= SDL_RESIZABLE;
2125 }
2126 screen = SDL_SetVideoMode(w, h, 0, flags);
2127 cur_stream->width = w;
2128 cur_stream->height = h;
2129 }
2130 }
2131
2132 static void toggle_pause(void)
2133 {
2134 if (cur_stream)
2135 stream_pause(cur_stream);
2136 step = 0;
2137 }
2138
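/* Single-frame stepping: unpause if necessary and set the 'step' flag; the
   video refresh path is expected to pause again once the next frame has been
   displayed. */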
2139 static void step_to_next_frame(void)
2140 {
2141 if (cur_stream) {
2142 if (cur_stream->paused)
2143 cur_stream->paused=0;
2144 cur_stream->video_current_pts = get_video_clock(cur_stream);
2145 }
2146 step = 1;
2147 }
2148
2149 static void do_exit(void)
2150 {
2151 if (cur_stream) {
2152 stream_close(cur_stream);
2153 cur_stream = NULL;
2154 }
2155 if (show_status)
2156 printf("\n");
2157 SDL_Quit();
2158 exit(0);
2159 }
2160
2161 static void toggle_audio_display(void)
2162 {
2163 if (cur_stream) {
2164 cur_stream->show_audio = !cur_stream->show_audio;
2165 }
2166 }
2167
2168 /* handle an event sent by the GUI */
2169 static void event_loop(void)
2170 {
2171 SDL_Event event;
2172 double incr, pos, frac;
2173
2174 for(;;) {
2175 SDL_WaitEvent(&event);
2176 switch(event.type) {
2177 case SDL_KEYDOWN:
2178 switch(event.key.keysym.sym) {
2179 case SDLK_ESCAPE:
2180 case SDLK_q:
2181 do_exit();
2182 break;
2183 case SDLK_f:
2184 toggle_full_screen();
2185 break;
2186 case SDLK_p:
2187 case SDLK_SPACE:
2188 toggle_pause();
2189 break;
2190 case SDLK_s: //S: Step to next frame
2191 step_to_next_frame();
2192 break;
2193 case SDLK_a:
2194 if (cur_stream)
2195 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2196 break;
2197 case SDLK_v:
2198 if (cur_stream)
2199 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2200 break;
2201 case SDLK_t:
2202 if (cur_stream)
2203 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2204 break;
2205 case SDLK_w:
2206 toggle_audio_display();
2207 break;
2208 case SDLK_LEFT:
2209 incr = -10.0;
2210 goto do_seek;
2211 case SDLK_RIGHT:
2212 incr = 10.0;
2213 goto do_seek;
2214 case SDLK_UP:
2215 incr = 60.0;
2216 goto do_seek;
2217 case SDLK_DOWN:
2218 incr = -60.0;
2219 do_seek:
2220 if (cur_stream) {
2221 pos = get_master_clock(cur_stream);
2222 pos += incr;
2223 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2224 }
2225 break;
2226 default:
2227 break;
2228 }
2229 break;
2230 case SDL_MOUSEBUTTONDOWN:
2231 if (cur_stream) {
2232 int ns, hh, mm, ss;
2233 int tns, thh, tmm, tss;
2234 tns = cur_stream->ic->duration/1000000LL;
2235 thh = tns/3600;
2236 tmm = (tns%3600)/60;
2237 tss = (tns%60);
2238 frac = (double)event.button.x/(double)cur_stream->width;
2239 ns = frac*tns;
2240 hh = ns/3600;
2241 mm = (ns%3600)/60;
2242 ss = (ns%60);
2243 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2244 hh, mm, ss, thh, tmm, tss);
2245 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2246 }
2247 break;
2248 case SDL_VIDEORESIZE:
2249 if (cur_stream) {
2250 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2251 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2252 cur_stream->width = event.resize.w;
2253 cur_stream->height = event.resize.h;
2254 }
2255 break;
2256 case SDL_QUIT:
2257 case FF_QUIT_EVENT:
2258 do_exit();
2259 break;
2260 case FF_ALLOC_EVENT:
2261 alloc_picture(event.user.data1);
2262 break;
2263 case FF_REFRESH_EVENT:
2264 video_refresh_timer(event.user.data1);
2265 break;
2266 default:
2267 break;
2268 }
2269 }
2270 }
2271
2272 void opt_width(const char *arg)
2273 {
2274 screen_width = atoi(arg);
2275 }
2276
2277 void opt_height(const char *arg)
2278 {
2279 screen_height = atoi(arg);
2280 }
2281
2282 static void opt_format(const char *arg)
2283 {
2284 file_iformat = av_find_input_format(arg);
2285 if (!file_iformat) {
2286 fprintf(stderr, "Unknown input format: %s\n", arg);
2287 exit(1);
2288 }
2289 }
2290
2291 static void opt_image_format(const char *arg)
2292 {
2293 AVImageFormat *f;
2294
2295 for(f = first_image_format; f != NULL; f = f->next) {
2296 if (!strcmp(arg, f->name))
2297 break;
2298 }
2299 if (!f) {
2300 fprintf(stderr, "Unknown image format: '%s'\n", arg);
2301 exit(1);
2302 }
2303 image_format = f;
2304 }
2305
2306 #ifdef CONFIG_NETWORK
2307 void opt_rtp_tcp(void)
2308 {
2309 /* use RTP over TCP only */
2310 rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
2311 }
2312 #endif
2313
2314 void opt_sync(const char *arg)
2315 {
2316 if (!strcmp(arg, "audio"))
2317 av_sync_type = AV_SYNC_AUDIO_MASTER;
2318 else if (!strcmp(arg, "video"))
2319 av_sync_type = AV_SYNC_VIDEO_MASTER;
2320 else if (!strcmp(arg, "ext"))
2321 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2322 else
2323 show_help();
2324 }
2325
2326 void opt_seek(const char *arg)
2327 {
2328 start_time = parse_date(arg, 1);
2329 }
2330
2331 static void opt_debug(const char *arg)
2332 {
2333 av_log_set_level(99);
2334 debug = atoi(arg);
2335 }
2336
2337 static void opt_vismv(const char *arg)
2338 {
2339 debug_mv = atoi(arg);
2340 }
2341
2342 static void opt_thread_count(const char *arg)
2343 {
2344 thread_count= atoi(arg);
2345 #if !defined(HAVE_THREADS)
2346 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2347 #endif
2348 }
2349
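/* Command line options; the OptionDef flags (HAS_ARG, OPT_BOOL, OPT_INT,
   OPT_EXPERT) are defined in cmdutils.h. */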
2350 const OptionDef options[] = {
2351 { "h", 0, {(void*)show_help}, "show help" },
2352 { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2353 { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2354 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2355 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2356 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2357 { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2358 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2359 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2360 { "img", HAS_ARG, {(void*)opt_image_format}, "force image format", "img_fmt" },
2361 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2362 { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2363 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "work around encoder bugs", "" },
2364 { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2365 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non-spec-compliant optimizations", "" },
2366 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2367 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "decode at reduced resolution (1=half, 2=quarter, ...)", "factor" },
2368 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "skip loop filtering for selected frames", "mode" },
2369 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "skip decoding of selected frames", "mode" },
2370 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "skip the IDCT for selected frames", "mode" },
2371 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set IDCT algorithm", "algo" },
2372 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2373 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2374 #ifdef CONFIG_NETWORK
2375 { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
2376 #endif
2377 { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync type (type=audio/video/ext)", "type" },
2378 { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2379 { NULL, },
2380 };
2381
2382 void show_help(void)
2383 {
2384 printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003 Fabrice Bellard\n"
2385 "usage: ffplay [options] input_file\n"
2386 "Simple media player\n");
2387 printf("\n");
2388 show_help_options(options, "Main options:\n",
2389 OPT_EXPERT, 0);
2390 show_help_options(options, "\nAdvanced options:\n",
2391 OPT_EXPERT, OPT_EXPERT);
2392 printf("\nWhile playing:\n"
2393 "q, ESC quit\n"
2394 "f toggle full screen\n"
2395 "p, SPC pause\n"
2396 "a cycle audio channel\n"
2397 "v cycle video channel\n"
2398 "t cycle subtitle channel\n"
2399 "w toggle audio waveform display\n"
2400 "left/right seek backward/forward 10 seconds\n"
2401 "down/up seek backward/forward 1 minute\n"
2402 "mouse click seek to the position corresponding to the fraction of the window width clicked\n"
2403 );
2404 exit(1);
2405 }
2406
2407 void parse_arg_file(const char *filename)
2408 {
2409 if (!strcmp(filename, "-"))
2410 filename = "pipe:";
2411 input_filename = filename;
2412 }
2413
2414 /* program entry point */
2415 int main(int argc, char **argv)
2416 {
2417 int flags, w, h;
2418
2419 /* register all codecs, demuxers and protocols */
2420 av_register_all();
2421
2422 #ifdef CONFIG_OS2
2423 MorphToPM(); // Morph the VIO application to a PM one to be able to use Win* functions
2424
2425 // Make stdout and stderr unbuffered
2426 setbuf( stdout, NULL );
2427 setbuf( stderr, NULL );
2428 #endif
2429
2430 parse_options(argc, argv, options);
2431
2432 if (!input_filename)
2433 show_help();
2434
2435 if (display_disable) {
2436 video_disable = 1;
2437 }
2438 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2439 #if !defined(__MINGW32__) && !defined(CONFIG_DARWIN)
2440 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 or darwin */
2441 #endif
2442 if (SDL_Init (flags)) {
2443 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2444 exit(1);
2445 }
2446
2447 if (!display_disable) {
2448 #ifdef HAVE_SDL_VIDEO_SIZE
2449 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2450 fs_screen_width = vi->current_w;
2451 fs_screen_height = vi->current_h;
2452 #endif
2453 flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
2454 if (is_full_screen && fs_screen_width) {
2455 w = fs_screen_width;
2456 h = fs_screen_height;
2457 flags |= SDL_FULLSCREEN;
2458 } else {
2459 w = screen_width;
2460 h = screen_height;
2461 flags |= SDL_RESIZABLE;
2462 }
2463 #ifndef CONFIG_DARWIN
2464 screen = SDL_SetVideoMode(w, h, 0, flags);
2465 #else
2466 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
2467 screen = SDL_SetVideoMode(w, h, 24, flags);
2468 #endif
2469 if (!screen) {
2470 fprintf(stderr, "SDL: could not set video mode - exiting\n");
2471 exit(1);
2472 }
2473 SDL_WM_SetCaption("FFplay", "FFplay");
2474 }
2475
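/* Drop SDL events that ffplay does not handle, so SDL_WaitEvent() in the
   event loop is not woken up needlessly. */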
2476 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2477 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2478 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2479 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2480
2481 cur_stream = stream_open(input_filename, file_iformat);
2482
2483 event_loop();
2484
2485 /* never returns */
2486
2487 return 0;
2488 }