libavdevice/v4l.c
/*
 * Linux video grab interface
 * Copyright (c) 2000,2001 Fabrice Bellard.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avformat.h"
#include "dsputil.h"
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>     /* errno, used for error reporting below */
#include <string.h>    /* strerror(), memcpy() */
#include <strings.h>   /* strcasecmp() */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>
/* keep <linux/videodev.h> from pulling in <linux/time.h>, which clashes with <sys/time.h> */
#define _LINUX_TIME_H 1
#include <linux/videodev.h>
#include <time.h>
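/* Capture context stored in AVFormatContext.priv_data: the device fd, the
   negotiated V4L palette and frame geometry, timing state for frame pacing,
   and the mmap bookkeeping used when memory-mapped capture is available. */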
typedef struct {
    int fd;
    int frame_format; /* see VIDEO_PALETTE_xxx */
    int use_mmap;
    int width, height;
    int frame_rate;
    int frame_rate_base;
    int64_t time_frame;
    int frame_size;
    struct video_capability video_cap;
    struct video_audio audio_saved;
    uint8_t *video_buf;
    struct video_mbuf gb_buffers;
    struct video_mmap gb_buf;
    int gb_frame;
} VideoData;
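/* Mapping between V4L palette ids, their bit depth and the corresponding
   libavcodec pixel formats. The entries are tried in declaration order when
   the palette requested by the caller is unknown or rejected by the driver. */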
static const struct {
    int palette;
    int depth;
    enum PixelFormat pix_fmt;
} video_formats [] = {
    {.palette = VIDEO_PALETTE_YUV420P, .depth = 12, .pix_fmt = PIX_FMT_YUV420P },
    {.palette = VIDEO_PALETTE_YUV422,  .depth = 16, .pix_fmt = PIX_FMT_YUYV422 },
    {.palette = VIDEO_PALETTE_UYVY,    .depth = 16, .pix_fmt = PIX_FMT_UYVY422 },
    {.palette = VIDEO_PALETTE_YUYV,    .depth = 16, .pix_fmt = PIX_FMT_YUYV422 },
    /* NOTE: v4l uses BGR24, not RGB24 */
    {.palette = VIDEO_PALETTE_RGB24,   .depth = 24, .pix_fmt = PIX_FMT_BGR24   },
    {.palette = VIDEO_PALETTE_RGB565,  .depth = 16, .pix_fmt = PIX_FMT_BGR565  },
    {.palette = VIDEO_PALETTE_GREY,    .depth =  8, .pix_fmt = PIX_FMT_GRAY8   },
};
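/* Open the V4L device named by s1->filename, probe its capabilities,
   negotiate a palette from video_formats, and set up either mmap-based
   (VIDIOCGMBUF/VIDIOCMCAPTURE) or read()-based capture. Also creates the
   single raw-video stream and derives frame_size from the chosen depth. */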
static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    VideoData *s = s1->priv_data;
    AVStream *st;
    int width, height;
    int video_fd, frame_size;
    int ret, frame_rate, frame_rate_base;
    int desired_palette, desired_depth;
    struct video_tuner tuner;
    struct video_audio audio;
    struct video_picture pict;
    int j;
    int vformat_num = sizeof(video_formats) / sizeof(video_formats[0]);

    if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
        av_log(s1, AV_LOG_ERROR, "Bad capture size (%dx%d) or wrong time base (%d)\n",
               ap->width, ap->height, ap->time_base.den);

        return -1;
    }

    width = ap->width;
    height = ap->height;
    frame_rate = ap->time_base.den;
    frame_rate_base = ap->time_base.num;

    if ((unsigned)width > 32767 || (unsigned)height > 32767) {
        av_log(s1, AV_LOG_ERROR, "Capture size is out of range: %dx%d\n",
               width, height);

        return -1;
    }

    st = av_new_stream(s1, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    s->width = width;
    s->height = height;
    s->frame_rate = frame_rate;
    s->frame_rate_base = frame_rate_base;

    video_fd = open(s1->filename, O_RDWR);
    if (video_fd < 0) {
        av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
        goto fail;
    }

    if (ioctl(video_fd, VIDIOCGCAP, &s->video_cap) < 0) {
        av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
        goto fail;
    }

    if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
        av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
        goto fail;
    }

    desired_palette = -1;
    desired_depth = -1;
    for (j = 0; j < vformat_num; j++) {
        if (ap->pix_fmt == video_formats[j].pix_fmt) {
            desired_palette = video_formats[j].palette;
            desired_depth = video_formats[j].depth;
            break;
        }
    }

    /* set tv standard */
    if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
        if (!strcasecmp(ap->standard, "pal"))
            tuner.mode = VIDEO_MODE_PAL;
        else if (!strcasecmp(ap->standard, "secam"))
            tuner.mode = VIDEO_MODE_SECAM;
        else
            tuner.mode = VIDEO_MODE_NTSC;
        ioctl(video_fd, VIDIOCSTUNER, &tuner);
    }

    /* unmute audio */
    audio.audio = 0;
    ioctl(video_fd, VIDIOCGAUDIO, &audio);
    memcpy(&s->audio_saved, &audio, sizeof(audio));
    audio.flags &= ~VIDEO_AUDIO_MUTE;
    ioctl(video_fd, VIDIOCSAUDIO, &audio);

    ioctl(video_fd, VIDIOCGPICT, &pict);
#if 0
    printf("v4l: colour=%d hue=%d brightness=%d contrast=%d whiteness=%d\n",
           pict.colour,
           pict.hue,
           pict.brightness,
           pict.contrast,
           pict.whiteness);
#endif
    /* try to choose a suitable video format */
    pict.palette = desired_palette;
    pict.depth = desired_depth;
    if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCSPICT, &pict)) < 0) {
        for (j = 0; j < vformat_num; j++) {
            pict.palette = video_formats[j].palette;
            pict.depth = video_formats[j].depth;
            if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
                break;
        }
        if (j >= vformat_num)
            goto fail1;
    }
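    /* VIDIOCGMBUF succeeds only on drivers that support memory-mapped
       capture buffers; if it fails, fall back to plain read() grabbing
       through a capture window of the requested size. */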
    ret = ioctl(video_fd, VIDIOCGMBUF, &s->gb_buffers);
    if (ret < 0) {
        /* try to use read based access */
        struct video_window win;
        int val;

        win.x = 0;
        win.y = 0;
        win.width = width;
        win.height = height;
        win.chromakey = -1;
        win.flags = 0;

        ioctl(video_fd, VIDIOCSWIN, &win);

        s->frame_format = pict.palette;

        val = 1;
        ioctl(video_fd, VIDIOCCAPTURE, &val);

        s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
        s->use_mmap = 0;
    } else {
        s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ | PROT_WRITE, MAP_SHARED, video_fd, 0);
        if ((unsigned char*)-1 == s->video_buf) {
            s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ | PROT_WRITE, MAP_PRIVATE, video_fd, 0);
            if ((unsigned char*)-1 == s->video_buf) {
                av_log(s1, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
                goto fail;
            }
        }
        s->gb_frame = 0;
        s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;

        /* start to grab the first frame */
        s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
        s->gb_buf.height = height;
        s->gb_buf.width = width;
        s->gb_buf.format = pict.palette;

        ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        if (ret < 0) {
            if (errno != EAGAIN) {
            fail1:
                av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not support suitable format\n");
            } else {
                av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not receive any video signal\n");
            }
            goto fail;
        }
        for (j = 1; j < s->gb_buffers.frames; j++) {
            s->gb_buf.frame = j;
            ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        }
        s->frame_format = s->gb_buf.format;
        s->use_mmap = 1;
    }

    for (j = 0; j < vformat_num; j++) {
        if (s->frame_format == video_formats[j].palette) {
            frame_size = width * height * video_formats[j].depth / 8;
            st->codec->pix_fmt = video_formats[j].pix_fmt;
            break;
        }
    }

    if (j >= vformat_num)
        goto fail;

    s->fd = video_fd;
    s->frame_size = frame_size;

    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_RAWVIDEO;
    st->codec->width = width;
    st->codec->height = height;
    st->codec->time_base.den = frame_rate;
    st->codec->time_base.num = frame_rate_base;
    st->codec->bit_rate = frame_size * 1/av_q2d(st->codec->time_base) * 8;

    return 0;
 fail:
    if (video_fd >= 0)
        close(video_fd);
    av_free(st);
    return AVERROR(EIO);
}
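/* mmap capture path: wait for the current buffer to be filled (VIDIOCSYNC),
   copy it into the packet, immediately requeue the buffer with VIDIOCMCAPTURE,
   and advance to the next buffer in the ring. */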
static int v4l_mm_read_picture(VideoData *s, uint8_t *buf)
{
    uint8_t *ptr;

    while (ioctl(s->fd, VIDIOCSYNC, &s->gb_frame) < 0 &&
           (errno == EAGAIN || errno == EINTR));

    ptr = s->video_buf + s->gb_buffers.offsets[s->gb_frame];
    memcpy(buf, ptr, s->frame_size);

    /* Setup to capture the next frame */
    s->gb_buf.frame = s->gb_frame;
    if (ioctl(s->fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
        if (errno == EAGAIN)
            av_log(NULL, AV_LOG_ERROR, "Cannot Sync\n");
        else
            av_log(NULL, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
        return AVERROR(EIO);
    }

    /* This is now the grabbing frame */
    s->gb_frame = (s->gb_frame + 1) % s->gb_buffers.frames;

    return s->frame_size;
}
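/* Pace packet delivery to the configured frame rate: s->time_frame stores the
   next frame's deadline scaled by frame_rate/frame_rate_base, so adding
   1000000 advances it by exactly one frame period in microseconds. The loop
   sleeps with nanosleep() until the deadline; if grabbing is more than one
   frame late, the deadline is pushed forward (a frame is dropped). */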
static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    int64_t curtime, delay;
    struct timespec ts;

    /* Calculate the time of the next frame */
    s->time_frame += INT64_C(1000000);

    /* wait based on the frame rate */
    for(;;) {
        curtime = av_gettime();
        delay = s->time_frame * s->frame_rate_base / s->frame_rate - curtime;
        if (delay <= 0) {
            if (delay < INT64_C(-1000000) * s->frame_rate_base / s->frame_rate) {
                /* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
                s->time_frame += INT64_C(1000000);
            }
            break;
        }
        ts.tv_sec = delay / 1000000;
        ts.tv_nsec = (delay % 1000000) * 1000;
        nanosleep(&ts, NULL);
    }

    if (av_new_packet(pkt, s->frame_size) < 0)
        return AVERROR(EIO);

    pkt->pts = curtime;

    /* read one frame */
    if (s->use_mmap) {
        return v4l_mm_read_picture(s, pkt->data);
    } else {
        if (read(s->fd, pkt->data, pkt->size) != pkt->size)
            return AVERROR(EIO);
        return s->frame_size;
    }
}
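/* Unmap the capture buffers if mmap was used, restore the saved audio state
   (forcing the mute flag, since BTTV does not report it correctly), and close
   the device. */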
static int grab_read_close(AVFormatContext *s1)
{
    VideoData *s = s1->priv_data;

    if (s->use_mmap)
        munmap(s->video_buf, s->gb_buffers.size);

    /* mute audio. we must force it because the BTTV driver does not
       return its state correctly */
    s->audio_saved.flags |= VIDEO_AUDIO_MUTE;
    ioctl(s->fd, VIDIOCSAUDIO, &s->audio_saved);

    close(s->fd);
    return 0;
}
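/* Demuxer registration. AVFMT_NOFILE is set because the "filename" is a
   device node that this demuxer opens itself rather than going through the
   generic I/O layer. A typical invocation (illustrative, assuming a device at
   /dev/video0) would be:
       ffmpeg -f video4linux -s 320x240 -r 25 -i /dev/video0 out.avi
   since grab_read_header() requires the size and frame rate to be given. */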
AVInputFormat v4l_demuxer = {
    "video4linux",
    "video grab",
    sizeof(VideoData),
    NULL,
    grab_read_header,
    grab_read_packet,
    grab_read_close,
    .flags = AVFMT_NOFILE,
};