Commit | Line | Data |
---|---|---|
85f07f22 FB |
1 | /* |
2 | * Multiple format streaming server | |
773a21b8 | 3 | * Copyright (c) 2000, 2001, 2002 Fabrice Bellard |
85f07f22 | 4 | * |
773a21b8 FB |
5 | * This library is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU Lesser General Public | |
7 | * License as published by the Free Software Foundation; either | |
8 | * version 2 of the License, or (at your option) any later version. | |
85f07f22 | 9 | * |
773a21b8 | 10 | * This library is distributed in the hope that it will be useful, |
85f07f22 | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
773a21b8 FB |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | * Lesser General Public License for more details. | |
85f07f22 | 14 | * |
773a21b8 FB |
15 | * You should have received a copy of the GNU Lesser General Public |
16 | * License along with this library; if not, write to the Free Software | |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
85f07f22 | 18 | */ |
773a21b8 FB |
19 | #define HAVE_AV_CONFIG_H |
20 | #include "avformat.h" | |
21 | ||
85f07f22 | 22 | #include <stdarg.h> |
85f07f22 FB |
23 | #include <unistd.h> |
24 | #include <fcntl.h> | |
25 | #include <sys/ioctl.h> | |
26 | #include <sys/poll.h> | |
27 | #include <errno.h> | |
28 | #include <sys/time.h> | |
29 | #include <time.h> | |
85f07f22 FB |
30 | #include <sys/types.h> |
31 | #include <sys/socket.h> | |
5eb765ef | 32 | #include <sys/wait.h> |
9c938e77 | 33 | #include <netinet/in.h> |
b8a78f41 | 34 | #include <arpa/inet.h> |
85f07f22 | 35 | #include <netdb.h> |
85f07f22 | 36 | #include <signal.h> |
6638d424 | 37 | #ifdef CONFIG_HAVE_DLFCN |
2effd274 | 38 | #include <dlfcn.h> |
6638d424 | 39 | #endif |
2effd274 FB |
40 | |
41 | #include "ffserver.h" | |
85f07f22 FB |
42 | |
43 | /* maximum number of simultaneous HTTP connections */ | |
44 | #define HTTP_MAX_CONNECTIONS 2000 | |
45 | ||
46 | enum HTTPState { | |
47 | HTTPSTATE_WAIT_REQUEST, | |
48 | HTTPSTATE_SEND_HEADER, | |
49 | HTTPSTATE_SEND_DATA_HEADER, | |
2effd274 | 50 | HTTPSTATE_SEND_DATA, /* sending TCP or UDP data */ |
85f07f22 | 51 | HTTPSTATE_SEND_DATA_TRAILER, |
2effd274 FB |
52 | HTTPSTATE_RECEIVE_DATA, |
53 | HTTPSTATE_WAIT_FEED, /* wait for data from the feed */ | |
54 | HTTPSTATE_WAIT, /* wait before sending next packets */ | |
55 | HTTPSTATE_WAIT_SHORT, /* short wait for short term | |
56 | bandwidth limitation */ | |
57 | HTTPSTATE_READY, | |
58 | ||
59 | RTSPSTATE_WAIT_REQUEST, | |
60 | RTSPSTATE_SEND_REPLY, | |
bc351386 | 61 | RTSPSTATE_SEND_PACKET, |
85f07f22 FB |
62 | }; |
63 | ||
64 | const char *http_state[] = { | |
2effd274 FB |
65 | "HTTP_WAIT_REQUEST", |
66 | "HTTP_SEND_HEADER", | |
67 | ||
85f07f22 FB |
68 | "SEND_DATA_HEADER", |
69 | "SEND_DATA", | |
70 | "SEND_DATA_TRAILER", | |
71 | "RECEIVE_DATA", | |
72 | "WAIT_FEED", | |
2effd274 FB |
73 | "WAIT", |
74 | "WAIT_SHORT", | |
75 | "READY", | |
76 | ||
77 | "RTSP_WAIT_REQUEST", | |
78 | "RTSP_SEND_REPLY", | |
bc351386 | 79 | "RTSP_SEND_PACKET", |
85f07f22 FB |
80 | }; |
81 | ||
cde25790 | 82 | #define IOBUFFER_INIT_SIZE 8192 |
85f07f22 FB |
83 | |
84 | /* coef for exponential mean for bitrate estimation in statistics */ | |
85 | #define AVG_COEF 0.9 | |
86 | ||
87 | /* timeouts are in ms */ | |
2effd274 FB |
88 | #define HTTP_REQUEST_TIMEOUT (15 * 1000) |
89 | #define RTSP_REQUEST_TIMEOUT (3600 * 24 * 1000) | |
90 | ||
85f07f22 FB |
91 | #define SYNC_TIMEOUT (10 * 1000) |
92 | ||
5eb765ef | 93 | typedef struct { |
0c1a9eda | 94 | int64_t count1, count2; |
5eb765ef PG |
95 | long time1, time2; |
96 | } DataRateData; | |
97 | ||
85f07f22 FB |
98 | /* context associated with one connection */ |
99 | typedef struct HTTPContext { | |
100 | enum HTTPState state; | |
101 | int fd; /* socket file descriptor */ | |
102 | struct sockaddr_in from_addr; /* origin */ | |
103 | struct pollfd *poll_entry; /* used when polling */ | |
104 | long timeout; | |
0c1a9eda | 105 | uint8_t *buffer_ptr, *buffer_end; |
85f07f22 FB |
106 | int http_error; |
107 | struct HTTPContext *next; | |
42a63c6a | 108 | int got_key_frame; /* stream 0 => 1, stream 1 => 2, stream 2 => 4 */
0c1a9eda | 109 | int64_t data_count; |
85f07f22 FB |
110 | /* feed input */ |
111 | int feed_fd; | |
112 | /* input format handling */ | |
113 | AVFormatContext *fmt_in; | |
2effd274 | 114 | long start_time; /* In milliseconds - this wraps fairly often */ |
0c1a9eda | 115 | int64_t first_pts; /* initial pts value */ |
1bc1cfdd | 116 | int64_t cur_pts; /* current pts value */ |
2effd274 | 117 | int pts_stream_index; /* stream we choose as clock reference */ |
85f07f22 FB |
118 | /* output format handling */ |
119 | struct FFStream *stream; | |
cde25790 PG |
120 | /* -1 is invalid stream */ |
121 | int feed_streams[MAX_STREAMS]; /* index of streams in the feed */ | |
122 | int switch_feed_streams[MAX_STREAMS]; /* index of streams in the feed */ | |
123 | int switch_pending; | |
2effd274 | 124 | AVFormatContext fmt_ctx; /* instance of FFStream for one user */ |
85f07f22 | 125 | int last_packet_sent; /* true if last data packet was sent */ |
7434ba6d | 126 | int suppress_log; |
5eb765ef | 127 | DataRateData datarate; |
3120d2a2 | 128 | int wmp_client_id; |
7434ba6d PG |
129 | char protocol[16]; |
130 | char method[16]; | |
131 | char url[128]; | |
cde25790 | 132 | int buffer_size; |
0c1a9eda | 133 | uint8_t *buffer; |
2effd274 FB |
134 | int is_packetized; /* if true, the stream is packetized */ |
135 | int packet_stream_index; /* current stream for output in state machine */ | |
136 | ||
137 | /* RTSP state specific */ | |
0c1a9eda | 138 | uint8_t *pb_buffer; /* XXX: use that in all the code */ |
2effd274 FB |
139 | ByteIOContext *pb; |
140 | int seq; /* RTSP sequence number */ | |
141 | ||
142 | /* RTP state specific */ | |
143 | enum RTSPProtocol rtp_protocol; | |
144 | char session_id[32]; /* session id */ | |
145 | AVFormatContext *rtp_ctx[MAX_STREAMS]; | |
2effd274 FB |
146 | /* RTP short term bandwidth limitation */ |
147 | int packet_byte_count; | |
148 | int packet_start_time_us; /* used for short durations (a few | |
149 | seconds max) */ | |
bc351386 FB |
150 | /* RTP/UDP specific */ |
151 | URLContext *rtp_handles[MAX_STREAMS]; | |
152 | ||
153 | /* RTP/TCP specific */ | |
154 | struct HTTPContext *rtsp_c; | |
155 | uint8_t *packet_buffer, *packet_buffer_ptr, *packet_buffer_end; | |
85f07f22 FB |
156 | } HTTPContext; |
157 | ||
a4d70941 PG |
158 | static AVFrame dummy_frame; |
159 | ||
85f07f22 FB |
160 | /* each generated stream is described here */ |
161 | enum StreamType { | |
162 | STREAM_TYPE_LIVE, | |
163 | STREAM_TYPE_STATUS, | |
cde25790 | 164 | STREAM_TYPE_REDIRECT, |
85f07f22 FB |
165 | }; |
166 | ||
8256c0a3 PG |
167 | enum IPAddressAction { |
168 | IP_ALLOW = 1, | |
169 | IP_DENY, | |
170 | }; | |
171 | ||
172 | typedef struct IPAddressACL { | |
173 | struct IPAddressACL *next; | |
174 | enum IPAddressAction action; | |
efa04ce2 | 175 | /* These are in host order */ |
8256c0a3 PG |
176 | struct in_addr first; |
177 | struct in_addr last; | |
178 | } IPAddressACL; | |
179 | ||
85f07f22 FB |
180 | /* description of each stream of the ffserver.conf file */ |
181 | typedef struct FFStream { | |
182 | enum StreamType stream_type; | |
183 | char filename[1024]; /* stream filename */ | |
2effd274 FB |
184 | struct FFStream *feed; /* feed we are using (can be null if |
185 | coming from file) */ | |
bd7cf6ad | 186 | AVOutputFormat *fmt; |
8256c0a3 | 187 | IPAddressACL *acl; |
85f07f22 | 188 | int nb_streams; |
42a63c6a | 189 | int prebuffer; /* Number of milliseconds early to start */
2ac887ba | 190 | long max_time; /* Number of milliseconds to run */ |
79c4ea3c | 191 | int send_on_key; |
85f07f22 FB |
192 | AVStream *streams[MAX_STREAMS]; |
193 | int feed_streams[MAX_STREAMS]; /* index of streams in the feed */ | |
194 | char feed_filename[1024]; /* file name of the feed storage, or | |
195 | input file name for a stream */ | |
2ac887ba PG |
196 | char author[512]; |
197 | char title[512]; | |
198 | char copyright[512]; | |
199 | char comment[512]; | |
cde25790 | 200 | pid_t pid; /* Of ffmpeg process */ |
5eb765ef | 201 | time_t pid_start; /* Of ffmpeg process */ |
cde25790 | 202 | char **child_argv; |
85f07f22 | 203 | struct FFStream *next; |
6edd6884 | 204 | int bandwidth; /* bandwidth, in kbits/s */ |
2effd274 FB |
205 | /* RTSP options */ |
206 | char *rtsp_option; | |
829ac53d FB |
207 | /* multicast specific */ |
208 | int is_multicast; | |
209 | struct in_addr multicast_ip; | |
210 | int multicast_port; /* first port used for multicast */ | |
6edd6884 FB |
211 | int multicast_ttl; |
212 | int loop; /* if true, send the stream in loops (only meaningful if file) */ | |
829ac53d | 213 | |
85f07f22 | 214 | /* feed specific */ |
2effd274 | 215 | int feed_opened; /* true if someone is writing to the feed */ |
85f07f22 | 216 | int is_feed; /* true if it is a feed */ |
e322ea48 | 217 | int readonly; /* True if writing to the file is prohibited */
a6e14edd | 218 | int conns_served; |
0c1a9eda ZK |
219 | int64_t bytes_served; |
220 | int64_t feed_max_size; /* maximum storage size */ | |
221 | int64_t feed_write_index; /* current write position in feed (it wraps round) */ | |
222 | int64_t feed_size; /* current size of feed */ | |
85f07f22 FB |
223 | struct FFStream *next_feed; |
224 | } FFStream; | |
225 | ||
226 | typedef struct FeedData { | |
227 | long long data_count; | |
228 | float avg_frame_size; /* frame size averaged over last frames with exponential mean */ |
229 | } FeedData; | |
230 | ||
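The avg_frame_size field above is an exponentially weighted mean driven by the AVG_COEF factor (0.9) defined near the top of the file. A minimal standalone sketch of that update rule, with invented frame sizes (the helper name is hypothetical; the actual update site is further down in the file, outside this excerpt):

    #include <stdio.h>

    #define AVG_COEF 0.9

    /* exponential mean: each update keeps 90% of the old average and
       mixes in 10% of the new sample */
    static float update_avg_frame_size(float avg, int frame_size)
    {
        return avg * AVG_COEF + frame_size * (1.0 - AVG_COEF);
    }

    int main(void)
    {
        float avg = 0;
        int sizes[4] = { 1200, 800, 1500, 900 }, i;

        for (i = 0; i < 4; i++) {
            avg = update_avg_frame_size(avg, sizes[i]);
            printf("after frame %d: avg_frame_size = %.1f\n", i, avg);
        }
        return 0;
    }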
2effd274 FB |
231 | struct sockaddr_in my_http_addr; |
232 | struct sockaddr_in my_rtsp_addr; | |
233 | ||
85f07f22 FB |
234 | char logfilename[1024]; |
235 | HTTPContext *first_http_ctx; | |
236 | FFStream *first_feed; /* contains only feeds */ | |
237 | FFStream *first_stream; /* contains all streams, including feeds */ | |
238 | ||
2effd274 FB |
239 | static void new_connection(int server_fd, int is_rtsp); |
240 | static void close_connection(HTTPContext *c); | |
241 | ||
242 | /* HTTP handling */ | |
243 | static int handle_connection(HTTPContext *c); | |
85f07f22 | 244 | static int http_parse_request(HTTPContext *c); |
5eb765ef | 245 | static int http_send_data(HTTPContext *c); |
85f07f22 FB |
246 | static void compute_stats(HTTPContext *c); |
247 | static int open_input_stream(HTTPContext *c, const char *info); | |
248 | static int http_start_receive_data(HTTPContext *c); | |
249 | static int http_receive_data(HTTPContext *c); | |
2effd274 FB |
250 | static int compute_send_delay(HTTPContext *c); |
251 | ||
252 | /* RTSP handling */ | |
253 | static int rtsp_parse_request(HTTPContext *c); | |
254 | static void rtsp_cmd_describe(HTTPContext *c, const char *url); | |
0df65975 | 255 | static void rtsp_cmd_options(HTTPContext *c, const char *url); |
2effd274 FB |
256 | static void rtsp_cmd_setup(HTTPContext *c, const char *url, RTSPHeader *h); |
257 | static void rtsp_cmd_play(HTTPContext *c, const char *url, RTSPHeader *h); | |
258 | static void rtsp_cmd_pause(HTTPContext *c, const char *url, RTSPHeader *h); | |
259 | static void rtsp_cmd_teardown(HTTPContext *c, const char *url, RTSPHeader *h); | |
260 | ||
829ac53d | 261 | /* SDP handling */ |
0c1a9eda | 262 | static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer, |
829ac53d FB |
263 | struct in_addr my_ip); |
264 | ||
2effd274 | 265 | /* RTP handling */ |
6edd6884 | 266 | static HTTPContext *rtp_new_connection(struct sockaddr_in *from_addr, |
bc351386 FB |
267 | FFStream *stream, const char *session_id, |
268 | enum RTSPProtocol rtp_protocol); | |
2effd274 | 269 | static int rtp_new_av_stream(HTTPContext *c, |
bc351386 FB |
270 | int stream_index, struct sockaddr_in *dest_addr, |
271 | HTTPContext *rtsp_c); | |
85f07f22 | 272 | |
cde25790 | 273 | static const char *my_program_name; |
d6562d2c | 274 | static const char *my_program_dir; |
cde25790 | 275 | |
2ac887ba | 276 | static int ffserver_debug; |
2effd274 | 277 | static int ffserver_daemon; |
2ac887ba | 278 | static int no_launch; |
5eb765ef | 279 | static int need_to_start_children; |
2ac887ba | 280 | |
85f07f22 FB |
281 | int nb_max_connections; |
282 | int nb_connections; | |
283 | ||
6edd6884 FB |
284 | int max_bandwidth; |
285 | int current_bandwidth; | |
42a63c6a | 286 | |
5eb765ef PG |
287 | static long cur_time; // Making this global saves on passing it around everywhere |
288 | ||
85f07f22 FB |
289 | static long gettime_ms(void) |
290 | { | |
291 | struct timeval tv; | |
292 | ||
293 | gettimeofday(&tv,NULL); | |
294 | return (long long)tv.tv_sec * 1000 + (tv.tv_usec / 1000); | |
295 | } | |
296 | ||
297 | static FILE *logfile = NULL; | |
298 | ||
bc351386 | 299 | static void __attribute__ ((format (printf, 1, 2))) http_log(const char *fmt, ...) |
85f07f22 FB |
300 | { |
301 | va_list ap; | |
302 | va_start(ap, fmt); | |
303 | ||
7434ba6d | 304 | if (logfile) { |
85f07f22 | 305 | vfprintf(logfile, fmt, ap); |
7434ba6d PG |
306 | fflush(logfile); |
307 | } | |
85f07f22 FB |
308 | va_end(ap); |
309 | } | |
310 | ||
6edd6884 | 311 | static char *ctime1(char *buf2) |
7434ba6d | 312 | { |
7434ba6d | 313 | time_t ti; |
6edd6884 | 314 | char *p; |
7434ba6d | 315 | |
7434ba6d PG |
316 | ti = time(NULL); |
317 | p = ctime(&ti); | |
318 | strcpy(buf2, p); | |
319 | p = buf2 + strlen(p) - 1; | |
320 | if (*p == '\n') | |
321 | *p = '\0'; | |
6edd6884 FB |
322 | return buf2; |
323 | } | |
324 | ||
325 | static void log_connection(HTTPContext *c) | |
326 | { | |
327 | char buf2[32]; | |
328 | ||
329 | if (c->suppress_log) | |
330 | return; | |
331 | ||
cde25790 | 332 | http_log("%s - - [%s] \"%s %s %s\" %d %lld\n", |
6edd6884 FB |
333 | inet_ntoa(c->from_addr.sin_addr), |
334 | ctime1(buf2), c->method, c->url, | |
335 | c->protocol, (c->http_error ? c->http_error : 200), c->data_count); | |
cde25790 PG |
336 | } |
337 | ||
0c1a9eda | 338 | static void update_datarate(DataRateData *drd, int64_t count) |
5eb765ef PG |
339 | { |
340 | if (!drd->time1 && !drd->count1) { | |
341 | drd->time1 = drd->time2 = cur_time; | |
342 | drd->count1 = drd->count2 = count; | |
343 | } else { | |
344 | if (cur_time - drd->time2 > 5000) { | |
345 | drd->time1 = drd->time2; | |
346 | drd->count1 = drd->count2; | |
347 | drd->time2 = cur_time; | |
348 | drd->count2 = count; | |
349 | } | |
350 | } | |
351 | } | |
352 | ||
353 | /* In bytes per second */ | |
0c1a9eda | 354 | static int compute_datarate(DataRateData *drd, int64_t count) |
5eb765ef PG |
355 | { |
356 | if (cur_time == drd->time1) | |
357 | return 0; | |
6edd6884 | 358 | |
5eb765ef PG |
359 | return ((count - drd->count1) * 1000) / (cur_time - drd->time1); |
360 | } | |
361 | ||
0c1a9eda | 362 | static int get_longterm_datarate(DataRateData *drd, int64_t count) |
a782f209 PG |
363 | { |
364 | /* You get the first 3 seconds flat out */ | |
365 | if (cur_time - drd->time1 < 3000) | |
366 | return 0; | |
a782f209 PG |
367 | return compute_datarate(drd, count); |
368 | } | |
369 | ||
370 | ||
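update_datarate() and compute_datarate() above keep only two (time, byte count) reference samples and rotate them every 5 seconds, so the reported rate covers roughly the last 5-10 seconds without storing any history. A self-contained sketch of the same scheme (type and function names here are made up; times are in milliseconds, like cur_time, and the sample values are invented):

    #include <stdio.h>
    #include <stdint.h>

    typedef struct {
        int64_t count1, count2;   /* byte counts at the two reference points */
        long time1, time2;        /* times (ms) of the two reference points */
    } RateWindow;

    /* rotate the reference samples once the newer one is >5s old */
    static void rate_update(RateWindow *w, long now_ms, int64_t count)
    {
        if (!w->time1 && !w->count1) {
            w->time1 = w->time2 = now_ms;
            w->count1 = w->count2 = count;
        } else if (now_ms - w->time2 > 5000) {
            w->time1 = w->time2;
            w->count1 = w->count2;
            w->time2 = now_ms;
            w->count2 = count;
        }
    }

    /* bytes per second measured against the older reference sample */
    static int rate_bps(RateWindow *w, long now_ms, int64_t count)
    {
        if (now_ms == w->time1)
            return 0;
        return ((count - w->count1) * 1000) / (now_ms - w->time1);
    }

    int main(void)
    {
        RateWindow w = { 0, 0, 0, 0 };
        rate_update(&w, 1000, 0);        /* first sample primes both slots */
        rate_update(&w, 7000, 90000);    /* >5s later: the slots rotate */
        printf("%d bytes/s\n", rate_bps(&w, 9000, 160000)); /* (160000-0)/8s = 20000 */
        return 0;
    }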
cde25790 PG |
371 | static void start_children(FFStream *feed) |
372 | { | |
2ac887ba PG |
373 | if (no_launch) |
374 | return; | |
375 | ||
cde25790 | 376 | for (; feed; feed = feed->next) { |
5eb765ef PG |
377 | if (feed->child_argv && !feed->pid) { |
378 | feed->pid_start = time(0); | |
379 | ||
cde25790 PG |
380 | feed->pid = fork(); |
381 | ||
382 | if (feed->pid < 0) { | |
383 | fprintf(stderr, "Unable to create children\n"); | |
384 | exit(1); | |
385 | } | |
386 | if (!feed->pid) { | |
387 | /* In child */ | |
388 | char pathname[1024]; | |
389 | char *slash; | |
390 | int i; | |
391 | ||
5eb765ef PG |
392 | for (i = 3; i < 256; i++) { |
393 | close(i); | |
394 | } | |
cde25790 | 395 | |
5eb765ef | 396 | if (!ffserver_debug) { |
2ac887ba PG |
397 | i = open("/dev/null", O_RDWR); |
398 | if (i) | |
399 | dup2(i, 0); | |
400 | dup2(i, 1); | |
401 | dup2(i, 2); | |
5eb765ef PG |
402 | if (i) |
403 | close(i); | |
2ac887ba | 404 | } |
cde25790 PG |
405 | |
406 | pstrcpy(pathname, sizeof(pathname), my_program_name); | |
407 | ||
408 | slash = strrchr(pathname, '/'); | |
409 | if (!slash) { | |
410 | slash = pathname; | |
411 | } else { | |
412 | slash++; | |
413 | } | |
414 | strcpy(slash, "ffmpeg"); | |
415 | ||
d6562d2c PG |
416 | /* This is needed to make relative pathnames work */ |
417 | chdir(my_program_dir); | |
418 | ||
a4d70941 PG |
419 | signal(SIGPIPE, SIG_DFL); |
420 | ||
cde25790 PG |
421 | execvp(pathname, feed->child_argv); |
422 | ||
423 | _exit(1); | |
424 | } | |
425 | } | |
426 | } | |
7434ba6d PG |
427 | } |
428 | ||
2effd274 FB |
429 | /* open a listening socket */ |
430 | static int socket_open_listen(struct sockaddr_in *my_addr) | |
85f07f22 | 431 | { |
2effd274 | 432 | int server_fd, tmp; |
85f07f22 FB |
433 | |
434 | server_fd = socket(AF_INET,SOCK_STREAM,0); | |
435 | if (server_fd < 0) { | |
436 | perror ("socket"); | |
437 | return -1; | |
438 | } | |
439 | ||
440 | tmp = 1; | |
441 | setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof(tmp)); | |
442 | ||
2effd274 | 443 | if (bind (server_fd, (struct sockaddr *) my_addr, sizeof (*my_addr)) < 0) { |
b17d099d PG |
444 | char bindmsg[32]; |
445 | snprintf(bindmsg, sizeof(bindmsg), "bind(port %d)", ntohs(my_addr->sin_port)); | |
446 | perror (bindmsg); | |
85f07f22 FB |
447 | close(server_fd); |
448 | return -1; | |
449 | } | |
450 | ||
451 | if (listen (server_fd, 5) < 0) { | |
452 | perror ("listen"); | |
453 | close(server_fd); | |
454 | return -1; | |
455 | } | |
2effd274 FB |
456 | fcntl(server_fd, F_SETFL, O_NONBLOCK); |
457 | ||
458 | return server_fd; | |
459 | } | |
460 | ||
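socket_open_listen() leaves filling in the address to the caller; ffserver does that from its port configuration before entering http_server(). A hedged sketch of preparing such an address (port 8090 is only the conventional example port from the sample configuration, not something set in this excerpt; the headers used are already included at the top of the file):

    /* example: listen on all interfaces, TCP port 8090 */
    static int open_http_socket_example(void)
    {
        struct sockaddr_in addr;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(8090);              /* port in network byte order */
        addr.sin_addr.s_addr = htonl(INADDR_ANY); /* 0.0.0.0: any interface */

        return socket_open_listen(&addr);         /* non-blocking listening fd, or -1 */
    }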
6edd6884 FB |
461 | /* start all multicast streams */ |
462 | static void start_multicast(void) | |
463 | { | |
464 | FFStream *stream; | |
465 | char session_id[32]; | |
466 | HTTPContext *rtp_c; | |
467 | struct sockaddr_in dest_addr; | |
468 | int default_port, stream_index; | |
469 | ||
470 | default_port = 6000; | |
471 | for(stream = first_stream; stream != NULL; stream = stream->next) { | |
472 | if (stream->is_multicast) { | |
473 | /* open the RTP connection */ | |
474 | snprintf(session_id, sizeof(session_id), | |
475 | "%08x%08x", (int)random(), (int)random()); | |
476 | ||
477 | /* choose a port if none given */ | |
478 | if (stream->multicast_port == 0) { | |
479 | stream->multicast_port = default_port; | |
480 | default_port += 100; | |
481 | } | |
482 | ||
483 | dest_addr.sin_family = AF_INET; | |
484 | dest_addr.sin_addr = stream->multicast_ip; | |
485 | dest_addr.sin_port = htons(stream->multicast_port); | |
486 | ||
bc351386 FB |
487 | rtp_c = rtp_new_connection(&dest_addr, stream, session_id, |
488 | RTSP_PROTOCOL_RTP_UDP_MULTICAST); | |
6edd6884 FB |
489 | if (!rtp_c) { |
490 | continue; | |
491 | } | |
492 | if (open_input_stream(rtp_c, "") < 0) { | |
493 | fprintf(stderr, "Could not open input stream for stream '%s'\n", | |
494 | stream->filename); | |
495 | continue; | |
496 | } | |
497 | ||
6edd6884 FB |
498 | /* open each RTP stream */ |
499 | for(stream_index = 0; stream_index < stream->nb_streams; | |
500 | stream_index++) { | |
501 | dest_addr.sin_port = htons(stream->multicast_port + | |
502 | 2 * stream_index); | |
bc351386 | 503 | if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr, NULL) < 0) { |
0fa45e19 FB |
504 | fprintf(stderr, "Could not open output stream '%s/streamid=%d'\n", |
505 | stream->filename, stream_index); | |
506 | exit(1); | |
6edd6884 FB |
507 | } |
508 | } | |
509 | ||
510 | /* change state to send data */ | |
511 | rtp_c->state = HTTPSTATE_SEND_DATA; | |
512 | } | |
513 | } | |
514 | } | |
2effd274 FB |
515 | |
516 | /* main loop of the http server */ | |
517 | static int http_server(void) | |
518 | { | |
519 | int server_fd, ret, rtsp_server_fd, delay, delay1; | |
520 | struct pollfd poll_table[HTTP_MAX_CONNECTIONS + 2], *poll_entry; | |
521 | HTTPContext *c, *c_next; | |
522 | ||
523 | server_fd = socket_open_listen(&my_http_addr); | |
524 | if (server_fd < 0) | |
525 | return -1; | |
85f07f22 | 526 | |
2effd274 FB |
527 | rtsp_server_fd = socket_open_listen(&my_rtsp_addr); |
528 | if (rtsp_server_fd < 0) | |
529 | return -1; | |
530 | ||
85f07f22 FB |
531 | http_log("ffserver started.\n"); |
532 | ||
cde25790 PG |
533 | start_children(first_feed); |
534 | ||
85f07f22 FB |
535 | first_http_ctx = NULL; |
536 | nb_connections = 0; | |
6edd6884 FB |
537 | |
538 | start_multicast(); | |
539 | ||
85f07f22 FB |
540 | for(;;) { |
541 | poll_entry = poll_table; | |
542 | poll_entry->fd = server_fd; | |
543 | poll_entry->events = POLLIN; | |
544 | poll_entry++; | |
545 | ||
2effd274 FB |
546 | poll_entry->fd = rtsp_server_fd; |
547 | poll_entry->events = POLLIN; | |
548 | poll_entry++; | |
549 | ||
85f07f22 FB |
550 | /* wait for events on each HTTP handle */ |
551 | c = first_http_ctx; | |
2effd274 | 552 | delay = 1000; |
85f07f22 FB |
553 | while (c != NULL) { |
554 | int fd; | |
555 | fd = c->fd; | |
556 | switch(c->state) { | |
2effd274 FB |
557 | case HTTPSTATE_SEND_HEADER: |
558 | case RTSPSTATE_SEND_REPLY: | |
bc351386 | 559 | case RTSPSTATE_SEND_PACKET: |
85f07f22 FB |
560 | c->poll_entry = poll_entry; |
561 | poll_entry->fd = fd; | |
2effd274 | 562 | poll_entry->events = POLLOUT; |
85f07f22 FB |
563 | poll_entry++; |
564 | break; | |
85f07f22 FB |
565 | case HTTPSTATE_SEND_DATA_HEADER: |
566 | case HTTPSTATE_SEND_DATA: | |
567 | case HTTPSTATE_SEND_DATA_TRAILER: | |
2effd274 FB |
568 | if (!c->is_packetized) { |
569 | /* for TCP, we output as much as we can (may need to put a limit) */ | |
570 | c->poll_entry = poll_entry; | |
571 | poll_entry->fd = fd; | |
572 | poll_entry->events = POLLOUT; | |
573 | poll_entry++; | |
574 | } else { | |
575 | /* not strictly correct, but currently cannot add | |
576 | more than one fd in poll entry */ | |
577 | delay = 0; | |
578 | } | |
85f07f22 | 579 | break; |
2effd274 | 580 | case HTTPSTATE_WAIT_REQUEST: |
85f07f22 | 581 | case HTTPSTATE_RECEIVE_DATA: |
85f07f22 | 582 | case HTTPSTATE_WAIT_FEED: |
2effd274 | 583 | case RTSPSTATE_WAIT_REQUEST: |
85f07f22 FB |
584 | /* need to catch errors */ |
585 | c->poll_entry = poll_entry; | |
586 | poll_entry->fd = fd; | |
a6e14edd | 587 | poll_entry->events = POLLIN; /* Maybe this will work */
85f07f22 FB |
588 | poll_entry++; |
589 | break; | |
2effd274 FB |
590 | case HTTPSTATE_WAIT: |
591 | c->poll_entry = NULL; | |
592 | delay1 = compute_send_delay(c); | |
593 | if (delay1 < delay) | |
594 | delay = delay1; | |
595 | break; | |
596 | case HTTPSTATE_WAIT_SHORT: | |
597 | c->poll_entry = NULL; | |
598 | delay1 = 10; /* one tick wait XXX: 10 ms assumed */ | |
599 | if (delay1 < delay) | |
600 | delay = delay1; | |
601 | break; | |
85f07f22 FB |
602 | default: |
603 | c->poll_entry = NULL; | |
604 | break; | |
605 | } | |
606 | c = c->next; | |
607 | } | |
608 | ||
609 | /* wait for an event on one connection. We poll at least every | |
610 | second to handle timeouts */ | |
611 | do { | |
2effd274 | 612 | ret = poll(poll_table, poll_entry - poll_table, delay); |
85f07f22 FB |
613 | } while (ret == -1); |
614 | ||
615 | cur_time = gettime_ms(); | |
616 | ||
5eb765ef PG |
617 | if (need_to_start_children) { |
618 | need_to_start_children = 0; | |
619 | start_children(first_feed); | |
620 | } | |
621 | ||
85f07f22 | 622 | /* now handle the events */ |
2effd274 FB |
623 | for(c = first_http_ctx; c != NULL; c = c_next) { |
624 | c_next = c->next; | |
625 | if (handle_connection(c) < 0) { | |
85f07f22 | 626 | /* close and free the connection */ |
7434ba6d | 627 | log_connection(c); |
2effd274 | 628 | close_connection(c); |
85f07f22 FB |
629 | } |
630 | } | |
631 | ||
85f07f22 | 632 | poll_entry = poll_table; |
2effd274 | 633 | /* new HTTP connection request ? */ |
85f07f22 | 634 | if (poll_entry->revents & POLLIN) { |
2effd274 | 635 | new_connection(server_fd, 0); |
85f07f22 FB |
636 | } |
637 | poll_entry++; | |
2effd274 FB |
638 | /* new RTSP connection request ? */ |
639 | if (poll_entry->revents & POLLIN) { | |
640 | new_connection(rtsp_server_fd, 1); | |
641 | } | |
85f07f22 FB |
642 | } |
643 | } | |
644 | ||
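http_server() above is a single poll()-driven loop: rebuild the pollfd table each iteration, wait with a bounded delay so timeouts still fire, then dispatch on revents. A stripped-down, self-contained skeleton of that pattern (serve_one_fd is a stand-in for new_connection()/handle_connection(), not a function in this file):

    /* skeleton of the event loop above; uses <sys/poll.h>, already included */
    static void poll_loop_skeleton(int listen_fd, int (*serve_one_fd)(int))
    {
        struct pollfd table[1];
        int ret;

        for (;;) {
            table[0].fd = listen_fd;
            table[0].events = POLLIN;

            ret = poll(table, 1, 1000);    /* wake up at least once per second */
            if (ret < 0)
                continue;                  /* interrupted by a signal: retry */

            if (table[0].revents & POLLIN) {
                if (serve_one_fd(listen_fd) < 0)
                    break;                 /* fatal error: leave the loop */
            }
        }
    }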
2effd274 FB |
645 | /* start waiting for a new HTTP/RTSP request */ |
646 | static void start_wait_request(HTTPContext *c, int is_rtsp) | |
85f07f22 | 647 | { |
2effd274 FB |
648 | c->buffer_ptr = c->buffer; |
649 | c->buffer_end = c->buffer + c->buffer_size - 1; /* leave room for '\0' */ | |
650 | ||
651 | if (is_rtsp) { | |
652 | c->timeout = cur_time + RTSP_REQUEST_TIMEOUT; | |
653 | c->state = RTSPSTATE_WAIT_REQUEST; | |
654 | } else { | |
655 | c->timeout = cur_time + HTTP_REQUEST_TIMEOUT; | |
656 | c->state = HTTPSTATE_WAIT_REQUEST; | |
657 | } | |
658 | } | |
659 | ||
660 | static void new_connection(int server_fd, int is_rtsp) | |
661 | { | |
662 | struct sockaddr_in from_addr; | |
663 | int fd, len; | |
664 | HTTPContext *c = NULL; | |
665 | ||
666 | len = sizeof(from_addr); | |
667 | fd = accept(server_fd, (struct sockaddr *)&from_addr, | |
668 | &len); | |
669 | if (fd < 0) | |
670 | return; | |
671 | fcntl(fd, F_SETFL, O_NONBLOCK); | |
672 | ||
673 | /* XXX: should output a warning page when coming | |
674 | close to the connection limit */ | |
675 | if (nb_connections >= nb_max_connections) | |
676 | goto fail; | |
677 | ||
678 | /* add a new connection */ | |
679 | c = av_mallocz(sizeof(HTTPContext)); | |
680 | if (!c) | |
681 | goto fail; | |
682 | ||
2effd274 FB |
683 | c->fd = fd; |
684 | c->poll_entry = NULL; | |
685 | c->from_addr = from_addr; | |
686 | c->buffer_size = IOBUFFER_INIT_SIZE; | |
687 | c->buffer = av_malloc(c->buffer_size); | |
688 | if (!c->buffer) | |
689 | goto fail; | |
8bc80f8b PG |
690 | |
691 | c->next = first_http_ctx; | |
692 | first_http_ctx = c; | |
2effd274 FB |
693 | nb_connections++; |
694 | ||
695 | start_wait_request(c, is_rtsp); | |
696 | ||
697 | return; | |
698 | ||
699 | fail: | |
700 | if (c) { | |
701 | av_free(c->buffer); | |
702 | av_free(c); | |
703 | } | |
704 | close(fd); | |
705 | } | |
706 | ||
707 | static void close_connection(HTTPContext *c) | |
708 | { | |
709 | HTTPContext **cp, *c1; | |
710 | int i, nb_streams; | |
711 | AVFormatContext *ctx; | |
712 | URLContext *h; | |
713 | AVStream *st; | |
714 | ||
715 | /* remove connection from list */ | |
716 | cp = &first_http_ctx; | |
717 | while ((*cp) != NULL) { | |
718 | c1 = *cp; | |
719 | if (c1 == c) { | |
720 | *cp = c->next; | |
721 | } else { | |
722 | cp = &c1->next; | |
723 | } | |
724 | } | |
725 | ||
bc351386 FB |
726 | /* remove references, if any (XXX: do it faster) */ |
727 | for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) { | |
728 | if (c1->rtsp_c == c) | |
729 | c1->rtsp_c = NULL; | |
730 | } | |
731 | ||
2effd274 FB |
732 | /* remove connection associated resources */ |
733 | if (c->fd >= 0) | |
734 | close(c->fd); | |
735 | if (c->fmt_in) { | |
736 | /* close each frame parser */ | |
737 | for(i=0;i<c->fmt_in->nb_streams;i++) { | |
738 | st = c->fmt_in->streams[i]; | |
739 | if (st->codec.codec) { | |
740 | avcodec_close(&st->codec); | |
741 | } | |
742 | } | |
743 | av_close_input_file(c->fmt_in); | |
744 | } | |
745 | ||
746 | /* free RTP output streams if any */ | |
747 | nb_streams = 0; | |
748 | if (c->stream) | |
749 | nb_streams = c->stream->nb_streams; | |
750 | ||
751 | for(i=0;i<nb_streams;i++) { | |
752 | ctx = c->rtp_ctx[i]; | |
753 | if (ctx) { | |
754 | av_write_trailer(ctx); | |
755 | av_free(ctx); | |
756 | } | |
757 | h = c->rtp_handles[i]; | |
758 | if (h) { | |
759 | url_close(h); | |
760 | } | |
761 | } | |
bc351386 | 762 | |
b88ba823 MH |
763 | ctx = &c->fmt_ctx; |
764 | ||
87638494 | 765 | if (!c->last_packet_sent) { |
87638494 PG |
766 | if (ctx->oformat) { |
767 | /* prepare trailer */
768 | if (url_open_dyn_buf(&ctx->pb) >= 0) { | |
769 | av_write_trailer(ctx); | |
bc351386 | 770 | url_close_dyn_buf(&ctx->pb, &c->pb_buffer); |
87638494 PG |
771 | } |
772 | } | |
773 | } | |
774 | ||
f0ef6240 PG |
775 | for(i=0; i<ctx->nb_streams; i++) |
776 | av_free(ctx->streams[i]) ; | |
777 | ||
6edd6884 FB |
778 | if (c->stream) |
779 | current_bandwidth -= c->stream->bandwidth; | |
2effd274 | 780 | av_freep(&c->pb_buffer); |
bc351386 | 781 | av_freep(&c->packet_buffer); |
2effd274 FB |
782 | av_free(c->buffer); |
783 | av_free(c); | |
784 | nb_connections--; | |
785 | } | |
786 | ||
787 | static int handle_connection(HTTPContext *c) | |
788 | { | |
789 | int len, ret; | |
85f07f22 FB |
790 | |
791 | switch(c->state) { | |
792 | case HTTPSTATE_WAIT_REQUEST: | |
2effd274 | 793 | case RTSPSTATE_WAIT_REQUEST: |
85f07f22 FB |
794 | /* timeout ? */ |
795 | if ((c->timeout - cur_time) < 0) | |
796 | return -1; | |
797 | if (c->poll_entry->revents & (POLLERR | POLLHUP)) | |
798 | return -1; | |
799 | ||
800 | /* no need to read if no events */ | |
801 | if (!(c->poll_entry->revents & POLLIN)) | |
802 | return 0; | |
803 | /* read the data */ | |
1bc1cfdd | 804 | read_loop: |
94d9ad5f | 805 | len = read(c->fd, c->buffer_ptr, 1); |
85f07f22 FB |
806 | if (len < 0) { |
807 | if (errno != EAGAIN && errno != EINTR) | |
808 | return -1; | |
809 | } else if (len == 0) { | |
810 | return -1; | |
811 | } else { | |
94d9ad5f | 812 | /* search for end of request. */ |
0c1a9eda | 813 | uint8_t *ptr; |
85f07f22 FB |
814 | c->buffer_ptr += len; |
815 | ptr = c->buffer_ptr; | |
816 | if ((ptr >= c->buffer + 2 && !memcmp(ptr-2, "\n\n", 2)) || | |
817 | (ptr >= c->buffer + 4 && !memcmp(ptr-4, "\r\n\r\n", 4))) { | |
818 | /* request found : parse it and reply */ | |
2effd274 FB |
819 | if (c->state == HTTPSTATE_WAIT_REQUEST) { |
820 | ret = http_parse_request(c); | |
821 | } else { | |
822 | ret = rtsp_parse_request(c); | |
823 | } | |
824 | if (ret < 0) | |
85f07f22 FB |
825 | return -1; |
826 | } else if (ptr >= c->buffer_end) { | |
827 | /* request too long: cannot do anything */ | |
828 | return -1; | |
1bc1cfdd | 829 | } else goto read_loop; |
85f07f22 FB |
830 | } |
831 | break; | |
832 | ||
833 | case HTTPSTATE_SEND_HEADER: | |
834 | if (c->poll_entry->revents & (POLLERR | POLLHUP)) | |
835 | return -1; | |
836 | ||
2effd274 | 837 | /* no need to write if no events */ |
85f07f22 FB |
838 | if (!(c->poll_entry->revents & POLLOUT)) |
839 | return 0; | |
840 | len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr); | |
841 | if (len < 0) { | |
842 | if (errno != EAGAIN && errno != EINTR) { | |
843 | /* error : close connection */ | |
2effd274 | 844 | av_freep(&c->pb_buffer); |
85f07f22 FB |
845 | return -1; |
846 | } | |
847 | } else { | |
848 | c->buffer_ptr += len; | |
2e04edb3 PG |
849 | if (c->stream) |
850 | c->stream->bytes_served += len; | |
a6e14edd | 851 | c->data_count += len; |
85f07f22 | 852 | if (c->buffer_ptr >= c->buffer_end) { |
2effd274 | 853 | av_freep(&c->pb_buffer); |
85f07f22 | 854 | /* if error, exit */ |
2effd274 | 855 | if (c->http_error) { |
85f07f22 | 856 | return -1; |
2effd274 FB |
857 | } |
858 | /* all the buffer was sent : synchronize to the incoming stream */ | |
85f07f22 FB |
859 | c->state = HTTPSTATE_SEND_DATA_HEADER; |
860 | c->buffer_ptr = c->buffer_end = c->buffer; | |
861 | } | |
862 | } | |
863 | break; | |
864 | ||
865 | case HTTPSTATE_SEND_DATA: | |
866 | case HTTPSTATE_SEND_DATA_HEADER: | |
867 | case HTTPSTATE_SEND_DATA_TRAILER: | |
2effd274 FB |
868 | /* for packetized output, we consider we can always write (the |
869 | input stream sets the speed). It may be better to verify |
870 | that we do not rely too much on the kernel queues */ | |
871 | if (!c->is_packetized) { | |
872 | if (c->poll_entry->revents & (POLLERR | POLLHUP)) | |
873 | return -1; | |
874 | ||
875 | /* no need to read if no events */ | |
876 | if (!(c->poll_entry->revents & POLLOUT)) | |
877 | return 0; | |
878 | } | |
5eb765ef | 879 | if (http_send_data(c) < 0) |
85f07f22 FB |
880 | return -1; |
881 | break; | |
882 | case HTTPSTATE_RECEIVE_DATA: | |
883 | /* no need to read if no events */ | |
884 | if (c->poll_entry->revents & (POLLERR | POLLHUP)) | |
885 | return -1; | |
886 | if (!(c->poll_entry->revents & POLLIN)) | |
887 | return 0; | |
888 | if (http_receive_data(c) < 0) | |
889 | return -1; | |
890 | break; | |
891 | case HTTPSTATE_WAIT_FEED: | |
892 | /* no need to read if no events */ | |
a6e14edd | 893 | if (c->poll_entry->revents & (POLLIN | POLLERR | POLLHUP)) |
85f07f22 FB |
894 | return -1; |
895 | ||
896 | /* nothing to do, we'll be woken up by incoming feed packets */ |
897 | break; | |
2effd274 FB |
898 | |
899 | case HTTPSTATE_WAIT: | |
900 | /* if the delay expired, we can send new packets */ | |
901 | if (compute_send_delay(c) <= 0) | |
902 | c->state = HTTPSTATE_SEND_DATA; | |
903 | break; | |
904 | case HTTPSTATE_WAIT_SHORT: | |
905 | /* just return back to send data */ | |
906 | c->state = HTTPSTATE_SEND_DATA; | |
907 | break; | |
908 | ||
909 | case RTSPSTATE_SEND_REPLY: | |
910 | if (c->poll_entry->revents & (POLLERR | POLLHUP)) { | |
911 | av_freep(&c->pb_buffer); | |
912 | return -1; | |
913 | } | |
914 | /* no need to write if no events */ | |
915 | if (!(c->poll_entry->revents & POLLOUT)) | |
916 | return 0; | |
917 | len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr); | |
918 | if (len < 0) { | |
919 | if (errno != EAGAIN && errno != EINTR) { | |
920 | /* error : close connection */ | |
921 | av_freep(&c->pb_buffer); | |
922 | return -1; | |
923 | } | |
924 | } else { | |
925 | c->buffer_ptr += len; | |
926 | c->data_count += len; | |
927 | if (c->buffer_ptr >= c->buffer_end) { | |
928 | /* all the buffer was sent : wait for a new request */ | |
929 | av_freep(&c->pb_buffer); | |
930 | start_wait_request(c, 1); | |
931 | } | |
932 | } | |
933 | break; | |
bc351386 FB |
934 | case RTSPSTATE_SEND_PACKET: |
935 | if (c->poll_entry->revents & (POLLERR | POLLHUP)) { | |
936 | av_freep(&c->packet_buffer); | |
937 | return -1; | |
938 | } | |
939 | /* no need to write if no events */ | |
940 | if (!(c->poll_entry->revents & POLLOUT)) | |
941 | return 0; | |
942 | len = write(c->fd, c->packet_buffer_ptr, | |
943 | c->packet_buffer_end - c->packet_buffer_ptr); | |
944 | if (len < 0) { | |
945 | if (errno != EAGAIN && errno != EINTR) { | |
946 | /* error : close connection */ | |
947 | av_freep(&c->packet_buffer); | |
948 | return -1; | |
949 | } | |
950 | } else { | |
951 | c->packet_buffer_ptr += len; | |
952 | if (c->packet_buffer_ptr >= c->packet_buffer_end) { | |
953 | /* all the buffer was sent : wait for a new request */ | |
954 | av_freep(&c->packet_buffer); | |
955 | c->state = RTSPSTATE_WAIT_REQUEST; | |
956 | } | |
957 | } | |
958 | break; | |
2effd274 FB |
959 | case HTTPSTATE_READY: |
960 | /* nothing to do */ | |
961 | break; | |
85f07f22 FB |
962 | default: |
963 | return -1; | |
964 | } | |
965 | return 0; | |
966 | } | |
967 | ||
3120d2a2 PG |
968 | static int extract_rates(char *rates, int ratelen, const char *request) |
969 | { | |
970 | const char *p; | |
971 | ||
972 | for (p = request; *p && *p != '\r' && *p != '\n'; ) { | |
973 | if (strncasecmp(p, "Pragma:", 7) == 0) { | |
974 | const char *q = p + 7; | |
975 | ||
976 | while (*q && *q != '\n' && isspace(*q)) | |
977 | q++; | |
978 | ||
979 | if (strncasecmp(q, "stream-switch-entry=", 20) == 0) { | |
980 | int stream_no; | |
981 | int rate_no; | |
982 | ||
983 | q += 20; | |
984 | ||
cde25790 | 985 | memset(rates, 0xff, ratelen); |
3120d2a2 PG |
986 | |
987 | while (1) { | |
988 | while (*q && *q != '\n' && *q != ':') | |
989 | q++; | |
990 | ||
991 | if (sscanf(q, ":%d:%d", &stream_no, &rate_no) != 2) { | |
992 | break; | |
993 | } | |
994 | stream_no--; | |
995 | if (stream_no < ratelen && stream_no >= 0) { | |
996 | rates[stream_no] = rate_no; | |
997 | } | |
998 | ||
999 | while (*q && *q != '\n' && !isspace(*q)) | |
1000 | q++; | |
1001 | } | |
1002 | ||
1003 | return 1; | |
1004 | } | |
1005 | } | |
1006 | p = strchr(p, '\n'); | |
1007 | if (!p) | |
1008 | break; | |
1009 | ||
1010 | p++; | |
1011 | } | |
1012 | ||
1013 | return 0; | |
1014 | } | |
1015 | ||
cde25790 | 1016 | static int find_stream_in_feed(FFStream *feed, AVCodecContext *codec, int bit_rate) |
3120d2a2 PG |
1017 | { |
1018 | int i; | |
cde25790 PG |
1019 | int best_bitrate = 100000000; |
1020 | int best = -1; | |
1021 | ||
1022 | for (i = 0; i < feed->nb_streams; i++) { | |
1023 | AVCodecContext *feed_codec = &feed->streams[i]->codec; | |
1024 | ||
1025 | if (feed_codec->codec_id != codec->codec_id || | |
1026 | feed_codec->sample_rate != codec->sample_rate || | |
1027 | feed_codec->width != codec->width || | |
1028 | feed_codec->height != codec->height) { | |
1029 | continue; | |
1030 | } | |
1031 | ||
1032 | /* Potential stream */ | |
1033 | ||
1034 | /* We want the fastest stream less than bit_rate, or the slowest | |
1035 | * faster than bit_rate | |
1036 | */ | |
1037 | ||
1038 | if (feed_codec->bit_rate <= bit_rate) { | |
1039 | if (best_bitrate > bit_rate || feed_codec->bit_rate > best_bitrate) { | |
1040 | best_bitrate = feed_codec->bit_rate; | |
1041 | best = i; | |
1042 | } | |
1043 | } else { | |
1044 | if (feed_codec->bit_rate < best_bitrate) { | |
1045 | best_bitrate = feed_codec->bit_rate; | |
1046 | best = i; | |
1047 | } | |
1048 | } | |
1049 | } | |
1050 | ||
1051 | return best; | |
1052 | } | |
1053 | ||
1054 | static int modify_current_stream(HTTPContext *c, char *rates) | |
1055 | { | |
1056 | int i; | |
1057 | FFStream *req = c->stream; | |
1058 | int action_required = 0; | |
3120d2a2 | 1059 | |
001bcd29 PG |
1060 | /* Not much we can do for a feed */ |
1061 | if (!req->feed) | |
1062 | return 0; | |
1063 | ||
3120d2a2 PG |
1064 | for (i = 0; i < req->nb_streams; i++) { |
1065 | AVCodecContext *codec = &req->streams[i]->codec; | |
1066 | ||
3120d2a2 PG |
1067 | switch(rates[i]) { |
1068 | case 0: | |
cde25790 | 1069 | c->switch_feed_streams[i] = req->feed_streams[i]; |
3120d2a2 PG |
1070 | break; |
1071 | case 1: | |
cde25790 | 1072 | c->switch_feed_streams[i] = find_stream_in_feed(req->feed, codec, codec->bit_rate / 2); |
3120d2a2 PG |
1073 | break; |
1074 | case 2: | |
cde25790 PG |
1075 | /* Wants off or slow */ |
1076 | c->switch_feed_streams[i] = find_stream_in_feed(req->feed, codec, codec->bit_rate / 4); | |
1077 | #ifdef WANTS_OFF | |
1078 | /* This doesn't work well when it turns off the only stream! */ | |
1079 | c->switch_feed_streams[i] = -2; | |
1080 | c->feed_streams[i] = -2; | |
1081 | #endif | |
3120d2a2 PG |
1082 | break; |
1083 | } | |
3120d2a2 | 1084 | |
cde25790 PG |
1085 | if (c->switch_feed_streams[i] >= 0 && c->switch_feed_streams[i] != c->feed_streams[i]) |
1086 | action_required = 1; | |
1087 | } | |
3120d2a2 | 1088 | |
cde25790 PG |
1089 | return action_required; |
1090 | } | |
3120d2a2 | 1091 | |
3120d2a2 | 1092 | |
cde25790 PG |
1093 | static void do_switch_stream(HTTPContext *c, int i) |
1094 | { | |
1095 | if (c->switch_feed_streams[i] >= 0) { | |
1096 | #ifdef PHILIP | |
1097 | c->feed_streams[i] = c->switch_feed_streams[i]; | |
1098 | #endif | |
3120d2a2 | 1099 | |
cde25790 | 1100 | /* Now update the stream */ |
3120d2a2 | 1101 | } |
cde25790 | 1102 | c->switch_feed_streams[i] = -1; |
3120d2a2 | 1103 | } |
7434ba6d | 1104 | |
2effd274 FB |
1105 | /* XXX: factorize in utils.c ? */ |
1106 | /* XXX: take care with different space meaning */ | |
1107 | static void skip_spaces(const char **pp) | |
1108 | { | |
1109 | const char *p; | |
1110 | p = *pp; | |
1111 | while (*p == ' ' || *p == '\t') | |
1112 | p++; | |
1113 | *pp = p; | |
1114 | } | |
1115 | ||
1116 | static void get_word(char *buf, int buf_size, const char **pp) | |
1117 | { | |
1118 | const char *p; | |
1119 | char *q; | |
1120 | ||
1121 | p = *pp; | |
1122 | skip_spaces(&p); | |
1123 | q = buf; | |
1124 | while (!isspace(*p) && *p != '\0') { | |
1125 | if ((q - buf) < buf_size - 1) | |
1126 | *q++ = *p; | |
1127 | p++; | |
1128 | } | |
1129 | if (buf_size > 0) | |
1130 | *q = '\0'; | |
1131 | *pp = p; | |
1132 | } | |
1133 | ||
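skip_spaces() and get_word() are the whole tokenizer used by the request parsers below. A small illustration of what they produce on a typical HTTP request line (the request string is invented, and the helper exists only for this example):

    /* illustration only: splits a request line the same way
       http_parse_request() does a little further down */
    static void get_word_example(void)
    {
        const char *p = "GET /test1.asf HTTP/1.0";
        char cmd[32], url[1024], protocol[32];

        get_word(cmd, sizeof(cmd), &p);           /* cmd      = "GET" */
        get_word(url, sizeof(url), &p);           /* url      = "/test1.asf" */
        get_word(protocol, sizeof(protocol), &p); /* protocol = "HTTP/1.0" */

        printf("%s %s %s\n", cmd, url, protocol);
    }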
8256c0a3 PG |
1134 | static int validate_acl(FFStream *stream, HTTPContext *c) |
1135 | { | |
1136 | enum IPAddressAction last_action = IP_DENY; | |
1137 | IPAddressACL *acl; | |
1138 | struct in_addr *src = &c->from_addr.sin_addr; | |
efa04ce2 | 1139 | unsigned long src_addr = ntohl(src->s_addr); |
8256c0a3 PG |
1140 | |
1141 | for (acl = stream->acl; acl; acl = acl->next) { | |
efa04ce2 | 1142 | if (src_addr >= acl->first.s_addr && src_addr <= acl->last.s_addr) { |
8256c0a3 PG |
1143 | return (acl->action == IP_ALLOW) ? 1 : 0; |
1144 | } | |
1145 | last_action = acl->action; | |
1146 | } | |
1147 | ||
1148 | /* Nothing matched, so return the opposite of the last action */ |
1149 | return (last_action == IP_DENY) ? 1 : 0; | |
1150 | } | |
1151 | ||
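In validate_acl() the first ACL entry whose [first, last] range contains the client address decides the result; if nothing matches, the answer is the opposite of the last rule's action, and a stream with no ACL at all is allowed. A small sketch of building and testing one entry by hand; the dotted-quad values are invented and the entry is filled in directly here rather than by the config parser:

    /* one hand-built IP_ALLOW entry, kept in host byte order as
       validate_acl() expects */
    static void acl_entry_example(void)
    {
        IPAddressACL acl;
        struct in_addr probe;

        acl.next = NULL;
        acl.action = IP_ALLOW;
        acl.first.s_addr = ntohl(inet_addr("192.168.0.0"));
        acl.last.s_addr  = ntohl(inet_addr("192.168.0.255"));

        probe.s_addr = inet_addr("192.168.0.17"); /* network order, like from_addr */

        /* same per-entry test that validate_acl() applies */
        if (ntohl(probe.s_addr) >= acl.first.s_addr &&
            ntohl(probe.s_addr) <= acl.last.s_addr)
            printf("matched: %s\n", acl.action == IP_ALLOW ? "allow" : "deny");
    }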
829ac53d FB |
1152 | /* compute the real filename of a file by matching it without its |
1153 | extensions to all the stream filenames */ | |
1154 | static void compute_real_filename(char *filename, int max_size) | |
1155 | { | |
1156 | char file1[1024]; | |
1157 | char file2[1024]; | |
1158 | char *p; | |
1159 | FFStream *stream; | |
1160 | ||
1161 | /* compute filename by matching without the file extensions */ | |
1162 | pstrcpy(file1, sizeof(file1), filename); | |
1163 | p = strrchr(file1, '.'); | |
1164 | if (p) | |
1165 | *p = '\0'; | |
1166 | for(stream = first_stream; stream != NULL; stream = stream->next) { | |
1167 | pstrcpy(file2, sizeof(file2), stream->filename); | |
1168 | p = strrchr(file2, '.'); | |
1169 | if (p) | |
1170 | *p = '\0'; | |
1171 | if (!strcmp(file1, file2)) { | |
1172 | pstrcpy(filename, max_size, stream->filename); | |
1173 | break; | |
1174 | } | |
1175 | } | |
1176 | } | |
1177 | ||
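compute_real_filename() matches a request such as "test1.rtsp" or "test1.sdp" to a configured stream by comparing names with the extension stripped. A standalone restatement of just that comparison (the helper and the file names are illustrative, not part of ffserver):

    #include <stdio.h>
    #include <string.h>

    /* the core of compute_real_filename(): compare names without extensions */
    static int same_basename(const char *a, const char *b)
    {
        char s1[1024], s2[1024], *p;

        strncpy(s1, a, sizeof(s1) - 1); s1[sizeof(s1) - 1] = '\0';
        strncpy(s2, b, sizeof(s2) - 1); s2[sizeof(s2) - 1] = '\0';
        if ((p = strrchr(s1, '.'))) *p = '\0';
        if ((p = strrchr(s2, '.'))) *p = '\0';
        return !strcmp(s1, s2);
    }

    int main(void)
    {
        /* "test1.rtsp" from the URL matches a configured stream "test1.mpg" */
        printf("%d\n", same_basename("test1.rtsp", "test1.mpg"));  /* prints 1 */
        printf("%d\n", same_basename("test1.rtsp", "other.mpg"));  /* prints 0 */
        return 0;
    }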
1178 | enum RedirType { | |
1179 | REDIR_NONE, | |
1180 | REDIR_ASX, | |
1181 | REDIR_RAM, | |
1182 | REDIR_ASF, | |
1183 | REDIR_RTSP, | |
1184 | REDIR_SDP, | |
1185 | }; | |
1186 | ||
85f07f22 FB |
1187 | /* parse http request and prepare header */ |
1188 | static int http_parse_request(HTTPContext *c) | |
1189 | { | |
1190 | char *p; | |
1191 | int post; | |
829ac53d | 1192 | enum RedirType redir_type; |
85f07f22 FB |
1193 | char cmd[32]; |
1194 | char info[1024], *filename; | |
1195 | char url[1024], *q; | |
1196 | char protocol[32]; | |
1197 | char msg[1024]; | |
1198 | const char *mime_type; | |
1199 | FFStream *stream; | |
42a63c6a | 1200 | int i; |
3120d2a2 | 1201 | char ratebuf[32]; |
cde25790 | 1202 | char *useragent = 0; |
85f07f22 FB |
1203 | |
1204 | p = c->buffer; | |
2effd274 | 1205 | get_word(cmd, sizeof(cmd), (const char **)&p); |
bd7cf6ad | 1206 | pstrcpy(c->method, sizeof(c->method), cmd); |
7434ba6d | 1207 | |
85f07f22 FB |
1208 | if (!strcmp(cmd, "GET")) |
1209 | post = 0; | |
1210 | else if (!strcmp(cmd, "POST")) | |
1211 | post = 1; | |
1212 | else | |
1213 | return -1; | |
1214 | ||
2effd274 | 1215 | get_word(url, sizeof(url), (const char **)&p); |
bd7cf6ad | 1216 | pstrcpy(c->url, sizeof(c->url), url); |
7434ba6d | 1217 | |
2effd274 | 1218 | get_word(protocol, sizeof(protocol), (const char **)&p); |
85f07f22 FB |
1219 | if (strcmp(protocol, "HTTP/1.0") && strcmp(protocol, "HTTP/1.1")) |
1220 | return -1; | |
7434ba6d | 1221 | |
bd7cf6ad | 1222 | pstrcpy(c->protocol, sizeof(c->protocol), protocol); |
85f07f22 FB |
1223 | |
1224 | /* find the filename and the optional info string in the request */ | |
1225 | p = url; | |
1226 | if (*p == '/') | |
1227 | p++; | |
1228 | filename = p; | |
1229 | p = strchr(p, '?'); | |
1230 | if (p) { | |
bd7cf6ad | 1231 | pstrcpy(info, sizeof(info), p); |
85f07f22 FB |
1232 | *p = '\0'; |
1233 | } else { | |
1234 | info[0] = '\0'; | |
1235 | } | |
1236 | ||
cde25790 PG |
1237 | for (p = c->buffer; *p && *p != '\r' && *p != '\n'; ) { |
1238 | if (strncasecmp(p, "User-Agent:", 11) == 0) { | |
1239 | useragent = p + 11; | |
1240 | if (*useragent && *useragent != '\n' && isspace(*useragent)) | |
1241 | useragent++; | |
1242 | break; | |
1243 | } | |
1244 | p = strchr(p, '\n'); | |
1245 | if (!p) | |
1246 | break; | |
1247 | ||
1248 | p++; | |
1249 | } | |
1250 | ||
829ac53d FB |
1251 | redir_type = REDIR_NONE; |
1252 | if (match_ext(filename, "asx")) { | |
1253 | redir_type = REDIR_ASX; | |
7434ba6d | 1254 | filename[strlen(filename)-1] = 'f'; |
c2ce254c | 1255 | } else if (match_ext(filename, "asf") && |
cde25790 PG |
1256 | (!useragent || strncasecmp(useragent, "NSPlayer", 8) != 0)) { |
1257 | /* if this isn't WMP or lookalike, return the redirector file */ | |
829ac53d FB |
1258 | redir_type = REDIR_ASF; |
1259 | } else if (match_ext(filename, "rpm,ram")) { | |
1260 | redir_type = REDIR_RAM; | |
42a63c6a | 1261 | strcpy(filename + strlen(filename)-2, "m"); |
829ac53d FB |
1262 | } else if (match_ext(filename, "rtsp")) { |
1263 | redir_type = REDIR_RTSP; | |
1264 | compute_real_filename(filename, sizeof(url) - 1); | |
1265 | } else if (match_ext(filename, "sdp")) { | |
1266 | redir_type = REDIR_SDP; | |
829ac53d | 1267 | compute_real_filename(filename, sizeof(url) - 1); |
42a63c6a | 1268 | } |
829ac53d | 1269 | |
85f07f22 FB |
1270 | stream = first_stream; |
1271 | while (stream != NULL) { | |
8256c0a3 | 1272 | if (!strcmp(stream->filename, filename) && validate_acl(stream, c)) |
85f07f22 FB |
1273 | break; |
1274 | stream = stream->next; | |
1275 | } | |
1276 | if (stream == NULL) { | |
1277 | sprintf(msg, "File '%s' not found", url); | |
1278 | goto send_error; | |
1279 | } | |
42a63c6a | 1280 | |
cde25790 PG |
1281 | c->stream = stream; |
1282 | memcpy(c->feed_streams, stream->feed_streams, sizeof(c->feed_streams)); | |
1283 | memset(c->switch_feed_streams, -1, sizeof(c->switch_feed_streams)); | |
1284 | ||
1285 | if (stream->stream_type == STREAM_TYPE_REDIRECT) { | |
1286 | c->http_error = 301; | |
1287 | q = c->buffer; | |
1288 | q += sprintf(q, "HTTP/1.0 301 Moved\r\n"); | |
1289 | q += sprintf(q, "Location: %s\r\n", stream->feed_filename); | |
1290 | q += sprintf(q, "Content-type: text/html\r\n"); | |
1291 | q += sprintf(q, "\r\n"); | |
1292 | q += sprintf(q, "<html><head><title>Moved</title></head><body>\r\n"); | |
1293 | q += sprintf(q, "You should be <a href=\"%s\">redirected</a>.\r\n", stream->feed_filename); | |
1294 | q += sprintf(q, "</body></html>\r\n"); | |
1295 | ||
1296 | /* prepare output buffer */ | |
1297 | c->buffer_ptr = c->buffer; | |
1298 | c->buffer_end = q; | |
1299 | c->state = HTTPSTATE_SEND_HEADER; | |
1300 | return 0; | |
1301 | } | |
1302 | ||
3120d2a2 PG |
1303 | /* If this is WMP, get the rate information */ |
1304 | if (extract_rates(ratebuf, sizeof(ratebuf), c->buffer)) { | |
cde25790 PG |
1305 | if (modify_current_stream(c, ratebuf)) { |
1306 | for (i = 0; i < sizeof(c->feed_streams) / sizeof(c->feed_streams[0]); i++) { | |
1307 | if (c->switch_feed_streams[i] >= 0) | |
1308 | do_switch_stream(c, i); | |
1309 | } | |
1310 | } | |
3120d2a2 PG |
1311 | } |
1312 | ||
42a63c6a | 1313 | if (post == 0 && stream->stream_type == STREAM_TYPE_LIVE) { |
6edd6884 | 1314 | current_bandwidth += stream->bandwidth; |
42a63c6a | 1315 | } |
6edd6884 FB |
1316 | |
1317 | if (post == 0 && max_bandwidth < current_bandwidth) { | |
42a63c6a PG |
1318 | c->http_error = 200; |
1319 | q = c->buffer; | |
1320 | q += sprintf(q, "HTTP/1.0 200 Server too busy\r\n"); | |
1321 | q += sprintf(q, "Content-type: text/html\r\n"); | |
1322 | q += sprintf(q, "\r\n"); | |
1323 | q += sprintf(q, "<html><head><title>Too busy</title></head><body>\r\n"); | |
1324 | q += sprintf(q, "The server is too busy to serve your request at this time.<p>\r\n"); | |
1325 | q += sprintf(q, "The bandwidth being served (including your stream) is %dkbit/sec, and this exceeds the limit of %dkbit/sec\r\n", | |
6edd6884 | 1326 | current_bandwidth, max_bandwidth); |
42a63c6a PG |
1327 | q += sprintf(q, "</body></html>\r\n"); |
1328 | ||
1329 | /* prepare output buffer */ | |
1330 | c->buffer_ptr = c->buffer; | |
1331 | c->buffer_end = q; | |
1332 | c->state = HTTPSTATE_SEND_HEADER; | |
1333 | return 0; | |
1334 | } | |
1335 | ||
829ac53d | 1336 | if (redir_type != REDIR_NONE) { |
7434ba6d PG |
1337 | char *hostinfo = 0; |
1338 | ||
1339 | for (p = c->buffer; *p && *p != '\r' && *p != '\n'; ) { | |
1340 | if (strncasecmp(p, "Host:", 5) == 0) { | |
1341 | hostinfo = p + 5; | |
1342 | break; | |
1343 | } | |
1344 | p = strchr(p, '\n'); | |
1345 | if (!p) | |
1346 | break; | |
1347 | ||
1348 | p++; | |
1349 | } | |
1350 | ||
1351 | if (hostinfo) { | |
1352 | char *eoh; | |
1353 | char hostbuf[260]; | |
1354 | ||
1355 | while (isspace(*hostinfo)) | |
1356 | hostinfo++; | |
1357 | ||
1358 | eoh = strchr(hostinfo, '\n'); | |
1359 | if (eoh) { | |
1360 | if (eoh[-1] == '\r') | |
1361 | eoh--; | |
1362 | ||
1363 | if (eoh - hostinfo < sizeof(hostbuf) - 1) { | |
1364 | memcpy(hostbuf, hostinfo, eoh - hostinfo); | |
1365 | hostbuf[eoh - hostinfo] = 0; | |
1366 | ||
1367 | c->http_error = 200; | |
1368 | q = c->buffer; | |
829ac53d FB |
1369 | switch(redir_type) { |
1370 | case REDIR_ASX: | |
42a63c6a PG |
1371 | q += sprintf(q, "HTTP/1.0 200 ASX Follows\r\n"); |
1372 | q += sprintf(q, "Content-type: video/x-ms-asf\r\n"); | |
1373 | q += sprintf(q, "\r\n"); | |
1374 | q += sprintf(q, "<ASX Version=\"3\">\r\n"); | |
1375 | q += sprintf(q, "<!-- Autogenerated by ffserver -->\r\n"); | |
1376 | q += sprintf(q, "<ENTRY><REF HREF=\"http://%s/%s%s\"/></ENTRY>\r\n", | |
1377 | hostbuf, filename, info); | |
1378 | q += sprintf(q, "</ASX>\r\n"); | |
829ac53d FB |
1379 | break; |
1380 | case REDIR_RAM: | |
42a63c6a PG |
1381 | q += sprintf(q, "HTTP/1.0 200 RAM Follows\r\n"); |
1382 | q += sprintf(q, "Content-type: audio/x-pn-realaudio\r\n"); | |
1383 | q += sprintf(q, "\r\n"); | |
1384 | q += sprintf(q, "# Autogenerated by ffserver\r\n"); | |
1385 | q += sprintf(q, "http://%s/%s%s\r\n", | |
1386 | hostbuf, filename, info); | |
829ac53d FB |
1387 | break; |
1388 | case REDIR_ASF: | |
cde25790 PG |
1389 | q += sprintf(q, "HTTP/1.0 200 ASF Redirect follows\r\n"); |
1390 | q += sprintf(q, "Content-type: video/x-ms-asf\r\n"); | |
1391 | q += sprintf(q, "\r\n"); | |
1392 | q += sprintf(q, "[Reference]\r\n"); | |
1393 | q += sprintf(q, "Ref1=http://%s/%s%s\r\n", | |
1394 | hostbuf, filename, info); | |
829ac53d FB |
1395 | break; |
1396 | case REDIR_RTSP: | |
1397 | { | |
1398 | char hostname[256], *p; | |
1399 | /* extract only hostname */ | |
1400 | pstrcpy(hostname, sizeof(hostname), hostbuf); | |
1401 | p = strrchr(hostname, ':'); | |
1402 | if (p) | |
1403 | *p = '\0'; | |
1404 | q += sprintf(q, "HTTP/1.0 200 RTSP Redirect follows\r\n"); | |
1405 | /* XXX: incorrect mime type ? */ | |
1406 | q += sprintf(q, "Content-type: application/x-rtsp\r\n"); | |
1407 | q += sprintf(q, "\r\n"); | |
1408 | q += sprintf(q, "rtsp://%s:%d/%s\r\n", | |
1409 | hostname, ntohs(my_rtsp_addr.sin_port), | |
1410 | filename); | |
1411 | } | |
1412 | break; | |
1413 | case REDIR_SDP: | |
1414 | { | |
0c1a9eda | 1415 | uint8_t *sdp_data; |
829ac53d FB |
1416 | int sdp_data_size, len; |
1417 | struct sockaddr_in my_addr; | |
1418 | ||
1419 | q += sprintf(q, "HTTP/1.0 200 OK\r\n"); | |
1420 | q += sprintf(q, "Content-type: application/sdp\r\n"); | |
1421 | q += sprintf(q, "\r\n"); | |
1422 | ||
1423 | len = sizeof(my_addr); | |
1424 | getsockname(c->fd, (struct sockaddr *)&my_addr, &len); | |
1425 | ||
1426 | /* XXX: should use a dynamic buffer */ | |
1427 | sdp_data_size = prepare_sdp_description(stream, | |
1428 | &sdp_data, | |
1429 | my_addr.sin_addr); | |
1430 | if (sdp_data_size > 0) { | |
1431 | memcpy(q, sdp_data, sdp_data_size); | |
1432 | q += sdp_data_size; | |
1433 | *q = '\0'; | |
1434 | av_free(sdp_data); | |
1435 | } | |
1436 | } | |
1437 | break; | |
1438 | default: | |
ec3b2232 | 1439 | av_abort(); |
829ac53d | 1440 | break; |
2effd274 | 1441 | } |
7434ba6d PG |
1442 | |
1443 | /* prepare output buffer */ | |
1444 | c->buffer_ptr = c->buffer; | |
1445 | c->buffer_end = q; | |
1446 | c->state = HTTPSTATE_SEND_HEADER; | |
1447 | return 0; | |
1448 | } | |
1449 | } | |
1450 | } | |
1451 | ||
42a63c6a | 1452 | sprintf(msg, "ASX/RAM file not handled"); |
7434ba6d | 1453 | goto send_error; |
85f07f22 FB |
1454 | } |
1455 | ||
a6e14edd | 1456 | stream->conns_served++; |
7434ba6d | 1457 | |
85f07f22 FB |
1458 | /* XXX: add authentication and IP match here */
1459 | ||
1460 | if (post) { | |
1461 | /* if post, it means a feed is being sent */ | |
1462 | if (!stream->is_feed) { | |
7434ba6d PG |
1463 | /* However it might be a status report from WMP! Let's log the data
1464 | * as it might come in handy one day | |
1465 | */ | |
1466 | char *logline = 0; | |
3120d2a2 | 1467 | int client_id = 0; |
7434ba6d PG |
1468 | |
1469 | for (p = c->buffer; *p && *p != '\r' && *p != '\n'; ) { | |
1470 | if (strncasecmp(p, "Pragma: log-line=", 17) == 0) { | |
1471 | logline = p; | |
1472 | break; | |
1473 | } | |
3120d2a2 PG |
1474 | if (strncasecmp(p, "Pragma: client-id=", 18) == 0) { |
1475 | client_id = strtol(p + 18, 0, 10); | |
1476 | } | |
7434ba6d PG |
1477 | p = strchr(p, '\n'); |
1478 | if (!p) | |
1479 | break; | |
1480 | ||
1481 | p++; | |
1482 | } | |
1483 | ||
1484 | if (logline) { | |
1485 | char *eol = strchr(logline, '\n'); | |
1486 | ||
1487 | logline += 17; | |
1488 | ||
1489 | if (eol) { | |
1490 | if (eol[-1] == '\r') | |
1491 | eol--; | |
1492 | http_log("%.*s\n", eol - logline, logline); | |
1493 | c->suppress_log = 1; | |
1494 | } | |
1495 | } | |
3120d2a2 | 1496 | |
cde25790 PG |
1497 | #ifdef DEBUG_WMP |
1498 | http_log("\nGot request:\n%s\n", c->buffer); | |
3120d2a2 PG |
1499 | #endif |
1500 | ||
1501 | if (client_id && extract_rates(ratebuf, sizeof(ratebuf), c->buffer)) { | |
1502 | HTTPContext *wmpc; | |
1503 | ||
1504 | /* Now we have to find the client_id */ | |
1505 | for (wmpc = first_http_ctx; wmpc; wmpc = wmpc->next) { | |
1506 | if (wmpc->wmp_client_id == client_id) | |
1507 | break; | |
1508 | } | |
1509 | ||
1510 | if (wmpc) { | |
cde25790 PG |
1511 | if (modify_current_stream(wmpc, ratebuf)) { |
1512 | wmpc->switch_pending = 1; | |
3120d2a2 PG |
1513 | } |
1514 | } | |
1515 | } | |
7434ba6d | 1516 | |
85f07f22 | 1517 | sprintf(msg, "POST command not handled"); |
cb275dd9 | 1518 | c->stream = 0; |
85f07f22 FB |
1519 | goto send_error; |
1520 | } | |
1521 | if (http_start_receive_data(c) < 0) { | |
1522 | sprintf(msg, "could not open feed"); | |
1523 | goto send_error; | |
1524 | } | |
1525 | c->http_error = 0; | |
1526 | c->state = HTTPSTATE_RECEIVE_DATA; | |
1527 | return 0; | |
1528 | } | |
1529 | ||
cde25790 | 1530 | #ifdef DEBUG_WMP |
3120d2a2 | 1531 | if (strcmp(stream->filename + strlen(stream->filename) - 4, ".asf") == 0) { |
cde25790 | 1532 | http_log("\nGot request:\n%s\n", c->buffer); |
3120d2a2 PG |
1533 | } |
1534 | #endif | |
1535 | ||
85f07f22 FB |
1536 | if (c->stream->stream_type == STREAM_TYPE_STATUS) |
1537 | goto send_stats; | |
1538 | ||
1539 | /* open input stream */ | |
1540 | if (open_input_stream(c, info) < 0) { | |
1541 | sprintf(msg, "Input stream corresponding to '%s' not found", url); | |
1542 | goto send_error; | |
1543 | } | |
1544 | ||
1545 | /* prepare http header */ | |
1546 | q = c->buffer; | |
1547 | q += sprintf(q, "HTTP/1.0 200 OK\r\n"); | |
1548 | mime_type = c->stream->fmt->mime_type; | |
1549 | if (!mime_type) | |
1550 | mime_type = "application/x-octet_stream"; | |
85f07f22 FB |
1551 | q += sprintf(q, "Pragma: no-cache\r\n"); |
1552 | ||
1553 | /* for asf, we need extra headers */ | |
8256c0a3 | 1554 | if (!strcmp(c->stream->fmt->name,"asf_stream")) { |
3120d2a2 | 1555 | /* Need to allocate a client id */ |
3120d2a2 | 1556 | |
8256c0a3 | 1557 | c->wmp_client_id = random() & 0x7fffffff; |
3120d2a2 PG |
1558 | |
1559 | q += sprintf(q, "Server: Cougar 4.1.0.3923\r\nCache-Control: no-cache\r\nPragma: client-id=%d\r\nPragma: features=\"broadcast\"\r\n", c->wmp_client_id); | |
85f07f22 | 1560 | } |
f747e6d3 | 1561 | q += sprintf(q, "Content-Type: %s\r\n", mime_type); |
85f07f22 FB |
1562 | q += sprintf(q, "\r\n"); |
1563 | ||
1564 | /* prepare output buffer */ | |
1565 | c->http_error = 0; | |
1566 | c->buffer_ptr = c->buffer; | |
1567 | c->buffer_end = q; | |
1568 | c->state = HTTPSTATE_SEND_HEADER; | |
1569 | return 0; | |
1570 | send_error: | |
1571 | c->http_error = 404; | |
1572 | q = c->buffer; | |
1573 | q += sprintf(q, "HTTP/1.0 404 Not Found\r\n"); | |
1574 | q += sprintf(q, "Content-type: %s\r\n", "text/html"); | |
1575 | q += sprintf(q, "\r\n"); | |
1576 | q += sprintf(q, "<HTML>\n"); | |
1577 | q += sprintf(q, "<HEAD><TITLE>404 Not Found</TITLE></HEAD>\n"); | |
1578 | q += sprintf(q, "<BODY>%s</BODY>\n", msg); | |
1579 | q += sprintf(q, "</HTML>\n"); | |
1580 | ||
1581 | /* prepare output buffer */ | |
1582 | c->buffer_ptr = c->buffer; | |
1583 | c->buffer_end = q; | |
1584 | c->state = HTTPSTATE_SEND_HEADER; | |
1585 | return 0; | |
1586 | send_stats: | |
1587 | compute_stats(c); | |
1588 | c->http_error = 200; /* horrible : we use this value to avoid | |
1589 | going to the send data state */ | |
1590 | c->state = HTTPSTATE_SEND_HEADER; | |
1591 | return 0; | |
1592 | } | |
1593 | ||
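/* print a count followed by a metric suffix: the value is divided by 1000
   while it is >= 100000 (and suffixes remain), so e.g. 1500000 is printed
   as "1500k" and 2500000000 as "2500M" */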
0c1a9eda | 1594 | static void fmt_bytecount(ByteIOContext *pb, int64_t count) |
2ac887ba PG |
1595 | { |
1596 | static const char *suffix = " kMGTP"; | |
1597 | const char *s; | |
1598 | ||
1599 | for (s = suffix; count >= 100000 && s[1]; count /= 1000, s++) { | |
1600 | } | |
1601 | ||
2effd274 | 1602 | url_fprintf(pb, "%lld%c", count, *s); |
2ac887ba PG |
1603 | } |
1604 | ||
85f07f22 FB |
1605 | static void compute_stats(HTTPContext *c) |
1606 | { | |
1607 | HTTPContext *c1; | |
1608 | FFStream *stream; | |
2effd274 | 1609 | char *p; |
85f07f22 | 1610 | time_t ti; |
2effd274 FB |
1611 | int i, len; |
1612 | ByteIOContext pb1, *pb = &pb1; | |
cde25790 | 1613 | |
2effd274 FB |
1614 | if (url_open_dyn_buf(pb) < 0) { |
1615 | /* XXX: return an error ? */ | |
cde25790 | 1616 | c->buffer_ptr = c->buffer; |
2effd274 FB |
1617 | c->buffer_end = c->buffer; |
1618 | return; | |
cde25790 | 1619 | } |
85f07f22 | 1620 | |
2effd274 FB |
1621 | url_fprintf(pb, "HTTP/1.0 200 OK\r\n"); |
1622 | url_fprintf(pb, "Content-type: %s\r\n", "text/html"); | |
1623 | url_fprintf(pb, "Pragma: no-cache\r\n"); | |
1624 | url_fprintf(pb, "\r\n"); | |
85f07f22 | 1625 | |
2effd274 | 1626 | url_fprintf(pb, "<HEAD><TITLE>FFServer Status</TITLE>\n"); |
cde25790 | 1627 | if (c->stream->feed_filename) { |
2effd274 | 1628 | url_fprintf(pb, "<link rel=\"shortcut icon\" href=\"%s\">\n", c->stream->feed_filename); |
cde25790 | 1629 | } |
2effd274 FB |
1630 | url_fprintf(pb, "</HEAD>\n<BODY>"); |
1631 | url_fprintf(pb, "<H1>FFServer Status</H1>\n"); | |
85f07f22 | 1632 | /* format status */ |
2effd274 FB |
1633 | url_fprintf(pb, "<H2>Available Streams</H2>\n"); |
1634 | url_fprintf(pb, "<TABLE cellspacing=0 cellpadding=4>\n"); | |
1635 | url_fprintf(pb, "<TR><Th valign=top>Path<th align=left>Served<br>Conns<Th><br>bytes<Th valign=top>Format<Th>Bit rate<br>kbits/s<Th align=left>Video<br>kbits/s<th><br>Codec<Th align=left>Audio<br>kbits/s<th><br>Codec<Th align=left valign=top>Feed\n"); | |
85f07f22 FB |
1636 | stream = first_stream; |
1637 | while (stream != NULL) { | |
42a63c6a PG |
1638 | char sfilename[1024]; |
1639 | char *eosf; | |
1640 | ||
a6e14edd | 1641 | if (stream->feed != stream) { |
2effd274 | 1642 | pstrcpy(sfilename, sizeof(sfilename) - 10, stream->filename); |
a6e14edd PG |
1643 | eosf = sfilename + strlen(sfilename); |
1644 | if (eosf - sfilename >= 4) { | |
1645 | if (strcmp(eosf - 4, ".asf") == 0) { | |
1646 | strcpy(eosf - 4, ".asx"); | |
1647 | } else if (strcmp(eosf - 3, ".rm") == 0) { | |
1648 | strcpy(eosf - 3, ".ram"); | |
2effd274 | 1649 | } else if (stream->fmt == &rtp_mux) { |
829ac53d FB |
1650 | /* generate a sample RTSP redirector if |
1651 | unicast. Generate an SDP redirector if | |
1652 | multicast */ | |
2effd274 FB |
1653 | eosf = strrchr(sfilename, '.'); |
1654 | if (!eosf) | |
1655 | eosf = sfilename + strlen(sfilename); | |
829ac53d FB |
1656 | if (stream->is_multicast) |
1657 | strcpy(eosf, ".sdp"); | |
1658 | else | |
1659 | strcpy(eosf, ".rtsp"); | |
a6e14edd | 1660 | } |
42a63c6a | 1661 | } |
a6e14edd | 1662 | |
2effd274 | 1663 | url_fprintf(pb, "<TR><TD><A HREF=\"/%s\">%s</A> ", |
a6e14edd | 1664 | sfilename, stream->filename); |
2effd274 | 1665 | url_fprintf(pb, "<td align=right> %d <td align=right> ", |
2ac887ba | 1666 | stream->conns_served); |
2effd274 | 1667 | fmt_bytecount(pb, stream->bytes_served); |
a6e14edd PG |
1668 | switch(stream->stream_type) { |
1669 | case STREAM_TYPE_LIVE: | |
1670 | { | |
1671 | int audio_bit_rate = 0; | |
1672 | int video_bit_rate = 0; | |
58445440 ZK |
1673 | const char *audio_codec_name = ""; |
1674 | const char *video_codec_name = ""; | |
1675 | const char *audio_codec_name_extra = ""; | |
1676 | const char *video_codec_name_extra = ""; | |
a6e14edd PG |
1677 | |
1678 | for(i=0;i<stream->nb_streams;i++) { | |
1679 | AVStream *st = stream->streams[i]; | |
1680 | AVCodec *codec = avcodec_find_encoder(st->codec.codec_id); | |
1681 | switch(st->codec.codec_type) { | |
1682 | case CODEC_TYPE_AUDIO: | |
1683 | audio_bit_rate += st->codec.bit_rate; | |
1684 | if (codec) { | |
1685 | if (*audio_codec_name) | |
1686 | audio_codec_name_extra = "..."; | |
1687 | audio_codec_name = codec->name; | |
1688 | } | |
1689 | break; | |
1690 | case CODEC_TYPE_VIDEO: | |
1691 | video_bit_rate += st->codec.bit_rate; | |
1692 | if (codec) { | |
1693 | if (*video_codec_name) | |
1694 | video_codec_name_extra = "..."; | |
1695 | video_codec_name = codec->name; | |
1696 | } | |
1697 | break; | |
1698 | default: | |
ec3b2232 | 1699 | av_abort(); |
79c4ea3c | 1700 | } |
85f07f22 | 1701 | } |
2effd274 | 1702 | url_fprintf(pb, "<TD align=center> %s <TD align=right> %d <TD align=right> %d <TD> %s %s <TD align=right> %d <TD> %s %s", |
a6e14edd | 1703 | stream->fmt->name, |
6edd6884 | 1704 | stream->bandwidth, |
a6e14edd PG |
1705 | video_bit_rate / 1000, video_codec_name, video_codec_name_extra, |
1706 | audio_bit_rate / 1000, audio_codec_name, audio_codec_name_extra); | |
1707 | if (stream->feed) { | |
2effd274 | 1708 | url_fprintf(pb, "<TD>%s", stream->feed->filename); |
a6e14edd | 1709 | } else { |
2effd274 | 1710 | url_fprintf(pb, "<TD>%s", stream->feed_filename); |
a6e14edd | 1711 | } |
2effd274 | 1712 | url_fprintf(pb, "\n"); |
85f07f22 | 1713 | } |
a6e14edd PG |
1714 | break; |
1715 | default: | |
2effd274 | 1716 | url_fprintf(pb, "<TD align=center> - <TD align=right> - <TD align=right> - <td><td align=right> - <TD>\n"); |
a6e14edd | 1717 | break; |
85f07f22 | 1718 | } |
85f07f22 FB |
1719 | } |
1720 | stream = stream->next; | |
1721 | } | |
2effd274 | 1722 | url_fprintf(pb, "</TABLE>\n"); |
a6e14edd PG |
1723 | |
1724 | stream = first_stream; | |
1725 | while (stream != NULL) { | |
1726 | if (stream->feed == stream) { | |
2effd274 | 1727 | url_fprintf(pb, "<h2>Feed %s</h2>", stream->filename); |
cde25790 | 1728 | if (stream->pid) { |
2effd274 | 1729 | url_fprintf(pb, "Running as pid %d.\n", stream->pid); |
cde25790 | 1730 | |
2effd274 FB |
1731 | #if defined(linux) && !defined(CONFIG_NOCUTILS) |
1732 | { | |
1733 | FILE *pid_stat; | |
1734 | char ps_cmd[64]; | |
1735 | ||
1736 | /* This is somewhat linux specific I guess */ | |
1737 | snprintf(ps_cmd, sizeof(ps_cmd), | |
1738 | "ps -o \"%%cpu,cputime\" --no-headers %d", | |
1739 | stream->pid); | |
1740 | ||
1741 | pid_stat = popen(ps_cmd, "r"); | |
1742 | if (pid_stat) { | |
1743 | char cpuperc[10]; | |
1744 | char cpuused[64]; | |
1745 | ||
1746 | if (fscanf(pid_stat, "%9s %63s", cpuperc, |
1747 | cpuused) == 2) { | |
1748 | url_fprintf(pb, "Currently using %s%% of the cpu. Total time used %s.\n", | |
1749 | cpuperc, cpuused); | |
1750 | } | |
1751 | pclose(pid_stat); |
cde25790 | 1752 | } |
cde25790 PG |
1753 | } |
1754 | #endif | |
1755 | ||
2effd274 | 1756 | url_fprintf(pb, "<p>"); |
cde25790 | 1757 | } |
2effd274 | 1758 | url_fprintf(pb, "<table cellspacing=0 cellpadding=4><tr><th>Stream<th>type<th>kbits/s<th align=left>codec<th align=left>Parameters\n"); |
a6e14edd PG |
1759 | |
1760 | for (i = 0; i < stream->nb_streams; i++) { | |
1761 | AVStream *st = stream->streams[i]; | |
1762 | AVCodec *codec = avcodec_find_encoder(st->codec.codec_id); | |
b29f97d1 | 1763 | const char *type = "unknown"; |
b582f314 PG |
1764 | char parameters[64]; |
1765 | ||
1766 | parameters[0] = 0; | |
a6e14edd PG |
1767 | |
1768 | switch(st->codec.codec_type) { | |
1769 | case CODEC_TYPE_AUDIO: | |
1770 | type = "audio"; | |
1771 | break; | |
1772 | case CODEC_TYPE_VIDEO: | |
1773 | type = "video"; | |
cde25790 | 1774 | sprintf(parameters, "%dx%d, q=%d-%d, fps=%d", st->codec.width, st->codec.height, |
14bea432 | 1775 | st->codec.qmin, st->codec.qmax, st->codec.frame_rate / st->codec.frame_rate_base); |
a6e14edd PG |
1776 | break; |
1777 | default: | |
ec3b2232 | 1778 | av_abort(); |
a6e14edd | 1779 | } |
2effd274 | 1780 | url_fprintf(pb, "<tr><td align=right>%d<td>%s<td align=right>%d<td>%s<td>%s\n", |
b582f314 | 1781 | i, type, st->codec.bit_rate/1000, codec ? codec->name : "", parameters); |
a6e14edd | 1782 | } |
2effd274 | 1783 | url_fprintf(pb, "</table>\n"); |
a6e14edd PG |
1784 | |
1785 | } | |
1786 | stream = stream->next; | |
1787 | } | |
85f07f22 FB |
1788 | |
1789 | #if 0 | |
1790 | { | |
1791 | float avg; | |
1792 | AVCodecContext *enc; | |
1793 | char buf[1024]; | |
1794 | ||
1795 | /* feed status */ | |
1796 | stream = first_feed; | |
1797 | while (stream != NULL) { | |
2effd274 FB |
1798 | url_fprintf(pb, "<H1>Feed '%s'</H1>\n", stream->filename); |
1799 | url_fprintf(pb, "<TABLE>\n"); | |
1800 | url_fprintf(pb, "<TR><TD>Parameters<TD>Frame count<TD>Size<TD>Avg bitrate (kbits/s)\n"); | |
85f07f22 FB |
1801 | for(i=0;i<stream->nb_streams;i++) { |
1802 | AVStream *st = stream->streams[i]; | |
1803 | FeedData *fdata = st->priv_data; | |
1804 | enc = &st->codec; | |
1805 | ||
1806 | avcodec_string(buf, sizeof(buf), enc); | |
1807 | avg = fdata->avg_frame_size * (float)enc->rate * 8.0; | |
1808 | if (enc->codec->type == CODEC_TYPE_AUDIO && enc->frame_size > 0) | |
1809 | avg /= enc->frame_size; | |
2effd274 | 1810 | url_fprintf(pb, "<TR><TD>%s <TD> %d <TD> %Ld <TD> %0.1f\n", |
85f07f22 FB |
1811 | buf, enc->frame_number, fdata->data_count, avg / 1000.0); |
1812 | } | |
2effd274 | 1813 | url_fprintf(pb, "</TABLE>\n"); |
85f07f22 FB |
1814 | stream = stream->next_feed; |
1815 | } | |
1816 | } | |
1817 | #endif | |
1818 | ||
1819 | /* connection status */ | |
2effd274 | 1820 | url_fprintf(pb, "<H2>Connection Status</H2>\n"); |
85f07f22 | 1821 | |
2effd274 | 1822 | url_fprintf(pb, "Number of connections: %d / %d<BR>\n", |
85f07f22 FB |
1823 | nb_connections, nb_max_connections); |
1824 | ||
2effd274 | 1825 | url_fprintf(pb, "Bandwidth in use: %dk / %dk<BR>\n", |
6edd6884 | 1826 | current_bandwidth, max_bandwidth); |
42a63c6a | 1827 | |
2effd274 FB |
1828 | url_fprintf(pb, "<TABLE>\n"); |
1829 | url_fprintf(pb, "<TR><th>#<th>File<th>IP<th>Proto<th>State<th>Target bits/sec<th>Actual bits/sec<th>Bytes transferred\n"); | |
85f07f22 FB |
1830 | c1 = first_http_ctx; |
1831 | i = 0; | |
2effd274 | 1832 | while (c1 != NULL) { |
cde25790 PG |
1833 | int bitrate; |
1834 | int j; | |
1835 | ||
1836 | bitrate = 0; | |
2effd274 FB |
1837 | if (c1->stream) { |
1838 | for (j = 0; j < c1->stream->nb_streams; j++) { | |
1839 | if (!c1->stream->feed) { | |
1840 | bitrate += c1->stream->streams[j]->codec.bit_rate; | |
1841 | } else { | |
1842 | if (c1->feed_streams[j] >= 0) { | |
1843 | bitrate += c1->stream->feed->streams[c1->feed_streams[j]]->codec.bit_rate; | |
1844 | } | |
1845 | } | |
cde25790 PG |
1846 | } |
1847 | } | |
1848 | ||
85f07f22 FB |
1849 | i++; |
1850 | p = inet_ntoa(c1->from_addr.sin_addr); | |
2effd274 FB |
1851 | url_fprintf(pb, "<TR><TD><B>%d</B><TD>%s%s<TD>%s<TD>%s<TD>%s<td align=right>", |
1852 | i, | |
1853 | c1->stream ? c1->stream->filename : "", | |
1854 | c1->state == HTTPSTATE_RECEIVE_DATA ? "(input)" : "", | |
1855 | p, | |
1856 | c1->protocol, | |
1857 | http_state[c1->state]); | |
1858 | fmt_bytecount(pb, bitrate); | |
1859 | url_fprintf(pb, "<td align=right>"); | |
1860 | fmt_bytecount(pb, compute_datarate(&c1->datarate, c1->data_count) * 8); | |
1861 | url_fprintf(pb, "<td align=right>"); | |
1862 | fmt_bytecount(pb, c1->data_count); | |
1863 | url_fprintf(pb, "\n"); | |
85f07f22 FB |
1864 | c1 = c1->next; |
1865 | } | |
2effd274 | 1866 | url_fprintf(pb, "</TABLE>\n"); |
85f07f22 FB |
1867 | |
1868 | /* date */ | |
1869 | ti = time(NULL); | |
1870 | p = ctime(&ti); | |
2effd274 FB |
1871 | url_fprintf(pb, "<HR size=1 noshade>Generated at %s", p); |
1872 | url_fprintf(pb, "</BODY>\n</HTML>\n"); | |
85f07f22 | 1873 | |
2effd274 FB |
1874 | len = url_close_dyn_buf(pb, &c->pb_buffer); |
1875 | c->buffer_ptr = c->pb_buffer; | |
1876 | c->buffer_end = c->pb_buffer + len; | |
85f07f22 FB |
1877 | } |
1878 | ||
2effd274 FB |
1879 | /* check if the parser needs to be opened for stream i */ |
1880 | static void open_parser(AVFormatContext *s, int i) | |
85f07f22 | 1881 | { |
2effd274 FB |
1882 | AVStream *st = s->streams[i]; |
1883 | AVCodec *codec; | |
31def229 | 1884 | |
2effd274 FB |
1885 | if (!st->codec.codec) { |
1886 | codec = avcodec_find_decoder(st->codec.codec_id); | |
1887 | if (codec && (codec->capabilities & CODEC_CAP_PARSE_ONLY)) { | |
1888 | st->codec.parse_only = 1; | |
1889 | if (avcodec_open(&st->codec, codec) < 0) { | |
1890 | st->codec.parse_only = 0; | |
1891 | } | |
cde25790 PG |
1892 | } |
1893 | } | |
85f07f22 FB |
1894 | } |
1895 | ||
1896 | static int open_input_stream(HTTPContext *c, const char *info) | |
1897 | { | |
1898 | char buf[128]; | |
1899 | char input_filename[1024]; | |
1900 | AVFormatContext *s; | |
2effd274 | 1901 | int buf_size, i; |
0c1a9eda | 1902 | int64_t stream_pos; |
85f07f22 FB |
1903 | |
1904 | /* find file name */ | |
1905 | if (c->stream->feed) { | |
1906 | strcpy(input_filename, c->stream->feed->feed_filename); | |
1907 | buf_size = FFM_PACKET_SIZE; | |
1908 | /* compute position (absolute time) */ | |
1909 | if (find_info_tag(buf, sizeof(buf), "date", info)) { | |
1910 | stream_pos = parse_date(buf, 0); | |
f747e6d3 PG |
1911 | } else if (find_info_tag(buf, sizeof(buf), "buffer", info)) { |
1912 | int prebuffer = strtol(buf, 0, 10); | |
0c1a9eda | 1913 | stream_pos = av_gettime() - prebuffer * (int64_t)1000000; |
85f07f22 | 1914 | } else { |
0c1a9eda | 1915 | stream_pos = av_gettime() - c->stream->prebuffer * (int64_t)1000; |
85f07f22 FB |
1916 | } |
1917 | } else { | |
1918 | strcpy(input_filename, c->stream->feed_filename); | |
1919 | buf_size = 0; | |
1920 | /* compute position (relative time) */ | |
1921 | if (find_info_tag(buf, sizeof(buf), "date", info)) { | |
1922 | stream_pos = parse_date(buf, 1); | |
1923 | } else { | |
1924 | stream_pos = 0; | |
1925 | } | |
1926 | } | |
1927 | if (input_filename[0] == '\0') | |
1928 | return -1; | |
1929 | ||
8256c0a3 PG |
1930 | #if 0 |
1931 | { time_t when = stream_pos / 1000000; | |
1932 | http_log("Stream pos = %lld, time=%s", stream_pos, ctime(&when)); | |
1933 | } | |
1934 | #endif | |
1935 | ||
85f07f22 | 1936 | /* open stream */ |
2effd274 FB |
1937 | if (av_open_input_file(&s, input_filename, NULL, buf_size, NULL) < 0) { |
1938 | http_log("%s not found", input_filename); | |
85f07f22 | 1939 | return -1; |
2effd274 | 1940 | } |
85f07f22 | 1941 | c->fmt_in = s; |
2effd274 FB |
1942 | |
1943 | /* open each parser */ | |
1944 | for(i=0;i<s->nb_streams;i++) | |
1945 | open_parser(s, i); | |
1946 | ||
1947 | /* choose stream as clock source (we favor the video stream if |
1948 | present) for packet sending */ | |
1949 | c->pts_stream_index = 0; | |
1950 | for(i=0;i<c->stream->nb_streams;i++) { | |
1951 | if (c->pts_stream_index == 0 && | |
1952 | c->stream->streams[i]->codec.codec_type == CODEC_TYPE_VIDEO) { | |
1953 | c->pts_stream_index = i; | |
1954 | } | |
1955 | } | |
85f07f22 | 1956 | |
bd7cf6ad FB |
1957 | if (c->fmt_in->iformat->read_seek) { |
1958 | c->fmt_in->iformat->read_seek(c->fmt_in, stream_pos); | |
85f07f22 | 1959 | } |
2effd274 FB |
1960 | /* set the start time (needed for maxtime and RTP packet timing) */ |
1961 | c->start_time = cur_time; | |
1962 | c->first_pts = AV_NOPTS_VALUE; | |
85f07f22 FB |
1963 | return 0; |
1964 | } | |
1965 | ||
2effd274 FB |
1966 | /* currently deactivated because the new PTS handling is not |
1967 | satisfactory yet */ | |
1968 | //#define AV_READ_FRAME | |
1969 | #ifdef AV_READ_FRAME | |
85f07f22 | 1970 | |
2effd274 FB |
1971 | /* XXX: generalize that in ffmpeg for picture/audio/data. Currently |
1972 | the return packet MUST NOT be freed */ | |
1973 | int av_read_frame(AVFormatContext *s, AVPacket *pkt) | |
1974 | { | |
1975 | AVStream *st; | |
1976 | int len, ret, old_nb_streams, i; | |
f747e6d3 | 1977 | |
2effd274 FB |
1978 | /* see if remaining frames must be parsed */ |
1979 | for(;;) { | |
1980 | if (s->cur_len > 0) { | |
1981 | st = s->streams[s->cur_pkt.stream_index]; | |
1982 | len = avcodec_parse_frame(&st->codec, &pkt->data, &pkt->size, | |
1983 | s->cur_ptr, s->cur_len); | |
1984 | if (len < 0) { | |
1985 | /* error: get next packet */ | |
1986 | s->cur_len = 0; | |
1987 | } else { | |
1988 | s->cur_ptr += len; | |
1989 | s->cur_len -= len; | |
1990 | if (pkt->size) { | |
1991 | /* init pts counter if not done */ | |
1992 | if (st->pts.den == 0) { | |
1993 | switch(st->codec.codec_type) { | |
1994 | case CODEC_TYPE_AUDIO: | |
0c1a9eda | 1995 | st->pts_incr = (int64_t)s->pts_den; |
2effd274 | 1996 | av_frac_init(&st->pts, st->pts.val, 0, |
0c1a9eda | 1997 | (int64_t)s->pts_num * st->codec.sample_rate); |
2effd274 FB |
1998 | break; |
1999 | case CODEC_TYPE_VIDEO: | |
14bea432 | 2000 | st->pts_incr = (int64_t)s->pts_den * st->codec.frame_rate_base; |
2effd274 | 2001 | av_frac_init(&st->pts, st->pts.val, 0, |
0c1a9eda | 2002 | (int64_t)s->pts_num * st->codec.frame_rate); |
2effd274 FB |
2003 | break; |
2004 | default: | |
2005 | av_abort(); | |
2006 | } | |
2007 | } | |
2008 | ||
2009 | /* a frame was read: return it */ | |
2010 | pkt->pts = st->pts.val; | |
2011 | #if 0 | |
2012 | printf("add pts=%Lx num=%Lx den=%Lx incr=%Lx\n", | |
2013 | st->pts.val, st->pts.num, st->pts.den, st->pts_incr); | |
2014 | #endif | |
2015 | switch(st->codec.codec_type) { | |
2016 | case CODEC_TYPE_AUDIO: | |
2017 | av_frac_add(&st->pts, st->pts_incr * st->codec.frame_size); | |
2018 | break; | |
2019 | case CODEC_TYPE_VIDEO: | |
2020 | av_frac_add(&st->pts, st->pts_incr); | |
2021 | break; | |
2022 | default: | |
2023 | av_abort(); | |
2024 | } | |
2025 | pkt->stream_index = s->cur_pkt.stream_index; | |
2026 | /* we use the codec indication because it is | |
2027 | more accurate than the demux flags */ | |
2028 | pkt->flags = 0; | |
492cd3a9 | 2029 | if (st->codec.coded_frame->key_frame) |
2effd274 FB |
2030 | pkt->flags |= PKT_FLAG_KEY; |
2031 | return 0; | |
2032 | } | |
85f07f22 FB |
2033 | } |
2034 | } else { | |
2effd274 FB |
2035 | /* free previous packet */ |
2036 | av_free_packet(&s->cur_pkt); | |
2037 | ||
2038 | old_nb_streams = s->nb_streams; | |
2039 | ret = av_read_packet(s, &s->cur_pkt); | |
2040 | if (ret) | |
2041 | return ret; | |
2042 | /* open parsers for each new streams */ | |
2043 | for(i = old_nb_streams; i < s->nb_streams; i++) | |
2044 | open_parser(s, i); | |
2045 | st = s->streams[s->cur_pkt.stream_index]; | |
2046 | ||
2047 | /* update current pts (XXX: dts handling) from packet, or | |
2048 | use current pts if none given */ | |
2049 | if (s->cur_pkt.pts != AV_NOPTS_VALUE) { | |
2050 | av_frac_set(&st->pts, s->cur_pkt.pts); | |
2051 | } else { | |
2052 | s->cur_pkt.pts = st->pts.val; | |
2053 | } | |
2054 | if (!st->codec.codec) { | |
2055 | /* no codec opened: just return the raw packet */ | |
2056 | *pkt = s->cur_pkt; | |
2057 | ||
2058 | /* no codec opened: just update the pts by considering we | |
2059 | have one frame and free the packet */ | |
2060 | if (st->pts.den == 0) { | |
2061 | switch(st->codec.codec_type) { | |
2062 | case CODEC_TYPE_AUDIO: | |
0c1a9eda | 2063 | st->pts_incr = (int64_t)s->pts_den * st->codec.frame_size; |
2effd274 | 2064 | av_frac_init(&st->pts, st->pts.val, 0, |
0c1a9eda | 2065 | (int64_t)s->pts_num * st->codec.sample_rate); |
2effd274 FB |
2066 | break; |
2067 | case CODEC_TYPE_VIDEO: | |
14bea432 | 2068 | st->pts_incr = (int64_t)s->pts_den * st->codec.frame_rate_base; |
2effd274 | 2069 | av_frac_init(&st->pts, st->pts.val, 0, |
0c1a9eda | 2070 | (int64_t)s->pts_num * st->codec.frame_rate); |
2effd274 FB |
2071 | break; |
2072 | default: | |
2073 | av_abort(); | |
2074 | } | |
2075 | } | |
2076 | av_frac_add(&st->pts, st->pts_incr); | |
2077 | return 0; | |
2078 | } else { | |
2079 | s->cur_ptr = s->cur_pkt.data; | |
2080 | s->cur_len = s->cur_pkt.size; | |
85f07f22 FB |
2081 | } |
2082 | } | |
2effd274 FB |
2083 | } |
2084 | } | |
2085 | ||
2086 | static int compute_send_delay(HTTPContext *c) | |
2087 | { | |
0c1a9eda | 2088 | int64_t cur_pts, delta_pts, next_pts; |
2effd274 FB |
2089 | int delay1; |
2090 | ||
2091 | /* compute current pts value from system time */ | |
0c1a9eda | 2092 | cur_pts = ((int64_t)(cur_time - c->start_time) * c->fmt_in->pts_den) / |
2effd274 FB |
2093 | (c->fmt_in->pts_num * 1000LL); |
2094 | /* compute the delta from the stream we choose as | |
2095 | main clock (we do that to avoid using explicit | |
2096 | buffers to do exact packet reordering for each | |
2097 | stream) */ | |
2098 | /* XXX: really need to fix the number of streams */ | |
2099 | if (c->pts_stream_index >= c->fmt_in->nb_streams) | |
2100 | next_pts = cur_pts; | |
2101 | else | |
2102 | next_pts = c->fmt_in->streams[c->pts_stream_index]->pts.val; | |
2103 | delta_pts = next_pts - cur_pts; | |
2104 | if (delta_pts <= 0) { | |
2105 | delay1 = 0; | |
2106 | } else { | |
2107 | delay1 = (delta_pts * 1000 * c->fmt_in->pts_num) / c->fmt_in->pts_den; | |
2108 | } | |
2109 | return delay1; | |
2110 | } | |
2111 | #else | |
2112 | ||
2113 | /* just fallbacks */ |
b29f97d1 | 2114 | static int av_read_frame(AVFormatContext *s, AVPacket *pkt) |
2effd274 FB |
2115 | { |
2116 | return av_read_packet(s, pkt); | |
2117 | } | |
2118 | ||
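/* fallback pacing: return the number of milliseconds to wait before
   sending the next packet. A 1 s back-off is returned when the measured
   long-term datarate exceeds twice the stream's configured bandwidth;
   otherwise the delay until the current packet's PTS is due relative to
   the wall clock (0 if already due, and always 0 for feed streams) */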
2119 | static int compute_send_delay(HTTPContext *c) | |
2120 | { | |
a782f209 | 2121 | int datarate = 8 * get_longterm_datarate(&c->datarate, c->data_count); |
1bc1cfdd GF |
2122 | int64_t delta_pts; |
2123 | int64_t time_pts; | |
2124 | int m_delay; | |
a782f209 | 2125 | |
6edd6884 | 2126 | if (datarate > c->stream->bandwidth * 2000) { |
a782f209 PG |
2127 | return 1000; |
2128 | } | |
bc351386 FB |
2129 | if (!c->stream->feed && c->first_pts!=AV_NOPTS_VALUE) { |
2130 | time_pts = ((int64_t)(cur_time - c->start_time) * c->fmt_in->pts_den) / | |
2131 | ((int64_t) c->fmt_in->pts_num*1000); | |
2132 | delta_pts = c->cur_pts - time_pts; | |
2133 | m_delay = (delta_pts * 1000 * c->fmt_in->pts_num) / c->fmt_in->pts_den; | |
2134 | return m_delay>0 ? m_delay : 0; | |
2135 | } else { | |
2136 | return 0; | |
2137 | } | |
2effd274 FB |
2138 | } |
2139 | ||
2140 | #endif | |
2141 | ||
2142 | static int http_prepare_data(HTTPContext *c) | |
2143 | { | |
2144 | int i, len, ret; | |
2145 | AVFormatContext *ctx; | |
2146 | ||
bc351386 | 2147 | av_freep(&c->pb_buffer); |
2effd274 FB |
2148 | switch(c->state) { |
2149 | case HTTPSTATE_SEND_DATA_HEADER: | |
2150 | memset(&c->fmt_ctx, 0, sizeof(c->fmt_ctx)); | |
2151 | pstrcpy(c->fmt_ctx.author, sizeof(c->fmt_ctx.author), | |
2152 | c->stream->author); | |
2153 | pstrcpy(c->fmt_ctx.comment, sizeof(c->fmt_ctx.comment), | |
2154 | c->stream->comment); | |
2155 | pstrcpy(c->fmt_ctx.copyright, sizeof(c->fmt_ctx.copyright), | |
2156 | c->stream->copyright); | |
2157 | pstrcpy(c->fmt_ctx.title, sizeof(c->fmt_ctx.title), | |
2158 | c->stream->title); | |
2159 | ||
2160 | /* open output stream by using specified codecs */ | |
2161 | c->fmt_ctx.oformat = c->stream->fmt; | |
2162 | c->fmt_ctx.nb_streams = c->stream->nb_streams; | |
2163 | for(i=0;i<c->fmt_ctx.nb_streams;i++) { | |
2164 | AVStream *st; | |
2165 | st = av_mallocz(sizeof(AVStream)); | |
2166 | c->fmt_ctx.streams[i] = st; | |
2167 | /* if file or feed, then just take streams from FFStream struct */ | |
2168 | if (!c->stream->feed || | |
2169 | c->stream->feed == c->stream) | |
2170 | memcpy(st, c->stream->streams[i], sizeof(AVStream)); | |
2171 | else | |
2172 | memcpy(st, c->stream->feed->streams[c->stream->feed_streams[i]], | |
2173 | sizeof(AVStream)); | |
2174 | st->codec.frame_number = 0; /* XXX: should be done in | |
2175 | AVStream, not in codec */ | |
a4d70941 PG |
2176 | /* I'm pretty sure that this is not correct... |
2177 | * However, without it, we crash | |
2178 | */ | |
2179 | st->codec.coded_frame = &dummy_frame; | |
2effd274 FB |
2180 | } |
2181 | c->got_key_frame = 0; | |
2182 | ||
2183 | /* prepare header and save header data in a stream */ | |
2184 | if (url_open_dyn_buf(&c->fmt_ctx.pb) < 0) { | |
2185 | /* XXX: potential leak */ | |
2186 | return -1; | |
2187 | } | |
2188 | c->fmt_ctx.pb.is_streamed = 1; | |
2189 | ||
3c27199b | 2190 | av_set_parameters(&c->fmt_ctx, NULL); |
2effd274 FB |
2191 | av_write_header(&c->fmt_ctx); |
2192 | ||
2193 | len = url_close_dyn_buf(&c->fmt_ctx.pb, &c->pb_buffer); | |
2194 | c->buffer_ptr = c->pb_buffer; | |
2195 | c->buffer_end = c->pb_buffer + len; | |
2196 | ||
2197 | c->state = HTTPSTATE_SEND_DATA; | |
85f07f22 FB |
2198 | c->last_packet_sent = 0; |
2199 | break; | |
2200 | case HTTPSTATE_SEND_DATA: | |
2201 | /* find a new packet */ | |
85f07f22 FB |
2202 | { |
2203 | AVPacket pkt; | |
2effd274 | 2204 | |
85f07f22 FB |
2205 | /* read a packet from the input stream */ |
2206 | if (c->stream->feed) { | |
2207 | ffm_set_write_index(c->fmt_in, | |
2208 | c->stream->feed->feed_write_index, | |
2209 | c->stream->feed->feed_size); | |
2210 | } | |
ec3b2232 PG |
2211 | |
2212 | if (c->stream->max_time && | |
2ac887ba | 2213 | c->stream->max_time + c->start_time - cur_time < 0) { |
ec3b2232 PG |
2214 | /* We have timed out */ |
2215 | c->state = HTTPSTATE_SEND_DATA_TRAILER; | |
85f07f22 | 2216 | } else { |
a782f209 | 2217 | if (1 || c->is_packetized) { |
2effd274 FB |
2218 | if (compute_send_delay(c) > 0) { |
2219 | c->state = HTTPSTATE_WAIT; | |
2220 | return 1; /* state changed */ | |
2221 | } | |
2222 | } | |
6edd6884 | 2223 | redo: |
2effd274 FB |
2224 | if (av_read_frame(c->fmt_in, &pkt) < 0) { |
2225 | if (c->stream->feed && c->stream->feed->feed_opened) { | |
2226 | /* if coming from feed, it means we reached the end of the | |
2227 | ffm file, so we must wait for more data */ |
2228 | c->state = HTTPSTATE_WAIT_FEED; | |
2229 | return 1; /* state changed */ | |
2230 | } else { | |
6edd6884 FB |
2231 | if (c->stream->loop) { |
2232 | av_close_input_file(c->fmt_in); | |
2233 | c->fmt_in = NULL; | |
2234 | if (open_input_stream(c, "") < 0) | |
2235 | goto no_loop; | |
2236 | goto redo; | |
2237 | } else { | |
2238 | no_loop: | |
2239 | /* must send trailer now because eof or error */ | |
2240 | c->state = HTTPSTATE_SEND_DATA_TRAILER; | |
2241 | } | |
2effd274 FB |
2242 | } |
2243 | } else { | |
2244 | /* update first pts if needed */ | |
1bc1cfdd | 2245 | if (c->first_pts == AV_NOPTS_VALUE) { |
2effd274 | 2246 | c->first_pts = pkt.pts; |
1bc1cfdd GF |
2247 | c->start_time = cur_time; |
2248 | } | |
2249 | c->cur_pts = pkt.pts; | |
2effd274 FB |
2250 | /* send it to the appropriate stream */ |
2251 | if (c->stream->feed) { | |
2252 | /* if coming from a feed, select the right stream */ | |
2253 | if (c->switch_pending) { | |
2254 | c->switch_pending = 0; | |
2255 | for(i=0;i<c->stream->nb_streams;i++) { | |
2256 | if (c->switch_feed_streams[i] == pkt.stream_index) { | |
2257 | if (pkt.flags & PKT_FLAG_KEY) { | |
2258 | do_switch_stream(c, i); | |
2259 | } | |
2260 | } | |
2261 | if (c->switch_feed_streams[i] >= 0) { | |
2262 | c->switch_pending = 1; | |
2263 | } | |
2264 | } | |
2265 | } | |
cde25790 | 2266 | for(i=0;i<c->stream->nb_streams;i++) { |
2effd274 FB |
2267 | if (c->feed_streams[i] == pkt.stream_index) { |
2268 | pkt.stream_index = i; | |
cde25790 | 2269 | if (pkt.flags & PKT_FLAG_KEY) { |
2effd274 FB |
2270 | c->got_key_frame |= 1 << i; |
2271 | } | |
2272 | /* See if we have all the key frames, then | |
2273 | * we start to send. This logic is not quite | |
2274 | * right, but it works for the case of a | |
2275 | * single video stream with one or more | |
2276 | * audio streams (for which every frame is | |
2277 | * typically a key frame). | |
2278 | */ | |
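/* got_key_frame has one bit per feed stream; (mask + 1) >> nb_streams
   is non-zero only when all of the low nb_streams bits are set,
   i.e. once a key frame has been seen on every stream */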
2279 | if (!c->stream->send_on_key || | |
2280 | ((c->got_key_frame + 1) >> c->stream->nb_streams)) { | |
2281 | goto send_it; | |
cde25790 | 2282 | } |
cde25790 PG |
2283 | } |
2284 | } | |
2effd274 FB |
2285 | } else { |
2286 | AVCodecContext *codec; | |
2287 | ||
2288 | send_it: | |
2289 | /* specific handling for RTP: we use several | |
2290 | output stream (one for each RTP | |
2291 | connection). XXX: need more abstract handling */ | |
2292 | if (c->is_packetized) { | |
2293 | c->packet_stream_index = pkt.stream_index; | |
2294 | ctx = c->rtp_ctx[c->packet_stream_index]; | |
1b52b6bd MN |
2295 | if(!ctx) { |
2296 | av_free_packet(&pkt); | |
1bc1cfdd | 2297 | break; |
1b52b6bd | 2298 | } |
2effd274 | 2299 | codec = &ctx->streams[0]->codec; |
6edd6884 FB |
2300 | /* only one stream per RTP connection */ |
2301 | pkt.stream_index = 0; | |
2effd274 FB |
2302 | } else { |
2303 | ctx = &c->fmt_ctx; | |
2304 | /* Fudge here */ | |
2305 | codec = &ctx->streams[pkt.stream_index]->codec; | |
85f07f22 | 2306 | } |
2effd274 | 2307 | |
492cd3a9 | 2308 | codec->coded_frame->key_frame = ((pkt.flags & PKT_FLAG_KEY) != 0); |
2effd274 | 2309 | |
f747e6d3 | 2310 | #ifdef PJSG |
2effd274 FB |
2311 | if (codec->codec_type == CODEC_TYPE_AUDIO) { |
2312 | codec->frame_size = (codec->sample_rate * pkt.duration + 500000) / 1000000; | |
2313 | /* printf("Calculated size %d, from sr %d, duration %d\n", codec->frame_size, codec->sample_rate, pkt.duration); */ | |
2314 | } | |
2315 | #endif | |
2316 | ||
2317 | if (c->is_packetized) { | |
bc351386 FB |
2318 | int max_packet_size; |
2319 | if (c->rtp_protocol == RTSP_PROTOCOL_RTP_TCP) | |
2320 | max_packet_size = RTSP_TCP_MAX_PACKET_SIZE; | |
2321 | else | |
2322 | max_packet_size = url_get_max_packet_size(c->rtp_handles[c->packet_stream_index]); | |
2323 | ret = url_open_dyn_packet_buf(&ctx->pb, max_packet_size); | |
2effd274 FB |
2324 | c->packet_byte_count = 0; |
2325 | c->packet_start_time_us = av_gettime(); | |
2326 | } else { | |
2327 | ret = url_open_dyn_buf(&ctx->pb); | |
2328 | } | |
2329 | if (ret < 0) { | |
2330 | /* XXX: potential leak */ | |
2331 | return -1; | |
2332 | } | |
90dca141 | 2333 | if (av_write_frame(ctx, pkt.stream_index, pkt.data, pkt.size)) { |
2effd274 FB |
2334 | c->state = HTTPSTATE_SEND_DATA_TRAILER; |
2335 | } | |
2336 | ||
2337 | len = url_close_dyn_buf(&ctx->pb, &c->pb_buffer); | |
2338 | c->buffer_ptr = c->pb_buffer; | |
2339 | c->buffer_end = c->pb_buffer + len; | |
2340 | ||
2341 | codec->frame_number++; | |
f747e6d3 | 2342 | } |
2effd274 FB |
2343 | #ifndef AV_READ_FRAME |
2344 | av_free_packet(&pkt); | |
f747e6d3 | 2345 | #endif |
85f07f22 | 2346 | } |
85f07f22 FB |
2347 | } |
2348 | } | |
2349 | break; | |
2350 | default: | |
2351 | case HTTPSTATE_SEND_DATA_TRAILER: | |
2352 | /* last packet test ? */ | |
2effd274 | 2353 | if (c->last_packet_sent || c->is_packetized) |
85f07f22 | 2354 | return -1; |
2effd274 | 2355 | ctx = &c->fmt_ctx; |
85f07f22 | 2356 | /* prepare header */ |
2effd274 FB |
2357 | if (url_open_dyn_buf(&ctx->pb) < 0) { |
2358 | /* XXX: potential leak */ | |
2359 | return -1; | |
2360 | } | |
2361 | av_write_trailer(ctx); | |
2362 | len = url_close_dyn_buf(&ctx->pb, &c->pb_buffer); | |
2363 | c->buffer_ptr = c->pb_buffer; | |
2364 | c->buffer_end = c->pb_buffer + len; | |
2365 | ||
85f07f22 FB |
2366 | c->last_packet_sent = 1; |
2367 | break; | |
2368 | } | |
2369 | return 0; | |
2370 | } | |
2371 | ||
2effd274 FB |
2372 | /* in bit/s */ |
2373 | #define SHORT_TERM_BANDWIDTH 8000000 | |
2374 | ||
85f07f22 | 2375 | /* should convert the format at the same time */ |
bc351386 FB |
2376 | /* send data starting at c->buffer_ptr to the output connection |
2377 | (either UDP or TCP connection) */ | |
5eb765ef | 2378 | static int http_send_data(HTTPContext *c) |
85f07f22 | 2379 | { |
2effd274 | 2380 | int len, ret, dt; |
85f07f22 | 2381 | |
bc351386 FB |
2382 | for(;;) { |
2383 | if (c->buffer_ptr >= c->buffer_end) { | |
2384 | ret = http_prepare_data(c); | |
2385 | if (ret < 0) | |
2386 | return -1; | |
2387 | else if (ret != 0) { | |
2388 | /* state change requested */ | |
2389 | break; | |
f747e6d3 | 2390 | } |
2effd274 | 2391 | } else { |
bc351386 FB |
2392 | if (c->is_packetized) { |
2393 | /* RTP data output */ | |
2394 | len = c->buffer_end - c->buffer_ptr; | |
2395 | if (len < 4) { | |
2396 | /* fail safe - should never happen */ | |
2397 | fail1: | |
2398 | c->buffer_ptr = c->buffer_end; | |
2effd274 FB |
2399 | return 0; |
2400 | } | |
bc351386 FB |
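/* the dynamic packet buffer prefixes each packet with its length as a
   32-bit big-endian integer; decode it before framing or sending */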
2401 | len = (c->buffer_ptr[0] << 24) | |
2402 | (c->buffer_ptr[1] << 16) | | |
2403 | (c->buffer_ptr[2] << 8) | | |
2404 | (c->buffer_ptr[3]); | |
2405 | if (len > (c->buffer_end - c->buffer_ptr)) | |
2406 | goto fail1; | |
2407 | ||
2408 | if (c->rtp_protocol == RTSP_PROTOCOL_RTP_TCP) { | |
2409 | /* RTP packets are sent inside the RTSP TCP connection */ | |
2410 | ByteIOContext pb1, *pb = &pb1; | |
2411 | int interleaved_index, size; | |
2412 | uint8_t header[4]; | |
2413 | HTTPContext *rtsp_c; | |
2414 | ||
2415 | rtsp_c = c->rtsp_c; | |
2416 | /* if no RTSP connection left, error */ | |
2417 | if (!rtsp_c) | |
2418 | return -1; | |
2419 | /* if already sending something, then wait. */ | |
2420 | if (rtsp_c->state != RTSPSTATE_WAIT_REQUEST) { | |
2421 | break; | |
2422 | } | |
2423 | if (url_open_dyn_buf(pb) < 0) | |
2424 | goto fail1; | |
2425 | interleaved_index = c->packet_stream_index * 2; | |
2426 | /* RTCP packets are sent at odd indexes */ | |
2427 | if (c->buffer_ptr[1] == 200) | |
2428 | interleaved_index++; | |
2429 | /* write RTSP TCP header */ | |
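/* RFC 2326 interleaved binary framing: a '$' byte, the one-byte
   channel id, then the payload length as a 16-bit big-endian value */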
2430 | header[0] = '$'; | |
2431 | header[1] = interleaved_index; | |
2432 | header[2] = len >> 8; | |
2433 | header[3] = len; | |
2434 | put_buffer(pb, header, 4); | |
2435 | /* write RTP packet data */ | |
2436 | c->buffer_ptr += 4; | |
2437 | put_buffer(pb, c->buffer_ptr, len); | |
2438 | size = url_close_dyn_buf(pb, &c->packet_buffer); | |
2439 | /* prepare asynchronous TCP sending */ | |
2440 | rtsp_c->packet_buffer_ptr = c->packet_buffer; | |
2441 | rtsp_c->packet_buffer_end = c->packet_buffer + size; | |
2442 | rtsp_c->state = RTSPSTATE_SEND_PACKET; | |
2443 | } else { | |
2444 | /* send RTP packet directly in UDP */ | |
2445 | ||
2446 | /* short term bandwidth limitation */ | |
2447 | dt = av_gettime() - c->packet_start_time_us; | |
2448 | if (dt < 1) | |
2449 | dt = 1; | |
2450 | ||
2451 | if ((c->packet_byte_count + len) * (int64_t)1000000 >= | |
2452 | (SHORT_TERM_BANDWIDTH / 8) * (int64_t)dt) { | |
2453 | /* bandwidth overflow : wait at most one tick and retry */ | |
2454 | c->state = HTTPSTATE_WAIT_SHORT; | |
2455 | return 0; | |
2456 | } | |
2457 | ||
2458 | c->buffer_ptr += 4; | |
2459 | url_write(c->rtp_handles[c->packet_stream_index], | |
2460 | c->buffer_ptr, len); | |
2461 | } | |
2effd274 | 2462 | c->buffer_ptr += len; |
bc351386 FB |
2463 | c->packet_byte_count += len; |
2464 | } else { | |
2465 | /* TCP data output */ | |
2466 | len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr); | |
2467 | if (len < 0) { | |
2468 | if (errno != EAGAIN && errno != EINTR) { | |
2469 | /* error : close connection */ | |
2470 | return -1; | |
2471 | } else { | |
2472 | return 0; | |
2473 | } | |
2474 | } else { | |
2475 | c->buffer_ptr += len; | |
2476 | } | |
2effd274 | 2477 | } |
bc351386 FB |
2478 | c->data_count += len; |
2479 | update_datarate(&c->datarate, c->data_count); | |
2480 | if (c->stream) | |
2481 | c->stream->bytes_served += len; | |
2482 | break; | |
85f07f22 | 2483 | } |
bc351386 | 2484 | } /* for(;;) */ |
85f07f22 FB |
2485 | return 0; |
2486 | } | |
2487 | ||
2488 | static int http_start_receive_data(HTTPContext *c) | |
2489 | { | |
2490 | int fd; | |
2491 | ||
2492 | if (c->stream->feed_opened) | |
2493 | return -1; | |
2494 | ||
e322ea48 PG |
2495 | /* Don't permit writing to this one */ |
2496 | if (c->stream->readonly) | |
2497 | return -1; | |
2498 | ||
85f07f22 FB |
2499 | /* open feed */ |
2500 | fd = open(c->stream->feed_filename, O_RDWR); | |
2501 | if (fd < 0) | |
2502 | return -1; | |
2503 | c->feed_fd = fd; | |
2504 | ||
2505 | c->stream->feed_write_index = ffm_read_write_index(fd); | |
2506 | c->stream->feed_size = lseek(fd, 0, SEEK_END); | |
2507 | lseek(fd, 0, SEEK_SET); | |
2508 | ||
2509 | /* init buffer input */ | |
2510 | c->buffer_ptr = c->buffer; | |
2511 | c->buffer_end = c->buffer + FFM_PACKET_SIZE; | |
2512 | c->stream->feed_opened = 1; | |
2513 | return 0; | |
2514 | } | |
2515 | ||
2516 | static int http_receive_data(HTTPContext *c) | |
2517 | { | |
85f07f22 FB |
2518 | HTTPContext *c1; |
2519 | ||
a6e14edd PG |
2520 | if (c->buffer_end > c->buffer_ptr) { |
2521 | int len; | |
2522 | ||
2523 | len = read(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr); | |
2524 | if (len < 0) { | |
2525 | if (errno != EAGAIN && errno != EINTR) { | |
2526 | /* error : close connection */ | |
2527 | goto fail; | |
2528 | } | |
2529 | } else if (len == 0) { | |
2530 | /* end of connection : close it */ | |
2531 | goto fail; | |
2532 | } else { | |
2533 | c->buffer_ptr += len; | |
2534 | c->data_count += len; | |
5eb765ef | 2535 | update_datarate(&c->datarate, c->data_count); |
a6e14edd PG |
2536 | } |
2537 | } | |
2538 | ||
85f07f22 | 2539 | if (c->buffer_ptr >= c->buffer_end) { |
f747e6d3 | 2540 | FFStream *feed = c->stream; |
85f07f22 FB |
2541 | /* a packet has been received: write it to the store, unless |
2542 | it is the header */ |
2543 | if (c->data_count > FFM_PACKET_SIZE) { | |
85f07f22 FB |
2544 | |
2545 | // printf("writing pos=0x%Lx size=0x%Lx\n", feed->feed_write_index, feed->feed_size); | |
2546 | /* XXX: use llseek or url_seek */ | |
2547 | lseek(c->feed_fd, feed->feed_write_index, SEEK_SET); | |
2548 | write(c->feed_fd, c->buffer, FFM_PACKET_SIZE); | |
2549 | ||
2550 | feed->feed_write_index += FFM_PACKET_SIZE; | |
2551 | /* update file size */ | |
2552 | if (feed->feed_write_index > c->stream->feed_size) | |
2553 | feed->feed_size = feed->feed_write_index; | |
2554 | ||
2555 | /* handle wrap around if max file size reached */ | |
2556 | if (feed->feed_write_index >= c->stream->feed_max_size) | |
2557 | feed->feed_write_index = FFM_PACKET_SIZE; | |
2558 | ||
2559 | /* write index */ | |
2560 | ffm_write_write_index(c->feed_fd, feed->feed_write_index); | |
2561 | ||
2562 | /* wake up any waiting connections */ | |
2563 | for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) { | |
2564 | if (c1->state == HTTPSTATE_WAIT_FEED && | |
2565 | c1->stream->feed == c->stream->feed) { | |
2566 | c1->state = HTTPSTATE_SEND_DATA; | |
2567 | } | |
2568 | } | |
f747e6d3 PG |
2569 | } else { |
2570 | /* We have a header in our hands that contains useful data */ | |
2571 | AVFormatContext s; | |
bd7cf6ad | 2572 | AVInputFormat *fmt_in; |
f747e6d3 PG |
2573 | ByteIOContext *pb = &s.pb; |
2574 | int i; | |
2575 | ||
2576 | memset(&s, 0, sizeof(s)); | |
2577 | ||
2578 | url_open_buf(pb, c->buffer, c->buffer_end - c->buffer, URL_RDONLY); | |
2579 | pb->buf_end = c->buffer_end; /* ?? */ | |
2580 | pb->is_streamed = 1; | |
2581 | ||
bd7cf6ad FB |
2582 | /* use feed output format name to find corresponding input format */ |
2583 | fmt_in = av_find_input_format(feed->fmt->name); | |
2584 | if (!fmt_in) | |
2585 | goto fail; | |
2586 | ||
98486a6b RS |
2587 | if (fmt_in->priv_data_size > 0) { |
2588 | s.priv_data = av_mallocz(fmt_in->priv_data_size); | |
2589 | if (!s.priv_data) | |
2590 | goto fail; | |
2591 | } else | |
2592 | s.priv_data = NULL; | |
ec3b2232 | 2593 | |
bd7cf6ad | 2594 | if (fmt_in->read_header(&s, 0) < 0) { |
ec3b2232 | 2595 | av_freep(&s.priv_data); |
f747e6d3 PG |
2596 | goto fail; |
2597 | } | |
2598 | ||
2599 | /* Now we have the actual streams */ | |
2600 | if (s.nb_streams != feed->nb_streams) { | |
ec3b2232 | 2601 | av_freep(&s.priv_data); |
f747e6d3 PG |
2602 | goto fail; |
2603 | } | |
2604 | for (i = 0; i < s.nb_streams; i++) { | |
bd7cf6ad FB |
2605 | memcpy(&feed->streams[i]->codec, |
2606 | &s.streams[i]->codec, sizeof(AVCodecContext)); | |
f747e6d3 | 2607 | } |
ec3b2232 | 2608 | av_freep(&s.priv_data); |
85f07f22 FB |
2609 | } |
2610 | c->buffer_ptr = c->buffer; | |
2611 | } | |
2612 | ||
85f07f22 FB |
2613 | return 0; |
2614 | fail: | |
2615 | c->stream->feed_opened = 0; | |
2616 | close(c->feed_fd); | |
2617 | return -1; | |
2618 | } | |
2619 | ||
2effd274 FB |
2620 | /********************************************************************/ |
2621 | /* RTSP handling */ | |
2622 | ||
2623 | static void rtsp_reply_header(HTTPContext *c, enum RTSPStatusCode error_number) | |
2624 | { | |
2625 | const char *str; | |
2626 | time_t ti; | |
2627 | char *p; | |
2628 | char buf2[32]; | |
2629 | ||
2630 | switch(error_number) { | |
2631 | #define DEF(n, c, s) case c: str = s; break; | |
2632 | #include "rtspcodes.h" | |
2633 | #undef DEF | |
2634 | default: | |
2635 | str = "Unknown Error"; | |
2636 | break; | |
2637 | } | |
2638 | ||
2639 | url_fprintf(c->pb, "RTSP/1.0 %d %s\r\n", error_number, str); | |
2640 | url_fprintf(c->pb, "CSeq: %d\r\n", c->seq); | |
2641 | ||
2642 | /* output GMT time */ | |
2643 | ti = time(NULL); | |
2644 | p = ctime(&ti); | |
2645 | strcpy(buf2, p); | |
2646 | p = buf2 + strlen(p) - 1; | |
2647 | if (*p == '\n') | |
2648 | *p = '\0'; | |
2649 | url_fprintf(c->pb, "Date: %s GMT\r\n", buf2); | |
2650 | } | |
2651 | ||
2652 | static void rtsp_reply_error(HTTPContext *c, enum RTSPStatusCode error_number) | |
2653 | { | |
2654 | rtsp_reply_header(c, error_number); | |
2655 | url_fprintf(c->pb, "\r\n"); | |
2656 | } | |
2657 | ||
2658 | static int rtsp_parse_request(HTTPContext *c) | |
2659 | { | |
2660 | const char *p, *p1, *p2; | |
2661 | char cmd[32]; | |
2662 | char url[1024]; | |
2663 | char protocol[32]; | |
2664 | char line[1024]; | |
2665 | ByteIOContext pb1; | |
2666 | int len; | |
2667 | RTSPHeader header1, *header = &header1; | |
2668 | ||
2669 | c->buffer_ptr[0] = '\0'; | |
2670 | p = c->buffer; | |
2671 | ||
2672 | get_word(cmd, sizeof(cmd), &p); | |
2673 | get_word(url, sizeof(url), &p); | |
2674 | get_word(protocol, sizeof(protocol), &p); | |
2675 | ||
2676 | pstrcpy(c->method, sizeof(c->method), cmd); | |
2677 | pstrcpy(c->url, sizeof(c->url), url); | |
2678 | pstrcpy(c->protocol, sizeof(c->protocol), protocol); | |
2679 | ||
2680 | c->pb = &pb1; | |
2681 | if (url_open_dyn_buf(c->pb) < 0) { | |
2682 | /* XXX: cannot do more */ | |
2683 | c->pb = NULL; /* safety */ | |
2684 | return -1; | |
2685 | } | |
2686 | ||
2687 | /* check version name */ | |
2688 | if (strcmp(protocol, "RTSP/1.0") != 0) { | |
2689 | rtsp_reply_error(c, RTSP_STATUS_VERSION); | |
2690 | goto the_end; | |
2691 | } | |
2692 | ||
2693 | /* parse each header line */ | |
2694 | memset(header, 0, sizeof(RTSPHeader)); | |
2695 | /* skip to next line */ | |
2696 | while (*p != '\n' && *p != '\0') | |
2697 | p++; | |
2698 | if (*p == '\n') | |
2699 | p++; | |
2700 | while (*p != '\0') { | |
2701 | p1 = strchr(p, '\n'); | |
2702 | if (!p1) | |
2703 | break; | |
2704 | p2 = p1; | |
2705 | if (p2 > p && p2[-1] == '\r') | |
2706 | p2--; | |
2707 | /* skip empty line */ | |
2708 | if (p2 == p) | |
2709 | break; | |
2710 | len = p2 - p; | |
2711 | if (len > sizeof(line) - 1) | |
2712 | len = sizeof(line) - 1; | |
2713 | memcpy(line, p, len); | |
2714 | line[len] = '\0'; | |
2715 | rtsp_parse_line(header, line); | |
2716 | p = p1 + 1; | |
2717 | } | |
2718 | ||
2719 | /* handle sequence number */ | |
2720 | c->seq = header->seq; | |
2721 | ||
2722 | if (!strcmp(cmd, "DESCRIBE")) { | |
2723 | rtsp_cmd_describe(c, url); | |
0df65975 AR |
2724 | } else if (!strcmp(cmd, "OPTIONS")) { |
2725 | rtsp_cmd_options(c, url); | |
2effd274 FB |
2726 | } else if (!strcmp(cmd, "SETUP")) { |
2727 | rtsp_cmd_setup(c, url, header); | |
2728 | } else if (!strcmp(cmd, "PLAY")) { | |
2729 | rtsp_cmd_play(c, url, header); | |
2730 | } else if (!strcmp(cmd, "PAUSE")) { | |
2731 | rtsp_cmd_pause(c, url, header); | |
2732 | } else if (!strcmp(cmd, "TEARDOWN")) { | |
2733 | rtsp_cmd_teardown(c, url, header); | |
2734 | } else { | |
2735 | rtsp_reply_error(c, RTSP_STATUS_METHOD); | |
2736 | } | |
2737 | the_end: | |
2738 | len = url_close_dyn_buf(c->pb, &c->pb_buffer); | |
2739 | c->pb = NULL; /* safety */ | |
2740 | if (len < 0) { | |
2741 | /* XXX: cannot do more */ | |
2742 | return -1; | |
2743 | } | |
2744 | c->buffer_ptr = c->pb_buffer; | |
2745 | c->buffer_end = c->pb_buffer + len; | |
2746 | c->state = RTSPSTATE_SEND_REPLY; | |
2747 | return 0; | |
2748 | } | |
2749 | ||
829ac53d FB |
2750 | /* XXX: move that to rtsp.c, but would need to replace FFStream by |
2751 | AVFormatContext */ | |
0c1a9eda | 2752 | static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer, |
829ac53d | 2753 | struct in_addr my_ip) |
2effd274 FB |
2754 | { |
2755 | ByteIOContext pb1, *pb = &pb1; | |
0fa45e19 | 2756 | int i, payload_type, port, private_payload_type, j; |
2effd274 FB |
2757 | const char *ipstr, *title, *mediatype; |
2758 | AVStream *st; | |
2759 | ||
2effd274 FB |
2760 | if (url_open_dyn_buf(pb) < 0) |
2761 | return -1; | |
2762 | ||
2763 | /* general media info */ | |
2764 | ||
2765 | url_fprintf(pb, "v=0\n"); | |
829ac53d | 2766 | ipstr = inet_ntoa(my_ip); |
2effd274 FB |
2767 | url_fprintf(pb, "o=- 0 0 IN IP4 %s\n", ipstr); |
2768 | title = stream->title; | |
2769 | if (title[0] == '\0') | |
2770 | title = "No Title"; | |
2771 | url_fprintf(pb, "s=%s\n", title); | |
2772 | if (stream->comment[0] != '\0') | |
2773 | url_fprintf(pb, "i=%s\n", stream->comment); | |
829ac53d FB |
2774 | if (stream->is_multicast) { |
2775 | url_fprintf(pb, "c=IN IP4 %s\n", inet_ntoa(stream->multicast_ip)); | |
2776 | } | |
2effd274 | 2777 | /* for each stream, we output the necessary info */ |
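/* payload types without a static RTP/AVP assignment get dynamic
   numbers, which start at 96 */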
0fa45e19 | 2778 | private_payload_type = 96; |
2effd274 FB |
2779 | for(i = 0; i < stream->nb_streams; i++) { |
2780 | st = stream->streams[i]; | |
2781 | switch(st->codec.codec_type) { | |
2782 | case CODEC_TYPE_AUDIO: | |
2783 | mediatype = "audio"; | |
2784 | break; | |
2785 | case CODEC_TYPE_VIDEO: | |
2786 | mediatype = "video"; | |
2787 | break; | |
2788 | default: | |
2789 | mediatype = "application"; | |
2790 | break; | |
2791 | } | |
829ac53d FB |
2792 | /* NOTE: the port indication is not correct in case of |
2793 | unicast. It is not an issue because RTSP gives it */ | |
2effd274 | 2794 | payload_type = rtp_get_payload_type(&st->codec); |
0fa45e19 FB |
2795 | if (payload_type < 0) |
2796 | payload_type = private_payload_type++; | |
829ac53d FB |
2797 | if (stream->is_multicast) { |
2798 | port = stream->multicast_port + 2 * i; | |
2799 | } else { | |
2800 | port = 0; | |
2801 | } | |
2effd274 | 2802 | url_fprintf(pb, "m=%s %d RTP/AVP %d\n", |
829ac53d | 2803 | mediatype, port, payload_type); |
0fa45e19 FB |
2804 | if (payload_type >= 96) { |
2805 | /* for private payload type, we need to give more info */ | |
2806 | switch(st->codec.codec_id) { | |
2807 | case CODEC_ID_MPEG4: | |
2808 | { | |
2809 | uint8_t *data; | |
2810 | url_fprintf(pb, "a=rtpmap:%d MP4V-ES/%d\n", | |
2811 | payload_type, 90000); | |
2812 | /* we must also add the mpeg4 header */ | |
2813 | data = st->codec.extradata; | |
2814 | if (data) { | |
17705a34 | 2815 | url_fprintf(pb, "a=fmtp:%d config=", payload_type); |
0fa45e19 FB |
2816 | for(j=0;j<st->codec.extradata_size;j++) { |
2817 | url_fprintf(pb, "%02x", data[j]); | |
2818 | } | |
2819 | url_fprintf(pb, "\n"); | |
2820 | } | |
2821 | } | |
2822 | break; | |
2823 | default: | |
2824 | /* XXX: add other codecs ? */ | |
2825 | goto fail; | |
2826 | } | |
2827 | } | |
2effd274 FB |
2828 | url_fprintf(pb, "a=control:streamid=%d\n", i); |
2829 | } | |
2830 | return url_close_dyn_buf(pb, pbuffer); | |
0fa45e19 FB |
2831 | fail: |
2832 | url_close_dyn_buf(pb, pbuffer); | |
2833 | av_free(*pbuffer); | |
2834 | return -1; | |
2effd274 FB |
2835 | } |
2836 | ||
0df65975 AR |
2837 | static void rtsp_cmd_options(HTTPContext *c, const char *url) |
2838 | { | |
2839 | // rtsp_reply_header(c, RTSP_STATUS_OK); | |
2840 | url_fprintf(c->pb, "RTSP/1.0 %d %s\r\n", RTSP_STATUS_OK, "OK"); | |
2841 | url_fprintf(c->pb, "CSeq: %d\r\n", c->seq); | |
2842 | url_fprintf(c->pb, "Public: %s\r\n", "OPTIONS, DESCRIBE, SETUP, TEARDOWN, PLAY, PAUSE"); | |
2843 | url_fprintf(c->pb, "\r\n"); | |
2844 | } | |
2845 | ||
2effd274 FB |
2846 | static void rtsp_cmd_describe(HTTPContext *c, const char *url) |
2847 | { | |
2848 | FFStream *stream; | |
2849 | char path1[1024]; | |
2850 | const char *path; | |
0c1a9eda | 2851 | uint8_t *content; |
829ac53d FB |
2852 | int content_length, len; |
2853 | struct sockaddr_in my_addr; | |
2effd274 FB |
2854 | |
2855 | /* find which url is asked */ | |
2856 | url_split(NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url); | |
2857 | path = path1; | |
2858 | if (*path == '/') | |
2859 | path++; | |
2860 | ||
2861 | for(stream = first_stream; stream != NULL; stream = stream->next) { | |
2862 | if (!stream->is_feed && stream->fmt == &rtp_mux && | |
2863 | !strcmp(path, stream->filename)) { | |
2864 | goto found; | |
2865 | } | |
2866 | } | |
2867 | /* no stream found */ | |
2868 | rtsp_reply_error(c, RTSP_STATUS_SERVICE); /* XXX: right error ? */ | |
2869 | return; | |
2870 | ||
2871 | found: | |
2872 | /* prepare the media description in sdp format */ | |
829ac53d FB |
2873 | |
2874 | /* get the host IP */ | |
2875 | len = sizeof(my_addr); | |
2876 | getsockname(c->fd, (struct sockaddr *)&my_addr, &len); | |
2877 | ||
2878 | content_length = prepare_sdp_description(stream, &content, my_addr.sin_addr); | |
2effd274 FB |
2879 | if (content_length < 0) { |
2880 | rtsp_reply_error(c, RTSP_STATUS_INTERNAL); | |
2881 | return; | |
2882 | } | |
2883 | rtsp_reply_header(c, RTSP_STATUS_OK); | |
2884 | url_fprintf(c->pb, "Content-Type: application/sdp\r\n"); | |
2885 | url_fprintf(c->pb, "Content-Length: %d\r\n", content_length); | |
2886 | url_fprintf(c->pb, "\r\n"); | |
2887 | put_buffer(c->pb, content, content_length); | |
2888 | } | |
2889 | ||
2890 | static HTTPContext *find_rtp_session(const char *session_id) | |
2891 | { | |
2892 | HTTPContext *c; | |
2893 | ||
2894 | if (session_id[0] == '\0') | |
2895 | return NULL; | |
2896 | ||
2897 | for(c = first_http_ctx; c != NULL; c = c->next) { | |
2898 | if (!strcmp(c->session_id, session_id)) | |
2899 | return c; | |
2900 | } | |
2901 | return NULL; | |
2902 | } | |
2903 | ||
b29f97d1 | 2904 | static RTSPTransportField *find_transport(RTSPHeader *h, enum RTSPProtocol protocol) |
2effd274 FB |
2905 | { |
2906 | RTSPTransportField *th; | |
2907 | int i; | |
2908 | ||
2909 | for(i=0;i<h->nb_transports;i++) { | |
2910 | th = &h->transports[i]; | |
2911 | if (th->protocol == protocol) | |
2912 | return th; | |
2913 | } | |
2914 | return NULL; | |
2915 | } | |
2916 | ||
2917 | static void rtsp_cmd_setup(HTTPContext *c, const char *url, | |
2918 | RTSPHeader *h) | |
2919 | { | |
2920 | FFStream *stream; | |
2921 | int stream_index, port; | |
2922 | char buf[1024]; | |
2923 | char path1[1024]; | |
2924 | const char *path; | |
2925 | HTTPContext *rtp_c; | |
2926 | RTSPTransportField *th; | |
2927 | struct sockaddr_in dest_addr; | |
2928 | RTSPActionServerSetup setup; | |
2929 | ||
2930 | /* find which url is asked */ | |
2931 | url_split(NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url); | |
2932 | path = path1; | |
2933 | if (*path == '/') | |
2934 | path++; | |
2935 | ||
2936 | /* now check each stream */ | |
2937 | for(stream = first_stream; stream != NULL; stream = stream->next) { | |
2938 | if (!stream->is_feed && stream->fmt == &rtp_mux) { | |
2939 | /* accept aggregate filenames only if single stream */ | |
2940 | if (!strcmp(path, stream->filename)) { | |
2941 | if (stream->nb_streams != 1) { | |
2942 | rtsp_reply_error(c, RTSP_STATUS_AGGREGATE); | |
2943 | return; | |
2944 | } | |
2945 | stream_index = 0; | |
2946 | goto found; | |
2947 | } | |
2948 | ||
2949 | for(stream_index = 0; stream_index < stream->nb_streams; | |
2950 | stream_index++) { | |
2951 | snprintf(buf, sizeof(buf), "%s/streamid=%d", | |
2952 | stream->filename, stream_index); | |
2953 | if (!strcmp(path, buf)) | |
2954 | goto found; | |
2955 | } | |
2956 | } | |
2957 | } | |
2958 | /* no stream found */ | |
2959 | rtsp_reply_error(c, RTSP_STATUS_SERVICE); /* XXX: right error ? */ | |
2960 | return; | |
2961 | found: | |
2962 | ||
2963 | /* generate session id if needed */ | |
2964 | if (h->session_id[0] == '\0') { | |
2965 | snprintf(h->session_id, sizeof(h->session_id), | |
2966 | "%08x%08x", (int)random(), (int)random()); | |
2967 | } | |
2968 | ||
2969 | /* find rtp session, and create it if none found */ | |
2970 | rtp_c = find_rtp_session(h->session_id); | |
2971 | if (!rtp_c) { | |
bc351386 FB |
2972 | /* always prefer UDP */ |
2973 | th = find_transport(h, RTSP_PROTOCOL_RTP_UDP); | |
2974 | if (!th) { | |
2975 | th = find_transport(h, RTSP_PROTOCOL_RTP_TCP); | |
2976 | if (!th) { | |
2977 | rtsp_reply_error(c, RTSP_STATUS_TRANSPORT); | |
2978 | return; | |
2979 | } | |
2980 | } | |
2981 | ||
2982 | rtp_c = rtp_new_connection(&c->from_addr, stream, h->session_id, | |
2983 | th->protocol); | |
2effd274 FB |
2984 | if (!rtp_c) { |
2985 | rtsp_reply_error(c, RTSP_STATUS_BANDWIDTH); | |
2986 | return; | |
2987 | } | |
2988 | ||
2989 | /* open input stream */ | |
2990 | if (open_input_stream(rtp_c, "") < 0) { | |
2991 | rtsp_reply_error(c, RTSP_STATUS_INTERNAL); | |
2992 | return; | |
2993 | } | |
2effd274 FB |
2994 | } |
2995 | ||
2996 | /* test if stream is OK (test needed because several SETUPs need |
2997 | to be done for a given file) */ | |
2998 | if (rtp_c->stream != stream) { | |
2999 | rtsp_reply_error(c, RTSP_STATUS_SERVICE); | |
3000 | return; | |
3001 | } | |
3002 | ||
3003 | /* test if stream is already set up */ | |
3004 | if (rtp_c->rtp_ctx[stream_index]) { | |
3005 | rtsp_reply_error(c, RTSP_STATUS_STATE); | |
3006 | return; | |
3007 | } | |
3008 | ||
3009 | /* check transport */ | |
3010 | th = find_transport(h, rtp_c->rtp_protocol); | |
3011 | if (!th || (th->protocol == RTSP_PROTOCOL_RTP_UDP && | |
3012 | th->client_port_min <= 0)) { | |
3013 | rtsp_reply_error(c, RTSP_STATUS_TRANSPORT); | |
3014 | return; | |
3015 | } | |
3016 | ||
3017 | /* setup default options */ | |
3018 | setup.transport_option[0] = '\0'; | |
3019 | dest_addr = rtp_c->from_addr; | |
3020 | dest_addr.sin_port = htons(th->client_port_min); | |
3021 | ||
3022 | /* add transport option if needed */ | |
3023 | if (ff_rtsp_callback) { | |
3024 | setup.ipaddr = ntohl(dest_addr.sin_addr.s_addr); | |
3025 | if (ff_rtsp_callback(RTSP_ACTION_SERVER_SETUP, rtp_c->session_id, | |
3026 | (char *)&setup, sizeof(setup), | |
3027 | stream->rtsp_option) < 0) { | |
3028 | rtsp_reply_error(c, RTSP_STATUS_TRANSPORT); | |
3029 | return; | |
3030 | } | |
3031 | dest_addr.sin_addr.s_addr = htonl(setup.ipaddr); | |
3032 | } | |
3033 | ||
3034 | /* setup stream */ | |
bc351386 | 3035 | if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr, c) < 0) { |
2effd274 FB |
3036 | rtsp_reply_error(c, RTSP_STATUS_TRANSPORT); |
3037 | return; | |
3038 | } | |
3039 | ||
3040 | /* now everything is OK, so we can send the connection parameters */ | |
3041 | rtsp_reply_header(c, RTSP_STATUS_OK); | |
3042 | /* session ID */ | |
3043 | url_fprintf(c->pb, "Session: %s\r\n", rtp_c->session_id); | |
3044 | ||
3045 | switch(rtp_c->rtp_protocol) { | |
3046 | case RTSP_PROTOCOL_RTP_UDP: | |
3047 | port = rtp_get_local_port(rtp_c->rtp_handles[stream_index]); | |
3048 | url_fprintf(c->pb, "Transport: RTP/AVP/UDP;unicast;" | |
3049 | "client_port=%d-%d;server_port=%d-%d", | |
3050 | th->client_port_min, th->client_port_min + 1, | |
3051 | port, port + 1); | |
3052 | break; | |
3053 | case RTSP_PROTOCOL_RTP_TCP: | |
3054 | url_fprintf(c->pb, "Transport: RTP/AVP/TCP;interleaved=%d-%d", | |
3055 | stream_index * 2, stream_index * 2 + 1); | |
3056 | break; | |
3057 | default: | |
3058 | break; | |
3059 | } | |
3060 | if (setup.transport_option[0] != '\0') { | |
3061 | url_fprintf(c->pb, ";%s", setup.transport_option); | |
3062 | } | |
3063 | url_fprintf(c->pb, "\r\n"); | |
3064 | ||
3065 | ||
3066 | url_fprintf(c->pb, "\r\n"); | |
3067 | } | |
3068 | ||
3069 | ||
3070 | /* find an rtp connection by using the session ID. Check consistency | |
3071 | with filename */ | |
3072 | static HTTPContext *find_rtp_session_with_url(const char *url, | |
3073 | const char *session_id) | |
3074 | { | |
3075 | HTTPContext *rtp_c; | |
3076 | char path1[1024]; | |
3077 | const char *path; | |
94d9ad5f GF |
3078 | char buf[1024]; |
3079 | int s; | |
2effd274 FB |
3080 | |
3081 | rtp_c = find_rtp_session(session_id); | |
3082 | if (!rtp_c) | |
3083 | return NULL; | |
3084 | ||
3085 | /* find which url is asked */ | |
3086 | url_split(NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url); | |
3087 | path = path1; | |
3088 | if (*path == '/') | |
3089 | path++; | |
94d9ad5f GF |
3090 | if(!strcmp(path, rtp_c->stream->filename)) return rtp_c; |
3091 | for(s=0; s<rtp_c->stream->nb_streams; ++s) { | |
3092 | snprintf(buf, sizeof(buf), "%s/streamid=%d", | |
3093 | rtp_c->stream->filename, s); | |
3094 | if(!strncmp(path, buf, sizeof(buf))) { | |
3095 | // XXX: Should we reply with RTSP_STATUS_ONLY_AGGREGATE if nb_streams>1? | |
3096 | return rtp_c; | |
3097 | } | |
3098 | } | |
3099 | return NULL; | |
2effd274 FB |
3100 | } |
3101 | ||
3102 | static void rtsp_cmd_play(HTTPContext *c, const char *url, RTSPHeader *h) | |
3103 | { | |
3104 | HTTPContext *rtp_c; | |
3105 | ||
3106 | rtp_c = find_rtp_session_with_url(url, h->session_id); | |
3107 | if (!rtp_c) { | |
3108 | rtsp_reply_error(c, RTSP_STATUS_SESSION); | |
3109 | return; | |
3110 | } | |
3111 | ||
3112 | if (rtp_c->state != HTTPSTATE_SEND_DATA && | |
3113 | rtp_c->state != HTTPSTATE_WAIT_FEED && | |
3114 | rtp_c->state != HTTPSTATE_READY) { | |
3115 | rtsp_reply_error(c, RTSP_STATUS_STATE); | |
3116 | return; | |
3117 | } | |
3118 | ||
3119 | rtp_c->state = HTTPSTATE_SEND_DATA; | |
3120 | ||
3121 | /* now everything is OK, so we can send the connection parameters */ | |
3122 | rtsp_reply_header(c, RTSP_STATUS_OK); | |
3123 | /* session ID */ | |
3124 | url_fprintf(c->pb, "Session: %s\r\n", rtp_c->session_id); | |
3125 | url_fprintf(c->pb, "\r\n"); | |
3126 | } | |
3127 | ||
3128 | static void rtsp_cmd_pause(HTTPContext *c, const char *url, RTSPHeader *h) | |
3129 | { | |
3130 | HTTPContext *rtp_c; | |
3131 | ||
3132 | rtp_c = find_rtp_session_with_url(url, h->session_id); | |
3133 | if (!rtp_c) { | |
3134 | rtsp_reply_error(c, RTSP_STATUS_SESSION); | |
3135 | return; | |
3136 | } | |
3137 | ||
3138 | if (rtp_c->state != HTTPSTATE_SEND_DATA && | |
3139 | rtp_c->state != HTTPSTATE_WAIT_FEED) { | |
3140 | rtsp_reply_error(c, RTSP_STATUS_STATE); | |
3141 | return; | |
3142 | } | |
3143 | ||
3144 | rtp_c->state = HTTPSTATE_READY; | |
1bc1cfdd | 3145 | rtp_c->first_pts = AV_NOPTS_VALUE; |
2effd274 FB |
3146 | /* now everything is OK, so we can send the connection parameters */ |
3147 | rtsp_reply_header(c, RTSP_STATUS_OK); | |
3148 | /* session ID */ | |
3149 | url_fprintf(c->pb, "Session: %s\r\n", rtp_c->session_id); | |
3150 | url_fprintf(c->pb, "\r\n"); | |
3151 | } | |
3152 | ||
3153 | static void rtsp_cmd_teardown(HTTPContext *c, const char *url, RTSPHeader *h) | |
3154 | { | |
3155 | HTTPContext *rtp_c; | |
3156 | ||
3157 | rtp_c = find_rtp_session_with_url(url, h->session_id); | |
3158 | if (!rtp_c) { | |
3159 | rtsp_reply_error(c, RTSP_STATUS_SESSION); | |
3160 | return; | |
3161 | } | |
3162 | ||
3163 | if (ff_rtsp_callback) { | |
3164 | ff_rtsp_callback(RTSP_ACTION_SERVER_TEARDOWN, rtp_c->session_id, | |
3165 | NULL, 0, | |
3166 | rtp_c->stream->rtsp_option); | |
3167 | } | |
3168 | ||
3169 | /* now everything is OK, so we can send the connection parameters */ | |
3170 | rtsp_reply_header(c, RTSP_STATUS_OK); | |
3171 | /* session ID */ | |
3172 | url_fprintf(c->pb, "Session: %s\r\n", rtp_c->session_id); | |
3173 | url_fprintf(c->pb, "\r\n"); | |
3174 | ||
3175 | /* abort the session last: rtp_c must not be referenced after it is freed */ | |
3176 | close_connection(rtp_c); | |
3177 | } | |
3178 | ||
3179 | ||
3180 | /********************************************************************/ | |
3181 | /* RTP handling */ | |
3182 | ||
6edd6884 | 3183 | static HTTPContext *rtp_new_connection(struct sockaddr_in *from_addr, |
bc351386 FB |
3184 | FFStream *stream, const char *session_id, |
3185 | enum RTSPProtocol rtp_protocol) | |
2effd274 FB |
3186 | { |
3187 | HTTPContext *c = NULL; | |
bc351386 FB |
3188 | const char *proto_str; |
3189 | ||
2effd274 FB |
3190 | /* XXX: should output a warning page when coming |
3191 | close to the connection limit */ | |
3192 | if (nb_connections >= nb_max_connections) | |
3193 | goto fail; | |
3194 | ||
3195 | /* add a new connection */ | |
3196 | c = av_mallocz(sizeof(HTTPContext)); | |
3197 | if (!c) | |
3198 | goto fail; | |
3199 | ||
3200 | c->fd = -1; | |
3201 | c->poll_entry = NULL; | |
6edd6884 | 3202 | c->from_addr = *from_addr; |
2effd274 FB |
3203 | c->buffer_size = IOBUFFER_INIT_SIZE; |
3204 | c->buffer = av_malloc(c->buffer_size); | |
3205 | if (!c->buffer) | |
3206 | goto fail; | |
3207 | nb_connections++; | |
3208 | c->stream = stream; | |
3209 | pstrcpy(c->session_id, sizeof(c->session_id), session_id); | |
3210 | c->state = HTTPSTATE_READY; | |
3211 | c->is_packetized = 1; | |
bc351386 FB |
3212 | c->rtp_protocol = rtp_protocol; |
3213 | ||
2effd274 | 3214 | /* protocol is shown in statistics */ |
bc351386 FB |
3215 | switch(c->rtp_protocol) { |
3216 | case RTSP_PROTOCOL_RTP_UDP_MULTICAST: | |
3217 | proto_str = "MCAST"; | |
3218 | break; | |
3219 | case RTSP_PROTOCOL_RTP_UDP: | |
3220 | proto_str = "UDP"; | |
3221 | break; | |
3222 | case RTSP_PROTOCOL_RTP_TCP: | |
3223 | proto_str = "TCP"; | |
3224 | break; | |
3225 | default: | |
3226 | proto_str = "???"; | |
3227 | break; | |
3228 | } | |
3229 | pstrcpy(c->protocol, sizeof(c->protocol), "RTP/"); | |
3230 | pstrcat(c->protocol, sizeof(c->protocol), proto_str); | |
2effd274 | 3231 | |
6edd6884 FB |
3232 | current_bandwidth += stream->bandwidth; |
3233 | ||
2effd274 FB |
3234 | c->next = first_http_ctx; |
3235 | first_http_ctx = c; | |
3236 | return c; | |
3237 | ||
3238 | fail: | |
3239 | if (c) { | |
3240 | av_free(c->buffer); | |
3241 | av_free(c); | |
3242 | } | |
3243 | return NULL; | |
3244 | } | |
3245 | ||
3246 | /* add a new RTP stream in an RTP connection (used in RTSP SETUP | |
bc351386 | 3247 | command). If the RTP/TCP protocol is used, the TCP connection 'rtsp_c' is |
2effd274 FB |
3248 | used. */ |
3249 | static int rtp_new_av_stream(HTTPContext *c, | |
bc351386 FB |
3250 | int stream_index, struct sockaddr_in *dest_addr, |
3251 | HTTPContext *rtsp_c) | |
2effd274 FB |
3252 | { |
3253 | AVFormatContext *ctx; | |
3254 | AVStream *st; | |
3255 | char *ipaddr; | |
3256 | URLContext *h = NULL; /* stays NULL so the 'fail' path only closes an opened handle */ | |
0c1a9eda | 3257 | uint8_t *dummy_buf; |
6edd6884 | 3258 | char buf2[32]; |
bc351386 | 3259 | int max_packet_size; |
6edd6884 | 3260 | |
2effd274 FB |
3261 | /* now we can open the relevant output stream */ |
3262 | ctx = av_mallocz(sizeof(AVFormatContext)); | |
3263 | if (!ctx) | |
3264 | return -1; | |
3265 | ctx->oformat = &rtp_mux; | |
3266 | ||
3267 | st = av_mallocz(sizeof(AVStream)); | |
3268 | if (!st) | |
3269 | goto fail; | |
3270 | ctx->nb_streams = 1; | |
3271 | ctx->streams[0] = st; | |
3272 | ||
3273 | if (!c->stream->feed || | |
3274 | c->stream->feed == c->stream) { | |
3275 | memcpy(st, c->stream->streams[stream_index], sizeof(AVStream)); | |
3276 | } else { | |
3277 | memcpy(st, | |
3278 | c->stream->feed->streams[c->stream->feed_streams[stream_index]], | |
3279 | sizeof(AVStream)); | |
3280 | } | |
3281 | ||
bc351386 FB |
3282 | /* build destination RTP address */ |
3283 | ipaddr = inet_ntoa(dest_addr->sin_addr); | |
3284 | ||
3285 | switch(c->rtp_protocol) { | |
3286 | case RTSP_PROTOCOL_RTP_UDP: | |
3287 | case RTSP_PROTOCOL_RTP_UDP_MULTICAST: | |
3288 | /* RTP/UDP case */ | |
2effd274 | 3289 | |
6edd6884 FB |
3290 | /* XXX: also pass as parameter to function ? */ |
3291 | if (c->stream->is_multicast) { | |
3292 | int ttl; | |
3293 | ttl = c->stream->multicast_ttl; | |
3294 | if (!ttl) | |
3295 | ttl = 16; | |
3296 | snprintf(ctx->filename, sizeof(ctx->filename), | |
3297 | "rtp://%s:%d?multicast=1&ttl=%d", | |
3298 | ipaddr, ntohs(dest_addr->sin_port), ttl); | |
3299 | } else { | |
3300 | snprintf(ctx->filename, sizeof(ctx->filename), | |
3301 | "rtp://%s:%d", ipaddr, ntohs(dest_addr->sin_port)); | |
3302 | } | |
2effd274 FB |
3303 | |
3304 | if (url_open(&h, ctx->filename, URL_WRONLY) < 0) | |
3305 | goto fail; | |
3306 | c->rtp_handles[stream_index] = h; | |
bc351386 FB |
3307 | max_packet_size = url_get_max_packet_size(h); |
3308 | break; | |
3309 | case RTSP_PROTOCOL_RTP_TCP: | |
3310 | /* RTP/TCP case */ | |
3311 | c->rtsp_c = rtsp_c; | |
3312 | max_packet_size = RTSP_TCP_MAX_PACKET_SIZE; | |
3313 | break; | |
3314 | default: | |
2effd274 FB |
3315 | goto fail; |
3316 | } | |
3317 | ||
bc351386 | 3318 | http_log("%s:%d - - [%s] \"PLAY %s/streamid=%d %s\"\n", |
6edd6884 FB |
3319 | ipaddr, ntohs(dest_addr->sin_port), |
3320 | ctime1(buf2), | |
bc351386 | 3321 | c->stream->filename, stream_index, c->protocol); |
6edd6884 | 3322 | |
2effd274 | 3323 | /* normally, no packets should be output here, but the packet size may be checked */ |
bc351386 | 3324 | if (url_open_dyn_packet_buf(&ctx->pb, max_packet_size) < 0) { |
2effd274 FB |
3325 | /* XXX: close stream */ |
3326 | goto fail; | |
3327 | } | |
3c27199b | 3328 | av_set_parameters(ctx, NULL); |
2effd274 FB |
3329 | if (av_write_header(ctx) < 0) { |
3330 | fail: | |
3331 | if (h) | |
3332 | url_close(h); | |
3333 | av_free(ctx); | |
3334 | return -1; | |
3335 | } | |
3336 | url_close_dyn_buf(&ctx->pb, &dummy_buf); | |
3337 | av_free(dummy_buf); | |
3338 | ||
3339 | c->rtp_ctx[stream_index] = ctx; | |
3340 | return 0; | |
3341 | } | |
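As a sketch of the destination URLs built by the UDP branch above (addresses and ports are hypothetical): a multicast stream with the default TTL of 16 would get rtp://224.2.0.1:6000?multicast=1&ttl=16, while a unicast client would get rtp://10.0.0.5:5000. The TCP branch opens no URL context at all; packets are instead sent over the RTSP connection remembered in c->rtsp_c, with the packet size capped at RTSP_TCP_MAX_PACKET_SIZE.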
3342 | ||
3343 | /********************************************************************/ | |
3344 | /* ffserver initialization */ | |
3345 | ||
b29f97d1 | 3346 | static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec) |
2effd274 FB |
3347 | { |
3348 | AVStream *fst; | |
3349 | ||
3350 | fst = av_mallocz(sizeof(AVStream)); | |
3351 | if (!fst) | |
3352 | return NULL; | |
3353 | fst->priv_data = av_mallocz(sizeof(FeedData)); | |
3354 | memcpy(&fst->codec, codec, sizeof(AVCodecContext)); | |
a4d70941 | 3355 | fst->codec.coded_frame = &dummy_frame; |
2effd274 FB |
3356 | stream->streams[stream->nb_streams++] = fst; |
3357 | return fst; | |
3358 | } | |
3359 | ||
85f07f22 | 3360 | /* return the stream number in the feed */ |
b29f97d1 | 3361 | static int add_av_stream(FFStream *feed, AVStream *st) |
85f07f22 FB |
3362 | { |
3363 | AVStream *fst; | |
3364 | AVCodecContext *av, *av1; | |
3365 | int i; | |
3366 | ||
3367 | av = &st->codec; | |
3368 | for(i=0;i<feed->nb_streams;i++) { | |
3369 | st = feed->streams[i]; | |
3370 | av1 = &st->codec; | |
f747e6d3 PG |
3371 | if (av1->codec_id == av->codec_id && |
3372 | av1->codec_type == av->codec_type && | |
85f07f22 FB |
3373 | av1->bit_rate == av->bit_rate) { |
3374 | ||
3375 | switch(av->codec_type) { | |
3376 | case CODEC_TYPE_AUDIO: | |
3377 | if (av1->channels == av->channels && | |
3378 | av1->sample_rate == av->sample_rate) | |
3379 | goto found; | |
3380 | break; | |
3381 | case CODEC_TYPE_VIDEO: | |
3382 | if (av1->width == av->width && | |
3383 | av1->height == av->height && | |
3384 | av1->frame_rate == av->frame_rate && | |
14bea432 | 3385 | av1->frame_rate_base == av->frame_rate_base && |
85f07f22 FB |
3386 | av1->gop_size == av->gop_size) |
3387 | goto found; | |
3388 | break; | |
f747e6d3 | 3389 | default: |
ec3b2232 | 3390 | av_abort(); |
85f07f22 FB |
3391 | } |
3392 | } | |
3393 | } | |
3394 | ||
2effd274 | 3395 | fst = add_av_stream1(feed, av); |
85f07f22 FB |
3396 | if (!fst) |
3397 | return -1; | |
85f07f22 FB |
3398 | return feed->nb_streams - 1; |
3399 | found: | |
3400 | return i; | |
3401 | } | |
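In practice this means that if, say, two configured streams ask for video with identical codec, bit rate, size, frame rate and GOP size (a hypothetical but typical case), the second call falls through to the found: label and reuses the existing feed stream index instead of appending a new AVStream to the feed.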
3402 | ||
b29f97d1 | 3403 | static void remove_stream(FFStream *stream) |
2effd274 FB |
3404 | { |
3405 | FFStream **ps; | |
3406 | ps = &first_stream; | |
3407 | while (*ps != NULL) { | |
3408 | if (*ps == stream) { | |
3409 | *ps = (*ps)->next; | |
3410 | } else { | |
3411 | ps = &(*ps)->next; | |
3412 | } | |
3413 | } | |
3414 | } | |
3415 | ||
0fa45e19 | 3416 | /* specific MPEG4 handling: we extract the raw parameters */ |
b29f97d1 | 3417 | static void extract_mpeg4_header(AVFormatContext *infile) |
0fa45e19 FB |
3418 | { |
3419 | int mpeg4_count, i, size; | |
3420 | AVPacket pkt; | |
3421 | AVStream *st; | |
0c1a9eda | 3422 | const uint8_t *p; |
0fa45e19 FB |
3423 | |
3424 | mpeg4_count = 0; | |
3425 | for(i=0;i<infile->nb_streams;i++) { | |
3426 | st = infile->streams[i]; | |
3427 | if (st->codec.codec_id == CODEC_ID_MPEG4 && | |
bc351386 | 3428 | st->codec.extradata_size == 0) { |
0fa45e19 FB |
3429 | mpeg4_count++; |
3430 | } | |
3431 | } | |
3432 | if (!mpeg4_count) | |
3433 | return; | |
3434 | ||
3435 | printf("MPEG4 without extra data: trying to find header\n"); | |
3436 | while (mpeg4_count > 0) { | |
3437 | if (av_read_packet(infile, &pkt) < 0) | |
3438 | break; | |
3439 | st = infile->streams[pkt.stream_index]; | |
3440 | if (st->codec.codec_id == CODEC_ID_MPEG4 && | |
bc351386 FB |
3441 | st->codec.extradata_size == 0) { |
3442 | av_freep(&st->codec.extradata); | |
0fa45e19 FB |
3443 | /* fill extradata with the header */ |
3444 | /* XXX: we make strong assumptions here! */ | |
3445 | p = pkt.data; | |
3446 | while (p < pkt.data + pkt.size - 4) { | |
3447 | /* stop when vop header is found */ | |
3448 | if (p[0] == 0x00 && p[1] == 0x00 && | |
3449 | p[2] == 0x01 && p[3] == 0xb6) { | |
3450 | size = p - pkt.data; | |
3451 | // av_hex_dump(pkt.data, size); | |
3452 | st->codec.extradata = av_malloc(size); | |
3453 | st->codec.extradata_size = size; | |
3454 | memcpy(st->codec.extradata, pkt.data, size); | |
3455 | break; | |
3456 | } | |
3457 | p++; | |
3458 | } | |
3459 | mpeg4_count--; | |
3460 | } | |
3461 | av_free_packet(&pkt); | |
3462 | } | |
3463 | } | |
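Concretely, the scan above stops at the MPEG4 VOP start code 00 00 01 b6; so for a first packet that begins, say, 00 00 01 b0 ... 00 00 01 b6 ... (hypothetical bytes), everything before the b6 start code is copied into codec.extradata and kept as the stream's configuration header.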
3464 | ||
2effd274 | 3465 | /* compute the needed AVStream for each file */ |
b29f97d1 | 3466 | static void build_file_streams(void) |
2effd274 FB |
3467 | { |
3468 | FFStream *stream, *stream_next; | |
3469 | AVFormatContext *infile; | |
3470 | int i; | |
3471 | ||
3472 | /* gather all streams */ | |
3473 | for(stream = first_stream; stream != NULL; stream = stream_next) { | |
3474 | stream_next = stream->next; | |
3475 | if (stream->stream_type == STREAM_TYPE_LIVE && | |
3476 | !stream->feed) { | |
3477 | /* the stream comes from a file */ | |
3478 | /* try to open the file */ | |
3479 | /* open stream */ | |
3480 | if (av_open_input_file(&infile, stream->feed_filename, | |
3481 | NULL, 0, NULL) < 0) { | |
3482 | http_log("%s not found", stream->feed_filename); | |
3483 | /* remove stream (no need to spend more time on it) */ | |
3484 | fail: | |
3485 | remove_stream(stream); | |
3486 | } else { | |
3487 | /* find all the AVStreams inside and reference them in | |
3488 | 'stream' */ | |
3489 | if (av_find_stream_info(infile) < 0) { | |
3490 | http_log("Could not find codec parameters from '%s'", | |
3491 | stream->feed_filename); | |
3492 | av_close_input_file(infile); | |
3493 | goto fail; | |
3494 | } | |
0fa45e19 FB |
3495 | extract_mpeg4_header(infile); |
3496 | ||
2effd274 FB |
3497 | for(i=0;i<infile->nb_streams;i++) { |
3498 | add_av_stream1(stream, &infile->streams[i]->codec); | |
3499 | } | |
3500 | av_close_input_file(infile); | |
3501 | } | |
3502 | } | |
3503 | } | |
3504 | } | |
3505 | ||
85f07f22 | 3506 | /* compute the needed AVStream for each feed */ |
b29f97d1 | 3507 | static void build_feed_streams(void) |
85f07f22 FB |
3508 | { |
3509 | FFStream *stream, *feed; | |
3510 | int i; | |
3511 | ||
3512 | /* gather all streams */ | |
3513 | for(stream = first_stream; stream != NULL; stream = stream->next) { | |
3514 | feed = stream->feed; | |
3515 | if (feed) { | |
3516 | if (!stream->is_feed) { | |
2effd274 | 3517 | /* we handle a stream coming from a feed */ |
85f07f22 FB |
3518 | for(i=0;i<stream->nb_streams;i++) { |
3519 | stream->feed_streams[i] = add_av_stream(feed, stream->streams[i]); | |
3520 | } | |
cde25790 PG |
3521 | } |
3522 | } | |
3523 | } | |
3524 | ||
3525 | /* gather all streams */ | |
3526 | for(stream = first_stream; stream != NULL; stream = stream->next) { | |
3527 | feed = stream->feed; | |
3528 | if (feed) { | |
3529 | if (stream->is_feed) { | |
85f07f22 FB |
3530 | for(i=0;i<stream->nb_streams;i++) { |
3531 | stream->feed_streams[i] = i; | |
3532 | } | |
3533 | } | |
3534 | } | |
3535 | } | |
3536 | ||
3537 | /* create feed files if needed */ | |
3538 | for(feed = first_feed; feed != NULL; feed = feed->next_feed) { | |
3539 | int fd; | |
3540 | ||
59eb2ed1 PG |
3541 | if (url_exist(feed->feed_filename)) { |
3542 | /* See if it matches */ | |
3543 | AVFormatContext *s; | |
3544 | int matches = 0; | |
3545 | ||
3546 | if (av_open_input_file(&s, feed->feed_filename, NULL, FFM_PACKET_SIZE, NULL) >= 0) { | |
3547 | /* Now see if it matches */ | |
3548 | if (s->nb_streams == feed->nb_streams) { | |
3549 | matches = 1; | |
3550 | for(i=0;i<s->nb_streams;i++) { | |
3551 | AVStream *sf, *ss; | |
3552 | sf = feed->streams[i]; | |
3553 | ss = s->streams[i]; | |
3554 | ||
3555 | if (sf->index != ss->index || | |
3556 | sf->id != ss->id) { | |
3557 | printf("Index & Id do not match for stream %d\n", i); | |
3558 | matches = 0; | |
3559 | } else { | |
3560 | AVCodecContext *ccf, *ccs; | |
3561 | ||
3562 | ccf = &sf->codec; | |
3563 | ccs = &ss->codec; | |
3564 | #define CHECK_CODEC(x) (ccf->x != ccs->x) | |
3565 | ||
3566 | if (CHECK_CODEC(codec) || CHECK_CODEC(codec_type)) { | |
3567 | printf("Codecs do not match for stream %d\n", i); | |
3568 | matches = 0; | |
3569 | } else if (CHECK_CODEC(bit_rate) || CHECK_CODEC(flags)) { | |
3570 | printf("Codec bitrates do not match for stream %d\n", i); | |
3571 | matches = 0; | |
3572 | } else if (ccf->codec_type == CODEC_TYPE_VIDEO) { | |
3573 | if (CHECK_CODEC(frame_rate) || | |
14bea432 | 3574 | CHECK_CODEC(frame_rate_base) || |
59eb2ed1 PG |
3575 | CHECK_CODEC(width) || |
3576 | CHECK_CODEC(height)) { | |
3577 | printf("Codec width, height and framerate do not match for stream %d\n", i); | |
3578 | matches = 0; | |
3579 | } | |
3580 | } else if (ccf->codec_type == CODEC_TYPE_AUDIO) { | |
3581 | if (CHECK_CODEC(sample_rate) || | |
3582 | CHECK_CODEC(channels) || | |
3583 | CHECK_CODEC(frame_size)) { | |
3584 | printf("Codec sample_rate, channels, frame_size do not match for stream %d\n", i); | |
3585 | matches = 0; | |
3586 | } | |
3587 | } else { | |
3588 | printf("Unknown codec type\n"); | |
3589 | matches = 0; | |
3590 | } | |
3591 | } | |
3592 | if (!matches) { | |
3593 | break; | |
3594 | } | |
3595 | } | |
3596 | } else { | |
3597 | printf("Deleting feed file '%s' as stream counts differ (%d != %d)\n", | |
3598 | feed->feed_filename, s->nb_streams, feed->nb_streams); | |
3599 | } | |
3600 | ||
3601 | av_close_input_file(s); | |
3602 | } else { | |
3603 | printf("Deleting feed file '%s' as it appears to be corrupt\n", | |
3604 | feed->feed_filename); | |
3605 | } | |
e322ea48 PG |
3606 | if (!matches) { |
3607 | if (feed->readonly) { | |
3608 | printf("Unable to delete feed file '%s' as it is marked readonly\n", | |
3609 | feed->feed_filename); | |
3610 | exit(1); | |
3611 | } | |
59eb2ed1 | 3612 | unlink(feed->feed_filename); |
e322ea48 | 3613 | } |
59eb2ed1 | 3614 | } |
85f07f22 FB |
3615 | if (!url_exist(feed->feed_filename)) { |
3616 | AVFormatContext s1, *s = &s1; | |
3617 | ||
e322ea48 PG |
3618 | if (feed->readonly) { |
3619 | printf("Unable to create feed file '%s' as it is marked readonly\n", | |
3620 | feed->feed_filename); | |
3621 | exit(1); | |
3622 | } | |
3623 | ||
85f07f22 FB |
3624 | /* only write the header of the ffm file */ |
3625 | if (url_fopen(&s->pb, feed->feed_filename, URL_WRONLY) < 0) { | |
3626 | fprintf(stderr, "Could not open output feed file '%s'\n", | |
3627 | feed->feed_filename); | |
3628 | exit(1); | |
3629 | } | |
bd7cf6ad | 3630 | s->oformat = feed->fmt; |
85f07f22 FB |
3631 | s->nb_streams = feed->nb_streams; |
3632 | for(i=0;i<s->nb_streams;i++) { | |
3633 | AVStream *st; | |
3634 | st = feed->streams[i]; | |
3635 | s->streams[i] = st; | |
3636 | } | |
3c27199b | 3637 | av_set_parameters(s, NULL); |
bd7cf6ad FB |
3638 | av_write_header(s); |
3639 | /* XXX: need better api */ | |
3640 | av_freep(&s->priv_data); | |
85f07f22 FB |
3641 | url_fclose(&s->pb); |
3642 | } | |
3643 | /* get feed size and write index */ | |
3644 | fd = open(feed->feed_filename, O_RDONLY); | |
3645 | if (fd < 0) { | |
3646 | fprintf(stderr, "Could not open output feed file '%s'\n", | |
3647 | feed->feed_filename); | |
3648 | exit(1); | |
3649 | } | |
3650 | ||
3651 | feed->feed_write_index = ffm_read_write_index(fd); | |
3652 | feed->feed_size = lseek(fd, 0, SEEK_END); | |
3653 | /* ensure that we do not wrap before the end of file */ | |
3654 | if (feed->feed_max_size < feed->feed_size) | |
3655 | feed->feed_max_size = feed->feed_size; | |
3656 | ||
3657 | close(fd); | |
3658 | } | |
3659 | } | |
3660 | ||
6edd6884 FB |
3661 | /* compute the bandwidth used by each stream */ |
3662 | static void compute_bandwidth(void) | |
3663 | { | |
3664 | int bandwidth, i; | |
3665 | FFStream *stream; | |
3666 | ||
3667 | for(stream = first_stream; stream != NULL; stream = stream->next) { | |
3668 | bandwidth = 0; | |
3669 | for(i=0;i<stream->nb_streams;i++) { | |
3670 | AVStream *st = stream->streams[i]; | |
3671 | switch(st->codec.codec_type) { | |
3672 | case CODEC_TYPE_AUDIO: | |
3673 | case CODEC_TYPE_VIDEO: | |
3674 | bandwidth += st->codec.bit_rate; | |
3675 | break; | |
3676 | default: | |
3677 | break; | |
3678 | } | |
3679 | } | |
3680 | stream->bandwidth = (bandwidth + 999) / 1000; | |
3681 | } | |
3682 | } | |
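As a worked example of the arithmetic above: a stream carrying 256 kbit/s of video and 64 kbit/s of audio gives (256000 + 64000 + 999) / 1000 = 320, i.e. stream->bandwidth is stored in kbit/s, rounded up to the next whole kilobit per second.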
3683 | ||
85f07f22 FB |
3684 | static void get_arg(char *buf, int buf_size, const char **pp) |
3685 | { | |
3686 | const char *p; | |
3687 | char *q; | |
3688 | int quote; | |
3689 | ||
3690 | p = *pp; | |
3691 | while (isspace(*p)) p++; | |
3692 | q = buf; | |
3693 | quote = 0; | |
3694 | if (*p == '\"' || *p == '\'') | |
3695 | quote = *p++; | |
3696 | for(;;) { | |
3697 | if (quote) { | |
3698 | if (*p == quote) | |
3699 | break; | |
3700 | } else { | |
3701 | if (isspace(*p)) | |
3702 | break; | |
3703 | } | |
3704 | if (*p == '\0') | |
3705 | break; | |
3706 | if ((q - buf) < buf_size - 1) | |
3707 | *q++ = *p; | |
3708 | p++; | |
3709 | } | |
3710 | *q = '\0'; | |
3711 | if (quote && *p == quote) | |
3712 | p++; | |
3713 | *pp = p; | |
3714 | } | |
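A minimal usage sketch of get_arg() follows (the input line is hypothetical); it shows how a quoted argument keeps its embedded space while the surrounding quotes are stripped:

    const char *p = "File \"/tmp/my feed.ffm\" ReadOnly";  /* hypothetical config line */
    char arg[1024];
    get_arg(arg, sizeof(arg), &p);   /* arg is now "File" */
    get_arg(arg, sizeof(arg), &p);   /* arg is now "/tmp/my feed.ffm" */
    get_arg(arg, sizeof(arg), &p);   /* arg is now "ReadOnly" */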
3715 | ||
3716 | /* add a codec and set the default parameters */ | |
b29f97d1 | 3717 | static void add_codec(FFStream *stream, AVCodecContext *av) |
85f07f22 FB |
3718 | { |
3719 | AVStream *st; | |
3720 | ||
3721 | /* compute default parameters */ | |
3722 | switch(av->codec_type) { | |
3723 | case CODEC_TYPE_AUDIO: | |
3724 | if (av->bit_rate == 0) | |
3725 | av->bit_rate = 64000; | |
3726 | if (av->sample_rate == 0) | |
3727 | av->sample_rate = 22050; | |
3728 | if (av->channels == 0) | |
3729 | av->channels = 1; | |
3730 | break; | |
3731 | case CODEC_TYPE_VIDEO: | |
3732 | if (av->bit_rate == 0) | |
3733 | av->bit_rate = 64000; | |
14bea432 MN |
3734 | if (av->frame_rate == 0){ |
3735 | av->frame_rate = 5; | |
3736 | av->frame_rate_base = 1; | |
3737 | } | |
85f07f22 FB |
3738 | if (av->width == 0 || av->height == 0) { |
3739 | av->width = 160; | |
3740 | av->height = 128; | |
3741 | } | |
ba9b374f | 3742 | /* Bitrate tolerance is less for streaming */ |
42a63c6a PG |
3743 | if (av->bit_rate_tolerance == 0) |
3744 | av->bit_rate_tolerance = av->bit_rate / 4; | |
3745 | if (av->qmin == 0) | |
3746 | av->qmin = 3; | |
3747 | if (av->qmax == 0) | |
3748 | av->qmax = 31; | |
3749 | if (av->max_qdiff == 0) | |
3750 | av->max_qdiff = 3; | |
ba9b374f J |
3751 | av->qcompress = 0.5; |
3752 | av->qblur = 0.5; | |
68d7eef9 | 3753 | |
a782f209 PG |
3754 | if (!av->rc_eq) |
3755 | av->rc_eq = "tex^qComp"; | |
3756 | if (!av->i_quant_factor) | |
b3a391e8 | 3757 | av->i_quant_factor = -0.8; |
a782f209 PG |
3758 | if (!av->b_quant_factor) |
3759 | av->b_quant_factor = 1.25; | |
3760 | if (!av->b_quant_offset) | |
3761 | av->b_quant_offset = 1.25; | |
d6562d2c PG |
3762 | if (!av->rc_min_rate) |
3763 | av->rc_min_rate = av->bit_rate / 2; | |