/*
 * Watermark Hook
 * Copyright (c) 2005 Marcus Engene myfirstname(at)mylastname.se
 *
 * The watermark picture works like this (assuming color intensities 0..0xff).
 * For each color channel:
 * If the mask color is 0x80, the original frame is left unchanged.
 * If the mask color is < 0x80, the absolute difference is subtracted from
 * the frame; if the result is < 0, it is clamped to 0.
 * If the mask color is > 0x80, the absolute difference is added to the
 * frame; if the result is > 0xff, it is clamped to 0xff.
 *
 * This way a mask can be made that is visible in both light and dark
 * pictures (e.g. by using a picture generated with GIMP and its bump map
 * tool).
 *
 * An example watermark file is at
 * http://engene.se/ffmpeg_watermark.gif
 *
 * Example usage:
 *  ffmpeg -i infile -vhook '/path/watermark.so -f wm.gif' out.mov
 *
 * Note that the entire vhook argument is enclosed in single quotes. This
 * way, arguments to the vhook won't be mixed up with those to ffmpeg.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

//#include <stdlib.h>
//#include <fcntl.h>
#include <unistd.h>
#include <stdarg.h>

#include "common.h"
#include "avformat.h"

#include "framehook.h"

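/*
 * The per-channel arithmetic described at the top of this file boils down to
 * result = clamp(frame + mask - 0x80, 0, 0xff). The helper below is only a
 * minimal illustration of that formula (the name watermark_blend_channel is
 * made up for this illustration); the hook itself performs the same
 * arithmetic inline in Process().
 */
static inline int watermark_blend_channel(int frame, int mask)
{
    int tmp = frame + mask - 0x80;

    if (tmp > 0xff) tmp = 0xff;
    if (tmp < 0)    tmp = 0;
    return tmp;
}
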
typedef struct {
    char            filename[2000];
    int             x_size;
    int             y_size;

    /* get_watermark_picture() variables */
    AVFormatContext *pFormatCtx;
    const char      *p_ext;
    int             videoStream;
    int             frameFinished;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame;
    AVPacket        packet;
    int             numBytes;
    uint8_t         *buffer;
    int             i;
    AVInputFormat   *file_iformat;
    AVStream        *st;
    int             is_done;
    AVFrame         *pFrameRGB;
} ContextInfo;
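
/*
 * The fields after the "get_watermark_picture() variables" marker above hold
 * decoder state for the watermark file that must survive between calls:
 * Configure() opens the file and decodes the first frame, Process() asks for
 * the next frame once per output picture, and Release() tears the state down
 * again. The struct is allocated with av_mallocz() in Configure(), so every
 * field starts out zeroed.
 */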

int get_watermark_picture(ContextInfo *ci, int cleanup);


/****************************************************************************
 *
 ****************************************************************************/
void Release(void *ctx)
{
    ContextInfo *ci;
    ci = (ContextInfo *) ctx;

    if (ci) get_watermark_picture(ci, 1);

    if (ctx)
        av_free(ctx);
}


/****************************************************************************
 *
 ****************************************************************************/
int Configure(void **ctxp, int argc, char *argv[])
{
    ContextInfo *ci;
    int c;

    if (0 == (*ctxp = av_mallocz(sizeof(ContextInfo)))) return -1;
    ci = (ContextInfo *) *ctxp;

    optind = 0;

    // The struct is av_mallocz()'ed, so there is no need to reset it here.

    while ((c = getopt(argc, argv, "f:")) > 0) {
        switch (c) {
            case 'f':
                strncpy(ci->filename, optarg, 1999);
                ci->filename[1999] = 0;
                break;
            default:
                av_log(NULL, AV_LOG_DEBUG, "Unrecognized argument '%s'\n", argv[optind]);
                return -1;
        }
    }

    // A watermark filename is mandatory.
    if (0 == ci->filename[0]) return -1;

    av_register_all();

    // Open the watermark file and decode its first frame right away, so a
    // bad file makes Configure() fail instead of every Process() call.
    return get_watermark_picture(ci, 0);
}


/****************************************************************************
 * Why is this a void-returning function? I want to be able to signal errors!
 ****************************************************************************/
void Process(void *ctx,
             AVPicture *picture,
             enum PixelFormat pix_fmt,
             int src_width,
             int src_height,
             int64_t pts)
{
    ContextInfo *ci = (ContextInfo *) ctx;
    char *buf = 0;
    AVPicture picture1;
    AVPicture *pict = picture;

    AVFrame *pFrameRGB;
    int xm_size;
    int ym_size;

    int x;
    int y;
    int offs, offsm;
    int mpoffs;
    uint32_t *p_pixel = 0;
    uint32_t pixel_meck;
    uint32_t pixel;
    uint32_t pixelm;
    int tmp;

    if (pix_fmt != PIX_FMT_RGBA32) {
        int size;

        size = avpicture_get_size(PIX_FMT_RGBA32, src_width, src_height);
        buf = av_malloc(size);

        avpicture_fill(&picture1, buf, PIX_FMT_RGBA32, src_width, src_height);
        if (img_convert(&picture1, PIX_FMT_RGBA32,
                        picture, pix_fmt, src_width, src_height) < 0) {
            av_free(buf);
            return;
        }
        pict = &picture1;
    }

    /* Insert filter code here */ /* ok */

    // Get the next watermark frame.
    if (0 > get_watermark_picture(ci, 0)) {
        av_free(buf);   // av_free(NULL) is a no-op, so this is safe either way.
        return;
    }
    // These were the three static variables in the original ffmpeg hack.
    pFrameRGB = ci->pFrameRGB;
    xm_size   = ci->x_size;
    ym_size   = ci->y_size;

    // The *4 => <<2 micro-optimization is left to the compiler.
    // According to avcodec.h, PIX_FMT_RGBA32 is stored in an endian-specific manner.
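    // In practice this means each pixel is read here as a native-endian
    // uint32_t laid out as 0xAARRGGBB (alpha in the top byte), which is what
    // the shifts below assume. The watermark is scaled to the frame with a
    // nearest-neighbour lookup: output coordinate (x, y) maps to mask
    // coordinate (x * xm_size / src_width, y * ym_size / src_height).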
    for (y=0; y<src_height; y++) {
        offs = y * (src_width * 4);
        offsm = (((y * ym_size) / src_height) * 4) * xm_size; // byte offset of the start of the mask line
        for (x=0; x<src_width; x++) {
            mpoffs = offsm + (((x * xm_size) / src_width) * 4);
            p_pixel = (uint32_t *)&((pFrameRGB->data[0])[mpoffs]);
            pixelm = *p_pixel;
            p_pixel = (uint32_t *)&((pict->data[0])[offs]);
            pixel = *p_pixel;
            pixel_meck = pixel & 0xff000000;

            // R
            tmp = (int)((pixel >> 16) & 0xff) + (int)((pixelm >> 16) & 0xff) - 0x80;
            if (tmp > 255) tmp = 255;
            if (tmp < 0) tmp = 0;
            pixel_meck |= (tmp << 16) & 0xff0000;
            // G
            tmp = (int)((pixel >> 8) & 0xff) + (int)((pixelm >> 8) & 0xff) - 0x80;
            if (tmp > 255) tmp = 255;
            if (tmp < 0) tmp = 0;
            pixel_meck |= (tmp << 8) & 0xff00;
            // B
            tmp = (int)((pixel >> 0) & 0xff) + (int)((pixelm >> 0) & 0xff) - 0x80;
            if (tmp > 255) tmp = 255;
            if (tmp < 0) tmp = 0;
            pixel_meck |= (tmp << 0) & 0xff;

            // test:
            //pixel_meck = pixel & 0xff000000;
            //pixel_meck |= (pixelm & 0x00ffffff);

            *p_pixel = pixel_meck;

            offs += 4;
        } // foreach X
    } // foreach Y

    if (pix_fmt != PIX_FMT_RGBA32) {
        if (img_convert(picture, pix_fmt,
                        &picture1, PIX_FMT_RGBA32, src_width, src_height) < 0) {
            // Nothing sensible can be done here; the frame is passed on
            // without the watermark.
        }
    }

    av_free(buf);
}


/****************************************************************************
 * When cleanup == 0, we try to get the next frame. If there is no next
 * frame, nothing is done.
 *
 * This code follows the example at
 * http://www.inb.uni-luebeck.de/~boehme/using_libavcodec.html
 *
 * Returns 0 on success, -1 on error.
 ****************************************************************************/
int get_watermark_picture(ContextInfo *ci, int cleanup)
{
    if (1 == ci->is_done && 0 == cleanup) return 0;

    // pFrameRGB must be NULL the first time around; this block is therefore
    // only executed on the first call into this function.
    if (0 == ci->pFrameRGB &&
        0 == cleanup)
    {

        /*
         * The last three parameters specify the file format, buffer size and
         * format parameters; by simply specifying NULL or 0 we ask libavformat
         * to auto-detect the format and use a default buffer size. (This did
         * not work here, hence the fallback below.)
         */
        if (av_open_input_file(&ci->pFormatCtx, ci->filename, NULL, 0, NULL) != 0) {

            // Martin says this should not be necessary, but auto-detection
            // failed for me, so derive the input format from the filename
            // extension and retry with an explicit file_iformat.
            ci->i = strlen(ci->filename);
            if (0 == ci->i) {
                av_log(NULL, AV_LOG_DEBUG, "get_watermark_picture() No filename to watermark vhook\n");
                return -1;
            }
            while (ci->i > 0) {
                if (ci->filename[ci->i] == '.') {
                    ci->i++;
                    break;
                }
                ci->i--;
            }
            ci->p_ext = &(ci->filename[ci->i]);
            ci->file_iformat = av_find_input_format(ci->p_ext);
            if (0 == ci->file_iformat) {
                av_log(NULL, AV_LOG_DEBUG, "get_watermark_picture() Really failed to find iformat [%s]\n", ci->p_ext);
                return -1;
            }
            // Now back to Martin's template.

            if (av_open_input_file(&ci->pFormatCtx, ci->filename, ci->file_iformat, 0, NULL) != 0) {
                av_log(NULL, AV_LOG_DEBUG, "get_watermark_picture() Failed to open input file [%s]\n", ci->filename);
                return -1;
            }
        }

        /*
         * This fills the streams field of the AVFormatContext with valid
         * information.
         */
        if (av_find_stream_info(ci->pFormatCtx) < 0) {
            av_log(NULL, AV_LOG_DEBUG, "get_watermark_picture() Failed to find stream info\n");
            return -1;
        }

        /*
         * We handle only video streams, not audio streams. To keep things nice
         * and easy, we simply use the first video stream we find.
         */
        ci->videoStream = -1;
        for (ci->i = 0; ci->i < ci->pFormatCtx->nb_streams; ci->i++)
            if (ci->pFormatCtx->streams[ci->i]->codec->codec_type == CODEC_TYPE_VIDEO)
            {
                ci->videoStream = ci->i;
                break;
            }
        if (ci->videoStream == -1) {
            av_log(NULL, AV_LOG_DEBUG, "get_watermark_picture() Failed to find any video stream\n");
            return -1;
        }

        ci->st = ci->pFormatCtx->streams[ci->videoStream];
        ci->x_size = ci->st->codec->width;
        ci->y_size = ci->st->codec->height;

        // Get a pointer to the codec context for the video stream
        ci->pCodecCtx = ci->pFormatCtx->streams[ci->videoStream]->codec;


        /*
         * OK, so now we've got a pointer to the so-called codec context for our
         * video stream, but we still have to find the actual codec and open it.
         */
        // Find the decoder for the video stream
        ci->pCodec = avcodec_find_decoder(ci->pCodecCtx->codec_id);
        if (ci->pCodec == NULL) {
            av_log(NULL, AV_LOG_DEBUG, "get_watermark_picture() Failed to find any codec\n");
            return -1;
        }

        // Inform the codec that we can handle truncated bitstreams -- i.e.,
        // bitstreams where frame boundaries can fall in the middle of packets
        if (ci->pCodec->capabilities & CODEC_CAP_TRUNCATED)
            ci->pCodecCtx->flags |= CODEC_FLAG_TRUNCATED;

        // Open codec
        if (avcodec_open(ci->pCodecCtx, ci->pCodec) < 0) {
            av_log(NULL, AV_LOG_DEBUG, "get_watermark_picture() Failed to open codec\n");
            return -1;
        }

        // Hack to correct wrong frame rates that seem to be generated by some
        // codecs
        if (ci->pCodecCtx->time_base.den > 1000 && ci->pCodecCtx->time_base.num == 1)
            ci->pCodecCtx->time_base.num = 1000;

        /*
         * Allocate a video frame to store the decoded images in.
         */
        ci->pFrame = avcodec_alloc_frame();


        /*
         * The RGB image pFrameRGB (of type AVFrame *) is allocated like this:
         */
        // Allocate an AVFrame structure
        ci->pFrameRGB = avcodec_alloc_frame();
        if (ci->pFrameRGB == NULL) {
            av_log(NULL, AV_LOG_DEBUG, "get_watermark_picture() Failed to alloc pFrameRGB\n");
            return -1;
        }

        // Determine required buffer size and allocate buffer
        ci->numBytes = avpicture_get_size(PIX_FMT_RGBA32, ci->pCodecCtx->width,
                                          ci->pCodecCtx->height);
        ci->buffer = av_malloc(ci->numBytes);

        // Assign appropriate parts of buffer to image planes in pFrameRGB
        avpicture_fill((AVPicture *)ci->pFrameRGB, ci->buffer, PIX_FMT_RGBA32,
                       ci->pCodecCtx->width, ci->pCodecCtx->height);
    }
    // TODO: loop, pingpong etc?
    if (0 == cleanup)
    {
        // av_log(NULL, AV_LOG_DEBUG, "get_watermark_picture() Get a frame\n");
        while (av_read_frame(ci->pFormatCtx, &ci->packet) >= 0)
        {
            // Is this a packet from the video stream?
            if (ci->packet.stream_index == ci->videoStream)
            {
                // Decode video frame
                avcodec_decode_video(ci->pCodecCtx, ci->pFrame, &ci->frameFinished,
                                     ci->packet.data, ci->packet.size);

                // Did we get a complete video frame?
                if (ci->frameFinished)
                {
                    // Convert the image from its native format to RGBA32
                    img_convert((AVPicture *)ci->pFrameRGB, PIX_FMT_RGBA32,
                                (AVPicture *)(ci->pFrame), ci->pCodecCtx->pix_fmt,
                                ci->pCodecCtx->width, ci->pCodecCtx->height);

                    // The converted frame is now available in ci->pFrameRGB.
                    // Free the packet before returning so it is not leaked.
                    av_free_packet(&ci->packet);
                    return 0;
                }
            }

            // Free the packet that was allocated by av_read_frame
            av_free_packet(&ci->packet);
        }
        // End of the watermark file: keep showing the last decoded frame.
        ci->is_done = 1;
        return 0;
    } // if (0 == cleanup)

    if (0 != cleanup)
    {
        // Free the decoded frame and the RGB image
        if (0 != ci->pFrame) {
            av_free(ci->pFrame);
            ci->pFrame = 0;
        }
        if (0 != ci->buffer) {
            av_free(ci->buffer);
            ci->buffer = 0;
        }
        if (0 != ci->pFrameRGB) {
            av_free(ci->pFrameRGB);
            ci->pFrameRGB = 0;
        }

        // Close the codec
        if (0 != ci->pCodecCtx) {
            avcodec_close(ci->pCodecCtx);
            ci->pCodecCtx = 0;
        }

        // Close the video file
        if (0 != ci->pFormatCtx) {
            av_close_input_file(ci->pFormatCtx);
            ci->pFormatCtx = 0;
        }

        ci->is_done = 0;
    }
    return 0;
}
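
/*
 * Call pattern, for reference: Configure() calls get_watermark_picture(ci, 0)
 * once to open the watermark file and decode its first frame; Process() calls
 * it again for every output picture, which either decodes the next watermark
 * frame or, once the file is exhausted (is_done), keeps returning the last
 * frame left in ci->pFrameRGB; Release() finally calls it with cleanup == 1
 * to free everything.
 */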

// Intentionally left empty; the hook does its argument parsing in Configure().
void parse_arg_file(const char *filename)
{
}