Mirror of https://github.com/FFmpeg/FFmpeg.git

Originally committed as revision 17769 to svn://svn.ffmpeg.org/ffmpeg/trunk
Branch: release/0.6
parent f989d39752
commit fdf119062e

16 changed files with 7 additions and 3177 deletions
@@ -1,299 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle Video Hook Documentation
@titlepage
@sp 7
@center @titlefont{Video Hook Documentation}
@sp 3
@end titlepage


@chapter Introduction

@var{Please be aware that vhook is deprecated, and hence its development is
frozen (bug fixes are still accepted).
The substitute will be 'libavfilter', the result of our 'Video Filter API'
Google Summer of Code project. You may monitor its progress by subscribing to
the ffmpeg-soc mailing list at
@url{http://lists.mplayerhq.hu/mailman/listinfo/ffmpeg-soc}.}

The video hook functionality is designed (mostly) for live video. It allows
the video to be modified or examined between the decoder and the encoder.

Any number of hook modules can be placed inline, and they are run in the
order in which they were specified on the ffmpeg command line.

The video hook modules are provided as a base for your own modules, and are
described below.

Modules are loaded using the -vhook option to ffmpeg. The value of this
parameter is a space-separated list of arguments. The first is the module
name, and the rest are passed as arguments to the Configure function of the
module (a minimal module skeleton is sketched below).

The modules are dynamic libraries, so they have different suffixes (.so,
.dll, .dylib) depending on your platform, and your platform dictates whether
they need to be somewhere in your PATH or in your LD_LIBRARY_PATH. Otherwise
you will need to specify the full path of the vhook file that you are using.

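For orientation, here is a hypothetical, do-nothing module written against
the interface declared in @file{libavformat/framehook.h}. It is only a
sketch (the bundled null.c is the real reference sample); it simply counts
the frames it is handed:

@example
/* minimal_null.c -- hypothetical pass-through vhook module (sketch) */
#include "libavformat/framehook.h"

typedef struct @{
    int frame_count;            /* example of per-module state */
@} ContextInfo;

int Configure(void **ctxp, int argc, char *argv[])
@{
    /* argv[0] is the module path; further -vhook arguments follow it */
    *ctxp = av_mallocz(sizeof(ContextInfo));
    return *ctxp ? 0 : -1;
@}

void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt,
             int width, int height, int64_t pts)
@{
    ContextInfo *ci = ctx;
    ci->frame_count++;          /* examine or modify picture->data[] here */
@}

void Release(void *ctx)
@{
    av_free(ctx);
@}
@end example
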
@section null.c

This does nothing. Actually, it converts the input image to RGB24 and then
converts it back again. It is meant as a sample that you can use to test
your setup.

@section fish.c

This implements a 'fish detector'. Essentially it converts the image into
HSV space and tests whether more than a certain percentage of the pixels
fall into a specific HSV cuboid (a sketch of the per-pixel test is shown
below). If so, the image is saved into a file for processing by other bits
of code.

Why use HSV? It turns out that HSV cuboids represent a more compact range of
colors than an RGB cuboid would.

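As a rough sketch (this is not the actual fish.c code, whose full source
appears further down this commit, and it reuses the HSV struct defined
there), the per-pixel decision amounts to a simple range check:

@example
/* sketch: non-zero if the pixel's HSV value lies inside the cuboid */
static int is_fish_pixel(const HSV *p, const HSV *dark, const HSV *bright)
@{
    return p->h >= dark->h && p->h <= bright->h &&
           p->s >= dark->s && p->s <= bright->s &&
           p->v >= dark->v && p->v <= bright->v;
@}
@end example
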
@section imlib2.c

This module implements a text overlay for a video image. Currently it
supports a fixed overlay or reading the text from a file. The string
is passed through strftime() so that it is easy to imprint the date and
time onto the image.

This module depends on the external library imlib2, available on
Sourceforge, among other places, if it is not already installed on
your system.

You may also overlay an image (even semi-transparent) like TV stations do.
You may move either the text or the image around your video to create
scrolling credits, for example.

The font is looked for in the directories listed in the FONTPATH environment
variable. On the command line the font name is given with the point size
appended after a slash, and it can also be specified with the full path to
the font file, as in:
@example
-F /usr/X11R6/lib/X11/fonts/TTF/VeraBd.ttf/20
@end example
where 20 is the point size.

You can specify the filename to read RGB color names from. If it is not
specified, these defaults are used: @file{/usr/share/X11/rgb.txt} and
@file{/usr/lib/X11/rgb.txt}

Options:
@multitable @columnfractions .2 .8
@item @option{-C <rgb.txt>}   @tab The filename to read RGB color names from
@item @option{-c <color>}     @tab The color of the text
@item @option{-F <fontname>}  @tab The font face and size
@item @option{-t <text>}      @tab The text
@item @option{-f <filename>}  @tab The filename to read text from
@item @option{-x <expression>}@tab x coordinate of text or image
@item @option{-y <expression>}@tab y coordinate of text or image
@item @option{-i <filename>}  @tab The filename to read an image from
@item @option{-R <expression>}@tab Value for R color
@item @option{-G <expression>}@tab Value for G color
@item @option{-B <expression>}@tab Value for B color
@item @option{-A <expression>}@tab Value for Alpha channel
@end multitable

Expressions are functions of these variables:
@multitable @columnfractions .2 .8
@item @var{N} @tab frame number (starting at zero)
@item @var{H} @tab frame height
@item @var{W} @tab frame width
@item @var{h} @tab image height
@item @var{w} @tab image width
@item @var{X} @tab previous x coordinate of text or image
@item @var{Y} @tab previous y coordinate of text or image
@end multitable

You may also use the constants @var{PI} and @var{E}, as well as the math
functions available in the FFmpeg formula evaluator (@url{ffmpeg-doc.html#SEC13}),
except @var{bits2qp(bits)} and @var{qp2bits(qp)}.

Usage examples:

@example
# Remember to set the path to your fonts
FONTPATH="/cygdrive/c/WINDOWS/Fonts/"
FONTPATH="$FONTPATH:/usr/share/imlib2/data/fonts/"
FONTPATH="$FONTPATH:/usr/X11R6/lib/X11/fonts/TTF/"
export FONTPATH

# Bulb dancing in a Lissajous pattern
ffmpeg -i input.avi -vhook \
  'vhook/imlib2.dll -x W*(0.5+0.25*sin(N/47*PI))-w/2 -y H*(0.5+0.50*cos(N/97*PI))-h/2 -i /usr/share/imlib2/data/images/bulb.png' \
  -acodec copy -sameq output.avi

# Text scrolling
ffmpeg -i input.avi -vhook \
  'vhook/imlib2.dll -c red -F Vera.ttf/20 -x 150+0.5*N -y 70+0.25*N -t Hello' \
  -acodec copy -sameq output.avi

# Date and time stamp, security-camera style:
ffmpeg -r 29.97 -s 320x256 -f video4linux -i /dev/video0 \
  -vhook 'vhook/imlib2.so -x 0 -y 0 -i black-260x20.png' \
  -vhook 'vhook/imlib2.so -c white -F VeraBd.ttf/12 -x 0 -y 0 -t %A-%D-%T' \
  output.avi

In this example the video is captured from the first video capture card as a
320x256 AVI, and a black 260 by 20 pixel PNG image is placed in the upper
left corner, with the day, date and time overlaid on it in Vera Bold 12
point font. A simple black PNG file 260 pixels wide and 20 pixels tall
was created in the GIMP for this purpose.

# Scrolling credits from a text file
ffmpeg -i input.avi -vhook \
  'vhook/imlib2.so -c white -F VeraBd.ttf/16 -x 100 -y -1.0*N -f credits.txt' \
  -sameq output.avi

In this example, the text is stored in a file and is positioned 100 pixels
from the left hand edge of the video. The text is scrolled from the bottom
up. Making the y factor positive will scroll from the top down. Increasing
the magnitude of the y factor makes the text scroll faster; decreasing it
makes it scroll slower. Hint: blank lines containing only a newline are
treated as end-of-file. To create blank lines, use lines that consist of
space characters only.

# Scrolling credits with custom color from a text file
ffmpeg -i input.avi -vhook \
  'vhook/imlib2.so -C rgb.txt -c CustomColor1 -F VeraBd.ttf/16 -x 100 -y -1.0*N -f credits.txt' \
  -sameq output.avi

This example does the same as the one above, but specifies an rgb.txt file
to be used, which has a custom-made color in it.

# Variable colors
ffmpeg -i input.avi -vhook \
  'vhook/imlib2.so -t Hello -R abs(255*sin(N/47*PI)) -G abs(255*sin(N/47*PI)) -B abs(255*sin(N/47*PI))' \
  -sameq output.avi

In this example, the color of the text cycles between black and white.

# Text fade-out
ffmpeg -i input.avi -vhook \
  'vhook/imlib2.so -t Hello -A max(0,255-exp(N/47))' \
  -sameq output.avi

In this example, the text fades out in about 10 seconds for a 25 fps input
video file.
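# Illustrative arithmetic (not in the original documentation): the alpha
# expression max(0,255-exp(N/47)) reaches 0 once exp(N/47) >= 255, i.e.
# around N = 47*ln(255), roughly 260 frames, which is about 10.4 seconds
# at 25 fps.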

# Scrolling credits from a graphics file
ffmpeg -sameq -i input.avi \
  -vhook 'vhook/imlib2.so -x 0 -y -1.0*N -i credits.png' output.avi

In this example, a transparent PNG file the same width as the video
(e.g. 320 pixels), but very long (e.g. 3000 pixels), was created, and
text, graphics, brushstrokes, etc. were added to the image. The image
is then scrolled up, from the bottom of the frame.

@end example

@section ppm.c

This is basically a launch point for a PPM pipe: you can use any executable
(or script) which consumes a PPM on stdin and produces a PPM on stdout (and
flushes each frame). The Netpbm utilities are a series of such programs; a
minimal example of such a filter is sketched after the usage example.

A list of them is here:

@url{http://netpbm.sourceforge.net/doc/directory.html}

Usage example:

@example
ffmpeg -i input -vhook "/path/to/ppm.so some-ppm-filter args" output
@end example
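For illustration only (this program is not part of Netpbm or FFmpeg), a
do-nothing pipe filter that honours the contract -- read a binary P6 image
from stdin, write it to stdout, flush after every frame -- could be as
small as this sketch, which assumes 8-bit samples and no PPM comments:

@example
/* identity PPM filter: copies each P6 frame from stdin to stdout (sketch) */
#include <stdio.h>
#include <stdlib.h>

int main(void)
@{
    int w, h, maxval;
    /* one iteration per frame; the hook feeds a stream of concatenated P6 images */
    while (scanf("P6 %d %d %d", &w, &h, &maxval) == 3) @{
        long size = 3L * w * h;
        unsigned char *buf = malloc(size);
        getchar();                      /* consume the single byte after maxval */
        if (!buf || fread(buf, 1, size, stdin) != (size_t)size)
            return 1;
        printf("P6\n%d %d\n%d\n", w, h, maxval);
        fwrite(buf, 1, size, stdout);
        fflush(stdout);                 /* ppm.c expects each frame to be flushed */
        free(buf);
    @}
    return 0;
@}
@end example
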

@section drawtext.c

This module implements a text overlay for a video image. Currently it
supports a fixed overlay or reading the text from a file. The string
is passed through strftime() so that it is easy to imprint the date and
time onto the image.

Features:
@itemize @minus
@item TrueType, Type1 and others via the FreeType2 library
@item Font kerning (better output)
@item Line wrap (text that does not fit on one line is put on the next line)
@item Background box (currently in development)
@item Outline
@end itemize

Options:
@multitable @columnfractions .2 .8
@item @option{-c <color>}         @tab Foreground color of the text ('internet' way) <#RRGGBB> [default #FFFFFF]
@item @option{-C <color>}         @tab Background color of the text ('internet' way) <#RRGGBB> [default #000000]
@item @option{-f <font-filename>} @tab font file to use
@item @option{-t <text>}          @tab text to display
@item @option{-T <filename>}      @tab file to read text from
@item @option{-x <pos>}           @tab x coordinate of the start of the text
@item @option{-y <pos>}           @tab y coordinate of the start of the text
@end multitable

Text fonts are looked for in the FONTPATH environment variable.
If the FONTPATH environment variable is not available, or is not checked by
your target (e.g. Cygwin), then specify the full path to the font file, as in:
@example
-f /usr/X11R6/lib/X11/fonts/TTF/VeraBd.ttf
@end example

Usage example:
@example
# Remember to set the path to your fonts
FONTPATH="/cygdrive/c/WINDOWS/Fonts/"
FONTPATH="$FONTPATH:/usr/share/imlib2/data/fonts/"
FONTPATH="$FONTPATH:/usr/X11R6/lib/X11/fonts/TTF/"
export FONTPATH

# Time and date display
ffmpeg -f video4linux2 -i /dev/video0 \
  -vhook 'vhook/drawtext.so -f VeraBd.ttf -t %A-%D-%T' movie.mpg

This example grabs video from the first capture card and outputs it to an
MPEG video, and places "Weekday-mm/dd/yy-hh:mm:ss" at the top left of the
frame, updated every second, using the Vera Bold TrueType font, which
should exist in: /usr/X11R6/lib/X11/fonts/TTF/
@end example

Check the man page for strftime() for all the various ways you can format
the date and time; the conversions used above are expanded below.
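
@example
%A   full weekday name      (e.g. Monday)
%D   date, same as %m/%d/%y (e.g. 04/03/09)
%T   time, same as %H:%M:%S (e.g. 14:05:59)
%Y   four-digit year
@end example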

@section watermark.c

Command line options:
@multitable @columnfractions .2 .8
@item @option{-m [0|1]}           @tab Mode (default: 0, see below)
@item @option{-t 000000 - FFFFFF} @tab Threshold, six digit hex number
@item @option{-f <filename>}      @tab Watermark image filename, must be specified!
@end multitable

MODE 0:
The watermark picture works like this (assuming color intensities 0..0xFF).
For each color component:
if the mask color is 0x80, the original frame is left unchanged;
if the mask color is < 0x80, the absolute difference is subtracted from the
frame (if the result is < 0, the result is 0);
if the mask color is > 0x80, the absolute difference is added to the
frame (if the result is > 0xFF, the result is 0xFF).

You can override the 0x80 level with the -t flag. E.g. if the threshold is
000000, the color value of the watermark is added to the destination.
A sketch of this rule, per color component, is shown below.

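The following illustrative code is not taken from watermark.c itself; it is
a sketch of the MODE 0 rule for one color component, with the 0x80 level
generalized to the per-component threshold set with -t:

@example
/* sketch of the MODE 0 blend for one color component, values 0..255 */
static int mode0_blend(int frame, int mask, int level)   /* level defaults to 0x80 */
@{
    int diff = mask > level ? mask - level : level - mask;
    if (mask < level)
        return frame - diff < 0   ? 0   : frame - diff;
    if (mask > level)
        return frame + diff > 255 ? 255 : frame + diff;
    return frame;                       /* mask == level: unchanged */
@}
@end example
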
This way a mask that is visible in both light and dark pictures can be made
(e.g. by using a picture generated by the GIMP and the bump map tool).

An example watermark file is at:
@url{http://engene.se/ffmpeg_watermark.gif}

MODE 1:
For each color component: if the mask color is greater than the threshold
color, the watermark pixel is used.

Example usage:
@example
ffmpeg -i infile -vhook '/path/watermark.so -f wm.gif' -an out.mov
ffmpeg -i infile -vhook '/path/watermark.so -f wm.gif -m 1 -t 222222' -an out.mov
@end example

@bye
@@ -1,115 +0,0 @@
/*
 * Video processing hooks
 * Copyright (c) 2000, 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <errno.h>
#include "config.h"
#include "avformat.h"
#include "framehook.h"

#if HAVE_DLFCN_H
#include <dlfcn.h>
#endif


typedef struct FrameHookEntry {
    struct FrameHookEntry *next;
    FrameHookConfigureFn Configure;
    FrameHookProcessFn Process;
    FrameHookReleaseFn Release;
    void *ctx;
} FrameHookEntry;

static FrameHookEntry *first_hook;

/* Returns 0 on OK */
int frame_hook_add(int argc, char *argv[])
{
    void *loaded;
    FrameHookEntry *fhe, **fhep;

    if (argc < 1) {
        return ENOENT;
    }

    loaded = dlopen(argv[0], RTLD_NOW);
    if (!loaded) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", dlerror());
        return -1;
    }

    fhe = av_mallocz(sizeof(*fhe));
    if (!fhe) {
        return AVERROR(ENOMEM);
    }

    fhe->Configure = dlsym(loaded, "Configure");
    fhe->Process   = dlsym(loaded, "Process");
    fhe->Release   = dlsym(loaded, "Release");    /* Optional */

    if (!fhe->Process) {
        av_log(NULL, AV_LOG_ERROR, "Failed to find Process entrypoint in %s\n", argv[0]);
        return AVERROR(ENOENT);
    }

    if (!fhe->Configure && argc > 1) {
        av_log(NULL, AV_LOG_ERROR, "Failed to find Configure entrypoint in %s\n", argv[0]);
        return AVERROR(ENOENT);
    }

    if (argc > 1 || fhe->Configure) {
        if (fhe->Configure(&fhe->ctx, argc, argv)) {
            av_log(NULL, AV_LOG_ERROR, "Failed to Configure %s\n", argv[0]);
            return AVERROR(EINVAL);
        }
    }

    for (fhep = &first_hook; *fhep; fhep = &((*fhep)->next)) {
    }

    *fhep = fhe;

    return 0;
}

void frame_hook_process(AVPicture *pict, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
{
    if (first_hook) {
        FrameHookEntry *fhe;

        for (fhe = first_hook; fhe; fhe = fhe->next) {
            fhe->Process(fhe->ctx, pict, pix_fmt, width, height, pts);
        }
    }
}

void frame_hook_release(void)
{
    FrameHookEntry *fhe;
    FrameHookEntry *fhenext;

    for (fhe = first_hook; fhe; fhe = fhenext) {
        fhenext = fhe->next;
        if (fhe->Release)
            fhe->Release(fhe->ctx);
        av_free(fhe);
    }

    first_hook = NULL;
}
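
/* Sketch (not part of this file): the ffmpeg binary is expected to drive
 * the entry points above roughly as follows --
 *
 *     frame_hook_add(argc, argv);                   once per -vhook option
 *     frame_hook_process(&pict, fmt, w, h, pts);    for each decoded frame,
 *                                                   before it is encoded
 *     frame_hook_release();                         once at shutdown
 *
 * Hooks run in the order their -vhook options appeared on the command line. */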
@@ -1,52 +0,0 @@
/*
 * video processing hooks
 * copyright (c) 2000, 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVFORMAT_FRAMEHOOK_H
#define AVFORMAT_FRAMEHOOK_H

#warning VHOOK is deprecated. Please help finishing libavfilter instead of wasting your time writing new filters for this crappy filter system.

/*
 * Prototypes for interface to .so that implement a video processing hook
 */

#include "libavcodec/avcodec.h"

/* Function must be called 'Configure' */
typedef int (FrameHookConfigure)(void **ctxp, int argc, char *argv[]);
typedef FrameHookConfigure *FrameHookConfigureFn;
extern FrameHookConfigure Configure;

/* Function must be called 'Process' */
typedef void (FrameHookProcess)(void *ctx, struct AVPicture *pict, enum PixelFormat pix_fmt, int width, int height, int64_t pts);
typedef FrameHookProcess *FrameHookProcessFn;
extern FrameHookProcess Process;

/* Function must be called 'Release' */
typedef void (FrameHookRelease)(void *ctx);
typedef FrameHookRelease *FrameHookReleaseFn;
extern FrameHookRelease Release;

int frame_hook_add(int argc, char *argv[]);
void frame_hook_process(struct AVPicture *pict, enum PixelFormat pix_fmt, int width, int height, int64_t pts);
void frame_hook_release(void);

#endif /* AVFORMAT_FRAMEHOOK_H */
@@ -1,531 +0,0 @@
/*
 * drawtext.c: print text over the screen
 ******************************************************************************
 * Options:
 * -f <filename>    font filename (MANDATORY!!!)
 * -s <pixel_size>  font size in pixels [default 16]
 * -b               print background
 * -o               outline glyphs (use the bg color)
 * -x <pos>         x position ( >= 0) [default 0]
 * -y <pos>         y position ( >= 0) [default 0]
 * -t <text>        text to print (will be passed to strftime())
 *                  MANDATORY: will be used even when -T is used.
 *                  In this case, -t will be used if some error occurs
 * -T <filename>    file with the text (re-read every frame)
 * -c <#RRGGBB>     foreground color ('internet' way) [default #ffffff]
 * -C <#RRGGBB>     background color ('internet' way) [default #000000]
 *
 ******************************************************************************
 * Features:
 * - True Type, Type1 and others via the FreeType2 library
 * - Font kerning (better output)
 * - Line wrap (if the text doesn't fit, the next char goes to the next line)
 * - Background box
 * - Outline
 ******************************************************************************
 * Author: Gustavo Sverzut Barbieri <gsbarbieri@yahoo.com.br>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define MAXSIZE_TEXT 1024

#include "libavformat/framehook.h"

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#undef time
#include <sys/time.h>
#include <time.h>

#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_GLYPH_H

#define SCALEBITS 10
#define ONE_HALF  (1 << (SCALEBITS - 1))
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))

#define RGB_TO_YUV(rgb_color, yuv_color) do { \
    yuv_color[0] = (FIX(0.29900) * rgb_color[0] + FIX(0.58700) * rgb_color[1] + FIX(0.11400) * rgb_color[2] + ONE_HALF) >> SCALEBITS; \
    yuv_color[2] = ((FIX(0.50000) * rgb_color[0] - FIX(0.41869) * rgb_color[1] - FIX(0.08131) * rgb_color[2] + ONE_HALF - 1) >> SCALEBITS) + 128; \
    yuv_color[1] = ((- FIX(0.16874) * rgb_color[0] - FIX(0.33126) * rgb_color[1] + FIX(0.50000) * rgb_color[2] + ONE_HALF - 1) >> SCALEBITS) + 128; \
} while (0)

#define COPY_3(dst,src) { \
    dst[0]=src[0]; \
    dst[1]=src[1]; \
    dst[2]=src[2]; \
}


#define SET_PIXEL(picture, yuv_color, x, y) { \
    picture->data[0][ (x) + (y)*picture->linesize[0] ] = yuv_color[0]; \
    picture->data[1][ ((x/2) + (y/2)*picture->linesize[1]) ] = yuv_color[1]; \
    picture->data[2][ ((x/2) + (y/2)*picture->linesize[2]) ] = yuv_color[2]; \
}

#define GET_PIXEL(picture, yuv_color, x, y) { \
    yuv_color[0] = picture->data[0][ (x) + (y)*picture->linesize[0] ]; \
    yuv_color[1] = picture->data[1][ (x/2) + (y/2)*picture->linesize[1] ]; \
    yuv_color[2] = picture->data[2][ (x/2) + (y/2)*picture->linesize[2] ]; \
}

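/* NOTE: the pixel macros above assume a planar YUV 4:2:0 picture (the chroma
 * planes are addressed at x/2, y/2), and RGB_TO_YUV is a fixed-point,
 * full-range BT.601 conversion: white (255,255,255) maps to Y=255, U=V=128. */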
typedef struct { |
||||
unsigned char *text; |
||||
char *file; |
||||
unsigned int x; |
||||
unsigned int y; |
||||
int bg; |
||||
int outline; |
||||
unsigned char bgcolor[3]; /* YUV */ |
||||
unsigned char fgcolor[3]; /* YUV */ |
||||
FT_Library library; |
||||
FT_Face face; |
||||
FT_Glyph glyphs[ 255 ]; |
||||
FT_Bitmap bitmaps[ 255 ]; |
||||
int advance[ 255 ]; |
||||
int bitmap_left[ 255 ]; |
||||
int bitmap_top[ 255 ]; |
||||
unsigned int glyphs_index[ 255 ]; |
||||
int text_height; |
||||
int baseline; |
||||
int use_kerning; |
||||
} ContextInfo; |
||||
|
||||
|
||||
void Release(void *ctx) |
||||
{ |
||||
if (ctx) |
||||
av_free(ctx); |
||||
} |
||||
|
||||
|
||||
static int ParseColor(char *text, unsigned char yuv_color[3]) |
||||
{ |
||||
char tmp[3]; |
||||
unsigned char rgb_color[3]; |
||||
int i; |
||||
|
||||
tmp[2] = '\0'; |
||||
|
||||
if ((!text) || (strlen(text) != 7) || (text[0] != '#') ) |
||||
return -1; |
||||
|
||||
for (i=0; i < 3; i++) |
||||
{ |
||||
tmp[0] = text[i*2+1]; |
||||
tmp[1] = text[i*2+2]; |
||||
|
||||
rgb_color[i] = strtol(tmp, NULL, 16); |
||||
} |
||||
|
||||
RGB_TO_YUV(rgb_color, yuv_color); |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
int Configure(void **ctxp, int argc, char *argv[]) |
||||
{ |
||||
int c; |
||||
int error; |
||||
ContextInfo *ci=NULL; |
||||
char *font=NULL; |
||||
unsigned int size=16; |
||||
FT_BBox bbox; |
||||
int yMax, yMin; |
||||
*ctxp = av_mallocz(sizeof(ContextInfo)); |
||||
ci = (ContextInfo *) *ctxp; |
||||
|
||||
/* configure Context Info */ |
||||
ci->text = NULL; |
||||
ci->file = NULL; |
||||
ci->x = ci->y = 0; |
||||
ci->fgcolor[0]=255; |
||||
ci->fgcolor[1]=128; |
||||
ci->fgcolor[2]=128; |
||||
    ci->bgcolor[0]=0;
    ci->bgcolor[1]=128;
    ci->bgcolor[2]=128;
||||
ci->bg = 0; |
||||
ci->outline = 0; |
||||
ci->text_height = 0; |
||||
|
||||
optind = 1; |
||||
while ((c = getopt(argc, argv, "f:t:T:x:y:s:c:C:bo")) > 0) { |
||||
switch (c) { |
||||
case 'f': |
||||
font = optarg; |
||||
break; |
||||
case 't': |
||||
ci->text = av_strdup(optarg); |
||||
break; |
||||
case 'T': |
||||
ci->file = av_strdup(optarg); |
||||
break; |
||||
case 'x': |
||||
ci->x = (unsigned int) atoi(optarg); |
||||
break; |
||||
case 'y': |
||||
ci->y = (unsigned int) atoi(optarg); |
||||
break; |
||||
case 's': |
||||
size = (unsigned int) atoi(optarg); |
||||
break; |
||||
case 'c': |
||||
if (ParseColor(optarg, ci->fgcolor) == -1) |
||||
{ |
||||
av_log(NULL, AV_LOG_ERROR, "Invalid foreground color: '%s'. You must specify the color in the internet way(packaged hex): #RRGGBB, ie: -c #ffffff (for white foreground)\n", optarg); |
||||
return -1; |
||||
} |
||||
break; |
||||
case 'C': |
||||
if (ParseColor(optarg, ci->bgcolor) == -1) |
||||
{ |
||||
av_log(NULL, AV_LOG_ERROR, "Invalid background color: '%s'. You must specify the color in the internet way(packaged hex): #RRGGBB, ie: -C #ffffff (for white background)\n", optarg); |
||||
return -1; |
||||
} |
||||
break; |
||||
case 'b': |
||||
ci->bg=1; |
||||
break; |
||||
case 'o': |
||||
ci->outline=1; |
||||
break; |
||||
case '?': |
||||
av_log(NULL, AV_LOG_ERROR, "Unrecognized argument '%s'\n", argv[optind]); |
||||
return -1; |
||||
} |
||||
} |
||||
|
||||
if (!ci->text) |
||||
{ |
||||
av_log(NULL, AV_LOG_ERROR, "No text provided (-t text)\n"); |
||||
return -1; |
||||
} |
||||
|
||||
if (ci->file) |
||||
{ |
||||
FILE *fp; |
||||
if ((fp=fopen(ci->file, "r")) == NULL) |
||||
{ |
||||
av_log(NULL, AV_LOG_INFO, "WARNING: The file could not be opened. Using text provided with -t switch: %s", strerror(errno)); |
||||
} |
||||
else |
||||
{ |
||||
fclose(fp); |
||||
} |
||||
} |
||||
|
||||
if (!font) |
||||
{ |
||||
av_log(NULL, AV_LOG_ERROR, "No font file provided! (-f filename)\n"); |
||||
return -1; |
||||
} |
||||
|
||||
if ((error = FT_Init_FreeType(&(ci->library))) != 0) |
||||
{ |
||||
av_log(NULL, AV_LOG_ERROR, "Could not load FreeType (error# %d).\n", error); |
||||
return -1; |
||||
} |
||||
|
||||
if ((error = FT_New_Face( ci->library, font, 0, &(ci->face) )) != 0) |
||||
{ |
||||
av_log(NULL, AV_LOG_ERROR, "Could not load face: %s (error# %d).\n", font, error); |
||||
return -1; |
||||
} |
||||
|
||||
if ((error = FT_Set_Pixel_Sizes( ci->face, 0, size)) != 0) |
||||
{ |
||||
av_log(NULL, AV_LOG_ERROR, "Could not set font size to %d pixels (error# %d).\n", size, error); |
||||
return -1; |
||||
} |
||||
|
||||
ci->use_kerning = FT_HAS_KERNING(ci->face); |
||||
|
||||
/* load and cache glyphs */ |
||||
yMax = -32000; |
||||
yMin = 32000; |
||||
for (c=0; c < 256; c++) |
||||
{ |
||||
/* Load char */ |
||||
error = FT_Load_Char( ci->face, (unsigned char) c, FT_LOAD_RENDER | FT_LOAD_MONOCHROME ); |
||||
if (error) continue; /* ignore errors */ |
||||
|
||||
/* Save bitmap */ |
||||
ci->bitmaps[c] = ci->face->glyph->bitmap; |
||||
/* Save bitmap left */ |
||||
ci->bitmap_left[c] = ci->face->glyph->bitmap_left; |
||||
/* Save bitmap top */ |
||||
ci->bitmap_top[c] = ci->face->glyph->bitmap_top; |
||||
|
||||
/* Save advance */ |
||||
ci->advance[c] = ci->face->glyph->advance.x >> 6; |
||||
|
||||
/* Save glyph */ |
||||
error = FT_Get_Glyph( ci->face->glyph, &(ci->glyphs[c]) ); |
||||
/* Save glyph index */ |
||||
ci->glyphs_index[c] = FT_Get_Char_Index( ci->face, (unsigned char) c ); |
||||
|
||||
/* Measure text height to calculate text_height (or the maximum text height) */ |
||||
FT_Glyph_Get_CBox( ci->glyphs[ c ], ft_glyph_bbox_pixels, &bbox ); |
||||
if (bbox.yMax > yMax) |
||||
yMax = bbox.yMax; |
||||
if (bbox.yMin < yMin) |
||||
yMin = bbox.yMin; |
||||
|
||||
} |
||||
|
||||
ci->text_height = yMax - yMin; |
||||
ci->baseline = yMax; |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
|
||||
|
||||
|
||||
static inline void draw_glyph(AVPicture *picture, FT_Bitmap *bitmap, unsigned int x, unsigned int y, unsigned int width, unsigned int height, unsigned char yuv_fgcolor[3], unsigned char yuv_bgcolor[3], int outline) |
||||
{ |
||||
int r, c; |
||||
int spixel, dpixel[3], in_glyph=0; |
||||
|
||||
if (bitmap->pixel_mode == ft_pixel_mode_mono) |
||||
{ |
||||
in_glyph = 0; |
||||
for (r=0; (r < bitmap->rows) && (r+y < height); r++) |
||||
{ |
||||
for (c=0; (c < bitmap->width) && (c+x < width); c++) |
||||
{ |
||||
/* pixel in the picture (destination) */ |
||||
GET_PIXEL(picture, dpixel, (c+x), (y+r)); |
||||
|
||||
/* pixel in the glyph bitmap (source) */ |
||||
spixel = bitmap->buffer[r*bitmap->pitch +c/8] & (0x80>>(c%8)); |
||||
|
||||
if (spixel) |
||||
COPY_3(dpixel, yuv_fgcolor); |
||||
|
||||
if (outline) |
||||
{ |
||||
/* border detection: */ |
||||
if ( (!in_glyph) && (spixel) ) |
||||
/* left border detected */ |
||||
{ |
||||
in_glyph = 1; |
||||
/* draw left pixel border */ |
||||
if (c-1 >= 0) |
||||
SET_PIXEL(picture, yuv_bgcolor, (c+x-1), (y+r)); |
||||
} |
||||
else if ( (in_glyph) && (!spixel) ) |
||||
/* right border detected */ |
||||
{ |
||||
in_glyph = 0; |
||||
/* 'draw' right pixel border */ |
||||
COPY_3(dpixel, yuv_bgcolor); |
||||
} |
||||
|
||||
if (in_glyph) |
||||
/* see if we have a top/bottom border */ |
||||
{ |
||||
/* top */ |
||||
if ( (r-1 >= 0) && !(bitmap->buffer[(r-1)*bitmap->pitch +c/8] & (0x80>>(c%8))) )
||||
/* we have a top border */ |
||||
SET_PIXEL(picture, yuv_bgcolor, (c+x), (y+r-1)); |
||||
|
||||
/* bottom */ |
||||
if ( (r+1 < height) && !(bitmap->buffer[(r+1)*bitmap->pitch +c/8] & (0x80>>(c%8))) )
||||
/* we have a bottom border */ |
||||
SET_PIXEL(picture, yuv_bgcolor, (c+x), (y+r+1)); |
||||
|
||||
} |
||||
} |
||||
|
||||
SET_PIXEL(picture, dpixel, (c+x), (y+r)); |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
|
||||
static inline void draw_box(AVPicture *picture, unsigned int x, unsigned int y, unsigned int width, unsigned int height, unsigned char yuv_color[3]) |
||||
{ |
||||
int i, j; |
||||
|
||||
for (j = 0; (j < height); j++) |
||||
for (i = 0; (i < width); i++) |
||||
{ |
||||
SET_PIXEL(picture, yuv_color, (i+x), (y+j)); |
||||
} |
||||
|
||||
} |
||||
|
||||
|
||||
|
||||
|
||||
void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts) |
||||
{ |
||||
ContextInfo *ci = (ContextInfo *) ctx; |
||||
FT_Face face = ci->face; |
||||
FT_GlyphSlot slot = face->glyph; |
||||
unsigned char *text = ci->text; |
||||
unsigned char c; |
||||
int x = 0, y = 0, i=0, size=0; |
||||
unsigned char buff[MAXSIZE_TEXT]; |
||||
unsigned char tbuff[MAXSIZE_TEXT]; |
||||
time_t now = time(0); |
||||
int str_w, str_w_max; |
||||
FT_Vector pos[MAXSIZE_TEXT]; |
||||
FT_Vector delta; |
||||
|
||||
if (ci->file) |
||||
{ |
||||
int fd = open(ci->file, O_RDONLY); |
||||
|
||||
if (fd < 0) |
||||
{ |
||||
text = ci->text; |
||||
av_log(NULL, AV_LOG_INFO, "WARNING: The file could not be opened. Using text provided with -t switch: %s", strerror(errno)); |
||||
} |
||||
else |
||||
{ |
||||
int l = read(fd, tbuff, sizeof(tbuff) - 1); |
||||
|
||||
if (l >= 0) |
||||
{ |
||||
tbuff[l] = 0; |
||||
text = tbuff; |
||||
} |
||||
else |
||||
{ |
||||
text = ci->text; |
||||
av_log(NULL, AV_LOG_INFO, "WARNING: The file could not be read. Using text provided with -t switch: %s", strerror(errno)); |
||||
} |
||||
close(fd); |
||||
} |
||||
} |
||||
else |
||||
{ |
||||
text = ci->text; |
||||
} |
||||
|
||||
strftime(buff, sizeof(buff), text, localtime(&now)); |
||||
|
||||
text = buff; |
||||
|
||||
size = strlen(text); |
||||
|
||||
|
||||
|
||||
|
||||
/* measure string size and save glyphs position*/ |
||||
str_w = str_w_max = 0; |
||||
x = ci->x; |
||||
y = ci->y; |
||||
for (i=0; i < size; i++) |
||||
{ |
||||
c = text[i]; |
||||
|
||||
/* kerning */ |
||||
if ( (ci->use_kerning) && (i > 0) && (ci->glyphs_index[c]) ) |
||||
{ |
||||
FT_Get_Kerning( ci->face, |
||||
ci->glyphs_index[ text[i-1] ], |
||||
ci->glyphs_index[c], |
||||
ft_kerning_default, |
||||
&delta ); |
||||
|
||||
x += delta.x >> 6; |
||||
} |
||||
|
||||
if (( (x + ci->advance[ c ]) >= width ) || ( c == '\n' )) |
||||
{ |
||||
str_w = width - ci->x - 1; |
||||
|
||||
y += ci->text_height; |
||||
x = ci->x; |
||||
} |
||||
|
||||
|
||||
/* save position */ |
||||
pos[i].x = x + ci->bitmap_left[c]; |
||||
pos[i].y = y - ci->bitmap_top[c] + ci->baseline; |
||||
|
||||
|
||||
x += ci->advance[c]; |
||||
|
||||
|
||||
if (str_w > str_w_max) |
||||
str_w_max = str_w; |
||||
|
||||
} |
||||
|
||||
|
||||
|
||||
|
||||
if (ci->bg) |
||||
{ |
||||
/* Check if it doesn't pass the limits */ |
||||
if ( str_w_max + ci->x >= width ) |
||||
str_w_max = width - ci->x - 1; |
||||
if ( y >= height ) |
||||
y = height - 1 - 2*ci->y; |
||||
|
||||
/* Draw Background */ |
||||
draw_box( picture, ci->x, ci->y, str_w_max, y - ci->y, ci->bgcolor ); |
||||
} |
||||
|
||||
|
||||
|
||||
/* Draw Glyphs */ |
||||
for (i=0; i < size; i++) |
||||
{ |
||||
c = text[i]; |
||||
|
||||
if ( |
||||
( (c == '_') && (text == ci->text) ) || /* skip '_' (consider as space)
|
||||
IF text was specified in cmd line |
||||
(which doesn't like nested quotes) */ |
||||
( c == '\n' ) /* Skip new line char, just go to new line */ |
||||
) |
||||
continue; |
||||
|
||||
/* now, draw to our target surface */ |
||||
draw_glyph( picture, |
||||
&(ci->bitmaps[ c ]), |
||||
pos[i].x, |
||||
pos[i].y, |
||||
width, |
||||
height, |
||||
ci->fgcolor, |
||||
ci->bgcolor, |
||||
ci->outline ); |
||||
|
||||
/* increment pen position */ |
||||
x += slot->advance.x >> 6; |
||||
} |
||||
|
||||
|
||||
} |
||||
|
@@ -1,382 +0,0 @@
/*
 * Fish Detector Hook
 * Copyright (c) 2002 Philip Gladstone
 *
 * This file implements a fish detector. It is used to see when a
 * goldfish passes in front of the camera. It does this by counting
 * the number of input pixels that fall within a particular HSV
 * range.
 *
 * It takes a multitude of arguments:
 *
 * -h <num>-<num>  the range of H values that are fish
 * -s <num>-<num>  the range of S values that are fish
 * -v <num>-<num>  the range of V values that are fish
 * -z              zap all non-fish values to black
 * -l <num>        limit the number of saved files to <num>
 * -i <num>        only check frames every <num> seconds
 * -t <num>        the threshold for the amount of fish pixels (range 0-1)
 * -d              turn debugging on
 * -D <directory>  where to put the fish images
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
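/* NOTE: Configure() below stores the -t fraction as parts-per-thousand
 * (atof(optarg) * 1000) and Process() compares it against
 * 1000 * inrange / pixcnt; the -i interval in seconds is likewise kept in
 * microseconds and added to the frame pts to rate-limit the detector. */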
#include <stdlib.h> |
||||
#include <fcntl.h> |
||||
#include <unistd.h> |
||||
#include <stdarg.h> |
||||
#include <string.h> |
||||
#include <time.h> |
||||
#include <stdio.h> |
||||
#include <dirent.h> |
||||
|
||||
#include "libavformat/avformat.h" |
||||
#include "libavformat/framehook.h" |
||||
#include "libavcodec/dsputil.h" |
||||
#include "libswscale/swscale.h" |
||||
#undef fprintf |
||||
|
||||
static int sws_flags = SWS_BICUBIC; |
||||
|
||||
#define SCALEBITS 10 |
||||
#define ONE_HALF (1 << (SCALEBITS - 1)) |
||||
#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5)) |
||||
|
||||
#define YUV_TO_RGB1_CCIR(cb1, cr1)\ |
||||
{\
|
||||
cb = (cb1) - 128;\
|
||||
cr = (cr1) - 128;\
|
||||
r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
|
||||
g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
|
||||
ONE_HALF;\
|
||||
b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
|
||||
} |
||||
|
||||
#define YUV_TO_RGB2_CCIR(r, g, b, y1)\ |
||||
{\
|
||||
yt = ((y1) - 16) * FIX(255.0/219.0);\
|
||||
r = cm[(yt + r_add) >> SCALEBITS];\
|
||||
g = cm[(yt + g_add) >> SCALEBITS];\
|
||||
b = cm[(yt + b_add) >> SCALEBITS];\
|
||||
} |
||||
|
||||
|
||||
|
||||
|
||||
typedef struct { |
||||
int h; /* 0 .. 360 */ |
||||
int s; /* 0 .. 255 */ |
||||
int v; /* 0 .. 255 */ |
||||
} HSV; |
||||
|
||||
typedef struct { |
||||
int zapping; |
||||
int threshold; |
||||
HSV dark, bright; |
||||
char *dir; |
||||
int file_limit; |
||||
int debug; |
||||
int min_interval; |
||||
int64_t next_pts; |
||||
int inset; |
||||
int min_width; |
||||
struct SwsContext *toRGB_convert_ctx; |
||||
} ContextInfo; |
||||
|
||||
static void dorange(const char *s, int *first, int *second, int maxval) |
||||
{ |
||||
sscanf(s, "%d-%d", first, second); |
||||
if (*first > maxval) |
||||
*first = maxval; |
||||
if (*second > maxval) |
||||
*second = maxval; |
||||
} |
||||
|
||||
void Release(void *ctx) |
||||
{ |
||||
ContextInfo *ci; |
||||
ci = (ContextInfo *) ctx; |
||||
|
||||
if (ctx) { |
||||
sws_freeContext(ci->toRGB_convert_ctx); |
||||
av_free(ctx); |
||||
} |
||||
} |
||||
|
||||
int Configure(void **ctxp, int argc, char *argv[]) |
||||
{ |
||||
ContextInfo *ci; |
||||
int c; |
||||
|
||||
*ctxp = av_mallocz(sizeof(ContextInfo)); |
||||
ci = (ContextInfo *) *ctxp; |
||||
|
||||
optind = 1; |
||||
|
||||
ci->dir = av_strdup("/tmp"); |
||||
ci->threshold = 100; |
||||
ci->file_limit = 100; |
||||
ci->min_interval = 1000000; |
||||
ci->inset = 10; /* Percent */ |
||||
|
||||
while ((c = getopt(argc, argv, "w:i:dh:s:v:zl:t:D:")) > 0) { |
||||
switch (c) { |
||||
case 'h': |
||||
dorange(optarg, &ci->dark.h, &ci->bright.h, 360); |
||||
break; |
||||
case 's': |
||||
dorange(optarg, &ci->dark.s, &ci->bright.s, 255); |
||||
break; |
||||
case 'v': |
||||
dorange(optarg, &ci->dark.v, &ci->bright.v, 255); |
||||
break; |
||||
case 'z': |
||||
ci->zapping = 1; |
||||
break; |
||||
case 'l': |
||||
ci->file_limit = atoi(optarg); |
||||
break; |
||||
case 'i': |
||||
ci->min_interval = 1000000 * atof(optarg); |
||||
break; |
||||
case 't': |
||||
ci->threshold = atof(optarg) * 1000; |
||||
if (ci->threshold > 1000 || ci->threshold < 0) { |
||||
av_log(NULL, AV_LOG_ERROR, "Invalid threshold value '%s' (range is 0-1)\n", optarg); |
||||
return -1; |
||||
} |
||||
break; |
||||
case 'w': |
||||
ci->min_width = atoi(optarg); |
||||
break; |
||||
case 'd': |
||||
ci->debug++; |
||||
break; |
||||
case 'D': |
||||
ci->dir = av_strdup(optarg); |
||||
break; |
||||
default: |
||||
av_log(NULL, AV_LOG_ERROR, "Unrecognized argument '%s'\n", argv[optind]); |
||||
return -1; |
||||
} |
||||
} |
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Fish detector configured:\n"); |
||||
av_log(NULL, AV_LOG_INFO, " HSV range: %d,%d,%d - %d,%d,%d\n", |
||||
ci->dark.h, |
||||
ci->dark.s, |
||||
ci->dark.v, |
||||
ci->bright.h, |
||||
ci->bright.s, |
||||
ci->bright.v); |
||||
av_log(NULL, AV_LOG_INFO, " Threshold is %d%% pixels\n", ci->threshold / 10); |
||||
|
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static void get_hsv(HSV *hsv, int r, int g, int b) |
||||
{ |
||||
int i, v, x, f; |
||||
|
||||
x = (r < g) ? r : g; |
||||
if (b < x) |
||||
x = b; |
||||
v = (r > g) ? r : g; |
||||
if (b > v) |
||||
v = b; |
||||
|
||||
if (v == x) { |
||||
hsv->h = 0; |
||||
hsv->s = 0; |
||||
hsv->v = v; |
||||
return; |
||||
} |
||||
|
||||
if (r == v) { |
||||
f = g - b; |
||||
i = 0; |
||||
} else if (g == v) { |
||||
f = b - r; |
||||
i = 2 * 60; |
||||
} else { |
||||
f = r - g; |
||||
i = 4 * 60; |
||||
} |
||||
|
||||
hsv->h = i + (60 * f) / (v - x); |
||||
if (hsv->h < 0) |
||||
hsv->h += 360; |
||||
|
||||
hsv->s = (255 * (v - x)) / v; |
||||
hsv->v = v; |
||||
|
||||
return; |
||||
} |
||||
|
||||
void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts) |
||||
{ |
||||
ContextInfo *ci = (ContextInfo *) ctx; |
||||
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; |
||||
int rowsize = picture->linesize[0]; |
||||
|
||||
#if 0 |
||||
av_log(NULL, AV_LOG_DEBUG, "pix_fmt = %d, width = %d, pts = %lld, ci->next_pts = %lld\n", |
||||
pix_fmt, width, pts, ci->next_pts); |
||||
#endif |
||||
|
||||
if (pts < ci->next_pts) |
||||
return; |
||||
|
||||
if (width < ci->min_width) |
||||
return; |
||||
|
||||
ci->next_pts = pts + 1000000; |
||||
|
||||
if (pix_fmt == PIX_FMT_YUV420P) { |
||||
uint8_t *y, *u, *v; |
||||
int width2 = width >> 1; |
||||
int inrange = 0; |
||||
int pixcnt; |
||||
int h; |
||||
int h_start, h_end; |
||||
int w_start, w_end; |
||||
|
||||
h_end = 2 * ((ci->inset * height) / 200); |
||||
h_start = height - h_end; |
||||
|
||||
w_end = (ci->inset * width2) / 100; |
||||
w_start = width2 - w_end; |
||||
|
||||
pixcnt = ((h_start - h_end) >> 1) * (w_start - w_end); |
||||
|
||||
y = picture->data[0] + h_end * picture->linesize[0] + w_end * 2; |
||||
u = picture->data[1] + h_end * picture->linesize[1] / 2 + w_end; |
||||
v = picture->data[2] + h_end * picture->linesize[2] / 2 + w_end; |
||||
|
||||
for (h = h_start; h > h_end; h -= 2) { |
||||
int w; |
||||
|
||||
for (w = w_start; w > w_end; w--) { |
||||
unsigned int r,g,b; |
||||
HSV hsv; |
||||
int cb, cr, yt, r_add, g_add, b_add; |
||||
|
||||
YUV_TO_RGB1_CCIR(u[0], v[0]); |
||||
YUV_TO_RGB2_CCIR(r, g, b, y[0]); |
||||
|
||||
get_hsv(&hsv, r, g, b); |
||||
|
||||
if (ci->debug > 1) |
||||
av_log(NULL, AV_LOG_DEBUG, "(%d,%d,%d) -> (%d,%d,%d)\n", |
||||
r,g,b,hsv.h,hsv.s,hsv.v); |
||||
|
||||
|
||||
if (hsv.h >= ci->dark.h && hsv.h <= ci->bright.h && |
||||
hsv.s >= ci->dark.s && hsv.s <= ci->bright.s && |
||||
hsv.v >= ci->dark.v && hsv.v <= ci->bright.v) { |
||||
inrange++; |
||||
} else if (ci->zapping) { |
||||
y[0] = y[1] = y[rowsize] = y[rowsize + 1] = 16; |
||||
u[0] = 128; |
||||
v[0] = 128; |
||||
} |
||||
|
||||
y+= 2; |
||||
u++; |
||||
v++; |
||||
} |
||||
|
||||
y += picture->linesize[0] * 2 - (w_start - w_end) * 2; |
||||
u += picture->linesize[1] - (w_start - w_end); |
||||
v += picture->linesize[2] - (w_start - w_end); |
||||
} |
||||
|
||||
if (ci->debug) |
||||
av_log(NULL, AV_LOG_INFO, "Fish: Inrange=%d of %d = %d threshold\n", inrange, pixcnt, 1000 * inrange / pixcnt); |
||||
|
||||
if (inrange * 1000 / pixcnt >= ci->threshold) { |
||||
/* Save to file */ |
||||
int size; |
||||
char *buf; |
||||
AVPicture picture1; |
||||
static int frame_counter; |
||||
static int foundfile; |
||||
|
||||
if ((frame_counter++ % 20) == 0) { |
||||
/* Check how many files we have */ |
||||
DIR *d; |
||||
|
||||
foundfile = 0; |
||||
|
||||
d = opendir(ci->dir); |
||||
if (d) { |
||||
struct dirent *dent; |
||||
|
||||
while ((dent = readdir(d))) { |
||||
if (strncmp("fishimg", dent->d_name, 7) == 0) { |
||||
if (strcmp(".ppm", dent->d_name + strlen(dent->d_name) - 4) == 0) { |
||||
foundfile++; |
||||
} |
||||
} |
||||
} |
||||
closedir(d); |
||||
} |
||||
} |
||||
|
||||
if (foundfile < ci->file_limit) { |
||||
FILE *f; |
||||
char fname[256]; |
||||
|
||||
size = avpicture_get_size(PIX_FMT_RGB24, width, height); |
||||
buf = av_malloc(size); |
||||
|
||||
avpicture_fill(&picture1, buf, PIX_FMT_RGB24, width, height); |
||||
|
||||
// if we already got a SWS context, let's realloc if is not re-useable
|
||||
ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx, |
||||
width, height, pix_fmt, |
||||
width, height, PIX_FMT_RGB24, |
||||
sws_flags, NULL, NULL, NULL); |
||||
if (ci->toRGB_convert_ctx == NULL) { |
||||
av_log(NULL, AV_LOG_ERROR, |
||||
"Cannot initialize the toRGB conversion context\n"); |
||||
return; |
||||
} |
||||
// img_convert parameters are 2 first destination, then 4 source
|
||||
// sws_scale parameters are context, 4 first source, then 2 destination
|
||||
sws_scale(ci->toRGB_convert_ctx, |
||||
picture->data, picture->linesize, 0, height, |
||||
picture1.data, picture1.linesize); |
||||
|
||||
/* Write out the PPM file */ |
||||
snprintf(fname, sizeof(fname), "%s/fishimg%ld_%"PRId64".ppm", ci->dir, (long)(av_gettime() / 1000000), pts); |
||||
f = fopen(fname, "w"); |
||||
if (f) { |
||||
fprintf(f, "P6 %d %d 255\n", width, height); |
||||
if (!fwrite(buf, width * height * 3, 1, f)) |
||||
av_log(ctx, AV_LOG_ERROR, "Couldn't write to PPM file %s\n", fname); |
||||
fclose(f); |
||||
} |
||||
|
||||
av_free(buf); |
||||
ci->next_pts = pts + ci->min_interval; |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
@@ -1,493 +0,0 @@
/*
 * imlib2 based hook
 * Copyright (c) 2002 Philip Gladstone
 *
 * This module is very much intended as an example of what could be done.
 *
 * One caution is that this is an expensive process -- in particular the
 * conversion of the image into RGB and back is time consuming. For some
 * special cases -- e.g. painting black text -- it would be faster to paint
 * the text into a bitmap and then combine it directly into the YUV
 * image. However, this code is fast enough to handle 10 fps of 320x240 on a
 * 900MHz Duron in maybe 15% of the CPU.
 *
 * See further statistics on Pentium4, 3GHz, FFmpeg is SVN-r6798
 * Input movie is 20.2 seconds of PAL DV on AVI
 * Output movie is DVD compliant VOB.
 *
 ffmpeg -i input.avi -target pal-dvd out.vob
 # 13.516s  just transcode
 ffmpeg -i input.avi -vhook /usr/local/bin/vhook/null.dll -target pal-dvd out.vob
 # 23.546s  transcode and img_convert
 ffmpeg -i input.avi -vhook \
   'vhook/imlib2.dll -c red -F Vera/20 -x 150-0.5*N -y 70+0.25*N -t Hello_person' \
   -target pal-dvd out.vob
 # 21.454s  transcode, img_convert and move text around
 ffmpeg -i input.avi -vhook \
   'vhook/imlib2.dll -x 150-0.5*N -y 70+0.25*N -i /usr/share/imlib2/data/images/bulb.png' \
   -target pal-dvd out.vob
 # 20.828s  transcode, img_convert and move image around
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
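/* NOTE: const_names[] below must stay in the same order as the
 * const_values[] array built in Process() -- PI, E, N, H, W, h, w, X, Y --
 * since the expression evaluator matches names to values by index. */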
|
||||
#include "libavformat/framehook.h" |
||||
#include "libswscale/swscale.h" |
||||
|
||||
#include <stdio.h> |
||||
#include <stdlib.h> |
||||
#include <fcntl.h> |
||||
#include <stdarg.h> |
||||
#include <string.h> |
||||
#include <strings.h> |
||||
#include <unistd.h> |
||||
#undef time |
||||
#include <sys/time.h> |
||||
#include <time.h> |
||||
#include <Imlib2.h> |
||||
#include "libavcodec/eval.h" |
||||
|
||||
const char *const_names[]={ |
||||
"PI", |
||||
"E", |
||||
"N", // frame number (starting at zero)
|
||||
"H", // frame height
|
||||
"W", // frame width
|
||||
"h", // image height
|
||||
"w", // image width
|
||||
"X", // previous x
|
||||
"Y", // previous y
|
||||
NULL |
||||
}; |
||||
|
||||
static int sws_flags = SWS_BICUBIC; |
||||
|
||||
typedef struct { |
||||
int dummy; |
||||
Imlib_Font fn; |
||||
char *text; |
||||
char *file; |
||||
int r, g, b, a; |
||||
AVEvalExpr *eval_r, *eval_g, *eval_b, *eval_a; |
||||
char *expr_R, *expr_G, *expr_B, *expr_A; |
||||
int eval_colors; |
||||
double x, y; |
||||
char *fileImage; |
||||
struct CachedImage *cache; |
||||
Imlib_Image imageOverlaid; |
||||
AVEvalExpr *eval_x, *eval_y; |
||||
char *expr_x, *expr_y; |
||||
int frame_number; |
||||
int imageOverlaid_width, imageOverlaid_height; |
||||
|
||||
// This vhook first converts frame to RGB ...
|
||||
struct SwsContext *toRGB_convert_ctx; |
||||
// ... and then converts back frame from RGB to initial format
|
||||
struct SwsContext *fromRGB_convert_ctx; |
||||
} ContextInfo; |
||||
|
||||
typedef struct CachedImage { |
||||
struct CachedImage *next; |
||||
Imlib_Image image; |
||||
int width; |
||||
int height; |
||||
} CachedImage; |
||||
|
||||
void Release(void *ctx) |
||||
{ |
||||
ContextInfo *ci; |
||||
ci = (ContextInfo *) ctx; |
||||
|
||||
if (ci->cache) { |
||||
imlib_context_set_image(ci->cache->image); |
||||
imlib_free_image(); |
||||
av_free(ci->cache); |
||||
} |
||||
if (ctx) { |
||||
if (ci->imageOverlaid) { |
||||
imlib_context_set_image(ci->imageOverlaid); |
||||
imlib_free_image(); |
||||
} |
||||
ff_eval_free(ci->eval_x); |
||||
ff_eval_free(ci->eval_y); |
||||
ff_eval_free(ci->eval_r); |
||||
ff_eval_free(ci->eval_g); |
||||
ff_eval_free(ci->eval_b); |
||||
ff_eval_free(ci->eval_a); |
||||
|
||||
av_free(ci->expr_x); |
||||
av_free(ci->expr_y); |
||||
av_free(ci->expr_R); |
||||
av_free(ci->expr_G); |
||||
av_free(ci->expr_B); |
||||
av_free(ci->expr_A); |
||||
sws_freeContext(ci->toRGB_convert_ctx); |
||||
sws_freeContext(ci->fromRGB_convert_ctx); |
||||
av_free(ctx); |
||||
} |
||||
} |
||||
|
||||
int Configure(void **ctxp, int argc, char *argv[]) |
||||
{ |
||||
int c; |
||||
ContextInfo *ci; |
||||
char *rgbtxt = 0; |
||||
const char *font = "LucidaSansDemiBold/16"; |
||||
char *fp = getenv("FONTPATH"); |
||||
char *color = 0; |
||||
FILE *f; |
||||
char *p; |
||||
const char *error; |
||||
|
||||
*ctxp = av_mallocz(sizeof(ContextInfo)); |
||||
ci = (ContextInfo *) *ctxp; |
||||
|
||||
ci->x = 0.0; |
||||
ci->y = 0.0; |
||||
ci->expr_x = "0.0"; |
||||
ci->expr_y = "0.0"; |
||||
|
||||
optind = 1; |
||||
|
||||
/* Use ':' to split FONTPATH */ |
||||
if (fp) |
||||
while ((p = strchr(fp, ':'))) { |
||||
*p = 0; |
||||
imlib_add_path_to_font_path(fp); |
||||
fp = p + 1; |
||||
} |
||||
if ((fp) && (*fp)) |
||||
imlib_add_path_to_font_path(fp); |
||||
|
||||
|
||||
while ((c = getopt(argc, argv, "R:G:B:A:C:c:f:F:t:x:y:i:")) > 0) { |
||||
switch (c) { |
||||
case 'R': |
||||
ci->expr_R = av_strdup(optarg); |
||||
ci->eval_colors = 1; |
||||
break; |
||||
case 'G': |
||||
ci->expr_G = av_strdup(optarg); |
||||
ci->eval_colors = 1; |
||||
break; |
||||
case 'B': |
||||
ci->expr_B = av_strdup(optarg); |
||||
ci->eval_colors = 1; |
||||
break; |
||||
case 'A': |
||||
ci->expr_A = av_strdup(optarg); |
||||
break; |
||||
case 'C': |
||||
rgbtxt = optarg; |
||||
break; |
||||
case 'c': |
||||
color = optarg; |
||||
break; |
||||
case 'F': |
||||
font = optarg; |
||||
break; |
||||
case 't': |
||||
ci->text = av_strdup(optarg); |
||||
break; |
||||
case 'f': |
||||
ci->file = av_strdup(optarg); |
||||
break; |
||||
case 'x': |
||||
ci->expr_x = av_strdup(optarg); |
||||
break; |
||||
case 'y': |
||||
ci->expr_y = av_strdup(optarg); |
||||
break; |
||||
case 'i': |
||||
ci->fileImage = av_strdup(optarg); |
||||
break; |
||||
case '?': |
||||
av_log(NULL, AV_LOG_ERROR, "Unrecognized argument '%s'\n", argv[optind]); |
||||
return -1; |
||||
} |
||||
} |
||||
|
||||
if (ci->eval_colors && !(ci->expr_R && ci->expr_G && ci->expr_B)) |
||||
{ |
||||
av_log(NULL, AV_LOG_ERROR, "You must specify expressions for all or no colors.\n"); |
||||
return -1; |
||||
} |
||||
|
||||
if (ci->text || ci->file) { |
||||
ci->fn = imlib_load_font(font); |
||||
if (!ci->fn) { |
||||
av_log(NULL, AV_LOG_ERROR, "Failed to load font '%s'\n", font); |
||||
return -1; |
||||
} |
||||
imlib_context_set_font(ci->fn); |
||||
imlib_context_set_direction(IMLIB_TEXT_TO_RIGHT); |
||||
} |
||||
|
||||
if (color) { |
||||
char buff[256]; |
||||
int done = 0; |
||||
|
||||
if (ci->eval_colors) |
||||
{ |
||||
av_log(NULL, AV_LOG_ERROR, "You must not specify both a color name and expressions for the colors.\n"); |
||||
return -1; |
||||
} |
||||
|
||||
if (rgbtxt) |
||||
f = fopen(rgbtxt, "r"); |
||||
else |
||||
{ |
||||
f = fopen("/usr/share/X11/rgb.txt", "r"); |
||||
if (!f) |
||||
f = fopen("/usr/lib/X11/rgb.txt", "r"); |
||||
} |
||||
if (!f) { |
||||
av_log(NULL, AV_LOG_ERROR, "Failed to find RGB color names file\n"); |
||||
return -1; |
||||
} |
||||
while (fgets(buff, sizeof(buff), f)) { |
||||
int r, g, b; |
||||
char colname[80]; |
||||
|
||||
if (sscanf(buff, "%d %d %d %64s", &r, &g, &b, colname) == 4 && |
||||
strcasecmp(colname, color) == 0) { |
||||
ci->r = r; |
||||
ci->g = g; |
||||
ci->b = b; |
||||
/* fprintf(stderr, "%s -> %d,%d,%d\n", colname, r, g, b); */ |
||||
done = 1; |
||||
break; |
||||
} |
||||
} |
||||
fclose(f); |
||||
if (!done) { |
||||
av_log(NULL, AV_LOG_ERROR, "Unable to find color '%s' in rgb.txt\n", color); |
||||
return -1; |
||||
} |
||||
} else if (ci->eval_colors) { |
||||
if (!(ci->eval_r = ff_parse(ci->expr_R, const_names, NULL, NULL, NULL, NULL, &error))){ |
||||
av_log(NULL, AV_LOG_ERROR, "Couldn't parse R expression '%s': %s\n", ci->expr_R, error); |
||||
return -1; |
||||
} |
||||
if (!(ci->eval_g = ff_parse(ci->expr_G, const_names, NULL, NULL, NULL, NULL, &error))){ |
||||
av_log(NULL, AV_LOG_ERROR, "Couldn't parse G expression '%s': %s\n", ci->expr_G, error); |
||||
return -1; |
||||
} |
||||
if (!(ci->eval_b = ff_parse(ci->expr_B, const_names, NULL, NULL, NULL, NULL, &error))){ |
||||
av_log(NULL, AV_LOG_ERROR, "Couldn't parse B expression '%s': %s\n", ci->expr_B, error); |
||||
return -1; |
||||
} |
||||
} |
||||
|
||||
if (ci->expr_A) { |
||||
if (!(ci->eval_a = ff_parse(ci->expr_A, const_names, NULL, NULL, NULL, NULL, &error))){ |
||||
av_log(NULL, AV_LOG_ERROR, "Couldn't parse A expression '%s': %s\n", ci->expr_A, error); |
||||
return -1; |
||||
} |
||||
} else { |
||||
ci->a = 255; |
||||
} |
||||
|
||||
if (!(ci->eval_colors || ci->eval_a)) |
||||
imlib_context_set_color(ci->r, ci->g, ci->b, ci->a); |
||||
|
||||
/* load the image (for example, credits for a movie) */ |
||||
if (ci->fileImage) { |
||||
ci->imageOverlaid = imlib_load_image_immediately(ci->fileImage); |
||||
if (!(ci->imageOverlaid)){ |
||||
av_log(NULL, AV_LOG_ERROR, "Couldn't load image '%s'\n", ci->fileImage); |
||||
return -1; |
||||
} |
||||
imlib_context_set_image(ci->imageOverlaid); |
||||
ci->imageOverlaid_width = imlib_image_get_width(); |
||||
ci->imageOverlaid_height = imlib_image_get_height(); |
||||
} |
||||
|
||||
if (!(ci->eval_x = ff_parse(ci->expr_x, const_names, NULL, NULL, NULL, NULL, &error))){ |
||||
av_log(NULL, AV_LOG_ERROR, "Couldn't parse x expression '%s': %s\n", ci->expr_x, error); |
||||
return -1; |
||||
} |
||||
|
||||
if (!(ci->eval_y = ff_parse(ci->expr_y, const_names, NULL, NULL, NULL, NULL, &error))){ |
||||
av_log(NULL, AV_LOG_ERROR, "Couldn't parse y expression '%s': %s\n", ci->expr_y, error); |
||||
return -1; |
||||
} |
||||
|
||||
return 0; |
||||
} |

static Imlib_Image get_cached_image(ContextInfo *ci, int width, int height)
{
    CachedImage *cache;

    for (cache = ci->cache; cache; cache = cache->next) {
        if (width == cache->width && height == cache->height)
            return cache->image;
    }

    return NULL;
}

static void put_cached_image(ContextInfo *ci, Imlib_Image image, int width, int height)
{
    CachedImage *cache = av_mallocz(sizeof(*cache));

    cache->image = image;
    cache->width = width;
    cache->height = height;
    cache->next = ci->cache;
    ci->cache = cache;
}
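
/*
 * Note (added for clarity): the small cache above keeps one Imlib work image
 * per frame size, so the RGB32 buffer that Process() draws into is created
 * once for a given width/height and then reused for every subsequent frame.
 */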

void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
{
    ContextInfo *ci = (ContextInfo *) ctx;
    AVPicture picture1;
    Imlib_Image image;
    DATA32 *data;

    image = get_cached_image(ci, width, height);

    if (!image) {
        image = imlib_create_image(width, height);
        put_cached_image(ci, image, width, height);
    }

    imlib_context_set_image(image);
    data = imlib_image_get_data();

    avpicture_fill(&picture1, (uint8_t *) data, PIX_FMT_RGB32, width, height);

    // if we already got a SWS context, let's realloc if is not re-useable
    ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
                                width, height, pix_fmt,
                                width, height, PIX_FMT_RGB32,
                                sws_flags, NULL, NULL, NULL);
    if (ci->toRGB_convert_ctx == NULL) {
        av_log(NULL, AV_LOG_ERROR,
               "Cannot initialize the toRGB conversion context\n");
        return;
    }

    // img_convert parameters are          2 first destination, then 4 source
    // sws_scale   parameters are context, 4 first source,      then 2 destination
    sws_scale(ci->toRGB_convert_ctx,
              picture->data, picture->linesize, 0, height,
              picture1.data, picture1.linesize);

    imlib_image_set_has_alpha(0);

    {
        int wid, hig, h_a, v_a;
        char buff[1000];
        char tbuff[1000];
        const char *tbp = ci->text;
        time_t now = time(0);
        char *p, *q;
        int y;

        double const_values[] = {
            M_PI,
            M_E,
            ci->frame_number,         // frame number (starting at zero)
            height,                   // frame height
            width,                    // frame width
            ci->imageOverlaid_height, // image height
            ci->imageOverlaid_width,  // image width
            ci->x,                    // previous x
            ci->y,                    // previous y
            0
        };

        if (ci->file) {
            int fd = open(ci->file, O_RDONLY);

            if (fd < 0) {
                tbp = "[File not found]";
            } else {
                int l = read(fd, tbuff, sizeof(tbuff) - 1);

                if (l >= 0) {
                    tbuff[l] = 0;
                    tbp = tbuff;
                } else {
                    tbp = "[I/O Error]";
                }
                close(fd);
            }
        }

        if (tbp)
            strftime(buff, sizeof(buff), tbp, localtime(&now));
        else if (!(ci->imageOverlaid))
            strftime(buff, sizeof(buff), "[No data]", localtime(&now));

        ci->x = ff_parse_eval(ci->eval_x, const_values, ci);
        ci->y = ff_parse_eval(ci->eval_y, const_values, ci);
        y = ci->y;

        if (ci->eval_a) {
            ci->a = ff_parse_eval(ci->eval_a, const_values, ci);
        }

        if (ci->eval_colors) {
            ci->r = ff_parse_eval(ci->eval_r, const_values, ci);
            ci->g = ff_parse_eval(ci->eval_g, const_values, ci);
            ci->b = ff_parse_eval(ci->eval_b, const_values, ci);
        }

        if (ci->eval_colors || ci->eval_a) {
            imlib_context_set_color(ci->r, ci->g, ci->b, ci->a);
        }

        if (!(ci->imageOverlaid))
            for (p = buff; p; p = q) {
                q = strchr(p, '\n');
                if (q)
                    *q++ = 0;

                imlib_text_draw_with_return_metrics(ci->x, y, p, &wid, &hig, &h_a, &v_a);
                y += v_a;
            }

        if (ci->imageOverlaid) {
            imlib_context_set_image(image);
            imlib_blend_image_onto_image(ci->imageOverlaid, 0,
                0, 0, ci->imageOverlaid_width, ci->imageOverlaid_height,
                ci->x, ci->y, ci->imageOverlaid_width, ci->imageOverlaid_height);
        }
    }

    ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
                                  width, height, PIX_FMT_RGB32,
                                  width, height, pix_fmt,
                                  sws_flags, NULL, NULL, NULL);
    if (ci->fromRGB_convert_ctx == NULL) {
        av_log(NULL, AV_LOG_ERROR,
               "Cannot initialize the fromRGB conversion context\n");
        return;
    }
    // img_convert parameters are          2 first destination, then 4 source
    // sws_scale   parameters are context, 4 first source,      then 2 destination
    sws_scale(ci->fromRGB_convert_ctx,
              picture1.data, picture1.linesize, 0, height,
              picture->data, picture->linesize);

    ci->frame_number++;
}

@ -1,116 +0,0 @@
/*
 * Null Video Hook
 * Copyright (c) 2002 Philip Gladstone
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdio.h>

#include "libavformat/framehook.h"
#include "libswscale/swscale.h"

static int sws_flags = SWS_BICUBIC;

typedef struct {
    int dummy;

    // This vhook first converts frame to RGB ...
    struct SwsContext *toRGB_convert_ctx;
    // ... and later converts back frame from RGB to initial format
    struct SwsContext *fromRGB_convert_ctx;
} ContextInfo;

void Release(void *ctx)
{
    ContextInfo *ci;
    ci = (ContextInfo *) ctx;

    if (ctx) {
        sws_freeContext(ci->toRGB_convert_ctx);
        sws_freeContext(ci->fromRGB_convert_ctx);
        av_free(ctx);
    }
}

int Configure(void **ctxp, int argc, char *argv[])
{
    av_log(NULL, AV_LOG_DEBUG, "Called with argc=%d\n", argc);

    *ctxp = av_mallocz(sizeof(ContextInfo));
    return 0;
}

void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
{
    ContextInfo *ci = (ContextInfo *) ctx;
    char *buf = 0;
    AVPicture picture1;
    AVPicture *pict = picture;

    (void) ci;

    if (pix_fmt != PIX_FMT_RGB24) {
        int size;

        size = avpicture_get_size(PIX_FMT_RGB24, width, height);
        buf  = av_malloc(size);

        avpicture_fill(&picture1, buf, PIX_FMT_RGB24, width, height);

        // if we already got a SWS context, let's realloc if is not re-useable
        ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
                                    width, height, pix_fmt,
                                    width, height, PIX_FMT_RGB24,
                                    sws_flags, NULL, NULL, NULL);
        if (ci->toRGB_convert_ctx == NULL) {
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot initialize the toRGB conversion context\n");
            return;
        }
        // img_convert parameters are          2 first destination, then 4 source
        // sws_scale   parameters are context, 4 first source,      then 2 destination
        sws_scale(ci->toRGB_convert_ctx,
                  picture->data, picture->linesize, 0, height,
                  picture1.data, picture1.linesize);

        pict = &picture1;
    }

    /* Insert filter code here */
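
    /*
     * Illustrative sketch (not part of the original hook): a minimal filter
     * placed here could invert the RGB24 image in place, using only the
     * 'pict', 'width' and 'height' variables already defined above:
     *
     *     int x, y;
     *     for (y = 0; y < height; y++) {
     *         uint8_t *row = pict->data[0] + y * pict->linesize[0];
     *         for (x = 0; x < width * 3; x++)
     *             row[x] = 255 - row[x];
     *     }
     */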

    if (pix_fmt != PIX_FMT_RGB24) {
        ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
                                      width, height, PIX_FMT_RGB24,
                                      width, height, pix_fmt,
                                      sws_flags, NULL, NULL, NULL);
        if (ci->fromRGB_convert_ctx == NULL) {
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot initialize the fromRGB conversion context\n");
            return;
        }
        // img_convert parameters are          2 first destination, then 4 source
        // sws_scale   parameters are context, 4 first source,      then 2 destination
        sws_scale(ci->fromRGB_convert_ctx,
                  picture1.data, picture1.linesize, 0, height,
                  picture->data, picture->linesize);
    }

    av_free(buf);
}

@ -1,374 +0,0 @@
/*
 * PPM Video Hook
 * Copyright (c) 2003 Charles Yates
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <ctype.h>
#include "libavutil/avstring.h"
#include "libavformat/framehook.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#undef fprintf

static int sws_flags = SWS_BICUBIC;

/** Bi-directional pipe structure.
*/

typedef struct rwpipe
{
    int pid;
    FILE *reader;
    FILE *writer;
}
rwpipe;

/** Create a bidirectional pipe for the given command.
*/

static rwpipe *rwpipe_open( int argc, char *argv[] )
{
    rwpipe *this = av_mallocz( sizeof( rwpipe ) );

    if ( this != NULL )
    {
        int input[ 2 ];
        int output[ 2 ];

        /* pipe() returns 0 on success, so only bail out on failure */
        if ( pipe( input ) < 0 )
            return NULL;

        if ( pipe( output ) < 0 )
            return NULL;

        this->pid = fork();

        if ( this->pid == 0 )
        {
#define COMMAND_SIZE 10240
            char *command = av_mallocz( COMMAND_SIZE );
            int i;

            strcpy( command, "" );
            for ( i = 0; i < argc; i ++ )
            {
                av_strlcat( command, argv[ i ], COMMAND_SIZE );
                av_strlcat( command, " ", COMMAND_SIZE );
            }

            dup2( output[ 0 ], STDIN_FILENO );
            dup2( input[ 1 ], STDOUT_FILENO );

            close( input[ 0 ] );
            close( input[ 1 ] );
            close( output[ 0 ] );
            close( output[ 1 ] );

            execl("/bin/sh", "sh", "-c", command, (char*)NULL );
            _exit( 255 );
        }
        else
        {
            close( input[ 1 ] );
            close( output[ 0 ] );

            this->reader = fdopen( input[ 0 ], "r" );
            this->writer = fdopen( output[ 1 ], "w" );
        }
    }

    return this;
}
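
/*
 * Note (added for clarity): after rwpipe_open() the child's stdin is the read
 * end of 'output' and its stdout is the write end of 'input', so the parent
 * writes raw frames through rwpipe_writer() and reads the processed frames
 * back through rwpipe_reader().
 */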

/** Read data from the pipe.
*/

static FILE *rwpipe_reader( rwpipe *this )
{
    if ( this != NULL )
        return this->reader;
    else
        return NULL;
}

/** Write data to the pipe.
*/

static FILE *rwpipe_writer( rwpipe *this )
{
    if ( this != NULL )
        return this->writer;
    else
        return NULL;
}

/* Read a number from the pipe - assumes PNM style headers.
*/

static int rwpipe_read_number( rwpipe *rw )
{
    int value = 0;
    int c = 0;
    FILE *in = rwpipe_reader( rw );

    do
    {
        c = fgetc( in );

        while( c != EOF && !isdigit( c ) && c != '#' )
            c = fgetc( in );

        if ( c == '#' )
            while( c != EOF && c != '\n' )
                c = fgetc( in );
    }
    while ( c != EOF && !isdigit( c ) );

    while( c != EOF && isdigit( c ) )
    {
        value = value * 10 + ( c - '0' );
        c = fgetc( in );
    }

    return value;
}

/** Read a PPM P6 header.
*/

static int rwpipe_read_ppm_header( rwpipe *rw, int *width, int *height )
{
    char line[ 3 ];
    FILE *in = rwpipe_reader( rw );
    int max;

    if (!fgets( line, 3, in ))
        return -1;

    if ( !strncmp( line, "P6", 2 ) )
    {
        *width  = rwpipe_read_number( rw );
        *height = rwpipe_read_number( rw );
        max = rwpipe_read_number( rw );
        return max != 255 || *width <= 0 || *height <= 0;
    }
    return 1;
}

/** Close the pipe and process.
*/

static void rwpipe_close( rwpipe *this )
{
    if ( this != NULL )
    {
        fclose( this->reader );
        fclose( this->writer );
        waitpid( this->pid, NULL, 0 );
        av_free( this );
    }
}

/** Context info for this vhook - stores the pipe and image buffers.
*/

typedef struct
{
    rwpipe *rw;
    int size1;
    char *buf1;
    int size2;
    char *buf2;

    // This vhook first converts frame to RGB ...
    struct SwsContext *toRGB_convert_ctx;
    // ... then processes it via a PPM command pipe ...
    // ... and finally converts back frame from RGB to initial format
    struct SwsContext *fromRGB_convert_ctx;
}
ContextInfo;

/** Initialise the context info for this vhook.
*/

int Configure(void **ctxp, int argc, char *argv[])
{
    if ( argc > 1 )
    {
        *ctxp = av_mallocz(sizeof(ContextInfo));
        if ( *ctxp != NULL && argc > 1 )
        {
            ContextInfo *info = (ContextInfo *)*ctxp;
            info->rw = rwpipe_open( argc - 1, &argv[ 1 ] );
            return 0;
        }
    }
    return 1;
}

/** Process a frame.
*/

void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
{
    int err = 0;
    ContextInfo *ci = (ContextInfo *) ctx;
    AVPicture picture1;
    AVPicture picture2;
    AVPicture *pict = picture;
    int out_width;
    int out_height;
    int i;
    uint8_t *ptr = NULL;
    FILE *in = rwpipe_reader( ci->rw );
    FILE *out = rwpipe_writer( ci->rw );

    /* Check that we have a pipe to talk to. */
    if ( in == NULL || out == NULL )
        err = 1;

    /* Convert to RGB24 if necessary */
    if ( !err && pix_fmt != PIX_FMT_RGB24 )
    {
        int size = avpicture_get_size(PIX_FMT_RGB24, width, height);

        if ( size != ci->size1 )
        {
            av_free( ci->buf1 );
            ci->buf1 = av_malloc(size);
            ci->size1 = size;
            err = ci->buf1 == NULL;
        }

        if ( !err )
        {
            avpicture_fill(&picture1, ci->buf1, PIX_FMT_RGB24, width, height);

            // if we already got a SWS context, let's realloc if is not re-useable
            ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
                                        width, height, pix_fmt,
                                        width, height, PIX_FMT_RGB24,
                                        sws_flags, NULL, NULL, NULL);
            if (ci->toRGB_convert_ctx == NULL) {
                av_log(NULL, AV_LOG_ERROR,
                       "Cannot initialize the toRGB conversion context\n");
                return;
            }

            // img_convert parameters are          2 first destination, then 4 source
            // sws_scale   parameters are context, 4 first source,      then 2 destination
            sws_scale(ci->toRGB_convert_ctx,
                      picture->data, picture->linesize, 0, height,
                      picture1.data, picture1.linesize);

            pict = &picture1;
        }
    }

    /* Write out the PPM */
    if ( !err )
    {
        ptr = pict->data[ 0 ];
        fprintf( out, "P6\n%d %d\n255\n", width, height );
        for ( i = 0; !err && i < height; i ++ )
        {
            err = !fwrite( ptr, width * 3, 1, out );
            ptr += pict->linesize[ 0 ];
        }
        if ( !err )
            err = fflush( out );
    }

    /* Read the PPM returned. */
    if ( !err && !rwpipe_read_ppm_header( ci->rw, &out_width, &out_height ) )
    {
        int size = avpicture_get_size(PIX_FMT_RGB24, out_width, out_height);

        if ( size != ci->size2 )
        {
            av_free( ci->buf2 );
            ci->buf2 = av_malloc(size);
            ci->size2 = size;
            err = ci->buf2 == NULL;
        }

        if ( !err )
        {
            avpicture_fill(&picture2, ci->buf2, PIX_FMT_RGB24, out_width, out_height);
            ptr = picture2.data[ 0 ];
            for ( i = 0; !err && i < out_height; i ++ )
            {
                err = !fread( ptr, out_width * 3, 1, in );
                ptr += picture2.linesize[ 0 ];
            }
        }
    }

    /* Convert the returned PPM back to the input format */
    if ( !err )
    {
        /* The out_width/out_height returned from the PPM
         * filter won't necessarily be the same as width and height
         * but it will be scaled anyway to width/height.
         */
        av_log(NULL, AV_LOG_DEBUG,
               "PPM vhook: Input dimensions: %d x %d Output dimensions: %d x %d\n",
               width, height, out_width, out_height);
        ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
                                      out_width, out_height, PIX_FMT_RGB24,
                                      width, height, pix_fmt,
                                      sws_flags, NULL, NULL, NULL);
        if (ci->fromRGB_convert_ctx == NULL) {
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot initialize the fromRGB conversion context\n");
            return;
        }

        // img_convert parameters are          2 first destination, then 4 source
        // sws_scale   parameters are context, 4 first source,      then 2 destination
        sws_scale(ci->fromRGB_convert_ctx,
                  picture2.data, picture2.linesize, 0, out_height,
                  picture->data, picture->linesize);
    }
}
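
/*
 * Usage sketch (illustrative, not taken from the original sources): any
 * command line that reads a PPM P6 stream on stdin and writes one back on
 * stdout can be hooked in, for example something along the lines of
 *
 *     ffmpeg -i in.avi -vhook '/path/ppm.so ppmtopgm | pgmtoppm white' out.avi
 *
 * where the netpbm filter chain is only an assumed example.
 */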

/** Clean up the effect.
*/

void Release(void *ctx)
{
    ContextInfo *ci;
    ci = (ContextInfo *) ctx;

    if (ctx)
    {
        rwpipe_close( ci->rw );
        av_free( ci->buf1 );
        av_free( ci->buf2 );
        sws_freeContext(ci->toRGB_convert_ctx);
        sws_freeContext(ci->fromRGB_convert_ctx);
        av_free(ctx);
    }
}

@ -1,655 +0,0 @@
/*
 * Watermark Hook
 * Copyright (c) 2005 Marcus Engene myfirstname(at)mylastname.se
 *
 * parameters for watermark:
 *  -m nbr = nbr is 0..1. 0 is the default mode, see below.
 *  -t nbr = nbr is a six digit hex number. Threshold.
 *  -f file = file is the watermark image filename. You must specify this!
 *
 * MODE 0:
 * The watermark picture works like this (assuming color intensities 0..0xff):
 * Per color do this:
 * If mask color is 0x80, no change to the original frame.
 * If mask color is < 0x80 the abs difference is subtracted from the frame. If
 * the result is < 0, the result is 0.
 * If mask color is > 0x80 the abs difference is added to the frame. If the
 * result is > 0xff, the result is 0xff.
 *
 * You can override the 0x80 level with the -t flag. E.g. if the threshold is
 * 000000 the color value of the watermark is added to the destination.
 *
 * This way a mask that is visible both in light pictures and in dark can be
 * made (e.g. by using a picture generated by Gimp and the bump map tool).
 *
 * An example watermark file is at
 * http://engene.se/ffmpeg_watermark.gif
 *
 * MODE 1:
 * Per color do this:
 * If mask color > threshold color then the watermark pixel is used.
 *
 * Example usage:
 *   ffmpeg -i infile -vhook '/path/watermark.so -f wm.gif' -an out.mov
 *   ffmpeg -i infile -vhook '/path/watermark.so -f wm.gif -m 1 -t 222222' -an out.mov
 *
 * Note that the entire vhook argument is encapsulated in ''. This
 * way, arguments to the vhook won't be mixed up with those for ffmpeg.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
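
/*
 * Worked example for MODE 0 (added for clarity), assuming the default
 * threshold 0x80: a frame value of 0x60 combined with a mask value of 0x90
 * gives 0x60 + 0x90 - 0x80 = 0x70, i.e. the frame is brightened by exactly
 * the amount the mask lies above the threshold; results are clamped to
 * 0..0xff.
 */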

#include <stdlib.h>
//#include <fcntl.h>
#include <unistd.h>
#include <stdarg.h>

#include "libavutil/common.h"
#include "libavformat/avformat.h"
#include "libavformat/framehook.h"
#include "libswscale/swscale.h"

static int sws_flags = SWS_BICUBIC;

typedef struct {
    char filename[2000];
    int x_size;
    int y_size;

    /* get_watermark_picture() variables */
    AVFormatContext *pFormatCtx;
    const char *p_ext;
    int videoStream;
    int frameFinished;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVPacket packet;
    int numBytes;
    uint8_t *buffer;
    int i;
    AVInputFormat *file_iformat;
    AVStream *st;
    int is_done;
    AVFrame *pFrameRGB;
    int thrR;
    int thrG;
    int thrB;
    int mode;

    // This vhook first converts frame to RGB ...
    struct SwsContext *toRGB_convert_ctx;
    // ... then converts a watermark and applies it to the RGB frame ...
    struct SwsContext *watermark_convert_ctx;
    // ... and finally converts back frame from RGB to initial format
    struct SwsContext *fromRGB_convert_ctx;
} ContextInfo;

int get_watermark_picture(ContextInfo *ci, int cleanup);


/****************************************************************************
 *
 ****************************************************************************/
void Release(void *ctx)
{
    ContextInfo *ci;
    ci = (ContextInfo *) ctx;

    if (ci) {
        get_watermark_picture(ci, 1);
        sws_freeContext(ci->toRGB_convert_ctx);
        sws_freeContext(ci->watermark_convert_ctx);
        sws_freeContext(ci->fromRGB_convert_ctx);
    }
    av_free(ctx);
}


/****************************************************************************
 *
 ****************************************************************************/
int Configure(void **ctxp, int argc, char *argv[])
{
    ContextInfo *ci;
    int c;
    int tmp = 0;

    if (0 == (*ctxp = av_mallocz(sizeof(ContextInfo)))) return -1;
    ci = (ContextInfo *) *ctxp;

    optind = 1;

    // Struct is mallocz:ed so no need to reset.
    ci->thrR = 0x80;
    ci->thrG = 0x80;
    ci->thrB = 0x80;

    while ((c = getopt(argc, argv, "f:m:t:")) > 0) {
        switch (c) {
            case 'f':
                strncpy(ci->filename, optarg, 1999);
                ci->filename[1999] = 0;
                break;
            case 'm':
                ci->mode = atoi(optarg);
                break;
            case 't':
                if (1 != sscanf(optarg, "%x", &tmp)) {
                    av_log(NULL, AV_LOG_ERROR, "Watermark: argument to -t must be a 6 digit hex number\n");
                    return -1;
                }
                ci->thrR = (tmp >> 16) & 0xff;
                ci->thrG = (tmp >> 8) & 0xff;
                ci->thrB = (tmp >> 0) & 0xff;
                break;
            default:
                av_log(NULL, AV_LOG_ERROR, "Watermark: Unrecognized argument '%s'\n", argv[optind]);
                return -1;
        }
    }

    if (0 == ci->filename[0]) {
        av_log(NULL, AV_LOG_ERROR, "Watermark: There is no filename specified.\n");
        return -1;
    }

    av_register_all();
    return get_watermark_picture(ci, 0);
}


/****************************************************************************
 * For mode 0 (the original one)
 ****************************************************************************/
static void Process0(void *ctx,
                     AVPicture *picture,
                     enum PixelFormat pix_fmt,
                     int src_width,
                     int src_height,
                     int64_t pts)
{
    ContextInfo *ci = (ContextInfo *) ctx;
    char *buf = 0;
    AVPicture picture1;
    AVPicture *pict = picture;

    AVFrame *pFrameRGB;
    int xm_size;
    int ym_size;

    int x;
    int y;
    int offs, offsm;
    int mpoffs;
    uint32_t *p_pixel = 0;
    uint32_t pixel_meck;
    uint32_t pixel;
    uint32_t pixelm;
    int tmp;
    int thrR = ci->thrR;
    int thrG = ci->thrG;
    int thrB = ci->thrB;

    if (pix_fmt != PIX_FMT_RGB32) {
        int size;

        size = avpicture_get_size(PIX_FMT_RGB32, src_width, src_height);
        buf  = av_malloc(size);

        avpicture_fill(&picture1, buf, PIX_FMT_RGB32, src_width, src_height);

        // if we already got a SWS context, let's realloc if is not re-useable
        ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
                                    src_width, src_height, pix_fmt,
                                    src_width, src_height, PIX_FMT_RGB32,
                                    sws_flags, NULL, NULL, NULL);
        if (ci->toRGB_convert_ctx == NULL) {
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot initialize the toRGB conversion context\n");
            return;
        }

        // img_convert parameters are          2 first destination, then 4 source
        // sws_scale   parameters are context, 4 first source,      then 2 destination
        sws_scale(ci->toRGB_convert_ctx,
                  picture->data, picture->linesize, 0, src_height,
                  picture1.data, picture1.linesize);

        pict = &picture1;
    }

    /* Insert filter code here */ /* ok */

    // Get me next frame
    if (0 > get_watermark_picture(ci, 0)) {
        return;
    }
    // These are the three original static variables in the ffmpeg hack.
    pFrameRGB = ci->pFrameRGB;
    xm_size = ci->x_size;
    ym_size = ci->y_size;

    // I'll do the *4 => <<2 crap later. Most compilers understand that anyway.
    // According to avcodec.h PIX_FMT_RGB32 is handled in endian specific manner.
    for (y=0; y<src_height; y++) {
        offs = y * (src_width * 4);
        offsm = (((y * ym_size) / src_height) * 4) * xm_size; // offsm first in maskline. byteoffs!
        for (x=0; x<src_width; x++) {
            mpoffs = offsm + (((x * xm_size) / src_width) * 4);
            p_pixel = (uint32_t *)&((pFrameRGB->data[0])[mpoffs]);
            pixelm = *p_pixel;
            p_pixel = (uint32_t *)&((pict->data[0])[offs]);
            pixel = *p_pixel;
            // pixelm = *((uint32_t *)&(pFrameRGB->data[mpoffs]));
            pixel_meck = pixel & 0xff000000;

            // R
            tmp = (int)((pixel >> 16) & 0xff) + (int)((pixelm >> 16) & 0xff) - thrR;
            if (tmp > 255) tmp = 255;
            if (tmp < 0) tmp = 0;
            pixel_meck |= (tmp << 16) & 0xff0000;
            // G
            tmp = (int)((pixel >> 8) & 0xff) + (int)((pixelm >> 8) & 0xff) - thrG;
            if (tmp > 255) tmp = 255;
            if (tmp < 0) tmp = 0;
            pixel_meck |= (tmp << 8) & 0xff00;
            // B
            tmp = (int)((pixel >> 0) & 0xff) + (int)((pixelm >> 0) & 0xff) - thrB;
            if (tmp > 255) tmp = 255;
            if (tmp < 0) tmp = 0;
            pixel_meck |= (tmp << 0) & 0xff;

            // test:
            //pixel_meck = pixel & 0xff000000;
            //pixel_meck |= (pixelm & 0x00ffffff);

            *p_pixel = pixel_meck;

            offs += 4;
        } // foreach X
    } // foreach Y

    if (pix_fmt != PIX_FMT_RGB32) {
        ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
                                      src_width, src_height, PIX_FMT_RGB32,
                                      src_width, src_height, pix_fmt,
                                      sws_flags, NULL, NULL, NULL);
        if (ci->fromRGB_convert_ctx == NULL) {
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot initialize the fromRGB conversion context\n");
            return;
        }
        // img_convert parameters are          2 first destination, then 4 source
        // sws_scale   parameters are context, 4 first source,      then 2 destination
        sws_scale(ci->fromRGB_convert_ctx,
                  picture1.data, picture1.linesize, 0, src_height,
                  picture->data, picture->linesize);
    }

    av_free(buf);
}


/****************************************************************************
 * For mode 1
 ****************************************************************************/
static void Process1(void *ctx,
                     AVPicture *picture,
                     enum PixelFormat pix_fmt,
                     int src_width,
                     int src_height,
                     int64_t pts)
{
    ContextInfo *ci = (ContextInfo *) ctx;
    char *buf = 0;
    AVPicture picture1;
    AVPicture *pict = picture;

    AVFrame *pFrameRGB;
    int xm_size;
    int ym_size;

    int x;
    int y;
    int offs, offsm;
    int mpoffs;
    uint32_t *p_pixel = 0;
    uint32_t pixel;
    uint32_t pixelm;

    if (pix_fmt != PIX_FMT_RGB32) {
        int size;

        size = avpicture_get_size(PIX_FMT_RGB32, src_width, src_height);
        buf  = av_malloc(size);

        avpicture_fill(&picture1, buf, PIX_FMT_RGB32, src_width, src_height);

        // if we already got a SWS context, let's realloc if is not re-useable
        ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
                                    src_width, src_height, pix_fmt,
                                    src_width, src_height, PIX_FMT_RGB32,
                                    sws_flags, NULL, NULL, NULL);
        if (ci->toRGB_convert_ctx == NULL) {
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot initialize the toRGB conversion context\n");
            return;
        }

        // img_convert parameters are          2 first destination, then 4 source
        // sws_scale   parameters are context, 4 first source,      then 2 destination
        sws_scale(ci->toRGB_convert_ctx,
                  picture->data, picture->linesize, 0, src_height,
                  picture1.data, picture1.linesize);

        pict = &picture1;
    }

    /* Insert filter code here */ /* ok */

    // Get me next frame
    if (0 > get_watermark_picture(ci, 0)) {
        return;
    }
    // These are the three original static variables in the ffmpeg hack.
    pFrameRGB = ci->pFrameRGB;
    xm_size = ci->x_size;
    ym_size = ci->y_size;

    // I'll do the *4 => <<2 crap later. Most compilers understand that anyway.
    // According to avcodec.h PIX_FMT_RGB32 is handled in endian specific manner.
    for (y=0; y<src_height; y++) {
        offs = y * (src_width * 4);
        offsm = (((y * ym_size) / src_height) * 4) * xm_size; // offsm first in maskline. byteoffs!
        for (x=0; x<src_width; x++) {
            mpoffs = offsm + (((x * xm_size) / src_width) * 4);
            p_pixel = (uint32_t *)&((pFrameRGB->data[0])[mpoffs]);
            pixelm = *p_pixel; /* watermark pixel */
            p_pixel = (uint32_t *)&((pict->data[0])[offs]);
            pixel = *p_pixel;

            if (((pixelm >> 16) & 0xff) > ci->thrR ||
                ((pixelm >>  8) & 0xff) > ci->thrG ||
                ((pixelm >>  0) & 0xff) > ci->thrB)
            {
                *p_pixel = pixelm;
            } else {
                *p_pixel = pixel;
            }
            offs += 4;
        } // foreach X
    } // foreach Y

    if (pix_fmt != PIX_FMT_RGB32) {
        ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
                                      src_width, src_height, PIX_FMT_RGB32,
                                      src_width, src_height, pix_fmt,
                                      sws_flags, NULL, NULL, NULL);
        if (ci->fromRGB_convert_ctx == NULL) {
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot initialize the fromRGB conversion context\n");
            return;
        }
        // img_convert parameters are          2 first destination, then 4 source
        // sws_scale   parameters are context, 4 first source,      then 2 destination
        sws_scale(ci->fromRGB_convert_ctx,
                  picture1.data, picture1.linesize, 0, src_height,
                  picture->data, picture->linesize);
    }

    av_free(buf);
}
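
/*
 * Worked example for MODE 1 (added for clarity): with '-t 222222' a frame
 * pixel is replaced by the watermark pixel as soon as any of the watermark's
 * R, G or B values exceeds 0x22; otherwise the frame pixel is left untouched.
 */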

/****************************************************************************
 * This is the function that ffmpeg.c calls back into.
 ****************************************************************************/
void Process(void *ctx,
             AVPicture *picture,
             enum PixelFormat pix_fmt,
             int src_width,
             int src_height,
             int64_t pts)
{
    ContextInfo *ci = (ContextInfo *) ctx;
    if (1 == ci->mode) {
        Process1(ctx, picture, pix_fmt, src_width, src_height, pts);
    } else {
        Process0(ctx, picture, pix_fmt, src_width, src_height, pts);
    }
}


/****************************************************************************
 * When cleanup == 0, we try to get the next frame. If there is no next frame,
 * nothing is done.
 *
 * This code follows the example on
 * http://www.inb.uni-luebeck.de/~boehme/using_libavcodec.html
 *
 * 0 = ok, -1 = error
 ****************************************************************************/
int get_watermark_picture(ContextInfo *ci, int cleanup)
{
    if (1 == ci->is_done && 0 == cleanup) return 0;

    // Yes, pFrameRGB must be NULL the first time, otherwise it's not good.
    // This block is only executed the first time we enter this function.
    if (0 == ci->pFrameRGB &&
        0 == cleanup)
    {

        /*
         * The last three parameters specify the file format, buffer size and format
         * parameters; by simply specifying NULL or 0 we ask libavformat to auto-detect
         * the format and use a default buffer size. (Didn't work!)
         */
        if (av_open_input_file(&ci->pFormatCtx, ci->filename, NULL, 0, NULL) != 0) {

            // Martin says this should not be necessary but it failed for me sending in
            // NULL instead of file_iformat to av_open_input_file()
            ci->i = strlen(ci->filename);
            if (0 == ci->i) {
                av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() No filename to watermark vhook\n");
                return -1;
            }
            while (ci->i > 0) {
                if (ci->filename[ci->i] == '.') {
                    ci->i++;
                    break;
                }
                ci->i--;
            }
            ci->p_ext = &(ci->filename[ci->i]);
            ci->file_iformat = av_find_input_format (ci->p_ext);
            if (0 == ci->file_iformat) {
                av_log(NULL, AV_LOG_INFO, "get_watermark_picture() attempt to use image2 for [%s]\n", ci->p_ext);
                ci->file_iformat = av_find_input_format ("image2");
            }
            if (0 == ci->file_iformat) {
                av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Really failed to find iformat [%s]\n", ci->p_ext);
                return -1;
            }
            // now continues the Martin template.

            if (av_open_input_file(&ci->pFormatCtx, ci->filename, ci->file_iformat, 0, NULL)!=0) {
                av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to open input file [%s]\n", ci->filename);
                return -1;
            }
        }

        /*
         * This fills the streams field of the AVFormatContext with valid information.
         */
        if(av_find_stream_info(ci->pFormatCtx)<0) {
            av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to find stream info\n");
            return -1;
        }

        /*
         * As mentioned in the introduction, we'll handle only video streams, not audio
         * streams. To make things nice and easy, we simply use the first video stream we
         * find.
         */
        ci->videoStream = -1;
        for(ci->i = 0; ci->i < ci->pFormatCtx->nb_streams; ci->i++)
            if(ci->pFormatCtx->streams[ci->i]->codec->codec_type==CODEC_TYPE_VIDEO)
            {
                ci->videoStream = ci->i;
                break;
            }
        if(ci->videoStream == -1) {
            av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to find any video stream\n");
            return -1;
        }

        ci->st = ci->pFormatCtx->streams[ci->videoStream];
        ci->x_size = ci->st->codec->width;
        ci->y_size = ci->st->codec->height;

        // Get a pointer to the codec context for the video stream
        ci->pCodecCtx = ci->pFormatCtx->streams[ci->videoStream]->codec;


        /*
         * OK, so now we've got a pointer to the so-called codec context for our video
         * stream, but we still have to find the actual codec and open it.
         */
        // Find the decoder for the video stream
        ci->pCodec = avcodec_find_decoder(ci->pCodecCtx->codec_id);
        if(ci->pCodec == NULL) {
            av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to find any codec\n");
            return -1;
        }

        // Open codec
        if(avcodec_open(ci->pCodecCtx, ci->pCodec)<0) {
            av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to open codec\n");
            return -1;
        }

        // Hack to correct wrong frame rates that seem to be generated by some
        // codecs
        if (ci->pCodecCtx->time_base.den>1000 && ci->pCodecCtx->time_base.num==1)
            ci->pCodecCtx->time_base.num=1000;

        /*
         * Allocate a video frame to store the decoded images in.
         */
        ci->pFrame = avcodec_alloc_frame();


        /*
         * The RGB image pFrameRGB (of type AVFrame *) is allocated like this:
         */
        // Allocate an AVFrame structure
        ci->pFrameRGB = avcodec_alloc_frame();
        if(ci->pFrameRGB==NULL) {
            av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to alloc pFrameRGB\n");
            return -1;
        }

        // Determine required buffer size and allocate buffer
        ci->numBytes = avpicture_get_size(PIX_FMT_RGB32, ci->pCodecCtx->width,
                                          ci->pCodecCtx->height);
        ci->buffer = av_malloc(ci->numBytes);

        // Assign appropriate parts of buffer to image planes in pFrameRGB
        avpicture_fill((AVPicture *)ci->pFrameRGB, ci->buffer, PIX_FMT_RGB32,
                       ci->pCodecCtx->width, ci->pCodecCtx->height);
    }
    // TODO loop, pingpong etc?
    if (0 == cleanup)
    {
//        av_log(NULL, AV_LOG_DEBUG, "get_watermark_picture() Get a frame\n");
        while(av_read_frame(ci->pFormatCtx, &ci->packet)>=0)
        {
            // Is this a packet from the video stream?
            if(ci->packet.stream_index == ci->videoStream)
            {
                // Decode video frame
                avcodec_decode_video(ci->pCodecCtx, ci->pFrame, &ci->frameFinished,
                                     ci->packet.data, ci->packet.size);

                // Did we get a video frame?
                if(ci->frameFinished)
                {
                    // Convert the image from its native format to RGB32
                    ci->watermark_convert_ctx =
                        sws_getCachedContext(ci->watermark_convert_ctx,
                            ci->pCodecCtx->width, ci->pCodecCtx->height, ci->pCodecCtx->pix_fmt,
                            ci->pCodecCtx->width, ci->pCodecCtx->height, PIX_FMT_RGB32,
                            sws_flags, NULL, NULL, NULL);
                    if (ci->watermark_convert_ctx == NULL) {
                        av_log(NULL, AV_LOG_ERROR,
                               "Cannot initialize the watermark conversion context\n");
                        return -1;
                    }
                    // img_convert parameters are          2 first destination, then 4 source
                    // sws_scale   parameters are context, 4 first source,      then 2 destination
                    sws_scale(ci->watermark_convert_ctx,
                              ci->pFrame->data, ci->pFrame->linesize, 0, ci->pCodecCtx->height,
                              ci->pFrameRGB->data, ci->pFrameRGB->linesize);

                    // Process the video frame (save to disk etc.)
                    //fprintf(stderr,"banan() New frame!\n");
                    //DoSomethingWithTheImage(ci->pFrameRGB);
                    return 0;
                }
            }

            // Free the packet that was allocated by av_read_frame
            av_free_packet(&ci->packet);
        }
        ci->is_done = 1;
        return 0;
    } // if 0 == cleanup

    if (0 != cleanup)
    {
        // Free the RGB image
        av_freep(&ci->buffer);
        av_freep(&ci->pFrameRGB);

        // Close the codec
        if (0 != ci->pCodecCtx) {
            avcodec_close(ci->pCodecCtx);
            ci->pCodecCtx = 0;
        }

        // Close the video file
        if (0 != ci->pFormatCtx) {
            av_close_input_file(ci->pFormatCtx);
            ci->pFormatCtx = 0;
        }

        ci->is_done = 0;
    }
    return 0;
}
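
/*
 * Note (added for clarity): each call advances to the next decoded frame of
 * the watermark file, so an animated watermark is applied frame by frame;
 * once the file is exhausted, is_done is set and the last decoded frame keeps
 * being reused for the remaining video frames.
 */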

void parse_arg_file(const char *filename)
{
}