@@ -31,13 +31,13 @@
 #include "vidstabutils.h"
 
 typedef struct {
-    const AVClass* class;
+    const AVClass *class;
 
     VSTransformData td;
     VSTransformConfig conf;
 
     VSTransformations trans; // transformations
-    char* input; // name of transform file
+    char *input; // name of transform file
     int tripod;
 } TransformContext;
 
@@ -45,7 +45,7 @@ typedef struct {
 #define OFFSETC(x) (offsetof(TransformContext, conf)+offsetof(VSTransformConfig, x))
 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
 
-static const AVOption vidstabtransform_options[]= {
+static const AVOption vidstabtransform_options[] = {
     {"input", "path to the file storing the transforms (def:transforms.trf)", OFFSET(input),
      AV_OPT_TYPE_STRING, {.str = DEFAULT_INPUT_NAME} },
     {"smoothing", "number of frames*2 + 1 used for lowpass filtering (def: 10)", OFFSETC(smoothing),
@@ -62,7 +62,7 @@ static const AVOption vidstabtransform_options[]= {
      AV_OPT_TYPE_CONST, {.i64 = VSCropBorder }, 0, 0, FLAGS, "crop"},
     {"invert", "1: invert transforms (def: 0)", OFFSETC(invert),
      AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS},
-    {"relative", "consider transforms as 0: abslute, 1: relative (def)", OFFSETC(relative),
+    {"relative", "consider transforms as 0: absolute, 1: relative (def)", OFFSETC(relative),
      AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS},
     {"zoom", "percentage to zoom >0: zoom in, <0 zoom out (def: 0)", OFFSETC(zoom),
      AV_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, 100, FLAGS},
@@ -80,14 +80,14 @@ static const AVOption vidstabtransform_options[]= {
      AV_OPT_TYPE_CONST, {.i64 = VS_BiCubic },0, 0, FLAGS, "interpol"},
     {"tripod", "if 1: virtual tripod mode (equiv. to relative=0:smoothing=0)", OFFSET(tripod),
      AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS},
-    {NULL},
+    {NULL}
 };
 
 AVFILTER_DEFINE_CLASS(vidstabtransform);
 
 static av_cold int init(AVFilterContext *ctx)
 {
-    TransformContext* tc = ctx->priv;
+    TransformContext *tc = ctx->priv;
     vs_set_mem_and_log_functions();
     tc->class = &vidstabtransform_class;
     av_log(ctx, AV_LOG_VERBOSE, "vidstabtransform filter: init %s\n", LIBVIDSTAB_VERSION);
@@ -122,27 +122,27 @@ static int config_input(AVFilterLink *inlink)
 {
     AVFilterContext *ctx = inlink->dst;
     TransformContext *tc = ctx->priv;
-    FILE* f;
+    FILE *f;
 
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
 
-    VSTransformData* td = &(tc->td);
+    VSTransformData *td = &(tc->td);
 
     VSFrameInfo fi_src;
     VSFrameInfo fi_dest;
 
-    if(!vsFrameInfoInit(&fi_src, inlink->w, inlink->h,
-                        av_2_vs_pixel_format(ctx,inlink->format)) ||
+    if (!vsFrameInfoInit(&fi_src, inlink->w, inlink->h,
+                         av_2_vs_pixel_format(ctx, inlink->format)) ||
         !vsFrameInfoInit(&fi_dest, inlink->w, inlink->h,
-                         av_2_vs_pixel_format(ctx, inlink->format))){
+                         av_2_vs_pixel_format(ctx, inlink->format))) {
         av_log(ctx, AV_LOG_ERROR, "unknown pixel format: %i (%s)",
                inlink->format, desc->name);
         return AVERROR(EINVAL);
     }
 
-    if(fi_src.bytesPerPixel != av_get_bits_per_pixel(desc)/8 ||
+    if (fi_src.bytesPerPixel != av_get_bits_per_pixel(desc)/8 ||
        fi_src.log2ChromaW != desc->log2_chroma_w ||
-       fi_src.log2ChromaH != desc->log2_chroma_h){
+       fi_src.log2ChromaH != desc->log2_chroma_h) {
         av_log(ctx, AV_LOG_ERROR, "pixel-format error: bpp %i<>%i ",
                fi_src.bytesPerPixel, av_get_bits_per_pixel(desc)/8);
         av_log(ctx, AV_LOG_ERROR, "chroma_subsampl: w: %i<>%i h: %i<>%i\n",
@@ -154,18 +154,18 @@ static int config_input(AVFilterLink *inlink)
     // set values that are not initializes by the options
     tc->conf.modName = "vidstabtransform";
     tc->conf.verbose =1;
-    if(tc->tripod){
+    if (tc->tripod) {
         av_log(ctx, AV_LOG_INFO, "Virtual tripod mode: relative=0, smoothing=0");
-        tc->conf.relative=0;
-        tc->conf.smoothing=0;
+        tc->conf.relative = 0;
+        tc->conf.smoothing = 0;
     }
 
-    if(vsTransformDataInit(td, &tc->conf, &fi_src, &fi_dest) != VS_OK){
+    if (vsTransformDataInit(td, &tc->conf, &fi_src, &fi_dest) != VS_OK) {
         av_log(ctx, AV_LOG_ERROR, "initialization of vid.stab transform failed, please report a BUG\n");
         return AVERROR(EINVAL);
     }
 
-    vsTransformGetConfig(&tc->conf,td);
+    vsTransformGetConfig(&tc->conf, td);
     av_log(ctx, AV_LOG_INFO, "Video transformation/stabilization settings (pass 2/2):\n");
     av_log(ctx, AV_LOG_INFO, " input = %s\n", tc->input);
     av_log(ctx, AV_LOG_INFO, " smoothing = %d\n", tc->conf.smoothing);
@@ -184,13 +184,13 @@ static int config_input(AVFilterLink *inlink)
         return AVERROR(errno);
     } else {
         VSManyLocalMotions mlms;
-        if(vsReadLocalMotionsFile(f,&mlms)==VS_OK){
-            // calculate the actual transforms from the localmotions
-            if(vsLocalmotions2TransformsSimple(td, &mlms,&tc->trans)!=VS_OK){
+        if (vsReadLocalMotionsFile(f, &mlms) == VS_OK) {
+            // calculate the actual transforms from the local motions
+            if (vsLocalmotions2TransformsSimple(td, &mlms, &tc->trans) != VS_OK) {
                 av_log(ctx, AV_LOG_ERROR, "calculating transformations failed\n");
                 return AVERROR(EINVAL);
             }
-        }else{ // try to read old format
+        } else { // try to read old format
             if (!vsReadOldTransforms(td, f, &tc->trans)) { /* read input file */
                 av_log(ctx, AV_LOG_ERROR, "error parsing input file %s\n", tc->input);
                 return AVERROR(EINVAL);
@@ -199,7 +199,7 @@ static int config_input(AVFilterLink *inlink)
     }
     fclose(f);
 
-    if (vsPreprocessTransforms(td, &tc->trans)!= VS_OK ) {
+    if (vsPreprocessTransforms(td, &tc->trans) != VS_OK ) {
         av_log(ctx, AV_LOG_ERROR, "error while preprocessing transforms\n");
         return AVERROR(EINVAL);
     }
@@ -233,15 +233,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         av_frame_copy_props(out, in);
     }
 
-    for(plane=0; plane < vsTransformGetSrcFrameInfo(td)->planes; plane++){
+    for (plane = 0; plane < vsTransformGetSrcFrameInfo(td)->planes; plane++) {
         inframe.data[plane] = in->data[plane];
         inframe.linesize[plane] = in->linesize[plane];
     }
-    if(out == in){ // inplace
+    if (out == in) { // inplace
         vsTransformPrepare(td, &inframe, &inframe);
-    }else{ // seperate frames
+    } else { // separate frames
         VSFrame outframe;
-        for(plane=0; plane < vsTransformGetDestFrameInfo(td)->planes; plane++){
+        for (plane = 0; plane < vsTransformGetDestFrameInfo(td)->planes; plane++) {
             outframe.data[plane] = out->data[plane];
             outframe.linesize[plane] = out->linesize[plane];
         }
@@ -289,6 +289,4 @@ AVFilter avfilter_vf_vidstabtransform = {
     .inputs = avfilter_vf_vidstabtransform_inputs,
     .outputs = avfilter_vf_vidstabtransform_outputs,
     .priv_class = &vidstabtransform_class,
-
-
 };