@@ -29,16 +29,16 @@
 #include "libavutil/opt.h"
 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
-#include "dualinput.h"
 #include "drawutils.h"
 #include "formats.h"
+#include "framesync2.h"
 #include "internal.h"
 #include "psnr.h"
 #include "video.h"
 
 typedef struct PSNRContext {
     const AVClass *class;
-    FFDualInputContext dinput;
+    FFFrameSync fs;
     double mse, min_mse, max_mse, mse_comp[4];
     uint64_t nb_frames;
     FILE *stats_file;
@@ -68,7 +68,7 @@ static const AVOption psnr_options[] = {
     { NULL }
 };
 
-AVFILTER_DEFINE_CLASS(psnr);
+FRAMESYNC_DEFINE_CLASS(psnr, PSNRContext, fs);
 
 static inline unsigned pow_2(unsigned base)
 {
@@ -142,13 +142,21 @@ static void set_meta(AVDictionary **metadata, const char *key, char comp, float
     }
 }
 
-static AVFrame *do_psnr(AVFilterContext *ctx, AVFrame *main,
-                        const AVFrame *ref)
+static int do_psnr(FFFrameSync *fs)
 {
+    AVFilterContext *ctx = fs->parent;
     PSNRContext *s = ctx->priv;
+    AVFrame *main, *ref;
     double comp_mse[4], mse = 0;
-    int j, c;
-    AVDictionary **metadata = &main->metadata;
+    int ret, j, c;
+    AVDictionary **metadata;
+
+    ret = ff_framesync2_dualinput_get(fs, &main, &ref);
+    if (ret < 0)
+        return ret;
+    if (!ref)
+        return ff_filter_frame(ctx->outputs[0], main);
+    metadata = &main->metadata;
 
     compute_images_mse(s, (const uint8_t **)main->data, main->linesize,
                        (const uint8_t **)ref->data, ref->linesize,
@@ -214,7 +222,7 @@ static AVFrame *do_psnr(AVFilterContext *ctx, AVFrame *main,
         fprintf(s->stats_file, "\n");
     }
 
-    return main;
+    return ff_filter_frame(ctx->outputs[0], main);
 }
 
 static av_cold int init(AVFilterContext *ctx)
@@ -245,7 +253,7 @@ static av_cold int init(AVFilterContext *ctx)
         }
     }
 
-    s->dinput.process = do_psnr;
+    s->fs.on_event = do_psnr;
     return 0;
 }
 
@@ -331,27 +339,24 @@ static int config_output(AVFilterLink *outlink)
     AVFilterLink *mainlink = ctx->inputs[0];
     int ret;
 
+    ret = ff_framesync2_init_dualinput(&s->fs, ctx);
+    if (ret < 0)
+        return ret;
     outlink->w = mainlink->w;
     outlink->h = mainlink->h;
     outlink->time_base = mainlink->time_base;
     outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
     outlink->frame_rate = mainlink->frame_rate;
-    if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+    if ((ret = ff_framesync2_configure(&s->fs)) < 0)
         return ret;
 
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
+static int activate(AVFilterContext *ctx)
 {
-    PSNRContext *s = inlink->dst->priv;
-    return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
-}
-
-static int request_frame(AVFilterLink *outlink)
-{
-    PSNRContext *s = outlink->src->priv;
-    return ff_dualinput_request_frame(&s->dinput, outlink);
+    PSNRContext *s = ctx->priv;
+    return ff_framesync2_activate(&s->fs);
 }
 
 static av_cold void uninit(AVFilterContext *ctx)
@@ -375,7 +380,7 @@ static av_cold void uninit(AVFilterContext *ctx)
                 get_psnr(s->min_mse, 1, s->average_max));
     }
 
-    ff_dualinput_uninit(&s->dinput);
+    ff_framesync2_uninit(&s->fs);
 
     if (s->stats_file && s->stats_file != stdout)
         fclose(s->stats_file);
@@ -385,11 +390,9 @@ static const AVFilterPad psnr_inputs[] = {
     {
         .name         = "main",
         .type         = AVMEDIA_TYPE_VIDEO,
-        .filter_frame = filter_frame,
     },{
         .name         = "reference",
         .type         = AVMEDIA_TYPE_VIDEO,
-        .filter_frame = filter_frame,
         .config_props = config_input_ref,
     },
     { NULL }
@@ -400,7 +403,6 @@ static const AVFilterPad psnr_outputs[] = {
         .name          = "default",
         .type          = AVMEDIA_TYPE_VIDEO,
         .config_props  = config_output,
-        .request_frame = request_frame,
     },
     { NULL }
 };
@@ -408,9 +410,11 @@
 AVFilter ff_vf_psnr = {
     .name          = "psnr",
     .description   = NULL_IF_CONFIG_SMALL("Calculate the PSNR between two video streams."),
+    .preinit       = psnr_framesync_preinit,
    .init          = init,
     .uninit        = uninit,
     .query_formats = query_formats,
+    .activate      = activate,
     .priv_size     = sizeof(PSNRContext),
     .priv_class    = &psnr_class,
     .inputs        = psnr_inputs,
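
Not part of the patch itself: the skeleton below is a minimal sketch of the framesync2 dualinput pattern that the conversion above follows, kept next to the diff for reviewers. The names xyz, XYZContext and do_xyz are placeholders invented for illustration; only framesync2 calls that already appear in the hunks above are used, and details such as real option tables and output propagation are elided.

/* Illustrative sketch only -- not part of vf_psnr.c or of this patch.
 * Placeholder names: XYZContext, xyz, do_xyz. */
#include "libavutil/opt.h"
#include "avfilter.h"
#include "framesync2.h"
#include "internal.h"

typedef struct XYZContext {
    const AVClass *class;
    FFFrameSync fs;               /* takes the place of FFDualInputContext dinput */
} XYZContext;

static const AVOption xyz_options[] = { { NULL } };

/* Defines xyz_framesync_preinit() (wired into AVFilter.preinit) and xyz_class. */
FRAMESYNC_DEFINE_CLASS(xyz, XYZContext, fs);

/* on_event callback: framesync delivers a synchronized main/reference pair. */
static int do_xyz(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFrame *main, *ref;
    int ret = ff_framesync2_dualinput_get(fs, &main, &ref);
    if (ret < 0)
        return ret;
    if (!ref)                     /* no reference available yet: pass main through */
        return ff_filter_frame(ctx->outputs[0], main);
    /* ... compare main against ref, attach metadata ... */
    return ff_filter_frame(ctx->outputs[0], main);
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    XYZContext *s = ctx->priv;
    int ret = ff_framesync2_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;
    s->fs.on_event = do_xyz;      /* vf_psnr assigns this in init() instead */
    /* ... copy w/h/time_base/frame_rate from ctx->inputs[0] ... */
    return ff_framesync2_configure(&s->fs);
}

/* A single activate() callback replaces the old filter_frame()/request_frame() pair. */
static int activate(AVFilterContext *ctx)
{
    XYZContext *s = ctx->priv;
    return ff_framesync2_activate(&s->fs);
}

static void uninit(AVFilterContext *ctx)
{
    XYZContext *s = ctx->priv;
    ff_framesync2_uninit(&s->fs);
}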