@@ -1,13 +1,18 @@
 #ifndef OPENCV_GAPI_PIPELINE_MODELING_TOOL_PIPELINE_HPP
 #define OPENCV_GAPI_PIPELINE_MODELING_TOOL_PIPELINE_HPP
 
+#include <iomanip>
+
 struct PerfReport {
-    std::string name;
-    double  avg_latency       = 0.0;
-    double  throughput        = 0.0;
-    int64_t first_run_latency = 0;
-    int64_t elapsed           = 0;
-    int64_t compilation_time  = 0;
+    std::string name;
+    double      avg_latency     = 0.0;
+    int64_t     min_latency     = 0;
+    int64_t     max_latency     = 0;
+    int64_t     first_latency   = 0;
+    double      throughput      = 0.0;
+    int64_t     elapsed         = 0;
+    int64_t     warmup_time     = 0;
+    int64_t     num_late_frames = 0;
     std::vector<int64_t> latencies;
 
     std::string toStr(bool expanded = false) const;
@@ -15,17 +20,19 @@ struct PerfReport {
 
 std::string PerfReport::toStr(bool expand) const {
     std::stringstream ss;
-    ss << name << ": Compilation time: " << compilation_time << " ms; "
-       << "Average latency: " << avg_latency << " ms; Throughput: "
-       << throughput << " FPS; First latency: "
-       << first_run_latency << " ms";
-
+    ss << name << ": \n"
+       << "  Warm up time:   " << warmup_time << " ms\n"
+       << "  Execution time: " << elapsed << " ms\n"
+       << "  Frames:         " << num_late_frames << "/" << latencies.size() << " (late/all)\n"
+       << "  Latency:\n"
+       << "    first: " << first_latency << " ms\n"
+       << "    min:   " << min_latency << " ms\n"
+       << "    max:   " << max_latency << " ms\n"
+       << "    avg:   " << std::fixed << std::setprecision(3) << avg_latency << " ms\n"
+       << "  Throughput: " << std::fixed << std::setprecision(3) << throughput << " FPS";
     if (expand) {
-        ss << "\nTotal processed frames: " << latencies.size()
-           << "\nTotal elapsed time:     " << elapsed << " ms" << std::endl;
         for (size_t i = 0; i < latencies.size(); ++i) {
-            ss << std::endl;
-            ss << "Frame:" << i << "\nLatency: "
+            ss << "\nFrame:" << i << "\nLatency: "
                << latencies[i] << " ms";
         }
     }
@@ -37,11 +44,11 @@ class Pipeline {
 public:
     using Ptr = std::shared_ptr<Pipeline>;
 
-    Pipeline(std::string&&                       name,
-             cv::GComputation&&                  comp,
-             cv::gapi::wip::IStreamSource::Ptr&& src,
-             cv::GCompileArgs&&                  args,
-             const size_t                        num_outputs);
+    Pipeline(std::string&&                  name,
+             cv::GComputation&&             comp,
+             std::shared_ptr<DummySource>&& src,
+             cv::GCompileArgs&&             args,
+             const size_t                   num_outputs);
 
     void compile();
     void run(double work_time_ms);
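Note on the hunk above: the source parameter is narrowed from the generic cv::gapi::wip::IStreamSource::Ptr to the concrete std::shared_ptr<DummySource> so that Pipeline::run() (further down in this diff) can ask the source for its frame interval when counting late frames. The fragment below is only a sketch of the surface this change relies on, with assumed member names; the real DummySource is defined in the tool's dummy_source.hpp, also implements cv::gapi::wip::IStreamSource, and is not touched by this diff.

// Illustrative sketch only -- not the real DummySource. The one property
// this diff depends on is the per-frame interval the source tries to keep.
class DummySource /* : public cv::gapi::wip::IStreamSource (omitted here) */ {
public:
    // latency_ms is assumed to be 1000.0 / target_fps.
    explicit DummySource(const double latency_ms) : m_latency(latency_ms) {}

    // Frame interval in milliseconds; Pipeline::run() treats every frame
    // whose measured latency exceeds this value as "late".
    double latency() const { return m_latency; }

private:
    double m_latency;
};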
@@ -59,19 +66,19 @@ protected:
     virtual void _compile() = 0;
     virtual RunPerf _run(double work_time_ms) = 0;
 
-    std::string                        m_name;
-    cv::GComputation                   m_comp;
-    cv::gapi::wip::IStreamSource::Ptr  m_src;
-    cv::GCompileArgs                   m_args;
-    size_t                             m_num_outputs;
-    PerfReport                         m_perf;
+    std::string                  m_name;
+    cv::GComputation             m_comp;
+    std::shared_ptr<DummySource> m_src;
+    cv::GCompileArgs             m_args;
+    size_t                       m_num_outputs;
+    PerfReport                   m_perf;
 };
 
-Pipeline::Pipeline(std::string&&                       name,
-                   cv::GComputation&&                  comp,
-                   cv::gapi::wip::IStreamSource::Ptr&& src,
-                   cv::GCompileArgs&&                  args,
-                   const size_t                        num_outputs)
+Pipeline::Pipeline(std::string&&                  name,
+                   cv::GComputation&&             comp,
+                   std::shared_ptr<DummySource>&& src,
+                   cv::GCompileArgs&&             args,
+                   const size_t                   num_outputs)
     : m_name(std::move(name)),
       m_comp(std::move(comp)),
       m_src(std::move(src)),
@@ -81,7 +88,7 @@ Pipeline::Pipeline(std::string&& name,
 }
 
 void Pipeline::compile() {
-    m_perf.compilation_time =
+    m_perf.warmup_time =
         utils::measure<std::chrono::milliseconds>([this]() {
         _compile();
     });
@@ -90,17 +97,23 @@ void Pipeline::compile() {
 void Pipeline::run(double work_time_ms) {
     auto run_perf = _run(work_time_ms);
 
-    m_perf.elapsed = run_perf.elapsed;
-    m_perf.latencies = std::move(run_perf.latencies);
+    m_perf.elapsed       = run_perf.elapsed;
+    m_perf.latencies     = std::move(run_perf.latencies);
+    m_perf.avg_latency   = utils::avg(m_perf.latencies);
+    m_perf.min_latency   = utils::min(m_perf.latencies);
+    m_perf.max_latency   = utils::max(m_perf.latencies);
+    m_perf.first_latency = m_perf.latencies[0];
+
+    // NB: Count how many executions don't fit into camera latency interval.
+    m_perf.num_late_frames =
+        std::count_if(m_perf.latencies.begin(), m_perf.latencies.end(),
+                      [this](int64_t latency) {
+                          return static_cast<double>(latency) > m_src->latency();
+                      });
 
-    m_perf.avg_latency =
-        std::accumulate(m_perf.latencies.begin(),
-                        m_perf.latencies.end(),
-                        0.0) / static_cast<double>(m_perf.latencies.size());
     m_perf.throughput =
         (m_perf.latencies.size() / static_cast<double>(m_perf.elapsed)) * 1000;
 
-    m_perf.first_run_latency = m_perf.latencies[0];
 }
 
 const PerfReport& Pipeline::report() const {
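Note on the compile()/run() changes: the new bodies lean on small helpers from the tool's utils.hpp that this diff does not show. The sketch below is a minimal illustration of what such helpers could look like, with signatures inferred from the call sites (utils::measure<std::chrono::milliseconds>, utils::avg/min/max over std::vector<int64_t>); it is not the tool's actual implementation. As a worked example of the late-frame check above: a DummySource configured for 30 FPS reports a latency() interval of 1000 / 30 ≈ 33.3 ms, so any frame whose measured latency exceeds that interval is counted in num_late_frames.

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <numeric>
#include <vector>

namespace utils {

// Sketch: run f() and return how long it took, in the requested duration unit.
template <typename Duration, typename F>
int64_t measure(F&& f) {
    const auto start = std::chrono::high_resolution_clock::now();
    f();
    return std::chrono::duration_cast<Duration>(
               std::chrono::high_resolution_clock::now() - start).count();
}

// Sketch: basic statistics over the per-frame latencies collected by _run().
inline double avg(const std::vector<int64_t>& v) {
    return std::accumulate(v.begin(), v.end(), 0.0) / static_cast<double>(v.size());
}

inline int64_t min(const std::vector<int64_t>& v) {
    return *std::min_element(v.begin(), v.end());
}

inline int64_t max(const std::vector<int64_t>& v) {
    return *std::max_element(v.begin(), v.end());
}

} // namespace utils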