Merge pull request #16081 from dmatveev:dm/ocv42_gapi_bugfixes

G-API: Fix various issues for 4.2 release

* G-API: Fix issues reported by Coverity

- Fixed: passing values by value instead of passing by reference

* G-API: Fix redundant std::move()'s in return statements

Fixes #15903

* G-API: Added a smarter handling of Stop messages in the pipeline

- This should fix the "expected 100, got 99 frames" problem
- Fixes #15882

* G-API: Pass enum instead of GKernelPackage in Streaming test parameters

- Likely fixes #15836

* G-API: Address review issues in new bugfix comments
pull/16130/head
Dmitry Matveev 5 years ago committed by Alexander Alekhin
parent c89780dfe0
commit 9a18330f3a
  1. 6
      modules/gapi/src/backends/fluid/gfluidbuffer.cpp
  2. 8
      modules/gapi/src/compiler/gmodel.cpp
  3. 8
      modules/gapi/src/compiler/gmodel.hpp
  4. 308
      modules/gapi/src/executor/gstreamingexecutor.cpp
  5. 12
      modules/gapi/src/executor/gstreamingexecutor.hpp
  6. 4
      modules/gapi/test/internal/gapi_int_pattern_matching_test.cpp
  7. 116
      modules/gapi/test/streaming/gapi_streaming_tests.cpp

@ -347,12 +347,12 @@ std::unique_ptr<fluid::BufferStorage> createStorage(int capacity, int desc_width
std::unique_ptr<fluid::BufferStorageWithBorder> storage(new BufferStorageWithBorder);
storage->init(type, border_size, border.value());
storage->create(capacity, desc_width, type);
return std::move(storage);
return storage;
}
std::unique_ptr<BufferStorageWithoutBorder> storage(new BufferStorageWithoutBorder);
storage->create(capacity, desc_width, type);
return std::move(storage);
return storage;
}
std::unique_ptr<BufferStorage> createStorage(const cv::gapi::own::Mat& data, cv::gapi::own::Rect roi);
@ -360,7 +360,7 @@ std::unique_ptr<BufferStorage> createStorage(const cv::gapi::own::Mat& data, cv:
{
std::unique_ptr<BufferStorageWithoutBorder> storage(new BufferStorageWithoutBorder);
storage->attach(data, roi);
return std::move(storage);
return storage;
}
} // namespace
} // namespace fluid

@ -118,7 +118,7 @@ void GModel::linkOut(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::s
op.outs[out_port] = RcDesc{gm.rc, gm.shape, {}};
}
std::vector<ade::NodeHandle> GModel::orderedInputs(ConstGraph &g, ade::NodeHandle nh)
std::vector<ade::NodeHandle> GModel::orderedInputs(const ConstGraph &g, ade::NodeHandle nh)
{
std::vector<ade::NodeHandle> sorted_in_nhs(nh->inEdges().size());
for (const auto& in_eh : nh->inEdges())
@ -130,7 +130,7 @@ std::vector<ade::NodeHandle> GModel::orderedInputs(ConstGraph &g, ade::NodeHandl
return sorted_in_nhs;
}
std::vector<ade::NodeHandle> GModel::orderedOutputs(ConstGraph &g, ade::NodeHandle nh)
std::vector<ade::NodeHandle> GModel::orderedOutputs(const ConstGraph &g, ade::NodeHandle nh)
{
std::vector<ade::NodeHandle> sorted_out_nhs(nh->outEdges().size());
for (const auto& out_eh : nh->outEdges())
@ -227,7 +227,7 @@ void GModel::redirectWriter(Graph &g, ade::NodeHandle from, ade::NodeHandle to)
linkOut(g, op, to, output.port);
}
GMetaArgs GModel::collectInputMeta(GModel::ConstGraph cg, ade::NodeHandle node)
GMetaArgs GModel::collectInputMeta(const GModel::ConstGraph &cg, ade::NodeHandle node)
{
GAPI_Assert(cg.metadata(node).get<NodeType>().t == NodeType::OP);
GMetaArgs in_meta_args(cg.metadata(node).get<Op>().args.size());
@ -254,7 +254,7 @@ ade::EdgeHandle GModel::getInEdgeByPort(const GModel::ConstGraph& cg,
return *edge;
}
GMetaArgs GModel::collectOutputMeta(GModel::ConstGraph cg, ade::NodeHandle node)
GMetaArgs GModel::collectOutputMeta(const GModel::ConstGraph &cg, ade::NodeHandle node)
{
GAPI_Assert(cg.metadata(node).get<NodeType>().t == NodeType::OP);
GMetaArgs out_meta_args(cg.metadata(node).get<Op>().outs.size());

@ -260,15 +260,15 @@ namespace GModel
GAPI_EXPORTS void redirectReaders(Graph &g, ade::NodeHandle from, ade::NodeHandle to);
GAPI_EXPORTS void redirectWriter (Graph &g, ade::NodeHandle from, ade::NodeHandle to);
GAPI_EXPORTS std::vector<ade::NodeHandle> orderedInputs (ConstGraph &g, ade::NodeHandle nh);
GAPI_EXPORTS std::vector<ade::NodeHandle> orderedOutputs(ConstGraph &g, ade::NodeHandle nh);
GAPI_EXPORTS std::vector<ade::NodeHandle> orderedInputs (const ConstGraph &g, ade::NodeHandle nh);
GAPI_EXPORTS std::vector<ade::NodeHandle> orderedOutputs(const ConstGraph &g, ade::NodeHandle nh);
// Returns input meta array for given op node
// Array is sparse, as metadata for non-gapi input objects is empty
// TODO:
// Cover with tests!!
GAPI_EXPORTS GMetaArgs collectInputMeta(GModel::ConstGraph cg, ade::NodeHandle node);
GAPI_EXPORTS GMetaArgs collectOutputMeta(GModel::ConstGraph cg, ade::NodeHandle node);
GAPI_EXPORTS GMetaArgs collectInputMeta(const GModel::ConstGraph &cg, ade::NodeHandle node);
GAPI_EXPORTS GMetaArgs collectOutputMeta(const GModel::ConstGraph &cg, ade::NodeHandle node);
GAPI_EXPORTS ade::EdgeHandle getInEdgeByPort(const GModel::ConstGraph& cg, const ade::NodeHandle& nh, std::size_t in_port);

@ -132,6 +132,177 @@ void sync_data(cv::GRunArgs &results, cv::GRunArgsP &outputs)
}
}
// Pops an item from every input queue and combine it to the final
// result. Blocks the current thread. Returns true if the vector has
// been obtained successfully and false if a Stop message has been
// received. Handles Stop x-queue synchronization gracefully.
//
// In fact, the logic behind this method is a little bit more complex.
// The complexity comes from handling the pipeline termination
// messages. This version of GStreamingExecutable is running every
// graph island in its own thread, and threads communicate via bounded
// (limited in size) queues. Threads poll their input queues in the
// infinite loops and pass the data to their Island executables when
// the full input vector (a "stack frame") arrives.
//
// If the input stream is over or stop() is called, "Stop" messages
// are broadcasted in the graph from island to island via queues,
// starting with the emitters (sources). Since queues are bounded,
// thread may block on push() if the queue is full already and is not
// popped for some reason in the reader thread. In order to avoid
// this, once an Island gets Stop on an input, it starts reading all
// other input queues until it reaches Stop messages there as well.
// Only then the thread terminates so in theory queues are left
// free'd.
//
// "Stop" messages are sent to the pipeline in these three cases:
// 1. User has called stop(): a "Stop" message is sent to every input
// queue.
// 2. Input video stream has reached its end -- its emitter sends Stop
// to its readers AND asks constant emitters (emitters attached to
// const data -- infinite data generators) to push Stop messages as
// well - in order to maintain a regular Stop procedure as defined
// above.
// 3. "Stop" message coming from a constant emitter after triggering an
// EOS notification -- see (2).
//
// There is a problem with (3). Sometimes it terminates the pipeline
// too early while some frames could still be produced with no issue,
// and our test fails with error like "got 99 frames, expected 100".
// This is how it reproduces:
//
// q1
// [const input] -----------------------> [ ISL2 ] --> [output]
// q0 q2 .->
// [stream input] ---> [ ISL1 ] -------'
//
// Video emitter is pushing frames to q0, and ISL1 is taking every
// frame from this queue and processes it. Meanwhile, q1 is a
// const-input-queue stuffed with const data already, ISL2 already
// popped one, and is waiting for data from q2 (of ISL1) to arrive.
//
// When the stream is over, stream emitter pushes the last frame to
// q0, followed by a Stop sign, and _immediately_ notifies const
// emitters to broadcast Stop messages as well. In the above
// configuration, the replicated Stop message via q1 may reach ISL2
// faster than the real Stop message via q2 -- moreover, somewhere in
// q1 or q2 there may be real frames awaiting processing. ISL2 gets
// Stop via q1 and _discards_ any pending data coming from q2 -- so a
// last frame or two may be lost.
//
// A working but not very elegant solution to this problem is to tag
// Stop messages. Stop got via stop() is really a hard stop, while
// broadcasted Stop from a Const input shouldn't initiate the Island
// execution termination. Instead, its associated const data should
// remain somewhere in islands' thread local storage until a real
// "Stop" is received.
//
// Queue reader is the class which encapsulates all this logic and
// provides threads with a managed storage and an easy API to obtain
// data.
// Encapsulates the input-queue reading logic shared by the island and
// collector threads, including the soft (CNST) vs hard Stop handling
// described in the comment block above.
class QueueReader
{
bool m_finishing = false; // Set to true once a "soft" stop is received
// Last command popped from every input queue. Kept as a member so the
// GRunArgs handed out via getInputVector() stay alive between calls
// (they reference data stored inside these Cmd objects).
std::vector<Cmd> m_cmd;
public:
// Pops one item from every queue and fills isl_inputs. Returns false
// once a hard Stop has been received (end of stream); true otherwise.
bool getInputVector(std::vector<Q*> &in_queues,
cv::GRunArgs &in_constants,
cv::GRunArgs &isl_inputs);
};
// Pops an item from every input queue and combines them into the final
// input vector. Blocks the calling thread. Returns true if the vector
// has been obtained successfully and false if a hard Stop has been
// received. See the comment block above for the full Stop protocol.
bool QueueReader::getInputVector(std::vector<Q*> &in_queues,
                                 cv::GRunArgs &in_constants,
                                 cv::GRunArgs &isl_inputs)
{
    // NOTE: in order to maintain the GRunArg's underlying object
    // lifetime, keep the whole cmd vector (of size == # of inputs)
    // in memory.
    m_cmd.resize(in_queues.size());
    isl_inputs.resize(in_queues.size());
    for (auto &&it : ade::util::indexed(in_queues))
    {
        auto id = ade::util::index(it);
        auto &q = ade::util::value(it);
        if (q == nullptr)
        {
            GAPI_Assert(!in_constants.empty());
            // NULL queue means a graph-constant value (like a
            // value-initialized scalar).
            // It can also hold a constant value received with a
            // Stop::Kind::CNST message (see below).
            // FIXME: Variant move problem
            isl_inputs[id] = const_cast<const cv::GRunArg&>(in_constants[id]);
            continue;
        }
        q->pop(m_cmd[id]);
        if (!cv::util::holds_alternative<Stop>(m_cmd[id]))
        {
            // FIXME: Variant move problem
            isl_inputs[id] = const_cast<const cv::GRunArg &>(cv::util::get<cv::GRunArg>(m_cmd[id]));
        }
        else // A Stop sign
        {
            // Deliberately non-const: cdata is moved out below, and a
            // const reference would silently turn std::move into a copy.
            auto &stop = cv::util::get<Stop>(m_cmd[id]);
            if (stop.kind == Stop::Kind::CNST)
            {
                // We've got a Stop signal from a const source,
                // propagated as a result of a real stream reaching its
                // end. Sometimes these signals come earlier than the
                // real EOS Stops so they are deprioritized -- just
                // remember the const value here and continue
                // processing other queues. Set the queue pointer to
                // nullptr and update the in_constants vector
                // appropriately.
                m_finishing = true;
                in_queues[id] = nullptr;
                in_constants.resize(in_queues.size());
                in_constants[id] = std::move(stop.cdata);
                // NEXT time (on a next call to getInputVector()), the
                // "q==nullptr" check above will be triggered, but now
                // we need to make it manually:
                isl_inputs[id] = const_cast<const cv::GRunArg&>(in_constants[id]);
            }
            else
            {
                GAPI_Assert(stop.kind == Stop::Kind::HARD);
                // Just got a stop sign. Reiterate through all
                // _remaining valid_ queues (some of them can be
                // set to nullptr already -- see above) and rewind
                // data to every Stop sign per queue.
                for (auto &&qit : ade::util::indexed(in_queues))
                {
                    auto id2 = ade::util::index(qit);
                    auto &q2 = ade::util::value(qit);
                    if (id == id2) continue;
                    Cmd cmd2;
                    while (q2 && !cv::util::holds_alternative<Stop>(cmd2))
                        q2->pop(cmd2);
                }
                // After queues are read to the proper indicator,
                // indicate end-of-stream.
                return false;
            } // if(Cnst)
        } // if(Stop)
    } // for(in_queues)
    if (m_finishing)
    {
        // If the process is about to end (a soft Stop was received
        // already) and an island has no other inputs than constant
        // inputs, its queues may all become nullptrs. Indicate it as
        // "no data".
        return !ade::util::all_of(in_queues, [](Q *ptr){return ptr == nullptr;});
    }
    return true; // A regular case - there is data to process.
}
// This thread is a plain dumb source actor. What it does is just:
// - Check input queue (the only one) for a control command
// - Depending on the state, obtains next data object and pushes it to the
@ -202,90 +373,62 @@ void islandActorThread(std::vector<cv::gimpl::RcDesc> in_rcs, //
cv::GMetaArgs out_metas, // ...
std::shared_ptr<cv::gimpl::GIslandExecutable> island, // FIXME: ...a copy of OpDesc{}.
std::vector<Q*> in_queues,
std::vector<cv::GRunArg> in_constants,
cv::GRunArgs in_constants,
std::vector< std::vector<Q*> > out_queues)
{
GAPI_Assert(in_queues.size() == in_rcs.size());
GAPI_Assert(out_queues.size() == out_rcs.size());
GAPI_Assert(out_queues.size() == out_metas.size());
QueueReader qr;
while (true)
{
std::vector<cv::gimpl::GIslandExecutable::InObj> isl_inputs;
isl_inputs.resize(in_rcs.size());
// Try to obtain the full input vector.
// Note this may block us. We also may get Stop signal here
// and then exit the thread.
// NOTE: in order to maintain the GRunArg's underlying object
// lifetime, keep the whole cmd vector (of size == # of inputs)
// in memory.
std::vector<Cmd> cmd(in_queues.size());
for (auto &&it : ade::util::indexed(in_queues))
cv::GRunArgs isl_input_args;
if (!qr.getInputVector(in_queues, in_constants, isl_input_args))
{
auto id = ade::util::index(it);
auto &q = ade::util::value(it);
isl_inputs[id].first = in_rcs[id];
if (q == nullptr)
// Stop received -- broadcast Stop down to the pipeline and quit
for (auto &&out_qq : out_queues)
{
// NULL queue means a graph-constant value
// (like a value-initialized scalar)
// FIXME: Variant move problem
isl_inputs[id].second = const_cast<const cv::GRunArg&>(in_constants[id]);
for (auto &&out_q : out_qq) out_q->push(Cmd{Stop{}});
}
else
{
q->pop(cmd[id]);
if (cv::util::holds_alternative<Stop>(cmd[id]))
{
// FIXME: This logic must be unified with what collectorThread is doing!
// Just got a stop sign. Reiterate through all queues
// and rewind data to every Stop sign per queue
for (auto &&qit : ade::util::indexed(in_queues))
{
auto id2 = ade::util::index(qit);
auto &q2 = ade::util::value(qit);
if (id == id2) continue;
Cmd cmd2;
while (q2 && !cv::util::holds_alternative<Stop>(cmd2))
q2->pop(cmd2);
}
// Broadcast Stop down to the pipeline and quit
for (auto &&out_qq : out_queues)
{
for (auto &&out_q : out_qq) out_q->push(Cmd{Stop{}});
}
return;
}
// FIXME: MOVE PROBLEM
const cv::GRunArg &in_arg = cv::util::get<cv::GRunArg>(cmd[id]);
return;
}
GAPI_Assert(isl_inputs.size() == isl_input_args.size());
for (auto &&it : ade::util::zip(ade::util::toRange(in_rcs),
ade::util::toRange(isl_inputs),
ade::util::toRange(isl_input_args)))
{
const auto &in_rc = std::get<0>(it);
auto &isl_input = std::get<1>(it);
const auto &in_arg = std::get<2>(it); // FIXME: MOVE PROBLEM
isl_input.first = in_rc;
#if defined(GAPI_STANDALONE)
// Standalone mode - simply store input argument in the vector as-is
isl_inputs[id].second = in_arg;
// Standalone mode - simply store input argument in the vector as-is
isl_inputs[id].second = in_arg;
#else
// Make Islands operate on own:: data types (i.e. in the same
// environment as GExecutor provides)
// This way several backends (e.g. Fluid) remain OpenCV-independent.
switch (in_arg.index()) {
case cv::GRunArg::index_of<cv::Mat>():
isl_inputs[id].second = cv::GRunArg{cv::to_own(cv::util::get<cv::Mat>(in_arg))};
break;
case cv::GRunArg::index_of<cv::Scalar>():
isl_inputs[id].second = cv::GRunArg{cv::to_own(cv::util::get<cv::Scalar>(in_arg))};
break;
default:
isl_inputs[id].second = in_arg;
break;
}
#endif // GAPI_STANDALONE
// Make Islands operate on own:: data types (i.e. in the same
// environment as GExecutor provides)
// This way several backends (e.g. Fluid) remain OpenCV-independent.
switch (in_arg.index()) {
case cv::GRunArg::index_of<cv::Mat>():
isl_input.second = cv::GRunArg{cv::to_own(cv::util::get<cv::Mat>(in_arg))};
break;
case cv::GRunArg::index_of<cv::Scalar>():
isl_input.second = cv::GRunArg{cv::to_own(cv::util::get<cv::Scalar>(in_arg))};
break;
default:
isl_input.second = in_arg;
break;
}
#endif // GAPI_STANDALONE
}
// Once the vector is obtained, prepare data for island execution
// Note - we first allocate output vector via GRunArg!
// Then it is converted to a GRunArgP.
std::vector<cv::gimpl::GIslandExecutable::OutObj> isl_outputs;
std::vector<cv::GRunArg> out_data;
cv::GRunArgs out_data;
isl_outputs.resize(out_rcs.size());
out_data.resize(out_rcs.size());
for (auto &&it : ade::util::indexed(out_rcs))
@ -363,33 +506,15 @@ void islandActorThread(std::vector<cv::gimpl::RcDesc> in_rcs, //
void collectorThread(std::vector<Q*> in_queues,
Q& out_queue)
{
QueueReader qr;
while (true)
{
cv::GRunArgs this_result(in_queues.size());
for (auto &&it : ade::util::indexed(in_queues))
cv::GRunArgs this_const(in_queues.size());
if (!qr.getInputVector(in_queues, this_const, this_result))
{
Cmd cmd;
ade::util::value(it)->pop(cmd);
if (cv::util::holds_alternative<Stop>(cmd))
{
// FIXME: Unify this code with island thread
for (auto &&qit : ade::util::indexed(in_queues))
{
if (ade::util::index(qit) == ade::util::index(it)) continue;
Cmd cmd2;
while (!cv::util::holds_alternative<Stop>(cmd2))
ade::util::value(qit)->pop(cmd2);
}
out_queue.push(Cmd{Stop{}});
return;
}
else
{
// FIXME: MOVE_PROBLEM
const cv::GRunArg &in_arg = cv::util::get<cv::GRunArg>(cmd);
this_result[ade::util::index(it)] = in_arg;
// FIXME: Check for other message types.
}
out_queue.push(Cmd{Stop{}});
return;
}
out_queue.push(Cmd{this_result});
}
@ -654,6 +779,7 @@ void cv::gimpl::GStreamingExecutor::setSource(GRunArgs &&ins)
// Create a constant emitter.
// Produces always the same ("constant") value when pulled.
emitter.reset(new ConstEmitter{emit_arg});
m_const_vals.push_back(const_cast<cv::GRunArg &>(emit_arg)); // FIXME: move problem
m_const_emitter_queues.push_back(&m_emitter_queues[emit_idx]);
break;
}
@ -664,9 +790,17 @@ void cv::gimpl::GStreamingExecutor::setSource(GRunArgs &&ins)
// all other inputs are "constant" generators.
// Craft here a completion callback to notify Const emitters that
// a video source is over
GAPI_Assert(m_const_emitter_queues.size() == m_const_vals.size());
auto real_video_completion_cb = [this]()
{
for (auto q : m_const_emitter_queues) q->push(Cmd{Stop{}});
for (auto it : ade::util::zip(ade::util::toRange(m_const_emitter_queues),
ade::util::toRange(m_const_vals)))
{
Stop stop;
stop.kind = Stop::Kind::CNST;
stop.cdata = std::get<1>(it);
std::get<0>(it)->push(Cmd{std::move(stop)});
}
};
// FIXME: ONLY now, after all executable objects are created,

@ -32,7 +32,13 @@ namespace gimpl {
namespace stream {
struct Start {};
struct Stop {};
struct Stop {
enum class Kind {
HARD, // a hard-stop: end-of-pipeline reached or stop() called
CNST, // a soft-stop emitted for/by constant sources (see QueueReader)
} kind = Kind::HARD;
cv::GRunArg cdata; // const data for CNST stop
};
using Cmd = cv::util::variant
< cv::util::monostate
@ -91,7 +97,7 @@ protected:
cv::GMetaArgs out_metas;
ade::NodeHandle nh;
std::vector<GRunArg> in_constants;
cv::GRunArgs in_constants;
std::shared_ptr<GIslandExecutable> isl_exec;
};
@ -104,6 +110,8 @@ protected:
};
std::vector<DataDesc> m_slots;
cv::GRunArgs m_const_vals;
// Order in these vectors follows the GComputaion's protocol
std::vector<ade::NodeHandle> m_emitters;
std::vector<ade::NodeHandle> m_sinks;

@ -47,12 +47,12 @@ void initGModel(ade::Graph& gr,
gm.metadata().set(p);
}
bool isConsumedBy(cv::gimpl::GModel::Graph gm, ade::NodeHandle data_nh, ade::NodeHandle op_nh) {
bool isConsumedBy(const cv::gimpl::GModel::ConstGraph &gm, ade::NodeHandle data_nh, ade::NodeHandle op_nh) {
auto oi = cv::gimpl::GModel::orderedInputs(gm, op_nh);
return std::find(oi.begin(), oi.end(), data_nh) != oi.end();
}
std::string opName(cv::gimpl::GModel::Graph gm, ade::NodeHandle op_nh) {
std::string opName(const cv::gimpl::GModel::ConstGraph &gm, ade::NodeHandle op_nh) {
return gm.metadata(op_nh).get<cv::gimpl::Op>().k.name;
}

@ -39,47 +39,65 @@ void initTestDataPath()
#endif // WINRT
}
cv::gapi::GKernelPackage OCV_KERNELS()
enum class KernelPackage: int
{
static cv::gapi::GKernelPackage pkg =
cv::gapi::combine(cv::gapi::core::cpu::kernels(),
cv::gapi::imgproc::cpu::kernels());
return pkg;
}
cv::gapi::GKernelPackage OCV_FLUID_KERNELS()
{
static cv::gapi::GKernelPackage pkg =
cv::gapi::combine(OCV_KERNELS(),
cv::gapi::core::fluid::kernels());
return pkg;
}
#if 0
// FIXME: OpenCL backend seem to work fine with Streaming
// however the results are not very bit exact with CPU
// It may be a problem but may be just implementation innacuracy.
// Need to customize the comparison function in tests where OpenCL
// is involved.
cv::gapi::GKernelPackage OCL_KERNELS()
{
static cv::gapi::GKernelPackage pkg =
cv::gapi::combine(cv::gapi::core::ocl::kernels(),
cv::gapi::imgproc::ocl::kernels());
return pkg;
}
cv::gapi::GKernelPackage OCL_FLUID_KERNELS()
OCV,
OCV_FLUID,
OCL,
OCL_FLUID,
};
// Pretty-printer for the test parameter; GTest uses it to generate
// readable parameterized test names.
std::ostream& operator<< (std::ostream &os, const KernelPackage &e)
{
    switch (e)
    {
#define _C(X) case KernelPackage::X: os << #X; break
    _C(OCV);
    _C(OCV_FLUID);
    _C(OCL);
    _C(OCL_FLUID);
#undef _C
    default: GAPI_Assert(false); // unknown enumerator -- test bug
    }
    return os;
}
#endif // 0
struct GAPI_Streaming: public ::testing::TestWithParam<cv::gapi::GKernelPackage> {
// Fixture for the streaming smoke tests. Parameterized by the
// KernelPackage enum (not by a GKernelPackage value directly) so the
// parameter is cheap to copy and printable in test names.
struct GAPI_Streaming: public ::testing::TestWithParam<KernelPackage> {
    GAPI_Streaming() { initTestDataPath(); }

    // Materialize the kernel package selected by the current test
    // parameter. Throws std::logic_error on an unknown enum value.
    cv::gapi::GKernelPackage getKernelPackage()
    {
        using namespace cv::gapi;
        switch (GetParam())
        {
        case KernelPackage::OCV:
            return cv::gapi::combine(core::cpu::kernels(),
                                     imgproc::cpu::kernels());
        case KernelPackage::OCV_FLUID:
            return cv::gapi::combine(core::cpu::kernels(),
                                     imgproc::cpu::kernels(),
                                     core::fluid::kernels());
        // FIXME: OpenCL backend seems to work fine with Streaming,
        // however the results are not very bit-exact with CPU.
        // It may be a problem but may be just implementation inaccuracy.
        // Need to customize the comparison function in tests where OpenCL
        // is involved.
        case KernelPackage::OCL:
            return cv::gapi::combine(core::ocl::kernels(),
                                     imgproc::ocl::kernels());
        case KernelPackage::OCL_FLUID:
            return cv::gapi::combine(core::ocl::kernels(),
                                     imgproc::ocl::kernels(),
                                     core::fluid::kernels());
        }
        throw std::logic_error("Unknown package");
    }
};
} // anonymous namespace
@ -122,7 +140,7 @@ TEST_P(GAPI_Streaming, SmokeTest_ConstInput_GMat)
// Compilation & testing
auto ccomp = c.compileStreaming(cv::descr_of(in_mat),
cv::compile_args(cv::gapi::use_only{GetParam()}));
cv::compile_args(cv::gapi::use_only{getKernelPackage()}));
EXPECT_TRUE(ccomp);
EXPECT_FALSE(ccomp.running());
@ -167,7 +185,7 @@ TEST_P(GAPI_Streaming, SmokeTest_VideoInput_GMat)
// Compilation & testing
auto ccomp = c.compileStreaming(cv::GMatDesc{CV_8U,3,cv::Size{768,576}},
cv::compile_args(cv::gapi::use_only{GetParam()}));
cv::compile_args(cv::gapi::use_only{getKernelPackage()}));
EXPECT_TRUE(ccomp);
EXPECT_FALSE(ccomp.running());
@ -213,7 +231,7 @@ TEST_P(GAPI_Streaming, Regression_CompileTimeScalar)
cv::GComputation c(cv::GIn(in), cv::GOut(tmp, tmp + 1));
auto ccomp = c.compileStreaming(cv::GMatDesc{CV_8U,3,cv::Size{768,512}},
cv::compile_args(cv::gapi::use_only{GetParam()}));
cv::compile_args(cv::gapi::use_only{getKernelPackage()}));
cv::Mat in_mat = cv::imread(findDataFile("cv/edgefilter/kodim23.png"));
cv::Mat out_mat1, out_mat2;
@ -236,7 +254,7 @@ TEST_P(GAPI_Streaming, SmokeTest_StartRestart)
cv::GComputation c(cv::GIn(in), cv::GOut(cv::gapi::copy(in), out));
auto ccomp = c.compileStreaming(cv::GMatDesc{CV_8U,3,cv::Size{768,576}},
cv::compile_args(cv::gapi::use_only{GetParam()}));
cv::compile_args(cv::gapi::use_only{getKernelPackage()}));
EXPECT_TRUE(ccomp);
EXPECT_FALSE(ccomp.running());
@ -273,7 +291,7 @@ TEST_P(GAPI_Streaming, SmokeTest_VideoConstSource_NoHang)
cv::GMat in;
return cv::GComputation(in, cv::gapi::copy(in));
}).compileStreaming(cv::GMatDesc{CV_8U,3,cv::Size{768,576}},
cv::compile_args(cv::gapi::use_only{GetParam()}));
cv::compile_args(cv::gapi::use_only{getKernelPackage()}));
refc.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(findDataFile("cv/video/768x576.avi")));
refc.start();
@ -290,7 +308,7 @@ TEST_P(GAPI_Streaming, SmokeTest_VideoConstSource_NoHang)
auto testc = cv::GComputation(cv::GIn(in, in2), cv::GOut(out))
.compileStreaming(cv::GMatDesc{CV_8U,3,cv::Size{256,256}},
cv::GMatDesc{CV_8U,3,cv::Size{768,576}},
cv::compile_args(cv::gapi::use_only{GetParam()}));
cv::compile_args(cv::gapi::use_only{getKernelPackage()}));
cv::Mat in_const = cv::Mat::eye(cv::Size(256,256), CV_8UC3);
testc.setSource(cv::gin(in_const,
@ -311,7 +329,7 @@ TEST_P(GAPI_Streaming, SmokeTest_AutoMeta)
cv::GMat out = blr - in;
auto testc = cv::GComputation(cv::GIn(in, in2), cv::GOut(out))
.compileStreaming(cv::compile_args(cv::gapi::use_only{GetParam()}));
.compileStreaming(cv::compile_args(cv::gapi::use_only{getKernelPackage()}));
cv::Mat in_const = cv::Mat::eye(cv::Size(256,256), CV_8UC3);
cv::Mat tmp;
@ -345,7 +363,7 @@ TEST_P(GAPI_Streaming, SmokeTest_AutoMeta_2xConstMat)
cv::GMat out = blr - in;
auto testc = cv::GComputation(cv::GIn(in, in2), cv::GOut(out))
.compileStreaming(cv::compile_args(cv::gapi::use_only{GetParam()}));
.compileStreaming(cv::compile_args(cv::gapi::use_only{getKernelPackage()}));
cv::Mat in_const = cv::Mat::eye(cv::Size(256,256), CV_8UC3);
cv::Mat tmp;
@ -376,7 +394,7 @@ TEST_P(GAPI_Streaming, SmokeTest_AutoMeta_VideoScalar)
cv::GMat out_m = in_m * in_s;
auto testc = cv::GComputation(cv::GIn(in_m, in_s), cv::GOut(out_m))
.compileStreaming(cv::compile_args(cv::gapi::use_only{GetParam()}));
.compileStreaming(cv::compile_args(cv::gapi::use_only{getKernelPackage()}));
cv::Mat tmp;
// Test with one video source and scalar
@ -399,10 +417,10 @@ TEST_P(GAPI_Streaming, SmokeTest_AutoMeta_VideoScalar)
}
INSTANTIATE_TEST_CASE_P(TestStreaming, GAPI_Streaming,
Values( OCV_KERNELS()
//, OCL_KERNELS() // FIXME: Fails bit-exactness check, maybe relax it?
, OCV_FLUID_KERNELS()
//, OCL_FLUID_KERNELS() // FIXME: Fails bit-exactness check, maybe relax it?
Values( KernelPackage::OCV
//, KernelPackage::OCL // FIXME: Fails bit-exactness check, maybe relax it?
, KernelPackage::OCV_FLUID
//, KernelPackage::OCL_FLUID // FIXME: Fails bit-exactness check, maybe relax it?
));
namespace TypesTest

Loading…
Cancel
Save