mirror of https://github.com/opencv/opencv.git
Merge pull request #15869 from TolyaTalamanov:at/plaidml-backend
G-API: Implement PlaidML2 backend

* PlaidML backend init version
* Add test
* Support multiple inputs/outputs in PlaidML2 backend
* Fix comments to review
* Add HAVE_PLAIDML macros
* Move plaidml tests to separate file
* Fix cmake warning
* Fix comments to review
* Fix typos: overload -> overflow
* Clean up
* Remove spaces from cmake scripts
* Disable tests with bitwise operations
* Use plaidml::exec::Binder
parent fb5e7964b3
commit a7acb8805f
16 changed files with 945 additions and 4 deletions
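A minimal usage sketch distilled from the tests added by this PR (assuming OpenCV is built with HAVE_PLAIDML; the device/target strings below are placeholders, the tests read them from the PLAIDML_DEVICE / PLAIDML_TARGET environment variables):

#include <opencv2/core.hpp>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/core.hpp>
#include <opencv2/gapi/operators.hpp>       // GMat arithmetic operators
#include <opencv2/gapi/plaidml/core.hpp>    // cv::gapi::core::plaidml::kernels()
#include <opencv2/gapi/plaidml/plaidml.hpp> // cv::gapi::plaidml::config

int main()
{
    cv::Size size(1920, 1080);
    cv::Mat in_mat1(size, CV_8UC1), in_mat2(size, CV_8UC1);
    cv::randu(in_mat1, cv::Scalar::all(0), cv::Scalar::all(127));
    cv::randu(in_mat2, cv::Scalar::all(0), cv::Scalar::all(127));
    cv::Mat out_mat(size, CV_8UC1, cv::Scalar::all(0));

    // Express the computation with regular G-API operations...
    cv::GMat in1, in2;
    auto out = in1 + in2;
    cv::GComputation comp(cv::GIn(in1, in2), cv::GOut(out));

    // ...and route it to PlaidML by passing the backend config and its kernel
    // package as compile arguments. "llvm_cpu.0" / "llvm_cpu" are placeholder
    // device/target ids; real values depend on the local PlaidML installation.
    cv::gapi::plaidml::config cfg{"llvm_cpu.0", "llvm_cpu"};
    comp.apply(cv::gin(in_mat1, in_mat2), cv::gout(out_mat),
               cv::compile_args(cfg,
                                cv::gapi::use_only{cv::gapi::core::plaidml::kernels()}));
    return 0;
}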
@@ -0,0 +1,20 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation


#ifndef OPENCV_GAPI_PLAIDML_CORE_HPP
#define OPENCV_GAPI_PLAIDML_CORE_HPP

#include <opencv2/gapi/gkernel.hpp>     // GKernelPackage
#include <opencv2/gapi/own/exports.hpp> // GAPI_EXPORTS

namespace cv { namespace gapi { namespace core { namespace plaidml {

GAPI_EXPORTS GKernelPackage kernels();

}}}}

#endif // OPENCV_GAPI_PLAIDML_CORE_HPP
@@ -0,0 +1,140 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation
//


#ifndef OPENCV_GAPI_GPLAIDMLKERNEL_HPP
#define OPENCV_GAPI_GPLAIDMLKERNEL_HPP

#include <opencv2/gapi/gkernel.hpp>
#include <opencv2/gapi/garg.hpp>

namespace plaidml
{
namespace edsl
{
class Tensor;
} // namespace edsl
} // namespace plaidml

namespace cv
{
namespace gapi
{
namespace plaidml
{

GAPI_EXPORTS cv::gapi::GBackend backend();

} // namespace plaidml
} // namespace gapi

struct GPlaidMLContext
{
    // Generic accessor API
    template<typename T>
    const T& inArg(int input) { return m_args.at(input).get<T>(); }

    // Syntax sugar
    const plaidml::edsl::Tensor& inTensor(int input)
    {
        return inArg<plaidml::edsl::Tensor>(input);
    }

    plaidml::edsl::Tensor& outTensor(int output)
    {
        return *(m_results.at(output).get<plaidml::edsl::Tensor*>());
    }

    std::vector<GArg> m_args;
    std::unordered_map<std::size_t, GArg> m_results;
};

class GAPI_EXPORTS GPlaidMLKernel
{
public:
    using F = std::function<void(GPlaidMLContext &)>;

    GPlaidMLKernel() = default;
    explicit GPlaidMLKernel(const F& f) : m_f(f) {};

    void apply(GPlaidMLContext &ctx) const
    {
        GAPI_Assert(m_f);
        m_f(ctx);
    }

protected:
    F m_f;
};


namespace detail
{

template<class T> struct plaidml_get_in;
template<> struct plaidml_get_in<cv::GMat>
{
    static const plaidml::edsl::Tensor& get(GPlaidMLContext& ctx, int idx)
    {
        return ctx.inTensor(idx);
    }
};

template<class T> struct plaidml_get_in
{
    static T get(GPlaidMLContext &ctx, int idx) { return ctx.inArg<T>(idx); }
};

template<class T> struct plaidml_get_out;
template<> struct plaidml_get_out<cv::GMat>
{
    static plaidml::edsl::Tensor& get(GPlaidMLContext& ctx, int idx)
    {
        return ctx.outTensor(idx);
    }
};

template<typename, typename, typename>
struct PlaidMLCallHelper;

template<typename Impl, typename... Ins, typename... Outs>
struct PlaidMLCallHelper<Impl, std::tuple<Ins...>, std::tuple<Outs...> >
{
    template<int... IIs, int... OIs>
    static void call_impl(GPlaidMLContext &ctx, detail::Seq<IIs...>, detail::Seq<OIs...>)
    {
        Impl::run(plaidml_get_in<Ins>::get(ctx, IIs)..., plaidml_get_out<Outs>::get(ctx, OIs)...);
    }

    static void call(GPlaidMLContext& ctx)
    {
        call_impl(ctx,
                  typename detail::MkSeq<sizeof...(Ins)>::type(),
                  typename detail::MkSeq<sizeof...(Outs)>::type());
    }
};

} // namespace detail

template<class Impl, class K>
class GPlaidMLKernelImpl: public cv::detail::PlaidMLCallHelper<Impl, typename K::InArgs, typename K::OutArgs>,
                          public cv::detail::KernelTag
{
    using P = detail::PlaidMLCallHelper<Impl, typename K::InArgs, typename K::OutArgs>;

public:
    using API = K;

    static cv::gapi::GBackend backend() { return cv::gapi::plaidml::backend(); }
    static cv::GPlaidMLKernel  kernel() { return GPlaidMLKernel(&P::call);     }
};

#define GAPI_PLAIDML_KERNEL(Name, API) struct Name: public cv::GPlaidMLKernelImpl<Name, API>

} // namespace cv

#endif // OPENCV_GAPI_GPLAIDMLKERNEL_HPP
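To show how the pieces above fit together, here is a hand-written kernel for an existing core operation, spelled out without the helper macros used later in gplaidmlcore.cpp. This is a sketch only; the name MyPlaidMLAnd is not part of the patch.

#include <opencv2/gapi/core.hpp>                   // cv::gapi::core::GAnd
#include <opencv2/gapi/plaidml/gplaidmlkernel.hpp> // GAPI_PLAIDML_KERNEL
#include <plaidml2/edsl/edsl.h>                    // plaidml::edsl::Tensor

// GAnd is declared as GMat(GMat, GMat), so run() gets one const Tensor per GMat
// input and a mutable Tensor reference per GMat output. The body only builds the
// EDSL expression; actual execution is deferred to GPlaidMLExecutable.
GAPI_PLAIDML_KERNEL(MyPlaidMLAnd, cv::gapi::core::GAnd)
{
    static void run(const plaidml::edsl::Tensor& src1,
                    const plaidml::edsl::Tensor& src2,
                    plaidml::edsl::Tensor& dst)
    {
        dst = src1 & src2;
    }
};

// It is then supplied to a graph like any other kernel implementation:
// cv::compile_args(cfg, cv::gapi::use_only{cv::gapi::kernels<MyPlaidMLAnd>()});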
@@ -0,0 +1,40 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation


#ifndef OPENCV_GAPI_PLAIDML_PLAIDML_HPP
#define OPENCV_GAPI_PLAIDML_PLAIDML_HPP

#include <string>
#include <opencv2/gapi/gcommon.hpp> // CompileArgTag

namespace cv
{
namespace gapi
{
namespace plaidml
{

struct config
{
    std::string dev_id;
    std::string trg_id;
};

} // namespace plaidml
} // namespace gapi

namespace detail
{
template<> struct CompileArgTag<cv::gapi::plaidml::config>
{
    static const char* tag() { return "gapi.plaidml.config"; }
};
} // namespace detail

} // namespace cv

#endif // OPENCV_GAPI_PLAIDML_PLAIDML_HPP
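The CompileArgTag specialization above is what makes the config discoverable inside GCompileArgs. A rough sketch of the mechanism follows; the backend itself uses the internal cv::gimpl::getCompileArg helper (see gplaidmlbackend.cpp below), and the helper name and device/target strings here are placeholders.

#include <opencv2/gapi/gcommon.hpp>         // GCompileArgs, compile_args, CompileArgTag
#include <opencv2/gapi/plaidml/plaidml.hpp> // cv::gapi::plaidml::config
#include <opencv2/gapi/util/optional.hpp>

static cv::util::optional<cv::gapi::plaidml::config>
find_plaidml_config(const cv::GCompileArgs& args)
{
    // Every GCompileArg carries the tag produced by its CompileArgTag<>
    // specialization; matching on "gapi.plaidml.config" recovers the struct.
    for (const auto& arg : args)
    {
        if (arg.tag == cv::detail::CompileArgTag<cv::gapi::plaidml::config>::tag())
            return cv::util::optional<cv::gapi::plaidml::config>(
                       arg.get<cv::gapi::plaidml::config>());
    }
    return {};
}

// Usage: auto args = cv::compile_args(cv::gapi::plaidml::config{"dev", "target"});
//        auto cfg  = find_plaidml_config(args);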
@@ -0,0 +1,305 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation


#ifdef HAVE_PLAIDML

#include "precomp.hpp"

#include <ade/util/algorithm.hpp>
#include <ade/util/range.hpp>
#include <ade/util/zip_range.hpp>
#include <ade/typed_graph.hpp>

#include <opencv2/gapi/gcommon.hpp>
#include <opencv2/gapi/util/any.hpp>
#include <opencv2/gapi/gtype_traits.hpp>
#include <opencv2/gapi/plaidml/plaidml.hpp>

#include "compiler/gobjref.hpp"
#include "compiler/gmodel.hpp"

#include "backends/plaidml/gplaidmlbackend.hpp"
#include "backends/plaidml/plaidml_util.hpp"

#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK!

using GPlaidMLModel = ade::TypedGraph
    < cv::gimpl::PlaidMLUnit
    , cv::gimpl::Protocol
    >;

// FIXME: Same issue with Typed and ConstTyped
using GConstGPlaidMLModel = ade::ConstTypedGraph
    < cv::gimpl::PlaidMLUnit
    , cv::gimpl::Protocol
    >;

namespace
{
class GPlaidMLBackendImpl final: public cv::gapi::GBackend::Priv
{
    virtual void unpackKernel(ade::Graph &graph,
                              const ade::NodeHandle &op_node,
                              const cv::GKernelImpl &impl) override
    {
        GPlaidMLModel gm(graph);
        auto plaidml_impl = cv::util::any_cast<cv::GPlaidMLKernel>(impl.opaque);
        gm.metadata(op_node).set(cv::gimpl::PlaidMLUnit{plaidml_impl});
    }

    virtual EPtr compile(const ade::Graph& graph,
                         const cv::GCompileArgs& args,
                         const std::vector<ade::NodeHandle>& nodes,
                         const std::vector<cv::gimpl::Data>& ins_data,
                         const std::vector<cv::gimpl::Data>& outs_data) const override
    {
        auto has_config = cv::gimpl::getCompileArg<cv::gapi::plaidml::config>(args);

        if (!has_config)
        {
            cv::util::throw_error(std::runtime_error("Config not found!\n"
                                                     "You must pass cv::gapi::plaidml::config to the graph compile arguments"));
        }

        const auto& arg = has_config.value();
        return EPtr{new cv::gimpl::GPlaidMLExecutable(cv::gimpl::GPlaidMLExecutable::Config{arg.dev_id, arg.trg_id},
                                                      graph, nodes, ins_data, outs_data)};
    }
};
}

cv::gapi::GBackend cv::gapi::plaidml::backend()
{
    static cv::gapi::GBackend this_backend(std::make_shared<GPlaidMLBackendImpl>());
    return this_backend;
}

void cv::gimpl::GPlaidMLExecutable::initBuffers(const std::vector<cv::gimpl::Data>& data,
                                                std::vector<plaidml::exec::Binding>& bindings)
{
    // NB: This is necessary because we keep pointers to bindings' elements in buffer_map.
    // In order for them to remain valid, reallocation must be prevented.
    bindings.reserve(data.size());
    for (const auto& d : data)
    {
        GAPI_Assert(d.shape == GShape::GMAT &&
                    "The PlaidML backend currently supports only cv::GMat");

        const auto& desc = cv::util::get<cv::GMatDesc>(d.meta);

        auto placeholder = plaidml::edsl::Placeholder(
                cv::util::plaidml::depth_from_ocv(desc.depth),
                {desc.size.width, desc.size.height, desc.chan});

        const auto& shape = placeholder.shape();
        plaidml::TensorShape tshape(shape.dtype(), shape.int_dims());
        plaidml::Buffer buffer(m_cfg.dev_id, tshape);

        bindings.push_back(plaidml::exec::Binding{std::move(placeholder),
                                                  std::move(buffer)});

        auto& tensor_map = m_res.slot<plaidml::edsl::Tensor>();
        // FIXME: Avoid copy here !!!
        tensor_map.emplace(d.rc, bindings.back().tensor);

        auto& buffer_map = m_res.slot<plaidml::Buffer*>();
        buffer_map.emplace(d.rc, &(bindings.back().buffer));
    }
}

void cv::gimpl::GPlaidMLExecutable::compile(const std::vector<cv::gimpl::Data>& ins_data,
                                            const std::vector<cv::gimpl::Data>& outs_data)
{
    initBuffers(ins_data,  input_bindings_);
    initBuffers(outs_data, output_bindings_);

    ade::util::transform(outs_data, std::back_inserter(output_ids_),
                         [](const cv::gimpl::Data& d) { return d.rc; });

    GConstGPlaidMLModel gcm(m_g);
    for (const auto& nh : m_all_ops)
    {
        const auto& k = gcm.metadata(nh).get<PlaidMLUnit>().k;
        GPlaidMLContext ctx;

        const auto &op = m_gm.metadata(nh).get<Op>();
        ctx.m_args.reserve(op.args.size());

        using namespace std::placeholders;
        ade::util::transform(op.args,
                             std::back_inserter(ctx.m_args),
                             std::bind(&GPlaidMLExecutable::packArg, this, _1));

        for (const auto &out_it : ade::util::indexed(op.outs))
        {
            const auto out_port = ade::util::index(out_it);
            const auto out_desc = ade::util::value(out_it);

            auto& tensor_map = m_res.slot<plaidml::edsl::Tensor>();

            // NB: Create the tensor if it doesn't exist yet
            auto& tensor = tensor_map[out_desc.id];
            ctx.m_results[out_port] = GArg(&(tensor));
        }

        k.apply(ctx);
    }

    std::vector<plaidml::edsl::Tensor> output_tensors;
    for (const auto& out_id : output_ids_)
    {
        auto& tensor_map = m_res.slot<plaidml::edsl::Tensor>();
        // FIXME: Avoid copy here !!!
        output_tensors.emplace_back(tensor_map[out_id]);
    }

    plaidml::edsl::Program program("Program", output_tensors);
    binder_.reset(new plaidml::exec::Binder(program));

    for (const auto& binding : input_bindings_)
    {
        binder_->set_input(binding.tensor, binding.buffer);
    }

    for (const auto& binding : output_bindings_)
    {
        binder_->set_output(binding.tensor, binding.buffer);
    }

    exec_ = binder_->compile();
}

cv::gimpl::GPlaidMLExecutable::GPlaidMLExecutable(cv::gimpl::GPlaidMLExecutable::Config cfg,
                                                  const ade::Graph& g,
                                                  const std::vector<ade::NodeHandle>& nodes,
                                                  const std::vector<cv::gimpl::Data>& ins_data,
                                                  const std::vector<cv::gimpl::Data>& outs_data)
    : m_cfg(std::move(cfg)), m_g(g), m_gm(m_g)
{
    auto is_op = [&](ade::NodeHandle nh) {
        return m_gm.metadata(nh).get<NodeType>().t == NodeType::OP;
    };

    std::copy_if(nodes.begin(), nodes.end(), std::back_inserter(m_all_ops), is_op);

    compile(ins_data, outs_data);
}

void cv::gimpl::GPlaidMLExecutable::run(std::vector<InObj>  &&input_objs,
                                        std::vector<OutObj> &&output_objs)
{
    for (auto& it : input_objs) bindInArg (it.first, it.second);

    exec_->run();

    for (auto& it : output_objs) bindOutArg(it.first, it.second);
}

void cv::gimpl::GPlaidMLExecutable::bindInArg(const RcDesc &rc, const GRunArg &arg)
{
    switch (rc.shape)
    {
    case GShape::GMAT:
    {
        auto& tensor_map = m_res.slot<plaidml::edsl::Tensor>();
        auto it = tensor_map.find(rc.id);
        GAPI_Assert(it != tensor_map.end());

        switch (arg.index())
        {
        case GRunArg::index_of<cv::gapi::own::Mat>():
        {
            auto& arg_mat = util::get<cv::gapi::own::Mat>(arg);
            binder_->input(it->second).copy_from(arg_mat.data);
        }
        break;
#if !defined(GAPI_STANDALONE)
        case GRunArg::index_of<cv::Mat>():
        {
            auto& arg_mat = util::get<cv::Mat>(arg);
            binder_->input(it->second).copy_from(arg_mat.data);
        }
        break;
#endif // !defined(GAPI_STANDALONE)
        default: util::throw_error(std::logic_error("content type of the runtime argument does not match the resource description"));
        }
    }
    break;

    default:
        util::throw_error(std::logic_error("Unsupported GShape type"));
    }
}

void cv::gimpl::GPlaidMLExecutable::bindOutArg(const RcDesc &rc, const GRunArgP &arg)
{
    switch (rc.shape)
    {
    case GShape::GMAT:
    {
        auto& tensor_map = m_res.slot<plaidml::edsl::Tensor>();
        auto it = tensor_map.find(rc.id);
        GAPI_Assert(it != tensor_map.end());

        switch (arg.index())
        {
        case GRunArgP::index_of<cv::gapi::own::Mat*>():
        {
            auto& arg_mat = *util::get<cv::gapi::own::Mat*>(arg);
            binder_->output(it->second).copy_into(arg_mat.data);
        }
        break;
#if !defined(GAPI_STANDALONE)
        case GRunArgP::index_of<cv::Mat*>():
        {
            auto& arg_mat = *util::get<cv::Mat*>(arg);
            binder_->output(it->second).copy_into(arg_mat.data);
        }
        break;
#endif // !defined(GAPI_STANDALONE)
        default: util::throw_error(std::logic_error("content type of the runtime argument does not match the resource description"));
        }
    }
    break;

    default:
        util::throw_error(std::logic_error("Unsupported GShape type"));
    }
}

cv::GArg cv::gimpl::GPlaidMLExecutable::packArg(const GArg &arg)
{
    GAPI_Assert(   arg.kind != cv::detail::ArgKind::GMAT
                && arg.kind != cv::detail::ArgKind::GSCALAR
                && arg.kind != cv::detail::ArgKind::GARRAY);

    if (arg.kind != cv::detail::ArgKind::GOBJREF)
    {
        // All other cases - pass as-is, with no transformations to GArg contents.
        return arg;
    }
    GAPI_Assert(arg.kind == cv::detail::ArgKind::GOBJREF);

    const cv::gimpl::RcDesc &ref = arg.get<cv::gimpl::RcDesc>();
    switch (ref.shape)
    {
    case GShape::GMAT:
    {
        auto& tensor_map = m_res.slot<plaidml::edsl::Tensor>();
        auto it = tensor_map.find(ref.id);
        GAPI_Assert(it != tensor_map.end());
        return GArg(it->second);
    }
    break;
    default:
        util::throw_error(std::logic_error("Unsupported GShape type"));
        break;
    }
}

#endif // HAVE_PLAIDML
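For readers unfamiliar with the PlaidML2 C++ API, the executable above reduces to the following call sequence. This is a condensed sketch of the calls already used in this file, not an additional code path; the tensor shape and device id are placeholders.

#include <plaidml2/core/ffi.h>
#include <plaidml2/edsl/edsl.h>
#include <plaidml2/exec/exec.h>

void plaidml_flow_sketch()
{
    // 1. initBuffers(): one Placeholder + device Buffer per input/output GMat,
    //    laid out as W x H x C with the depth translated by depth_from_ocv().
    auto in1 = plaidml::edsl::Placeholder(PLAIDML_DATA_UINT8, {1920, 1080, 1});
    auto in2 = plaidml::edsl::Placeholder(PLAIDML_DATA_UINT8, {1920, 1080, 1});

    // 2. compile() loop: every kernel's run() appends its EDSL expression.
    plaidml::edsl::Tensor out = in1 + in2;

    // 3. The output tensors become a Program; a Binder ties placeholders to buffers.
    plaidml::edsl::Program program("Program", {out});
    plaidml::exec::Binder  binder(program);

    plaidml::TensorShape tshape(in1.shape().dtype(), in1.shape().int_dims());
    plaidml::Buffer      in1_buf("llvm_cpu.0", tshape); // placeholder device id
    binder.set_input(in1, in1_buf);
    // ...same for the remaining inputs, and set_output() for the outputs...

    // 4. Compile once; each run() then copies host data in, executes, copies out:
    //    binder.input(t).copy_from(ptr); exec->run(); binder.output(t).copy_into(ptr);
    auto exec = binder.compile();
    (void)exec;
}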
@@ -0,0 +1,100 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation

#ifdef HAVE_PLAIDML

#ifndef OPENCV_GAPI_GPLAIDMLBACKEND_HPP
#define OPENCV_GAPI_GPLAIDMLBACKEND_HPP

#include <map>           // map
#include <unordered_map> // unordered_map
#include <tuple>         // tuple
#include <ade/util/algorithm.hpp> // type_list_index

#include <opencv2/gapi/garg.hpp>
#include <opencv2/gapi/gproto.hpp>
#include <opencv2/gapi/plaidml/gplaidmlkernel.hpp>

#include "api/gorigin.hpp"
#include "backends/common/gbackend.hpp"

#include "compiler/gislandmodel.hpp"

#include <plaidml2/exec/exec.h>
#include <plaidml2/core/core.h>

namespace cv { namespace gimpl {

struct PlaidMLUnit
{
    static const char *name() { return "PlaidMLKernel"; }
    GPlaidMLKernel k;
};

class GPlaidMLExecutable final: public GIslandExecutable
{
public:
    struct Config
    {
        std::string dev_id;
        std::string trg_id;
    };

    GPlaidMLExecutable(Config cfg,
                       const ade::Graph& graph,
                       const std::vector<ade::NodeHandle>& nodes,
                       const std::vector<cv::gimpl::Data>& ins_data,
                       const std::vector<cv::gimpl::Data>& outs_data);

    virtual inline bool canReshape() const override { return false; }

    virtual inline void reshape(ade::Graph&, const GCompileArgs&) override
    {
        util::throw_error(std::logic_error("GPlaidMLExecutable::reshape() should never be called"));
    }

    virtual void run(std::vector<InObj>  &&input_objs,
                     std::vector<OutObj> &&output_objs) override;

private:
    void initBuffers(const std::vector<cv::gimpl::Data>& ins_data,
                     std::vector<plaidml::exec::Binding>& bindings);

    void bindInArg (const RcDesc &rc, const GRunArg  &arg);
    void bindOutArg(const RcDesc &rc, const GRunArgP &arg);

    void compile(const std::vector<cv::gimpl::Data>& ins_data,
                 const std::vector<cv::gimpl::Data>& outs_data);

    // FIXME: The user could also pass the config via compile args?
    void initConfig();

    GArg packArg(const GArg &arg);

    Config m_cfg;

    const ade::Graph &m_g;
    GModel::ConstGraph m_gm;

    std::vector<ade::NodeHandle> m_all_ops;

    std::vector<size_t> output_ids_;

    std::unique_ptr<plaidml::exec::Binder>     binder_;
    std::shared_ptr<plaidml::exec::Executable> exec_;

    std::vector<plaidml::exec::Binding> input_bindings_;
    std::vector<plaidml::exec::Binding> output_bindings_;

    using Mag = detail::magazine<plaidml::edsl::Tensor, plaidml::Buffer*>;
    Mag m_res;
};

}} // namespace cv::gimpl

#endif // OPENCV_GAPI_GPLAIDMLBACKEND_HPP

#endif // HAVE_PLAIDML
@@ -0,0 +1,54 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation


#ifdef HAVE_PLAIDML

#include "precomp.hpp"

#include <opencv2/gapi/core.hpp>
#include <opencv2/gapi/plaidml/core.hpp>
#include <opencv2/gapi/plaidml/gplaidmlkernel.hpp>

#include <plaidml2/edsl/edsl.h>

#define GAPI_PLAIDML_LOGICAL_OP(Name, API, Op)                \
GAPI_PLAIDML_KERNEL(Name, API)                                \
{                                                             \
    static void run(const plaidml::edsl::Tensor& src1,       \
                    const plaidml::edsl::Tensor& src2,       \
                    plaidml::edsl::Tensor& dst)              \
    {                                                         \
        dst = src1 Op src2;                                   \
    };                                                        \
};

#define GAPI_PLAIDML_ARITHMETIC_OP(Name, API, Op)             \
GAPI_PLAIDML_KERNEL(Name, API)                                \
{                                                             \
    static void run(const plaidml::edsl::Tensor& src1,       \
                    const plaidml::edsl::Tensor& src2,       \
                    int, /* dtype */                          \
                    plaidml::edsl::Tensor& dst)              \
    {                                                         \
        dst = src1 Op src2;                                   \
    };                                                        \
};

GAPI_PLAIDML_LOGICAL_OP(GPlaidMLAnd, cv::gapi::core::GAnd, &);
GAPI_PLAIDML_LOGICAL_OP(GPlaidMLXor, cv::gapi::core::GXor, ^);
GAPI_PLAIDML_LOGICAL_OP(GPlaidMLOr , cv::gapi::core::GOr , |)

GAPI_PLAIDML_ARITHMETIC_OP(GPlaidMLAdd, cv::gapi::core::GAdd, +);
GAPI_PLAIDML_ARITHMETIC_OP(GPlaidMLSub, cv::gapi::core::GSub, -);

cv::gapi::GKernelPackage cv::gapi::core::plaidml::kernels()
{
    static auto pkg = cv::gapi::kernels<GPlaidMLAdd, GPlaidMLSub, GPlaidMLAnd, GPlaidMLXor, GPlaidMLOr>();
    return pkg;
}

#endif // HAVE_PLAIDML
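For reference, GAPI_PLAIDML_ARITHMETIC_OP(GPlaidMLAdd, cv::gapi::core::GAdd, +) above expands to roughly the following struct; the unused int parameter corresponds to the ddepth argument of the GAdd operation:

struct GPlaidMLAdd : public cv::GPlaidMLKernelImpl<GPlaidMLAdd, cv::gapi::core::GAdd>
{
    static void run(const plaidml::edsl::Tensor& src1,
                    const plaidml::edsl::Tensor& src2,
                    int /* ddepth, ignored by this backend */,
                    plaidml::edsl::Tensor& dst)
    {
        dst = src1 + src2;
    }
};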
@@ -0,0 +1,42 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation


#ifdef HAVE_PLAIDML

#ifndef OPENCV_GAPI_PLAIDML_UTIL_HPP
#define OPENCV_GAPI_PLAIDML_UTIL_HPP

#include <stdexcept>

#include <plaidml2/core/ffi.h>
#include <opencv2/gapi/util/throw.hpp>

namespace cv
{
namespace util
{
namespace plaidml
{

inline plaidml_datatype depth_from_ocv(int depth)
{
    switch (depth)
    {
        case CV_8U  : return PLAIDML_DATA_UINT8;
        case CV_8S  : return PLAIDML_DATA_INT8;
        case CV_16U : return PLAIDML_DATA_UINT16;
        case CV_16S : return PLAIDML_DATA_INT16;
        case CV_32S : return PLAIDML_DATA_INT32;
        case CV_32F : return PLAIDML_DATA_FLOAT32;
        case CV_64F : return PLAIDML_DATA_FLOAT64;
        default: util::throw_error(std::logic_error("Unrecognized OpenCV depth"));
    }
}

} // namespace plaidml
} // namespace util
} // namespace cv

#endif // OPENCV_GAPI_PLAIDML_UTIL_HPP

#endif // HAVE_PLAIDML
@@ -0,0 +1,178 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation


#ifdef HAVE_PLAIDML

#include "test_precomp.hpp"

#include <stdexcept>
#include <ade/util/iota_range.hpp>
#include "logger.hpp"

#include <opencv2/gapi/plaidml/core.hpp>
#include <opencv2/gapi/plaidml/plaidml.hpp>

namespace opencv_test
{

inline cv::gapi::plaidml::config getConfig()
{
    auto read_var_from_env = [](const char* env)
    {
        const char* raw = std::getenv(env);
        if (!raw)
        {
            cv::util::throw_error(std::runtime_error(std::string(env) + " isn't set"));
        }

        return std::string(raw);
    };

    auto dev_id = read_var_from_env("PLAIDML_DEVICE");
    auto trg_id = read_var_from_env("PLAIDML_TARGET");

    return cv::gapi::plaidml::config{std::move(dev_id),
                                     std::move(trg_id)};
}

TEST(GAPI_PlaidML_Pipelines, SimpleArithmetic)
{
    cv::Size size(1920, 1080);
    int type = CV_8UC1;

    cv::Mat in_mat1(size, type);
    cv::Mat in_mat2(size, type);

    // NB: What about overflow? PlaidML doesn't handle it
    cv::randu(in_mat1, cv::Scalar::all(0), cv::Scalar::all(127));
    cv::randu(in_mat2, cv::Scalar::all(0), cv::Scalar::all(127));

    cv::Mat out_mat(size, type, cv::Scalar::all(0));
    cv::Mat ref_mat(size, type, cv::Scalar::all(0));

    ////////////////////////////// G-API //////////////////////////////////////
    cv::GMat in1, in2;
    auto out = in1 + in2;

    cv::GComputation comp(cv::GIn(in1, in2), cv::GOut(out));
    comp.apply(cv::gin(in_mat1, in_mat2), cv::gout(out_mat),
               cv::compile_args(getConfig(),
                                cv::gapi::use_only{cv::gapi::core::plaidml::kernels()}));

    ////////////////////////////// OpenCV /////////////////////////////////////
    cv::add(in_mat1, in_mat2, ref_mat, cv::noArray(), type);

    EXPECT_EQ(0, cv::norm(out_mat, ref_mat));
}

// FIXME: The PlaidML CPU backend doesn't support bitwise operations
TEST(GAPI_PlaidML_Pipelines, DISABLED_ComplexArithmetic)
{
    cv::Size size(1920, 1080);
    int type = CV_8UC1;

    cv::Mat in_mat1(size, type);
    cv::Mat in_mat2(size, type);

    cv::randu(in_mat1, cv::Scalar::all(0), cv::Scalar::all(255));
    cv::randu(in_mat2, cv::Scalar::all(0), cv::Scalar::all(255));

    cv::Mat out_mat(size, type, cv::Scalar::all(0));
    cv::Mat ref_mat(size, type, cv::Scalar::all(0));

    ////////////////////////////// G-API //////////////////////////////////////
    cv::GMat in1, in2;
    auto out = in1 | (in2 ^ (in1 & (in2 + (in1 - in2))));

    cv::GComputation comp(cv::GIn(in1, in2), cv::GOut(out));
    comp.apply(cv::gin(in_mat1, in_mat2), cv::gout(out_mat),
               cv::compile_args(getConfig(),
                                cv::gapi::use_only{cv::gapi::core::plaidml::kernels()}));

    ////////////////////////////// OpenCV /////////////////////////////////////
    cv::subtract(in_mat1, in_mat2, ref_mat, cv::noArray(), type);
    cv::add(in_mat2, ref_mat, ref_mat, cv::noArray(), type);
    cv::bitwise_and(in_mat1, ref_mat, ref_mat);
    cv::bitwise_xor(in_mat2, ref_mat, ref_mat);
    cv::bitwise_or(in_mat1, ref_mat, ref_mat);

    EXPECT_EQ(0, cv::norm(out_mat, ref_mat));
}

TEST(GAPI_PlaidML_Pipelines, TwoInputOperations)
{
    cv::Size size(1920, 1080);
    int type = CV_8UC1;

    constexpr int kNumInputs = 4;
    std::vector<cv::Mat> in_mat(kNumInputs, cv::Mat(size, type));
    for (int i = 0; i < kNumInputs; ++i)
    {
        cv::randu(in_mat[i], cv::Scalar::all(0), cv::Scalar::all(60));
    }

    cv::Mat out_mat(size, type, cv::Scalar::all(0));
    cv::Mat ref_mat(size, type, cv::Scalar::all(0));

    ////////////////////////////// G-API //////////////////////////////////////
    cv::GMat in[4];
    auto out = (in[3] - in[0]) + (in[2] - in[1]);

    cv::GComputation comp(cv::GIn(in[0], in[1], in[2], in[3]), cv::GOut(out));

    // FIXME: A plain apply(in_mat, out_mat, ...) overload doesn't work here
    comp.apply(cv::gin(in_mat[0], in_mat[1], in_mat[2], in_mat[3]), cv::gout(out_mat),
               cv::compile_args(getConfig(),
                                cv::gapi::use_only{cv::gapi::core::plaidml::kernels()}));

    ////////////////////////////// OpenCV /////////////////////////////////////
    cv::subtract(in_mat[3], in_mat[0], ref_mat, cv::noArray(), type);
    cv::add(ref_mat, in_mat[2], ref_mat, cv::noArray(), type);
    cv::subtract(ref_mat, in_mat[1], ref_mat, cv::noArray(), type);

    EXPECT_EQ(0, cv::norm(out_mat, ref_mat));
}

TEST(GAPI_PlaidML_Pipelines, TwoOutputOperations)
{
    cv::Size size(1920, 1080);
    int type = CV_8UC1;

    constexpr int kNumInputs = 4;
    std::vector<cv::Mat> in_mat(kNumInputs, cv::Mat(size, type));
    for (int i = 0; i < kNumInputs; ++i)
    {
        cv::randu(in_mat[i], cv::Scalar::all(0), cv::Scalar::all(60));
    }

    std::vector<cv::Mat> out_mat(kNumInputs, cv::Mat(size, type, cv::Scalar::all(0)));
    std::vector<cv::Mat> ref_mat(kNumInputs, cv::Mat(size, type, cv::Scalar::all(0)));

    ////////////////////////////// G-API //////////////////////////////////////
    cv::GMat in[4], out[2];
    out[0] = in[0] + in[3];
    out[1] = in[1] + in[2];

    cv::GComputation comp(cv::GIn(in[0], in[1], in[2], in[3]), cv::GOut(out[0], out[1]));

    // FIXME: A plain apply(in_mat, out_mat, ...) overload doesn't work here
    comp.apply(cv::gin(in_mat[0], in_mat[1], in_mat[2], in_mat[3]),
               cv::gout(out_mat[0], out_mat[1]),
               cv::compile_args(getConfig(),
                                cv::gapi::use_only{cv::gapi::core::plaidml::kernels()}));

    ////////////////////////////// OpenCV /////////////////////////////////////
    cv::add(in_mat[0], in_mat[3], ref_mat[0], cv::noArray(), type);
    cv::add(in_mat[1], in_mat[2], ref_mat[1], cv::noArray(), type);

    EXPECT_EQ(0, cv::norm(out_mat[0], ref_mat[0]));
    EXPECT_EQ(0, cv::norm(out_mat[1], ref_mat[1]));
}

} // namespace opencv_test

#endif // HAVE_PLAIDML