|
|
|
@@ -81,8 +81,24 @@ cv::Mat initMatrixRandU(const int type, const cv::Size& sz_in) {
 namespace opencv_test
 {
 namespace {
+void initTestDataPath()
+{
+#ifndef WINRT
+    static bool initialized = false;
+    if (!initialized)
+    {
+        // Since G-API has no test data of its own (yet), it is taken from the common space
+        const char* testDataPath = getenv("OPENCV_TEST_DATA_PATH");
+        if (testDataPath) {
+            cvtest::addDataSearchPath(testDataPath);
+        }
+        initialized = true;
+    }
+#endif // WINRT
+}
+
 // FIXME: taken from the DNN module
-void normAssert(const cv::InputArray& ref, const cv::InputArray& test,
+void normAssert(cv::InputArray& ref, cv::InputArray& test,
                 const char *comment /*= ""*/,
                 const double l1 = 0.00001, const double lInf = 0.0001) {
     const double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
@@ -109,6 +125,7 @@ inline int toCV(const ONNXTensorElementDataType prec) {
     switch (prec) {
     case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: return CV_8U;
     case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return CV_32F;
+    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32: return CV_32S;
     default: GAPI_Assert(false && "Unsupported data type");
     }
     return -1;
@@ -126,46 +143,97 @@ inline std::vector<const char*> getCharNames(const std::vector<std::string>& nam
     return out_vec;
 }
 
-inline void copyToOut(const cv::Mat& in, cv::Mat& out) {
-    GAPI_Assert(in.depth() == CV_32F);
-    GAPI_Assert(in.size == out.size);
-    const float* const inptr = in.ptr<float>();
-    float* const optr = out.ptr<float>();
-    const int size = in.total();
-    for (int i = 0; i < size; ++i) {
-        optr[i] = inptr[i];
+template<typename T>
+void copyToOut(const cv::Mat& in, cv::Mat& out) {
+    const size_t size = std::min(out.total(), in.total());
+    std::copy(in.begin<T>(), in.begin<T>() + size, out.begin<T>());
+    if (size < out.total()) {
+        T* const optr = out.ptr<T>();
+        optr[size] = static_cast<T>(-1); // end data mark
     }
 }
 
 void remapYolo(const std::unordered_map<std::string, cv::Mat> &onnx,
                std::unordered_map<std::string, cv::Mat> &gapi) {
     GAPI_Assert(onnx.size() == 1u);
     GAPI_Assert(gapi.size() == 1u);
     // Result from Run method
     const cv::Mat& in = onnx.begin()->second;
     GAPI_Assert(in.depth() == CV_32F);
     // Configured output
     cv::Mat& out = gapi.begin()->second;
     // Simple copy
-    copyToOut(in, out);
+    copyToOut<float>(in, out);
 }
 
-void remapSsdPorts(const std::unordered_map<std::string, cv::Mat> &onnx,
-                   std::unordered_map<std::string, cv::Mat> &gapi) {
-    // Result from Run method
-    const cv::Mat& in_num = onnx.at("num_detections:0");
-    const cv::Mat& in_boxes = onnx.at("detection_boxes:0");
-    const cv::Mat& in_scores = onnx.at("detection_scores:0");
-    const cv::Mat& in_classes = onnx.at("detection_classes:0");
-    // Configured outputs
-    cv::Mat& out_boxes = gapi.at("out1");
-    cv::Mat& out_classes = gapi.at("out2");
-    cv::Mat& out_scores = gapi.at("out3");
-    cv::Mat& out_num = gapi.at("out4");
+void remapYoloV3(const std::unordered_map<std::string, cv::Mat> &onnx,
+                 std::unordered_map<std::string, cv::Mat> &gapi) {
     // Simple copy for outputs
-    copyToOut(in_num, out_num);
-    copyToOut(in_boxes, out_boxes);
-    copyToOut(in_scores, out_scores);
-    copyToOut(in_classes, out_classes);
+    const cv::Mat& in_boxes = onnx.at("yolonms_layer_1/ExpandDims_1:0");
+    const cv::Mat& in_scores = onnx.at("yolonms_layer_1/ExpandDims_3:0");
+    const cv::Mat& in_indices = onnx.at("yolonms_layer_1/concat_2:0");
+    GAPI_Assert(in_boxes.depth() == CV_32F);
+    GAPI_Assert(in_scores.depth() == CV_32F);
+    GAPI_Assert(in_indices.depth() == CV_32S);
+
+    cv::Mat& out_boxes = gapi.at("out1");
+    cv::Mat& out_scores = gapi.at("out2");
+    cv::Mat& out_indices = gapi.at("out3");
+
+    copyToOut<float>(in_boxes, out_boxes);
+    copyToOut<float>(in_scores, out_scores);
+    copyToOut<int32_t>(in_indices, out_indices);
 }
 
+void remapToIESSDOut(const std::vector<cv::Mat> &detections,
+                     cv::Mat &ssd_output) {
+    for (const auto &det_el : detections) {
+        GAPI_Assert(det_el.depth() == CV_32F);
+        GAPI_Assert(!det_el.empty());
+    }
+
+    // SSD-MobilenetV1 structure check
+    ASSERT_EQ(detections[0].total(), 1u);
+    ASSERT_EQ(detections[2].total(), detections[0].total() * 100);
+    ASSERT_EQ(detections[2].total(), detections[3].total());
+    ASSERT_EQ((detections[2].total() * 4), detections[1].total());
+
+    const int num_objects = static_cast<int>(detections[0].ptr<float>()[0]);
+    GAPI_Assert(num_objects <= (ssd_output.size[2] - 1));
+    const float *in_boxes = detections[1].ptr<float>();
+    const float *in_scores = detections[2].ptr<float>();
+    const float *in_classes = detections[3].ptr<float>();
+    float *ptr = ssd_output.ptr<float>();
+
+    for (int i = 0; i < num_objects; i++) {
+        ptr[0] = 0.f;                 // "image_id"
+        ptr[1] = in_classes[i];       // "label"
+        ptr[2] = in_scores[i];        // "confidence"
+        ptr[3] = in_boxes[4 * i + 1]; // left
+        ptr[4] = in_boxes[4 * i + 0]; // top
+        ptr[5] = in_boxes[4 * i + 3]; // right
+        ptr[6] = in_boxes[4 * i + 2]; // bottom
+
+        ptr += 7;
+    }
+
+    if (num_objects < ssd_output.size[2] - 1) {
+        // Put a -1 mark at the end of the output blob if there is space left
+        ptr[0] = -1.f;
+    }
+}
+
+void remapSSDPorts(const std::unordered_map<std::string, cv::Mat> &onnx,
+                   std::unordered_map<std::string, cv::Mat> &gapi) {
+    // Assemble the ONNX-processed outputs back into a single 1x1x200x7 blob
+    // to preserve compatibility with the OpenVINO-based SSD pipeline
+    const cv::Mat &num_detections = onnx.at("num_detections:0");
+    const cv::Mat &detection_boxes = onnx.at("detection_boxes:0");
+    const cv::Mat &detection_scores = onnx.at("detection_scores:0");
+    const cv::Mat &detection_classes = onnx.at("detection_classes:0");
+    cv::Mat &ssd_output = gapi.at("detection_output");
+    remapToIESSDOut({num_detections, detection_boxes, detection_scores, detection_classes}, ssd_output);
+}
+
 class ONNXtest : public ::testing::Test {
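
As an aside, here is a minimal sketch of the "end data mark" convention that the templated copyToOut() above introduces (an illustration only, not part of the patch; the standalone helper name and buffer sizes are made up): when the destination buffer is larger than the ONNX result, the first unused element is set to -1, and ONNXWithRemap::validate() further below stops comparing at that mark.

#include <opencv2/core.hpp>
#include <algorithm>
#include <iostream>

// Mirrors the copyToOut<T>() convention from the hunk above (hypothetical helper name).
template<typename T>
void copyWithEndMark(const cv::Mat& in, cv::Mat& out) {
    const size_t size = std::min(out.total(), in.total());
    std::copy(in.begin<T>(), in.begin<T>() + size, out.begin<T>());
    if (size < out.total()) {
        out.ptr<T>()[size] = static_cast<T>(-1); // end data mark
    }
}

int main() {
    const cv::Mat src = (cv::Mat_<float>(1, 3) << 10.f, 20.f, 30.f);
    cv::Mat dst(1, 5, CV_32F, cv::Scalar(0));
    copyWithEndMark<float>(src, dst);
    // dst now holds {10, 20, 30, -1, 0}; a reader stops at the first -1.
    for (int i = 0; i < dst.cols && dst.at<float>(i) != -1.f; ++i) {
        std::cout << dst.at<float>(i) << std::endl;
    }
    return 0;
}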
|
|
|
@@ -177,18 +245,17 @@ public:
     cv::Mat in_mat1;
 
     ONNXtest() {
+        initTestDataPath();
         env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, "test");
         memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
         out_gapi.resize(1);
         out_onnx.resize(1);
-        // FIXME: All tests chek "random" image
-        // Ideally it should be a real image
-        in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480});
+        // FIXME: It should be an image from G-API's own directory in opencv_extra
+        in_mat1 = cv::imread(findDataFile("cv/dpm/cat.png"));
     }
 
     template<typename T>
-    void infer(const std::vector<cv::Mat>& ins,
-               std::vector<cv::Mat>& outs) {
+    void infer(const std::vector<cv::Mat>& ins, std::vector<cv::Mat>& outs) {
         // Prepare session
         session = Ort::Session(env, model_path.data(), session_options);
         num_in = session.GetInputCount();
@@ -241,10 +308,15 @@ public:
     template<typename T>
     void infer(const cv::Mat& in, cv::Mat& out) {
         std::vector<cv::Mat> result;
-        infer<T>({in}, result);
+        infer<T>(std::vector<cv::Mat>{in}, result);
         GAPI_Assert(result.size() == 1u);
         out = result.front();
     }
+    // One input overload
+    template<typename T>
+    void infer(const cv::Mat& in, std::vector<cv::Mat>& outs) {
+        infer<T>(std::vector<cv::Mat>{in}, outs);
+    }
 
     void validate() {
         GAPI_Assert(!out_gapi.empty() && !out_onnx.empty());
@@ -275,6 +347,12 @@ public:
     const cv::Scalar mean = { 0.485, 0.456, 0.406 };
     const cv::Scalar std = { 0.229, 0.224, 0.225 };
+
+    // ROIs for the InferList and InferList2 tests
+    const std::vector<cv::Rect> rois = {
+        cv::Rect(cv::Point{ 0,   0}, cv::Size{80, 120}),
+        cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}),
+    };
 
     void preprocess(const cv::Mat& src, cv::Mat& dst) {
         const int new_h = 224;
         const int new_w = 224;
@@ -317,6 +395,55 @@ public:
         dst = dst.reshape(1, {1, 1, new_h, new_w});
     }
 };
 
+class ONNXWithRemap : public ONNXtest {
+public:
+    // Any output size may be specified here, since the inference result size is not known in advance.
+    // Tests validate only the range that holds results and do not compare the empty space after it.
+    void validate() {
+        GAPI_Assert(!out_gapi.empty() && !out_onnx.empty());
+        ASSERT_EQ(out_gapi.size(), out_onnx.size());
+        const auto size = out_onnx.size();
+        for (size_t i = 0; i < size; ++i) {
+            float* op = out_onnx.at(i).ptr<float>();
+            float* gp = out_gapi.at(i).ptr<float>();
+            const auto out_size = std::min(out_onnx.at(i).total(), out_gapi.at(i).total());
+            GAPI_Assert(out_size != 0u);
+            for (size_t d_idx = 0; d_idx < out_size; ++d_idx) {
+                if (gp[d_idx] == -1) {
+                    break; // end of detections
+                }
+                ASSERT_EQ(op[d_idx], gp[d_idx]);
+            }
+        }
+    }
+};
+
+class ONNXYoloV3MultiInput : public ONNXWithRemap {
+public:
+    std::vector<cv::Mat> ins;
+
+private:
+    virtual void SetUp() {
+        const int yolo_in_h = 416;
+        const int yolo_in_w = 416;
+        cv::Mat yolov3_input, shape, prep_mat;
+        cv::resize(in_mat1, yolov3_input, cv::Size(yolo_in_w, yolo_in_h));
+        shape.create(cv::Size(2, 1), CV_32F);
+        float* ptr = shape.ptr<float>();
+        ptr[0] = in_mat1.cols;
+        ptr[1] = in_mat1.rows;
+        preprocess(yolov3_input, prep_mat);
+        ins = {prep_mat, shape};
+    }
+
+    void preprocess(const cv::Mat& src, cv::Mat& dst) {
+        cv::Mat cvt;
+        src.convertTo(cvt, CV_32F, 1.f / 255.f);
+        toCHW(cvt, dst);
+        dst = dst.reshape(1, {1, 3, 416, 416});
+    }
+};
 } // anonymous namespace
 
 TEST_F(ONNXClassificationTest, Infer)
@@ -341,15 +468,12 @@ TEST_F(ONNXClassificationTest, Infer)
     validate();
 }
 
-TEST_F(ONNXtest, InferTensor)
+TEST_F(ONNXClassificationTest, InferTensor)
 {
     useModel("classification/squeezenet/model/squeezenet1.0-9");
     // Create tensor
-    // FIXME: Test cheks "random" image
-    // Ideally it should be a real image
-    const cv::Mat rand_mat = initMatrixRandU(CV_32FC3, cv::Size{224, 224});
-    const std::vector<int> dims = {1, rand_mat.channels(), rand_mat.rows, rand_mat.cols};
-    const cv::Mat tensor(dims, CV_32F, rand_mat.data);
+    cv::Mat tensor;
+    preprocess(in_mat1, tensor);
     // ONNX_API code
     infer<float>(tensor, out_onnx.front());
     // G_API code
@@ -368,7 +492,7 @@ TEST_F(ONNXtest, InferTensor)
 TEST_F(ONNXClassificationTest, InferROI)
 {
     useModel("classification/squeezenet/model/squeezenet1.0-9");
-    const cv::Rect ROI(cv::Point{0, 0}, cv::Size{250, 250});
+    const auto ROI = rois.at(1);
     // ONNX_API code
     cv::Mat roi_mat;
     preprocess(in_mat1(ROI), roi_mat);
@@ -392,10 +516,6 @@ TEST_F(ONNXClassificationTest, InferROI)
 TEST_F(ONNXClassificationTest, InferROIList)
 {
     useModel("classification/squeezenet/model/squeezenet1.0-9");
-    const std::vector<cv::Rect> rois = {
-        cv::Rect(cv::Point{ 0,   0}, cv::Size{80, 120}),
-        cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}),
-    };
     // ONNX_API code
     out_onnx.resize(rois.size());
     for (size_t i = 0; i < rois.size(); ++i) {
@@ -422,10 +542,6 @@ TEST_F(ONNXClassificationTest, InferROIList)
 TEST_F(ONNXClassificationTest, Infer2ROIList)
 {
     useModel("classification/squeezenet/model/squeezenet1.0-9");
-    const std::vector<cv::Rect> rois = {
-        cv::Rect(cv::Point{ 0,   0}, cv::Size{80, 120}),
-        cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}),
-    };
     // ONNX_API code
     out_onnx.resize(rois.size());
     for (size_t i = 0; i < rois.size(); ++i) {
@@ -449,27 +565,26 @@ TEST_F(ONNXClassificationTest, Infer2ROIList)
     validate();
 }
 
-TEST_F(ONNXtest, InferDynamicInputTensor)
+TEST_F(ONNXWithRemap, InferDynamicInputTensor)
 {
     useModel("object_detection_segmentation/tiny-yolov2/model/tinyyolov2-8");
     // Create tensor
-    // FIXME: Test cheks "random" image
-    // Ideally it should be a real image
-    const cv::Mat rand_mat = initMatrixRandU(CV_32FC3, cv::Size{416, 416});
-    const std::vector<int> dims = {1, rand_mat.channels(), rand_mat.rows, rand_mat.cols};
-    cv::Mat tensor(dims, CV_32F, rand_mat.data);
-    const cv::Mat in_tensor = tensor / 255.f;
+    cv::Mat cvt, rsz, tensor;
+    cv::resize(in_mat1, rsz, cv::Size{416, 416});
+    rsz.convertTo(cvt, CV_32F, 1.f / 255.f);
+    toCHW(cvt, tensor);
+    tensor = tensor.reshape(1, {1, 3, 416, 416});
     // ONNX_API code
-    infer<float>(in_tensor, out_onnx.front());
+    infer<float>(tensor, out_onnx.front());
     // G_API code
     G_API_NET(YoloNet, <cv::GMat(cv::GMat)>, "YoloNet");
     cv::GMat in;
     cv::GMat out = cv::gapi::infer<YoloNet>(in);
     cv::GComputation comp(cv::GIn(in), cv::GOut(out));
-    auto net = cv::gapi::onnx::Params<YoloNet>{model_path}
+    auto net = cv::gapi::onnx::Params<YoloNet>{ model_path }
         .cfgPostProc({cv::GMatDesc{CV_32F, {1, 125, 13, 13}}}, remapYolo)
         .cfgOutputLayers({"out"});
-    comp.apply(cv::gin(in_tensor),
+    comp.apply(cv::gin(tensor),
               cv::gout(out_gapi.front()),
               cv::compile_args(cv::gapi::networks(net)));
     // Validate
@@ -497,28 +612,26 @@ TEST_F(ONNXGRayScaleTest, InferImage)
     validate();
 }
 
-TEST_F(ONNXtest, InferMultOutput)
+TEST_F(ONNXWithRemap, InferMultiOutput)
 {
     useModel("object_detection_segmentation/ssd-mobilenetv1/model/ssd_mobilenet_v1_10");
     // ONNX_API code
     const auto prep_mat = in_mat1.reshape(1, {1, in_mat1.rows, in_mat1.cols, in_mat1.channels()});
-    infer<uint8_t>({prep_mat}, out_onnx);
+    infer<uint8_t>(prep_mat, out_onnx);
+    cv::Mat onnx_conv_out({1, 1, 200, 7}, CV_32F);
+    remapToIESSDOut({out_onnx[3], out_onnx[0], out_onnx[2], out_onnx[1]}, onnx_conv_out);
+    out_onnx.clear();
+    out_onnx.push_back(onnx_conv_out);
     // G_API code
-    using SSDOut = std::tuple<cv::GMat, cv::GMat, cv::GMat, cv::GMat>;
-    G_API_NET(MobileNet, <SSDOut(cv::GMat)>, "ssd_mobilenet");
+    G_API_NET(MobileNet, <cv::GMat(cv::GMat)>, "ssd_mobilenet");
     cv::GMat in;
-    cv::GMat out1, out2, out3, out4;
-    std::tie(out1, out2, out3, out4) = cv::gapi::infer<MobileNet>(in);
-    cv::GComputation comp(cv::GIn(in), cv::GOut(out1, out2, out3, out4));
-    auto net = cv::gapi::onnx::Params<MobileNet>{model_path}
-        .cfgOutputLayers({"out1", "out2", "out3", "out4"})
-        .cfgPostProc({cv::GMatDesc{CV_32F, {1, 100, 4}},
-                      cv::GMatDesc{CV_32F, {1, 100}},
-                      cv::GMatDesc{CV_32F, {1, 100}},
-                      cv::GMatDesc{CV_32F, {1, 1}}}, remapSsdPorts);
-    out_gapi.resize(num_out);
+    cv::GMat out = cv::gapi::infer<MobileNet>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out));
+    auto net = cv::gapi::onnx::Params<MobileNet>{ model_path }
+        .cfgOutputLayers({"detection_output"})
+        .cfgPostProc({cv::GMatDesc{CV_32F, {1, 1, 200, 7}}}, remapSSDPorts);
     comp.apply(cv::gin(in_mat1),
-              cv::gout(out_gapi[0], out_gapi[1], out_gapi[2], out_gapi[3]),
+              cv::gout(out_gapi.front()),
               cv::compile_args(cv::gapi::networks(net)));
     // Validate
     validate();
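
For context on the blob that remapSSDPorts()/remapToIESSDOut() assemble for this test, here is a small consumer-side sketch (an illustration under the layout shown above, not part of the patch; the helper name is made up): every detection occupies seven floats, [image_id, label, confidence, left, top, right, bottom], and a record starting with -1 marks the end of valid detections.

#include <opencv2/core.hpp>
#include <iostream>

// Prints detections from an IE-style 1x1xNx7 SSD blob (hypothetical helper).
void printSSDDetections(const cv::Mat& ssd_output, const float conf_threshold = 0.5f) {
    CV_Assert(ssd_output.depth() == CV_32F && ssd_output.dims == 4 && ssd_output.size[3] == 7);
    const int max_count = ssd_output.size[2];
    const float* ptr = ssd_output.ptr<float>();
    for (int i = 0; i < max_count; ++i, ptr += 7) {
        if (ptr[0] == -1.f) break; // end-of-detections mark written by remapToIESSDOut()
        const int   label      = static_cast<int>(ptr[1]);
        const float confidence = ptr[2];
        if (confidence < conf_threshold) continue;
        // Box values come straight from detection_boxes:0 (typically normalized coordinates)
        std::cout << "label " << label << " conf " << confidence
                  << " box [" << ptr[3] << ", " << ptr[4] << ", "
                  << ptr[5] << ", " << ptr[6] << "]" << std::endl;
    }
}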
|
|
|
@@ -733,6 +846,71 @@ TEST_F(ONNXMediaFrameTest, InferList2YUV)
     // Validate
     validate();
 }
 
+TEST_F(ONNXYoloV3MultiInput, InferConstInput)
+{
+    useModel("object_detection_segmentation/yolov3/model/yolov3-10");
+    // ONNX_API code
+    infer<float>(ins, out_onnx);
+    // G_API code
+    using OUT = std::tuple<cv::GMat, cv::GMat, cv::GMat>;
+    G_API_NET(YoloNet, <OUT(cv::GMat)>, "yolov3");
+    auto net = cv::gapi::onnx::Params<YoloNet>{model_path}
+        .constInput("image_shape", ins[1])
+        .cfgInputLayers({"input_1"})
+        .cfgOutputLayers({"out1", "out2", "out3"})
+        .cfgPostProc({cv::GMatDesc{CV_32F, {1, 10000, 4}},
+                      cv::GMatDesc{CV_32F, {1, 80, 10000}},
+                      cv::GMatDesc{CV_32S, {5, 3}}}, remapYoloV3);
+    cv::GMat in, out1, out2, out3;
+    std::tie(out1, out2, out3) = cv::gapi::infer<YoloNet>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out1, out2, out3));
+    out_gapi.resize(num_out);
+    comp.apply(cv::gin(ins[0]),
+               cv::gout(out_gapi[0], out_gapi[1], out_gapi[2]),
+               cv::compile_args(cv::gapi::networks(net)));
+    // Validate
+    validate();
+}
+
+TEST_F(ONNXYoloV3MultiInput, InferBSConstInput)
+{
+    // This test checks the case when a const input is used
+    // and all input layer names are specified.
+    // The const input takes precedence; this is the expected behavior.
+    useModel("object_detection_segmentation/yolov3/model/yolov3-10");
+    // A tensor with an incorrect image size is used to check the case
+    // when cfgInputLayers and constInput refer to the same name
+    cv::Mat bad_shape;
+    bad_shape.create(cv::Size(2, 1), CV_32F);
+    float* ptr = bad_shape.ptr<float>();
+    ptr[0] = 590;
+    ptr[1] = 12;
+    // ONNX_API code
+    infer<float>(ins, out_onnx);
+    // G_API code
+    using OUT = std::tuple<cv::GMat, cv::GMat, cv::GMat>;
+    G_API_NET(YoloNet, <OUT(cv::GMat, cv::GMat)>, "yolov3");
+    auto net = cv::gapi::onnx::Params<YoloNet>{model_path}
+        // Data from the const input will be used for inference
+        .constInput("image_shape", ins[1])
+        // The "image_shape" input layer shares its name with the const input
+        .cfgInputLayers({"input_1", "image_shape"})
+        .cfgOutputLayers({"out1", "out2", "out3"})
+        .cfgPostProc({cv::GMatDesc{CV_32F, {1, 10000, 4}},
+                      cv::GMatDesc{CV_32F, {1, 80, 10000}},
+                      cv::GMatDesc{CV_32S, {5, 3}}}, remapYoloV3);
+    cv::GMat in1, in2, out1, out2, out3;
+    std::tie(out1, out2, out3) = cv::gapi::infer<YoloNet>(in1, in2);
+    cv::GComputation comp(cv::GIn(in1, in2), cv::GOut(out1, out2, out3));
+    out_gapi.resize(num_out);
+    comp.apply(cv::gin(ins[0], bad_shape),
+               cv::gout(out_gapi[0], out_gapi[1], out_gapi[2]),
+               cv::compile_args(cv::gapi::networks(net)));
+    // Validate
+    validate();
+}
+
 } // namespace opencv_test
 
 #endif // HAVE_ONNX
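
A closing note on the two multi-input tests: the second YoloV3 input, image_shape, is just a 1x2 CV_32F tensor carrying the source image size, filled the same way as in ONNXYoloV3MultiInput::SetUp() above. A minimal sketch (illustration only; the helper name is made up):

#include <opencv2/core.hpp>

// Builds the auxiliary "image_shape" tensor as the fixture above does:
// a 1x2 CV_32F matrix holding the source image columns and rows.
cv::Mat makeImageShapeTensor(const cv::Mat& original_image) {
    cv::Mat shape(cv::Size(2, 1), CV_32F);
    float* ptr = shape.ptr<float>();
    ptr[0] = static_cast<float>(original_image.cols);
    ptr[1] = static_cast<float>(original_image.rows);
    return shape;
}

The tests wire it in two ways: bound once via constInput("image_shape", ...) in InferConstInput, or declared as a regular second graph input in InferBSConstInput, where the const input still takes precedence over the runtime value.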
|
|
|
|
|