diff --git a/modules/gapi/src/backends/ie/giebackend.cpp b/modules/gapi/src/backends/ie/giebackend.cpp
index 13daf5d6df..820ee8e11c 100644
--- a/modules/gapi/src/backends/ie/giebackend.cpp
+++ b/modules/gapi/src/backends/ie/giebackend.cpp
@@ -530,10 +530,11 @@ public:
     explicit RequestPool(std::vector<InferenceEngine::InferRequest>&& requests);
 
     void execute(Task&& t);
-    void waitAndShutdown();
+    void waitAll();
 
 private:
     void callback(Task task, InferenceEngine::InferRequest& request, size_t id);
+    void setup();
 
     QueueClass<size_t>                         m_idle_ids;
     std::vector<InferenceEngine::InferRequest> m_requests;
@@ -542,11 +543,15 @@ private:
 // RequestPool implementation //////////////////////////////////////////////
 cv::gimpl::ie::RequestPool::RequestPool(std::vector<InferenceEngine::InferRequest>&& requests)
     : m_requests(std::move(requests)) {
-        for (size_t i = 0; i < m_requests.size(); ++i) {
-            m_idle_ids.push(i);
-        }
+        setup();
     }
 
+void cv::gimpl::ie::RequestPool::setup() {
+    for (size_t i = 0; i < m_requests.size(); ++i) {
+        m_idle_ids.push(i);
+    }
+}
+
 void cv::gimpl::ie::RequestPool::execute(cv::gimpl::ie::RequestPool::Task&& t) {
     size_t id = 0u;
     m_idle_ids.pop(id);
@@ -566,12 +571,13 @@ void cv::gimpl::ie::RequestPool::callback(cv::gimpl::ie::RequestPool::Task task,
 }
 
 // NB: Not thread-safe.
-void cv::gimpl::ie::RequestPool::waitAndShutdown() {
+void cv::gimpl::ie::RequestPool::waitAll() {
     // NB: It will be blocked if at least one request is busy.
     for (size_t i = 0; i < m_requests.size(); ++i) {
         size_t id = 0u;
         m_idle_ids.pop(id);
     }
+    setup();
 }
 
 // GCPUExcecutable implementation //////////////////////////////////////////////
@@ -632,7 +638,7 @@ void cv::gimpl::ie::GIEExecutable::run(cv::gimpl::GIslandExecutable::IInput &in
     if (cv::util::holds_alternative<cv::gimpl::EndOfStream>(in_msg))
     {
         // (3) Wait until all passed task are done.
-        m_reqPool->waitAndShutdown();
+        m_reqPool->waitAll();
         out.post(cv::gimpl::EndOfStream{});
         return;
     }
@@ -671,7 +677,7 @@ void cv::gimpl::ie::GIEExecutable::run(cv::gimpl::GIslandExecutable::IInput &in
     // (5) In non-streaming mode need to wait until the all tasks are done
     // FIXME: Is there more graceful way to handle this case ?
     if (!m_gm.metadata().contains<Streaming>()) {
-        m_reqPool->waitAndShutdown();
+        m_reqPool->waitAll();
     }
 }
 
diff --git a/modules/gapi/test/infer/gapi_infer_ie_test.cpp b/modules/gapi/test/infer/gapi_infer_ie_test.cpp
index 366b7b18f2..4ea33f7713 100644
--- a/modules/gapi/test/infer/gapi_infer_ie_test.cpp
+++ b/modules/gapi/test/infer/gapi_infer_ie_test.cpp
@@ -2005,6 +2005,29 @@ TEST_F(InferWithReshapeNV12, TestInferListYUV)
     // Validate
     validate();
 }
+
+TEST_F(ROIList, CallInferMultipleTimes)
+{
+    cv::GArray<cv::Rect> rr;
+    cv::GMat in;
+    cv::GArray<cv::GMat> age, gender;
+    std::tie(age, gender) = cv::gapi::infer<AgeGender>(rr, in);
+    cv::GComputation comp(cv::GIn(in, rr), cv::GOut(age, gender));
+
+    auto pp = cv::gapi::ie::Params<AgeGender> {
+        params.model_path, params.weights_path, params.device_id
+    }.cfgOutputLayers({ "age_conv3", "prob" });
+
+    auto cc = comp.compile(cv::descr_of(cv::gin(m_in_mat, m_roi_list)),
+                           cv::compile_args(cv::gapi::networks(pp)));
+
+    for (int i = 0; i < 10; ++i) {
+        cc(cv::gin(m_in_mat, m_roi_list), cv::gout(m_out_gapi_ages, m_out_gapi_genders));
+    }
+
+    validate();
+}
+
 } // namespace opencv_test
 
 #endif // HAVE_INF_ENGINE
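
For reference, here is a minimal, self-contained C++ sketch of the idle-id pool pattern the diff modifies. The names IdQueue, Request, and RequestPoolSketch are illustrative stand-ins, not the actual G-API types (the backend uses its own QueueClass and InferenceEngine::InferRequest), and execute() is shown synchronously, whereas the real backend returns the id from an asynchronous completion callback. The point is the behavioral change: waitAll() drains the queue to wait for in-flight work, then refills it via setup() so the same pool can serve another run.

#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <queue>
#include <vector>

// Simple blocking queue used here instead of G-API's QueueClass.
class IdQueue {
public:
    void push(std::size_t id) {
        { std::lock_guard<std::mutex> lk(m_m); m_q.push(id); }
        m_cv.notify_one();
    }
    void pop(std::size_t &id) {
        std::unique_lock<std::mutex> lk(m_m);
        m_cv.wait(lk, [this]{ return !m_q.empty(); });
        id = m_q.front();
        m_q.pop();
    }
private:
    std::mutex              m_m;
    std::condition_variable m_cv;
    std::queue<std::size_t> m_q;
};

struct Request { /* placeholder for InferenceEngine::InferRequest */ };

class RequestPoolSketch {
public:
    using Task = std::function<void(Request&)>;

    explicit RequestPoolSketch(std::vector<Request>&& requests)
        : m_requests(std::move(requests)) { setup(); }

    // Borrow an idle request, run the task, and return the id to the pool.
    // (The real backend returns the id from an async completion callback.)
    void execute(Task&& t) {
        std::size_t id = 0u;
        m_idle_ids.pop(id);
        t(m_requests[id]);
        m_idle_ids.push(id);
    }

    // Block until every request is idle again, then refill the queue so the
    // pool is usable for the next run -- the refill is what the diff adds.
    void waitAll() {
        for (std::size_t i = 0; i < m_requests.size(); ++i) {
            std::size_t id = 0u;
            m_idle_ids.pop(id);
        }
        setup();
    }

private:
    // Re-populate the idle-id queue with one id per request.
    void setup() {
        for (std::size_t i = 0; i < m_requests.size(); ++i) {
            m_idle_ids.push(i);
        }
    }

    IdQueue              m_idle_ids;
    std::vector<Request> m_requests;
};

Without that refill, a second invocation would block forever popping from an empty idle-id queue; that failure mode is what the new CallInferMultipleTimes test exercises by calling the compiled computation ten times.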