map/unmap, preventing getMat/getUMat from temp object, fix thread-unsafe code in `UMat::getMat()`

pull/5317/head
Andrey Pavlenko 9 years ago committed by Alexander Alekhin
parent 536634b1fe
commit cea2dafa0f
Changed files (lines changed):
  1. modules/core/include/opencv2/core/mat.hpp (1)
  2. modules/core/src/ocl.cpp (24)
  3. modules/core/src/umatrix.cpp (18)
  4. modules/core/test/test_umat.cpp (24)

@ -496,6 +496,7 @@ struct CV_EXPORTS UMatData
void* handle;
void* userdata;
int allocatorFlags_;
int mapcount;
};

@ -4514,6 +4514,7 @@ public:
CV_Assert(u->refcount >= 0);
CV_Assert(u->handle != 0 && u->urefcount == 0);
CV_Assert(u->mapcount == 0);
if(u->tempUMat())
{
CV_Assert(u->origdata);
@ -4572,6 +4573,9 @@ public:
else
{
cl_int retval = 0;
if (u->tempUMat())
{
CV_Assert(u->mapcount == 0);
void* data = clEnqueueMapBuffer(q, (cl_mem)u->handle, CL_TRUE,
(CL_MAP_READ | CL_MAP_WRITE),
0, u->size, 0, 0, 0, &retval);
@ -4580,6 +4584,7 @@ public:
CV_OclDbgAssert(clFinish(q) == CL_SUCCESS);
}
}
}
u->markHostCopyObsolete(false);
}
#ifdef HAVE_OPENCL_SVM
@ -4715,10 +4720,15 @@ public:
}
#endif
cl_int retval = 0;
cl_int retval = CL_SUCCESS;
if (!u->deviceMemMapped())
{
CV_Assert(u->refcount == 1);
CV_Assert(u->mapcount++ == 0);
u->data = (uchar*)clEnqueueMapBuffer(q, (cl_mem)u->handle, CL_TRUE,
(CL_MAP_READ | CL_MAP_WRITE),
0, u->size, 0, 0, 0, &retval);
}
if (u->data && retval == CL_SUCCESS)
{
u->markHostCopyObsolete(false);
@ -4765,7 +4775,6 @@ public:
if( !u->copyOnMap() && u->deviceMemMapped() )
{
CV_Assert(u->data != NULL);
u->markDeviceMemMapped(false);
#ifdef HAVE_OPENCL_SVM
if ((u->allocatorFlags_ & svm::OPENCL_SVM_BUFFER_MASK) != 0)
{
@ -4792,6 +4801,9 @@ public:
return;
}
#endif
if (u->refcount == 0)
{
CV_Assert(u->mapcount-- == 1);
CV_Assert((retval = clEnqueueUnmapMemObject(q,
(cl_mem)u->handle, u->data, 0, 0, 0)) == CL_SUCCESS);
if (Device::getDefault().isAMD())
@ -4799,9 +4811,11 @@ public:
// required for multithreaded applications (see stitching test)
CV_OclDbgAssert(clFinish(q) == CL_SUCCESS);
}
if (u->refcount == 0)
u->markDeviceMemMapped(false);
u->data = 0;
u->markDeviceCopyObsolete(false);
u->markHostCopyObsolete(true);
}
}
else if( u->copyOnMap() && u->deviceCopyObsolete() )
{
@ -4811,10 +4825,10 @@ public:
#endif
CV_Assert( (retval = clEnqueueWriteBuffer(q, (cl_mem)u->handle, CL_TRUE, 0,
u->size, alignedPtr.getAlignedPtr(), 0, 0, 0)) == CL_SUCCESS );
}
u->markDeviceCopyObsolete(false);
u->markHostCopyObsolete(true);
}
}
bool checkContinuous(int dims, const size_t sz[],
const size_t srcofs[], const size_t srcstep[],

@ -60,7 +60,7 @@ static Mutex umatLocks[UMAT_NLOCKS];
UMatData::UMatData(const MatAllocator* allocator)
{
prevAllocator = currAllocator = allocator;
urefcount = refcount = 0;
urefcount = refcount = mapcount = 0;
data = origdata = 0;
size = 0;
flags = 0;
@ -73,6 +73,7 @@ UMatData::~UMatData()
{
prevAllocator = currAllocator = 0;
urefcount = refcount = 0;
CV_Assert(mapcount == 0);
data = origdata = 0;
size = 0;
flags = 0;
@ -221,6 +222,7 @@ UMat Mat::getUMat(int accessFlags, UMatUsageFlags usageFlags) const
UMat hdr;
if(!data)
return hdr;
CV_Assert((!u || u->mapcount==0) && "Don't get UMat from temp-Mat!");
accessFlags |= ACCESS_RW;
UMatData* temp_u = u;
if(!temp_u)
@ -637,19 +639,29 @@ Mat UMat::getMat(int accessFlags) const
{
// Wraps this UMat's buffer in a Mat header, mapping device memory into host
// memory on first use. Returns an empty Mat if the UMat has no data.
if(!u)
return Mat();
// A "temp" UMat borrows its buffer from a Mat (via Mat::getUMat); mapping it
// back into a second Mat header would alias the same host memory, so it is
// rejected — callers must use copyTo() instead.
CV_Assert(!u->tempUMat() && "Don't get Mat from temp UMat! Use copyTo().");
// TODO Support ACCESS_READ (ACCESS_WRITE) without unnecessary data transfers
accessFlags |= ACCESS_RW;
UMatDataAutoLock autolock(u);
// Atomically bump refcount BEFORE deciding to map: only the thread that
// observes the old value 0 performs the map. Incrementing first closes the
// race where two threads both see refcount == 0 and map the buffer twice
// (this is the thread-unsafety fix named in the commit message).
if(CV_XADD(&u->refcount, 1) == 0)
u->currAllocator->map(u, accessFlags);
// NOTE(review): this unconditional assert appears to be the line REMOVED by
// the underlying diff (the scrape lost the +/- markers); with it present the
// else-branch below is unreachable — confirm against the upstream commit.
CV_Assert(u->data != 0);
if (u->data != 0)
{
// Map succeeded: build a Mat header aliasing the mapped host pointer,
// honoring this UMat's offset and step.
Mat hdr(dims, size.p, type(), u->data + offset, step.p);
hdr.flags = flags;
hdr.u = u;
hdr.datastart = u->data;
hdr.data = u->data + offset;
hdr.datalimit = hdr.dataend = u->data + u->size;
// The returned header holds its own reference on the shared UMatData.
CV_XADD(&hdr.u->refcount, 1);
return hdr;
}
else
{
// Map failed: roll back the refcount increment taken above, then report.
CV_XADD(&u->refcount, -1);
CV_Assert(u->data != 0 && "Error mapping of UMat to host memory.");
return Mat();
}
}
void* UMat::handle(int accessFlags) const
{

@ -243,21 +243,25 @@ TEST_P(UMatBasicTests, GetUMat)
EXPECT_MAT_NEAR(ub, ua, 0);
}
{
Mat b;
b = a.getUMat(ACCESS_RW).getMat(ACCESS_RW);
UMat u = a.getUMat(ACCESS_RW);
{
Mat b = u.getMat(ACCESS_RW);
EXPECT_MAT_NEAR(b, a, 0);
}
}
{
Mat b;
b = ua.getMat(ACCESS_RW);
EXPECT_MAT_NEAR(b, a, 0);
}
{
UMat ub;
ub = ua.getMat(ACCESS_RW).getUMat(ACCESS_RW);
Mat m = ua.getMat(ACCESS_RW);
{
UMat ub = m.getUMat(ACCESS_RW);
EXPECT_MAT_NEAR(ub, ua, 0);
}
}
}
INSTANTIATE_TEST_CASE_P(UMat, UMatBasicTests, Combine(testing::Values(CV_8U), testing::Values(1, 2),
testing::Values(cv::Size(1, 1), cv::Size(1, 128), cv::Size(128, 1), cv::Size(128, 128), cv::Size(640, 480)), Bool()));
@ -1268,5 +1272,17 @@ TEST(UMat, DISABLED_Test_same_behaviour_write_and_write)
ASSERT_TRUE(exceptionDetected); // data race
}
// Regression test: writes made through a Mat header obtained from
// UMat::getMat() must be visible in the UMat once the Mat is released
// (i.e. the mapped buffer is properly synchronized/unmapped).
TEST(UMat, mat_umat_sync)
{
UMat u(10, 10, CV_8UC1, Scalar(1));
{
// reshape(1) yields a new header over the SAME mapped buffer, so the
// setTo below writes into u's storage; the inner scope forces the
// mapping Mat to be destroyed (unmapped) before we read u back.
Mat m = u.getMat(ACCESS_RW).reshape(1);
m.setTo(Scalar(255));
}
// After unmap, every element of u must equal 255 — count mismatches.
UMat uDiff;
compare(u, 255, uDiff, CMP_NE);
ASSERT_EQ(0, countNonZero(uDiff));
}
} } // namespace cvtest::ocl

Loading…
Cancel
Save