fixed "shadow" warnings in nvidia folder

pull/2/head
Marina Kolpakova 13 years ago
parent f33d90220d
commit c17bd8c11b
Changed files:
  1. modules/gpu/src/nvidia/NCVBroxOpticalFlow.cu (64 lines changed)
  2. modules/gpu/src/nvidia/NCVHaarObjectDetection.cu (24 lines changed)
  3. modules/gpu/src/nvidia/NPP_staging/NPP_staging.cu (14 lines changed)
  4. modules/gpu/src/nvidia/core/NCV.cu (8 lines changed)
  5. modules/gpu/src/nvidia/core/NCV.hpp (54 lines changed)

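For context: a compiler's -Wshadow diagnostic fires whenever a declaration reuses a name already visible in an enclosing scope (an outer local, a function parameter, or a class member). Every hunk below silences such a warning by renaming the inner declaration, most often with a trailing underscore. A minimal sketch, not taken from the commit, that reproduces the warning under g++ -Wshadow and shows the fix:

#include <cstdio>

struct Vec
{
    unsigned length;

    // A parameter spelled `length` would shadow the data member and
    // trigger -Wshadow; `length_` mirrors the NCVVectorAlloc fix below.
    explicit Vec(unsigned length_) : length(length_) {}
};

int main()
{
    Vec v(42);
    std::printf("length = %u\n", v.length);
    return 0;
}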
modules/gpu/src/nvidia/NCVBroxOpticalFlow.cu

@@ -940,7 +940,7 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
 ncvAssertCUDAReturn(cudaMemsetAsync(dv.ptr(), 0, kLevelSizeInBytes, stream), NCV_CUDA_ERROR);
 //texture format descriptor
-cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>();
+cudaChannelFormatDesc ch_desc = cudaCreateChannelDesc<float>();
 I0 = *img0Iter;
 I1 = *img1Iter;
@@ -948,8 +948,8 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
 ++img0Iter;
 ++img1Iter;
-ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I0, I0->ptr(), channel_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I1, I1->ptr(), channel_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I0, I0->ptr(), ch_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I1, I1->ptr(), ch_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
 //compute derivatives
 dim3 dBlocks(iDivUp(kLevelWidth, 32), iDivUp(kLevelHeight, 6));
@@ -989,20 +989,20 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
 ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (Iy.ptr(), srcSize, nSrcStep, Ixy.ptr(), srcSize, nSrcStep, oROI,
 nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
-ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ix, Ix.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ixx, Ixx.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ix0, Ix0.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iy, Iy.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iyy, Iyy.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iy0, Iy0.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ixy, Ixy.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ix, Ix.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ixx, Ixx.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ix0, Ix0.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iy, Iy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iyy, Iyy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iy0, Iy0.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ixy, Ixy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
 // flow
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_u, ptrU->ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_v, ptrV->ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_u, ptrU->ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_v, ptrV->ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
 // flow increments
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
 dim3 psor_blocks(iDivUp(kLevelWidth, PSOR_TILE_WIDTH), iDivUp(kLevelHeight, PSOR_TILE_HEIGHT));
 dim3 psor_threads(PSOR_TILE_WIDTH, PSOR_TILE_HEIGHT);
@@ -1032,37 +1032,37 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
 ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_u, num_u.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_v, num_v.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_u, num_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_v, num_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
 prepare_sor_stage_2<<<psor_blocks, psor_threads, 0, stream>>>(denom_u.ptr(), denom_v.ptr(), kLevelWidth, kLevelHeight, kLevelStride);
 ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
 // linear system coefficients
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_u, num_u.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_v, num_v.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_u, num_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_v, num_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_inv_denominator_u, denom_u.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_inv_denominator_v, denom_v.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_inv_denominator_u, denom_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_inv_denominator_v, denom_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
 //solve linear system
 for (Ncv32u solver_iteration = 0; solver_iteration < desc.number_of_solver_iterations; ++solver_iteration)
 {
 float omega = 1.99f;
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
 sor_pass<0><<<sor_blocks, sor_threads, 0, stream>>>
 (du_new.ptr(),
@@ -1079,8 +1079,8 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
 ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du_new.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv_new.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du_new.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv_new.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
 sor_pass<1><<<sor_blocks, sor_threads, 0, stream>>>
 (du.ptr(),
@@ -1097,8 +1097,8 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
 ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
-ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
+ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
 }//end of solver loop
 }// end of inner loop

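In NCVBroxOpticalFlow.cu above, every change is the same mechanical rename of a local cudaChannelFormatDesc from channel_desc to ch_desc at each texture-binding site. The shadowed declaration itself lies outside these hunks; presumably another channel_desc exists in an enclosing scope of the multi-level loop. A hedged stand-in (hypothetical names, plain C++ rather than the CUDA texture API):

#include <cstdio>

int main()
{
    float channel_desc = 1.0f; // hypothetical outer descriptor

    for (int level = 0; level < 3; ++level)
    {
        // Re-declaring `channel_desc` here would shadow the outer one
        // and trip -Wshadow; the commit's fix is the shorter inner
        // name `ch_desc`.
        float ch_desc = channel_desc * static_cast<float>(level + 1);
        std::printf("level %d: ch_desc = %f\n", level, ch_desc);
    }
    return 0;
}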
modules/gpu/src/nvidia/NCVHaarObjectDetection.cu

@@ -1622,16 +1622,16 @@ NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
 continue;
 }
-NcvSize32s srcRoi, srcIIRoi, scaledIIRoi, searchRoi;
+NcvSize32s srcRoi_, srcIIRo_i, scaledIIRoi, searchRoi;
-srcRoi.width = d_srcImg.width();
-srcRoi.height = d_srcImg.height();
+srcRoi_.width = d_srcImg.width();
+srcRoi_.height = d_srcImg.height();
-srcIIRoi.width = srcRoi.width + 1;
-srcIIRoi.height = srcRoi.height + 1;
+srcIIRo_i.width = srcRoi_.width + 1;
+srcIIRo_i.height = srcRoi_.height + 1;
-scaledIIRoi.width = srcIIRoi.width / scale;
-scaledIIRoi.height = srcIIRoi.height / scale;
+scaledIIRoi.width = srcIIRo_i.width / scale;
+scaledIIRoi.height = srcIIRo_i.height / scale;
 searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
 searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
@@ -1659,12 +1659,12 @@ NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
 {
 Ncv32u scale = scalesVector[i];
-NcvSize32u srcRoi, scaledIIRoi, searchRoi;
+NcvSize32u srcRoi_, scaledIIRoi, searchRoi;
 NcvSize32u srcIIRoi;
-srcRoi.width = d_srcImg.width();
-srcRoi.height = d_srcImg.height();
-srcIIRoi.width = srcRoi.width + 1;
-srcIIRoi.height = srcRoi.height + 1;
+srcRoi_.width = d_srcImg.width();
+srcRoi_.height = d_srcImg.height();
+srcIIRoi.width = srcRoi_.width + 1;
+srcIIRoi.height = srcRoi_.height + 1;
 scaledIIRoi.width = srcIIRoi.width / scale;
 scaledIIRoi.height = srcIIRoi.height / scale;
 searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;

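The NCVHaarObjectDetection.cu hunks rename the per-scale ROI locals (srcRoi to srcRoi_, srcIIRoi to srcIIRo_i) inside two loops over detection scales; the outer declarations they shadowed are not shown in the hunks, so the sketch below invents them. Note the integral-image ROI being one pixel larger and the integer division by scale, exactly as in the diff:

#include <cstdio>

struct NcvSize { unsigned width, height; };

int main()
{
    NcvSize srcRoi = { 640, 480 }; // hypothetical outer ROI that got shadowed

    for (unsigned scale = 1; scale <= 2; ++scale)
    {
        // Inner ROI renamed srcRoi_ so it no longer shadows the outer srcRoi.
        NcvSize srcRoi_ = { srcRoi.width, srcRoi.height };
        NcvSize srcIIRoi = { srcRoi_.width + 1, srcRoi_.height + 1 }; // integral image is 1px larger
        NcvSize scaledIIRoi = { srcIIRoi.width / scale, srcIIRoi.height / scale };
        std::printf("scale %u: scaled II ROI %ux%u\n",
                    scale, scaledIIRoi.width, scaledIIRoi.height);
    }
    return 0;
}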
modules/gpu/src/nvidia/NPP_staging/NPP_staging.cu

@@ -1414,17 +1414,17 @@ NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen,
 //calculate hierarchical partial sums
 for (Ncv32u i=1; i<partSumNums.size()-1; i++)
 {
-dim3 grid(partSumNums[i+1]);
-if (grid.x > 65535)
+dim3 grid_partial(partSumNums[i+1]);
+if (grid_partial.x > 65535)
 {
-grid.y = (grid.x + 65534) / 65535;
-grid.x = 65535;
+grid_partial.y = (grid_partial.x + 65534) / 65535;
+grid_partial.x = 65535;
 }
-if (grid.x != 1)
+if (grid_partial.x != 1)
 {
 removePass1Scan
 <false, true>
-<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
+<<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>>
 (d_hierSums.ptr() + partSumOffsets[i],
 partSumNums[i], NULL,
 d_hierSums.ptr() + partSumOffsets[i+1],
@@ -1434,7 +1434,7 @@ NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen,
 {
 removePass1Scan
 <false, false>
-<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
+<<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>>
 (d_hierSums.ptr() + partSumOffsets[i],
 partSumNums[i], NULL,
 NULL,

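In NPP_staging.cu the dim3 grid declared for the hierarchical partial-sum passes becomes grid_partial, presumably to stop it shadowing a launch grid declared earlier in compactVector_32u_device (the outer declaration sits outside the hunk). The clamping logic it carries is worth a second look: CUDA devices of that era capped a grid at 65535 blocks per dimension, so an oversized 1-D grid is folded into two dimensions, rounding up so no block is lost. A host-side sketch of that arithmetic, with a stand-in struct instead of CUDA's dim3:

#include <cstdio>

struct Dim3 { unsigned x, y; }; // stand-in for CUDA's dim3

int main()
{
    Dim3 grid_partial = { 200000u, 1u }; // hypothetical block count

    // Same fold as the diff: keep x at the 65535-block limit and move
    // the excess into y, rounding up.
    if (grid_partial.x > 65535)
    {
        grid_partial.y = (grid_partial.x + 65534) / 65535;
        grid_partial.x = 65535;
    }
    std::printf("launch grid: %u x %u blocks\n", grid_partial.x, grid_partial.y);
    return 0;
}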
modules/gpu/src/nvidia/core/NCV.cu

@@ -723,16 +723,16 @@ static NCVStatus drawRectsWrapperHost(T *h_dst,
 if (rect.x < dstWidth)
 {
-for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
+for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
 {
-h_dst[i*dstStride+rect.x] = color;
+h_dst[each*dstStride+rect.x] = color;
 }
 }
 if (rect.x+rect.width-1 < dstWidth)
 {
-for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
+for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
 {
-h_dst[i*dstStride+rect.x+rect.width-1] = color;
+h_dst[each*dstStride+rect.x+rect.width-1] = color;
 }
 }
 if (rect.y < dstHeight)

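The NCV.cu change renames the row counter in drawRectsWrapperHost from i to each for the two vertical-edge loops; the name i was evidently already taken in an enclosing scope, likely a loop over the rectangles themselves, which is not visible here. A minimal analogue:

#include <cstdio>

int main()
{
    for (unsigned i = 0; i < 2; ++i) // hypothetical outer loop over rects
    {
        // The inner row counter used to be `i` as well, shadowing the
        // outer one; the commit renames it (here, as there, to `each`).
        for (unsigned each = 0; each < 3; ++each)
            std::printf("rect %u, row %u\n", i, each);
    }
    return 0;
}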
modules/gpu/src/nvidia/core/NCV.hpp

@@ -623,11 +623,11 @@ class NCVVectorAlloc : public NCVVector<T>
 {
 NCVVectorAlloc();
 NCVVectorAlloc(const NCVVectorAlloc &);
-NCVVectorAlloc& operator=(const NCVVectorAlloc<T>&);
+NCVVectorAlloc& operator=(const NCVVectorAlloc<T>&);
 public:
-NCVVectorAlloc(INCVMemAllocator &allocator_, Ncv32u length)
+NCVVectorAlloc(INCVMemAllocator &allocator_, Ncv32u length_)
 :
 allocator(allocator_)
 {
@@ -636,11 +636,11 @@ public:
 this->clear();
 this->allocatedMem.clear();
-ncvStat = allocator.alloc(this->allocatedMem, length * sizeof(T));
+ncvStat = allocator.alloc(this->allocatedMem, length_ * sizeof(T));
 ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "NCVVectorAlloc ctor:: alloc failed", );
 this->_ptr = (T *)this->allocatedMem.begin.ptr;
-this->_length = length;
+this->_length = length_;
 this->_memtype = this->allocatedMem.begin.memtype;
 }
@@ -698,15 +698,15 @@ public:
 this->bReused = true;
 }
-NCVVectorReuse(const NCVMemSegment &memSegment, Ncv32u length)
+NCVVectorReuse(const NCVMemSegment &memSegment, Ncv32u length_)
 {
 this->bReused = false;
 this->clear();
-ncvAssertPrintReturn(length * sizeof(T) <= memSegment.size, \
+ncvAssertPrintReturn(length_ * sizeof(T) <= memSegment.size, \
 "NCVVectorReuse ctor:: memory binding failed due to size mismatch", );
-this->_length = length;
+this->_length = length_;
 this->_ptr = (T *)memSegment.begin.ptr;
 this->_memtype = memSegment.begin.memtype;
@@ -841,34 +841,34 @@ class NCVMatrixAlloc : public NCVMatrix<T>
 NCVMatrixAlloc& operator=(const NCVMatrixAlloc &);
 public:
-NCVMatrixAlloc(INCVMemAllocator &allocator, Ncv32u width, Ncv32u height, Ncv32u _pitch=0)
+NCVMatrixAlloc(INCVMemAllocator &allocator_, Ncv32u width_, Ncv32u height_, Ncv32u pitch_=0)
 :
-allocator(allocator)
+allocator(allocator_)
 {
 NCVStatus ncvStat;
 this->clear();
 this->allocatedMem.clear();
-Ncv32u widthBytes = width * sizeof(T);
+Ncv32u widthBytes = width_ * sizeof(T);
 Ncv32u pitchBytes = alignUp(widthBytes, allocator.alignment());
-if (_pitch != 0)
+if (pitch_ != 0)
 {
-ncvAssertPrintReturn(_pitch >= pitchBytes &&
-(_pitch & (allocator.alignment() - 1)) == 0,
+ncvAssertPrintReturn(pitch_ >= pitchBytes &&
+(pitch_ & (allocator.alignment() - 1)) == 0,
 "NCVMatrixAlloc ctor:: incorrect pitch passed", );
-pitchBytes = _pitch;
+pitchBytes = pitch_;
 }
-Ncv32u requiredAllocSize = pitchBytes * height;
+Ncv32u requiredAllocSize = pitchBytes * height_;
 ncvStat = allocator.alloc(this->allocatedMem, requiredAllocSize);
 ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "NCVMatrixAlloc ctor:: alloc failed", );
 this->_ptr = (T *)this->allocatedMem.begin.ptr;
-this->_width = width;
-this->_height = height;
+this->_width = width_;
+this->_height = height_;
 this->_pitch = pitchBytes;
 this->_memtype = this->allocatedMem.begin.memtype;
 }
@@ -916,34 +916,34 @@ class NCVMatrixReuse : public NCVMatrix<T>
 public:
-NCVMatrixReuse(const NCVMemSegment &memSegment, Ncv32u alignment, Ncv32u width, Ncv32u height, Ncv32u pitch=0, NcvBool bSkipPitchCheck=false)
+NCVMatrixReuse(const NCVMemSegment &memSegment, Ncv32u alignment, Ncv32u width_, Ncv32u height_, Ncv32u pitch_=0, NcvBool bSkipPitchCheck=false)
 {
 this->bReused = false;
 this->clear();
-Ncv32u widthBytes = width * sizeof(T);
+Ncv32u widthBytes = width_ * sizeof(T);
 Ncv32u pitchBytes = alignUp(widthBytes, alignment);
-if (pitch != 0)
+if (pitch_ != 0)
 {
 if (!bSkipPitchCheck)
 {
-ncvAssertPrintReturn(pitch >= pitchBytes &&
-(pitch & (alignment - 1)) == 0,
+ncvAssertPrintReturn(pitch_ >= pitchBytes &&
+(pitch_ & (alignment - 1)) == 0,
 "NCVMatrixReuse ctor:: incorrect pitch passed", );
 }
 else
 {
-ncvAssertPrintReturn(pitch >= widthBytes, "NCVMatrixReuse ctor:: incorrect pitch passed", );
+ncvAssertPrintReturn(pitch_ >= widthBytes, "NCVMatrixReuse ctor:: incorrect pitch passed", );
 }
-pitchBytes = pitch;
+pitchBytes = pitch_;
 }
-ncvAssertPrintReturn(pitchBytes * height <= memSegment.size, \
+ncvAssertPrintReturn(pitchBytes * height_ <= memSegment.size, \
 "NCVMatrixReuse ctor:: memory binding failed due to size mismatch", );
-this->_width = width;
-this->_height = height;
+this->_width = width_;
+this->_height = height_;
 this->_pitch = pitchBytes;
 this->_ptr = (T *)memSegment.begin.ptr;
 this->_memtype = memSegment.begin.memtype;

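The NCV.hpp constructors take the opposite direction from the old code's _pitch: every parameter gets a trailing underscore (length_, width_, height_, pitch_, allocator_). Since the data members already carry a leading underscore (_width, _height), one plausible reading, though the shadowed declarations are not in the hunks, is that the parameters collided with the inherited accessors width() and height() and with the allocator member. A sketch under that assumption:

#include <cstdio>

class Matrix
{
public:
    unsigned width() const { return _width; } // accessor, as on NCVMatrix
protected:
    unsigned _width = 0;
};

class MatrixAlloc : public Matrix
{
public:
    // A parameter named `width` would shadow the inherited width()
    // accessor under -Wshadow; the trailing underscore sidesteps it.
    explicit MatrixAlloc(unsigned width_) { _width = width_; }
};

int main()
{
    MatrixAlloc m(640);
    std::printf("width = %u\n", m.width());
    return 0;
}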