//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2014, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
// // Author: Tolga Birdal #include "precomp.hpp" namespace cv { namespace ppf_match_3d { static void subtractColumns(Mat srcPC, double mean[3]) { int height = srcPC.rows; for (int i=0; i>1; for (;;) { if (high <= low) /* One element only */ return arr[median] ; if (high == low + 1) { /* Two elements only */ if (arr[low] > arr[high]) std::swap(arr[low], arr[high]) ; return arr[median] ; } /* Find median of low, middle and high items; swap into position low */ middle = (low + high) >>1; if (arr[middle] > arr[high]) std::swap(arr[middle], arr[high]) ; if (arr[low] > arr[high]) std::swap(arr[low], arr[high]) ; if (arr[middle] > arr[low]) std::swap(arr[middle], arr[low]) ; /* Swap low item (now in position middle) into position (low+1) */ std::swap(arr[middle], arr[low+1]) ; /* Nibble from each end towards middle, swapping items when stuck */ ll = low + 1; hh = high; for (;;) { do ll++; while (arr[low] > arr[ll]) ; do hh--; while (arr[hh] > arr[low]) ; if (hh < ll) break; std::swap(arr[ll], arr[hh]) ; } /* Swap middle item (in position low) back into correct position */ std::swap(arr[low], arr[hh]) ; /* Re-set active partition */ if (hh <= median) low = ll; if (hh >= median) high = hh - 1; } } static float getRejectionThreshold(float* r, int m, float outlierScale) { float* t=(float*)calloc(m, sizeof(float)); int i=0; float s=0, medR, threshold; memcpy(t, r, m*sizeof(float)); medR=medianF(t, m); for (i=0; i(0,0); Pose[1] = DCM.at(0,1); Pose[2] = DCM.at(0,2); Pose[4] = DCM.at(1,0); Pose[5] = DCM.at(1,1); Pose[6] = DCM.at(1,2); Pose[8] = DCM.at(2,0); Pose[9] = DCM.at(2,1); Pose[10] = DCM.at(2,2); Pose[3]=x[3]; Pose[7]=x[4]; Pose[11]=x[5]; Pose[15]=1; } /* Fast way to look up the duplicates duplicates is pre-allocated make sure that the max element in array will not exceed maxElement */ static hashtable_int* getHashtable(int* data, size_t length, int numMaxElement) { hashtable_int* hashtable = hashtableCreate(static_cast(numMaxElement*2), 0); for (size_t i = 0; i < length; 
i++) { const KeyType key = (KeyType)data[i]; hashtableInsertHashed(hashtable, key+1, reinterpret_cast(i+1)); } return hashtable; } // source point clouds are assumed to contain their normals int ICP::registerModelToScene(const Mat& srcPC, const Mat& dstPC, double& residual, double pose[16]) { int n = srcPC.rows; const bool useRobustReject = m_rejectionScale>0; Mat srcTemp = srcPC.clone(); Mat dstTemp = dstPC.clone(); double meanSrc[3], meanDst[3]; computeMeanCols(srcTemp, meanSrc); computeMeanCols(dstTemp, meanDst); double meanAvg[3]={0.5*(meanSrc[0]+meanDst[0]), 0.5*(meanSrc[1]+meanDst[1]), 0.5*(meanSrc[2]+meanDst[2])}; subtractColumns(srcTemp, meanAvg); subtractColumns(dstTemp, meanAvg); double distSrc = computeDistToOrigin(srcTemp); double distDst = computeDistToOrigin(dstTemp); double scale = (double)n / ((distSrc + distDst)*0.5); srcTemp(cv::Range(0, srcTemp.rows), cv::Range(0,3)) *= scale; dstTemp(cv::Range(0, dstTemp.rows), cv::Range(0,3)) *= scale; Mat srcPC0 = srcTemp; Mat dstPC0 = dstTemp; // initialize pose matrixIdentity(4, pose); void* flann = indexPCFlann(dstPC0); Mat M = Mat::eye(4,4,CV_64F); double tempResidual = 0; // walk the pyramid for (int level = m_numLevels-1; level >=0; level--) { const double impact = 2; double div = pow((double)impact, (double)level); //double div2 = div*div; const int numSamples = cvRound((double)(n/(div))); const double TolP = m_tolerance*(double)(level+1)*(level+1); const int MaxIterationsPyr = cvRound((double)m_maxIterations/(level+1)); // Obtain the sampled point clouds for this level: Also rotates the normals Mat srcPCT = transformPCPose(srcPC0, pose); const int sampleStep = cvRound((double)n/(double)numSamples); std::vector srcSampleInd; /* Note by Tolga Birdal Downsample the model point clouds. If more optimization is required, one could also downsample the scene points, but I think this might decrease the accuracy. That's why I won't be implementing it at this moment. 
Also note that you have to compute a KD-tree for each level. */ srcPCT = samplePCUniformInd(srcPCT, sampleStep, srcSampleInd); double fval_old=9999999999; double fval_perc=0; double fval_min=9999999999; Mat Src_Moved = srcPCT.clone(); int i=0; size_t numElSrc = (size_t)Src_Moved.rows; int sizesResult[2] = {(int)numElSrc, 1}; float* distances = new float[numElSrc]; int* indices = new int[numElSrc]; Mat Indices(2, sizesResult, CV_32S, indices, 0); Mat Distances(2, sizesResult, CV_32F, distances, 0); // use robust weighting for outlier treatment int* indicesModel = new int[numElSrc]; int* indicesScene = new int[numElSrc]; int* newI = new int[numElSrc]; int* newJ = new int[numElSrc]; double PoseX[16]={0}; matrixIdentity(4, PoseX); while ( (!(fval_perc<(1+TolP) && fval_perc>(1-TolP))) && isize; di++) { hashnode_i *node = duplicateTable->nodes[di]; if (node) { // select the first node size_t idx = reinterpret_cast(node->data)-1, dn=0; int dup = (int)node->key-1; size_t minIdxD = idx; float minDist = distances[idx]; while ( node ) { idx = reinterpret_cast(node->data)-1; if (distances[idx] < minDist) { minDist = distances[idx]; minIdxD = idx; } node = node->next; dn++; } indicesModel[ selInd ] = newI[ minIdxD ]; indicesScene[ selInd ] = dup ; selInd++; } } hashtableDestroy(duplicateTable); if (selInd) { Mat Src_Match = Mat((int)selInd, srcPCT.cols, CV_64F); Mat Dst_Match = Mat((int)selInd, srcPCT.cols, CV_64F); for (di=0; di& poses) { for (size_t i=0; ipose); registerModelToScene(srcTemp, dstPC, poses[i]->residual, poseICP); poses[i]->appendPose(poseICP); } return 0; } } // namespace ppf_match_3d } // namespace cv