add libs hint on OS X for cmake

pull/276/head
Wangyida 9 years ago
parent 97d49a8834
commit 9852e8096e
  1. 59
      modules/README.md.orig
  2. 18
      modules/cnn_3dobj/CMakeLists.txt
  3. 0
      modules/cnn_3dobj/FindCaffe.cmake
  4. 10
      modules/cnn_3dobj/FindGlog.cmake
  5. 10
      modules/cnn_3dobj/FindProtobuf.cmake
  6. 20
      modules/cnn_3dobj/README.md
  7. 0
      modules/cnn_3dobj/cnn_3dobj_config.hpp.in
  8. 0
      modules/cnn_3dobj/doc/cnn_3dobj.bib
  9. 2
      modules/cnn_3dobj/include/opencv2/cnn_3dobj.hpp
  10. 0
      modules/cnn_3dobj/include/opencv2/cnn_3dobj_config.hpp
  11. 4
      modules/cnn_3dobj/samples/CMakeLists.txt
  12. 0
      modules/cnn_3dobj/samples/data/3Dmodel/ant.ply
  13. 0
      modules/cnn_3dobj/samples/data/3Dmodel/ape.ply
  14. 105411
      modules/cnn_3dobj/samples/data/3Dmodel/bunny.ply
  15. 0
      modules/cnn_3dobj/samples/data/3Dmodel/cow.ply
  16. BIN
      modules/cnn_3dobj/samples/data/3Dmodel/horse.ply
  17. 0
      modules/cnn_3dobj/samples/data/3Dmodel/plane.ply
  18. 0
      modules/cnn_3dobj/samples/data/images_mean/triplet_mean.binaryproto
  19. 0
      modules/cnn_3dobj/samples/data/label_all.txt
  20. 11
      modules/cnn_3dobj/samples/datagen.sh
  21. 28
      modules/cnn_3dobj/samples/demo_classify.cpp
  22. 8
      modules/cnn_3dobj/samples/demo_model_analysis.cpp
  23. 25
      modules/cnn_3dobj/samples/demo_sphereview_data.cpp
  24. 392
      modules/cnn_3dobj/samples/demo_video.cpp
  25. 0
      modules/cnn_3dobj/src/cnn_feature.cpp
  26. 1
      modules/cnn_3dobj/src/cnn_sphereview.cpp
  27. 1
      modules/cnn_3dobj/src/precomp.hpp
  28. 110
      modules/cnn_3dobj/src/sphereview3d.cpp
  29. 0
      modules/cnn_3dobj/test/test_cnn_3dobj_feature_extract.cpp
  30. 0
      modules/cnn_3dobj/test/test_main.cpp
  31. 0
      modules/cnn_3dobj/test/test_precomp.hpp
  32. BIN
      modules/cnn_3dobj/testdata/cv/1_8.png
  33. BIN
      modules/cnn_3dobj/testdata/cv/3d_triplet_iter_30000.caffemodel
  34. 2
      modules/cnn_3dobj/testdata/cv/3d_triplet_testIMG.prototxt
  35. BIN
      modules/cnn_3dobj/testdata/cv/4_78.png
  36. 52
      modules/cnn_3dobj/testdata/cv/caffemodel_list.txt
  37. 0
      modules/cnn_3dobj/tutorials/data_generation/data_generation.markdown
  38. 0
      modules/cnn_3dobj/tutorials/feature_classification/classify.markdown
  39. 0
      modules/cnn_3dobj/tutorials/model_analysis/model_analysis.markdown
  40. 0
      modules/cnn_3dobj/tutorials/table_of_content_cnn_3dobj.markdown

@@ -1,59 +0,0 @@
An overview of the contrib modules and a small explanation
----------------------------------------------------------
This list gives an overview of all modules available inside the contrib repository.
These are also the correct names to use when disabling the build of a specific module by adding
```
$ cmake -D OPENCV_EXTRA_MODULES_PATH=<opencv_contrib>/modules -D BUILD_opencv_reponame=OFF <opencv_source_directory>
```
1. **opencv_adas**: Advanced Driver Assistance Systems module with Forward Collision Warning.
2. **opencv_bgsegm**: Improved Adaptive Background Mixture Model for Real-time Tracking / Visual Tracking of Human Visitors under Variable-Lighting Conditions.
3. **opencv_bioinspired**: Biologically inspired vision models and derived tools.
4. **opencv_ccalib**: Custom Calibration Pattern for 3D reconstruction.
5. **opencv_cvv**: GUI for Interactive Visual Debugging of Computer Vision Programs.
6. **opencv_datasets**: Interface for working with existing computer vision databases.
7. **opencv_datasettools**: Tools for working with different datasets.
8. **opencv_face**: Recently added face recognition software which is not yet stabilized.
9. **opencv_latentsvm**: Implementation of the LatentSVM detector algorithm.
10. **opencv_line_descriptor**: Binary descriptors for lines extracted from an image.
11. **opencv_matlab**: OpenCV Matlab Code Generator.
12. **opencv_optflow**: Optical Flow Algorithms for tracking points.
13. **opencv_reg**: Image Registration module.
14. **opencv_rgbd**: RGB-Depth Processing module.
15. **opencv_saliency**: Saliency API, understanding where humans focus given a scene.
16. **opencv_surface_matching**: Surface Matching Algorithm Through 3D Features.
17. **opencv_text**: Scene Text Detection and Recognition in Natural Scene Images.
18. **opencv_tracking**: Long-term optical tracking API.
19. **opencv_xfeatures2d**: Extra 2D Features Framework containing experimental and non-free 2D feature algorithms.
20. **opencv_ximgproc**: Extended Image Processing: Structured Forests / Domain Transform Filter / Guided Filter / Adaptive Manifold Filter / Joint Bilateral Filter / Superpixels.
21. **opencv_xobjdetect**: Integral Channel Features Detector Framework.
22. **opencv_xphoto**: Additional photo processing algorithms: Color balance / Denoising / Inpainting.
<<<<<<< 54d9fdeb5ed51d326de3d2f1383f8e330f114381
23. **opencv_stereo**: Stereo Correspondence done with different descriptors: Census / CS-Census / MCT / BRIEF / MV / RT.
=======
23. **opencv_stereo**: Stereo Correspondence done with different descriptors: Census / CS-Census / MCT / BRIEF / MV / RT.
>>>>>>> modify README under modules

@@ -8,6 +8,22 @@ else()
message(STATUS "Caffe: NO")
endif()
find_package(Protobuf)
if(Protobuf_FOUND)
message(STATUS "Protobuf: YES")
set(HAVE_PROTOBUF 1)
else()
message(STATUS "Protobuf: NO")
endif()
find_package(Glog)
if(Glog_FOUND)
message(STATUS "Glog: YES")
set(HAVE_GLOG 1)
else()
message(STATUS "Glog: NO")
endif()
if(HAVE_CAFFE)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cnn_3dobj_config.hpp.in
${CMAKE_CURRENT_SOURCE_DIR}/include/opencv2/cnn_3dobj_config.hpp @ONLY)
@@ -21,6 +37,6 @@ endif()
ocv_define_module(cnn_3dobj opencv_core opencv_imgproc opencv_viz opencv_highgui OPTIONAL WRAP python)
if(${Caffe_FOUND})
target_link_libraries(opencv_cnn_3dobj ${Caffe_LIBS})
target_link_libraries(opencv_cnn_3dobj ${Caffe_LIBS} ${Glog_LIBS} ${Protobuf_LIBS})
endif()
endif()

@@ -0,0 +1,10 @@
# Glog package for CNN Triplet training
unset(Glog_FOUND)
find_library(Glog_LIBS NAMES glog
HINTS
/usr/local/lib)
if(Glog_LIBS)
set(Glog_FOUND 1)
endif()

@@ -0,0 +1,10 @@
# Protobuf package for CNN Triplet training
unset(Protobuf_FOUND)
find_library(Protobuf_LIBS NAMES protobuf
HINTS
/usr/local/lib)
if(Protobuf_LIBS)
set(Protobuf_FOUND 1)
endif()
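Both find modules hint only /usr/local/lib, which matches a stock Homebrew layout on OS X. A sketch of how the hint list could be broadened for other setups (the extra MacPorts prefix is an illustration, not part of this commit):
```
find_library(Protobuf_LIBS NAMES protobuf
             HINTS
             /usr/local/lib  # stock prefix, also Homebrew on OS X
             /opt/local/lib) # MacPorts
```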

@@ -30,7 +30,7 @@ $ sudo make install
$ cd <opencv_source_directory>
$ mkdir build
$ cd build
$ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D WITH_TBB=ON -D BUILD_NEW_PYTHON_SUPPORT=OFF -D WITH_V4L=ON -D WITH_QT=ON -D WITH_OPENGL=ON -D WITH_VTK=ON -D INSTALL_TESTS=ON -D OPENCV_EXTRA_MODULES_PATH=<opencv_contrib>/modules ..
$ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D WITH_TBB=ON -D BUILD_NEW_PYTHON_SUPPORT=OFF -D WITH_V4L=ON -D WITH_QT=OFF -D WITH_OPENGL=ON -D WITH_VTK=ON -D INSTALL_TESTS=ON -D OPENCV_EXTRA_MODULES_PATH=<opencv_contrib>/modules ..
$ make -j4
$ sudo make install
```
@@ -56,22 +56,24 @@ $ make
##Demo1: training data generation
####Image generation from different poses: by default 4 models are used, producing 276 images in all, with 69 images per class. If you want to use additional .ply models, change the class-number parameter to the new number of classes and give each new model its own class label. If you want to train the network and extract features from RGB images, set the parameter rgb_use to 1.
```
$ ./sphereview_test -plymodel=../data/3Dmodel/ape.ply -label_class=0
$ ./sphereview_test -plymodel=../data/3Dmodel/ape.ply -label_class=0 -cam_head_x=0 -cam_head_y=0 -cam_head_z=1
```
####press 'Q' to start 2D image generation
```
$ ./sphereview_test -plymodel=../data/3Dmodel/ant.ply -label_class=1
$ ./sphereview_test -plymodel=../data/3Dmodel/ant.ply -label_class=1 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
```
####press 'Q' to start
```
$ ./sphereview_test -plymodel=../data/3Dmodel/cow.ply -label_class=2
$ ./sphereview_test -plymodel=../data/3Dmodel/cow.ply -label_class=2 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
```
####press 'Q' to start
```
$ ./sphereview_test -plymodel=../data/3Dmodel/plane.ply -label_class=3
$ ./sphereview_test -plymodel=../data/3Dmodel/plane.ply -label_class=3 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
```
```
$ ./sphereview_test -plymodel=../data/3Dmodel/bunny.ply -label_class=4 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
```
```
$ ./sphereview_test -plymodel=../data/3Dmodel/horse.ply -label_class=5 -cam_head_x=0 -cam_head_y=0 -cam_head_z=-1
```
####press 'Q' to start
####When all images have been created in the images_all folder, serving both as training images for the network and as a gallery of reference images for classification, proceed to the next step.
####After this demo, the binary files of images and labels will be stored as 'binary_image' and 'binary_label' in the current path. Copy them into the leveldb folder used for Caffe triplet training, for example <caffe_source_directory>/data/linemod, and duplicate them as 'binary_image_train', 'binary_image_test' and 'binary_label_train', 'binary_label_test'. Here the same data is used for training and testing; you can use different data to evaluate performance during Caffe training. It is important to observe the loss on the testing data to check whether the training data suits your aim: the loss should keep decreasing and settle at a value much smaller than the initial loss.
####You could start triplet training using Caffe like this:
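A minimal sketch of that step, assuming a solver definition at examples/triplet/3d_triplet_solver.prototxt in your Caffe checkout (the solver path is an assumption; `caffe train --solver=...` is the standard Caffe CLI):
```
$ cd <caffe_source_directory>
$ ./build/tools/caffe train --solver=examples/triplet/3d_triplet_solver.prototxt
```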

@@ -55,10 +55,10 @@ the use of this software, even if advised of the possibility of such damage.
#include <set>
#include <string.h>
#include <stdlib.h>
#include <tr1/memory>
#include <dirent.h>
#define CPU_ONLY
#include <opencv2/cnn_3dobj_config.hpp>
#ifdef HAVE_CAFFE
#include <caffe/blob.hpp>
#include <caffe/common.hpp>

@@ -15,3 +15,7 @@ target_link_libraries(classify_test ${OpenCV_LIBS})
set(SOURCES_modelanalysis demo_model_analysis.cpp)
add_executable(model_test ${SOURCES_modelanalysis})
target_link_libraries(model_test ${OpenCV_LIBS})
set(SOURCES_video demo_video.cpp)
add_executable(video_test ${SOURCES_video})
target_link_libraries(video_test ${OpenCV_LIBS})

File diff suppressed because it is too large.

@@ -0,0 +1,11 @@
rm -rf ../data/binary_image
rm -rf ../data/binary_label
rm -rf ../data/header_for_image
rm -rf ../data/header_for_label
./sphereview_test -plymodel=../data/3Dmodel/ape.ply -label_class=0 -cam_head_x=0 -cam_head_y=0 -cam_head_z=1
./sphereview_test -plymodel=../data/3Dmodel/ant.ply -label_class=1 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
./sphereview_test -plymodel=../data/3Dmodel/cow.ply -label_class=2 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
./sphereview_test -plymodel=../data/3Dmodel/plane.ply -label_class=3 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
./sphereview_test -plymodel=../data/3Dmodel/bunny.ply -label_class=4 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
./sphereview_test -plymodel=../data/3Dmodel/horse.ply -label_class=5 -cam_head_x=0 -cam_head_y=0 -cam_head_z=-1
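Assuming the script above is saved as datagen.sh next to the built sphereview_test binary, the whole dataset can be regenerated in one go:
```
$ chmod +x datagen.sh
$ ./datagen.sh
```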

@@ -84,16 +84,15 @@ void listDir(const char *path, std::vector<string>& files, bool r)
int main(int argc, char** argv)
{
const String keys = "{help | | This sample will extract features from reference images and a target image for classification. You can add a mean_file if there is little variance in the data, such as human faces; otherwise it is not very useful}"
"{src_dir | ../data/images_all/ | Source directory of the images used as the feature-extraction gallery.}"
"{caffemodel | ../../testdata/cv/3d_triplet_iter_30000.caffemodel | Caffe model for feature extraction.}"
"{network_forIMG | ../../testdata/cv/3d_triplet_testIMG.prototxt | Network definition file used for extracting the feature from a single image and making a classification}"
"{mean_file | no | The mean file generated by Caffe from all gallery images; this can be used for mean-value subtraction from all images. If you want to use the mean file, set this to ../data/images_mean/triplet_mean.binaryproto.}"
"{target_img | ../data/images_all/1_8.png | Path of the image waiting to be classified.}"
"{feature_blob | feat | Name of the layer which will be used as the feature; in this network, ip1 or feat works well.}"
"{num_candidate | 15 | Number of candidates in the gallery returned as the prediction result.}"
"{device | CPU | Device type: CPU or GPU}"
"{dev_id | 0 | Device id}";
"{src_dir | ../data/images_all/ | Source directory of the images used as the feature-extraction gallery.}"
"{caffemodel | ../../testdata/cv/3d_triplet_iter_30000.caffemodel | Caffe model for feature extraction.}"
"{network_forIMG | ../../testdata/cv/3d_triplet_testIMG.prototxt | Network definition file used for extracting the feature from a single image and making a classification}"
"{mean_file | no | The mean file generated by Caffe from all gallery images; this can be used for mean-value subtraction from all images. If you want to use the mean file, set this to ../data/images_mean/triplet_mean.binaryproto.}"
"{target_img | ../data/images_all/4_78.png | Path of the image waiting to be classified.}"
"{feature_blob | feat | Name of the layer which will be used as the feature; in this network, ip1 or feat works well.}"
"{num_candidate | 15 | Number of candidates in the gallery returned as the prediction result.}"
"{device | CPU | Device type: CPU or GPU}"
"{dev_id | 0 | Device id}";
/* Get parameters from the command line */
cv::CommandLineParser parser(argc, argv, keys);
parser.about("Feature extraction and classification");
@@ -111,18 +110,15 @@ int main(int argc, char** argv)
int num_candidate = parser.get<int>("num_candidate");
string device = parser.get<string>("device");
int dev_id = parser.get<int>("dev_id");
/* Initialize a network with the given device */
cv::cnn_3dobj::descriptorExtractor descriptor(device);
std::cout << "Using " << descriptor.getDeviceType() << std::endl;
/* Load the net with the Caffe-trained network parameters and structure */
if (strcmp(mean_file.c_str(), "no") == 0)
descriptor.loadNet(network_forIMG, caffemodel);
else
descriptor.loadNet(network_forIMG, caffemodel, mean_file);
std::vector<string> name_gallery;
/* List the file names under a given path */
listDir(src_dir.c_str(), name_gallery, false);
for (unsigned int i = 0; i < name_gallery.size(); i++)
@@ -135,16 +131,12 @@ int main(int argc, char** argv)
{
img_gallery.push_back(cv::imread(name_gallery[i], -1));
}
/* Extract feature from a set of images */
descriptor.extract(img_gallery, feature_reference, feature_blob);
std::cout << std::endl << "---------- Prediction for " << target_img << " ----------" << std::endl;
cv::Mat img = cv::imread(target_img, -1);
std::cout << std::endl << "---------- Features of gallery images ----------" << std::endl;
std::vector<std::pair<string, float> > prediction;
/* Print features of the reference images. */
for (unsigned int i = 0; i < feature_reference.rows; i++)
std::cout << feature_reference.row(i) << endl;
@@ -155,10 +147,8 @@ int main(int argc, char** argv)
std::vector<std::vector<cv::DMatch> > matches;
/* Have a KNN match on the target and reference images. */
matcher.knnMatch(feature_test, feature_reference, matches, num_candidate);
/* Print feature of the target image waiting to be classified. */
std::cout << std::endl << "---------- Features of target image: " << target_img << "----------" << endl << feature_test << std::endl;
/* Print the top N prediction. */
std::cout << std::endl << "---------- Prediction result(Distance - File Name in Gallery) ----------" << std::endl;
for (size_t i = 0; i < matches[0].size(); ++i)
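Once the samples are built, a hypothetical invocation of this classifier (the classify_test target comes from the samples CMakeLists above; the flags and values are the defaults from the keys string):
```
$ ./classify_test -target_img=../data/images_all/4_78.png -num_candidate=15 -device=CPU
```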

@@ -50,10 +50,10 @@ int main(int argc, char** argv)
"{caffemodel | ../../testdata/cv/3d_triplet_iter_30000.caffemodel | caffe model for feature exrtaction.}"
"{network_forIMG | ../../testdata/cv/3d_triplet_testIMG.prototxt | Network definition file used for extracting feature from a single image and making a classification}"
"{mean_file | no | The mean file generated by Caffe from all gallery images, this could be used for mean value substraction from all images. If you want to use the mean file, you can set this as ../data/images_mean/triplet_mean.binaryproto.}"
"{target_img | ../data/images_all/1_8.png | Path of image in reference.}"
"{ref_img1 | ../data/images_all/1_23.png | Path of closest image.}"
"{ref_img2 | ../data/images_all/1_14.png | Path of less closer image in the same class with reference image.}"
"{ref_img3 | ../data/images_all/3_8.png | Path of image with the same pose in another class.}"
"{target_img | ../data/images_all/4_78.png | Path of image in reference.}"
"{ref_img1 | ../data/images_all/4_79.png | Path of closest image.}"
"{ref_img2 | ../data/images_all/4_87.png | Path of less closer image in the same class with reference image.}"
"{ref_img3 | ../data/images_all/3_78.png | Path of image with the same pose in another class.}"
"{feature_blob | feat | Name of layer which will represent as the feature, in this network, ip1 or feat is well.}"
"{device | CPU | device}"
"{dev_id | 0 | dev_id}";

@@ -47,12 +47,15 @@ using namespace std;
using namespace cv::cnn_3dobj;
int main(int argc, char *argv[])
{
const String keys = "{help | | demo: $ ./sphereview_test -ite_depth=2 -plymodel=../data/3Dmodel/ape.ply -imagedir=../data/images_all/ -labeldir=../data/label_all.txt -num_class=4 -label_class=0, then press 'q' to start image generation once you see the gray background and a coordinate frame.}"
"{ite_depth | 2 | Iteration depth of sphere generation.}"
const String keys = "{help | | demo: $ ./sphereview_test -ite_depth=2 -plymodel=../data/3Dmodel/ape.ply -imagedir=../data/images_all/ -labeldir=../data/label_all.txt -num_class=6 -label_class=0, then press 'q' to start image generation once you see the gray background and a coordinate frame.}"
"{ite_depth | 3 | Iteration depth of sphere generation.}"
"{plymodel | ../data/3Dmodel/ape.ply | Path of the '.ply' file for image rendering. }"
"{imagedir | ../data/images_all/ | Path of the generated images for one particular .ply model. }"
"{labeldir | ../data/label_all.txt | Path of the label file for the generated images. }"
"{num_class | 4 | Total number of classes of models}"
"{cam_head_x | 0 | Head of the camera. }"
"{cam_head_y | -1 | Head of the camera. }"
"{cam_head_z | 0 | Head of the camera. }"
"{num_class | 6 | Total number of classes of models}"
"{label_class | 0 | Class label of current .ply model}"
"{rgb_use | 0 | Use RGB image or grayscale}";
/* Get parameters from the command line. */
@@ -69,19 +72,20 @@ int main(int argc, char *argv[])
string labeldir = parser.get<string>("labeldir");
int num_class = parser.get<int>("num_class");
int label_class = parser.get<int>("label_class");
float cam_head_x = parser.get<float>("cam_head_x");
float cam_head_y = parser.get<float>("cam_head_y");
float cam_head_z = parser.get<float>("cam_head_z");
int rgb_use = parser.get<int>("rgb_use");
cv::cnn_3dobj::icoSphere ViewSphere(10,ite_depth);
std::vector<cv::Point3d> campos = ViewSphere.CameraPos;
std::fstream imglabel;
char* p=(char*)labeldir.data();
imglabel.open(p, fstream::app|fstream::out);
bool camera_pov = (true);
bool camera_pov = true;
/* Create a window using viz. */
viz::Viz3d myWindow("Coordinate Frame");
/* Set the window size to 64*64; we use this scale as the default. */
myWindow.setWindowSize(Size(64,64));
/* Add coordinate axes. */
myWindow.showWidget("Coordinate Widget", viz::WCoordinateSystem());
/* Set background color. */
myWindow.setBackgroundColor(viz::Color::gray());
myWindow.spin();
@@ -90,7 +94,12 @@ int main(int argc, char *argv[])
/* Get the center of the generated mesh widget, since some .ply models are not centered at the origin. */
Point3d cam_focal_point = ViewSphere.getCenter(objmesh.cloud);
float radius = ViewSphere.getRadius(objmesh.cloud, cam_focal_point);
Point3d cam_y_dir(0.0f,0.0f,1.0f);
objmesh.cloud = objmesh.cloud/radius*100;
cam_focal_point = cam_focal_point/radius*100;
Point3d cam_y_dir;
cam_y_dir.x = cam_head_x;
cam_y_dir.y = cam_head_y;
cam_y_dir.z = cam_head_z;
const char* headerPath = "../data/header_for_";
const char* binaryPath = "../data/binary_";
ViewSphere.createHeader((int)campos.size(), 64, 64, headerPath);
@@ -106,7 +115,7 @@ int main(int argc, char *argv[])
imglabel << filename << ' ' << (int)(campos.at(pose).x*100) << ' ' << (int)(campos.at(pose).y*100) << ' ' << (int)(campos.at(pose).z*100) << endl;
filename = imagedir + filename;
/* Get the pose of the camera using makeCameraPoses. */
Affine3f cam_pose = viz::makeCameraPose(campos.at(pose)*radius+cam_focal_point, cam_focal_point, cam_y_dir*radius+cam_focal_point);
Affine3f cam_pose = viz::makeCameraPose(campos.at(pose)*380+cam_focal_point, cam_focal_point, cam_y_dir*380+cam_focal_point);
/* Get the transformation matrix from camera coordinate system to global. */
Affine3f transform = viz::makeTransformToGlobal(Vec3f(1.0f,0.0f,0.0f), Vec3f(0.0f,1.0f,0.0f), Vec3f(0.0f,0.0f,1.0f), campos.at(pose));
viz::WMesh mesh_widget(objmesh);

@@ -0,0 +1,392 @@
#include <opencv2/viz/vizcore.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <iostream>
#include <fstream>
#include <iomanip>
/* Needed for opendir/readdir and the C string helpers used in listDir. */
#include <cstdio>
#include <cstring>
#include <dirent.h>
#define HAVE_CAFFE
#include <opencv2/cnn_3dobj.hpp>
#include <opencv2/features2d/features2d.hpp>
using namespace cv;
using namespace std;
using namespace cv::cnn_3dobj;
/**
* @function listDir
* @brief Collect all file names under a directory into a list
*/
void listDir(const char *path, std::vector<string>& files, bool r)
{
DIR *pDir;
struct dirent *ent;
char childpath[512];
pDir = opendir(path);
/* Guard against a missing or unreadable directory. */
if (pDir == NULL)
{
return;
}
memset(childpath, 0, sizeof(childpath));
while ((ent = readdir(pDir)) != NULL)
{
if (ent->d_type & DT_DIR)
{
if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
{
continue;
}
if(r)
{
sprintf(childpath, "%s/%s", path, ent->d_name);
listDir(childpath,files,false);
}
}
else
{
files.push_back(ent->d_name);
}
}
closedir(pDir);
sort(files.begin(),files.end());
}
/**
* @function cvcloud_load
* @brief Convert the extracted 3-dimensional features into a 3D point cloud
*/
Mat cvcloud_load(Mat feature_reference)
{
Mat cloud(1, feature_reference.rows, CV_32FC3);
Point3f* data = cloud.ptr<cv::Point3f>();
/* Each 3-dimensional feature row becomes one 3D point. */
for (int i = 0; i < feature_reference.rows; ++i)
{
data[i].x = feature_reference.at<float>(i,0);
data[i].y = feature_reference.at<float>(i,1);
data[i].z = feature_reference.at<float>(i,2);
}
cloud *= 5.0f;
return cloud;
}
/**
* @function main
*/
int main(int argc, char **argv)
{
const String keys = "{help | | This sample will extract features from reference images and target images for classification. You can add a mean_file if there is little variance in the data, such as human faces; otherwise it is not very useful}"
"{src_dir | ../data/images_all/ | Source directory of the images used as the feature-extraction gallery.}"
"{caffemodellist | ../../testdata/cv/caffemodel_list.txt | List of Caffe models used for feature extraction.}"
"{network_forIMG | ../../testdata/cv/3d_triplet_testIMG.prototxt | Network definition file used for extracting the feature from a single image and making a classification}"
"{mean_file | no | The mean file generated by Caffe from all gallery images; this can be used for mean-value subtraction from all images. If you want to use the mean file, set this to ../data/images_mean/triplet_mean.binaryproto.}"
"{target_img1 | ../data/images_all/0_48.png | Path of an image waiting to be classified.}"
"{target_img2 | ../data/images_all/1_339.png | Path of an image waiting to be classified.}"
"{target_img3 | ../data/images_all/2_296.png | Path of an image waiting to be classified.}"
"{target_img4 | ../data/images_all/3_466.png | Path of an image waiting to be classified.}"
"{target_img5 | ../data/images_all/4_117.png | Path of an image waiting to be classified.}"
"{target_img6 | ../data/images_all/5_236.png | Path of an image waiting to be classified.}"
"{feature_blob | feat | Name of the layer which will be used as the feature; in this network, ip1 or feat works well.}"
"{num_candidate | 4 | Number of candidates in the gallery returned as the prediction result.}"
"{device | CPU | Device type: CPU or GPU}"
"{dev_id | 0 | Device id}";
/* Get parameters from the command line */
cv::CommandLineParser parser(argc, argv, keys);
parser.about("Feature extraction and classification");
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
string src_dir = parser.get<string>("src_dir");
string caffemodellist = parser.get<string>("caffemodellist");
string network_forIMG = parser.get<string>("network_forIMG");
string mean_file = parser.get<string>("mean_file");
string target_img1 = parser.get<string>("target_img1");
string target_img2 = parser.get<string>("target_img2");
string target_img3 = parser.get<string>("target_img3");
string target_img4 = parser.get<string>("target_img4");
string target_img5 = parser.get<string>("target_img5");
string target_img6 = parser.get<string>("target_img6");
string feature_blob = parser.get<string>("feature_blob");
int num_candidate = parser.get<int>("num_candidate");
string device = parser.get<string>("device");
int dev_id = parser.get<int>("dev_id");
ifstream namelist_model(caffemodellist.c_str(), ios::in);
vector<string> caffemodel;
string model_path;
int number_model = 0;
/* Testing getline() directly avoids the classic !eof() pitfall of
   pushing a trailing empty entry (and the leaked raw buffer). */
while (std::getline(namelist_model, model_path))
{
if (model_path.empty())
continue;
caffemodel.push_back(model_path);
number_model++;
}
/* List the file names under a given path */
std::vector<string> name_gallery;
listDir(src_dir.c_str(), name_gallery, false);
for (unsigned int i = 0; i < name_gallery.size(); i++)
{
name_gallery[i] = src_dir + name_gallery[i];
}
std::vector<cv::Mat> img_gallery;
cv::Mat temp_feat;
vector<cv::Mat> feature_reference;
vector<cv::Mat> feature_test1;
vector<cv::Mat> feature_test2;
vector<cv::Mat> feature_test3;
vector<cv::Mat> feature_test4;
vector<cv::Mat> feature_test5;
vector<cv::Mat> feature_test6;
cv::Mat img_test1 = cv::imread(target_img1, -1);
cv::Mat img_test2 = cv::imread(target_img2, -1);
cv::Mat img_test3 = cv::imread(target_img3, -1);
cv::Mat img_test4 = cv::imread(target_img4, -1);
cv::Mat img_test5 = cv::imread(target_img5, -1);
cv::Mat img_test6 = cv::imread(target_img6, -1);
for (int num_model = 0; num_model < number_model; ++num_model)
{
feature_reference.push_back(temp_feat);
feature_test1.push_back(temp_feat);
feature_test2.push_back(temp_feat);
feature_test3.push_back(temp_feat);
feature_test4.push_back(temp_feat);
feature_test5.push_back(temp_feat);
feature_test6.push_back(temp_feat);
}
for (unsigned int i = 0; i < name_gallery.size(); i++)
{
img_gallery.push_back(cv::imread(name_gallery[i], -1));
}
/* Initialize a network with the given device */
cv::cnn_3dobj::descriptorExtractor descriptor(device);
std::cout << "Using " << descriptor.getDeviceType() << std::endl;
/* Load the net with the Caffe-trained network parameters and structure */
for (int num_model = 0; num_model < number_model; ++num_model)
{
if (strcmp(mean_file.c_str(), "no") == 0)
descriptor.loadNet(network_forIMG, caffemodel[num_model]);
else
descriptor.loadNet(network_forIMG, caffemodel[num_model], mean_file);
/* Part 1: extract features from the gallery images and from each single test image */
descriptor.extract(img_gallery, feature_reference[num_model], feature_blob);
descriptor.extract(img_test1, feature_test1[num_model], feature_blob);
descriptor.extract(img_test2, feature_test2[num_model], feature_blob);
descriptor.extract(img_test3, feature_test3[num_model], feature_blob);
descriptor.extract(img_test4, feature_test4[num_model], feature_blob);
descriptor.extract(img_test5, feature_test5[num_model], feature_blob);
descriptor.extract(img_test6, feature_test6[num_model], feature_blob);
}
/* Initialize a matcher which using L2 distance. */
cv::BFMatcher matcher(NORM_L2);
vector<vector<vector<cv::DMatch> > > matches1;
vector<vector<vector<cv::DMatch> > > matches2;
vector<vector<vector<cv::DMatch> > > matches3;
vector<vector<vector<cv::DMatch> > > matches4;
vector<vector<vector<cv::DMatch> > > matches5;
vector<vector<vector<cv::DMatch> > > matches6;
vector<vector<cv::DMatch> > matches_temp;
for (int num_model = 0; num_model < number_model; ++num_model)
{
matches1.push_back(matches_temp);
matches2.push_back(matches_temp);
matches3.push_back(matches_temp);
matches4.push_back(matches_temp);
matches5.push_back(matches_temp);
matches6.push_back(matches_temp);
}
/* Have a KNN match on the target and reference images. */
for (int num_model = 0; num_model < number_model; ++num_model)
{
matcher.knnMatch(feature_test1[num_model], feature_reference[num_model], matches1[num_model], num_candidate+1);
matcher.knnMatch(feature_test2[num_model], feature_reference[num_model], matches2[num_model], num_candidate+1);
matcher.knnMatch(feature_test3[num_model], feature_reference[num_model], matches3[num_model], num_candidate+1);
matcher.knnMatch(feature_test4[num_model], feature_reference[num_model], matches4[num_model], num_candidate+1);
matcher.knnMatch(feature_test5[num_model], feature_reference[num_model], matches5[num_model], num_candidate+1);
matcher.knnMatch(feature_test6[num_model], feature_reference[num_model], matches6[num_model], num_candidate+1);
}
vector<Mat> img_merge;
/* Part 2: start the visualization */
bool camera_pov = true;
viz::Viz3d myWindow0("Instruction");
viz::Viz3d myWindow1("Point Cloud");
viz::Viz3d myWindow2("Prediction sample");
/* Set the window sizes and positions. */
myWindow0.setWindowSize(Size(1300,100));
myWindow0.setWindowPosition(Point(0,800));
myWindow1.setWindowSize(Size(700,600));
myWindow1.setWindowPosition(Point(600,0));
myWindow2.setWindowSize(Size(600,600));
myWindow2.setWindowPosition(Point(-20,0));
/* Pose of the widget in camera frame */
Affine3f cloud_pose = Affine3f().translate(Vec3f(1.0f,1.0f,1.0f));
Point3d campos(1,0,0);
/* Get the transformation matrix from camera coordinate system to global. */
Affine3f transform = viz::makeTransformToGlobal(Vec3f(1.0f,0.0f,0.0f), Vec3f(0.0f,1.0f,0.0f), Vec3f(0.0f,0.0f,1.0f), campos);
/* Pose of the widget in global frame */
Affine3f cloud_pose_global = transform * cloud_pose;
/* Set background color. */
myWindow0.setBackgroundColor(viz::Color::white());
myWindow1.setBackgroundColor(viz::Color::white());
myWindow2.setBackgroundColor(viz::Color::white());
Point3d cam_y_dir(0.0f,0.0f,1.0f);
cv::cnn_3dobj::icoSphere ViewSphere(1,0);
Mat bunny_cloud;
Point3d cam_focal_point;
float radius;
float translation_phase = 0.0;
int count_pre, num_rotate, max_rotate;
String titlename, Hint, Pred("prediction: ");
vector<viz::WImageOverlay> imagepredict;
string widgename[24] = {"1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24"};
vector<Mat> slide;
slide.push_back(imread("1.png"));
slide.push_back(imread("2.png"));
slide.push_back(imread("3.png"));
slide.push_back(imread("4.png"));
slide.push_back(imread("5.png"));
slide.push_back(imread("6.png"));
slide.push_back(imread("7.png"));
slide.push_back(imread("8.png"));
slide.push_back(imread("9.png"));
slide.push_back(imread("10.png"));
/// Create a window
viz::Viz3d myWindowS("Slide Show");
myWindowS.setWindowSize(Size(1300,700));
myWindowS.setWindowPosition(Point(0,0));
myWindowS.setBackgroundColor(viz::Color::white());
for (size_t i = 0; i < slide.size(); ++i)
{
/// Create an image-overlay widget for the current slide
viz::WImageOverlay slide1(slide[i],Rect(0, 0, 1300, 700));
/// Show the widget in the visualizer window
num_rotate = 0;
if (i == 0)
max_rotate = 2000;
else
max_rotate = 230;
while (num_rotate != max_rotate)
{
myWindowS.showWidget("Slide1", slide1);
/// Start event loop
myWindowS.spinOnce(1, true);
num_rotate++;
}
}
for (int num_model = 0; num_model < number_model; ++num_model)
{
if (num_model == 0)
Hint = "Start training.";
else if (num_model == 28)
Hint = "Different Classes Are Clustered.";
else if(num_model == 40)
Hint = "Poses Are Set apart.";
else if(num_model == 42)
Hint = "Finished. Model could: tell both classes and poses.";
titlename = caffemodel[num_model];
titlename = "Prediction Result of Model Trained on Iteration " + titlename.substr(34, titlename.length() - 44);
viz::WText title(titlename, Point(100, 50), 30, viz::Color::black());
viz::WText hint(Hint, Point(400, 20), 25, viz::Color::black());
viz::WImageOverlay image3d1(img_test1, Rect(20, 40, img_test4.rows, img_test4.cols));
viz::WText arrow1(Pred, Point(90,60), 15, viz::Color::red());
viz::WImageOverlay image3d2(img_test2, Rect(20, 40+75, img_test4.rows, img_test4.cols));
viz::WText arrow2(Pred, Point(90,60+75), 15, viz::Color::green());
viz::WImageOverlay image3d3(img_test3, Rect(20, 40+75*2, img_test4.rows, img_test4.cols));
viz::WText arrow3(Pred, Point(90,60+75*2), 15, viz::Color::purple());
viz::WImageOverlay image3d4(img_test4, Rect(20, 40+75*3, img_test4.rows, img_test4.cols));
viz::WText arrow4(Pred, Point(90,60+75*3), 15, viz::Color::blue());
viz::WImageOverlay image3d5(img_test5, Rect(20, 40+75*4, img_test4.rows, img_test4.cols));
viz::WText arrow5(Pred, Point(90,60+75*4), 15, viz::Color::yellow());
viz::WImageOverlay image3d6(img_test6, Rect(20, 40+75*5, img_test4.rows, img_test4.cols));
viz::WText arrow6(Pred, Point(90,60+75*5), 15, viz::Color::orange());
viz::WText text_target(String("Query Image"), Point2d(20,530), 20, viz::Color::purple());
viz::WText text_pred(String("Predicted Images using 4 NN"), Point2d(80+110,530), 20, viz::Color::purple());
viz::WText text3d1(String("1st"), Point2d(80 + 110,500), 20, viz::Color::orange());
viz::WText text3d2(String("2nd"), Point2d(80 + 2*110,500), 20, viz::Color::orange());
viz::WText text3d3(String("3rd"), Point2d(80 + 3*110,500), 20, viz::Color::orange());
viz::WText text3d4(String("4th"), Point2d(80 + 4*110,500), 20, viz::Color::orange());
viz::WText classname1(String("ape: red"), Point2d(20,10), 11, viz::Color::red());
viz::WText classname2(String("ant: green"), Point2d(120,10), 11, viz::Color::green());
viz::WText classname3(String("cow: purple"), Point2d(220,10), 11, viz::Color::purple());
viz::WText classname4(String("plane: blue"), Point2d(320,10), 11, viz::Color::blue());
viz::WText classname5(String("bunny: yellow"), Point2d(420,10), 11, viz::Color::yellow());
viz::WText classname6(String("horse: orange"), Point2d(500,10), 11, viz::Color::orange());
myWindow0.showWidget("title", title, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
myWindow0.showWidget("hint", hint, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
myWindow2.showWidget("image3d1", image3d1, Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
myWindow2.showWidget("image3d2", image3d2, Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
myWindow2.showWidget("image3d3", image3d3, Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
myWindow2.showWidget("image3d4", image3d4, Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
myWindow2.showWidget("image3d5", image3d5, Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
myWindow2.showWidget("image3d6", image3d6, Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
myWindow2.showWidget("arrow1", arrow1, Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
myWindow2.showWidget("arrow2", arrow2, Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
myWindow2.showWidget("arrow3", arrow3, Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
myWindow2.showWidget("arrow4", arrow4, Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
myWindow2.showWidget("arrow5", arrow5, Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
myWindow2.showWidget("arrow6", arrow6, Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
myWindow2.showWidget("text_target", text_target, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
myWindow2.showWidget("text_pred", text_pred, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
myWindow2.showWidget("text3d1", text3d1, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
myWindow2.showWidget("text3d2", text3d2, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
myWindow2.showWidget("text3d3", text3d3, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
myWindow2.showWidget("text3d4", text3d4, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
myWindow2.showWidget("classname1", classname1, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
myWindow2.showWidget("classname2", classname2, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
myWindow2.showWidget("classname3", classname3, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
myWindow2.showWidget("classname4", classname4, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
myWindow2.showWidget("classname5", classname5, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
myWindow2.showWidget("classname6", classname6, Affine3f().translate(Vec3f(0.0f,0.0f,0.0f)));
bunny_cloud = cvcloud_load(feature_reference[num_model]);
cam_focal_point = ViewSphere.getCenter(bunny_cloud);
radius = ViewSphere.getRadius(bunny_cloud, cam_focal_point);
viz::WCloud cloud_widget1(bunny_cloud.colRange(Range(0,641)), viz::Color::red());
viz::WCloud cloud_widget2(bunny_cloud.colRange(Range(642,642*2-1)), viz::Color::green());
viz::WCloud cloud_widget3(bunny_cloud.colRange(Range(642*2,642*3-1)), viz::Color::purple());
viz::WCloud cloud_widget4(bunny_cloud.colRange(Range(642*3,642*4-1)), viz::Color::blue());
viz::WCloud cloud_widget5(bunny_cloud.colRange(Range(642*4,642*5-1)), viz::Color::yellow());
viz::WCloud cloud_widget6(bunny_cloud.colRange(Range(642*5,642*6-1)), viz::Color::orange());
myWindow1.showWidget("obj1", cloud_widget1, cloud_pose_global);
myWindow1.setRenderingProperty("obj1",0,3);
myWindow1.showWidget("obj2", cloud_widget2, cloud_pose_global);
myWindow1.setRenderingProperty("obj2",0,3);
myWindow1.showWidget("obj3", cloud_widget3, cloud_pose_global);
myWindow1.setRenderingProperty("obj3",0,3);
myWindow1.showWidget("obj4", cloud_widget4, cloud_pose_global);
myWindow1.setRenderingProperty("obj4",0,3);
myWindow1.showWidget("obj5", cloud_widget5, cloud_pose_global);
myWindow1.setRenderingProperty("obj5",0,3);
myWindow1.showWidget("obj6", cloud_widget6, cloud_pose_global);
myWindow1.setRenderingProperty("obj6",0,3);
count_pre = 0;
for (int j = 1; j < num_candidate+1; ++j)
{
myWindow2.showWidget(widgename[count_pre], viz::WImageOverlay(img_gallery[matches1[num_model][0][j].trainIdx], Rect(80+110*j, 40+75*0, img_test4.rows, img_test4.cols)), Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
count_pre++;
myWindow2.showWidget(widgename[count_pre], viz::WImageOverlay(img_gallery[matches2[num_model][0][j].trainIdx], Rect(80+110*j, 40+75*1, img_test4.rows, img_test4.cols)), Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
count_pre++;
myWindow2.showWidget(widgename[count_pre], viz::WImageOverlay(img_gallery[matches3[num_model][0][j].trainIdx], Rect(80+110*j, 40+75*2, img_test4.rows, img_test4.cols)), Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
count_pre++;
myWindow2.showWidget(widgename[count_pre], viz::WImageOverlay(img_gallery[matches4[num_model][0][j].trainIdx], Rect(80+110*j, 40+75*3, img_test4.rows, img_test4.cols)), Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
count_pre++;
myWindow2.showWidget(widgename[count_pre], viz::WImageOverlay(img_gallery[matches5[num_model][0][j].trainIdx], Rect(80+110*j, 40+75*4, img_test4.rows, img_test4.cols)), Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
count_pre++;
myWindow2.showWidget(widgename[count_pre], viz::WImageOverlay(img_gallery[matches6[num_model][0][j].trainIdx], Rect(80+110*j, 40+75*5, img_test4.rows, img_test4.cols)), Affine3f().translate(Vec3f(1.0f,1.0f,1.0f)));
count_pre++;
}
num_rotate = 0;
max_rotate = 15;
if (num_model == number_model-1)
max_rotate = 30000;
while (num_rotate != max_rotate)
{
translation_phase += CV_PI * 0.01f;
campos.x = sin(translation_phase);
campos.y = cos(translation_phase);
campos.z = 0;
/* Get the pose of the camera using makeCameraPoses. */
Affine3f cam_pose = viz::makeCameraPose(campos*radius*3.5+cam_focal_point, cam_focal_point, cam_y_dir*radius*3.5+cam_focal_point);
myWindow1.setViewerPose(cam_pose);
myWindow1.spinOnce(1, true);
myWindow2.spinOnce(1, true);
myWindow0.spinOnce(1, true);
num_rotate++;
}
myWindow0.removeAllWidgets();
myWindow1.removeAllWidgets();
myWindow2.removeAllWidgets();
}
return 0;
}
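For reference, a sketch of launching this demo once the samples are built (the video_test target comes from the samples CMakeLists above; the slide images 1.png through 10.png are expected in the working directory, and the caffemodel list at its default testdata path):
```
$ ./video_test -device=CPU -num_candidate=4
```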

@@ -146,7 +146,6 @@ namespace cnn_3dobj
radiusCam = Radius;
}
}
radiusCam *= 4;
return radiusCam;
};

@@ -42,7 +42,6 @@ the use of this software, even if advised of the possibility of such damage.
#ifndef __OPENCV_CNN_3DOBJ_PRECOMP_HPP__
#define __OPENCV_CNN_3DOBJ_PRECOMP_HPP__
#include <opencv2/cnn_3dobj_config.hpp>
#include <opencv2/cnn_3dobj.hpp>
#endif

@@ -1,110 +0,0 @@
#include "../include/cnn_3dobj.hpp"
using namespace cv;
using namespace std;
namespace cv{ namespace cnn_3dobj{
IcoSphere::IcoSphere(float radius_in, int depth_in)
{
X = 0.525731112119133606f;
Z = 0.850650808352039932f;
int radius = radius_in;
int depth = depth_in;
X *= radius;
Z *= radius;
float vdata[12][3] = { { -X, 0.0f, Z }, { X, 0.0f, Z },
{ -X, 0.0f, -Z }, { X, 0.0f, -Z }, { 0.0f, Z, X }, { 0.0f, Z, -X },
{ 0.0f, -Z, X }, { 0.0f, -Z, -X }, { Z, X, 0.0f }, { -Z, X, 0.0f },
{ Z, -X, 0.0f }, { -Z, -X, 0.0f } };
int tindices[20][3] = { { 0, 4, 1 }, { 0, 9, 4 }, { 9, 5, 4 },
{ 4, 5, 8 }, { 4, 8, 1 }, { 8, 10, 1 }, { 8, 3, 10 }, { 5, 3, 8 },
{ 5, 2, 3 }, { 2, 7, 3 }, { 7, 10, 3 }, { 7, 6, 10 }, { 7, 11, 6 },
{ 11, 0, 6 }, { 0, 1, 6 }, { 6, 1, 10 }, { 9, 0, 11 },
{ 9, 11, 2 }, { 9, 2, 5 }, { 7, 2, 11 } };
std::vector<float>* texCoordsList = new std::vector<float>;
std::vector<int>* indicesList = new std::vector<int>;
// Iterate over points
for (int i = 0; i < 20; ++i) {
subdivide(vdata[tindices[i][1]], vdata[tindices[i][2]],
vdata[tindices[i][3]], depth);
}
cout << "View points in total: " << CameraPos->size() << endl;
cout << "The coordinate of view point: " << endl;
for(int i=0; i < CameraPos->size(); i++)
{
cout << CameraPos->at(i).x << endl;
}
}
void IcoSphere::norm(float v[])
{
float len = 0;
for (int i = 0; i < 3; ++i) {
len += v[i] * v[i];
}
len = sqrt(len);
for (int i = 0; i < 3; ++i) {
v[i] /= ((float)len/(float)IcoSphere::radius);
}
}
void IcoSphere::add(float v[])
{
Point3f temp_Campos;
std::vector<float>* temp = new std::vector<float>;
for (int k = 0; k < 3; ++k) {
vertexList->push_back(v[k]);
vertexNormalsList->push_back(v[k]);
temp->push_back(v[k]);
}
temp_Campos.x = temp->at(0);temp_Campos.y = temp->at(1);temp_Campos.z = temp->at(2);
CameraPos->push_back(temp_Campos);
}
void IcoSphere::subdivide(float v1[], float v2[], float v3[], int depth)
{
if (depth == 0) {
add(v1);
add(v2);
add(v3);
return;
}
float* v12 = new float[3];
float* v23 = new float[3];
float* v31 = new float[3];
for (int i = 0; i < 3; ++i) {
v12[i] = (v1[i] + v2[i]) / 2;
v23[i] = (v2[i] + v3[i]) / 2;
v31[i] = (v3[i] + v1[i]) / 2;
}
norm(v12);
norm(v23);
norm(v31);
subdivide(v1, v12, v31, depth - 1);
subdivide(v2, v23, v12, depth - 1);
subdivide(v3, v31, v23, depth - 1);
subdivide(v12, v23, v31, depth - 1);
}
}}

Binary file not shown.


@@ -81,6 +81,6 @@ layer {
bottom: "ip1"
top: "feat"
inner_product_param {
num_output: 16
num_output: 3
}
}

Binary file not shown.


@@ -0,0 +1,52 @@
../../testdata/cv/3d_triplet_iter_1.caffemodel
../../testdata/cv/3d_triplet_iter_2.caffemodel
../../testdata/cv/3d_triplet_iter_3.caffemodel
../../testdata/cv/3d_triplet_iter_4.caffemodel
../../testdata/cv/3d_triplet_iter_5.caffemodel
../../testdata/cv/3d_triplet_iter_6.caffemodel
../../testdata/cv/3d_triplet_iter_7.caffemodel
../../testdata/cv/3d_triplet_iter_8.caffemodel
../../testdata/cv/3d_triplet_iter_9.caffemodel
../../testdata/cv/3d_triplet_iter_10.caffemodel
../../testdata/cv/3d_triplet_iter_20.caffemodel
../../testdata/cv/3d_triplet_iter_30.caffemodel
../../testdata/cv/3d_triplet_iter_40.caffemodel
../../testdata/cv/3d_triplet_iter_50.caffemodel
../../testdata/cv/3d_triplet_iter_60.caffemodel
../../testdata/cv/3d_triplet_iter_70.caffemodel
../../testdata/cv/3d_triplet_iter_80.caffemodel
../../testdata/cv/3d_triplet_iter_90.caffemodel
../../testdata/cv/3d_triplet_iter_100.caffemodel
../../testdata/cv/3d_triplet_iter_200.caffemodel
../../testdata/cv/3d_triplet_iter_300.caffemodel
../../testdata/cv/3d_triplet_iter_400.caffemodel
../../testdata/cv/3d_triplet_iter_500.caffemodel
../../testdata/cv/3d_triplet_iter_600.caffemodel
../../testdata/cv/3d_triplet_iter_700.caffemodel
../../testdata/cv/3d_triplet_iter_800.caffemodel
../../testdata/cv/3d_triplet_iter_900.caffemodel
../../testdata/cv/3d_triplet_iter_1000.caffemodel
../../testdata/cv/3d_triplet_iter_2000.caffemodel
../../testdata/cv/3d_triplet_iter_3000.caffemodel
../../testdata/cv/3d_triplet_iter_4000.caffemodel
../../testdata/cv/3d_triplet_iter_5000.caffemodel
../../testdata/cv/3d_triplet_iter_6000.caffemodel
../../testdata/cv/3d_triplet_iter_7000.caffemodel
../../testdata/cv/3d_triplet_iter_8000.caffemodel
../../testdata/cv/3d_triplet_iter_9000.caffemodel
../../testdata/cv/3d_triplet_iter_10000.caffemodel
../../testdata/cv/3d_triplet_iter_20000.caffemodel
../../testdata/cv/3d_triplet_iter_30000.caffemodel
../../testdata/cv/3d_triplet_iter_40000.caffemodel
../../testdata/cv/3d_triplet_iter_50000.caffemodel
../../testdata/cv/3d_triplet_iter_60000.caffemodel
../../testdata/cv/3d_triplet_iter_70000.caffemodel
../../testdata/cv/3d_triplet_iter_110000.caffemodel
../../testdata/cv/3d_triplet_iter_120000.caffemodel
../../testdata/cv/3d_triplet_iter_130000.caffemodel
../../testdata/cv/3d_triplet_iter_140000.caffemodel
../../testdata/cv/3d_triplet_iter_150000.caffemodel
../../testdata/cv/3d_triplet_iter_160000.caffemodel
../../testdata/cv/3d_triplet_iter_170000.caffemodel
../../testdata/cv/3d_triplet_iter_180000.caffemodel
../../testdata/cv/3d_triplet_iter_190000.caffemodel