From ba08dbbef8566030f5eed72a8116416b270391b8 Mon Sep 17 00:00:00 2001
From: Pavel Grunt
Date: Wed, 30 Oct 2019 22:02:06 +0100
Subject: [PATCH] s/seperate/separate/g in comments

---
 README.md                                       | 2 +-
 modules/cnn_3dobj/README.md                     | 2 +-
 modules/cnn_3dobj/include/opencv2/cnn_3dobj.hpp | 2 +-
 modules/ovis/include/opencv2/ovis.hpp           | 2 +-
 modules/saliency/samples/computeSaliency.cpp    | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 5ac8b483b..4e46b9af0 100644
--- a/README.md
+++ b/README.md
@@ -55,4 +55,4 @@ In order to keep a clean overview containing all contributed modules the followi
 1. Update the README.md file under the modules folder. Here you add your model with a single line description.
-2. Add a README.md inside your own module folder. This README explains which functionality (seperate functions) is available, links to the corresponding samples and explains in somewhat more detail what the module is expected to do. If any extra requirements are needed to build the module without problems, add them here also.
+2. Add a README.md inside your own module folder. This README explains which functionality (separate functions) is available, links to the corresponding samples and explains in somewhat more detail what the module is expected to do. If any extra requirements are needed to build the module without problems, add them here also.
diff --git a/modules/cnn_3dobj/README.md b/modules/cnn_3dobj/README.md
index 76598a0c1..b7457ba9b 100755
--- a/modules/cnn_3dobj/README.md
+++ b/modules/cnn_3dobj/README.md
@@ -79,7 +79,7 @@ $ ./example_cnn_3dobj_classify -mean_file=../data/images_mean/triplet_mean.binar
 ```
 ===========================================================
 ##Demo3: Model performance test
-####This demo will run a performance test of a trained CNN model on several images. If the the model fails on telling different samples from seperate classes apart, or is confused on samples with similar pose but from different classes, this will give some information for model analysis.
+####This demo will run a performance test of a trained CNN model on several images. If the the model fails on telling different samples from separate classes apart, or is confused on samples with similar pose but from different classes, this will give some information for model analysis.
 ```
 $ ./example_cnn_3dobj_model_analysis
 ```
diff --git a/modules/cnn_3dobj/include/opencv2/cnn_3dobj.hpp b/modules/cnn_3dobj/include/opencv2/cnn_3dobj.hpp
index 1dc2c36cc..36d799f8f 100755
--- a/modules/cnn_3dobj/include/opencv2/cnn_3dobj.hpp
+++ b/modules/cnn_3dobj/include/opencv2/cnn_3dobj.hpp
@@ -73,7 +73,7 @@ the use of this software, even if advised of the possibility of such damage.
 As CNN based learning algorithm shows better performance on the classification issues,
 the rich labeled data could be more useful in the training stage. 3D object classification and pose estimation
-is a jointed mission aimming at seperate different posed apart in the descriptor form.
+is a jointed mission aiming at separate different posed apart in the descriptor form.
 In the training stage, we prepare 2D training images generated from our module with their class label and pose label.
 We fully exploit the information lies in their labels
diff --git a/modules/ovis/include/opencv2/ovis.hpp b/modules/ovis/include/opencv2/ovis.hpp
index 473946fa0..c6de7d81d 100644
--- a/modules/ovis/include/opencv2/ovis.hpp
+++ b/modules/ovis/include/opencv2/ovis.hpp
@@ -18,7 +18,7 @@ namespace ovis {
 enum SceneSettings
 {
-    /// the window will use a seperate scene. The scene will be shared otherwise.
+    /// the window will use a separate scene. The scene will be shared otherwise.
     SCENE_SEPERATE = 1,
     /// allow the user to control the camera.
     SCENE_INTERACTIVE = 2,
diff --git a/modules/saliency/samples/computeSaliency.cpp b/modules/saliency/samples/computeSaliency.cpp
index d55d3cfac..e72b18d7d 100644
--- a/modules/saliency/samples/computeSaliency.cpp
+++ b/modules/saliency/samples/computeSaliency.cpp
@@ -157,7 +157,7 @@ int main( int argc, char** argv )
       int ndet = int(saliencyMap.size());
       std::cout << "Objectness done " << ndet << std::endl;
       // The result are sorted by objectness. We only use the first maxd boxes here.
-      int maxd = 7, step = 255 / maxd, jitter=9; // jitter to seperate single rects
+      int maxd = 7, step = 255 / maxd, jitter=9; // jitter to separate single rects
       Mat draw = image.clone();
       for (int i = 0; i < std::min(maxd, ndet); i++)
       {
          Vec4i bb = saliencyMap[i];