Fix misc. source and comment typos

Found via `codespell -q 3 -S ./3rdparty,./modules -L amin,ang,atleast,dof,endwhile,hist,uint`
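For anyone reproducing this sweep, codespell can also write its suggested fixes straight into the tree; a minimal sketch (assuming a codespell release that supports the `-w`/`--write-changes` flag, and noting that every suggestion still deserves manual review, since codespell occasionally picks the wrong replacement):

```sh
# Read-only pass: list suspected typos, skipping vendored code (-S) and
# known false positives (-L), with encoding/binary-file warnings quieted (-q 3).
codespell -q 3 -S ./3rdparty,./modules -L amin,ang,atleast,dof,endwhile,hist,uint

# Apply the suggested replacements in place, then review before committing.
codespell -q 3 -S ./3rdparty,./modules -L amin,ang,atleast,dof,endwhile,hist,uint -w
git diff
```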
pull/15305/head
luz.paz 5 years ago
parent dbf8e22fad
commit 32aba5e64b
28 changed files (number of changed lines in parentheses):

1. apps/createsamples/utility.cpp (4)
2. cmake/FindCUDA/run_nvcc.cmake (2)
3. cmake/OpenCVCompilerOptimizations.cmake (2)
4. cmake/OpenCVDetectApacheAnt.cmake (2)
5. cmake/OpenCVDetectInferenceEngine.cmake (2)
6. cmake/OpenCVDetectPython.cmake (2)
7. cmake/OpenCVUtils.cmake (2)
8. cmake/android/android_ant_projects.cmake (2)
9. doc/js_tutorials/js_video/js_lucas_kanade/js_lucas_kanade.markdown (2)
10. doc/tutorials/calib3d/real_time_pose/real_time_pose.markdown (4)
11. doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.markdown (4)
12. doc/tutorials/gapi/anisotropic_segmentation/porting_anisotropic_segmentation.markdown (12)
13. doc/tutorials/imgcodecs/raster-gdal/raster_io_gdal.markdown (2)
14. doc/tutorials/imgproc/gausian_median_blur_bilateral_filter/gausian_median_blur_bilateral_filter.markdown (2)
15. doc/tutorials/imgproc/morph_lines_detection/morph_lines_detection.md (2)
16. doc/tutorials/introduction/transition_guide/transition_guide.markdown (2)
17. doc/tutorials/ios/video_processing/video_processing.markdown (2)
18. doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown (2)
19. doc/tutorials/objdetect/cascade_classifier/cascade_classifier.markdown (2)
20. doc/tutorials/video/optical_flow/optical_flow.markdown (2)
21. samples/cpp/stitching_detailed.cpp (2)
22. samples/cpp/videocapture_gphoto2_autofocus.cpp (2)
23. samples/directx/d3d11_interop.cpp (2)
24. samples/dnn/face_detector/how_to_train_face_detector.txt (2)
25. samples/dnn/openpose.py (2)
26. samples/opencl/opencl-opencv-interop.cpp (2)
27. samples/python/tutorial_code/imgProc/threshold_inRange/threshold_inRange.py (2)
28. samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py (2)

@@ -895,7 +895,7 @@ void icvGetNextFromBackgroundData( CvBackgroundData* data,
 * #pragma omp parallel
 * {
 * ...
-* icvGetBackgourndImage( cvbgdata, cvbgreader, img );
+* icvGetBackgroundImage( cvbgdata, cvbgreader, img );
 * ...
 * }
 * ...

@@ -990,7 +990,7 @@ static int icvInitBackgroundReaders( const char* filename, Size winsize )
 /*
 * icvDestroyBackgroundReaders
 *
-* Finish backgournd reading process
+* Finish background reading process
 */
 static
 void icvDestroyBackgroundReaders()

@@ -136,7 +136,7 @@ macro(cuda_execute_process status command)
 # copy and paste a runnable command line.
 set(cuda_execute_process_string)
 foreach(arg ${ARGN})
-# If there are quotes, excape them, so they come through.
+# If there are quotes, escape them, so they come through.
 string(REPLACE "\"" "\\\"" arg ${arg})
 # Args with spaces need quotes around them to get them to be parsed as a single argument.
 if(arg MATCHES " ")

@@ -854,7 +854,7 @@ macro(__ocv_add_dispatched_file filename target_src_var src_directory dst_direct
 if(";${CPU_DISPATCH_FINAL};" MATCHES "${OPT}" OR __CPU_DISPATCH_INCLUDE_ALL)
 if(EXISTS "${src_directory}/${filename}.${OPT_LOWER}.cpp")
-message(STATUS "Using overrided ${OPT} source: ${src_directory}/${filename}.${OPT_LOWER}.cpp")
+message(STATUS "Using overridden ${OPT} source: ${src_directory}/${filename}.${OPT_LOWER}.cpp")
 else()
 list(APPEND ${target_src_var} "${__file}")
 endif()

@@ -27,7 +27,7 @@ if(ANT_EXECUTABLE)
 unset(ANT_EXECUTABLE CACHE)
 else()
 string(REGEX MATCH "[0-9]+.[0-9]+.[0-9]+" ANT_VERSION "${ANT_VERSION_FULL}")
-set(ANT_VERSION "${ANT_VERSION}" CACHE INTERNAL "Detected ant vesion")
+set(ANT_VERSION "${ANT_VERSION}" CACHE INTERNAL "Detected ant version")
 message(STATUS "Found apache ant: ${ANT_EXECUTABLE} (${ANT_VERSION})")
 endif()

@@ -5,7 +5,7 @@
 #
 # Detect parameters:
 # 1. Native cmake IE package:
-# - enironment variable InferenceEngine_DIR is set to location of cmake module
+# - environment variable InferenceEngine_DIR is set to location of cmake module
 # 2. Custom location:
 # - INF_ENGINE_INCLUDE_DIRS - headers search location
 # - INF_ENGINE_LIB_DIRS - library search location

@@ -249,7 +249,7 @@ if(NOT ${found})
 # Export return values
 set(${found} "${_found}" CACHE INTERNAL "")
-set(${executable} "${_executable}" CACHE FILEPATH "Path to Python interpretor")
+set(${executable} "${_executable}" CACHE FILEPATH "Path to Python interpreter")
 set(${version_string} "${_version_string}" CACHE INTERNAL "")
 set(${version_major} "${_version_major}" CACHE INTERNAL "")
 set(${version_minor} "${_version_minor}" CACHE INTERNAL "")

@@ -781,7 +781,7 @@ macro(ocv_check_modules define)
 if(pkgcfg_lib_${define}_${_lib})
 list(APPEND _libs "${pkgcfg_lib_${define}_${_lib}}")
 else()
-message(WARNING "ocv_check_modules(${define}): can't find library '${_lib}'. Specify 'pkgcfg_lib_${define}_${_lib}' manualy")
+message(WARNING "ocv_check_modules(${define}): can't find library '${_lib}'. Specify 'pkgcfg_lib_${define}_${_lib}' manually")
 list(APPEND _libs "${_lib}")
 endif()
 else()

@@ -49,7 +49,7 @@ macro(android_get_compatible_target VAR)
 list(GET ANDROID_SDK_TARGETS 0 __lvl)
 string(REGEX MATCH "[0-9]+$" __lvl "${__lvl}")
-#find minimal level mathing to all provided levels
+#find minimal level matching to all provided levels
 foreach(lvl ${ARGN})
 string(REGEX MATCH "[0-9]+$" __level "${lvl}")
 if(__level GREATER __lvl)

@@ -13,7 +13,7 @@ Optical Flow
 ------------
 Optical flow is the pattern of apparent motion of image objects between two consecutive frames
-caused by the movemement of object or camera. It is 2D vector field where each vector is a
+caused by the movement of object or camera. It is 2D vector field where each vector is a
 displacement vector showing the movement of points from first frame to second. Consider the image
 below (Image Courtesy: [Wikipedia article on Optical
 Flow](http://en.wikipedia.org/wiki/Optical_flow)).

@@ -253,8 +253,8 @@ Here is explained in detail the code for the real time application:
 @code{.cpp}
 RobustMatcher rmatcher; // instantiate RobustMatcher
-cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints); // instatiate ORB feature detector
-cv::DescriptorExtractor * extractor = new cv::OrbDescriptorExtractor(); // instatiate ORB descriptor extractor
+cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints); // instantiate ORB feature detector
+cv::DescriptorExtractor * extractor = new cv::OrbDescriptorExtractor(); // instantiate ORB descriptor extractor
 rmatcher.setFeatureDetector(detector); // set feature detector
 rmatcher.setDescriptorExtractor(extractor); // set descriptor extractor

@@ -29,8 +29,8 @@ This distance is equivalent to count the number of different elements for binary
 To filter the matches, Lowe proposed in @cite Lowe:2004:DIF:993451.996342 to use a distance ratio test to try to eliminate false matches.
 The distance ratio between the two nearest matches of a considered keypoint is computed and it is a good match when this value is below
-a thresold. Indeed, this ratio allows helping to discriminate between ambiguous matches (distance ratio between the two nearest neighbors is
-close to one) and well discriminated matches. The figure below from the SIFT paper illustrates the probability that a match is correct is
+a threshold. Indeed, this ratio allows helping to discriminate between ambiguous matches (distance ratio between the two nearest neighbors
+close to one) and well discriminated matches. The figure below from the SIFT paper illustrates the probability that a match is correct
 based on the nearest-neighbor distance ratio test.
 ![](images/Feature_FlannMatcher_Lowe_ratio_test.png)

@@ -39,7 +39,7 @@ With G-API, we can define it as follows:
 It is important to understand that the new G-API based version of
 calcGST() will just produce a compute graph, in contrast to its
 original version, which actually calculates the values. This is a
-principial difference -- G-API based functions like this are used to
+principal difference -- G-API based functions like this are used to
 construct graphs, not to process the actual data.
 Let's start implementing calcGST() with calculation of \f$J\f$

@@ -186,7 +186,7 @@ is also OpenCV-based since it fallbacks to OpenCV functions inside.
 On GNU/Linux, application memory footprint can be profiled with
 [Valgrind](http://valgrind.org/). On Debian/Ubuntu systems it can be
-installed like this (assuming you have administrator priveleges):
+installed like this (assuming you have administrator privileges):
 $ sudo apt-get install valgrind massif-visualizer

@@ -239,10 +239,10 @@ consumption is because the default naive OpenCV-based backend is used to
 execute this graph. This backend serves mostly for quick prototyping
 and debugging algorithms before offload/further optimization.
-This backend doesn't utilize any complex memory mamagement strategies yet
+This backend doesn't utilize any complex memory management strategies yet
 since it is not its point at the moment. In the following chapter,
 we'll learn about Fluid backend and see how the same G-API code can
-run in a completely different model (and the footprint shrinked to a
+run in a completely different model (and the footprint shrunk to a
 number of kilobytes).
 # Backends and kernels {#gapi_anisotropic_backends}

@@ -298,7 +298,7 @@ as a _graph compilation option_:
 @snippet cpp/tutorial_code/gapi/porting_anisotropic_image_segmentation/porting_anisotropic_image_segmentation_gapi_fluid.cpp kernel_pkg_use
-Traditional OpenCV is logically divided into modules, whith every
+Traditional OpenCV is logically divided into modules, with every
 module providing a set of functions. In G-API, there are also
 "modules" which are represented as kernel packages provided by a
 particular backend. In this example, we pass Fluid kernel packages to

@@ -375,7 +375,7 @@ left side of the dump) is easily noticeable.
 The visualization reflects how G-API deals with mixed graphs, also
 called _heterogeneous_ graphs. The majority of operations in this
 graph are implemented with Fluid backend, but Box filters are executed
-by the OpenCV backend. One can easily see that the graph is partioned
+by the OpenCV backend. One can easily see that the graph is partitioned
 (with rectangles). G-API groups connected operations based on their
 affinity, forming _subgraphs_ (or _islands_ in G-API terminology), and
 our top-level graph becomes a composition of multiple smaller

@@ -15,7 +15,7 @@ The primary objectives for this tutorial:
 - How to use OpenCV [imread](@ref imread) to load satellite imagery.
 - How to use OpenCV [imread](@ref imread) to load SRTM Digital Elevation Models
-- Given the corner coordinates of both the image and DEM, correllate the elevation data to the
+- Given the corner coordinates of both the image and DEM, correlate the elevation data to the
 image to find elevations for each pixel.
 - Show a basic, easy-to-implement example of a terrain heat map.
 - Show a basic use of DEM data coupled with ortho-rectified imagery.

@@ -157,7 +157,7 @@ already known by now.
 - *src*: Source image
 - *dst*: Destination image
 - *Size(w, h)*: The size of the kernel to be used (the neighbors to be considered). \f$w\f$ and
-\f$h\f$ have to be odd and positive numbers otherwise thi size will be calculated using the
+\f$h\f$ have to be odd and positive numbers otherwise the size will be calculated using the
 \f$\sigma_{x}\f$ and \f$\sigma_{y}\f$ arguments.
 - \f$\sigma_{x}\f$: The standard deviation in x. Writing \f$0\f$ implies that \f$\sigma_{x}\f$ is
 calculated using kernel size.

@@ -30,7 +30,7 @@ Two of the most basic morphological operations are dilation and erosion. Dilatio
 ![Dilation on a Grayscale Image](images/morph6.gif)
-- __Erosion__: The vise versa applies for the erosion operation. The value of the output pixel is the <b><em>minimum</em></b> value of all the pixels that fall within the structuring element's size and shape. Look the at the example figures below:
+- __Erosion__: The vice versa applies for the erosion operation. The value of the output pixel is the <b><em>minimum</em></b> value of all the pixels that fall within the structuring element's size and shape. Look the at the example figures below:
 ![Erosion on a Binary Image](images/morph211.png)

@@ -189,7 +189,7 @@ brief->compute(gray, query_kpts, query_desc); //Compute brief descriptors at eac
 OpenCL {#tutorial_transition_hints_opencl}
 ------
-All specialized `ocl` implemetations has been hidden behind general C++ algorithm interface. Now the function execution path can be selected dynamically at runtime: CPU or OpenCL; this mechanism is also called "Transparent API".
+All specialized `ocl` implementations has been hidden behind general C++ algorithm interface. Now the function execution path can be selected dynamically at runtime: CPU or OpenCL; this mechanism is also called "Transparent API".
 New class cv::UMat is intended to hide data exchange with OpenCL device in a convenient way.

@@ -101,7 +101,7 @@ using namespace cv;
 }
 @endcode
 In this case, we initialize the camera and provide the imageView as a target for rendering each
-frame. CvVideoCamera is basically a wrapper around AVFoundation, so we provie as properties some of
+frame. CvVideoCamera is basically a wrapper around AVFoundation, so we provide as properties some of
 the AVFoundation camera options. For example we want to use the front camera, set the video size to
 352x288 and a video orientation (the video camera normally outputs in landscape mode, which results
 in transposed data when you design a portrait application).

@@ -13,7 +13,7 @@ In this tutorial you will learn how to:
 Motivation
 ----------
-Why is it interesting to extend the SVM optimation problem in order to handle non-linearly separable
+Why is it interesting to extend the SVM optimization problem in order to handle non-linearly separable
 training data? Most of the applications in which SVMs are used in computer vision require a more
 powerful tool than a simple linear classifier. This stems from the fact that in these tasks __the
 training data can be rarely separated using an hyperplane__.

@@ -113,7 +113,7 @@ This tutorial code's is shown lines below. You can also download it from
 Result
 ------
--# Here is the result of running the code above and using as input the video stream of a build-in
+-# Here is the result of running the code above and using as input the video stream of a built-in
 webcam:
 ![](images/Cascade_Classifier_Tutorial_Result_Haar.jpg)

@@ -15,7 +15,7 @@ Optical Flow
 ------------
 Optical flow is the pattern of apparent motion of image objects between two consecutive frames
-caused by the movemement of object or camera. It is 2D vector field where each vector is a
+caused by the movement of object or camera. It is 2D vector field where each vector is a
 displacement vector showing the movement of points from first frame to second. Consider the image
 below (Image Courtesy: [Wikipedia article on Optical Flow](http://en.wikipedia.org/wiki/Optical_flow)).

@@ -622,7 +622,7 @@ int main(int argc, char* argv[])
 vector<Size> sizes(num_images);
 vector<UMat> masks(num_images);
-// Preapre images masks
+// Prepare images masks
 for (int i = 0; i < num_images; ++i)
 {
 masks[i].create(images[i].size(), CV_8U);

@@ -41,7 +41,7 @@ const int MAX_FOCUS_STEP = 32767;
 const int FOCUS_DIRECTION_INFTY = 1;
 const int DEFAULT_BREAK_LIMIT = 5;
 const int DEFAULT_OUTPUT_FPS = 20;
-const double epsylon = 0.0005; // compression, noice, etc.
+const double epsylon = 0.0005; // compression, noise, etc.
 struct Args_t
 {

@@ -83,7 +83,7 @@ public:
 r = m_pD3D11SwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&m_pBackBuffer);
 if (FAILED(r))
 {
-throw std::runtime_error("GetBufer() failed!");
+throw std::runtime_error("GetBuffer() failed!");
 }
 r = m_pD3D11Dev->CreateRenderTargetView(m_pBackBuffer, NULL, &m_pRenderTarget);

@@ -67,7 +67,7 @@ You need to prepare 2 LMDB databases: one for training images, one for validatio
 3. Train your detector
 For training you need to have 3 files: train.prototxt, test.prototxt and solver.prototxt. You can find these files in the same directory as for this readme.
-Also you need to edit train.prototxt and test.prototxt to replace paths for your LMDB databases to actual databases you've crated in step 2.
+Also you need to edit train.prototxt and test.prototxt to replace paths for your LMDB databases to actual databases you've created in step 2.
 Now all is done for launch training process.
 Execute next lines in Terminal:

@@ -88,7 +88,7 @@ while cv.waitKey(1) < 0:
 points = []
 for i in range(len(BODY_PARTS)):
-# Slice heatmap of corresponging body's part.
+# Slice heatmap of corresponding body's part.
 heatMap = out[0, i, :, :]
 # Originally, we try to find all the local maximums. To simplify a sample

@@ -703,7 +703,7 @@ int App::process_frame_with_open_cl(cv::Mat& frame, bool use_buffer, cl_mem* mem
 if (0 == mem || 0 == m_img_src)
 {
 // allocate/delete cl memory objects every frame for the simplicity.
-// in real applicaton more efficient pipeline can be built.
+// in real application more efficient pipeline can be built.
 if (use_buffer)
 {

@@ -66,7 +66,7 @@ def on_high_V_thresh_trackbar(val):
 cv.setTrackbarPos(high_V_name, window_detection_name, high_V)
 parser = argparse.ArgumentParser(description='Code for Thresholding Operations using inRange tutorial.')
-parser.add_argument('--camera', help='Camera devide number.', default=0, type=int)
+parser.add_argument('--camera', help='Camera divide number.', default=0, type=int)
 args = parser.parse_args()
 ## [cap]

@@ -25,7 +25,7 @@ def detectAndDisplay(frame):
 parser = argparse.ArgumentParser(description='Code for Cascade Classifier tutorial.')
 parser.add_argument('--face_cascade', help='Path to face cascade.', default='data/haarcascades/haarcascade_frontalface_alt.xml')
 parser.add_argument('--eyes_cascade', help='Path to eyes cascade.', default='data/haarcascades/haarcascade_eye_tree_eyeglasses.xml')
-parser.add_argument('--camera', help='Camera devide number.', type=int, default=0)
+parser.add_argument('--camera', help='Camera divide number.', type=int, default=0)
 args = parser.parse_args()
 face_cascade_name = args.face_cascade
