diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp index 67042a14b7..ab443cd67e 100644 --- a/modules/dnn/include/opencv2/dnn/dnn.hpp +++ b/modules/dnn/include/opencv2/dnn/dnn.hpp @@ -389,7 +389,7 @@ CV__DNN_INLINE_NS_BEGIN /** - * @brief "Deattaches" all the layers, attached to particular layer. + * @brief "Detaches" all the layers attached to a particular layer. */ virtual void unsetAttached(); @@ -1579,7 +1579,7 @@ public: * - top-right * - bottom-right * - * Use cv::getPerspectiveTransform function to retrive image region without perspective transformations. + * Use cv::getPerspectiveTransform function to retrieve image region without perspective transformations. * * @note If DL model doesn't support that kind of output then result may be derived from detectTextRectangles() output. * diff --git a/modules/dnn/src/cuda/concat.cu b/modules/dnn/src/cuda/concat.cu index ac1be75682..5250b59518 100644 --- a/modules/dnn/src/cuda/concat.cu +++ b/modules/dnn/src/cuda/concat.cu @@ -100,7 +100,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { CV_Assert(output.rank() == input.rank()); CV_Assert(output_axis_offset < output.get_axis_size(axis)); - /* if axes preceeding the concat axis are all singleton, the concat blocks are contiguous + /* if axes preceding the concat axis are all singleton, the concat blocks are contiguous * in the output and we can copy each block directly */ if (output.size_range(0, axis) == 1) diff --git a/modules/dnn/src/cuda/kernel_dispatcher.hpp b/modules/dnn/src/cuda/kernel_dispatcher.hpp index b0fc658850..0f3e7c4fc4 100644 --- a/modules/dnn/src/cuda/kernel_dispatcher.hpp +++ b/modules/dnn/src/cuda/kernel_dispatcher.hpp @@ -33,7 +33,7 @@ * template * void launch_some_kernel(...); * - * // creates the dispatcher named "some_dispatcher" which invokves the correct instantiation of "launch_some_kernel" + * // creates the dispatcher named "some_dispatcher" which invokes the correct 
instantiation of "launch_some_kernel" * GENERATE_KERNEL_DISPATCHER(some_dispatcher, launch_some_kernel); * * // internal API function diff --git a/modules/dnn/src/cuda/permute.cu b/modules/dnn/src/cuda/permute.cu index 082c1bf75e..35c95a6737 100644 --- a/modules/dnn/src/cuda/permute.cu +++ b/modules/dnn/src/cuda/permute.cu @@ -72,7 +72,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { __syncthreads(); /* We interchange `threadIdx.x` and `threadIdx.y` so that consecutive output indices map to - * consecutive threads. This would allow writes across threds in a warp to be coalesced. + * consecutive threads. This would allow writes across threads in a warp to be coalesced. */ const index_type out_x = blockIdx.y * TILE_SIZE + threadIdx.x; const index_type out_y_begin = blockIdx.x * TILE_SIZE + threadIdx.y; @@ -156,7 +156,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { * tensor indices be [o1, o2, ...]. The permutation operation essentially copies items * from the input tensor to new locations in the output tensor as dictated by the indices. * - * If the size of the nth axis (say i2) of the input is one the input and output indicies for + * If the size of the nth axis (say i2) of the input is one the input and output indices for * all the elements will be of the form be [i1, 0, ...] and [..., 0, ...] respectively. * The index does not contribute to the element's address calculation and hence would give * identical result if it weren't there. diff --git a/modules/dnn/src/cuda/slice.cu b/modules/dnn/src/cuda/slice.cu index 37b718cd63..461e87e549 100644 --- a/modules/dnn/src/cuda/slice.cu +++ b/modules/dnn/src/cuda/slice.cu @@ -159,7 +159,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { /* We can do a copy if the reduced rank is two and only the first axis is sliced. * The general requirement is that only one axis is sliced and all the axes that - * preceed the sliced axis are singleton. 
However, the reductions above will remove + * precede the sliced axis are singleton. However, the reductions above will remove * all the leading singleton axes and merge the trailing unsliced axes into one, or * zero if there are no trailing unsliced axes. The latter is handled separately. */ diff --git a/modules/dnn/src/cuda4dnn/csl/memory.hpp b/modules/dnn/src/cuda4dnn/csl/memory.hpp index 40918cd4b3..683ed62059 100644 --- a/modules/dnn/src/cuda4dnn/csl/memory.hpp +++ b/modules/dnn/src/cuda4dnn/csl/memory.hpp @@ -68,7 +68,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { } } }); - /* std::shared_ptr::reset invokves the deleter if an exception occurs; hence, we don't + /* std::shared_ptr::reset invokes the deleter if an exception occurs; hence, we don't * need to have a try-catch block to free the allocated device memory */ diff --git a/modules/dnn/src/cuda4dnn/csl/pointer.hpp b/modules/dnn/src/cuda4dnn/csl/pointer.hpp index 45bf94bf0a..4d7a232093 100644 --- a/modules/dnn/src/cuda4dnn/csl/pointer.hpp +++ b/modules/dnn/src/cuda4dnn/csl/pointer.hpp @@ -147,7 +147,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { /* host const void pointer to const void device pointer */ CUDA4DNN_HOST_DEVICE explicit DevicePtr(pointer ptr_) noexcept : ptr{ ptr_ } { } - /* allow any device pointer to be implicitly convereted to void device pointer */ + /* allow any device pointer to be implicitly converted to void device pointer */ template CUDA4DNN_HOST_DEVICE DevicePtr(DevicePtr ptr_) noexcept : ptr{ ptr_.get() } { } @@ -199,7 +199,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { /* host pointer to device pointer */ CUDA4DNN_HOST_DEVICE explicit DevicePtr(pointer ptr_) noexcept : ptr{ ptr_ } { } - /* allow any device pointer to mutable memory to be implicitly convereted to void device pointer */ + /* allow any device pointer to mutable memory to be implicitly converted to void device pointer */ template ::value, 
bool>::type = false> CUDA4DNN_HOST_DEVICE DevicePtr(DevicePtr ptr_) noexcept : ptr { ptr_.get() } { } diff --git a/modules/dnn/src/darknet/darknet_io.cpp b/modules/dnn/src/darknet/darknet_io.cpp index 11aad453e3..520f3c94be 100644 --- a/modules/dnn/src/darknet/darknet_io.cpp +++ b/modules/dnn/src/darknet/darknet_io.cpp @@ -791,7 +791,7 @@ namespace cv { if (layers_vec.size() > 1) { // layer ids in layers_vec - inputs of Slice layers - // after adding offset to layers_vec: layer ids - ouputs of Slice layers + // after adding offset to layers_vec: layer ids - outputs of Slice layers for (size_t k = 0; k < layers_vec.size(); ++k) layers_vec[k] += layers_vec.size(); diff --git a/modules/dnn/src/model.cpp b/modules/dnn/src/model.cpp index 4cce0a7dc4..7444011a64 100644 --- a/modules/dnn/src/model.cpp +++ b/modules/dnn/src/model.cpp @@ -799,7 +799,7 @@ struct TextRecognitionModel_Impl : public Model::Impl virtual std::string ctcPrefixBeamSearchDecode(const Mat& prediction) { - // CTC prefix beam seach decode. + // CTC prefix beam search decode. 
// For more detail, refer to: // https://distill.pub/2017/ctc/#inference // https://gist.github.com/awni/56369a90d03953e370f3964c826ed4b0i diff --git a/modules/dnn/src/tengine4dnn/src/tengine_graph_convolution.cpp b/modules/dnn/src/tengine4dnn/src/tengine_graph_convolution.cpp index ecb5c62f56..daadc32ad2 100644 --- a/modules/dnn/src/tengine4dnn/src/tengine_graph_convolution.cpp +++ b/modules/dnn/src/tengine4dnn/src/tengine_graph_convolution.cpp @@ -331,7 +331,7 @@ teng_graph_t tengine_init(const char* layer_name, float* input_, int inch, int g teg_weight = kernel_; } - /* initial the resoruce of tengine */ + /* initialize the resource of tengine */ if(false == tengine_init_flag) { init_tengine(); diff --git a/modules/dnn/test/test_ie_models.cpp b/modules/dnn/test/test_ie_models.cpp index 0fe19db5e9..3622f69bdb 100644 --- a/modules/dnn/test/test_ie_models.cpp +++ b/modules/dnn/test/test_ie_models.cpp @@ -290,7 +290,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath if (cvtest::debugLevel > 0) { const std::vector& dims = desc.getDims(); - std::cout << "Input: '" << it.first << "' precison=" << desc.getPrecision() << " dims=" << dims.size() << " ["; + std::cout << "Input: '" << it.first << "' precision=" << desc.getPrecision() << " dims=" << dims.size() << " ["; for (auto d : dims) std::cout << " " << d; std::cout << "] ocv_mat=" << inputsMap[it.first].size << " of " << typeToString(inputsMap[it.first].type()) << std::endl; @@ -308,7 +308,7 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath if (cvtest::debugLevel > 0) { const std::vector& dims = desc.getDims(); - std::cout << "Output: '" << it.first << "' precison=" << desc.getPrecision() << " dims=" << dims.size() << " ["; + std::cout << "Output: '" << it.first << "' precision=" << desc.getPrecision() << " dims=" << dims.size() << " ["; for (auto d : dims) std::cout << " " << d; std::cout << "] ocv_mat=" << outputsMap[it.first].size << " of " << 
typeToString(outputsMap[it.first].type()) << std::endl; diff --git a/samples/dnn/speech_recognition.cpp b/samples/dnn/speech_recognition.cpp index 7e9ee1f54d..ff461c50f5 100644 --- a/samples/dnn/speech_recognition.cpp +++ b/samples/dnn/speech_recognition.cpp @@ -33,7 +33,7 @@ private: double highfreq = sample_rate / 2; public: - // Mel filterbanks preperation + // Mel filterbanks preparation double hz_to_mel(double frequencies) { //Converts frequencies from hz to mel scale @@ -149,7 +149,7 @@ public: return weights; } - // STFT preperation + // STFT preparation vector pad_window_center(vector&data, int size) { // Pad the window out to n_fft size diff --git a/samples/dnn/speech_recognition.py b/samples/dnn/speech_recognition.py index 7bc424b37c..da2ce11521 100644 --- a/samples/dnn/speech_recognition.py +++ b/samples/dnn/speech_recognition.py @@ -44,7 +44,7 @@ import os model.graph.initializer.insert(i,init) ``` - 6. Add an additional reshape node to handle the inconsistant input from python and c++ of openCV. + 6. Add an additional reshape node to handle the inconsistent input from python and c++ of openCV. see https://github.com/opencv/opencv/issues/19091 Make & insert a new node with 'Reshape' operation & required initializer ``` @@ -256,7 +256,7 @@ class FilterbankFeatures: weights *= enorm[:, np.newaxis] return weights - # STFT preperation + # STFT preparation def pad_window_center(self, data, size, axis=-1, **kwargs): ''' Centers the data and pads. @@ -329,7 +329,7 @@ class FilterbankFeatures: then padded with zeros to match n_fft fft_window : a vector or array of length `n_fft` having values computed by a window function - pad_mode : mode while padding the singnal + pad_mode : mode while padding the signal return_complex : returns array with complex data type if `True` return : Matrix of short-term Fourier transform coefficients. '''