Fixed warnings

pull/750/head
Anna Petrovicheva 9 years ago
parent 1f020151c3
commit 083616d05f
  1. modules/dnn/samples/ssd_object_detection.cpp (2 changes)
  2. modules/dnn/src/layers/concat_layer.cpp (5 changes)
  3. modules/dnn/src/layers/prior_box_layer.cpp (2 changes)

@@ -115,7 +115,7 @@ int main(int argc, char** argv)
     Mat detectionMat(detection.rows(), detection.cols(), CV_32F, detection.ptrf());
     float confidenceThreshold = parser.get<float>("min_confidence");
-    for(size_t i = 0; i < detectionMat.rows; i++)
+    for(int i = 0; i < detectionMat.rows; i++)
     {
         float confidence = detectionMat.at<float>(i, 2);
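The hunk above fixes a signed/unsigned comparison warning: cv::Mat::rows is declared as int, so indexing with size_t makes the loop condition compare operands of different signedness. A minimal standalone sketch of the pattern (not part of the commit):

#include <opencv2/core.hpp>

int main()
{
    cv::Mat m(4, 3, CV_32F);

    // for (size_t i = 0; i < m.rows; i++)  // -Wsign-compare: size_t vs int
    for (int i = 0; i < m.rows; i++)        // clean: both operands are int
        m.at<float>(i, 0) = 0.f;

    return 0;
}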

@@ -84,7 +84,7 @@ void ConcatLayer::allocate(const std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
 void ConcatLayer::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
 {
-    // In case when Blob shape used in allocation and inner matrix shape do not match, this layer did not work in previous implementation. This implementation is just a fix and needs to be rewritten more optimally.
+    // In case when Blob shape used in allocation and inner matrix shape do not match, this layer did not work in previous implementation. This implementation needs to be rewritten more optimally.
     if (inputs.size() == 1)
     {
@@ -96,7 +96,7 @@ void ConcatLayer::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
     size_t outputStride = outputs[0].total(axis);
     size_t offset = 0;
-    for (int i = 0; i < inputs.size(); ++i)
+    for (size_t i = 0; i < inputs.size(); ++i)
     {
         size_t inputSliceSize = inputs[i]->total(axis);
         const float* inputData = inputs[i]->ptrf();
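This is the mirror image of the fix in the sample: std::vector::size() returns size_t, so here the int index was the mismatched side. A hedged sketch of the rule of thumb (the walk() helper is hypothetical, not from the commit):

#include <cstddef>
#include <vector>

// Match the index type to the type of the bound it is compared against.
void walk(const std::vector<float> &inputs)
{
    // for (int i = 0; i < inputs.size(); ++i)   // warns: int vs size_t
    for (size_t i = 0; i < inputs.size(); ++i)   // size() returns size_t
        (void)inputs[i];
}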
@@ -105,7 +105,6 @@ void ConcatLayer::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
         {
             const float* src = inputData + n * inputSliceSize;
             float* dst = outputData + n * outputStride + offset;
-            // memcpy(dst, src, inputSliceSize);
             for(size_t k = 0; k < inputSliceSize; k++)
             {
                 dst[k] = src[k];
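A side note on the commented-out memcpy this hunk deletes: memcpy counts bytes, not elements, so reviving it would require scaling by sizeof(float). A sketch under that assumption (copySlice is a hypothetical stand-in for the copy loop above):

#include <cstddef>
#include <cstring>

// Equivalent of the element-wise loop. Note the byte count:
// memcpy(dst, src, inputSliceSize) alone would copy only a
// quarter of the float elements.
void copySlice(float* dst, const float* src, size_t inputSliceSize)
{
    std::memcpy(dst, src, inputSliceSize * sizeof(float));
}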

@@ -211,6 +211,8 @@ void PriorBoxLayer::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
 void PriorBoxLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
 {
+    (void)inputs; // to suppress unused parameter warning
+
     float* outputPtr = outputs[0].ptrf();
     // first prior: aspect_ratio = 1, size = min_size
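The added (void)inputs; line uses the standard cast-to-void idiom: it marks the parameter as deliberately unused, which silences -Wunused-parameter without changing the function's signature (the parameter must stay because the layer interface requires it). A minimal sketch, detached from the layer code:

// forwardStub is a hypothetical function illustrating the idiom.
void forwardStub(const float* unusedInputs, float* output)
{
    (void)unusedInputs; // intentionally unused; keeps -Wall/-Wextra quiet
    output[0] = 0.f;
}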
