Fixed small warning and example error

pull/265/head
Vitaliy Lyudvichenko 10 years ago
parent c65d03235e
commit 160d86440c
 modules/dnn/include/opencv2/dnn/blob.hpp         |  6
 modules/dnn/include/opencv2/dnn/dict.hpp         |  2
 modules/dnn/samples/classify_with_googlenet.cpp  |  9
 modules/dnn/src/layers/mvn_layer.cpp             |  4
 modules/dnn/src/torch/torch_importer.cpp         |  2
 modules/dnn/test/cnpy.cpp                        | 18
 modules/dnn/test/cnpy.h                          | 14
 7 files changed

modules/dnn/include/opencv2/dnn/blob.hpp
@@ -83,18 +83,18 @@ namespace dnn
     /** @brief returns size of corresponding dimension (axis)
     @param axis dimension index
     Python-like indexing is supported, so \p axis can be negative, i. e. -1 is last dimension.
-    @note Unlike ::xsize, if \p axis points to non-existing dimension then an error will be generated.
+    @note Unlike xsize(), if \p axis points to non-existing dimension then an error will be generated.
     */
     int size(int axis) const;

     /** @brief returns number of elements
     @param startAxis starting axis (inverse indexing can be used)
     @param endAxis ending (excluded) axis
-    @see ::canonicalAxis
+    @see canonicalAxis()
     */
     size_t total(int startAxis = 0, int endAxis = -1) const;

-    /** @brief converts axis index to canonical format (where 0 <= axis <= ::dims)
+    /** @brief converts axis index to canonical format (where 0 <= axis < dims())
     */
    int canonicalAxis(int axis) const;
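The touched doc comments describe Blob's Python-like axis indexing. As a reading aid, here is a minimal sketch of those semantics; the blob shape (2x3x4x5) and the call site are made up for illustration, not taken from the commit:

    #include <opencv2/dnn/blob.hpp>
    using namespace cv::dnn;

    void indexingDemo(Blob &b)            // suppose b has shape 2x3x4x5
    {
        int last  = b.size(-1);           // 5: axis -1 addresses the last dimension
        int canon = b.canonicalAxis(-1);  // 3: the same axis in canonical [0, dims()) form
        size_t n  = b.total(1);           // number of elements counted from axis 1
        (void)last; (void)canon; (void)n; // silence unused-variable warnings
    }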

modules/dnn/include/opencv2/dnn/dict.hpp
@@ -167,7 +167,7 @@ inline int64 DictValue::get<int64>(int idx) const
         fracpart = std::modf(doubleValue, &intpart);
         CV_Assert(fracpart == 0.0);
-        return doubleValue;
+        return (int64)doubleValue;
     }
     else
     {
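The dict.hpp change is a pure warning fix: the function returns int64, so returning a double triggers an implicit-conversion warning even though the preceding CV_Assert guarantees the value is integral. A reduced, self-contained illustration (the typedef and function name are hypothetical stand-ins):

    #include <cmath>
    #include <cassert>

    typedef long long int64;  // stand-in for OpenCV's int64 typedef

    int64 toInt64(double doubleValue)
    {
        double intpart, fracpart = std::modf(doubleValue, &intpart);
        assert(fracpart == 0.0);    // value is known to be integral here
        return (int64)doubleValue;  // explicit cast documents the intended narrowing
    }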

modules/dnn/samples/classify_with_googlenet.cpp
@@ -4,6 +4,7 @@
 #include <opencv2/highgui.hpp>
 #include <algorithm>
 #include <fstream>
+#include <iostream>

 using namespace cv;
 using namespace cv::dnn;
@@ -12,7 +13,7 @@ typedef std::pair<int, double> ClassProb;
 ClassProb getMaxClass(Blob &probBlob, int sampleNum = 0)
 {
     int numClasses = (int)probBlob.total(1);
-    Mat probMat(1, numClasses, CV_32F, probBlob.ptr<float>(sampleNum));
+    Mat probMat(1, numClasses, CV_32F, probBlob.ptrf(sampleNum));

     double prob;
     Point probLoc;
@@ -21,7 +22,7 @@ ClassProb getMaxClass(Blob &probBlob, int sampleNum = 0)
     return std::make_pair(probLoc.x, prob);
 }

-std::vector<String> CLASES_NAMES;
+std::vector<String> CLASSES_NAMES;

 void initClassesNames()
 {
@@ -33,7 +34,7 @@ void initClassesNames()
     {
         std::getline(fp, name);
         if (name.length())
-            CLASES_NAMES.push_back( name.substr(name.find(' ')+1) );
+            CLASSES_NAMES.push_back( name.substr(name.find(' ')+1) );
     }

     fp.close();
@@ -63,7 +64,7 @@ int main(int argc, char **argv)
     ClassProb bc = getMaxClass(prob);

     initClassesNames();
-    std::string className = (bc.first < (int)CLASES_NAMES.size()) ? CLASES_NAMES[bc.first] : "unnamed";
+    std::string className = (bc.first < (int)CLASSES_NAMES.size()) ? CLASSES_NAMES[bc.first] : "unnamed";

     std::cout << "Best class:";
     std::cout << " #" << bc.first;

modules/dnn/src/layers/mvn_layer.cpp
@@ -48,8 +48,8 @@ void MVNLayer::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
         int workSize[2];
         int splitDim = (acrossChannels) ? 1 : 2;
-        workSize[0] = inpBlob.total(0, splitDim);
-        workSize[1] = inpBlob.total(splitDim);
+        workSize[0] = (int)inpBlob.total(0, splitDim);
+        workSize[1] = (int)inpBlob.total(splitDim);

         Mat inpMat = inpBlob.getMatRef().reshape(1, 2, workSize);
         Mat outMat = outBlob.getMatRef().reshape(1, 2, workSize);
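Blob::total() returns size_t, while the multi-dimensional Mat::reshape(int cn, int newndims, const int* newsz) overload consumes an int array, so the explicit (int) casts silence the narrowing warnings. A reduced sketch of the same pattern (function name is hypothetical):

    #include <opencv2/core.hpp>

    cv::Mat asTwoDims(cv::Mat &m, size_t rows, size_t cols)
    {
        int workSize[2];
        workSize[0] = (int)rows;          // explicit size_t -> int narrowing
        workSize[1] = (int)cols;
        return m.reshape(1, 2, workSize); // 1 channel, 2 dimensions
    }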

modules/dnn/src/torch/torch_importer.cpp
@@ -662,7 +662,7 @@ CV_EXPORTS Ptr<Importer> createTorchImporter(const String&, bool)
     return Ptr<Importer>();
 }

-CV_EXPORTS Blob readTorchMat(const String &filename, bool isBinary)
+CV_EXPORTS Blob readTorchMat(const String&, bool)
 {
     CV_Error(Error::StsNotImplemented, "Module was build without Torch importer");
     return Blob();
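Dropping the parameter names from the stub is the standard way to silence -Wunused-parameter without changing the signature: the arguments are still part of the declaration, just never referenced in the body. A minimal illustration:

    void stubNamed(int value)       { }  // -Wunused-parameter fires on 'value'
    void stubUnnamed(int /*value*/) { }  // no warning: the parameter is unnamed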

modules/dnn/test/cnpy.cpp
@@ -9,10 +9,6 @@
 #include<cstring>
 #include<iomanip>

-#ifdef __GNUC__
-# pragma GCC diagnostic ignored "-Wunused-but-set-variable"
-#endif
-
 char cnpy::BigEndianTest() {
     union
     {
@@ -73,7 +69,7 @@ void cnpy::parse_npy_header(FILE* fp, unsigned int& word_size, unsigned int*& sh
     if(res != 11)
         throw std::runtime_error("parse_npy_header: failed fread");
     std::string header = fgets(buffer,256,fp);
-    assert(header[header.size()-1] == '\n');
+    cnpy_assert(header[header.size()-1] == '\n');

     size_t loc1, loc2;
@@ -99,7 +95,7 @@ void cnpy::parse_npy_header(FILE* fp, unsigned int& word_size, unsigned int*& sh
     //not sure when this applies except for byte array
     loc1 = header.find("descr")+9;
     bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false);
-    assert(littleEndian);
+    cnpy_assert(littleEndian);

     //char type = header[loc1+1];
     //assert(type == map_type(T));
@@ -126,10 +122,10 @@ void cnpy::parse_zip_footer(FILE* fp, unsigned short& nrecs, unsigned int& globa
     global_header_offset = *(unsigned int*) &footer[16];
     comment_len = *(unsigned short*) &footer[20];

-    assert(disk_no == 0);
-    assert(disk_start == 0);
-    assert(nrecs_on_disk == nrecs);
-    assert(comment_len == 0);
+    cnpy_assert(disk_no == 0);
+    cnpy_assert(disk_start == 0);
+    cnpy_assert(nrecs_on_disk == nrecs);
+    cnpy_assert(comment_len == 0);
 }

 cnpy::NpyArray load_the_npy_file(FILE* fp) {
@@ -156,7 +152,7 @@ cnpy::npz_t cnpy::npz_load(std::string fname) {
     FILE* fp = fopen(fname.c_str(),"rb");

     if(!fp) printf("npz_load: Error! Unable to open file %s!\n",fname.c_str());
-    assert(fp);
+    cnpy_assert(fp);

     cnpy::npz_t arrays;

modules/dnn/test/cnpy.h
@@ -18,6 +18,12 @@
 #include<zlib.h>
 #endif

+#ifndef NDEBUG
+#define cnpy_assert(expression) assert(expression)
+#else
+#define cnpy_assert(expression) ((void)(expression))
+#endif
+
 namespace cnpy {

     struct NpyArray {
@@ -76,21 +82,21 @@ namespace cnpy {
         unsigned int* tmp_shape = 0;
         bool fortran_order;
         parse_npy_header(fp,word_size,tmp_shape,tmp_dims,fortran_order);
-        assert(!fortran_order);
+        cnpy_assert(!fortran_order);

         if(word_size != sizeof(T)) {
             std::cout<<"libnpy error: "<<fname<<" has word size "<<word_size<<" but npy_save appending data sized "<<sizeof(T)<<"\n";
-            assert( word_size == sizeof(T) );
+            cnpy_assert( word_size == sizeof(T) );
         }
         if(tmp_dims != ndims) {
             std::cout<<"libnpy error: npy_save attempting to append misdimensioned data to "<<fname<<"\n";
-            assert(tmp_dims == ndims);
+            cnpy_assert(tmp_dims == ndims);
         }

         for(unsigned i = 1; i < ndims; i++) {
             if(shape[i] != tmp_shape[i]) {
                 std::cout<<"libnpy error: npy_save attempting to append misshaped data to "<<fname<<"\n";
-                assert(shape[i] == tmp_shape[i]);
+                cnpy_assert(shape[i] == tmp_shape[i]);
             }
         }
         tmp_shape[0] += shape[0];
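The new cnpy_assert macro is what makes the removed -Wunused-but-set-variable pragma in cnpy.cpp unnecessary: with NDEBUG defined, a plain assert() expands to nothing, so variables referenced only inside asserts (like res in parse_npy_header) look set-but-unused to the compiler, while ((void)(expression)) still counts as a use. A reduced illustration (demo() and compute() are hypothetical):

    #include <cassert>

    #ifndef NDEBUG
    #define cnpy_assert(expression) assert(expression)
    #else
    #define cnpy_assert(expression) ((void)(expression))
    #endif

    static int compute() { return 11; }

    void demo()
    {
        int res = compute();
        // With plain assert() and NDEBUG, 'res' would be set but never used;
        // cnpy_assert keeps the expression as a (void)-discarded use instead.
        cnpy_assert(res == 11);
    }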
