Fixed small warn and example error

pull/265/head
Vitaliy Lyudvichenko 10 years ago
parent c65d03235e
commit 160d86440c
  1. modules/dnn/include/opencv2/dnn/blob.hpp (6 changes)
  2. modules/dnn/include/opencv2/dnn/dict.hpp (2 changes)
  3. modules/dnn/samples/classify_with_googlenet.cpp (9 changes)
  4. modules/dnn/src/layers/mvn_layer.cpp (4 changes)
  5. modules/dnn/src/torch/COPYRIGHT.txt (6 changes)
  6. modules/dnn/src/torch/THDiskFile.cpp (4 changes)
  7. modules/dnn/src/torch/THGeneral.cpp (2 changes)
  8. modules/dnn/src/torch/torch_importer.cpp (2 changes)
  9. modules/dnn/test/cnpy.cpp (18 changes)
  10. modules/dnn/test/cnpy.h (14 changes)

@@ -83,18 +83,18 @@ namespace dnn
/** @brief returns size of corresponding dimension (axis)
@param axis dimension index
Python-like indexing is supported, so \p axis can be negative, i. e. -1 is last dimension.
-@note Unlike ::xsize, if \p axis points to non-existing dimension then an error will be generated.
+@note Unlike xsize(), if \p axis points to non-existing dimension then an error will be generated.
*/
int size(int axis) const;
/** @brief returns number of elements
@param startAxis starting axis (inverse indexing can be used)
@param endAxis ending (excluded) axis
-@see ::canonicalAxis
+@see canonicalAxis()
*/
size_t total(int startAxis = 0, int endAxis = -1) const;
-/** @brief converts axis index to canonical format (where 0 <= axis <= ::dims)
+/** @brief converts axis index to canonical format (where 0 <= axis < dims())
*/
int canonicalAxis(int axis) const;
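The corrected doc comments above describe Blob's Python-style axis indexing. A minimal sketch of how the three accessors relate, assuming the module's umbrella header <opencv2/dnn.hpp> and a blob of hypothetical shape 2x3x224x224:

    #include <opencv2/dnn.hpp>
    using namespace cv::dnn;

    void axisDemo(const Blob &b)         // assume b has shape 2x3x224x224
    {
        int last  = b.size(-1);          // 224: -1 addresses the last axis
        int canon = b.canonicalAxis(-1); // 3: mapped into [0, dims())
        size_t perSample = b.total(1);   // 3*224*224: elements from axis 1 on
        size_t all = b.total();          // 2*3*224*224: every element
        (void)last; (void)canon; (void)perSample; (void)all;
    }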

@@ -167,7 +167,7 @@ inline int64 DictValue::get<int64>(int idx) const
fracpart = std::modf(doubleValue, &intpart);
CV_Assert(fracpart == 0.0);
-return doubleValue;
+return (int64)doubleValue;
}
else
{
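The added cast follows a common warning fix: verify with std::modf that the double is integral, then convert explicitly so the compiler does not warn about implicit narrowing. The same pattern standalone (the function name is illustrative, not part of the module):

    #include <cmath>
    #include <cstdint>
    #include <cassert>

    int64_t doubleToInt64(double doubleValue)
    {
        double intpart;
        double fracpart = std::modf(doubleValue, &intpart);
        assert(fracpart == 0.0);      // reject 3.5, accept 3.0
        return (int64_t)doubleValue;  // explicit cast, no conversion warning
    }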

@@ -4,6 +4,7 @@
#include <opencv2/highgui.hpp>
#include <algorithm>
#include <fstream>
+#include <iostream>
using namespace cv;
using namespace cv::dnn;
@@ -12,7 +13,7 @@ typedef std::pair<int, double> ClassProb
ClassProb getMaxClass(Blob &probBlob, int sampleNum = 0)
{
int numClasses = (int)probBlob.total(1);
-Mat probMat(1, numClasses, CV_32F, probBlob.ptr<float>(sampleNum));
+Mat probMat(1, numClasses, CV_32F, probBlob.ptrf(sampleNum));
double prob;
Point probLoc;
@@ -21,7 +22,7 @@ ClassProb getMaxClass(Blob &probBlob, int sampleNum = 0)
return std::make_pair(probLoc.x, prob);
}
-std::vector<String> CLASES_NAMES;
+std::vector<String> CLASSES_NAMES;
void initClassesNames()
{
@@ -33,7 +34,7 @@ void initClassesNames()
{
std::getline(fp, name);
if (name.length())
-CLASES_NAMES.push_back( name.substr(name.find(' ')+1) );
+CLASSES_NAMES.push_back( name.substr(name.find(' ')+1) );
}
fp.close();
@@ -63,7 +64,7 @@ int main(int argc, char **argv)
ClassProb bc = getMaxClass(prob);
initClassesNames();
-std::string className = (bc.first < (int)CLASES_NAMES.size()) ? CLASES_NAMES[bc.first] : "unnamed";
+std::string className = (bc.first < (int)CLASSES_NAMES.size()) ? CLASSES_NAMES[bc.first] : "unnamed";
std::cout << "Best class:";
std::cout << " #" << bc.first;

@@ -48,8 +48,8 @@ void MVNLayer::forward(std::vector<Blob *> &inputs, std::vector<Blob> &outputs)
int workSize[2];
int splitDim = (acrossChannels) ? 1 : 2;
-workSize[0] = inpBlob.total(0, splitDim);
-workSize[1] = inpBlob.total(splitDim);
+workSize[0] = (int)inpBlob.total(0, splitDim);
+workSize[1] = (int)inpBlob.total(splitDim);
Mat inpMat = inpBlob.getMatRef().reshape(1, 2, workSize);
Mat outMat = outBlob.getMatRef().reshape(1, 2, workSize);
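The casts above are needed because total() returns size_t while the reshape sizes are int. A sketch of the same flatten-to-2-D trick on a plain cv::Mat (the helper name is hypothetical):

    #include <opencv2/core.hpp>

    // View an N-D array as rows x cols so each row can be normalized alone.
    cv::Mat asRows(const cv::Mat &blob, int splitDim)
    {
        size_t rows = 1, cols = 1;
        for (int i = 0; i < splitDim; i++)         rows *= blob.size[i];
        for (int i = splitDim; i < blob.dims; i++) cols *= blob.size[i];
        int workSize[2] = { (int)rows, (int)cols };  // explicit casts, as above
        return blob.reshape(1, 2, workSize);         // 2-D view, no data copy
    }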

@@ -18,9 +18,9 @@ modification, are permitted provided that the following conditions are met:
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
-3. Neither the names of Deepmind Technologies, NYU, NEC Laboratories America
-and IDIAP Research Institute nor the names of its contributors may be
-used to endorse or promote products derived from this software without
+3. Neither the names of Deepmind Technologies, NYU, NEC Laboratories America
+and IDIAP Research Institute nor the names of its contributors may be
+used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"

@@ -333,7 +333,7 @@ static long THDiskFile_readString(THFile *self, const char *format, char **str_)
char *p = (char*)THAlloc(TBRS_BSZ);
long total = TBRS_BSZ;
long pos = 0L;
for (;;)
{
if(total-pos == 0) /* we need more space! */
@@ -357,7 +357,7 @@ static long THDiskFile_readString(THFile *self, const char *format, char **str_)
*str_ = p;
return pos;
}
}
}
}
else
{

@@ -210,7 +210,7 @@ void* THRealloc(void *ptr, long size)
{
if(!ptr)
return(THAlloc(size));
if(size == 0)
{
THFree(ptr);

@@ -662,7 +662,7 @@ CV_EXPORTS Ptr<Importer> createTorchImporter(const String&, bool)
return Ptr<Importer>();
}
-CV_EXPORTS Blob readTorchMat(const String &filename, bool isBinary)
+CV_EXPORTS Blob readTorchMat(const String&, bool)
{
CV_Error(Error::StsNotImplemented, "Module was build without Torch importer");
return Blob();
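Dropping the parameter names is the usual way to silence -Wunused-parameter in a stub while keeping the declared signature. The pattern in isolation (function and message are illustrative):

    #include <stdexcept>
    #include <string>

    // Parameters stay in the signature but are unnamed, so the compiler
    // does not warn that they are never used.
    std::string loadModel(const std::string& /*filename*/, bool /*isBinary*/)
    {
        throw std::runtime_error("feature disabled at build time");
    }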

@@ -9,10 +9,6 @@
#include<cstring>
#include<iomanip>
-#ifdef __GNUC__
-# pragma GCC diagnostic ignored "-Wunused-but-set-variable"
-#endif
char cnpy::BigEndianTest() {
union
{
@@ -73,7 +69,7 @@ void cnpy::parse_npy_header(FILE* fp, unsigned int& word_size, unsigned int*& sh
if(res != 11)
throw std::runtime_error("parse_npy_header: failed fread");
std::string header = fgets(buffer,256,fp);
-assert(header[header.size()-1] == '\n');
+cnpy_assert(header[header.size()-1] == '\n');
size_t loc1, loc2;
@@ -99,7 +95,7 @@ void cnpy::parse_npy_header(FILE* fp, unsigned int& word_size, unsigned int*& sh
//not sure when this applies except for byte array
loc1 = header.find("descr")+9;
bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false);
-assert(littleEndian);
+cnpy_assert(littleEndian);
//char type = header[loc1+1];
//assert(type == map_type(T));
@@ -126,10 +122,10 @@ void cnpy::parse_zip_footer(FILE* fp, unsigned short& nrecs, unsigned int& globa
global_header_offset = *(unsigned int*) &footer[16];
comment_len = *(unsigned short*) &footer[20];
-assert(disk_no == 0);
-assert(disk_start == 0);
-assert(nrecs_on_disk == nrecs);
-assert(comment_len == 0);
+cnpy_assert(disk_no == 0);
+cnpy_assert(disk_start == 0);
+cnpy_assert(nrecs_on_disk == nrecs);
+cnpy_assert(comment_len == 0);
}
cnpy::NpyArray load_the_npy_file(FILE* fp) {
@@ -156,7 +152,7 @@ cnpy::npz_t cnpy::npz_load(std::string fname) {
FILE* fp = fopen(fname.c_str(),"rb");
if(!fp) printf("npz_load: Error! Unable to open file %s!\n",fname.c_str());
-assert(fp);
+cnpy_assert(fp);
cnpy::npz_t arrays;

@@ -18,6 +18,12 @@
#include<zlib.h>
#endif
+#ifndef NDEBUG
+#define cnpy_assert(expression) assert(expression)
+#else
+#define cnpy_assert(expression) ((void)(expression))
+#endif
namespace cnpy {
struct NpyArray {
@@ -76,21 +82,21 @@ namespace cnpy {
unsigned int* tmp_shape = 0;
bool fortran_order;
parse_npy_header(fp,word_size,tmp_shape,tmp_dims,fortran_order);
-assert(!fortran_order);
+cnpy_assert(!fortran_order);
if(word_size != sizeof(T)) {
std::cout<<"libnpy error: "<<fname<<" has word size "<<word_size<<" but npy_save appending data sized "<<sizeof(T)<<"\n";
-assert( word_size == sizeof(T) );
+cnpy_assert( word_size == sizeof(T) );
}
if(tmp_dims != ndims) {
std::cout<<"libnpy error: npy_save attempting to append misdimensioned data to "<<fname<<"\n";
-assert(tmp_dims == ndims);
+cnpy_assert(tmp_dims == ndims);
}
for(unsigned i = 1; i < ndims; i++) {
if(shape[i] != tmp_shape[i]) {
std::cout<<"libnpy error: npy_save attempting to append misshaped data to "<<fname<<"\n";
-assert(shape[i] == tmp_shape[i]);
+cnpy_assert(shape[i] == tmp_shape[i]);
}
}
tmp_shape[0] += shape[0];
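The cnpy_assert macro added above keeps release builds warning-clean: plain assert() compiles away under NDEBUG, leaving variables that are only checked in assertions "set but never used" (the warning the removed GCC pragma suppressed), whereas ((void)(expression)) still evaluates the expression. A self-contained sketch, with a hypothetical file name:

    #include <cassert>
    #include <cstdio>

    #ifndef NDEBUG
    #define cnpy_assert(expression) assert(expression)
    #else
    #define cnpy_assert(expression) ((void)(expression))
    #endif

    int main()
    {
        std::FILE* fp = std::fopen("data.npz", "rb");  // hypothetical file
        cnpy_assert(fp);  // debug: aborts if fopen failed; release: evaluates
                          // fp and discards it, so variables referenced only
                          // in assertions never count as unused
        if (fp) std::fclose(fp);
        return 0;
    }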
