Classifier implemented as an independent class from the word spotter.
- Works on both CPU and GPU, with an expected top performance of 26 ms per sample on a GTX 980 Ti.
- Simple C++ demo added for the word spotter.
- Interactive Python demo added.
parent
51a4f6e415
commit
44d7339feb
9 changed files with 747 additions and 1 deletion
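For orientation, here is a minimal sketch of how the two classes introduced in this commit fit together, based on the API used in the demos below. The model file names follow the demos; "word.png" is only an illustrative input path, not part of the commit.

#include "opencv2/text.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>

int main(){
    // Create the CNN classifier and wrap it in the holistic word recognizer
    // (model file names as used by the demos in this commit).
    cv::Ptr<cv::text::DictNet> cnn = cv::text::DictNet::create(
        "dictnet_vgg_deploy.prototxt", "dictnet_vgg.caffemodel",
        100 /*max minibatch size*/, false /*useGpu*/);
    cv::Ptr<cv::text::OCRHolisticWordRecognizer> wordSpotter =
        cv::text::OCRHolisticWordRecognizer::create(cnn, "dictnet_vgg_labels.txt");

    // Recognise a single cropped word image ("word.png" is an illustrative path).
    cv::Mat word = cv::imread("word.png");
    cv::String transcription;
    double confidence;
    wordSpotter->recogniseImage(word, transcription, confidence);
    std::cout << transcription << " (" << confidence << ")" << std::endl;
    return 0;
}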
@@ -0,0 +1,14 @@
# Caffe package for CNN Triplet training
unset(Caffe_FOUND)

find_path(Caffe_INCLUDE_DIR NAMES caffe/caffe.hpp caffe/common.hpp caffe/net.hpp caffe/proto/caffe.pb.h caffe/util/io.hpp caffe/vision_layers.hpp
          HINTS
          /usr/local/include)

find_library(Caffe_LIBS NAMES caffe
             HINTS
             /usr/local/lib)

if(Caffe_LIBS AND Caffe_INCLUDE_DIR)
    set(Caffe_FOUND 1)
endif()
@@ -0,0 +1,10 @@
# Required for Caffe
unset(Glog_FOUND)

find_library(Glog_LIBS NAMES glog
             HINTS
             /usr/local/lib)

if(Glog_LIBS)
    set(Glog_FOUND 1)
endif()
@@ -0,0 +1,10 @@
# Protobuf package required for Caffe
unset(Protobuf_FOUND)

find_library(Protobuf_LIBS NAMES protobuf
             HINTS
             /usr/local/lib)

if(Protobuf_LIBS)
    set(Protobuf_FOUND 1)
endif()
@@ -0,0 +1,90 @@
/*
 * dictnet_demo.cpp
 *
 * Demonstrates simple use of the holistic word classifier in C++
 *
 * Created on: June 26, 2016
 *     Author: Anguelos Nicolaou <anguelos.nicolaou AT gmail.com>
 */

#include "opencv2/text.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

#include <sstream>
#include <vector>
#include <iostream>
#include <iomanip>
#include <fstream>

std::string getHelpStr(std::string progFname){
    std::stringstream out;
    out << " Demo of wordspotting CNN for text recognition." << std::endl;
    out << " Max Jaderberg et al.: Reading Text in the Wild with Convolutional Neural Networks, IJCV 2015" << std::endl << std::endl;

    out << " Usage: " << progFname << " <output_file> <input_image1> <input_image2> ... <input_imageN>" << std::endl;
    out << " Caffe model files (dictnet_vgg.caffemodel, dictnet_vgg_deploy.prototxt, dictnet_vgg_labels.txt)" << std::endl;
    out << " must be in the current directory." << std::endl << std::endl;

    out << " Obtaining the Caffe model files in a Linux shell:" << std::endl;
    out << " wget http://nicolaou.homouniversalis.org/assets/vgg_text/dictnet_vgg.caffemodel" << std::endl;
    out << " wget http://nicolaou.homouniversalis.org/assets/vgg_text/dictnet_vgg_deploy.prototxt" << std::endl;
    out << " wget http://nicolaou.homouniversalis.org/assets/vgg_text/dictnet_vgg_labels.txt" << std::endl << std::endl;
    return out.str();
}

inline bool fileExists (std::string filename) {
    std::ifstream f(filename.c_str());
    return f.good();
}


int main(int argc, const char * argv[]){
    const int USE_GPU = 0;

    if (argc < 3){
        std::cout << getHelpStr(argv[0]);
        std::cout << "Insufficient parameters. Aborting!" << std::endl;
        exit(1);
    }

    if (!fileExists("dictnet_vgg.caffemodel") ||
            !fileExists("dictnet_vgg_deploy.prototxt") ||
            !fileExists("dictnet_vgg_labels.txt")){
        std::cout << getHelpStr(argv[0]);
        std::cout << "Model files not found in the current directory. Aborting!" << std::endl;
        exit(1);
    }

    if (fileExists(argv[1])){
        std::cout << getHelpStr(argv[0]);
        std::cout << "Output file must not exist. Aborting!" << std::endl;
        exit(1);
    }

    std::vector<cv::Mat> imageList;
    for(int imageIdx = 2; imageIdx < argc; imageIdx++){
        if (fileExists(argv[imageIdx])){
            imageList.push_back(cv::imread(cv::String(argv[imageIdx])));
        }else{
            std::cout << getHelpStr(argv[0]);
            std::cout << argv[imageIdx] << " doesn't exist. Aborting!" << std::endl;
            exit(1);
        }
    }
    cv::Ptr<cv::text::DictNet> cnn = cv::text::DictNet::create(
        "dictnet_vgg_deploy.prototxt", "dictnet_vgg.caffemodel", 100, USE_GPU);

    cv::Ptr<cv::text::OCRHolisticWordRecognizer> wordSpotter =
        cv::text::OCRHolisticWordRecognizer::create(cnn, "dictnet_vgg_labels.txt");

    std::vector<cv::String> wordList;
    std::vector<double> outProbabilities;
    wordSpotter->recogniseImageBatch(imageList, wordList, outProbabilities);

    std::ofstream out;
    out.open(argv[1]);
    for(size_t imgIdx = 0; imgIdx < imageList.size(); imgIdx++){
        out << argv[imgIdx + 2] << "," << wordList[imgIdx] << "," << outProbabilities[imgIdx] << std::endl;
    }
    out.close();
}
@@ -0,0 +1,82 @@
#!/usr/bin/env python

import cv2
import sys
import os.path

# Global variables shared between the mouse callback and main
refPt = []
cropping = False
image = None
drawImage = None
dictNet = None
wordSpotter = None


def mouseCallback(event, x, y, flags, param):
    # grab references to the global variables
    global refPt, cropping, wordSpotter, drawImage, image

    # if the left mouse button was clicked, record the starting
    # (x, y) coordinates and indicate that cropping is being
    # performed
    if event == cv2.EVENT_LBUTTONDOWN:
        refPt = [(x, y)]
        cropping = True

    # check to see if the left mouse button was released
    elif event == cv2.EVENT_LBUTTONUP:
        # record the ending (x, y) coordinates and indicate that
        # the cropping operation is finished
        refPt.append((x, y))
        cropping = False

        # recognise the region of interest and draw a rectangle around it
        roi = image[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
        res = wordSpotter.recogniseImage(roi)
        drawImage = image.copy()
        cv2.rectangle(drawImage, refPt[0], refPt[1], (0, 255, 0), 2)
        cv2.putText(drawImage, "%s:%f" % (res[0], res[1]), refPt[0], cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.imshow("Select A Region", drawImage)


if __name__ == '__main__':
    USEGPU = False
    helpStr = """ Usage: """ + sys.argv[0] + """ IMAGE_FILENAME

    Press 'q' or 'Q' to exit.

    The model files must be available in the current directory.
    In a Linux shell they can be downloaded (~2GB) with the following commands:
    wget http://nicolaou.homouniversalis.org/assets/vgg_text/dictnet_vgg.caffemodel
    wget http://nicolaou.homouniversalis.org/assets/vgg_text/dictnet_vgg_deploy.prototxt
    wget http://nicolaou.homouniversalis.org/assets/vgg_text/dictnet_vgg_labels.txt
    """
    if len(sys.argv) != 2 or not os.path.isfile(sys.argv[1]):
        print(helpStr)
        print('No image file given. Aborting!')
        sys.exit(1)
    if not (os.path.isfile('dictnet_vgg_deploy.prototxt') and
            os.path.isfile('dictnet_vgg.caffemodel') and
            os.path.isfile('dictnet_vgg_labels.txt')):
        print(helpStr)
        print('Model files not present. Aborting!')
        sys.exit(1)

    dictNet = cv2.text.DictNet_create('./dictnet_vgg_deploy.prototxt', './dictnet_vgg.caffemodel', 100, USEGPU)
    wordSpotter = cv2.text.OCRHolisticWordRecognizer_create(dictNet, "./dictnet_vgg_labels.txt")

    image = cv2.imread(sys.argv[1])
    drawImage = image.copy()
    cv2.namedWindow("Select A Region")
    cv2.setMouseCallback("Select A Region", mouseCallback)

    while True:
        cv2.imshow("Select A Region", drawImage)
        key = cv2.waitKey(1) & 0xFF

        # if the 'q' key is pressed, break from the loop
        if key == ord("q") or key == ord("Q"):
            break

    cv2.destroyAllWindows()
@@ -0,0 +1,287 @@
#include "precomp.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/core.hpp"

#include <iostream>
#include <fstream>
#include <sstream>
#include <queue>
#include <algorithm>
#include <iosfwd>
#include <memory>
#include <string>
#include <utility>
#include <vector>

//Lluis: don't know why these are not visible here
#define CV_StsNotImplemented (-213)
#define CV_StsError (-2)

//Lluis: should this be moved elsewhere? Including it in precomp.hpp doesn't work.
#ifdef HAVE_CAFFE
#include "caffe/caffe.hpp"
#endif


namespace cv { namespace text {

//Lluis: maybe OpenCV has a routine better suited for this
inline bool fileExists (String filename) {
    std::ifstream f(filename.c_str());
    return f.good();
}


class DictNetCaffeImpl: public DictNet{
protected:
    void preprocess(Mat& input, Mat& output){
        if(input.channels() == 3){
            Mat tmpInput;
            cvtColor(input, tmpInput, COLOR_BGR2GRAY);
            if(input.depth() == CV_8U){
                tmpInput.convertTo(output, CV_32FC1, 1/255.0);
            }else{//assuming values are already in the desired [0,1] range
                tmpInput.convertTo(output, CV_32FC1);
            }
        }else if(input.channels() == 1){
            if(input.depth() == CV_8U){
                input.convertTo(output, CV_32FC1, 1/255.0);
            }else{//assuming values are already in the desired [0,1] range
                input.convertTo(output, CV_32FC1);
            }
        }else{
            CV_Error(CV_StsError, "Expecting images with either 1 or 3 channels");
        }
        resize(output, output, this->inputGeometry_);
        Scalar dev, mean;
        meanStdDev(output, mean, dev);
        subtract(output, mean[0], output);
        divide(output, (dev[0]/128.0), output);
    }

    void classifyMiniBatch(std::vector<Mat> inputImageList, Mat outputMat){
        //Classifies a list of images containing at most minibatchSz_ images
        CV_Assert(int(inputImageList.size()) <= this->minibatchSz_);
        CV_Assert(outputMat.isContinuous());
        net_->input_blobs()[0]->Reshape(int(inputImageList.size()), 1, this->inputGeometry_.height, this->inputGeometry_.width);
        net_->Reshape();
        float* inputBuffer = net_->input_blobs()[0]->mutable_cpu_data();
        float* inputData = inputBuffer;
        for(size_t imgNum = 0; imgNum < inputImageList.size(); imgNum++){
            Mat preprocessed;
            cv::Mat netInputWrapped(this->inputGeometry_.height, this->inputGeometry_.width, CV_32FC1, inputData);
            this->preprocess(inputImageList[imgNum], preprocessed);
            preprocessed.copyTo(netInputWrapped);
            inputData += (this->inputGeometry_.height * this->inputGeometry_.width);
        }
        this->net_->ForwardPrefilled();
        const float* outputNetData = net_->output_blobs()[0]->cpu_data();
        float* outputMatData = (float*)(outputMat.data);
        memcpy(outputMatData, outputNetData, sizeof(float) * this->outputSize_ * inputImageList.size());
    }

#ifdef HAVE_CAFFE
    Ptr<caffe::Net<float> > net_;
#endif
    Size inputGeometry_;
    const int minibatchSz_;
    const bool gpuBackend_;
    Ptr<Mat> meanImage_;
    bool standarize_;
    std::vector<std::string> labels_;
    int outputSize_;
public:
    DictNetCaffeImpl(String modelArchFilename, String modelWeightsFilename, int maxMinibatchSz, bool useGpu)
        :minibatchSz_(maxMinibatchSz), gpuBackend_(useGpu){
        CV_Assert(this->minibatchSz_ > 0);
        CV_Assert(fileExists(modelArchFilename));
        CV_Assert(fileExists(modelWeightsFilename));
#ifdef HAVE_CAFFE
        if(this->gpuBackend_){
            caffe::Caffe::set_mode(caffe::Caffe::GPU);
        }else{
            caffe::Caffe::set_mode(caffe::Caffe::CPU);
        }
        this->net_.reset(new caffe::Net<float>(modelArchFilename, caffe::TEST));
        CV_Assert(net_->num_inputs() == 1);
        CV_Assert(net_->num_outputs() == 1);
        CV_Assert(this->net_->input_blobs()[0]->channels() == 1);
        this->net_->CopyTrainedLayersFrom(modelWeightsFilename);
        caffe::Blob<float>* inputLayer = this->net_->input_blobs()[0];
        this->inputGeometry_ = Size(inputLayer->width(), inputLayer->height());
        inputLayer->Reshape(this->minibatchSz_, 1, this->inputGeometry_.height, this->inputGeometry_.width);
        net_->Reshape();
        this->outputSize_ = net_->output_blobs()[0]->channels();
#else
        CV_Error(CV_StsError, "Caffe not available during compilation!");
#endif
    }

    void classify(InputArray image, OutputArray classProbabilities){
        std::vector<Mat> inputImageList;
        inputImageList.push_back(image.getMat());
        classifyBatch(inputImageList, classProbabilities);
    }

    void classifyBatch(InputArrayOfArrays inputImageList, OutputArray classProbabilities){
        std::vector<Mat> allImageVector;
        inputImageList.getMatVector(allImageVector);
        classProbabilities.create(Size(this->outputSize_, allImageVector.size()), CV_32F);
        Mat outputMat = classProbabilities.getMat();
        for(int imgNum = 0; imgNum < int(allImageVector.size()); imgNum += this->minibatchSz_){
            int rangeEnd = imgNum + std::min<int>(allImageVector.size() - imgNum, this->minibatchSz_);
            std::vector<Mat>::const_iterator from = allImageVector.begin() + imgNum;
            std::vector<Mat>::const_iterator to = allImageVector.begin() + rangeEnd;
            std::vector<Mat> minibatchInput(from, to);
            classifyMiniBatch(minibatchInput, outputMat.rowRange(imgNum, rangeEnd));
        }
    }

    int getOutputSize(){
        return this->outputSize_;
    }
    int getMinibatchSize(){
        return this->minibatchSz_;
    }
    bool usingGpu(){
        return this->gpuBackend_;
    }
    int getBackend(){
        return OCR_HOLISTIC_BACKEND_CAFFE;
    }
};


Ptr<DictNet> DictNet::create(String archFilename, String weightsFilename, int minibatchSz, bool useGpu, int backEnd){
    switch(backEnd){
    case OCR_HOLISTIC_BACKEND_CAFFE:
        return Ptr<DictNet>(new DictNetCaffeImpl(archFilename, weightsFilename, minibatchSz, useGpu));
    case OCR_HOLISTIC_BACKEND_NONE:
    default:
        CV_Error(CV_StsError, "DictNet::create backend not implemented");
        return Ptr<DictNet>();
    }
}


class OCRHolisticWordRecognizerImpl: public OCRHolisticWordRecognizer{
private:
    struct NetOutput{
        //Auxiliary structure that handles the logic of getting class ids and
        //probabilities from the raw outputs of caffe
        int wordIdx;
        float probabillity;

        static bool sorter(const NetOutput& o1, const NetOutput& o2){//used with std::sort to provide the most probable class
            return o1.probabillity > o2.probabillity;
        }

        static void getOutputs(const float* buffer, int nbOutputs, std::vector<NetOutput>& res){
            res.resize(nbOutputs);
            for(int k = 0; k < nbOutputs; k++){
                res[k].wordIdx = k;
                res[k].probabillity = buffer[k];
            }
            std::sort(res.begin(), res.end(), NetOutput::sorter);
        }

        static void getClassification(const float* buffer, int nbOutputs, int& classNum, double& confidence){
            std::vector<NetOutput> tmp;
            getOutputs(buffer, nbOutputs, tmp);
            classNum = tmp[0].wordIdx;
            confidence = tmp[0].probabillity;
        }
    };
protected:
    std::vector<String> labels_;
    Ptr<TextImageClassifier> classifier_;
public:
    OCRHolisticWordRecognizerImpl(Ptr<TextImageClassifier> classifierPtr, String vocabullaryFilename):classifier_(classifierPtr){
        CV_Assert(fileExists(vocabullaryFilename));//Lluis: this fails for some reason
        std::ifstream labelsFile(vocabullaryFilename.c_str());
        if(!labelsFile){
            CV_Error(CV_StsError, "Could not read labels from file");
        }
        std::string line;
        while (std::getline(labelsFile, line)){
            labels_.push_back(std::string(line));
        }
        CV_Assert(this->classifier_->getOutputSize() == int(this->labels_.size()));
    }

    void recogniseImage(InputArray inputImage, CV_OUT String& transcription, CV_OUT double& confidence){
        Mat netOutput;
        this->classifier_->classify(inputImage, netOutput);
        int classNum;
        NetOutput::getClassification((float*)(netOutput.data), this->classifier_->getOutputSize(), classNum, confidence);
        transcription = this->labels_[classNum];
    }

    void recogniseImageBatch(InputArrayOfArrays inputImageList, CV_OUT std::vector<String>& transcriptionVec, CV_OUT std::vector<double>& confidenceVec){
        Mat netOutput;
        this->classifier_->classifyBatch(inputImageList, netOutput);
        for(int k = 0; k < netOutput.rows; k++){
            int classNum;
            double confidence;
            NetOutput::getClassification((float*)(netOutput.row(k).data), this->classifier_->getOutputSize(), classNum, confidence);
            transcriptionVec.push_back(this->labels_[classNum]);
            confidenceVec.push_back(confidence);
        }
    }


    void run(Mat& image, std::string& output_text, std::vector<Rect>* component_rects=NULL,
             std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,
             int component_level=0){
        CV_Assert(component_level == OCR_LEVEL_WORD);//components are not applicable for word spotting
        double confidence;
        String transcription;
        recogniseImage(image, transcription, confidence);
        output_text = transcription.c_str();
        if(component_rects != NULL){
            component_rects->resize(1);
            (*component_rects)[0] = Rect(0, 0, image.size().width, image.size().height);
        }
        if(component_texts != NULL){
            component_texts->resize(1);
            (*component_texts)[0] = transcription.c_str();
        }
        if(component_confidences != NULL){
            component_confidences->resize(1);
            (*component_confidences)[0] = confidence;
        }
    }

    void run(Mat& image, Mat& mask, std::string& output_text, std::vector<Rect>* component_rects=NULL,
             std::vector<std::string>* component_texts=NULL, std::vector<float>* component_confidences=NULL,
             int component_level=0){
        CV_Assert(mask.cols == image.cols && mask.rows == image.rows);//the mask is ignored because the CNN operates on the full image
        this->run(image, output_text, component_rects, component_texts, component_confidences, component_level);
    }

    std::vector<String>& getVocabulary(){
        return this->labels_;
    }
    /*
    void getVocabulary(CV_OUT const std::vector<String>& voc){
        voc.reshape(this->labels_.size());
        for(int k = 0; k < voc.size(); k++){
            voc[k] = this->labels_[k];
        }
    }
    */
};

Ptr<OCRHolisticWordRecognizer> OCRHolisticWordRecognizer::create(Ptr<TextImageClassifier> classifierPtr, String vocabullaryFilename){
    return Ptr<OCRHolisticWordRecognizer>(new OCRHolisticWordRecognizerImpl(classifierPtr, vocabullaryFilename));
}

Ptr<OCRHolisticWordRecognizer> OCRHolisticWordRecognizer::create(String modelArchFilename, String modelWeightsFilename, String vocabullaryFilename){
    Ptr<TextImageClassifier> classifierPtr(new DictNetCaffeImpl(modelArchFilename, modelWeightsFilename, 100, 0));
    return Ptr<OCRHolisticWordRecognizer>(new OCRHolisticWordRecognizerImpl(classifierPtr, vocabullaryFilename));
}

}} // namespace cv::text
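A usage note on the classifier interface above: classifyBatch fills one row per input image with getOutputSize() class scores, which is exactly what NetOutput::getClassification reads. Below is a minimal sketch of consuming that output directly, assuming TextImageClassifier is the base classifier type declared in the module headers used here; the helper name topClasses is illustrative.

#include "opencv2/text.hpp"
#include "opencv2/core.hpp"
#include <vector>

// Sketch: extract the most probable class per image from classifyBatch output;
// this mirrors what NetOutput::getClassification does internally.
static void topClasses(const cv::Ptr<cv::text::TextImageClassifier>& classifier,
                       const std::vector<cv::Mat>& images,
                       std::vector<int>& classIdx, std::vector<double>& confidence){
    cv::Mat scores;
    classifier->classifyBatch(images, scores); // rows = images, cols = classifier->getOutputSize()
    classIdx.clear();
    confidence.clear();
    for(int r = 0; r < scores.rows; r++){
        double maxVal;
        cv::Point maxLoc;
        cv::minMaxLoc(scores.row(r), 0, &maxVal, 0, &maxLoc);
        classIdx.push_back(maxLoc.x);          // index into the vocabulary/labels file
        confidence.push_back(maxVal);
    }
}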
@@ -1,7 +1,13 @@
#ifndef __OPENCV_TEXT_CONFIG_HPP__
#define __OPENCV_TEXT_CONFIG_HPP__

// HAVE CAFFE
#cmakedefine HAVE_CAFFE

// HAVE OCR Tesseract
#cmakedefine HAVE_TESSERACT

#endif



#endif