parent 589286bb9f
commit 7035dacac6
4 changed files with 46 additions and 95 deletions
@@ -0,0 +1,31 @@
set(GoogleNet_url "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel")
set(GoogleNet_dst "$ENV{OPENCV_TEST_DATA_PATH}/dnn/bvlc_googlenet.caffemodel")
set(GoogleNet_sha "405fc5acd08a3bb12de8ee5e23a96bec22f08204")

set(VGG16_url "http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_16_layers.caffemodel")
set(VGG16_dst "$ENV{OPENCV_TEST_DATA_PATH}/dnn/VGG_ILSVRC_16_layers.caffemodel")

set(voc-fcn32s_url "http://dl.caffe.berkeleyvision.org/fcn32s-heavy-pascal.caffemodel")
set(voc-fcn32s_dst "$ENV{OPENCV_TEST_DATA_PATH}/dnn/fcn32s-heavy-pascal.caffemodel")

if(NOT model)
  set(model "GoogleNet")
endif()

message(STATUS "Downloading ${${model}_url} to ${${model}_dst}")

if(NOT EXISTS ${${model}_dst})
  if(DEFINED ${model}_sha)
    file(DOWNLOAD ${${model}_url} ${${model}_dst} SHOW_PROGRESS EXPECTED_HASH SHA1=${${model}_sha} STATUS status_vec)
  else()
    file(DOWNLOAD ${${model}_url} ${${model}_dst} SHOW_PROGRESS STATUS status_vec)
  endif()

  list(GET status_vec 0 status)
  list(GET status_vec 1 status_msg)
  if(status EQUAL 0)
    message(STATUS "Ok! ${status_msg}")
  else()
    message(STATUS "Fail! ${status_msg}")
  endif()
endif()
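For context, a minimal usage sketch of the new CMake helper, driven from Python so the required environment variable can be exported in one place. The script path download_model.cmake and the test-data directory are assumptions not taken from this diff; the -Dmodel variable and OPENCV_TEST_DATA_PATH are the only parts that come from the hunk above.

#!/usr/bin/env python
# Hypothetical usage sketch, not part of this commit: invoke the CMake download
# helper for one model. The script name "download_model.cmake" and the test
# data directory below are assumptions.
import os
import subprocess

env = dict(os.environ)
env.setdefault("OPENCV_TEST_DATA_PATH", "/path/to/opencv_extra/testdata")  # assumed location

# "-Dmodel=<name>" selects one of the models declared in the script; GoogleNet is the default.
subprocess.check_call(["cmake", "-Dmodel=VGG16", "-P", "download_model.cmake"], env=env)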
@@ -1,79 +0,0 @@
#!/usr/bin/env python
import os
import sys
import time
import urllib
import hashlib
import argparse
import json


def reporthook(count, block_size, total_size):
    """
    From http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
    """
    global start_time
    global prev_duration
    if count == 0:
        start_time = time.time()
        prev_duration = -1
        return
    duration = max(1, time.time() - start_time)
    if int(duration) == int(prev_duration):
        return

    progress_size = int(count * block_size)
    speed = int(progress_size / (1024 * duration))
    percent = int(count * block_size * 100 / total_size)
    sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
                     (percent, progress_size / (1024 * 1024), speed, duration))
    sys.stdout.flush()
    prev_duration = duration


# Function for checking SHA1.
def model_checks_out(filename, sha1):
    with open(filename, 'rb') as f:
        return hashlib.sha1(f.read()).hexdigest() == sha1

def model_download(filename, url, sha1):
    # Check if model exists.
    if os.path.exists(filename) and model_checks_out(filename, sha1):
        print("Model {} already exists.".format(filename))
        return

    # Download and verify model.
    urllib.urlretrieve(url, filename, reporthook)
    print model_checks_out(filename, sha1)
    if not model_checks_out(filename, sha1):
        print("ERROR: model {} did not download correctly!".format(url))
        sys.exit(1)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Downloading trained model binaries.")
    parser.add_argument("download_list")
    args = parser.parse_args()

    test_dir = os.environ.get("OPENCV_TEST_DATA_PATH")
    if not test_dir:
        print "ERROR: OPENCV_TEST_DATA_PATH environment variable is not specified"
        sys.exit(1)

    try:
        with open(args.download_list, 'r') as f:
            models_to_download = json.load(f)
    except:
        print "ERROR: Can't parse {}".format(args.download_list)
        sys.exit(1)

    for model_name in models_to_download:
        model = models_to_download[model_name]

        dst_dir = os.path.join(test_dir, os.path.dirname(model['file']))
        dst_file = os.path.join(test_dir, model['file'])
        if not os.path.exists(dst_dir):
            print "ERROR: Can't find module testdata path '{}'".format(dst_dir)
            sys.exit(1)

        print "Downloading model '{}' to {} from {} ...".format(model_name, dst_file, model['url'])
        model_download(dst_file, model['url'], model['sha1'])
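The removed script above consumed the JSON download list deleted in the next hunk. As a reference point only, here is a minimal Python 3 sketch of the same download-and-verify step; the function names are hypothetical, and only the SHA1 check and the urlretrieve call mirror the original.

#!/usr/bin/env python3
# Sketch, not part of this commit: a Python 3 rendering of the removed
# download-and-verify step, with urllib.request in place of the Python 2 urllib.
import hashlib
import os
import urllib.request

def sha1_matches(filename, expected_sha1):
    # Hash the model file in binary mode and compare with the published digest.
    with open(filename, "rb") as f:
        return hashlib.sha1(f.read()).hexdigest() == expected_sha1

def download_and_verify(url, filename, expected_sha1):
    # Skip the download when a verified copy is already in place.
    if os.path.exists(filename) and sha1_matches(filename, expected_sha1):
        return
    urllib.request.urlretrieve(url, filename)
    if not sha1_matches(filename, expected_sha1):
        raise RuntimeError("model {} did not download correctly".format(url))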
@@ -1,7 +0,0 @@
{
    "googlenet": {
        "file": "dnn/bvlc_googlenet.caffemodel",
        "url": "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel",
        "sha1": "405fc5acd08a3bb12de8ee5e23a96bec22f08204"
    }
}