|
|
|
@@ -6,23 +6,6 @@ import argparse |
|
|
|
|
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV) |
|
|
|
|
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD) |
|
|
|
|
|
|
|
|
|
parser = argparse.ArgumentParser(description='Use this script to run human parsing using JPPNet', |
|
|
|
|
formatter_class=argparse.ArgumentDefaultsHelpFormatter) |
|
|
|
|
parser.add_argument('--input', '-i', help='Path to input image. Skip this argument to capture frames from a camera.') |
|
|
|
|
parser.add_argument('--model', '-m', required=True, help='Path to pb model.') |
|
|
|
|
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, |
|
|
|
|
help="Choose one of computation backends: " |
|
|
|
|
"%d: automatically (by default), " |
|
|
|
|
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " |
|
|
|
|
"%d: OpenCV implementation" % backends) |
|
|
|
|
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, |
|
|
|
|
help='Choose one of target computation devices: ' |
|
|
|
|
'%d: CPU target (by default), ' |
|
|
|
|
'%d: OpenCL, ' |
|
|
|
|
'%d: OpenCL fp16 (half-float precision), ' |
|
|
|
|
'%d: VPU' % targets) |
|
|
|
|
args, _ = parser.parse_known_args() |
|
|
|
|
|
|
|
|
|
# To get pre-trained model download https://drive.google.com/file/d/1BFVXgeln-bek8TCbRjN6utPAgRE0LJZg/view |
|
|
|
|
# For correct convert .meta to .pb model download original repository https://github.com/Engineering-Course/LIP_JPPNet |
|
|
|
|
# Change script evaluate_parsing_JPPNet-s2.py for human parsing |
|
|
|
@@ -147,7 +130,7 @@ def decode_labels(gray_image): |
|
|
|
|
return segm |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def parse_human(image_path, model_path, backend, target): |
|
|
|
|
def parse_human(image_path, model_path, backend=cv.dnn.DNN_BACKEND_OPENCV, target=cv.dnn.DNN_TARGET_CPU): |
|
|
|
|
""" |
|
|
|
|
Prepare input for execution, run net and postprocess output to parse human. |
|
|
|
|
:param image_path: path to input image |
|
|
|
@@ -164,6 +147,23 @@ def parse_human(image_path, model_path, backend, target): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == '__main__': |
|
|
|
|
parser = argparse.ArgumentParser(description='Use this script to run human parsing using JPPNet', |
|
|
|
|
formatter_class=argparse.ArgumentDefaultsHelpFormatter) |
|
|
|
|
parser.add_argument('--input', '-i', help='Path to input image. Skip this argument to capture frames from a camera.') |
|
|
|
|
parser.add_argument('--model', '-m', required=True, help='Path to pb model.') |
|
|
|
|
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, |
|
|
|
|
help="Choose one of computation backends: " |
|
|
|
|
"%d: automatically (by default), " |
|
|
|
|
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " |
|
|
|
|
"%d: OpenCV implementation" % backends) |
|
|
|
|
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, |
|
|
|
|
help='Choose one of target computation devices: ' |
|
|
|
|
'%d: CPU target (by default), ' |
|
|
|
|
'%d: OpenCL, ' |
|
|
|
|
'%d: OpenCL fp16 (half-float precision), ' |
|
|
|
|
'%d: VPU' % targets) |
|
|
|
|
args, _ = parser.parse_known_args() |
|
|
|
|
|
|
|
|
|
output = parse_human(args.input, args.model, args.backend, args.target) |
|
|
|
|
winName = 'Deep learning human parsing in OpenCV' |
|
|
|
|
cv.namedWindow(winName, cv.WINDOW_AUTOSIZE) |
|
|
|
|