diff --git a/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp b/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp
index 0bf07ded5d..88aeae3713 100644
--- a/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp
+++ b/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp
@@ -301,12 +301,24 @@ char* YOLO_V8::TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std::
         break;
     }
     case YOLO_CLS:
+    case YOLO_CLS_HALF:
     {
+        cv::Mat rawData;
+        if (modelType == YOLO_CLS) {
+            // FP32
+            rawData = cv::Mat(1, this->classes.size(), CV_32F, output);
+        } else {
+            // FP16
+            rawData = cv::Mat(1, this->classes.size(), CV_16F, output);
+            rawData.convertTo(rawData, CV_32F);
+        }
+        float *data = (float *) rawData.data;
+
         DL_RESULT result;
         for (int i = 0; i < this->classes.size(); i++)
         {
             result.classId = i;
-            result.confidence = output[i];
+            result.confidence = data[i];
             oResult.push_back(result);
         }
         break;
diff --git a/examples/YOLOv8-ONNXRuntime-CPP/inference.h b/examples/YOLOv8-ONNXRuntime-CPP/inference.h
index 3174ae9332..3a9d029ccf 100644
--- a/examples/YOLOv8-ONNXRuntime-CPP/inference.h
+++ b/examples/YOLOv8-ONNXRuntime-CPP/inference.h
@@ -29,6 +29,7 @@ enum MODEL_TYPE
 
     //FLOAT16 MODEL
     YOLO_DETECT_V8_HALF = 4,
     YOLO_POSE_V8_HALF = 5,
+    YOLO_CLS_HALF = 6
 };
 