@@ -221,8 +221,8 @@ char* YOLO_V8::TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std::
     case YOLO_DETECT_V8:
     case YOLO_DETECT_V8_HALF:
     {
-        int strideNum = outputNodeDims[1];//8400
-        int signalResultNum = outputNodeDims[2];//84
+        int signalResultNum = outputNodeDims[1];//84
+        int strideNum = outputNodeDims[2];//8400
         std::vector<int> class_ids;
         std::vector<float> confidences;
         std::vector<cv::Rect> boxes;
@@ -230,18 +230,18 @@ char* YOLO_V8::TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std::
         if (modelType == YOLO_DETECT_V8)
         {
             // FP32
-            rawData = cv::Mat(strideNum, signalResultNum, CV_32F, output);
+            rawData = cv::Mat(signalResultNum, strideNum, CV_32F, output);
         }
         else
         {
             // FP16
-            rawData = cv::Mat(strideNum, signalResultNum, CV_16F, output);
+            rawData = cv::Mat(signalResultNum, strideNum, CV_16F, output);
             rawData.convertTo(rawData, CV_32F);
         }
         //Note:
         //ultralytics add transpose operator to the output of yolov8 model.which make yolov8/v5/v7 has same shape
         //https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt
-        //rowData = rowData.t();
+        rawData = rawData.t();
         float* data = (float*)rawData.data;
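For context, the sketch below shows why the dimension order and the explicit transpose matter, assuming a stock YOLOv8 detection export whose output tensor has shape [1, 84, 8400] (outputNodeDims[1] = 84 values per anchor, i.e. 4 box coordinates plus the class scores; outputNodeDims[2] = 8400 anchors, matching the //84 and //8400 comments above). The helper name ParseDetections and the 0.25 confidence threshold are illustrative only and not part of the patch.

// Minimal sketch, not part of the patch: assumes outputNodeDims = {1, 84, 8400}.
#include <opencv2/opencv.hpp>
#include <vector>

struct Detection { int classId; float confidence; cv::Rect box; };

// Hypothetical helper: rawData is the signalResultNum x strideNum (84 x 8400)
// matrix built from the FP32/FP16 output buffer, as in the patched code above.
std::vector<Detection> ParseDetections(cv::Mat rawData, float confThreshold = 0.25f)
{
    // Transpose to 8400 x 84 so each row is one candidate:
    // [cx, cy, w, h, score_0 .. score_N-1].
    rawData = rawData.t();
    std::vector<Detection> results;
    for (int i = 0; i < rawData.rows; ++i)
    {
        const float* row = rawData.ptr<float>(i);
        // Class scores start after the four box coordinates.
        cv::Mat scores(1, rawData.cols - 4, CV_32F, (void*)(row + 4));
        cv::Point classIdPoint;
        double maxScore;
        cv::minMaxLoc(scores, nullptr, &maxScore, nullptr, &classIdPoint);
        if (maxScore > confThreshold)
        {
            float cx = row[0], cy = row[1], w = row[2], h = row[3];
            results.push_back({ classIdPoint.x, (float)maxScore,
                                cv::Rect((int)(cx - w / 2), (int)(cy - h / 2), (int)w, (int)h) });
        }
    }
    return results;
}

With the patch applied, rawData is built as 84 x 8400, which matches the channels-first memory layout of the ONNX output, and the now-active rawData.t() produces the per-anchor rows that float* data = (float*)rawData.data; iterates over. As the in-code note suggests, the previous version appears to have assumed the exported model already emitted an anchors-first layout (hence the commented-out, misspelled rowData = rowData.t()); the patch instead reads the [1, 84, 8400] output as-is and performs the transpose on the host.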