目的:目标实时检测;
方法:c++调用yolov5模型;
数据资源参考:【Yolov5】1.认真总结6000字Yolov5保姆级教程(2022.06.28全新版本v6.1)_yolov5教程-CSDN博客
代码:
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <iostream>
#include <string>
#include <vector>
using namespace cv;
using namespace cv::dnn;
// Class names of the YOLOv5 model. The order MUST match the label order
// used when the model was trained (index i here = class id i in the output).
std::vector<std::string> classes = {
    "Hero",
    "Tower",
    "Soldiers",
    "Monster",
    "Red_buff",
    "Monster_bird",
    "Spirit",
    "Monster_lizard",
    "Blue_buff",
    "Monster_wolf"
};
int main() {
Net net = cv::dnn::readNetFromONNX("wzry.onnx");
bool isGpu = 1;// 1;
if (isGpu) {
net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);//_FP16
}
//cpu
else {
net.setPreferableBackend(cv::dnn::DNN_BACKEND_DEFAULT);
net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
}
if (net.empty()) {
std::cerr << "Error: Could not load the neural network." << std::endl;
return -1;
}
VideoCapture cap("SVID_20210726_111258_1.mp4");
if (!cap.isOpened()) {
std::cerr << "Error: Could not open the video file." << std::endl;
return -1;
}
const int input_width = 640;
const int input_height = 640;
float x_factor = 1280 / 640.0f;
float y_factor = 576 / 640.0f;
namedWindow("Object Detection", WINDOW_NORMAL);
Mat frame, blob;
while (cap.read(frame)) {
resize(frame, blob, Size(input_width, input_height));
cv::Mat blob = cv::dnn::blobFromImage(frame, 1 / 255.0, cv::Size(640, 640), cv::Scalar(0, 0, 0), true, false);
net.setInput(blob);
cv::Mat preds = net.forward();
std::vector
std::vector
std::vector
//std::cout << "rows: " << preds.size[1] << " data: " << preds.size[2] << std::endl;
cv::Mat det_output(preds.size[1], preds.size[2], CV_32F, preds.ptr
//In a typical YOLO output, the format is [x_center, y_center, width, height, object_confidence, class_score1, class_score2, ..., class_scoreN] for each bounding box.
for (int i = 0; i < det_output.rows; i++) {
float confidence = det_output.at
cv::Mat class_scores = det_output.row(i).colRange(5, 5 + classes.size());
Point class_id_point;
double max_class_score;
minMaxLoc(class_scores, NULL, &max_class_score, NULL, &class_id_point);
int class_id = class_id_point.x;
float final_confidence = confidence * max_class_score;
//std::cout << "Final confidence: " << final_confidence << std::endl;
if (final_confidence < 0.45) {
continue;
}
float cx = det_output.at
float cy = det_output.at
float ow = det_output.at
float oh = det_output.at
int x = static_cast
int y = static_cast
int width = static_cast
int height = static_cast
//cv::rectangle(frame, cv::Point(x, y), cv::Point(x + width, y + height), cv::Scalar(0, 0, 255), 2, 8);
//putText(frame,classes.at(class_id) +std::to_string(final_confidence), Point(x,y - 10), FONT_HERSHEY_SIMPLEX, 0.9, Scalar(0, 255, 0), 2);
boxes.push_back(Rect(x, y, width, height));
confidences.push_back(final_confidence);
class_ids.push_back(class_id);
}
std::vector
dnn::NMSBoxes(boxes, confidences, 0.4, 0.5, indices);
// Draw the final bounding boxes
for (size_t i = 0; i < indices.size(); ++i) {
int idx = indices[i];
Rect box = boxes[idx];
cv::rectangle(frame, cv::Point(box.x, box.y), cv::Point(box.x + box.width, box.y + box.height), cv::Scalar(0, 0, 255), 2, 8);
std::string label = classes[class_ids[idx]] + ": " + std::to_string(confidences[idx]);
putText(frame, label.c_str(), Point(box.x, box.y - 10), FONT_HERSHEY_SIMPLEX, 0.9, Scalar(0, 255, 0), 2);
}
imshow("Object Detection", frame);
if (waitKey(1) == 'q') {
break;
}
}
cap.release();
destroyAllWindows();
return 0;
}
上述代码关键部分讲解:
(1)yolov5 pt模型转onnx模型,并用opencv c++调用;
Net net = cv::dnn::readNetFromONNX("wzry.onnx");
c++代码调用yolov5训练模型时onnx格式是一种常用格式,但是在yolov5训练模型时给出的是pt格式,因此需要将yolov5训练的 best.pt转换成best*.onnx,以便opencv c++调用。下面给出转换代码:
python export.py --weights best.pt --include onnx --device 0
上面export.py yolov5 主目录下自带,--device 0 选用0号显卡。
(2)// # class names
std::vector<std::string> classes = { "Hero", "Tower", "Soldiers", "Monster", "Red_buff", "Monster_bird", "Spirit", "Monster_lizard", "Blue_buff", "Monster_wolf" };
(3) GPU开关
bool isGpu = 1;// 1;
if (isGpu) {
net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);//_FP16
}
//cpu
else {
net.setPreferableBackend(cv::dnn::DNN_BACKEND_DEFAULT);
net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
}
(4)比例因子:
float x_factor = 1280 / 640.0f;
float y_factor = 576 / 640.0f;
1280与576是视频帧的长和宽,要根据实际设置,否则框的位置不对。
(5)模型文件 wzry.onnx(即代码中 readNetFromONNX 加载的文件)及视频文件放到当前目录,也可自行修改路径;
遇到相关问题可以参考基于opencv-C++dnn模块推理的yolov5 onnx模型_opencv onnx推理-CSDN博客
最终看到了检测结果,还比较满意