OpenCV Face Detection

Before reading this article, be sure to read my two earlier posts on setting up the environment; otherwise you may not even get it installed.

Simple Face Detection

Using the haarcascade_frontalface_alt2 model that ships with OpenCV, one face is consistently left unmarked, which is awkward.
Running it also reported an error: the model file could not be found.
A quick search with Everything (the file-search tool) locates the cascade file.
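Before changing any code, it is worth confirming that the path Everything reports is actually readable by the program. A minimal sketch of such a check (the path is the one on my machine; substitute whatever Everything finds on yours):

#include <fstream>
#include <iostream>

int main()
{
    // This path is an assumption -- replace it with the location Everything reports
    const char *cascade_path = "G:/opencv/build/etc/haarcascades/haarcascade_frontalface_alt2.xml";
    std::ifstream f(cascade_path);
    std::cout << (f.good() ? "cascade file is readable"
                           : "cascade file missing, check the path")
              << std::endl;
    return 0;
}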

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    const char *filePath = "G:/WorkSpacePy/cmake_opencv/test.png";

    // Load the Haar cascade face detector
    cv::CascadeClassifier face_cascade;
    if (!face_cascade.load("G:/opencv/build/etc/haarcascades/haarcascade_frontalface_alt2.xml"))
    {
        std::cerr << "Failed to load the face detection cascade file" << std::endl;
        return -1;
    }

    // Read the image
    cv::Mat image = cv::imread(filePath, cv::IMREAD_COLOR);
    if (image.empty())
    {
        std::cerr << "Failed to read the image" << std::endl;
        return -1;
    }

    std::vector<cv::Rect> faces;
    cv::Mat gray_image;
    // Convert to grayscale; face detection is usually run on the gray image
    cv::cvtColor(image, gray_image, cv::COLOR_BGR2GRAY);
    face_cascade.detectMultiScale(gray_image, faces, 1.1, 3, 0, cv::Size(30, 30));

    // Draw a rectangle around each detected face
    for (size_t i = 0; i < faces.size(); i++)
    {
        cv::rectangle(image, faces[i], cv::Scalar(0, 255, 0), 2);
    }

    // Create a window and display the image
    cv::namedWindow("haarcascade_frontalface_alt2", cv::WINDOW_AUTOSIZE);
    cv::imshow("haarcascade_frontalface_alt2", image);
    cv::waitKey(0);
    cv::destroyAllWindows();
    return 0;
}
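As for the face that never gets marked: detectMultiScale exposes the scale step, the number of neighboring hits a candidate needs, and the minimum face size, and loosening them can recover borderline faces at the cost of more false positives. A sketch of looser values to swap in for the detection call above (the exact numbers are guesses to experiment with, not a known fix for this image):

// Histogram equalization often helps Haar cascades under uneven lighting
cv::equalizeHist(gray_image, gray_image);

// A smaller scale step (1.05 vs. 1.1) scans more scales, minNeighbors 2 keeps
// weaker candidates, and a smaller minSize admits smaller faces.
face_cascade.detectMultiScale(gray_image, faces, 1.05, 2, 0, cv::Size(20, 20));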
Third-Party Model Integration

OpenCV Zoo
The results are much better, and the model can be called directly through OpenCV.
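Before the full demo below, here is a minimal sketch of what "directly callable" means, using the cv::FaceDetectorYN wrapper (assuming an OpenCV 4.x build recent enough to ship it; the model and image paths are placeholders):

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    // Paths are placeholders -- point them at your model and test image
    cv::Ptr<cv::FaceDetectorYN> detector = cv::FaceDetectorYN::create(
        "face_detection_yunet_2023mar.onnx", "", cv::Size(320, 320));

    cv::Mat image = cv::imread("test.png");
    detector->setInputSize(image.size());

    cv::Mat faces; // one row per face: x, y, w, h, five landmark pairs, score
    detector->detect(image, faces);
    std::cout << faces.rows << " faces detected" << std::endl;
    return 0;
}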
The project address is:
https://github.com/opencv/opencv_zoo
Find the face detection and face recognition models there.

Face Detection

Don't panic!! What we're running here is the demo code.


#include "opencv2/opencv.hpp"

#include <map>
#include <vector>
#include <string>
#include <iostream>

const std::map<std::string, int> str2backend{
    {"opencv", cv::dnn::DNN_BACKEND_OPENCV},
    {"cuda", cv::dnn::DNN_BACKEND_CUDA},
    {"timvx", cv::dnn::DNN_BACKEND_TIMVX},
    {"cann", cv::dnn::DNN_BACKEND_CANN}};
const std::map<std::string, int> str2target{
    {"cpu", cv::dnn::DNN_TARGET_CPU},
    {"cuda", cv::dnn::DNN_TARGET_CUDA},
    {"npu", cv::dnn::DNN_TARGET_NPU},
    {"cuda_fp16", cv::dnn::DNN_TARGET_CUDA_FP16}};

class YuNet
{
public:
    YuNet(const std::string &model_path,
          const cv::Size &input_size = cv::Size(320, 320),
          float conf_threshold = 0.6f,
          float nms_threshold = 0.3f,
          int top_k = 5000,
          int backend_id = 0,
          int target_id = 0)
        : model_path_(model_path), input_size_(input_size),
          conf_threshold_(conf_threshold), nms_threshold_(nms_threshold),
          top_k_(top_k), backend_id_(backend_id), target_id_(target_id)
    {
        model = cv::FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_);
    }

    /* Override the input size that was set when the model was created.
       Size format: [Width, Height]. */
    void setInputSize(const cv::Size &input_size)
    {
        input_size_ = input_size;
        model->setInputSize(input_size_);
    }

    cv::Mat infer(const cv::Mat &image)
    {
        cv::Mat res;
        model->detect(image, res);
        return res;
    }

private:
    cv::Ptr<cv::FaceDetectorYN> model;

    std::string model_path_;
    cv::Size input_size_;
    float conf_threshold_;
    float nms_threshold_;
    int top_k_;
    int backend_id_;
    int target_id_;
};

cv::Mat visualize(const cv::Mat &image, const cv::Mat &faces, float fps = -1.f)
{
    static cv::Scalar box_color{0, 255, 0};
    static std::vector<cv::Scalar> landmark_color{
        cv::Scalar(255, 0, 0),   // right eye
        cv::Scalar(0, 0, 255),   // left eye
        cv::Scalar(0, 255, 0),   // nose tip
        cv::Scalar(255, 0, 255), // right mouth corner
        cv::Scalar(0, 255, 255)  // left mouth corner
    };
    static cv::Scalar text_color{0, 255, 0};

    auto output_image = image.clone();

    if (fps >= 0)
    {
        cv::putText(output_image, cv::format("FPS: %.2f", fps), cv::Point(0, 15), cv::FONT_HERSHEY_SIMPLEX, 0.5, text_color, 2);
    }

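    // Each row of `faces` holds 15 floats: the box x, y, w, h in columns 0-3,
    // five landmark (x, y) pairs in columns 4-13, and the score in column 14.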
    for (int i = 0; i < faces.rows; ++i)
    {
        // Draw bounding boxes
        int x1 = static_cast<int>(faces.at<float>(i, 0));
        int y1 = static_cast<int>(faces.at<float>(i, 1));
        int w = static_cast<int>(faces.at<float>(i, 2));
        int h = static_cast<int>(faces.at<float>(i, 3));
        cv::rectangle(output_image, cv::Rect(x1, y1, w, h), box_color, 2);

        // Confidence as text
        float conf = faces.at<float>(i, 14);
        cv::putText(output_image, cv::format("%.4f", conf), cv::Point(x1, y1 + 12), cv::FONT_HERSHEY_DUPLEX, 0.5, text_color);

        // Draw landmarks
        for (int j = 0; j < static_cast<int>(landmark_color.size()); ++j)
        {
            int x = static_cast<int>(faces.at<float>(i, 2 * j + 4)), y = static_cast<int>(faces.at<float>(i, 2 * j + 5));
            cv::circle(output_image, cv::Point(x, y), 2, landmark_color[j], 2);
        }
    }
    return output_image;
}

int main(int argc, char **argv)
{
    cv::CommandLineParser parser(argc, argv,
                                 "{help  h           |                                      | Print this message}"
                                 "{input i           | G:/WorkSpacePy/cmake_opencv/test.png | Set input to a certain image, omit if using camera}"
                                 "{model m           | G:/WorkSpacePy/cmake_opencv/face_detection_yunet_2023mar.onnx | Set path to the model}"
                                 "{backend b         | opencv                               | Set DNN backend}"
                                 "{target t          | cpu                                  | Set DNN target}"
                                 "{save s            | true                                | Whether to save result image or not}"
                                 "{vis v             | true                                | Whether to visualize result image or not}"
                                 /* model params below*/
                                 "{conf_threshold    | 0.9                                  | Set the minimum confidence for the model to identify a face. Filter out faces of conf < conf_threshold}"
                                 "{nms_threshold     | 0.3                                  | Set the threshold to suppress overlapped boxes. Suppress boxes if IoU(box1, box2) >= nms_threshold, the one of higher score is kept.}"
                                 "{top_k             | 5000                                 | Keep top_k bounding boxes before NMS. Set a lower value may help speed up postprocessing.}");
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    std::string input_path = parser.get<std::string>("input");
    std::string model_path = parser.get<std::string>("model");
    std::string backend = parser.get<std::string>("backend");
    std::string target = parser.get<std::string>("target");
    bool save_flag = parser.get<bool>("save");
    bool vis_flag = parser.get<bool>("vis");

    std::cout << "input_path:" << input_path << std::endl;
    std::cout << "model_path:" << model_path << std::endl;
    std::cout << "save_flag:" << save_flag << std::endl;
    std::cout << "vis_flag:" << vis_flag << std::endl;

    // model params
    float conf_threshold = parser.get<float>("conf_threshold");
    float nms_threshold = parser.get<float>("nms_threshold");
    int top_k = parser.get<int>("top_k");
    const int backend_id = str2backend.at(backend);
    const int target_id = str2target.at(target);

    // Instantiate YuNet
    YuNet model(model_path, cv::Size(320, 320), conf_threshold, nms_threshold, top_k, backend_id, target_id);

    // If input is an image
    if (!input_path.empty())
    {
        auto image = cv::imread(input_path);

        // Inference
        model.setInputSize(image.size());
        auto faces = model.infer(image);

        // Print faces
        std::cout << cv::format("%d faces detected:\n", faces.rows);
        for (int i = 0; i < faces.rows; ++i)
        {
            int x1 = static_cast<int>(faces.at<float>(i, 0));
            int y1 = static_cast<int>(faces.at<float>(i, 1));
            int w = static_cast<int>(faces.at<float>(i, 2));
            int h = static_cast<int>(faces.at<float>(i, 3));
            float conf = faces.at<float>(i, 14);
            std::cout << cv::format("%d: x1=%d, y1=%d, w=%d, h=%d, conf=%.4f\n", i, x1, y1, w, h, conf);
        }

        // Draw results on the input image
        if (save_flag || vis_flag)
        {
            auto res_image = visualize(image, faces);
            if (save_flag)
            {
                std::cout << "Results are saved to result.jpg\n";
                cv::imwrite("result.jpg", res_image);
            }
            if (vis_flag)
            {
                cv::namedWindow(input_path, cv::WINDOW_AUTOSIZE);
                cv::imshow(input_path, res_image);
                cv::waitKey(0);
            }
        }
    }
    else // Call default camera
    {
        int device_id = 0;
        auto cap = cv::VideoCapture(device_id);
        int w = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_WIDTH));
        int h = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_HEIGHT));
        model.setInputSize(cv::Size(w, h));

        auto tick_meter = cv::TickMeter();
        cv::Mat frame;
        while (cv::waitKey(1) < 0)
        {
            bool has_frame = cap.read(frame);
            if (!has_frame)
            {
                std::cout << "No frames grabbed! Exiting ...\n";
                break;
            }

            // Inference
            tick_meter.start();
            cv::Mat faces = model.infer(frame);
            tick_meter.stop();

            // Draw results on the input image
            auto res_image = visualize(frame, faces, (float)tick_meter.getFPS());
            // Visualize in a new window
            cv::imshow("YuNet Demo", res_image);

            tick_meter.reset();
        }
    }

    return 0;
}
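To build either program, assuming the CMake + OpenCV environment from the two prerequisite posts, a minimal CMakeLists.txt sketch might look like this (the target name and source file are placeholders, and OpenCV_DIR may need to point at your own build, e.g. G:/opencv/build):

cmake_minimum_required(VERSION 3.10)
project(cmake_opencv)

# If find_package fails, set OpenCV_DIR to the folder containing OpenCVConfig.cmake
find_package(OpenCV REQUIRED)

add_executable(yunet_demo main.cpp)
target_include_directories(yunet_demo PRIVATE ${OpenCV_INCLUDE_DIRS})
target_link_libraries(yunet_demo PRIVATE ${OpenCV_LIBS})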

Can't find the model?
For reference, in my layout the .onnx model sits directly in the project directory (G:/WorkSpacePy/cmake_opencv), which matches the default --model path in the parser above.
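Note that the demo resolves the model from the --model argument, and a bare file name is resolved against the current working directory, not the executable's folder. Either launch the program from the directory that contains the .onnx, or pass absolute paths, e.g. (the executable name is whatever you built; yunet_demo.exe here is an assumption):

yunet_demo.exe --model=G:/WorkSpacePy/cmake_opencv/face_detection_yunet_2023mar.onnx --input=G:/WorkSpacePy/cmake_opencv/test.png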

Reference: https://blog.csdn.net/m0_56711618/article/details/130162208

Next up: implementing face recognition with OpenCV.
