Tutorial: Custom Mouse Events in a Qt Frameless Window

Knowledge points:

1. Qt framework overview. Qt is a cross-platform C++ application framework, widely used for building graphical user interfaces and also supporting non-GUI programs. It provides modules for databases, XML, networking, multithreading, graphics, and more. Qt applications run on Windows, macOS, and Linux, as well as on mobile platforms such as Android and iOS.

2. Frameless windows in Qt. A frameless window is created by changing the window's flags: typically you subclass QWidget and call setWindowFlags(), passing Qt::Window | Qt::FramelessWindowHint. Frameless windows are a common basis for modern, minimalist application interfaces.

3. Custom mouse event handling. Qt lets you override the mouse event handlers mousePressEvent(), mouseReleaseEvent(), and mouseMoveEvent() to implement custom behavior. Overriding them is especially common in frameless windows, where clicks, releases, and moves must drive the layout adjustments and other logic that the missing title bar would normally provide.

4. Interface re-layout. Re-layout in a frameless window usually hangs off these mouse events. By handling them yourself you can drag the window, resize it, and reposition components: record the press position, update the window or component position on every move, and fix the result on release.

5. C++ fundamentals. Since Qt is primarily a C++ framework, solid C++ is a prerequisite: object-oriented programming (classes and objects, inheritance, polymorphism) and the STL, and in Qt work also templates, exception handling, and smart pointers.

6. Style improvements. Once the frameless window and its mouse handling work, the look can be refined to improve the user experience: window transparency, colors, fonts, and layout. In Qt this is done through style sheets (QSS) or by setting style properties directly in code.

7. Qt's signal and slot mechanism. Signals and slots are at the core of Qt's event handling. When an event occurs, say a button is clicked, the corresponding signal is emitted, and every connected slot runs in response. This mechanism is the backbone of event-driven programming in Qt.

8. Development environment setup. Qt development starts with installing the Qt SDK and an IDE such as Qt Creator, configuring a compiler and debugger, and, where needed, third-party libraries and plugins. Once the environment is in place you can write, build, and run Qt programs.

9. Cross-platform considerations. Because Qt targets multiple operating systems, interface and program design must account for platform differences, such as how Windows and Linux decorate windows. Write platform-independent code against Qt's abstraction layer where possible, and fall back on conditional compilation where it is not.

Taken together, implementing a frameless interface with custom mouse events in Qt and C++ requires a deep understanding of the framework and flexible use of the language: the technical details of interface design and event handling, plus attention to cross-platform behavior and a correctly configured development environment. The short C++ sketches below illustrate points 2 through 4 and 6 through 9.
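To make points 2 through 4 concrete, here is a minimal sketch of a draggable frameless window. It assumes Qt 6 (under Qt 5, event->globalPosition().toPoint() becomes event->globalPos()); the class name, file layout, and members are illustrative, not taken from the tutorial's ZIP.

// framelesswindow.h (hypothetical file name)
#pragma once
#include <QWidget>
#include <QMouseEvent>
#include <QPoint>

class FramelessWindow : public QWidget {
    Q_OBJECT
public:
    explicit FramelessWindow(QWidget *parent = nullptr) : QWidget(parent) {
        // Point 2: drop the native title bar and border.
        setWindowFlags(Qt::Window | Qt::FramelessWindowHint);
    }

protected:
    // Point 3: override the mouse handlers to implement dragging.
    void mousePressEvent(QMouseEvent *event) override {
        if (event->button() == Qt::LeftButton) {
            // Remember where inside the window the drag started.
            m_dragOffset = event->globalPosition().toPoint() - frameGeometry().topLeft();
            m_dragging = true;
        }
    }

    // Point 4: re-position the window on every move while the button is held.
    void mouseMoveEvent(QMouseEvent *event) override {
        if (m_dragging && (event->buttons() & Qt::LeftButton))
            move(event->globalPosition().toPoint() - m_dragOffset);
    }

    void mouseReleaseEvent(QMouseEvent *) override {
        m_dragging = false;  // the release fixes the new position
    }

private:
    QPoint m_dragOffset;
    bool m_dragging = false;
};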
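Point 6 in code, as a sketch only: the colors, fonts, and radius are placeholder values. Note Qt::WA_StyledBackground, which makes QSS backgrounds take effect on a plain QWidget subclass.

FramelessWindow w;
w.setAttribute(Qt::WA_StyledBackground);        // let QSS paint the background
w.setAttribute(Qt::WA_TranslucentBackground);   // per-pixel transparency for rounded corners
w.setWindowOpacity(0.97);                       // whole-window opacity
w.setStyleSheet("QWidget { background-color: #2D2D30; color: #DCDCDC; "
                "border-radius: 6px; font-family: 'Segoe UI'; font-size: 10pt; }");
w.show();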
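Point 7, signals and slots, in a minimal main(): the button's clicked() signal is connected to the window's close() slot, so clicking the button closes the window. This assumes the FramelessWindow sketch above lives in framelesswindow.h.

// main.cpp (hypothetical file name)
#include <QApplication>
#include <QPushButton>
#include "framelesswindow.h"

int main(int argc, char *argv[]) {
    QApplication app(argc, argv);

    FramelessWindow window;
    auto *closeButton = new QPushButton("Close", &window);
    closeButton->move(10, 10);

    // Signal -> slot: emitting clicked() invokes QWidget::close().
    QObject::connect(closeButton, &QPushButton::clicked,
                     &window, &QWidget::close);

    window.resize(400, 300);
    window.show();
    return app.exec();
}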
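For point 8, a minimal qmake project file is one way to wire up the build; the file names match the hypothetical sketches above, and a CMake setup would serve equally well.

# frameless.pro (hypothetical project file)
QT      += widgets
CONFIG  += c++17
HEADERS += framelesswindow.h
SOURCES += main.cpp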
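Finally, for point 9, conditional compilation on Qt's platform macros is the usual fallback when the abstraction layer is not enough. The helper below is hypothetical and only marks where such platform-specific code would go.

#include <QtGlobal>
#include <QWidget>

// Hypothetical hook for per-platform adjustments to a frameless window.
void applyPlatformTweaks(QWidget *window) {
    Q_UNUSED(window);
#ifdef Q_OS_WIN
    // Windows: a frameless window loses its native drop shadow; projects
    // sometimes restore one here through the DWM API.
#elif defined(Q_OS_LINUX)
    // Linux: decorations and shadows come from the window manager, so
    // usually nothing extra is needed.
#endif
}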

相关推荐

filetype

import sys import cv2 import time import torch import traceback import threading import queue import dxcam import ctypes import os import glob import numpy as np import logitech.lg from PyQt6.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QComboBox, QSlider, QSpinBox, QDoubleSpinBox, QLineEdit, QTabWidget, QGroupBox, QTextEdit, QFileDialog, QMessageBox, QSizePolicy, QSplitter, QDialog, QScrollArea) from PyQt6.QtCore import Qt, QTimer, QThread, pyqtSignal from PyQt6.QtGui import QImage, QPixmap, QPainter, QColor, QFont, QIcon, QKeyEvent, QMouseEvent from PyQt6.QtSvg import QSvgRenderer from PIL import Image from ultralytics import YOLO from pynput import mouse class PIDController: """PID控制器""" def __init__(self, kp, ki, kd, output_min=-100, output_max=100): self.kp = kp # 比例增益 self.ki = ki # 积分增益 self.kd = kd # 微分增益 self.output_min = output_min self.output_max = output_max # 状态变量 self.integral = 0.0 self.prev_error = 0.0 self.last_time = time.perf_counter() def compute(self, setpoint, current_value): """计算PID控制输出""" current_time = time.perf_counter() dt = current_time - self.last_time # 防止过小的时间差导致计算问题 MIN_DT = 0.0001 if dt < MIN_DT: dt = MIN_DT # 计算误差 error = setpoint - current_value # 比例项 P = self.kp * error # 积分项(防饱和) self.integral += error * dt I = self.ki * self.integral # 微分项 derivative = (error - self.prev_error) / dt D = self.kd * derivative # 合成输出 output = P + I + D # 输出限幅 if output > self.output_max: output = self.output_max elif output < self.output_min: output = self.output_min # 更新状态 self.prev_error = error self.last_time = current_time return output def reset(self): """重置控制器状态""" self.integral = 0.0 self.prev_error = 0.0 self.last_time = time.perf_counter() class ScreenDetector: def __init__(self, config_path): # 解析配置文件 self._parse_config(config_path) # 设备检测与模型加载 self.device = self._determine_device() self.model = YOLO(self.model_path).to(self.device) # 屏幕信息初始化 self._init_screen_info() # 控制参数初始化 self._init_control_params() # 状态管理 self.stop_event = threading.Event() self.camera_lock = threading.Lock() self.target_lock = threading.Lock() self.offset_lock = threading.Lock() self.button_lock = threading.Lock() # 推理状态控制 self.inference_active = False self.inference_lock = threading.Lock() # 初始化相机 self._init_camera() # 初始化鼠标监听器 self._init_mouse_listener() # 初始化PID控制器 self._init_pid_controllers() def _parse_config(self, config_path): """解析并存储配置参数""" self.cfg = self._parse_txt_config(config_path) # 存储常用参数 self.model_path = self.cfg['model_path'] self.model_device = self.cfg['model_device'] self.screen_target_size = int(self.cfg['screen_target_size']) self.detection_conf_thres = float(self.cfg['detection_conf_thres']) self.detection_iou_thres = float(self.cfg['detection_iou_thres']) self.detection_classes = [int(x) for x in self.cfg['detection_classes'].split(',')] self.visualization_color = tuple(map(int, self.cfg['visualization_color'].split(','))) self.visualization_line_width = int(self.cfg['visualization_line_width']) self.visualization_font_scale = float(self.cfg['visualization_font_scale']) self.visualization_show_conf = bool(self.cfg['visualization_show_conf']) self.fov_horizontal = float(self.cfg.get('move_fov_horizontal', '90')) self.mouse_dpi = int(self.cfg.get('move_mouse_dpi', '400')) self.target_offset_x_percent = float(self.cfg.get('target_offset_x', '50')) self.target_offset_y_percent = 100 - float(self.cfg.get('target_offset_y', '50')) # PID参数 self.pid_kp = float(self.cfg.get('pid_kp', '1.0')) self.pid_ki = 
float(self.cfg.get('pid_ki', '0.05')) self.pid_kd = float(self.cfg.get('pid_kd', '0.2')) # 贝塞尔曲线参数 self.bezier_steps = int(self.cfg.get('bezier_steps', '100')) self.bezier_duration = float(self.cfg.get('bezier_duration', '0.1')) self.bezier_curve = float(self.cfg.get('bezier_curve', '0.3')) def update_config(self, config_path): """动态更新配置""" try: # 重新解析配置文件 self._parse_config(config_path) # 更新可以直接修改的参数 self.detection_conf_thres = float(self.cfg['detection_conf_thres']) self.detection_iou_thres = float(self.cfg['detection_iou_thres']) self.target_offset_x_percent = float(self.cfg.get('target_offset_x', '50')) self.target_offset_y_percent = 100 - float(self.cfg.get('target_offset_y', '50')) # PID参数更新 self.pid_kp = float(self.cfg.get('pid_kp', '1.0')) self.pid_ki = float(self.cfg.get('pid_ki', '0.05')) self.pid_kd = float(self.cfg.get('pid_kd', '0.2')) # 更新PID控制器 self.pid_x = PIDController(self.pid_kp, self.pid_ki, self.pid_kd) self.pid_y = PIDController(self.pid_kp, self.pid_ki, self.pid_kd) # FOV和DPI更新 self.fov_horizontal = float(self.cfg.get('move_fov_horizontal', '90')) self.mouse_dpi = int(self.cfg.get('move_mouse_dpi', '400')) # 更新贝塞尔曲线参数 self.bezier_steps = int(self.cfg.get('bezier_steps', '100')) self.bezier_duration = float(self.cfg.get('bezier_duration', '0.1')) self.bezier_curve = float(self.cfg.get('bezier_curve', '0.3')) print("配置已动态更新") return True except Exception as e: print(f"更新配置失败: {str(e)}") traceback.print_exc() return False def _parse_txt_config(self, path): """解析TXT格式的配置文件""" config = {} with open(path, 'r', encoding='utf-8') as f: for line in f: line = line.strip() if not line or line.startswith('#'): continue if '=' in line: key, value = line.split('=', 1) config[key.strip()] = value.strip() return config def _init_pid_controllers(self): """初始化PID控制器""" # 创建XY方向的PID控制器 self.pid_x = PIDController(self.pid_kp, self.pid_ki, self.pid_kd) self.pid_y = PIDController(self.pid_kp, self.pid_ki, self.pid_kd) def start_inference(self): """启动推理""" with self.inference_lock: self.inference_active = True def stop_inference(self): """停止推理""" with self.inference_lock: self.inference_active = False def _determine_device(self): """确定运行设备""" if self.model_device == 'auto': return 'cuda' if torch.cuda.is_available() and torch.cuda.device_count() > 0 else 'cpu' return self.model_device def _init_screen_info(self): """初始化屏幕信息""" user32 = ctypes.windll.user32 self.screen_width, self.screen_height = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1) self.screen_center = (self.screen_width // 2, self.screen_height // 2) # 计算截图区域 left = (self.screen_width - self.screen_target_size) // 2 top = (self.screen_height - self.screen_target_size) // 2 self.region = ( max(0, int(left)), max(0, int(top)), min(self.screen_width, int(left + self.screen_target_size)), min(self.screen_height, int(top + self.screen_target_size)) ) def _init_control_params(self): """初始化控制参数""" self.previous_target_info = None self.closest_target_absolute = None self.target_offset = None self.right_button_pressed = False # 改为鼠标右键状态 def _init_camera(self): """初始化相机""" try: with self.camera_lock: self.camera = dxcam.create( output_idx=0, output_color="BGR", region=self.region ) self.camera.start(target_fps=120, video_mode=True) except Exception as e: print(f"相机初始化失败: {str(e)}") try: # 降级模式 with self.camera_lock: self.camera = dxcam.create() self.camera.start(target_fps=60, video_mode=True) except Exception as fallback_e: print(f"降级模式初始化失败: {str(fallback_e)}") self.camera = None def _init_mouse_listener(self): """初始化鼠标监听器""" 
self.mouse_listener = mouse.Listener( on_click=self.on_mouse_click # 监听鼠标点击事件 ) self.mouse_listener.daemon = True self.mouse_listener.start() def on_mouse_click(self, x, y, button, pressed): """处理鼠标点击事件""" try: if button == mouse.Button.right: # 监听鼠标右键 with self.button_lock: self.right_button_pressed = pressed # 更新状态 # 当右键释放时重置PID控制器 if not pressed: self.pid_x.reset() self.pid_y.reset() except Exception as e: print(f"鼠标事件处理错误: {str(e)}") def calculate_fov_movement(self, dx, dy): """基于FOV算法计算鼠标移动量""" # 计算屏幕对角线长度 screen_diagonal = (self.screen_width ** 2 + self.screen_height ** 2) ** 0.5 # 计算垂直FOV aspect_ratio = self.screen_width / self.screen_height fov_vertical = self.fov_horizontal / aspect_ratio # 计算每像素对应角度 angle_per_pixel_x = self.fov_horizontal / self.screen_width angle_per_pixel_y = fov_vertical / self.screen_height # 计算角度偏移 angle_offset_x = dx * angle_per_pixel_x angle_offset_y = dy * angle_per_pixel_y # 转换为鼠标移动量 move_x = (angle_offset_x / 360) * self.mouse_dpi move_y = (angle_offset_y / 360) * self.mouse_dpi return move_x, move_y def move_mouse_to_target(self): """移动鼠标对准目标点""" if not self.target_offset: return try: # 获取目标点与屏幕中心的偏移量 with self.offset_lock: dx, dy = self.target_offset # 使用FOV算法将像素偏移转换为鼠标移动量 move_x, move_y = self.calculate_fov_movement(dx, dy) # 使用PID计算平滑的移动量 pid_move_x = self.pid_x.compute(0, -move_x) # 将dx取反 pid_move_y = self.pid_y.compute(0, -move_y) # 将dy取反 # 移动鼠标 if pid_move_x != 0 or pid_move_y != 0: logitech.lg.start_mouse_move(int(pid_move_x), int(pid_move_y), self.bezier_steps, self.bezier_duration, self.bezier_curve) except Exception as e: print(f"移动鼠标时出错: {str(e)}") def run(self, frame_queue): """主检测循环""" while not self.stop_event.is_set(): try: # 检查推理状态 with self.inference_lock: if not self.inference_active: time.sleep(0.01) continue # 截图 grab_start = time.perf_counter() screenshot = self._grab_screenshot() grab_time = (time.perf_counter() - grab_start) * 1000 # ms if screenshot is None: time.sleep(0.001) continue # 推理 inference_start = time.perf_counter() results = self._inference(screenshot) inference_time = (time.perf_counter() - inference_start) * 1000 # ms # 处理检测结果 target_info, closest_target_relative, closest_offset = self._process_detection_results(results) # 更新目标信息 self._update_target_info(target_info, closest_offset) # 移动鼠标 self._move_mouse_if_needed() # 可视化处理 annotated_frame = self._visualize_results(results, closest_target_relative) if frame_queue else None # 放入队列 if frame_queue: try: frame_queue.put( (annotated_frame, len(target_info), inference_time, grab_time, target_info), timeout=0.01 ) except queue.Full: pass except Exception as e: print(f"检测循环异常: {str(e)}") traceback.print_exc() self._reset_camera() time.sleep(0.5) def _grab_screenshot(self): """安全获取截图""" with self.camera_lock: if self.camera: return self.camera.grab() return None def _inference(self, screenshot): """执行模型推理""" return self.model.predict( screenshot, conf=self.detection_conf_thres, iou=self.detection_iou_thres, classes=self.detection_classes, device=self.device, verbose=False ) def _process_detection_results(self, results): """处理检测结果""" target_info = [] min_distance = float('inf') closest_target_relative = None closest_target_absolute = None closest_offset = None for box in results[0].boxes: # 获取边界框坐标 x1, y1, x2, y2 = map(int, box.xyxy[0]) # 计算绝对坐标 x1_abs = x1 + self.region[0] y1_abs = y1 + self.region[1] x2_abs = x2 + self.region[0] y2_abs = y2 + self.region[1] # 计算边界框尺寸 width = x2_abs - x1_abs height = y2_abs - y1_abs # 应用偏移百分比计算目标点 target_x = x1_abs + int(width * 
(self.target_offset_x_percent / 100)) target_y = y1_abs + int(height * (self.target_offset_y_percent / 100)) # 计算偏移量 dx = target_x - self.screen_center[0] dy = target_y - self.screen_center[1] distance = (dx ** 2 + dy ** 2) ** 0.5 # 更新最近目标 if distance < min_distance: min_distance = distance # 计算相对坐标(用于可视化) closest_target_relative = ( x1 + int(width * (self.target_offset_x_percent / 100)), y1 + int(height * (self.target_offset_y_percent / 100)) ) closest_target_absolute = (target_x, target_y) closest_offset = (dx, dy) # 保存目标信息 class_id = int(box.cls) class_name = self.model.names[class_id] target_info.append(f"{class_name}:{x1_abs},{y1_abs},{x2_abs},{y2_abs}") return target_info, closest_target_relative, closest_offset def _update_target_info(self, target_info, closest_offset): """更新目标信息""" # 检查目标信息是否有变化 if target_info != self.previous_target_info: self.previous_target_info = target_info.copy() print(f"{len(target_info)}|{'|'.join(target_info)}") # 更新目标偏移量 with self.offset_lock: self.target_offset = closest_offset def _visualize_results(self, results, closest_target): """可视化处理结果""" frame = results[0].plot( line_width=self.visualization_line_width, font_size=self.visualization_font_scale, conf=self.visualization_show_conf ) # 绘制最近目标 if closest_target: # 绘制目标中心点 cv2.circle( frame, (int(closest_target[0]), int(closest_target[1])), 3, (0, 0, 255), -1 ) # 计算屏幕中心在截图区域内的相对坐标 screen_center_x = self.screen_center[0] - self.region[0] screen_center_y = self.screen_center[1] - self.region[1] # 绘制中心到目标的连线 cv2.line( frame, (int(screen_center_x), int(screen_center_y)), (int(closest_target[0]), int(closest_target[1])), (0, 255, 0), 1 ) return frame def _move_mouse_if_needed(self): """如果需要则移动鼠标""" with self.button_lock: if self.right_button_pressed and self.target_offset: # 使用right_button_pressed self.move_mouse_to_target() def _reset_camera(self): """重置相机""" print("正在重置相机...") try: self._init_camera() except Exception as e: print(f"相机重置失败: {str(e)}") traceback.print_exc() def stop(self): """安全停止检测器""" self.stop_event.set() self._safe_stop() if hasattr(self, 'mouse_listener') and self.mouse_listener.running: # 改为停止鼠标监听器 self.mouse_listener.stop() def _safe_stop(self): """同步释放资源""" print("正在安全停止相机...") try: with self.camera_lock: if self.camera: self.camera.stop() print("相机已停止") except Exception as e: print(f"停止相机时发生错误: {str(e)}") print("屏幕检测器已停止") class DetectionThread(QThread): update_signal = pyqtSignal(object) def __init__(self, detector, frame_queue): super().__init__() self.detector = detector self.frame_queue = frame_queue self.running = True def run(self): self.detector.run(self.frame_queue) def stop(self): self.running = False self.detector.stop() class MainWindow(QMainWindow): def __init__(self, detector): super().__init__() self.detector = detector self.setWindowTitle("EFAI 1.1") self.setGeometry(100, 100, 600, 400) # 添加缺失的属性初始化 self.visualization_enabled = True self.inference_active = False # 初始推理状态为停止 #窗口置顶 self.setWindowFlag(Qt.WindowType.WindowStaysOnTopHint) # 创建帧队列 self.frame_queue = queue.Queue(maxsize=3) # 初始化UI self.init_ui() # 启动检测线程 self.detection_thread = DetectionThread(self.detector, self.frame_queue) self.detection_thread.start() # 启动UI更新定时器 self.update_timer = QTimer() self.update_timer.timeout.connect(self.update_ui) self.update_timer.start(1) # 每1ms更新一次 def toggle_visualization(self): # 实际更新可视化状态属性 self.visualization_enabled = not self.visualization_enabled # 更新按钮文本 if self.visualization_enabled: self.toggle_visualization_btn.setText("禁用可视化") else: 
self.toggle_visualization_btn.setText("启用可视化") def toggle_inference(self): """切换推理状态""" self.inference_active = not self.inference_active if self.inference_active: self.toggle_inference_btn.setText("停止推理") self.toggle_inference_btn.setStyleSheet(""" QPushButton { background-color: #F44336; color: white; border: none; padding: 8px; border-radius: 4px; font-family: Segoe UI; font-size: 10pt; } """) self.detector.start_inference() else: self.toggle_inference_btn.setText("开始推理") self.toggle_inference_btn.setStyleSheet(""" QPushButton { background-color: #4CAF50; color: white; border: none; padding: 8px; border-radius: 4px; font-family: Segoe UI; font-size: 10pt; } """) self.detector.stop_inference() def init_ui(self): # 主布局 central_widget = QWidget() self.setCentralWidget(central_widget) main_layout = QVBoxLayout(central_widget) # 分割器(左侧图像/目标信息,右侧控制面板) splitter = QSplitter(Qt.Orientation.Horizontal) main_layout.addWidget(splitter) # 左侧区域(图像显示和目标信息) left_widget = QWidget() left_layout = QVBoxLayout(left_widget) # 图像显示区域 self.image_label = QLabel() self.image_label.setAlignment(Qt.AlignmentFlag.AlignCenter) self.image_label.setMinimumSize(320, 320) left_layout.addWidget(self.image_label) # 目标信息区域 self.target_info_text = QTextEdit() self.target_info_text.setReadOnly(True) self.target_info_text.setFixedHeight(150) self.target_info_text.setStyleSheet(""" QTextEdit { background-color: #2D2D30; color: #DCDCDC; font-family: Consolas; font-size: 10pt; border: 1px solid #3F3F46; border-radius: 4px; } """) left_layout.addWidget(self.target_info_text) # 右侧控制面板 right_widget = QWidget() right_layout = QVBoxLayout(right_widget) right_layout.setAlignment(Qt.AlignmentFlag.AlignTop) # 性能信息 perf_group = QGroupBox("性能信息") perf_layout = QVBoxLayout(perf_group) self.target_count_label = QLabel("目标数量: 0") self.inference_time_label = QLabel("推理时间: 0.000s") self.grab_time_label = QLabel("截图时间: 0.000s") for label in [self.target_count_label, self.inference_time_label, self.grab_time_label]: label.setStyleSheet("font-family: Consolas; font-size: 10pt;") perf_layout.addWidget(label) right_layout.addWidget(perf_group) # 系统信息 sys_group = QGroupBox("系统信息") sys_layout = QVBoxLayout(sys_group) # 获取模型名称(只显示文件名) model_name = os.path.basename(self.detector.model_path) # 获取显示器编号(如果配置中有则显示,否则显示默认值0) monitor_index = self.detector.cfg.get('screen_monitor', '0') self.model_label = QLabel(f"模型: {model_name}") self.device_label = QLabel(f"设备: {self.detector.device.upper()}") self.monitor_label = QLabel(f"显示器:{monitor_index}") self.screen_res_label = QLabel(f"屏幕分辨率: {self.detector.screen_width}x{self.detector.screen_height}") self.region_label = QLabel(f"检测区域: {self.detector.region}") for label in [self.model_label, self.device_label, self.monitor_label, self.screen_res_label, self.region_label]: label.setStyleSheet("font-family: Consolas; font-size: 9pt; color: #A0A0A0;") sys_layout.addWidget(label) right_layout.addWidget(sys_group) # 鼠标状态 mouse_group = QGroupBox("自瞄状态") mouse_layout = QVBoxLayout(mouse_group) self.mouse_status = QLabel("未瞄准") self.mouse_status.setStyleSheet(""" QLabel { font-family: Consolas; font-size: 10pt; color: #FF5252; } """) mouse_layout.addWidget(self.mouse_status) right_layout.addWidget(mouse_group) # 控制按钮 btn_group = QGroupBox("控制") btn_layout = QVBoxLayout(btn_group) # 添加推理切换按钮 self.toggle_inference_btn = QPushButton("开始推理") self.toggle_inference_btn.clicked.connect(self.toggle_inference) self.toggle_inference_btn.setStyleSheet(""" QPushButton { background-color: #4CAF50; color: white; border: none; 
padding: 8px; border-radius: 4px; font-family: Segoe UI; font-size: 10pt; } QPushButton:hover { background-color: #45A049; } QPushButton:pressed { background-color: #3D8B40; } """) btn_layout.addWidget(self.toggle_inference_btn) self.toggle_visualization_btn = QPushButton("禁用可视化") self.toggle_visualization_btn.clicked.connect(self.toggle_visualization) self.settings_btn = QPushButton("设置") self.settings_btn.clicked.connect(self.open_settings) for btn in [self.toggle_visualization_btn, self.settings_btn]: btn.setStyleSheet(""" QPushButton { background-color: #0078D7; color: white; border: none; padding: 8px; border-radius: 4px; font-family: Segoe UI; font-size: 10pt; } QPushButton:hover { background-color: #106EBE; } QPushButton:pressed { background-color: #005A9E; } """) btn_layout.addWidget(btn) right_layout.addWidget(btn_group) # 添加左右区域到分割器 splitter.addWidget(left_widget) splitter.addWidget(right_widget) splitter.setSizes([600, 200]) # 设置样式 self.setStyleSheet(""" QMainWindow { background-color: #252526; } QGroupBox { font-family: Segoe UI; font-size: 10pt; color: #CCCCCC; border: 1px solid #3F3F46; border-radius: 4px; margin-top: 1ex; } QGroupBox::title { subcontrol-origin: margin; left: 10px; padding: 0 5px; background-color: transparent; } """) def open_settings(self): settings_dialog = SettingsDialog(self.detector.cfg, self) settings_dialog.exec() def update_ui(self): try: # 获取最新数据 latest_data = None while not self.frame_queue.empty(): latest_data = self.frame_queue.get_nowait() if latest_data: # 解包数据 frame, targets_count, inference_time, grab_time, target_info = latest_data # 更新性能信息 self.target_count_label.setText(f"目标数量: {targets_count}") self.inference_time_label.setText(f"推理时间: {inference_time / 1000:.3f}s") self.grab_time_label.setText(f"截图时间: {grab_time / 1000:.3f}s") # 更新目标信息 self.display_target_info(target_info) # 更新图像显示 if self.visualization_enabled and frame is not None: # 转换图像为Qt格式 height, width, channel = frame.shape bytes_per_line = 3 * width q_img = QImage(frame.data, width, height, bytes_per_line, QImage.Format.Format_BGR888) pixmap = QPixmap.fromImage(q_img) # 等比例缩放 scaled_pixmap = pixmap.scaled( self.image_label.width(), self.image_label.height(), Qt.AspectRatioMode.KeepAspectRatio, Qt.TransformationMode.SmoothTransformation ) self.image_label.setPixmap(scaled_pixmap) else: # 显示黑色背景 pixmap = QPixmap(self.image_label.size()) pixmap.fill(QColor(0, 0, 0)) self.image_label.setPixmap(pixmap) # 更新鼠标状态 self.update_mouse_status() except Exception as e: print(f"更新UI时出错: {str(e)}") def display_target_info(self, target_info): """在文本框中显示目标信息""" if not target_info: self.target_info_text.setPlainText("无检测目标") return info_text = "目标类别与坐标:\n" for i, data in enumerate(target_info): try: parts = data.split(":", 1) if len(parts) == 2: class_name, coords_str = parts coords = list(map(int, coords_str.split(','))) if len(coords) == 4: display_text = f"{class_name}: [{coords[0]}, {coords[1]}, {coords[2]}, {coords[3]}]" else: display_text = f"坐标格式错误: {data}" else: display_text = f"数据格式错误: {data}" except: display_text = f"解析错误: {data}" info_text += f"{display_text}\n" self.target_info_text.setPlainText(info_text) def update_mouse_status(self): """更新鼠标右键状态显示""" with self.detector.button_lock: if self.detector.right_button_pressed: self.mouse_status.setText("瞄准中") self.mouse_status.setStyleSheet("color: #4CAF50; font-family: Consolas; font-size: 10pt;") else: self.mouse_status.setText("未瞄准") self.mouse_status.setStyleSheet("color: #FF5252; font-family: Consolas; font-size: 10pt;") def 
closeEvent(self, event): """安全关闭程序""" self.detection_thread.stop() self.detection_thread.wait() event.accept() class SettingsDialog(QDialog): def __init__(self, config, parent=None): super().__init__(parent) self.config = config # 保存原始配置的副本用于比较 self.original_config = config.copy() self.setWindowTitle("设置") self.setGeometry(100, 100, 600, 500) self.init_ui() def init_ui(self): layout = QVBoxLayout() self.setLayout(layout) # 标签页 tabs = QTabWidget() layout.addWidget(tabs) # 检测设置标签页 detection_tab = QWidget() detection_layout = QVBoxLayout(detection_tab) self.create_detection_settings(detection_layout) tabs.addTab(detection_tab, "检测") # 移动设置标签页 move_tab = QWidget() move_layout = QVBoxLayout(move_tab) self.create_move_settings(move_layout) tabs.addTab(move_tab, "FOV") # 目标点设置标签页 target_tab = QWidget() target_layout = QVBoxLayout(target_tab) self.create_target_settings(target_layout) tabs.addTab(target_tab, "目标点") # PID设置标签页 pid_tab = QWidget() pid_layout = QVBoxLayout(pid_tab) self.create_pid_settings(pid_layout) tabs.addTab(pid_tab, "PID") # 贝塞尔曲线设置标签页 bezier_tab = QWidget() bezier_layout = QVBoxLayout(bezier_tab) self.create_bezier_settings(bezier_layout) tabs.addTab(bezier_tab, "贝塞尔曲线") # 按钮区域 btn_layout = QHBoxLayout() layout.addLayout(btn_layout) save_btn = QPushButton("保存配置") save_btn.clicked.connect(self.save_config) cancel_btn = QPushButton("取消") cancel_btn.clicked.connect(self.reject) for btn in [save_btn, cancel_btn]: btn.setStyleSheet(""" QPushButton { background-color: #0078D7; color: white; border: none; padding: 8px 16px; border-radius: 4px; font-family: Segoe UI; font-size: 10pt; } QPushButton:hover { background-color: #106EBE; } QPushButton:pressed { background-color: #005A9E; } """) btn_layout.addWidget(btn) btn_layout.addStretch() def create_detection_settings(self, layout): # 模型选择 model_group = QGroupBox("模型设置") model_layout = QVBoxLayout(model_group) # 获取基础路径 if getattr(sys, 'frozen', False): base_path = sys._MEIPASS else: base_path = os.path.dirname(os.path.abspath(__file__)) # 获取模型文件列表 models_dir = os.path.join(base_path, 'models') model_files = [] if os.path.exists(models_dir): model_files = glob.glob(os.path.join(models_dir, '*.pt')) # 处理模型显示名称 model_display_names = [os.path.basename(f) for f in model_files] if model_files else ["未找到模型文件"] self.model_name_to_path = {os.path.basename(f): f for f in model_files} # 当前配置的模型处理 current_model_path = self.config['model_path'] current_model_name = os.path.basename(current_model_path) # 确保当前模型在列表中 if current_model_name not in model_display_names: model_display_names.append(current_model_name) self.model_name_to_path[current_model_name] = current_model_path # 模型选择下拉框 model_layout.addWidget(QLabel("选择模型:")) self.model_combo = QComboBox() self.model_combo.addItems(model_display_names) self.model_combo.setCurrentText(current_model_name) model_layout.addWidget(self.model_combo) # 设备选择 model_layout.addWidget(QLabel("运行设备:")) self.device_combo = QComboBox() self.device_combo.addItems(['auto', 'cuda', 'cpu']) self.device_combo.setCurrentText(self.config['model_device']) model_layout.addWidget(self.device_combo) layout.addWidget(model_group) # 检测参数 param_group = QGroupBox("检测参数") param_layout = QVBoxLayout(param_group) # 置信度阈值 param_layout.addWidget(QLabel("置信度阈值:")) conf_layout = QHBoxLayout() self.conf_slider = QSlider(Qt.Orientation.Horizontal) self.conf_slider.setRange(10, 100) # 0.1到1.0,步长0.01 self.conf_slider.setValue(int(float(self.config['detection_conf_thres']) * 100)) conf_layout.addWidget(self.conf_slider) self.conf_value = 
QLabel(f"{float(self.config['detection_conf_thres']):.2f}") self.conf_value.setFixedWidth(50) conf_layout.addWidget(self.conf_value) param_layout.addLayout(conf_layout) # 连接滑块值变化事件 self.conf_slider.valueChanged.connect(lambda value: self.conf_value.setText(f"{value / 100:.2f}")) # IOU阈值 - 改为滑动条 param_layout.addWidget(QLabel("IOU阈值:")) iou_layout = QHBoxLayout() self.iou_slider = QSlider(Qt.Orientation.Horizontal) self.iou_slider.setRange(10, 100) # 0.1到1.0,步长0.01 self.iou_slider.setValue(int(float(self.config['detection_iou_thres']) * 100)) iou_layout.addWidget(self.iou_slider) self.iou_value = QLabel(f"{float(self.config['detection_iou_thres']):.2f}") self.iou_value.setFixedWidth(50) iou_layout.addWidget(self.iou_value) param_layout.addLayout(iou_layout) # 连接滑块值变化事件 self.iou_slider.valueChanged.connect(lambda value: self.iou_value.setText(f"{value / 100:.2f}")) # 检测类别 param_layout.addWidget(QLabel("检测类别 (逗号分隔):")) self.classes_edit = QLineEdit() self.classes_edit.setText(self.config['detection_classes']) param_layout.addWidget(self.classes_edit) layout.addWidget(param_group) # 屏幕设置 screen_group = QGroupBox("屏幕设置") screen_layout = QVBoxLayout(screen_group) # 显示器编号 screen_layout.addWidget(QLabel("显示器编号:")) self.monitor_spin = QSpinBox() self.monitor_spin.setRange(0, 3) # 假设最多支持4个显示器 self.monitor_spin.setValue(int(self.config.get('screen_monitor', '0'))) screen_layout.addWidget(self.monitor_spin) # 屏幕区域大小 screen_layout.addWidget(QLabel("截屏尺寸:")) self.screen_size_spin = QSpinBox() self.screen_size_spin.setRange(100, 2000) self.screen_size_spin.setValue(int(self.config['screen_target_size'])) screen_layout.addWidget(self.screen_size_spin) layout.addWidget(screen_group) layout.addStretch() def create_move_settings(self, layout): group = QGroupBox("鼠标移动参数") group_layout = QVBoxLayout(group) # FOV设置 group_layout.addWidget(QLabel("横向FOV(度):")) self.fov_spin = QDoubleSpinBox() self.fov_spin.setRange(1, 179) self.fov_spin.setValue(float(self.config.get('move_fov_horizontal', '90'))) group_layout.addWidget(self.fov_spin) # 鼠标DPI group_layout.addWidget(QLabel("鼠标DPI:")) self.dpi_spin = QSpinBox() self.dpi_spin.setRange(100, 20000) self.dpi_spin.setValue(int(self.config.get('move_mouse_dpi', '400'))) group_layout.addWidget(self.dpi_spin) layout.addWidget(group) layout.addStretch() def create_target_settings(self, layout): group = QGroupBox("目标点偏移") group_layout = QVBoxLayout(group) # X轴偏移 - 添加百分比显示 group_layout.addWidget(QLabel("X轴偏移:")) x_layout = QHBoxLayout() self.x_offset_slider = QSlider(Qt.Orientation.Horizontal) self.x_offset_slider.setRange(0, 100) self.x_offset_slider.setValue(int(float(self.config.get('target_offset_x', '50')))) x_layout.addWidget(self.x_offset_slider) self.x_offset_value = QLabel(f"{int(float(self.config.get('target_offset_x', '50')))}%") self.x_offset_value.setFixedWidth(50) x_layout.addWidget(self.x_offset_value) group_layout.addLayout(x_layout) # 连接滑块值变化事件 self.x_offset_slider.valueChanged.connect(lambda value: self.x_offset_value.setText(f"{value}%")) # Y轴偏移 - 添加百分比显示 group_layout.addWidget(QLabel("Y轴偏移:")) y_layout = QHBoxLayout() self.y_offset_slider = QSlider(Qt.Orientation.Horizontal) self.y_offset_slider.setRange(0, 100) self.y_offset_slider.setValue(int(float(self.config.get('target_offset_y', '50')))) y_layout.addWidget(self.y_offset_slider) self.y_offset_value = QLabel(f"{int(float(self.config.get('target_offset_y', '50')))}%") self.y_offset_value.setFixedWidth(50) y_layout.addWidget(self.y_offset_value) group_layout.addLayout(y_layout) # 连接滑块值变化事件 
self.y_offset_slider.valueChanged.connect(lambda value: self.y_offset_value.setText(f"{value}%")) # 说明 info_label = QLabel("(0% = 左上角, 50% = 中心, 100% = 右下角)") info_label.setStyleSheet("font-size: 9pt; color: #888888;") group_layout.addWidget(info_label) layout.addWidget(group) layout.addStretch() def create_pid_settings(self, layout): group = QGroupBox("PID参数") group_layout = QVBoxLayout(group) # Kp参数 group_layout.addWidget(QLabel("比例增益(Kp):")) kp_layout = QHBoxLayout() self.kp_slider = QSlider(Qt.Orientation.Horizontal) self.kp_slider.setRange(1, 1000) # 0.01到10.0,步长0.01 self.kp_slider.setValue(int(float(self.config.get('pid_kp', '1.0')) * 100)) kp_layout.addWidget(self.kp_slider) self.kp_value = QLabel(f"{float(self.config.get('pid_kp', '1.0')):.2f}") self.kp_value.setFixedWidth(50) kp_layout.addWidget(self.kp_value) group_layout.addLayout(kp_layout) # 连接滑块值变化事件 self.kp_slider.valueChanged.connect(lambda value: self.kp_value.setText(f"{value / 100:.2f}")) # Ki参数 group_layout.addWidget(QLabel("积分增益(Ki):")) ki_layout = QHBoxLayout() self.ki_slider = QSlider(Qt.Orientation.Horizontal) self.ki_slider.setRange(0, 500) # 0.0000到0.1000,步长0.001 self.ki_slider.setValue(int(float(self.config.get('pid_ki', '0.05')) * 10000)) ki_layout.addWidget(self.ki_slider) self.ki_value = QLabel(f"{float(self.config.get('pid_ki', '0.05')):.4f}") self.ki_value.setFixedWidth(50) ki_layout.addWidget(self.ki_value) group_layout.addLayout(ki_layout) # 连接滑块值变化事件 self.ki_slider.valueChanged.connect(lambda value: self.ki_value.setText(f"{value / 10000:.4f}")) # Kd参数 group_layout.addWidget(QLabel("微分增益(Kd):")) kd_layout = QHBoxLayout() self.kd_slider = QSlider(Qt.Orientation.Horizontal) self.kd_slider.setRange(0, 5000) # 0.000到5.000,步长0.001 self.kd_slider.setValue(int(float(self.config.get('pid_kd', '0.2')) * 1000)) kd_layout.addWidget(self.kd_slider) self.kd_value = QLabel(f"{float(self.config.get('pid_kd', '0.2')):.3f}") self.kd_value.setFixedWidth(50) kd_layout.addWidget(self.kd_value) group_layout.addLayout(kd_layout) # 连接滑块值变化事件 self.kd_slider.valueChanged.connect(lambda value: self.kd_value.setText(f"{value / 1000:.3f}")) # 说明 info_text = "建议调整顺序: Kp → Kd → Ki\n\n" \ "先调整Kp至响应迅速但不过冲\n" \ "再增加Kd抑制震荡\n" \ "最后微调Ki消除剩余误差" info_label = QLabel(info_text) info_label.setStyleSheet("font-size: 9pt; color: #888888;") group_layout.addWidget(info_label) layout.addWidget(group) layout.addStretch() # 创建贝塞尔曲线设置 def create_bezier_settings(self, layout): group = QGroupBox("贝塞尔曲线参数") group_layout = QVBoxLayout(group) # 步数设置 group_layout.addWidget(QLabel("步数:")) steps_layout = QHBoxLayout() self.steps_slider = QSlider(Qt.Orientation.Horizontal) self.steps_slider.setRange(1, 500) self.steps_slider.setValue(int(self.config.get('bezier_steps', 100))) steps_layout.addWidget(self.steps_slider) self.steps_value = QLabel(str(self.config.get('bezier_steps', 100))) self.steps_value.setFixedWidth(50) steps_layout.addWidget(self.steps_value) group_layout.addLayout(steps_layout) # 连接滑块值变化事件 self.steps_slider.valueChanged.connect(lambda value: self.steps_value.setText(str(value))) # 总移动时间设置 (秒) group_layout.addWidget(QLabel("总移动时间 (秒):")) duration_layout = QHBoxLayout() self.duration_slider = QSlider(Qt.Orientation.Horizontal) self.duration_slider.setRange(0, 100) # 0.01到1.0,步长0.01 self.duration_slider.setValue(int(float(self.config.get('bezier_duration', 0.1)) * 100)) duration_layout.addWidget(self.duration_slider) self.duration_value = QLabel(f"{float(self.config.get('bezier_duration', 0.1)):.2f}") self.duration_value.setFixedWidth(50) 
duration_layout.addWidget(self.duration_value) group_layout.addLayout(duration_layout) # 连接滑块值变化事件 self.duration_slider.valueChanged.connect(lambda value: self.duration_value.setText(f"{value / 100:.2f}")) # 控制点偏移幅度 group_layout.addWidget(QLabel("控制点偏移幅度 (0-1):")) curve_layout = QHBoxLayout() self.curve_slider = QSlider(Qt.Orientation.Horizontal) self.curve_slider.setRange(0, 100) # 0.00到1.00,步长0.01 self.curve_slider.setValue(int(float(self.config.get('bezier_curve', 0.3)) * 100)) curve_layout.addWidget(self.curve_slider) self.curve_value = QLabel(f"{float(self.config.get('bezier_curve', 0.3)):.2f}") self.curve_value.setFixedWidth(50) curve_layout.addWidget(self.curve_value) group_layout.addLayout(curve_layout) # 连接滑块值变化事件 self.curve_slider.valueChanged.connect(lambda value: self.curve_value.setText(f"{value / 100:.2f}")) # 说明 info_text = "贝塞尔曲线参数说明:\n\n" \ "• 步数: 鼠标移动的细分步数,值越大移动越平滑\n" \ "• 总移动时间: 鼠标移动的总时间,值越小移动越快\n" \ "• 控制点偏移幅度: 控制贝塞尔曲线的弯曲程度,0为直线,1为最大弯曲" info_label = QLabel(info_text) info_label.setStyleSheet("font-size: 9pt; color: #888888;") group_layout.addWidget(info_label) layout.addWidget(group) layout.addStretch() def save_config(self): try: # 保存配置到字典 model_name = self.model_combo.currentText() model_path = self.model_name_to_path.get(model_name, model_name) self.config['model_path'] = model_path self.config['model_device'] = self.device_combo.currentText() self.config['screen_monitor'] = str(self.monitor_spin.value()) self.config['screen_target_size'] = str(self.screen_size_spin.value()) # 检测参数 self.config['detection_conf_thres'] = str(self.conf_slider.value() / 100) self.config['detection_iou_thres'] = str(self.iou_slider.value() / 100) self.config['detection_classes'] = self.classes_edit.text() # 移动设置 self.config['move_fov_horizontal'] = str(self.fov_spin.value()) self.config['move_mouse_dpi'] = str(self.dpi_spin.value()) # 目标点偏移设置 self.config['target_offset_x'] = str(self.x_offset_slider.value()) self.config['target_offset_y'] = str(self.y_offset_slider.value()) # PID设置 self.config['pid_kp'] = str(self.kp_slider.value() / 100) self.config['pid_ki'] = str(self.ki_slider.value() / 10000) self.config['pid_kd'] = str(self.kd_slider.value() / 1000) # 贝塞尔曲线设置 self.config['bezier_steps'] = str(self.steps_slider.value()) self.config['bezier_duration'] = str(self.duration_slider.value() / 100) self.config['bezier_curve'] = str(self.curve_slider.value() / 100) # 保存为TXT格式 with open('detection_config.txt', 'w', encoding='utf-8') as f: for key, value in self.config.items(): f.write(f"{key} = {value}\n") # 检查需要重启的参数是否被修改 restart_required = False restart_params = [] # 比较模型路径是否变化 if self.config['model_path'] != self.original_config.get('model_path', ''): restart_required = True restart_params.append("模型路径") # 比较设备类型是否变化 if self.config['model_device'] != self.original_config.get('model_device', ''): restart_required = True restart_params.append("设备类型") # 比较屏幕区域大小是否变化 if self.config['screen_target_size'] != self.original_config.get('screen_target_size', ''): restart_required = True restart_params.append("屏幕区域大小") # 比较检测类别是否变化 if self.config['detection_classes'] != self.original_config.get('detection_classes', ''): restart_required = True restart_params.append("检测类别") # 动态更新检测器配置 if self.parent() and hasattr(self.parent(), 'detector'): success = self.parent().detector.update_config('detection_config.txt') if success: if restart_required: # 需要重启的参数已修改 param_list = "、".join(restart_params) QMessageBox.information( self, "配置已保存", f"配置已保存!以下参数需要重启才能生效:\n{param_list}\n\n" "其他参数已实时更新。" ) else: # 
所有参数都已实时更新 QMessageBox.information(self, "成功", "配置已实时更新生效!") else: QMessageBox.warning(self, "部分更新", "配置更新失败,请查看日志") else: QMessageBox.information(self, "成功", "配置已保存!部分参数需重启生效") self.accept() except Exception as e: QMessageBox.critical(self, "错误", f"保存配置失败: {str(e)}") if __name__ == "__main__": detector = ScreenDetector('detection_config.txt') print(f"\nDXcam检测器初始化完成 | 设备: {detector.device.upper()}") app = QApplication(sys.argv) # 设置全局样式 app.setStyle("Fusion") app.setStyleSheet(""" QWidget { background-color: #252526; color: #D4D4D4; selection-background-color: #0078D7; selection-color: white; } QPushButton { background-color: #0078D7; color: white; border: none; padding: 5px 10px; border-radius: 4px; } QPushButton:hover { background-color: #106EBE; } QPushButton:pressed { background-color: #005A9E; } QComboBox, QLineEdit, QSpinBox, QDoubleSpinBox, QSlider { background-color: #3C3C40; color: #D4D4D4; border: 1px solid #3F3F46; border-radius: 4px; padding: 3px; } QComboBox:editable { background-color: #3C3C40; } QComboBox QAbstractItemView { background-color: #2D2D30; color: #D4D4D4; selection-background-color: #0078D7; selection-color: white; } QLabel { color: #D4D4D4; } QTabWidget::pane { border: 1px solid #3F3F46; background: #252526; } QTabBar::tab { background: #1E1E1E; color: #A0A0A0; padding: 8px 12px; border-top-left-radius: 4px; border-top-right-radius: 4px; } QTabBar::tab:selected { background: #252526; color: #FFFFFF; border-bottom: 2px solid #0078D7; } QTabBar::tab:hover { background: #2D2D30; } QGroupBox { background-color: #252526; border: 1px solid #3F3F46; border-radius: 4px; margin-top: 1ex; } QGroupBox::title { subcontrol-origin: margin; left: 10px; padding: 0 5px; background-color: transparent; color: #CCCCCC; } """) window = MainWindow(detector) window.show() sys.exit(app.exec()) 重构我的代码,减少不必要的代码,确保功能一样,UI一样。

filetype

import sys import os import cv2 import numpy as np import torch from PyQt5.QtWidgets import QListWidget, QProgressDialog from facenet_pytorch import MTCNN, InceptionResnetV1 from PIL import Image from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QLabel, QFileDialog, QComboBox, QSlider, QMessageBox, QTextEdit, QGroupBox, QScrollArea, QDialog, QDialogButtonBox, QTableWidget, QTableWidgetItem, QHeaderView, QGridLayout) from PyQt5.QtCore import Qt, QTimer from PyQt5.QtGui import QImage, QPixmap, QIcon, QFont, QColor import joblib import logging import json from datetime import datetime # 在 dorm_face_recognition_gui.py 顶部添加导入 from face_recognition import FaceRecognition # 配置日志 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) class FeedbackDialog(QDialog): """反馈对话框""" def __init__(self, parent=None, last_results=None, dorm_members=None): super().__init__(parent) self.setWindowTitle("识别错误反馈") self.setFixedSize(500, 400) self.last_results = last_results or [] self.dorm_members = dorm_members or [] self.init_ui() def init_ui(self): layout = QVBoxLayout(self) # 添加当前识别结果 result_label = QLabel("当前识别结果:") layout.addWidget(result_label) # 使用表格显示结果 self.results_table = QTableWidget() self.results_table.setColumnCount(4) self.results_table.setHorizontalHeaderLabels(["ID", "识别结果", "置信度", "位置"]) self.results_table.setSelectionBehavior(QTableWidget.SelectRows) self.results_table.setEditTriggers(QTableWidget.NoEditTriggers) # 填充表格数据 self.results_table.setRowCount(len(self.last_results)) for i, result in enumerate(self.last_results): self.results_table.setItem(i, 0, QTableWidgetItem(str(i + 1))) self.results_table.setItem(i, 1, QTableWidgetItem(result["label"])) self.results_table.setItem(i, 2, QTableWidgetItem(f"{result['confidence']:.2f}")) x, y = result.get("position", (0, 0)) self.results_table.setItem(i, 3, QTableWidgetItem(f"({x}, {y})")) # 设置表格样式 self.results_table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch) self.results_table.verticalHeader().setVisible(False) layout.addWidget(self.results_table) # 添加正确身份选择 correct_layout = QGridLayout() correct_label = QLabel("正确身份:") correct_layout.addWidget(correct_label, 0, 0) self.correct_combo = QComboBox() self.correct_combo.addItem("选择正确身份", None) for member in self.dorm_members: self.correct_combo.addItem(member, member) self.correct_combo.addItem("陌生人", "stranger") self.correct_combo.addItem("不在列表中", "unknown") correct_layout.addWidget(self.correct_combo, 0, 1) # 添加备注 note_label = QLabel("备注:") correct_layout.addWidget(note_label, 1, 0) self.note_text = QTextEdit() self.note_text.setPlaceholderText("可添加额外说明...") self.note_text.setMaximumHeight(60) correct_layout.addWidget(self.note_text, 1, 1) layout.addLayout(correct_layout) # 添加按钮 button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel) button_box.accepted.connect(self.accept) button_box.rejected.connect(self.reject) layout.addWidget(button_box) def get_selected_result(self): """获取选择的识别结果""" selected_row = self.results_table.currentRow() if selected_row >= 0 and selected_row < len(self.last_results): return self.last_results[selected_row] return None def get_feedback_data(self): """获取反馈数据""" selected_result = self.get_selected_result() if not selected_result: return None return { "timestamp": datetime.now().isoformat(), "original_label": selected_result["label"], "correct_label": self.correct_combo.currentData(), "confidence": 
selected_result["confidence"], "position": selected_result.get("position", (0, 0)), "note": self.note_text.toPlainText().strip() } class FaceRecognitionSystem(QMainWindow): def __init__(self): super().__init__() self.setWindowTitle("寝室人脸识别系统") self.setGeometry(100, 100, 1200, 800) # 初始化变量 self.model_loaded = False self.camera_active = False self.video_capture = None self.timer = QTimer() self.current_image = None self.last_results = [] # 存储上次识别结果 self.dorm_members = [] # 寝室成员列表 # 创建主界面 self.main_widget = QWidget() self.setCentralWidget(self.main_widget) self.layout = QHBoxLayout(self.main_widget) # 左侧控制面板 - 占40%宽度 self.control_panel = QWidget() self.control_layout = QVBoxLayout(self.control_panel) self.control_layout.setAlignment(Qt.AlignTop) self.control_panel.setMaximumWidth(400) self.layout.addWidget(self.control_panel, 40) # 40%宽度 # 右侧图像显示区域 - 占60%宽度 self.image_panel = QWidget() self.image_layout = QVBoxLayout(self.image_panel) self.image_label = QLabel() self.image_label.setAlignment(Qt.AlignCenter) self.image_label.setMinimumSize(800, 600) self.image_label.setStyleSheet("background-color: #333; border: 1px solid #555;") self.image_layout.addWidget(self.image_label) self.layout.addWidget(self.image_panel, 60) # 60%宽度 # 状态栏 self.status_bar = self.statusBar() self.status_bar.showMessage("系统初始化中...") # 初始化人脸识别器 - 关键修复 self.face_recognition = FaceRecognition() # 初始化UI组件 self.init_ui() # 添加工具栏(必须在UI初始化后) self.toolbar = self.addToolBar('工具栏') # 添加反馈按钮 self.add_feedback_button() # 初始化模型 self.init_models() def init_ui(self): """初始化用户界面组件""" # 标题 title_label = QLabel("寝室人脸识别系统") title_label.setFont(QFont("Arial", 18, QFont.Bold)) title_label.setAlignment(Qt.AlignCenter) title_label.setStyleSheet("color: #2c3e50; padding: 10px;") self.control_layout.addWidget(title_label) # 模型加载 model_group = QGroupBox("模型设置") model_layout = QVBoxLayout(model_group) self.load_model_btn = QPushButton("加载模型") self.load_model_btn.setIcon(QIcon.fromTheme("document-open")) self.load_model_btn.setStyleSheet("background-color: #3498db;") self.load_model_btn.clicked.connect(self.load_model) model_layout.addWidget(self.load_model_btn) self.model_status = QLabel("模型状态: 未加载") model_layout.addWidget(self.model_status) self.control_layout.addWidget(model_group) # 在模型设置部分添加重新训练按钮 self.retrain_btn = QPushButton("重新训练模型") self.retrain_btn.setIcon(QIcon.fromTheme("view-refresh")) self.retrain_btn.setStyleSheet("background-color: #f39c12;") self.retrain_btn.clicked.connect(self.retrain_model) self.retrain_btn.setEnabled(False) # 初始不可用 model_layout.addWidget(self.retrain_btn) # 识别设置 settings_group = QGroupBox("识别设置") settings_layout = QVBoxLayout(settings_group) # 置信度阈值 threshold_layout = QHBoxLayout() threshold_label = QLabel("置信度阈值:") threshold_layout.addWidget(threshold_label) self.threshold_slider = QSlider(Qt.Horizontal) self.threshold_slider.setRange(0, 100) self.threshold_slider.setValue(70) self.threshold_slider.valueChanged.connect(self.update_threshold) threshold_layout.addWidget(self.threshold_slider) self.threshold_value = QLabel("0.70") threshold_layout.addWidget(self.threshold_value) settings_layout.addLayout(threshold_layout) # 显示选项 display_layout = QHBoxLayout() display_label = QLabel("显示模式:") display_layout.addWidget(display_label) self.display_combo = QComboBox() self.display_combo.addItems(["原始图像", "检测框", "识别结果"]) self.display_combo.setCurrentIndex(2) display_layout.addWidget(self.display_combo) settings_layout.addLayout(display_layout) self.control_layout.addWidget(settings_group) # 识别功能 recognition_group = 
QGroupBox("识别功能") recognition_layout = QVBoxLayout(recognition_group) # 图片识别 self.image_recognition_btn = QPushButton("图片识别") self.image_recognition_btn.setIcon(QIcon.fromTheme("image-x-generic")) self.image_recognition_btn.setStyleSheet("background-color: #9b59b6;") self.image_recognition_btn.clicked.connect(self.open_image) self.image_recognition_btn.setEnabled(False) recognition_layout.addWidget(self.image_recognition_btn) # 摄像头识别 self.camera_recognition_btn = QPushButton("启动摄像头识别") self.camera_recognition_btn.setIcon(QIcon.fromTheme("camera-web")) self.camera_recognition_btn.setStyleSheet("background-color: #e74c3c;") self.camera_recognition_btn.clicked.connect(self.toggle_camera) self.camera_recognition_btn.setEnabled(False) recognition_layout.addWidget(self.camera_recognition_btn) self.control_layout.addWidget(recognition_group) # 结果展示区域 - 使用QTextEdit替代QLabel results_group = QGroupBox("识别结果") results_layout = QVBoxLayout(results_group) self.results_text = QTextEdit() self.results_text.setReadOnly(True) self.results_text.setFont(QFont("Microsoft YaHei", 12)) # 使用支持中文的字体 self.results_text.setStyleSheet("background-color: #f8f9fa; border: 1px solid #ddd; padding: 10px;") self.results_text.setPlaceholderText("识别结果将显示在这里") # 添加滚动区域 scroll_area = QScrollArea() scroll_area.setWidgetResizable(True) scroll_area.setWidget(self.results_text) results_layout.addWidget(scroll_area) self.control_layout.addWidget(results_group, 1) # 占据剩余空间 # 系统信息 info_group = QGroupBox("系统信息") info_layout = QVBoxLayout(info_group) self.device_label = QLabel(f"计算设备: {'GPU' if torch.cuda.is_available() else 'CPU'}") info_layout.addWidget(self.device_label) self.model_info = QLabel("加载模型以显示信息") info_layout.addWidget(self.model_info) self.control_layout.addWidget(info_group) # 退出按钮 exit_btn = QPushButton("退出系统") exit_btn.setIcon(QIcon.fromTheme("application-exit")) exit_btn.clicked.connect(self.close) exit_btn.setStyleSheet("background-color: #ff6b6b; color: white;") self.control_layout.addWidget(exit_btn) def add_feedback_button(self): """添加反馈按钮到界面""" # 创建反馈按钮 self.feedback_button = QPushButton("提供反馈", self) self.feedback_button.setFixedSize(120, 40) # 设置固定大小 self.feedback_button.setStyleSheet( "QPushButton {" " background-color: #4CAF50;" " color: white;" " border-radius: 5px;" " font-weight: bold;" "}" "QPushButton:hover {" " background-color: #45a049;" "}" ) # 连接按钮点击事件 self.feedback_button.clicked.connect(self.open_feedback_dialog) # 添加到工具栏 self.toolbar.addWidget(self.feedback_button) def open_feedback_dialog(self): """打开反馈对话框""" if not self.last_results: QMessageBox.warning(self, "无法反馈", "没有可反馈的识别结果") return dialog = FeedbackDialog( self, last_results=self.last_results, dorm_members=self.dorm_members ) if dialog.exec_() == QDialog.Accepted: feedback_data = dialog.get_feedback_data() if feedback_data: self.save_feedback(feedback_data) QMessageBox.information(self, "反馈提交", "感谢您的反馈!数据已保存用于改进模型") def init_models(self): """初始化模型组件""" # 设置设备 self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.device_label.setText(f"计算设备: {'GPU' if torch.cuda.is_available() else 'CPU'}") # 初始化人脸检测器 try: self.detector = MTCNN( keep_all=True, post_process=False, device=self.device ) self.status_bar.showMessage("MTCNN 检测器初始化完成") logger.info("MTCNN 检测器初始化完成") except Exception as e: self.status_bar.showMessage(f"MTCNN 初始化失败: {str(e)}") logger.error(f"MTCNN 初始化失败: {str(e)}") return # 初始化人脸特征提取器 try: self.embedder = InceptionResnetV1( pretrained='vggface2', classify=False, device=self.device ).eval() 
self.status_bar.showMessage("FaceNet 特征提取器初始化完成") logger.info("FaceNet 特征提取器初始化完成") except Exception as e: self.status_bar.showMessage(f"FaceNet 初始化失败: {str(e)}") logger.error(f"FaceNet 初始化失败: {str(e)}") def load_model(self): """加载预训练的SVM分类器""" options = QFileDialog.Options() file_path, _ = QFileDialog.getOpenFileName( self, "选择模型文件", "", "模型文件 (*.pkl);;所有文件 (*)", options=options ) if file_path: try: # 加载模型 model_data = joblib.load(file_path) self.classifier = model_data['classifier'] self.label_encoder = model_data['label_encoder'] self.dorm_members = model_data['dorm_members'] # 启用重新训练按钮 self.retrain_btn.setEnabled(True) # 更新UI状态 self.model_loaded = True self.model_status.setText("模型状态: 已加载") self.model_info.setText(f"寝室成员: {', '.join(self.dorm_members)}") self.image_recognition_btn.setEnabled(True) self.camera_recognition_btn.setEnabled(True) # 状态栏消息 self.status_bar.showMessage(f"模型加载成功: {os.path.basename(file_path)}") # 显示成功消息 QMessageBox.information( self, "模型加载", f"模型加载成功!\n识别成员: {len(self.dorm_members)}人\n置信度阈值: {self.threshold_slider.value() / 100:.2f}" ) except Exception as e: QMessageBox.critical(self, "加载错误", f"模型加载失败: {str(e)}") self.status_bar.showMessage(f"模型加载失败: {str(e)}") def update_threshold(self, value): """更新置信度阈值""" threshold = value / 100 self.threshold_value.setText(f"{threshold:.2f}") self.status_bar.showMessage(f"置信度阈值更新为: {threshold:.2f}") def open_image(self): """打开图片文件进行识别""" if not self.model_loaded: QMessageBox.warning(self, "警告", "请先加载模型!") return options = QFileDialog.Options() file_path, _ = QFileDialog.getOpenFileName( self, "选择识别图片", "", "图片文件 (*.jpg *.jpeg *.png);;所有文件 (*)", options=options ) if file_path: # 读取图片 image = cv2.imread(file_path) if image is None: QMessageBox.critical(self, "错误", "无法读取图片文件!") return # 保存当前图片 self.current_image = image.copy() # 进行识别 self.recognize_faces(image) def toggle_camera(self): """切换摄像头状态""" if not self.model_loaded: QMessageBox.warning(self, "警告", "请先加载模型!") return if not self.camera_active: # 尝试打开摄像头 self.video_capture = cv2.VideoCapture(0) if not self.video_capture.isOpened(): QMessageBox.critical(self, "错误", "无法打开摄像头!") return # 启动摄像头 self.camera_active = True self.camera_recognition_btn.setText("停止摄像头识别") self.camera_recognition_btn.setIcon(QIcon.fromTheme("media-playback-stop")) self.timer.timeout.connect(self.process_camera_frame) self.timer.start(30) # 约33 FPS self.status_bar.showMessage("摄像头已启动") else: # 停止摄像头 self.camera_active = False self.camera_recognition_btn.setText("启动摄像头识别") self.camera_recognition_btn.setIcon(QIcon.fromTheme("camera-web")) self.timer.stop() if self.video_capture: self.video_capture.release() self.status_bar.showMessage("摄像头已停止") def process_camera_frame(self): """处理摄像头帧""" ret, frame = self.video_capture.read() if ret: # 保存当前帧 self.current_image = frame.copy() # 进行识别 self.recognize_faces(frame) def retrain_model(self): """使用反馈数据重新训练模型""" # 获取所有反馈数据 feedback_dir = os.path.join(os.getcwd(), "data", "feedback_data") feedback_files = [f for f in os.listdir(feedback_dir) if f.endswith('.json') and os.path.isfile(os.path.join(feedback_dir, f))] if not feedback_files: QMessageBox.information(self, "无反馈数据", "没有找到反馈数据,无法重新训练") return # 确认对话框 reply = QMessageBox.question( self, '确认重新训练', f"将使用 {len(feedback_files)} 条反馈数据重新训练模型。此操作可能需要几分钟时间,确定继续吗?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No ) if reply != QMessageBox.Yes: return try: # 创建进度对话框 progress = QProgressDialog("正在重新训练模型...", "取消", 0, len(feedback_files), self) progress.setWindowTitle("模型重新训练") 
progress.setWindowModality(Qt.WindowModal) progress.setMinimumDuration(0) progress.setValue(0) # 收集所有反馈数据 feedback_data = [] for i, filename in enumerate(feedback_files): filepath = os.path.join(feedback_dir, filename) with open(filepath, 'r', encoding='utf-8') as f: data = json.load(f) feedback_data.append(data) progress.setValue(i + 1) QApplication.processEvents() # 保持UI响应 if progress.wasCanceled(): return progress.setValue(len(feedback_files)) # 重新训练模型 self.status_bar.showMessage("正在重新训练模型...") self.face_recognition.retrain_with_feedback(feedback_data) # 更新UI状态 self.model_status.setText("模型状态: 已重新训练") QMessageBox.information(self, "训练完成", "模型已成功使用反馈数据重新训练!") except Exception as e: logger.error(f"重新训练失败: {str(e)}") QMessageBox.critical(self, "训练错误", f"重新训练模型时出错: {str(e)}") def recognize_faces(self, image): """识别人脸并在图像上标注结果""" # 清空上次结果 self.last_results = [] # 转换为 PIL 图像 pil_image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) # 检测人脸 boxes, probs, _ = self.detector.detect(pil_image, landmarks=True) # 获取显示选项 display_mode = self.display_combo.currentIndex() # 准备显示图像 display_image = image.copy() # 如果没有检测到人脸 if boxes is None: if display_mode == 2: # 识别结果模式 cv2.putText(display_image, "未检测到人脸", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) self.results_text.setText("未检测到人脸") else: # 提取每个人脸 faces = [] for box in boxes: x1, y1, x2, y2 = box face = pil_image.crop((x1, y1, x2, y2)) faces.append(face) # 提取特征 embeddings = [] if faces and self.model_loaded: # 批量处理所有人脸 face_tensors = [self.preprocess_face(face) for face in faces] if face_tensors: face_tensors = torch.stack(face_tensors).to(self.device) with torch.no_grad(): embeddings = self.embedder(face_tensors).cpu().numpy() # 处理每个人脸 for i, (box, prob) in enumerate(zip(boxes, probs)): x1, y1, x2, y2 = box w, h = x2 - x1, y2 - y1 # 在图像上绘制结果 if display_mode == 0: # 原始图像 # 不绘制任何内容 pass elif display_mode == 1: # 检测框 # 绘制人脸框 cv2.rectangle(display_image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2) elif display_mode == 2: # 识别结果 # 绘制人脸框 color = (0, 255, 0) # 绿色 # 如果有嵌入向量,则进行识别 if i < len(embeddings): # 预测 probabilities = self.classifier.predict_proba([embeddings[i]])[0] max_prob = np.max(probabilities) pred_class = self.classifier.predict([embeddings[i]])[0] pred_label = self.label_encoder.inverse_transform([pred_class])[0] # 获取置信度阈值 threshold = self.threshold_slider.value() / 100 # 判断是否为陌生人 if max_prob < threshold or pred_label == 'stranger': label = "陌生人" color = (0, 0, 255) # 红色 else: label = pred_label color = (0, 255, 0) # 绿色 # 保存结果用于文本显示 result = { "position": (int(x1), int(y1)), "label": label, "confidence": max_prob } self.last_results.append(result) # 绘制标签 cv2.rectangle(display_image, (int(x1), int(y1)), (int(x2), int(y2)), color, 2) cv2.putText(display_image, f"{label} ({max_prob:.2f})", (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2) else: # 无法识别的处理 cv2.rectangle(display_image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 165, 255), 2) cv2.putText(display_image, "处理中...", (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 165, 255), 2) # 更新结果文本 self.update_results_text() # 在图像上显示FPS(摄像头模式下) if self.camera_active: fps = self.timer.interval() if fps > 0: cv2.putText(display_image, f"FPS: {1000 / fps:.1f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2) # 显示图像 self.display_image(display_image) def update_results_text(self): """更新结果文本区域""" if not self.last_results: self.results_text.setText("未识别到任何人脸") return # 构建结果文本 result_text = "

识别结果:

" for i, result in enumerate(self.last_results, 1): x, y = result["position"] label = result["label"] confidence = result["confidence"] # 处理中文显示问题 if label in self.dorm_members: result_text += ( f"

人脸 #{i}: " f"寝室成员 - {label}
" f"位置: ({x}, {y}), 置信度: {confidence:.2f}

" ) else: result_text += ( f"

人脸 #{i}: " f"陌生人
" f"位置: ({x}, {y}), 置信度: {confidence:.2f}

" ) self.results_text.setHtml(result_text) def preprocess_face(self, face_img): """预处理人脸图像""" # 调整大小 face_img = face_img.resize((160, 160)) # 转换为张量并归一化 face_img = np.array(face_img).astype(np.float32) / 255.0 face_img = (face_img - 0.5) / 0.5 # 归一化到[-1, 1] face_img = torch.tensor(face_img).permute(2, 0, 1) # HWC to CHW return face_img def display_image(self, image): """在QLabel中显示图像""" # 将OpenCV图像转换为Qt格式 height, width, channel = image.shape bytes_per_line = 3 * width q_img = QImage(image.data, width, height, bytes_per_line, QImage.Format_RGB888).rgbSwapped() # 缩放图像以适应标签 pixmap = QPixmap.fromImage(q_img) self.image_label.setPixmap(pixmap.scaled( self.image_label.width(), self.image_label.height(), Qt.KeepAspectRatio, Qt.SmoothTransformation )) def closeEvent(self, event): """关闭事件处理""" if self.camera_active: self.timer.stop() if self.video_capture: self.video_capture.release() # 确认退出 reply = QMessageBox.question( self, '确认退出', "确定要退出系统吗?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No ) if reply == QMessageBox.Yes: event.accept() else: event.ignore() if __name__ == "__main__": app = QApplication(sys.argv) # 设置全局异常处理 def handle_exception(exc_type, exc_value, exc_traceback): """全局异常处理""" import traceback error_msg = "".join(traceback.format_exception(exc_type, exc_value, exc_traceback)) print(f"未捕获的异常:\n{error_msg}") # 记录到文件 with open("error.log", "a") as f: f.write(f"\n\n{datetime.now()}:\n{error_msg}") # 显示给用户 QMessageBox.critical(None, "系统错误", f"发生未处理的异常:\n{str(exc_value)}") sys.exit(1) sys.excepthook = handle_exception window = FaceRecognitionSystem() window.show() sys.exit(app.exec_()) Traceback (most recent call last): File "E:\pycharm\study\dorm_face_recognition\dorm_face_recognition_gui.py", line 337, in open_feedback_dialog self.save_feedback(feedback_data) AttributeError: 'FaceRecognitionSystem' object has no attribute 'save_feedback'

import sys import os import cv2 import numpy as np import torch from PyQt5.QtWidgets import QListWidget from facenet_pytorch import MTCNN, InceptionResnetV1 from PIL import Image from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QLabel, QFileDialog, QComboBox, QSlider, QMessageBox, QTextEdit, QGroupBox, QScrollArea, QDialog) from PyQt5.QtCore import Qt, QTimer from PyQt5.QtGui import QImage, QPixmap, QIcon, QFont import joblib import logging from face_recognition import FaceRecognition # 配置日志 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) class FaceRecognitionSystem(QMainWindow): def __init__(self): super().__init__() self.setWindowTitle("寝室人脸识别系统") self.setGeometry(100, 100, 1200, 800) # 初始化人脸识别器 self.face_recognition = FaceRecognition() # 添加反馈按钮 self.add_feedback_button() def add_feedback_button(self): """添加反馈按钮到界面""" # 创建反馈按钮 self.feedback_button = QPushButton("提供反馈", self) self.feedback_button.setFixedSize(120, 40) # 设置固定大小 self.feedback_button.setStyleSheet( "QPushButton {" " background-color: #4CAF50;" " color: white;" " border-radius: 5px;" " font-weight: bold;" "}" "QPushButton:hover {" " background-color: #45a049;" "}" ) # 连接按钮点击事件 self.feedback_button.clicked.connect(self.open_feedback_dialog) # 或者添加到工具栏 self.toolbar.addWidget(self.feedback_button) def handle_feedback(self): """处理用户反馈""" if not hasattr(self, 'last_results') or not self.last_results: QMessageBox.warning(self, "警告", "没有可反馈的识别结果") return # 创建反馈对话框 dialog = QDialog(self) dialog.setWindowTitle("识别错误反馈") dialog.setFixedSize(400, 300) layout = QVBoxLayout(dialog) # 添加当前识别结果 result_label = QLabel("当前识别结果:") layout.addWidget(result_label) self.feedback_list = QListWidget() for i, result in enumerate(self.last_results, 1): label = result["label"] confidence = result["confidence"] self.feedback_list.addItem(f"人脸 #{i}: {label} (置信度: {confidence:.2f})") layout.addWidget(self.feedback_list) # 添加正确身份选择 correct_label = QLabel("正确身份:") layout.addWidget(correct_label) self.correct_combo = QComboBox() self.correct_combo.addItems( ["选择正确身份"] + self.face_recognition.dorm_members + ["陌生人", "不在列表中"]) layout.addWidget(self.correct_combo) # 添加按钮 btn_layout = QHBoxLayout() submit_btn = QPushButton("提交反馈") submit_btn.clicked.connect(lambda: self.submit_feedback(dialog)) btn_layout.addWidget(submit_btn) cancel_btn = QPushButton("取消") cancel_btn.clicked.connect(dialog.reject) btn_layout.addWidget(cancel_btn) layout.addLayout(btn_layout) dialog.exec_() def submit_feedback(self, dialog): """提交反馈并更新模型""" selected_index = self.feedback_list.currentRow() if selected_index < 0: QMessageBox.warning(self, "警告", "请选择一个识别结果") return result = self.last_results[selected_index] correct_identity = self.correct_combo.currentText() if correct_identity == "选择正确身份": QMessageBox.warning(self, "警告", "请选择正确身份") return # 保存反馈数据 self.face_recognition.save_feedback( self.current_image.copy(), result["box"], result["label"], correct_identity ) QMessageBox.information(self, "反馈提交", "感谢您的反馈!数据已保存用于改进模型") dialog.accept() def recognize_faces(self, image): """识别人脸并在图像上标注结果""" # 使用人脸识别器进行识别 self.last_results, display_image = self.face_recognition.recognize( image, threshold=self.threshold_slider.value() / 100 ) # 更新结果文本 self.update_results_text() # 显示图像 self.display_image(display_image) def update_results_text(self): """更新结果文本区域""" if not self.last_results: self.results_text.setText("未识别到任何人脸") return # 构建结果文本 result_text = 
"<h3>识别结果:</h3>" for i, result in enumerate(self.last_results, 1): x1, y1, x2, y2 = result["box"] label = result["label"] confidence = result["confidence"] # 处理中文显示问题 if label in self.face_recognition.dorm_members: result_text += ( f"
<p><b>人脸 #{i}:</b> " f"寝室成员 - {label}<br>" f"位置: ({x1}, {y1}), 置信度: {confidence:.2f}</p>" ) else: result_text += ( f"<p><b>人脸 #{i}:</b> " f"陌生人<br>" f"位置: ({x1}, {y1}), 置信度: {confidence:.2f}</p>
" ) self.results_text.setHtml(result_text) # 初始化变量 self.model_loaded = False self.camera_active = False self.video_capture = None self.timer = QTimer() self.current_image = None self.last_results = [] # 存储上次识别结果 # 创建主界面 self.main_widget = QWidget() self.setCentralWidget(self.main_widget) self.layout = QHBoxLayout(self.main_widget) # 左侧控制面板 - 占40%宽度 self.control_panel = QWidget() self.control_layout = QVBoxLayout(self.control_panel) self.control_layout.setAlignment(Qt.AlignTop) self.control_panel.setMaximumWidth(400) self.layout.addWidget(self.control_panel, 40) # 40%宽度 # 右侧图像显示区域 - 占60%宽度 self.image_panel = QWidget() self.image_layout = QVBoxLayout(self.image_panel) self.image_label = QLabel() self.image_label.setAlignment(Qt.AlignCenter) self.image_label.setMinimumSize(800, 600) self.image_label.setStyleSheet("background-color: #333; border: 1px solid #555;") self.image_layout.addWidget(self.image_label) self.layout.addWidget(self.image_panel, 60) # 60%宽度 # 状态栏 - 先初始化状态栏 self.status_bar = self.statusBar() self.status_bar.showMessage("系统初始化中...") # 初始化UI组件 self.init_ui() # 初始化模型 self.init_models() def init_ui(self): """初始化用户界面组件""" # 标题 title_label = QLabel("寝室人脸识别系统") title_label.setFont(QFont("Arial", 18, QFont.Bold)) title_label.setAlignment(Qt.AlignCenter) title_label.setStyleSheet("color: #2c3e50; padding: 10px;") self.control_layout.addWidget(title_label) # 模型加载 model_group = QGroupBox("模型设置") model_layout = QVBoxLayout(model_group) self.load_model_btn = QPushButton("加载模型") self.load_model_btn.setIcon(QIcon.fromTheme("document-open")) self.load_model_btn.setStyleSheet("background-color: #3498db;") self.load_model_btn.clicked.connect(self.load_model) model_layout.addWidget(self.load_model_btn) self.model_status = QLabel("模型状态: 未加载") model_layout.addWidget(self.model_status) self.control_layout.addWidget(model_group) # 识别设置 settings_group = QGroupBox("识别设置") settings_layout = QVBoxLayout(settings_group) # 置信度阈值 threshold_layout = QHBoxLayout() threshold_label = QLabel("置信度阈值:") threshold_layout.addWidget(threshold_label) self.threshold_slider = QSlider(Qt.Horizontal) self.threshold_slider.setRange(0, 100) self.threshold_slider.setValue(70) self.threshold_slider.valueChanged.connect(self.update_threshold) threshold_layout.addWidget(self.threshold_slider) self.threshold_value = QLabel("0.70") threshold_layout.addWidget(self.threshold_value) settings_layout.addLayout(threshold_layout) # 显示选项 display_layout = QHBoxLayout() display_label = QLabel("显示模式:") display_layout.addWidget(display_label) self.display_combo = QComboBox() self.display_combo.addItems(["原始图像", "检测框", "识别结果"]) self.display_combo.setCurrentIndex(2) display_layout.addWidget(self.display_combo) settings_layout.addLayout(display_layout) self.control_layout.addWidget(settings_group) # 识别功能 recognition_group = QGroupBox("识别功能") recognition_layout = QVBoxLayout(recognition_group) # 图片识别 self.image_recognition_btn = QPushButton("图片识别") self.image_recognition_btn.setIcon(QIcon.fromTheme("image-x-generic")) self.image_recognition_btn.setStyleSheet("background-color: #9b59b6;") self.image_recognition_btn.clicked.connect(self.open_image) self.image_recognition_btn.setEnabled(False) recognition_layout.addWidget(self.image_recognition_btn) # 摄像头识别 self.camera_recognition_btn = QPushButton("启动摄像头识别") self.camera_recognition_btn.setIcon(QIcon.fromTheme("camera-web")) self.camera_recognition_btn.setStyleSheet("background-color: #e74c3c;") self.camera_recognition_btn.clicked.connect(self.toggle_camera) 
self.camera_recognition_btn.setEnabled(False) recognition_layout.addWidget(self.camera_recognition_btn) self.control_layout.addWidget(recognition_group) # 结果展示区域 - 使用QTextEdit替代QLabel results_group = QGroupBox("识别结果") results_layout = QVBoxLayout(results_group) self.results_text = QTextEdit() self.results_text.setReadOnly(True) self.results_text.setFont(QFont("Microsoft YaHei", 12)) # 使用支持中文的字体 self.results_text.setStyleSheet("background-color: #f8f9fa; border: 1px solid #ddd; padding: 10px;") self.results_text.setPlaceholderText("识别结果将显示在这里") # 添加滚动区域 scroll_area = QScrollArea() scroll_area.setWidgetResizable(True) scroll_area.setWidget(self.results_text) results_layout.addWidget(scroll_area) self.control_layout.addWidget(results_group, 1) # 占据剩余空间 # 系统信息 info_group = QGroupBox("系统信息") info_layout = QVBoxLayout(info_group) self.device_label = QLabel(f"计算设备: {'GPU' if torch.cuda.is_available() else 'CPU'}") info_layout.addWidget(self.device_label) self.model_info = QLabel("加载模型以显示信息") info_layout.addWidget(self.model_info) self.control_layout.addWidget(info_group) # 退出按钮 exit_btn = QPushButton("退出系统") exit_btn.setIcon(QIcon.fromTheme("application-exit")) exit_btn.clicked.connect(self.close) exit_btn.setStyleSheet("background-color: #ff6b6b; color: white;") self.control_layout.addWidget(exit_btn) def open_feedback_dialog(self): """打开反馈对话框""" # 实现对话框创建逻辑 dialog = FeedbackDialog(self) dialog.exec_() def init_models(self): """初始化模型组件""" # 设置设备 self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.device_label.setText(f"计算设备: {'GPU' if torch.cuda.is_available() else 'CPU'}") # 初始化人脸检测器 try: self.detector = MTCNN( keep_all=True, post_process=False, device=self.device ) self.status_bar.showMessage("MTCNN 检测器初始化完成") logger.info("MTCNN 检测器初始化完成") except Exception as e: self.status_bar.showMessage(f"MTCNN 初始化失败: {str(e)}") logger.error(f"MTCNN 初始化失败: {str(e)}") return # 初始化人脸特征提取器 try: self.embedder = InceptionResnetV1( pretrained='vggface2', classify=False, device=self.device ).eval() self.status_bar.showMessage("FaceNet 特征提取器初始化完成") logger.info("FaceNet 特征提取器初始化完成") except Exception as e: self.status_bar.showMessage(f"FaceNet 初始化失败: {str(e)}") logger.error(f"FaceNet 初始化失败: {str(e)}") def load_model(self): """加载预训练的SVM分类器""" options = QFileDialog.Options() file_path, _ = QFileDialog.getOpenFileName( self, "选择模型文件", "", "模型文件 (*.pkl);;所有文件 (*)", options=options ) if file_path: try: # 加载模型 model_data = joblib.load(file_path) self.classifier = model_data['classifier'] self.label_encoder = model_data['label_encoder'] self.dorm_members = model_data['dorm_members'] # 更新UI状态 self.model_loaded = True self.model_status.setText("模型状态: 已加载") self.model_info.setText(f"寝室成员: {', '.join(self.dorm_members)}") self.image_recognition_btn.setEnabled(True) self.camera_recognition_btn.setEnabled(True) # 状态栏消息 self.status_bar.showMessage(f"模型加载成功: {os.path.basename(file_path)}") # 显示成功消息 QMessageBox.information( self, "模型加载", f"模型加载成功!\n识别成员: {len(self.dorm_members)}人\n置信度阈值: {self.threshold_slider.value() / 100:.2f}" ) except Exception as e: QMessageBox.critical(self, "加载错误", f"模型加载失败: {str(e)}") self.status_bar.showMessage(f"模型加载失败: {str(e)}") def update_threshold(self, value): """更新置信度阈值""" threshold = value / 100 self.threshold_value.setText(f"{threshold:.2f}") self.status_bar.showMessage(f"置信度阈值更新为: {threshold:.2f}") def open_image(self): """打开图片文件进行识别""" if not self.model_loaded: QMessageBox.warning(self, "警告", "请先加载模型!") return options = QFileDialog.Options() file_path, _ = 
QFileDialog.getOpenFileName( self, "选择识别图片", "", "图片文件 (*.jpg *.jpeg *.png);;所有文件 (*)", options=options ) if file_path: # 读取图片 image = cv2.imread(file_path) if image is None: QMessageBox.critical(self, "错误", "无法读取图片文件!") return # 保存当前图片 self.current_image = image.copy() # 进行识别 self.recognize_faces(image) def toggle_camera(self): """切换摄像头状态""" if not self.model_loaded: QMessageBox.warning(self, "警告", "请先加载模型!") return if not self.camera_active: # 尝试打开摄像头 self.video_capture = cv2.VideoCapture(0) if not self.video_capture.isOpened(): QMessageBox.critical(self, "错误", "无法打开摄像头!") return # 启动摄像头 self.camera_active = True self.camera_recognition_btn.setText("停止摄像头识别") self.camera_recognition_btn.setIcon(QIcon.fromTheme("media-playback-stop")) self.timer.timeout.connect(self.process_camera_frame) self.timer.start(30) # 约33 FPS self.status_bar.showMessage("摄像头已启动") else: # 停止摄像头 self.camera_active = False self.camera_recognition_btn.setText("启动摄像头识别") self.camera_recognition_btn.setIcon(QIcon.fromTheme("camera-web")) self.timer.stop() if self.video_capture: self.video_capture.release() self.status_bar.showMessage("摄像头已停止") def process_camera_frame(self): """处理摄像头帧""" ret, frame = self.video_capture.read() if ret: # 保存当前帧 self.current_image = frame.copy() # 进行识别 self.recognize_faces(frame) def recognize_faces(self, image): """识别人脸并在图像上标注结果""" # 清空上次结果 self.last_results = [] # 转换为 PIL 图像 pil_image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) # 检测人脸 boxes, probs, _ = self.detector.detect(pil_image, landmarks=True) # 获取显示选项 display_mode = self.display_combo.currentIndex() # 准备显示图像 display_image = image.copy() # 如果没有检测到人脸 if boxes is None: if display_mode == 2: # 识别结果模式 cv2.putText(display_image, "未检测到人脸", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) self.results_text.setText("未检测到人脸") else: # 提取每个人脸 faces = [] for box in boxes: x1, y1, x2, y2 = box face = pil_image.crop((x1, y1, x2, y2)) faces.append(face) # 提取特征 embeddings = [] if faces and self.model_loaded: # 批量处理所有人脸 face_tensors = [self.preprocess_face(face) for face in faces] if face_tensors: face_tensors = torch.stack(face_tensors).to(self.device) with torch.no_grad(): embeddings = self.embedder(face_tensors).cpu().numpy() # 处理每个人脸 for i, (box, prob) in enumerate(zip(boxes, probs)): x1, y1, x2, y2 = box w, h = x2 - x1, y2 - y1 # 在图像上绘制结果 if display_mode == 0: # 原始图像 # 不绘制任何内容 pass elif display_mode == 1: # 检测框 # 绘制人脸框 cv2.rectangle(display_image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2) elif display_mode == 2: # 识别结果 # 绘制人脸框 color = (0, 255, 0) # 绿色 # 如果有嵌入向量,则进行识别 if i < len(embeddings): # 预测 probabilities = self.classifier.predict_proba([embeddings[i]])[0] max_prob = np.max(probabilities) pred_class = self.classifier.predict([embeddings[i]])[0] pred_label = self.label_encoder.inverse_transform([pred_class])[0] # 获取置信度阈值 threshold = self.threshold_slider.value() / 100 # 判断是否为陌生人 if max_prob < threshold or pred_label == 'stranger': label = "陌生人" color = (0, 0, 255) # 红色 else: label = pred_label color = (0, 255, 0) # 绿色 # 保存结果用于文本显示 result = { "position": (int(x1), int(y1)), "label": label, "confidence": max_prob } self.last_results.append(result) # 绘制标签 cv2.rectangle(display_image, (int(x1), int(y1)), (int(x2), int(y2)), color, 2) cv2.putText(display_image, f"{label} ({max_prob:.2f})", (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2) else: # 无法识别的处理 cv2.rectangle(display_image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 165, 255), 2) cv2.putText(display_image, "处理中...", (int(x1), int(y1) - 
10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 165, 255), 2) # 更新结果文本 self.update_results_text() # 在图像上显示FPS(摄像头模式下) if self.camera_active: fps = self.timer.interval() if fps > 0: cv2.putText(display_image, f"FPS: {1000 / fps:.1f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2) # 显示图像 self.display_image(display_image) def update_results_text(self): """更新结果文本区域""" if not self.last_results: self.results_text.setText("未识别到任何人脸") return # 构建结果文本 result_text = "
<h3>识别结果:</h3>" for i, result in enumerate(self.last_results, 1): x, y = result["position"] label = result["label"] confidence = result["confidence"] # 处理中文显示问题 if label in self.dorm_members: result_text += ( f"<p><b>人脸 #{i}:</b> " f"寝室成员 - {label}<br>" f"位置: ({x}, {y}), 置信度: {confidence:.2f}</p>" ) else: result_text += ( f"<p><b>人脸 #{i}:</b> " f"陌生人<br>" f"位置: ({x}, {y}), 置信度: {confidence:.2f}</p>
" ) self.results_text.setHtml(result_text) def preprocess_face(self, face_img): """预处理人脸图像""" # 调整大小 face_img = face_img.resize((160, 160)) # 转换为张量并归一化 face_img = np.array(face_img).astype(np.float32) / 255.0 face_img = (face_img - 0.5) / 0.5 # 归一化到[-1, 1] face_img = torch.tensor(face_img).permute(2, 0, 1) # HWC to CHW return face_img def display_image(self, image): """在QLabel中显示图像""" # 将OpenCV图像转换为Qt格式 height, width, channel = image.shape bytes_per_line = 3 * width q_img = QImage(image.data, width, height, bytes_per_line, QImage.Format_RGB888).rgbSwapped() # 缩放图像以适应标签 pixmap = QPixmap.fromImage(q_img) self.image_label.setPixmap(pixmap.scaled( self.image_label.width(), self.image_label.height(), Qt.KeepAspectRatio, Qt.SmoothTransformation )) def closeEvent(self, event): """关闭事件处理""" if self.camera_active: self.timer.stop() if self.video_capture: self.video_capture.release() # 确认退出 reply = QMessageBox.question( self, '确认退出', "确定要退出系统吗?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No ) if reply == QMessageBox.Yes: event.accept() else: event.ignore() 帮我改吧,已经不知道改哪里了

import sys import subprocess import zipfile import pkg_resources import requests # 检查并安装缺失的依赖 required = { 'torch', 'torchvision', 'numpy', 'matplotlib', 'tqdm', 'requests', 'pillow', 'scikit-learn', 'pyqt5', 'torchsummary' # 添加torchsummary } installed = {pkg.key for pkg in pkg_resources.working_set} missing = required - installed if missing: print(f"安装缺失的依赖: {', '.join(missing)}") python = sys.executable subprocess.check_call([python, '-m', 'pip', 'install', *missing]) # 现在导入其他模块 import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader, random_split from torchvision import datasets, transforms, models import numpy as np import matplotlib.pyplot as plt import os import shutil from PIL import Image from tqdm import tqdm import matplotlib from matplotlib import font_manager import json from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay # PyQt5相关导入 from PyQt5.QtWidgets import (QApplication, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QLabel, QScrollArea, QFileDialog, QMessageBox, QTextEdit) from PyQt5.QtGui import QPixmap from PyQt5.QtCore import Qt, QObject, pyqtSignal import threading import time # 导入torchsummary from torchsummary import summary # 设置中文字体支持 try: plt.rcParams['font.sans-serif'] = ['SimHei'] plt.rcParams['axes.unicode_minus'] = False except: try: font_url = "https://github.com/googlefonts/noto-cjk/raw/main/Sans/OTF/SimplifiedChinese/NotoSansSC-Regular.otf" font_path = "NotoSansSC-Regular.otf" if not os.path.exists(font_path): response = requests.get(font_url) with open(font_path, 'wb') as f: f.write(response.content) font_prop = font_manager.FontProperties(fname=font_path) plt.rcParams['font.family'] = font_prop.get_name() except: print("警告: 无法设置中文字体") matplotlib.use('Agg') # 第二部分:下载并设置数据集 def download_and_extract_dataset(): base_dir = "data" data_path = os.path.join(base_dir, "dogs-vs-cats") train_folder = os.path.join(data_path, 'train') test_folder = os.path.join(data_path, 'test') os.makedirs(train_folder, exist_ok=True) os.makedirs(test_folder, exist_ok=True) # 检查数据集是否完整 cat_files = [f for f in os.listdir(train_folder) if f.startswith('cat')] dog_files = [f for f in os.listdir(train_folder) if f.startswith('dog')] if len(cat_files) > 1000 and len(dog_files) > 1000: print("数据集已存在,跳过下载") return print("正在下载数据集...") dataset_url = "https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_5340.zip" try: zip_path = os.path.join(base_dir, "catsdogs.zip") # 下载文件 if not os.path.exists(zip_path): response = requests.get(dataset_url, stream=True) total_size = int(response.headers.get('content-length', 0)) with open(zip_path, 'wb') as f, tqdm( desc="下载进度", total=total_size, unit='B', unit_scale=True, unit_divisor=1024, ) as bar: for data in response.iter_content(chunk_size=1024): size = f.write(data) bar.update(size) print("下载完成,正在解压...") # 解压文件 with zipfile.ZipFile(zip_path, 'r') as zip_ref: zip_ref.extractall(base_dir) print("数据集解压完成!") # 移动文件 extracted_dir = os.path.join(base_dir, "PetImages") # 移动猫图片 cat_source = os.path.join(extracted_dir, "Cat") for file in os.listdir(cat_source): src = os.path.join(cat_source, file) dst = os.path.join(train_folder, f"cat.{file}") if os.path.exists(src) and not os.path.exists(dst): shutil.move(src, dst) # 移动狗图片 dog_source = os.path.join(extracted_dir, "Dog") for file in os.listdir(dog_source): src = os.path.join(dog_source, file) dst 
= os.path.join(train_folder, f"dog.{file}") if os.path.exists(src) and not os.path.exists(dst): shutil.move(src, dst) # 创建测试集(从训练集中抽取20%) train_files = os.listdir(train_folder) np.random.seed(42) test_files = np.random.choice(train_files, size=int(len(train_files) * 0.2), replace=False) for file in test_files: src = os.path.join(train_folder, file) dst = os.path.join(test_folder, file) if os.path.exists(src) and not os.path.exists(dst): shutil.move(src, dst) # 清理临时文件 if os.path.exists(extracted_dir): shutil.rmtree(extracted_dir) if os.path.exists(zip_path): os.remove(zip_path) print( f"数据集设置完成!训练集: {len(os.listdir(train_folder))} 张图片, 测试集: {len(os.listdir(test_folder))} 张图片") except Exception as e: print(f"下载或设置数据集时出错: {str(e)}") print("请手动下载数据集并解压到 data/dogs-vs-cats 目录") print("下载地址: https://www.microsoft.com/en-us/download/details.aspx?id=54765") # 下载并解压数据集 download_and_extract_dataset() # 第三部分:自定义数据集 class DogsVSCats(Dataset): def __init__(self, data_dir, transform=None): self.image_paths = [] self.labels = [] for file in os.listdir(data_dir): if file.lower().endswith(('.png', '.jpg', '.jpeg')): img_path = os.path.join(data_dir, file) try: # 验证图片完整性 with Image.open(img_path) as img: img.verify() self.image_paths.append(img_path) # 根据文件名设置标签 if file.startswith('cat'): self.labels.append(0) elif file.startswith('dog'): self.labels.append(1) else: # 对于无法识别的文件,默认设为猫 self.labels.append(0) except (IOError, SyntaxError) as e: print(f"跳过损坏图片: {img_path} - {str(e)}") if not self.image_paths: print(f"错误: 在 {data_dir} 中没有找到有效图片!") for i in range(10): img_path = os.path.join(data_dir, f"example_{i}.jpg") img = Image.new('RGB', (224, 224), color=(i * 25, i * 25, i * 25)) img.save(img_path) self.image_paths.append(img_path) self.labels.append(0 if i % 2 == 0 else 1) print(f"已创建 {len(self.image_paths)} 个示例图片") self.transform = transform or transforms.Compose([ transforms.Resize((150, 150)), # 修改为150x150以匹配CNN输入 transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def __len__(self): return len(self.image_paths) def __getitem__(self, idx): try: image = Image.open(self.image_paths[idx]).convert('RGB') except Exception as e: print(f"无法加载图片: {self.image_paths[idx]}, 使用占位符 - {str(e)}") image = Image.new('RGB', (150, 150), color=(100, 100, 100)) image = self.transform(image) label = torch.tensor(self.labels[idx], dtype=torch.long) return image, label # 第六部分:定义自定义CNN模型(添加额外的Dropout层) class CatDogCNN(nn.Module): def __init__(self): super(CatDogCNN, self).__init__() # 卷积层1: 输入3通道(RGB), 输出32通道, 卷积核3x3 self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1) # 卷积层2: 输入32通道, 输出64通道, 卷积核3x3 self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1) # 卷积层3: 输入64通道, 输出128通道, 卷积核3x3 self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1) # 卷积层4: 输入128通道, 输出256通道, 卷积核3x3 self.conv4 = nn.Conv2d(128, 256, kernel_size=3, padding=1) # 最大池化层 self.pool = nn.MaxPool2d(2, 2) # 全连接层 self.fc1 = nn.Linear(256 * 9 * 9, 512) # 输入尺寸计算: 150 -> 75 -> 37 -> 18 -> 9 self.fc2 = nn.Linear(512, 2) # 输出2个类别 (猫和狗) # Dropout防止过拟合(添加额外的Dropout层) self.dropout1 = nn.Dropout(0.5) # 第一个Dropout层 self.dropout2 = nn.Dropout(0.5) # 新添加的第二个Dropout层 def forward(self, x): # 卷积层1 + ReLU + 池化 x = self.pool(F.relu(self.conv1(x))) # 卷积层2 + ReLU + 池化 x = self.pool(F.relu(self.conv2(x))) # 卷积层3 + ReLU + 池化 x = self.pool(F.relu(self.conv3(x))) # 卷积层4 + ReLU + 池化 x = self.pool(F.relu(self.conv4(x))) # 展平特征图 x = x.view(-1, 256 * 9 * 9) # 全连接层 + Dropout x = 
self.dropout1(F.relu(self.fc1(x))) # 添加第二个Dropout层 x = self.dropout2(x) # 输出层 x = self.fc2(x) return x # 第七部分:模型训练和可视化 class Trainer: def __init__(self, model, train_loader, val_loader): self.train_loader = train_loader self.val_loader = val_loader self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(f"使用设备: {self.device}") self.model = model.to(self.device) self.optimizer = optim.Adam(self.model.parameters(), lr=0.001) self.criterion = nn.CrossEntropyLoss() # 使用兼容性更好的调度器设置(移除了 verbose 参数) self.scheduler = optim.lr_scheduler.ReduceLROnPlateau( self.optimizer, mode='max', factor=0.1, patience=2) # 记录指标 self.train_losses = [] self.train_accuracies = [] self.val_losses = [] self.val_accuracies = [] def train(self, num_epochs): best_accuracy = 0.0 for epoch in range(num_epochs): # 训练阶段 self.model.train() running_loss = 0.0 correct = 0 total = 0 train_bar = tqdm(self.train_loader, desc=f"Epoch {epoch + 1}/{num_epochs} [训练]") for images, labels in train_bar: images, labels = images.to(self.device), labels.to(self.device) self.optimizer.zero_grad() outputs = self.model(images) loss = self.criterion(outputs, labels) loss.backward() self.optimizer.step() running_loss += loss.item() * images.size(0) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() train_loss = running_loss / total train_acc = correct / total train_bar.set_postfix(loss=train_loss, acc=train_acc) # 计算训练指标 epoch_train_loss = running_loss / total epoch_train_acc = correct / total self.train_losses.append(epoch_train_loss) self.train_accuracies.append(epoch_train_acc) # 验证阶段 val_loss, val_acc = self.validate() self.val_losses.append(val_loss) self.val_accuracies.append(val_acc) # 更新学习率 self.scheduler.step(val_acc) # 保存最佳模型 if val_acc > best_accuracy: best_accuracy = val_acc torch.save(self.model.state_dict(), 'best_cnn_model.pth') print(f"保存最佳模型,验证准确率: {best_accuracy:.4f}") # 打印epoch结果 print(f"Epoch {epoch + 1}/{num_epochs} | " f"训练损失: {epoch_train_loss:.4f} | 训练准确率: {epoch_train_acc:.4f} | " f"验证损失: {val_loss:.4f} | 验证准确率: {val_acc:.4f}") # 训练完成后可视化结果 self.visualize_training_results() def validate(self): self.model.eval() running_loss = 0.0 correct = 0 total = 0 with torch.no_grad(): val_bar = tqdm(self.val_loader, desc="[验证]") for images, labels in val_bar: images, labels = images.to(self.device), labels.to(self.device) outputs = self.model(images) loss = self.criterion(outputs, labels) running_loss += loss.item() * images.size(0) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() val_loss = running_loss / total val_acc = correct / total val_bar.set_postfix(loss=val_loss, acc=val_acc) return running_loss / total, correct / total def visualize_training_results(self): """可视化训练和验证的准确率与损失""" epochs = range(1, len(self.train_accuracies) + 1) # 创建准确率图表 plt.figure(figsize=(12, 6)) plt.subplot(1, 2, 1) plt.plot(epochs, self.train_accuracies, 'bo-', label='训练准确率') plt.plot(epochs, self.val_accuracies, 'ro-', label='验证准确率') plt.title('训练和验证准确率') plt.xlabel('Epoch') plt.ylabel('准确率') plt.legend() plt.grid(True) # 创建损失图表 plt.subplot(1, 2, 2) plt.plot(epochs, self.train_losses, 'bo-', label='训练损失') plt.plot(epochs, self.val_losses, 'ro-', label='验证损失') plt.title('训练和验证损失') plt.xlabel('Epoch') plt.ylabel('损失') plt.legend() plt.grid(True) plt.tight_layout() plt.savefig('training_visualization.png') print("训练结果可视化图表已保存为 training_visualization.png") # 单独保存准确率图表 plt.figure(figsize=(8, 6)) 
plt.plot(epochs, self.train_accuracies, 'bo-', label='训练准确率') plt.plot(epochs, self.val_accuracies, 'ro-', label='验证准确率') plt.title('训练和验证准确率') plt.xlabel('Epoch') plt.ylabel('准确率') plt.legend() plt.grid(True) plt.savefig('accuracy_curve.png') print("准确率曲线已保存为 accuracy_curve.png") # 单独保存损失图表 plt.figure(figsize=(8, 6)) plt.plot(epochs, self.train_losses, 'bo-', label='训练损失') plt.plot(epochs, self.val_losses, 'ro-', label='验证损失') plt.title('训练和验证损失') plt.xlabel('Epoch') plt.ylabel('损失') plt.legend() plt.grid(True) plt.savefig('loss_curve.png') print("损失曲线已保存为 loss_curve.png") # 保存训练结果 results = { 'epochs': list(epochs), 'train_losses': self.train_losses, 'train_accuracies': self.train_accuracies, 'val_losses': self.val_losses, 'val_accuracies': self.val_accuracies } with open('training_results.json', 'w') as f: json.dump(results, f) print("训练结果已保存为 training_results.json") # 图像处理类 class ImageProcessor(QObject): result_signal = pyqtSignal(str, str) # 信号:filename, result def __init__(self, model, device, filename): super().__init__() self.model = model self.device = device self.filename = filename self.transform = transforms.Compose([ transforms.Resize((150, 150)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def process_image(self): try: # 加载图像 image = Image.open(self.filename).convert('RGB') image_tensor = self.transform(image).unsqueeze(0).to(self.device) # 模型预测 self.model.eval() with torch.no_grad(): output = self.model(image_tensor) probabilities = F.softmax(output, dim=1) _, predicted = torch.max(output, 1) # 获取猫和狗的置信度 cat_prob = probabilities[0][0].item() dog_prob = probabilities[0][1].item() # 确定结果和置信度 result = "猫" if predicted.item() == 0 else "狗" confidence = cat_prob if result == "猫" else dog_prob # 格式化输出结果 formatted_result = f"{result} ({confidence * 100:.1f}%置信度)" self.result_signal.emit(self.filename, formatted_result) except Exception as e: self.result_signal.emit(self.filename, f"处理错误: {str(e)}") # 主应用窗口 class CatDogClassifierApp(QWidget): def __init__(self, model, device): super().__init__() self.setWindowTitle("猫狗识别系统") self.setGeometry(100, 100, 1000, 700) self.model = model self.device = device self.initUI() self.image_processors = [] def initUI(self): # 主布局 main_layout = QVBoxLayout() # 标题 title = QLabel("猫狗识别系统") title.setAlignment(Qt.AlignCenter) title.setStyleSheet("font-size: 24px; font-weight: bold; margin: 10px;") main_layout.addWidget(title) # 按钮区域 button_layout = QHBoxLayout() self.upload_button = QPushButton("上传图像") self.upload_button.setStyleSheet("font-size: 16px; padding: 10px;") self.upload_button.clicked.connect(self.uploadImage) button_layout.addWidget(self.upload_button) self.batch_process_button = QPushButton("批量处理") self.batch_process_button.setStyleSheet("font-size: 16px; padding: 10px;") self.batch_process_button.clicked.connect(self.batchProcess) button_layout.addWidget(self.batch_process_button) self.clear_button = QPushButton("清除所有") self.clear_button.setStyleSheet("font-size: 16px; padding: 10px;") self.clear_button.clicked.connect(self.clearAll) button_layout.addWidget(self.clear_button) self.results_button = QPushButton("查看训练结果") self.results_button.setStyleSheet("font-size: 16px; padding: 10px;") self.results_button.clicked.connect(self.showTrainingResults) button_layout.addWidget(self.results_button) # 添加查看模型结构按钮 self.model_summary_button = QPushButton("查看模型结构") self.model_summary_button.setStyleSheet("font-size: 16px; padding: 10px;") 
self.model_summary_button.clicked.connect(self.showModelSummary) button_layout.addWidget(self.model_summary_button) main_layout.addLayout(button_layout) # 状态标签 self.status_label = QLabel("就绪") self.status_label.setStyleSheet("font-size: 14px; color: #666; margin: 5px;") main_layout.addWidget(self.status_label) # 图像预览区域 self.preview_area = QScrollArea() self.preview_area.setWidgetResizable(True) self.preview_area.setStyleSheet("background-color: #f0f0f0;") self.preview_widget = QWidget() self.preview_layout = QHBoxLayout() self.preview_layout.setAlignment(Qt.AlignTop | Qt.AlignLeft) self.preview_widget.setLayout(self.preview_layout) self.preview_area.setWidget(self.preview_widget) main_layout.addWidget(self.preview_area) # 底部信息 info_label = QLabel("基于卷积神经网络(CNN)的猫狗识别系统 | 支持上传单张或多张图片") info_label.setAlignment(Qt.AlignCenter) info_label.setStyleSheet("font-size: 12px; color: #888; margin: 10px;") main_layout.addWidget(info_label) self.setLayout(main_layout) def uploadImage(self): self.status_label.setText("正在选择图像...") filename, _ = QFileDialog.getOpenFileName( self, "选择图像", "", "图像文件 (*.png *.jpg *.jpeg)" ) if filename: self.status_label.setText(f"正在处理: {os.path.basename(filename)}") self.displayImage(filename) def batchProcess(self): self.status_label.setText("正在选择多张图像...") filenames, _ = QFileDialog.getOpenFileNames( self, "选择多张图像", "", "图像文件 (*.png *.jpg *.jpeg)" ) if filenames: self.status_label.setText(f"正在批量处理 {len(filenames)} 张图像...") for filename in filenames: self.displayImage(filename) def displayImage(self, filename): if not os.path.isfile(filename): QMessageBox.warning(self, "警告", "文件路径不安全或文件不存在") self.status_label.setText("错误: 文件不存在") return # 检查是否已存在相同文件 for i in reversed(range(self.preview_layout.count())): item = self.preview_layout.itemAt(i) if item.widget() and item.widget().objectName().startswith(f"container_{filename}"): widget_to_remove = item.widget() self.preview_layout.removeWidget(widget_to_remove) widget_to_remove.deleteLater() # 创建图像容器 container = QWidget() container.setObjectName(f"container_{filename}") container.setStyleSheet(""" background-color: white; border: 1px solid #ddd; border-radius: 5px; padding: 10px; margin: 5px; """) container.setFixedSize(300, 350) container_layout = QVBoxLayout(container) container_layout.setContentsMargins(5, 5, 5, 5) container_layout.setSpacing(5) # 显示文件名 filename_label = QLabel(os.path.basename(filename)) filename_label.setStyleSheet("font-size: 12px; color: #555;") filename_label.setAlignment(Qt.AlignCenter) container_layout.addWidget(filename_label) # 图像预览 pixmap = QPixmap(filename) if pixmap.width() > 280 or pixmap.height() > 200: pixmap = pixmap.scaled(280, 200, Qt.KeepAspectRatio, Qt.SmoothTransformation) preview_label = QLabel(container) preview_label.setPixmap(pixmap) preview_label.setAlignment(Qt.AlignCenter) preview_label.setFixedSize(280, 200) preview_label.setStyleSheet("border: 1px solid #eee;") container_layout.addWidget(preview_label) # 结果标签 result_label = QLabel("识别中...", container) result_label.setObjectName(f"result_{filename}") result_label.setAlignment(Qt.AlignCenter) result_label.setStyleSheet("font-size: 16px; font-weight: bold; padding: 5px;") container_layout.addWidget(result_label) # 删除按钮 delete_button = QPushButton("删除", container) delete_button.setObjectName(f"button_{filename}") delete_button.setStyleSheet(""" QPushButton { background-color: #ff6b6b; color: white; border: none; border-radius: 3px; padding: 5px; } QPushButton:hover { background-color: #ff5252; } """) 
delete_button.clicked.connect(lambda _, fn=filename: self.deleteImage(fn)) container_layout.addWidget(delete_button) # 添加到预览区域 self.preview_layout.addWidget(container) # 创建并启动图像处理线程 processor = ImageProcessor(self.model, self.device, filename) processor.result_signal.connect(self.updateUIWithResult) threading.Thread(target=processor.process_image).start() self.image_processors.append(processor) # 限制最大处理数量 if self.preview_layout.count() > 20: QMessageBox.warning(self, "警告", "最多只能同时处理20张图像") self.image_processors.clear() def deleteImage(self, filename): container_name = f"container_{filename}" container = self.findChild(QWidget, container_name) if container: self.preview_layout.removeWidget(container) container.deleteLater() self.status_label.setText(f"已删除: {os.path.basename(filename)}") def updateUIWithResult(self, filename, result): container = self.findChild(QWidget, f"container_{filename}") if container: result_label = container.findChild(QLabel, f"result_{filename}") if result_label: # 根据结果设置颜色 if "猫" in result: result_label.setStyleSheet("color: #1a73e8; font-size: 16px; font-weight: bold;") elif "狗" in result: result_label.setStyleSheet("color: #e91e63; font-size: 16px; font-weight: bold;") else: result_label.setStyleSheet("color: #f57c00; font-size: 16px; font-weight: bold;") result_label.setText(result) self.status_label.setText(f"完成识别: {os.path.basename(filename)} -> {result}") def clearAll(self): # 删除所有图像容器 while self.preview_layout.count(): item = self.preview_layout.takeAt(0) widget = item.widget() if widget is not None: widget.deleteLater() self.image_processors = [] self.status_label.setText("已清除所有图像") def showTrainingResults(self): """显示训练结果可视化图表""" if not os.path.exists('training_visualization.png'): QMessageBox.information(self, "提示", "训练结果可视化图表尚未生成") return try: # 创建结果展示窗口 results_window = QWidget() results_window.setWindowTitle("训练结果可视化") results_window.setGeometry(200, 200, 1200, 800) layout = QVBoxLayout() # 标题 title = QLabel("模型训练结果可视化") title.setStyleSheet("font-size: 20px; font-weight: bold; margin: 10px;") title.setAlignment(Qt.AlignCenter) layout.addWidget(title) # 综合图表 layout.addWidget(QLabel("训练和验证准确率/损失:")) pixmap1 = QPixmap('training_visualization.png') label1 = QLabel() label1.setPixmap(pixmap1.scaled(1000, 500, Qt.KeepAspectRatio, Qt.SmoothTransformation)) layout.addWidget(label1) # 水平布局用于两个图表 h_layout = QHBoxLayout() # 准确率图表 vbox1 = QVBoxLayout() vbox1.addWidget(QLabel("准确率曲线:")) pixmap2 = QPixmap('accuracy_curve.png') label2 = QLabel() label2.setPixmap(pixmap2.scaled(450, 350, Qt.KeepAspectRatio, Qt.SmoothTransformation)) vbox1.addWidget(label2) h_layout.addLayout(vbox1) # 损失图表 vbox2 = QVBoxLayout() vbox2.addWidget(QLabel("损失曲线:")) pixmap3 = QPixmap('loss_curve.png') label3 = QLabel() label3.setPixmap(pixmap3.scaled(450, 350, Qt.KeepAspectRatio, Qt.SmoothTransformation)) vbox2.addWidget(label3) h_layout.addLayout(vbox2) layout.addLayout(h_layout) # 关闭按钮 close_button = QPushButton("关闭") close_button.setStyleSheet("font-size: 16px; padding: 8px;") close_button.clicked.connect(results_window.close) layout.addWidget(close_button, alignment=Qt.AlignCenter) results_window.setLayout(layout) results_window.show() except Exception as e: QMessageBox.critical(self, "错误", f"加载训练结果时出错: {str(e)}") def showModelSummary(self): """显示模型结构摘要""" # 创建摘要展示窗口 summary_window = QWidget() summary_window.setWindowTitle("模型结构摘要") summary_window.setGeometry(200, 200, 800, 600) layout = QVBoxLayout() # 标题 title = QLabel("模型各层参数状况") title.setStyleSheet("font-size: 20px; font-weight: 
bold; margin: 10px;") title.setAlignment(Qt.AlignCenter) layout.addWidget(title) # 创建文本编辑框显示摘要 summary_text = QTextEdit() summary_text.setReadOnly(True) summary_text.setStyleSheet("font-family: monospace; font-size: 12px;") # 获取模型摘要 try: # 使用StringIO捕获summary的输出 from io import StringIO import sys # 重定向标准输出 original_stdout = sys.stdout sys.stdout = StringIO() # 生成模型摘要 summary(self.model, input_size=(3, 150, 150), device=self.device.type) # 获取捕获的输出 summary_output = sys.stdout.getvalue() # 恢复标准输出 sys.stdout = original_stdout # 显示摘要 summary_text.setPlainText(summary_output) except Exception as e: summary_text.setPlainText(f"生成模型摘要时出错: {str(e)}") layout.addWidget(summary_text) # 关闭按钮 close_button = QPushButton("关闭") close_button.setStyleSheet("font-size: 16px; padding: 8px;") close_button.clicked.connect(summary_window.close) layout.addWidget(close_button, alignment=Qt.AlignCenter) summary_window.setLayout(layout) summary_window.show() # 程序入口点 if __name__ == "__main__": # 设置数据集路径 data_path = os.path.join("data", "dogs-vs-cats") train_folder = os.path.join(data_path, 'train') test_folder = os.path.join(data_path, 'test') # 检查是否已有训练好的模型 model_path = "catdog_cnn_model_with_extra_dropout.pth" # 修改模型名称以反映更改 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(f"使用设备: {device}") # 创建模型实例(使用添加了额外Dropout层的新模型) model = CatDogCNN() if os.path.exists(model_path): print("加载已训练的模型...") model.load_state_dict(torch.load(model_path, map_location=device)) model = model.to(device) model.eval() print("模型加载完成") else: print("未找到训练好的模型,开始训练新模型...") # 创建完整训练集和测试集(使用数据增强) # 训练集使用增强后的transform train_transform = transforms.Compose([ transforms.RandomRotation(15), # 随机旋转15度 transforms.RandomHorizontalFlip(), # 随机水平翻转 transforms.Resize((150, 150)), transforms.ColorJitter(brightness=0.2, contrast=0.2), # 随机调整亮度和对比度 transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) # 验证集和测试集使用基础transform(不需要增强) base_transform = transforms.Compose([ transforms.Resize((150, 150)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) full_train_dataset = DogsVSCats(train_folder, transform=train_transform) test_dataset = DogsVSCats(test_folder, transform=base_transform) # 划分训练集和验证集 (80% 训练, 20% 验证) train_size = int(0.8 * len(full_train_dataset)) val_size = len(full_train_dataset) - train_size gen = torch.Generator().manual_seed(42) train_dataset, val_dataset = random_split( full_train_dataset, [train_size, val_size], generator=gen ) # 创建数据加载器 batch_size = 32 train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=0) val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=0) test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=0) # 训练模型 trainer = Trainer(model, train_loader, val_loader) num_epochs = 15 print(f"开始训练(带额外Dropout层和数据增强),共 {num_epochs} 个epoch...") trainer.train(num_epochs) # 保存最终模型 torch.save(model.state_dict(), model_path) print(f"模型已保存为 {model_path}") # 输出模型各层的参数状况 print("\n模型各层参数状况:") summary(model, input_size=(3, 150, 150), device=device.type) # 启动应用程序 app = QApplication(sys.argv) window = CatDogClassifierApp(model, device) window.show() sys.exit(app.exec_())对此代码进行优化


dorm_face_recognition_gui.py代码如下: import pickle import sys import os import cv2 import numpy as np import torch from PyQt5.QtWidgets import QListWidget, QProgressDialog from facenet_pytorch import MTCNN, InceptionResnetV1 from PIL import Image from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QLabel, QFileDialog, QComboBox, QSlider, QMessageBox, QTextEdit, QGroupBox, QScrollArea, QDialog, QDialogButtonBox, QTableWidget, QTableWidgetItem, QHeaderView, QGridLayout) from PyQt5.QtCore import Qt, QTimer from PyQt5.QtGui import QImage, QPixmap, QIcon, QFont, QColor import joblib import logging import json from datetime import datetime 在 dorm_face_recognition_gui.py 顶部添加导入 from face_recognition import FaceRecognition 配置日志 logging.basicConfig(level=logging.INFO, format=‘%(asctime)s - %(levelname)s - %(message)s’) logger = logging.getLogger(name) class FeedbackDialog(QDialog): “”“反馈对话框”“” def __init__(self, parent=None, last_results=None, dorm_members=None): super().__init__(parent) self.setWindowTitle("识别错误反馈") self.setFixedSize(500, 400) self.last_results = last_results or [] self.dorm_members = dorm_members or [] self.init_ui() def init_ui(self): layout = QVBoxLayout(self) # 添加当前识别结果 result_label = QLabel("当前识别结果:") layout.addWidget(result_label) # 使用表格显示结果 self.results_table = QTableWidget() self.results_table.setColumnCount(4) self.results_table.setHorizontalHeaderLabels(["ID", "识别结果", "置信度", "位置和大小"]) self.results_table.setSelectionBehavior(QTableWidget.SelectRows) self.results_table.setEditTriggers(QTableWidget.NoEditTriggers) # 填充表格数据 self.results_table.setRowCount(len(self.last_results)) for i, result in enumerate(self.last_results): x, y, w, h = result["box"] self.results_table.setItem(i, 0, QTableWidgetItem(str(i + 1))) self.results_table.setItem(i, 1, QTableWidgetItem(result["label"])) self.results_table.setItem(i, 2, QTableWidgetItem(f"{result['confidence']:.2f}")) self.results_table.setItem(i, 3, QTableWidgetItem(f"({x}, {y}) - {w}x{h}")) # 设置表格样式 self.results_table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch) self.results_table.verticalHeader().setVisible(False) layout.addWidget(self.results_table) # 添加正确身份选择 correct_layout = QGridLayout() correct_label = QLabel("正确身份:") correct_layout.addWidget(correct_label, 0, 0) self.correct_combo = QComboBox() self.correct_combo.addItem("选择正确身份", None) for member in self.dorm_members: self.correct_combo.addItem(member, member) self.correct_combo.addItem("陌生人", "stranger") self.correct_combo.addItem("不在列表中", "unknown") correct_layout.addWidget(self.correct_combo, 0, 1) # 添加备注 note_label = QLabel("备注:") correct_layout.addWidget(note_label, 1, 0) self.note_text = QTextEdit() self.note_text.setPlaceholderText("可添加额外说明...") self.note_text.setMaximumHeight(60) correct_layout.addWidget(self.note_text, 1, 1) layout.addLayout(correct_layout) # 添加按钮 button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel) button_box.accepted.connect(self.accept) button_box.rejected.connect(self.reject) layout.addWidget(button_box) def get_selected_result(self): """获取选择的识别结果""" selected_row = self.results_table.currentRow() if selected_row >= 0 and selected_row < len(self.last_results): return self.last_results[selected_row] return None def get_feedback_data(self): """获取反馈数据""" selected_result = self.get_selected_result() if not selected_result: return None return { "timestamp": datetime.now().isoformat(), "original_label": selected_result["label"], "correct_label": 
self.correct_combo.currentData(), "confidence": selected_result["confidence"], "box": selected_result["box"], # 保存完整的框信息 "note": self.note_text.toPlainText().strip() } class FaceRecognitionSystem(QMainWindow): def init(self): super().init() self.setWindowTitle(“寝室人脸识别系统”) self.setGeometry(100, 100, 1200, 800) # 初始化变量 self.model_loaded = False self.camera_active = False self.video_capture = None self.timer = QTimer() self.current_image = None self.last_results = [] # 存储上次识别结果 self.dorm_members = [] # 寝室成员列表 # 创建主界面 self.main_widget = QWidget() self.setCentralWidget(self.main_widget) self.layout = QHBoxLayout(self.main_widget) # 左侧控制面板 - 占40%宽度 self.control_panel = QWidget() self.control_layout = QVBoxLayout(self.control_panel) self.control_layout.setAlignment(Qt.AlignTop) self.control_panel.setMaximumWidth(400) self.layout.addWidget(self.control_panel, 40) # 40%宽度 # 右侧图像显示区域 - 占60%宽度 self.image_panel = QWidget() self.image_layout = QVBoxLayout(self.image_panel) self.image_label = QLabel() self.image_label.setAlignment(Qt.AlignCenter) self.image_label.setMinimumSize(800, 600) self.image_label.setStyleSheet("background-color: #333; border: 1px solid #555;") self.image_layout.addWidget(self.image_label) self.layout.addWidget(self.image_panel, 60) # 60%宽度 # 状态栏 self.status_bar = self.statusBar() self.status_bar.showMessage("系统初始化中...") # 初始化人脸识别器 - 关键修复 self.face_recognition = FaceRecognition() # 初始化UI组件 self.init_ui() # 添加工具栏(必须在UI初始化后) self.toolbar = self.addToolBar('工具栏') # 添加反馈按钮 self.add_feedback_button() # 初始化模型 self.init_models() def init_ui(self): """初始化用户界面组件""" # 标题 title_label = QLabel("寝室人脸识别系统") title_label.setFont(QFont("Arial", 18, QFont.Bold)) title_label.setAlignment(Qt.AlignCenter) title_label.setStyleSheet("color: #2c3e50; padding: 10px;") self.control_layout.addWidget(title_label) # 模型加载 model_group = QGroupBox("模型设置") model_layout = QVBoxLayout(model_group) self.load_model_btn = QPushButton("加载模型") self.load_model_btn.setIcon(QIcon.fromTheme("document-open")) self.load_model_btn.setStyleSheet("background-color: #3498db;") self.load_model_btn.clicked.connect(self.load_model) model_layout.addWidget(self.load_model_btn) self.model_status = QLabel("模型状态: 未加载") model_layout.addWidget(self.model_status) self.control_layout.addWidget(model_group) # 在模型设置部分添加重新训练按钮 self.retrain_btn = QPushButton("重新训练模型") self.retrain_btn.setIcon(QIcon.fromTheme("view-refresh")) self.retrain_btn.setStyleSheet("background-color: #f39c12;") self.retrain_btn.clicked.connect(self.retrain_model) self.retrain_btn.setEnabled(False) # 初始不可用 model_layout.addWidget(self.retrain_btn) # 识别设置 settings_group = QGroupBox("识别设置") settings_layout = QVBoxLayout(settings_group) # 置信度阈值 threshold_layout = QHBoxLayout() threshold_label = QLabel("置信度阈值:") threshold_layout.addWidget(threshold_label) self.threshold_slider = QSlider(Qt.Horizontal) self.threshold_slider.setRange(0, 100) self.threshold_slider.setValue(70) self.threshold_slider.valueChanged.connect(self.update_threshold) threshold_layout.addWidget(self.threshold_slider) self.threshold_value = QLabel("0.70") threshold_layout.addWidget(self.threshold_value) settings_layout.addLayout(threshold_layout) # 显示选项 display_layout = QHBoxLayout() display_label = QLabel("显示模式:") display_layout.addWidget(display_label) self.display_combo = QComboBox() self.display_combo.addItems(["原始图像", "检测框", "识别结果"]) self.display_combo.setCurrentIndex(2) display_layout.addWidget(self.display_combo) settings_layout.addLayout(display_layout) self.control_layout.addWidget(settings_group) 
# 识别功能 recognition_group = QGroupBox("识别功能") recognition_layout = QVBoxLayout(recognition_group) # 图片识别 self.image_recognition_btn = QPushButton("图片识别") self.image_recognition_btn.setIcon(QIcon.fromTheme("image-x-generic")) self.image_recognition_btn.setStyleSheet("background-color: #9b59b6;") self.image_recognition_btn.clicked.connect(self.open_image) self.image_recognition_btn.setEnabled(False) recognition_layout.addWidget(self.image_recognition_btn) # 摄像头识别 self.camera_recognition_btn = QPushButton("启动摄像头识别") self.camera_recognition_btn.setIcon(QIcon.fromTheme("camera-web")) self.camera_recognition_btn.setStyleSheet("background-color: #e74c3c;") self.camera_recognition_btn.clicked.connect(self.toggle_camera) self.camera_recognition_btn.setEnabled(False) recognition_layout.addWidget(self.camera_recognition_btn) self.control_layout.addWidget(recognition_group) # 结果展示区域 - 使用QTextEdit替代QLabel results_group = QGroupBox("识别结果") results_layout = QVBoxLayout(results_group) self.results_text = QTextEdit() self.results_text.setReadOnly(True) self.results_text.setFont(QFont("Microsoft YaHei", 12)) # 使用支持中文的字体 self.results_text.setStyleSheet("background-color: #f8f9fa; border: 1px solid #ddd; padding: 10px;") self.results_text.setPlaceholderText("识别结果将显示在这里") # 添加滚动区域 scroll_area = QScrollArea() scroll_area.setWidgetResizable(True) scroll_area.setWidget(self.results_text) results_layout.addWidget(scroll_area) self.control_layout.addWidget(results_group, 1) # 占据剩余空间 # 系统信息 info_group = QGroupBox("系统信息") info_layout = QVBoxLayout(info_group) self.device_label = QLabel(f"计算设备: {'GPU' if torch.cuda.is_available() else 'CPU'}") info_layout.addWidget(self.device_label) self.model_info = QLabel("加载模型以显示信息") info_layout.addWidget(self.model_info) self.control_layout.addWidget(info_group) # 退出按钮 exit_btn = QPushButton("退出系统") exit_btn.setIcon(QIcon.fromTheme("application-exit")) exit_btn.clicked.connect(self.close) exit_btn.setStyleSheet("background-color: #ff6b6b; color: white;") self.control_layout.addWidget(exit_btn) def add_feedback_button(self): """添加反馈按钮到界面""" # 创建反馈按钮 self.feedback_button = QPushButton("提供反馈", self) self.feedback_button.setFixedSize(120, 40) # 设置固定大小 self.feedback_button.setStyleSheet( "QPushButton {" " background-color: #4CAF50;" " color: white;" " border-radius: 5px;" " font-weight: bold;" "}" "QPushButton:hover {" " background-color: #45a049;" "}" ) # 连接按钮点击事件 self.feedback_button.clicked.connect(self.open_feedback_dialog) # 添加到工具栏 self.toolbar.addWidget(self.feedback_button) def open_feedback_dialog(self): """打开反馈对话框""" if not self.last_results: QMessageBox.warning(self, "无法反馈", "没有可反馈的识别结果") return dialog = FeedbackDialog( self, last_results=self.last_results, dorm_members=self.dorm_members ) if dialog.exec_() == QDialog.Accepted: feedback_data = dialog.get_feedback_data() if feedback_data: # 修复:调用 FaceRecognition 实例的 save_feedback 方法 selected_result = dialog.get_selected_result() if selected_result: # 获取检测框 detected_box = [ selected_result["box"][0], selected_result["box"][1], selected_result["box"][0] + selected_result["box"][2], selected_result["box"][1] + selected_result["box"][3] ] # 调用保存反馈方法 self.face_recognition.save_feedback( self.current_image, detected_box, feedback_data["original_label"], feedback_data["correct_label"] ) QMessageBox.information(self, "反馈提交", "感谢您的反馈!数据已保存用于改进模型") else: QMessageBox.warning(self, "反馈错误", "未选择要反馈的人脸结果") def init_models(self): """初始化模型组件""" # 设置设备 self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 
self.device_label.setText(f"计算设备: {'GPU' if torch.cuda.is_available() else 'CPU'}") # 初始化人脸检测器 try: self.detector = MTCNN( keep_all=True, post_process=False, device=self.device ) self.status_bar.showMessage("MTCNN 检测器初始化完成") logger.info("MTCNN 检测器初始化完成") except Exception as e: self.status_bar.showMessage(f"MTCNN 初始化失败: {str(e)}") logger.error(f"MTCNN 初始化失败: {str(e)}") return # 初始化人脸特征提取器 try: self.embedder = InceptionResnetV1( pretrained='vggface2', classify=False, device=self.device ).eval() self.status_bar.showMessage("FaceNet 特征提取器初始化完成") logger.info("FaceNet 特征提取器初始化完成") except Exception as e: self.status_bar.showMessage(f"FaceNet 初始化失败: {str(e)}") logger.error(f"FaceNet 初始化失败: {str(e)}") def load_model(self): """加载预训练的SVM分类器""" options = QFileDialog.Options() file_path, _ = QFileDialog.getOpenFileName( self, "选择模型文件", "", "模型文件 (*.pkl);;所有文件 (*)", options=options ) if file_path: try: # 加载模型 model_data = joblib.load(file_path) self.classifier = model_data['classifier'] self.label_encoder = model_data['label_encoder'] self.dorm_members = model_data['dorm_members'] # 启用重新训练按钮 self.retrain_btn.setEnabled(True) # 更新UI状态 self.model_loaded = True self.model_status.setText("模型状态: 已加载") self.model_info.setText(f"寝室成员: {', '.join(self.dorm_members)}") self.image_recognition_btn.setEnabled(True) self.camera_recognition_btn.setEnabled(True) # 状态栏消息 self.status_bar.showMessage(f"模型加载成功: {os.path.basename(file_path)}") # 显示成功消息 QMessageBox.information( self, "模型加载", f"模型加载成功!\n识别成员: {len(self.dorm_members)}人\n置信度阈值: {self.threshold_slider.value() / 100:.2f}" ) except Exception as e: QMessageBox.critical(self, "加载错误", f"模型加载失败: {str(e)}") self.status_bar.showMessage(f"模型加载失败: {str(e)}") def update_threshold(self, value): """更新置信度阈值""" threshold = value / 100 self.threshold_value.setText(f"{threshold:.2f}") self.status_bar.showMessage(f"置信度阈值更新为: {threshold:.2f}") def open_image(self): """打开图片文件进行识别""" if not self.model_loaded: QMessageBox.warning(self, "警告", "请先加载模型!") return options = QFileDialog.Options() file_path, _ = QFileDialog.getOpenFileName( self, "选择识别图片", "", "图片文件 (*.jpg *.jpeg *.png);;所有文件 (*)", options=options ) if file_path: # 读取图片 image = cv2.imread(file_path) if image is None: QMessageBox.critical(self, "错误", "无法读取图片文件!") return # 保存当前图片 self.current_image = image.copy() # 进行识别 self.recognize_faces(image) def toggle_camera(self): """切换摄像头状态""" if not self.model_loaded: QMessageBox.warning(self, "警告", "请先加载模型!") return if not self.camera_active: # 尝试打开摄像头 self.video_capture = cv2.VideoCapture(0) if not self.video_capture.isOpened(): QMessageBox.critical(self, "错误", "无法打开摄像头!") return # 启动摄像头 self.camera_active = True self.camera_recognition_btn.setText("停止摄像头识别") self.camera_recognition_btn.setIcon(QIcon.fromTheme("media-playback-stop")) self.timer.timeout.connect(self.process_camera_frame) self.timer.start(30) # 约33 FPS self.status_bar.showMessage("摄像头已启动") else: # 停止摄像头 self.camera_active = False self.camera_recognition_btn.setText("启动摄像头识别") self.camera_recognition_btn.setIcon(QIcon.fromTheme("camera-web")) self.timer.stop() if self.video_capture: self.video_capture.release() self.status_bar.showMessage("摄像头已停止") def process_camera_frame(self): """处理摄像头帧""" ret, frame = self.video_capture.read() if ret: # 保存当前帧 self.current_image = frame.copy() # 进行识别 self.recognize_faces(frame) def retrain_model(self): """使用反馈数据重新训练模型""" # 获取所有反馈数据 feedback_dir = os.path.join(os.getcwd(), "data", "feedback_data") # 修复1:支持多种文件扩展名 feedback_files = [] for f in os.listdir(feedback_dir): filepath = 
os.path.join(feedback_dir, f) if os.path.isfile(filepath) and (f.endswith('.pkl') or f.endswith('.json')): feedback_files.append(f) # 修复2:添加目录存在性检查 if not os.path.exists(feedback_dir): QMessageBox.warning(self, "目录不存在", f"反馈数据目录不存在: {feedback_dir}") return if not feedback_files: QMessageBox.information(self, "无反馈数据", "没有找到反馈数据,无法重新训练") return # 确认对话框 reply = QMessageBox.question( self, '确认重新训练', f"将使用 {len(feedback_files)} 条反馈数据重新训练模型。此操作可能需要几分钟时间,确定继续吗?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No ) if reply != QMessageBox.Yes: return try: # 创建进度对话框 progress = QProgressDialog("正在重新训练模型...", "取消", 0, len(feedback_files), self) progress.setWindowTitle("模型重新训练") progress.setWindowModality(Qt.WindowModal) progress.setMinimumDuration(0) progress.setValue(0) # 收集所有反馈数据 feedback_data = [] for i, filename in enumerate(feedback_files): filepath = os.path.join(feedback_dir, filename) # 修复3:根据文件扩展名使用不同的加载方式 if filename.endswith('.pkl'): with open(filepath, 'rb') as f: # 二进制模式读取 data = pickle.load(f) elif filename.endswith('.json'): with open(filepath, 'r', encoding='utf-8') as f: data = json.load(f) else: continue # 跳过不支持的文件类型 feedback_data.append(data) progress.setValue(i + 1) QApplication.processEvents() # 保持UI响应 if progress.wasCanceled(): return progress.setValue(len(feedback_files)) # 重新训练模型 self.status_bar.showMessage("正在重新训练模型...") # 修复4:添加详细的日志记录 logger.info(f"开始重新训练,使用 {len(feedback_data)} 条反馈数据") # 调用重新训练方法 success = self.face_recognition.retrain_with_feedback(feedback_data) if success: # 更新UI状态 self.model_status.setText("模型状态: 已重新训练") self.dorm_members = self.face_recognition.dorm_members self.model_info.setText(f"寝室成员: {', '.join(self.dorm_members)}") # 保存更新后的模型 model_path = os.path.join("models", "updated_model.pkl") self.face_recognition.save_updated_model(model_path) QMessageBox.information(self, "训练完成", "模型已成功使用反馈数据重新训练!") else: QMessageBox.warning(self, "训练失败", "重新训练过程中出现问题") except Exception as e: logger.error(f"重新训练失败: {str(e)}") QMessageBox.critical(self, "训练错误", f"重新训练模型时出错: {str(e)}") def recognize_faces(self, image): """识别人脸并在图像上标注结果""" # 清空上次结果 self.last_results = [] # 转换为 PIL 图像 pil_image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) # 检测人脸 boxes, probs, _ = self.detector.detect(pil_image, landmarks=True) # 获取显示选项 display_mode = self.display_combo.currentIndex() # 准备显示图像 display_image = image.copy() # 如果没有检测到人脸 if boxes is None: if display_mode == 2: # 识别结果模式 cv2.putText(display_image, "未检测到人脸", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) self.results_text.setText("未检测到人脸") else: # 提取每个人脸 faces = [] for box in boxes: x1, y1, x2, y2 = box face = pil_image.crop((x1, y1, x2, y2)) faces.append(face) # 提取特征 embeddings = [] if faces and self.model_loaded: # 批量处理所有人脸 face_tensors = [self.preprocess_face(face) for face in faces] if face_tensors: face_tensors = torch.stack(face_tensors).to(self.device) with torch.no_grad(): embeddings = self.embedder(face_tensors).cpu().numpy() # 处理每个人脸 for i, (box, prob) in enumerate(zip(boxes, probs)): x1, y1, x2, y2 = box w, h = x2 - x1, y2 - y1 # 在图像上绘制结果 if display_mode == 0: # 原始图像 # 不绘制任何内容 pass elif display_mode == 1: # 检测框 # 绘制人脸框 cv2.rectangle(display_image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2) elif display_mode == 2: # 识别结果 # 绘制人脸框 color = (0, 255, 0) # 绿色 # 如果有嵌入向量,则进行识别 if i < len(embeddings): # 预测 probabilities = self.classifier.predict_proba([embeddings[i]])[0] max_prob = np.max(probabilities) pred_class = self.classifier.predict([embeddings[i]])[0] pred_label = 
self.label_encoder.inverse_transform([pred_class])[0] # 获取置信度阈值 threshold = self.threshold_slider.value() / 100 # 判断是否为陌生人 if max_prob < threshold or pred_label == 'stranger': label = "陌生人" color = (0, 0, 255) # 红色 else: label = pred_label color = (0, 255, 0) # 绿色 # 保存结果用于文本显示 - 修复:保存完整的框信息 result = { "box": [int(x1), int(y1), int(x2 - x1), int(y2 - y1)], # [x, y, width, height] "label": label, "confidence": max_prob } self.last_results.append(result) # 绘制标签 cv2.rectangle(display_image, (int(x1), int(y1)), (int(x2), int(y2)), color, 2) cv2.putText(display_image, f"{label} ({max_prob:.2f})", (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2) else: # 无法识别的处理 cv2.rectangle(display_image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 165, 255), 2) cv2.putText(display_image, "处理中...", (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 165, 255), 2) # 更新结果文本 self.update_results_text() # 在图像上显示FPS(摄像头模式下) if self.camera_active: fps = self.timer.interval() if fps > 0: cv2.putText(display_image, f"FPS: {1000 / fps:.1f}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2) # 显示图像 self.display_image(display_image) def update_results_text(self): """更新结果文本区域""" if not self.last_results: self.results_text.setText("未识别到任何人脸") return # 构建结果文本 result_text = "
<h3>识别结果:</h3>" for i, result in enumerate(self.last_results, 1): x, y, w, h = result["box"] label = result["label"] confidence = result["confidence"] # 处理中文显示问题 if label in self.dorm_members: result_text += ( f"<p>人脸 #{i}: " f"<b>寝室成员 - {label}</b><br>" f"位置: ({x}, {y}), 大小: {w}x{h}, 置信度: {confidence:.2f}</p>" ) else: result_text += ( f"<p>人脸 #{i}: " f"<b>陌生人</b><br>" f"位置: ({x}, {y}), 大小: {w}x{h}, 置信度: {confidence:.2f}</p>" ) self.results_text.setHtml(result_text) def preprocess_face(self, face_img): """预处理人脸图像""" # 调整大小 face_img = face_img.resize((160, 160)) # 转换为张量并归一化 face_img = np.array(face_img).astype(np.float32) / 255.0 face_img = (face_img - 0.5) / 0.5 # 归一化到[-1, 1] face_img = torch.tensor(face_img).permute(2, 0, 1) # HWC to CHW return face_img def display_image(self, image): """在QLabel中显示图像""" # 将OpenCV图像转换为Qt格式 height, width, channel = image.shape bytes_per_line = 3 * width q_img = QImage(image.data, width, height, bytes_per_line, QImage.Format_RGB888).rgbSwapped() # 缩放图像以适应标签 pixmap = QPixmap.fromImage(q_img) self.image_label.setPixmap(pixmap.scaled( self.image_label.width(), self.image_label.height(), Qt.KeepAspectRatio, Qt.SmoothTransformation )) def closeEvent(self, event): """关闭事件处理""" if self.camera_active: self.timer.stop() if self.video_capture: self.video_capture.release() # 确认退出 reply = QMessageBox.question( self, '确认退出', "确定要退出系统吗?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No ) if reply == QMessageBox.Yes: event.accept() else: event.ignore() if __name__ == "__main__": app = QApplication(sys.argv) # 设置全局异常处理 def handle_exception(exc_type, exc_value, exc_traceback): """全局异常处理""" import traceback error_msg = "".join(traceback.format_exception(exc_type, exc_value, exc_traceback)) print(f"未捕获的异常:\n{error_msg}") # 记录到文件 with open("error.log", "a") as f: f.write(f"\n\n{datetime.now()}:\n{error_msg}") # 显示给用户 QMessageBox.critical(None, "系统错误", f"发生未处理的异常:\n{str(exc_value)}") sys.exit(1) sys.excepthook = handle_exception window = FaceRecognitionSystem() window.show() sys.exit(app.exec_())

face_model.py代码如下:

import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # 禁用 TensorFlow 日志(如果仍有依赖) import cv2 import numpy as np import time import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import Dataset, DataLoader from torchvision import transforms from sklearn.svm import SVC from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score import joblib import logging import sys import glob from facenet_pytorch import MTCNN, InceptionResnetV1 from PIL import Image import gc # 配置日志 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) def check_gpu_environment(): """检查 GPU 环境""" print("=" * 60) print("GPU 环境检查") print("=" * 60) # 检查 CUDA 是否可用 print(f"PyTorch 版本: {torch.__version__}") print(f"CUDA 可用: {torch.cuda.is_available()}") if torch.cuda.is_available(): print(f"GPU 数量: {torch.cuda.device_count()}") for i in range(torch.cuda.device_count()): print(f"GPU {i}: {torch.cuda.get_device_name(i)}") print(f" 显存总量: {torch.cuda.get_device_properties(i).total_memory / 1024 ** 3:.2f} GB") print("=" * 60) class FaceDataset(Dataset): """人脸数据集类""" def __init__(self, data_dir, min_samples=10, transform=None): self.data_dir = data_dir self.transform = transform self.faces = [] self.labels = [] self.label_map = {} self.dorm_members = [] self._load_dataset(min_samples) def _load_dataset(self, min_samples): """加载数据集""" # 遍历每个成员文件夹 for member_dir in os.listdir(self.data_dir): member_path = os.path.join(self.data_dir, member_dir) if not os.path.isdir(member_path): continue # 记录寝室成员 self.dorm_members.append(member_dir) self.label_map[member_dir] = len(self.label_map) # 遍历成员的所有照片 member_faces = [] for img_file in os.listdir(member_path): img_path = os.path.join(member_path, img_file) try: # 使用 PIL 加载图像 img = Image.open(img_path).convert('RGB')
member_faces.append(img) except Exception as e: logger.warning(f"无法加载图像 {img_path}: {str(e)}") # 确保每个成员有足够样本 if len(member_faces) < min_samples: logger.warning(f"{member_dir} 只有 {len(member_faces)} 个有效样本,至少需要 {min_samples} 个") continue # 添加成员数据 self.faces.extend(member_faces) self.labels.extend([self.label_map[member_dir]] * len(member_faces)) # 添加陌生人样本 stranger_faces = self._generate_stranger_samples(len(self.faces) // 4) self.faces.extend(stranger_faces) self.labels.extend([len(self.label_map)] * len(stranger_faces)) self.label_map['stranger'] = len(self.label_map) logger.info(f"数据集加载完成: {len(self.faces)} 个样本, {len(self.dorm_members)} 个成员") def _generate_stranger_samples(self, num_samples): """生成陌生人样本""" stranger_faces = [] # 使用公开数据集的人脸作为陌生人 # 这里使用 LFW 数据集作为示例(实际项目中应使用真实数据) for _ in range(num_samples): # 生成随机噪声图像(实际应用中应使用真实陌生人照片) random_face = Image.fromarray(np.uint8(np.random.rand(160, 160, 3) * 255)) stranger_faces.append(random_face) return stranger_faces def __len__(self): return len(self.faces) def __getitem__(self, idx): face = self.faces[idx] label = self.labels[idx] if self.transform: face = self.transform(face) return face, label class DormFaceRecognizer: """寝室人脸识别系统 (PyTorch 实现)""" def __init__(self, threshold=0.7, device=None): # 设置设备 self.device = device or ('cuda' if torch.cuda.is_available() else 'cpu') logger.info(f"使用设备: {self.device}") # 初始化人脸检测器 self.detector = MTCNN( keep_all=True, post_process=False, device=self.device ) logger.info("MTCNN 检测器初始化完成") # 初始化人脸特征提取器 self.embedder = InceptionResnetV1( pretrained='vggface2', classify=False, device=self.device ).eval() # 设置为评估模式 logger.info("FaceNet 特征提取器初始化完成") # 初始化其他组件 self.classifier = None self.label_encoder = None self.threshold = threshold self.dorm_members = [] # 数据预处理 self.transform = transforms.Compose([ transforms.Resize((160, 160)), transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ]) def create_dataset(self, data_dir, min_samples=10, batch_size=32, num_workers=4): """创建数据集""" dataset = FaceDataset( data_dir, min_samples=min_samples, transform=self.transform ) # 保存成员信息 self.dorm_members = dataset.dorm_members self.label_encoder = LabelEncoder().fit( list(dataset.label_map.keys()) + ['stranger'] ) # 创建数据加载器 dataloader = DataLoader( dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True ) return dataset, dataloader def extract_features(self, dataloader): """提取人脸特征向量""" embeddings = [] labels = [] logger.info("开始提取特征...") start_time = time.time() with torch.no_grad(): for batch_idx, (faces, batch_labels) in enumerate(dataloader): # 移动到设备 faces = faces.to(self.device) # 提取特征 batch_embeddings = self.embedder(faces) # 保存结果 embeddings.append(batch_embeddings.cpu().numpy()) labels.append(batch_labels.numpy()) # 每10个批次打印一次进度 if (batch_idx + 1) % 10 == 0: elapsed = time.time() - start_time logger.info(f"已处理 {batch_idx + 1}/{len(dataloader)} 批次, 耗时: {elapsed:.2f}秒") # 合并结果 embeddings = np.vstack(embeddings) labels = np.hstack(labels) logger.info(f"特征提取完成: {embeddings.shape[0]} 个样本, 耗时: {time.time() - start_time:.2f}秒") return embeddings, labels def train_classifier(self, embeddings, labels): """训练 SVM 分类器""" logger.info("开始训练分类器...") start_time = time.time() # 划分训练集和测试集 X_train, X_test, y_train, y_test = train_test_split( embeddings, labels, test_size=0.2, random_state=42 ) # 创建并训练 SVM 分类器 self.classifier = SVC(kernel='linear', probability=True, C=1.0) self.classifier.fit(X_train, y_train) # 评估模型 y_pred = self.classifier.predict(X_test) accuracy = 
accuracy_score(y_test, y_pred) logger.info(f"分类器训练完成, 准确率: {accuracy:.4f}, 耗时: {time.time() - start_time:.2f}秒") return accuracy def recognize_face(self, image): """识别单张图像中的人脸""" # 转换为 PIL 图像 if isinstance(image, np.ndarray): image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) # 检测人脸 boxes, probs, landmarks = self.detector.detect(image, landmarks=True) recognitions = [] if boxes is not None: # 提取每个人脸 faces = [] for box in boxes: x1, y1, x2, y2 = box face = image.crop((x1, y1, x2, y2)) faces.append(face) # 预处理人脸 face_tensors = torch.stack([self.transform(face) for face in faces]).to(self.device) # 提取特征 with torch.no_grad(): embeddings = self.embedder(face_tensors).cpu().numpy() # 预测 probabilities = self.classifier.predict_proba(embeddings) pred_classes = self.classifier.predict(embeddings) for i, (box, prob) in enumerate(zip(boxes, probs)): max_prob = np.max(probabilities[i]) pred_label = self.label_encoder.inverse_transform([pred_classes[i]])[0] # 判断是否为陌生人 if max_prob < self.threshold or pred_label == 'stranger': recognitions.append(("陌生人", max_prob, box)) else: recognitions.append((pred_label, max_prob, box)) return recognitions def save_model(self, file_path): """保存模型""" model_data = { 'classifier': self.classifier, 'label_encoder': self.label_encoder, 'threshold': self.threshold, 'dorm_members': self.dorm_members } joblib.dump(model_data, file_path) logger.info(f"模型已保存至: {file_path}") def load_model(self, file_path): """加载模型""" model_data = joblib.load(file_path) self.classifier = model_data['classifier'] self.label_encoder = model_data['label_encoder'] self.threshold = model_data['threshold'] self.dorm_members = model_data['dorm_members'] logger.info(f"模型已加载,寝室成员: {', '.join(self.dorm_members)}") def main(): """主函数""" print(f"[{time.strftime('%H:%M:%S')}] 程序启动") # 检查 GPU 环境 check_gpu_environment() # 检查并创建必要的目录 os.makedirs('data/dorm_faces', exist_ok=True) # 初始化识别器 try: recognizer = DormFaceRecognizer(threshold=0.6) logger.info("人脸识别器初始化成功") except Exception as e: logger.error(f"初始化失败: {str(e)}") print("程序将在10秒后退出...") time.sleep(10) return # 数据集路径 data_dir = "data/dorm_faces" # 检查数据集是否存在 if not os.path.exists(data_dir) or not os.listdir(data_dir): logger.warning(f"数据集目录 '{data_dir}' 不存在或为空") print("请创建以下结构的目录:") print("dorm_faces/") print("├── 成员1/") print("│ ├── 照片1.jpg") print("│ ├── 照片2.jpg") print("│ └── ...") print("├── 成员2/") print("│ └── ...") print("└── ...") print("\n程序将在10秒后退出...") time.sleep(10) return # 步骤1: 创建数据集 try: dataset, dataloader = recognizer.create_dataset( data_dir, min_samples=10, batch_size=64, num_workers=4 ) except Exception as e: logger.error(f"数据集创建失败: {str(e)}") return # 步骤2: 提取特征 try: embeddings, labels = recognizer.extract_features(dataloader) except Exception as e: logger.error(f"特征提取失败: {str(e)}") return # 步骤3: 训练分类器 try: accuracy = recognizer.train_classifier(embeddings, labels) except Exception as e: logger.error(f"分类器训练失败: {str(e)}") return # 保存模型(先确保 models 目录存在,否则 joblib.dump 会抛出异常) model_path = "models/dorm_face_model_pytorch.pkl" os.makedirs("models", exist_ok=True) try: recognizer.save_model(model_path) except Exception as e: logger.error(f"模型保存失败: {str(e)}") # 测试识别 test_image_path = "test_photo.jpg" if not os.path.exists(test_image_path): logger.warning(f"测试图片 '{test_image_path}' 不存在,跳过识别测试") else: logger.info(f"正在测试识别: {test_image_path}") try: test_image = cv2.imread(test_image_path) if test_image is None: logger.error(f"无法读取图片: {test_image_path}") else: recognitions = recognizer.recognize_face(test_image) if not recognitions: logger.info("未检测到人脸") else: # 在图像上绘制结果 for name, confidence, box in 
recognitions: x1, y1, x2, y2 = box label = f"{name} ({confidence:.2f})" color = (0, 255, 0) if name != "陌生人" else (0, 0, 255) # 绘制矩形框 cv2.rectangle(test_image, (int(x1), int(y1)), (int(x2), int(y2)), color, 2) # 绘制标签 cv2.putText(test_image, label, (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2) # 显示结果 cv2.imshow("人脸识别结果", test_image) cv2.waitKey(0) cv2.destroyAllWindows() # 保存结果图像 result_path = "recognition_result_pytorch.jpg" cv2.imwrite(result_path, test_image) logger.info(f"识别结果已保存至: {result_path}") except Exception as e: logger.error(f"人脸识别失败: {str(e)}") logger.info("程序执行完成") if __name__ == "__main__": main()

face_recognition.py代码如下:

import json import cv2 import numpy as np import torch import insightface from insightface.app import FaceAnalysis from facenet_pytorch import InceptionResnetV1 from PIL import Image import joblib import os import pickle from datetime import datetime import random import torch.nn as nn import torch.optim as optim from sklearn.preprocessing import LabelEncoder from sklearn.svm import SVC from torch.utils.data import Dataset, DataLoader class FaceRecognition: def __init__(self, device=None): self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.model_loaded = False self.training_data = {} # 初始化 training_data 属性 self.dorm_members = [] # 初始化 dorm_members 属性 self.label_encoder = LabelEncoder() # 初始化标签编码器 self.init_models() def init_models(self): """初始化人脸识别模型""" try: # 初始化ArcFace模型 - 使用正确的方法 self.arcface_model = FaceAnalysis(providers=['CPUExecutionProvider']) self.arcface_model.prepare(ctx_id=0, det_size=(640, 640)) # 初始化FaceNet模型作为备选 self.facenet_model = InceptionResnetV1( pretrained='vggface2', classify=False, device=self.device ).eval() # 状态标记 self.models_initialized = True print("模型初始化完成") except Exception as e: print(f"模型初始化失败: {str(e)}") self.models_initialized = False def load_classifier(self, model_path): """加载分类器模型""" try: model_data = joblib.load(model_path) self.classifier = model_data['classifier'] self.label_encoder = model_data['label_encoder'] self.dorm_members = model_data['dorm_members'] # 确保加载training_data self.training_data = model_data.get('training_data', {}) self.model_loaded = True print(f"分类器加载成功,成员: {', '.join(self.dorm_members)}") print(f"训练数据包含 {len(self.training_data)} 个类别") return True except Exception as e: print(f"分类器加载失败: {str(e)}") self.model_loaded = False return False def extract_features(self, face_img): """使用ArcFace提取人脸特征""" try: if face_img.size == 0: print("错误:空的人脸图像") return None # 将图像从BGR转换为RGB rgb_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB) faces = self.arcface_model.get(rgb_img) if faces: return faces[0].embedding print("未检测到人脸特征") return None except Exception as e: print(f"特征提取失败: {str(e)}") return None def extract_features_facenet(self, face_img): """使用FaceNet提取人脸特征(备选)""" try: # 转换为PIL图像并预处理 face_pil = Image.fromarray(cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)) face_tensor = self.preprocess_face(face_pil).to(self.device) with torch.no_grad(): features = self.facenet_model(face_tensor.unsqueeze(0)).cpu().numpy()[0] return features except Exception as e: print(f"FaceNet特征提取失败: {str(e)}") return None def preprocess_face(self, face_img): """预处理人脸图像""" # 调整大小 face_img = face_img.resize((160, 160)) # 转换为张量并归一化 face_img = np.array(face_img).astype(np.float32) / 255.0 face_img = (face_img - 0.5) / 0.5 # 归一化到[-1, 1] face_img = torch.tensor(face_img).permute(2, 0, 1) # HWC to CHW return face_img def retrain_with_feedback(self, feedback_data): """使用反馈数据重新训练模型""" # 检查是否有原始训练数据 if not 
self.training_data: print("错误:没有可用的原始训练数据") return False # 收集原始训练数据 original_features = [] original_labels = [] # 收集特征和标签 for member, embeddings in self.training_data.items(): for emb in embeddings: original_features.append(emb) original_labels.append(member) # 收集反馈数据 feedback_features = [] feedback_labels = [] for feedback in feedback_data: # 获取正确标签 correct_label = feedback.get("correct_label") if not correct_label or correct_label == "unknown": continue # 获取原始图像和人脸位置 image_path = feedback.get("image_path", "") if not image_path or not os.path.exists(image_path): print(f"图像路径无效: {image_path}") continue box = feedback.get("box", []) if len(box) != 4: print(f"无效的人脸框: {box}") continue # 处理图像 image = cv2.imread(image_path) if image is None: print(f"无法读取图像: {image_path}") continue # 裁剪人脸区域 x1, y1, x2, y2 = map(int, box) face_img = image[y1:y2, x1:x2] if face_img.size == 0: print(f"裁剪后的人脸图像为空: {image_path}") continue # 提取特征 embedding = self.extract_features(face_img) if embedding is None: print(f"无法提取特征: {image_path}") continue # 添加到训练数据 feedback_features.append(embedding) feedback_labels.append(correct_label) print(f"添加反馈数据: {correct_label} - {image_path}") # 检查是否有有效的反馈数据 if not feedback_features: print("错误:没有有效的反馈数据") return False # 合并数据 all_features = np.vstack([original_features, feedback_features]) all_labels = original_labels + feedback_labels # 重新训练分类器 self.classifier = SVC(kernel='linear', probability=True) self.classifier.fit(all_features, all_labels) # 更新标签编码器 self.label_encoder = LabelEncoder() self.label_encoder.fit(all_labels) # 更新寝室成员列表 self.dorm_members = list(self.label_encoder.classes_) # 更新训练数据 self.training_data = {} for label, feature in zip(all_labels, all_features): if label not in self.training_data: self.training_data[label] = [] self.training_data[label].append(feature) print(f"重新训练完成!新模型包含 {len(self.dorm_members)} 个成员") return True def recognize(self, image, threshold=0.7): """识别人脸""" if not self.model_loaded or not self.models_initialized: return [], image.copy() # 使用ArcFace检测人脸 rgb_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) faces = self.arcface_model.get(rgb_img) results = [] display_img = image.copy() if faces: for face in faces: # 获取人脸框 x1, y1, x2, y2 = face.bbox.astype(int) # 提取特征 embedding = face.embedding # 预测 probabilities = self.classifier.predict_proba([embedding])[0] max_prob = np.max(probabilities) pred_class = self.classifier.predict([embedding])[0] pred_label = self.label_encoder.inverse_transform([pred_class])[0] # 判断是否为陌生人 if max_prob < threshold or pred_label == 'stranger': label = "陌生人" color = (0, 0, 255) # 红色 else: label = pred_label color = (0, 255, 0) # 绿色 # 保存结果 results.append({ "box": [x1, y1, x2, y2], "label": label, "confidence": max_prob }) # 在图像上绘制结果 cv2.rectangle(display_img, (x1, y1), (x2, y2), color, 2) cv2.putText(display_img, f"{label} ({max_prob:.2f})", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2) return results, display_img def save_feedback(self, image, detected_box, incorrect_label, correct_label): """保存用户反馈数据 - 改进为保存图像路径而非完整图像""" feedback_dir = "data/feedback_data" os.makedirs(feedback_dir, exist_ok=True) # 创建唯一文件名 timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") # 保存人脸图像 face_img_dir = os.path.join(feedback_dir, "faces") os.makedirs(face_img_dir, exist_ok=True) face_img_path = os.path.join(face_img_dir, f"face_{timestamp}.jpg") # 裁剪并保存人脸区域 x1, y1, x2, y2 = map(int, detected_box) # 修复1:确保裁剪区域有效 if y2 > y1 and x2 > x1: face_img = image[y1:y2, x1:x2] if face_img.size > 0: cv2.imwrite(face_img_path, face_img) else: logger.warning(f"裁剪的人脸区域无效: {detected_box}") face_img_path = None else: logger.warning(f"无效的检测框: {detected_box}") face_img_path = None # 保存反馈元数据 filename = f"feedback_{timestamp}.json" # 修复2:使用JSON格式 filepath = os.path.join(feedback_dir, filename) # 准备数据 feedback_data = { "image_path": face_img_path, # 保存路径而非完整图像 "detected_box": detected_box, "incorrect_label": incorrect_label, "correct_label": correct_label, "timestamp": timestamp } # 修复3:使用JSON保存便于阅读和调试 with open(filepath, 'w', encoding='utf-8') as f: json.dump(feedback_data, f, ensure_ascii=False, indent=2) return True def save_updated_model(self, output_path): """保存更新后的模型""" model_data = { 'classifier': self.classifier, 'label_encoder': self.label_encoder, 'dorm_members': self.dorm_members, 'training_data': self.training_data # 包含训练数据 } joblib.dump(model_data, output_path) print(f"更新后的模型已保存到: {output_path}") class TripletFaceDataset(Dataset): """三元组人脸数据集""" def __init__(self, embeddings, labels): self.embeddings = embeddings self.labels = labels self.label_to_indices = {} # 创建标签到索引的映射 for idx, label in enumerate(labels): if label not in self.label_to_indices: self.label_to_indices[label] = [] self.label_to_indices[label].append(idx) def __getitem__(self, index): anchor_label = self.labels[index] # 随机选择正样本 positive_idx = index while positive_idx == index: positive_idx = random.choice(self.label_to_indices[anchor_label]) # 随机选择负样本 negative_label = random.choice([l for l in set(self.labels) if l != anchor_label]) negative_idx = random.choice(self.label_to_indices[negative_label]) return ( self.embeddings[index], self.embeddings[positive_idx], self.embeddings[negative_idx] ) def __len__(self): return len(self.embeddings) class TripletLoss(nn.Module): """三元组损失函数""" def __init__(self, margin=1.0): super(TripletLoss, self).__init__() self.margin = margin def 
forward(self, anchor, positive, negative): distance_positive = (anchor - positive).pow(2).sum(1) distance_negative = (anchor - negative).pow(2).sum(1) losses = torch.relu(distance_positive - distance_negative + self.margin) return losses.mean() def train_triplet_model(embeddings, labels, epochs=100): """训练三元组模型""" dataset = TripletFaceDataset(embeddings, labels) dataloader = DataLoader(dataset, batch_size=32, shuffle=True) model = nn.Sequential( nn.Linear(embeddings.shape[1], 256), nn.ReLU(), nn.Linear(256, 128) ) criterion = TripletLoss(margin=0.5) optimizer = optim.Adam(model.parameters(), lr=0.001) for epoch in range(epochs): total_loss = 0.0 for anchor, positive, negative in dataloader: optimizer.zero_grad() anchor_embed = model(anchor) positive_embed = model(positive) negative_embed = model(negative) loss = criterion(anchor_embed, positive_embed, negative_embed) loss.backward() optimizer.step() total_loss += loss.item() print(f"Epoch {epoch + 1}/{epochs}, Loss: {total_loss / len(dataloader):.4f}") return model

main.py代码如下:

import sys from dorm_face_recognition_gui import FaceRecognitionSystem from PyQt5.QtWidgets import QApplication if __name__ == "__main__": # 设置中文编码支持 if sys.platform == "win32": import ctypes ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID("dorm.face.recognition") app = QApplication(sys.argv) app.setStyle("Fusion") # 使用Fusion风格 # 设置应用样式 app.setStyleSheet(""" QMainWindow { background-color: #ecf0f1; } QGroupBox { border: 1px solid #bdc3c7; border-radius: 8px; margin-top: 20px; padding: 10px; font-weight: bold; background-color: #ffffff; } QGroupBox::title { subcontrol-origin: margin; subcontrol-position: top center; padding: 0 5px; } QPushButton { background-color: #3498db; color: white; border: none; padding: 10px 15px; font-size: 14px; margin: 5px; border-radius: 5px; } QPushButton:hover { background-color: #2980b9; } QPushButton:pressed { background-color: #1c6ea4; } QPushButton:disabled { background-color: #bdc3c7; } QLabel { font-size: 14px; padding: 3px; } QComboBox, QSlider { padding: 4px; background-color: #ffffff; } QTextEdit { font-family: "Microsoft YaHei"; font-size: 12px; } """) window = FaceRecognitionSystem() window.show() sys.exit(app.exec_())

ui.py代码如下:

from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QLabel, QFileDialog, QComboBox, QSlider, QMessageBox, QTextEdit, QGroupBox, QScrollArea, QDialog, QListWidget) from PyQt5.QtCore import Qt, QTimer from PyQt5.QtGui import QImage, QPixmap, QIcon, QFont from face_recognition import FaceRecognition class FaceRecognitionSystem(QMainWindow): def __init__(self): super().__init__() # … 原有初始化代码 … # 初始化人脸识别器 self.face_recognition = FaceRecognition() # 添加反馈按钮 self.add_feedback_button() def add_feedback_button(self): """添加反馈按钮到界面""" self.feedback_btn = QPushButton("反馈识别错误") self.feedback_btn.setIcon(QIcon.fromTheme("dialog-warning")) self.feedback_btn.setStyleSheet("background-color: #f39c12;") self.feedback_btn.clicked.connect(self.handle_feedback) # 找到识别功能组并添加按钮 for i in range(self.control_layout.count()): widget = self.control_layout.itemAt(i).widget() if isinstance(widget, QGroupBox) and widget.title() == "识别功能": layout = widget.layout() layout.addWidget(self.feedback_btn) break def handle_feedback(self): """处理用户反馈""" if not hasattr(self, 'last_results') or not self.last_results: QMessageBox.warning(self, "警告", "没有可反馈的识别结果") return # 创建反馈对话框 dialog = QDialog(self) dialog.setWindowTitle("识别错误反馈") dialog.setFixedSize(400, 300) layout = QVBoxLayout(dialog) # 
添加当前识别结果 result_label = QLabel("当前识别结果:") layout.addWidget(result_label) self.feedback_list = QListWidget() for i, result in enumerate(self.last_results, 1): label = result["label"] confidence = result["confidence"] self.feedback_list.addItem(f"人脸 #{i}: {label} (置信度: {confidence:.2f})") layout.addWidget(self.feedback_list) # 添加正确身份选择 correct_label = QLabel("正确身份:") layout.addWidget(correct_label) self.correct_combo = QComboBox() self.correct_combo.addItems(["选择正确身份"] + self.face_recognition.dorm_members + ["陌生人", "不在列表中"]) layout.addWidget(self.correct_combo) # 添加按钮 btn_layout = QHBoxLayout() submit_btn = QPushButton("提交反馈") submit_btn.clicked.connect(lambda: self.submit_feedback(dialog)) btn_layout.addWidget(submit_btn) cancel_btn = QPushButton("取消") cancel_btn.clicked.connect(dialog.reject) btn_layout.addWidget(cancel_btn) layout.addLayout(btn_layout) dialog.exec_() def submit_feedback(self, dialog): """提交反馈并更新模型""" selected_index = self.feedback_list.currentRow() if selected_index < 0: QMessageBox.warning(self, "警告", "请选择一个识别结果") return result = self.last_results[selected_index] correct_identity = self.correct_combo.currentText() if correct_identity == "选择正确身份": QMessageBox.warning(self, "警告", "请选择正确身份") return # 保存反馈数据 self.face_recognition.save_feedback( self.current_image.copy(), result["box"], result["label"], correct_identity ) QMessageBox.information(self, "反馈提交", "感谢您的反馈!数据已保存用于改进模型") dialog.accept() def recognize_faces(self, image): """识别人脸并在图像上标注结果""" # 使用人脸识别器进行识别 self.last_results, display_image = self.face_recognition.recognize( image, threshold=self.threshold_slider.value() / 100 ) # 更新结果文本 self.update_results_text() # 显示图像 self.display_image(display_image) def update_results_text(self): """更新结果文本区域""" if not self.last_results: self.results_text.setText("未识别到任何人脸") return # 构建结果文本 result_text = "<h3>识别结果:</h3>" for i, result in enumerate(self.last_results, 1): x1, y1, x2, y2 = result["box"] label = result["label"] confidence = result["confidence"] # 处理中文显示问题 if label in self.face_recognition.dorm_members: result_text += ( f"
<p>人脸 #{i}: " f"<b>寝室成员 - {label}</b><br>" f"位置: ({x1}, {y1}), 置信度: {confidence:.2f}</p>" ) else: result_text += ( f"<p>人脸 #{i}: " f"<b>陌生人</b><br>" f"位置: ({x1}, {y1}), 置信度: {confidence:.2f}</p>" ) self.results_text.setHtml(result_text) # ... 其余原有方法 ...

Requirement: the model-retraining and feedback features need to be removed entirely, namely retrain_model and the retrain_btn wiring in the main GUI class; the add_feedback_button, handle_feedback and submit_feedback methods in ui.py; and retrain_with_feedback, save_feedback, save_updated_model plus the TripletFaceDataset / TripletLoss / train_triplet_model training helpers in face_recognition.py.
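Below is a minimal sketch of the recognition-only flow that remains after those deletions. It assumes the FaceRecognition class from face_recognition.py above is kept unchanged apart from removing the listed methods; the recognize_only.py file name, the recognize_image helper and the output file name are illustrative assumptions, not part of the original project:

# recognize_only.py:仅保留识别功能的入口脚本(示意;文件名与 recognize_image 均为假设的示例,非原项目代码)
# 只用到原 FaceRecognition 类中的 load_classifier() 和 recognize(),
# 所有重新训练/反馈相关方法均假定已删除。
import sys
import cv2
from face_recognition import FaceRecognition

def recognize_image(model_path, image_path, threshold=0.7):
    """加载SVM分类器,识别单张图片,并保存标注后的结果图"""
    recognizer = FaceRecognition()
    if not recognizer.load_classifier(model_path):
        return []  # load_classifier() 失败时内部已打印原因
    image = cv2.imread(image_path)
    if image is None:
        print(f"无法读取图片: {image_path}")
        return []
    # recognize() 在原代码中返回 (results, 标注后的图像)
    results, annotated = recognizer.recognize(image, threshold=threshold)
    cv2.imwrite("recognition_result.jpg", annotated)
    return results

if __name__ == "__main__":
    # 用法: python recognize_only.py models/updated_model.pkl test_photo.jpg
    for r in recognize_image(sys.argv[1], sys.argv[2]):
        print(r["label"], f"{r['confidence']:.2f}", r["box"])

On the GUI side the same trimming applies: recognize_faces in ui.py already delegates to FaceRecognition.recognize, so once retrain_btn, feedback_btn and their connected slots are deleted, the remaining window code should need no other changes.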