# YOLO object detection Flask app
# -------------------------------
import os
import cv2
import numpy as np
import requests
from flask import Flask, render_template, Response, request, jsonify
from datetime import datetime
app = Flask(__name__)

# Hugging Face YOLOv3 weights URL.
# NOTE(review): the original URL was redacted in this copy of the source —
# point this at the real hosted yolov3.weights file before deploying.
yolov3_weights_url = (
    'https://huggingface.co/datasets/CHANGE-ME/resolve/main/yolov3.weights'
)
def download_model():
    """Download the YOLOv3 weights into model/ if not already present.

    Streams the file in 8 KiB chunks so large weights never sit fully in
    memory. Raises requests.HTTPError on a failed download instead of
    silently writing an error page to the weights file.
    """
    weights_path = 'model/yolov3.weights'
    if os.path.exists(weights_path):
        print("YOLOv3 weights already present.")
        return
    # Fix: ensure the target directory exists before opening the file.
    os.makedirs(os.path.dirname(weights_path), exist_ok=True)
    print("Downloading YOLOv3 weights from Hugging Face...")
    response = requests.get(yolov3_weights_url, stream=True)
    response.raise_for_status()  # fail loudly on a bad URL / HTTP error
    with open(weights_path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
    print("Download complete.")


# Ensure the weights are on disk before the network is loaded below.
download_model()
# Load the YOLO network and the class-name list.
# NOTE(review): the config/names filenames were redacted in this copy —
# confirm 'model/yolov3.cfg' and 'model/coco.names' match the repo layout.
net = cv2.dnn.readNet('model/yolov3.weights', 'model/yolov3.cfg')
layer_names = net.getLayerNames()
# getUnconnectedOutLayers() returns 1-based indices, hence the i - 1.
output_layers = [layer_names[i - 1] for i in
                 net.getUnconnectedOutLayers().flatten()]
with open('model/coco.names', 'r') as f:
    classes = [line.strip() for line in f.readlines()]
# Global, mutable app state shared between the camera loop and the routes.
object_count = 0  # objects detected in the most recent processed frame
detection_enabled = True  # toggled at runtime via /toggle_detection
detection_logs = []  # rolling detection log; detect_objects keeps the last 10
current_frame = None # To store the last processed frame (raw, pre-detection)
def detect_objects(frame):
    """Run YOLOv3 on a BGR frame and annotate it in place.

    Draws a green box and label on every detection that survives
    non-maximum suppression, appends a timestamped entry to the global
    detection_logs (last 10 kept), and sets the global object_count.
    Returns the annotated frame.
    """
    global object_count, detection_logs
    height, width, channels = frame.shape
    # 320x320 input trades accuracy for speed; 0.00392 ~= 1/255 normalizes
    # pixel values, swapRB=True converts BGR -> RGB for the network.
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (320, 320), (0, 0, 0), True,
                                 crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)

    class_ids = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            # 0.4 keeps small objects (e.g. earphones) detectable.
            if confidence > 0.4:
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)

    # Fix: the NMS score threshold now matches the 0.4 confidence gate above
    # (it was 0.5, silently discarding detections in [0.4, 0.5)).
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.4, 0.4)

    # Fix: count objects AFTER suppression; counting raw candidates
    # inflated the count with overlapping duplicates of the same object.
    object_count = 0
    if len(indexes) > 0:
        for i in indexes.flatten():
            object_count += 1
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            detection_logs.append(
                f"{label} detected at {datetime.now().strftime('%H:%M:%S')}")
            detection_logs = detection_logs[-10:]  # keep the last 10 logs
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.putText(frame, label, (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    return frame
def gen_frames():
    """Yield multipart MJPEG chunks from the default camera (device 0).

    Stores each raw frame in the global current_frame (for /capture_frame)
    and, when detection_enabled is set, runs detect_objects on the copy
    that gets streamed.
    """
    global current_frame
    cap = cv2.VideoCapture(0)
    try:
        while True:
            success, frame = cap.read()
            if not success:
                print("Failed to capture frame")
                break
            current_frame = frame  # store the current frame for capturing
            if detection_enabled:
                frame = detect_objects(frame)
            ret, buffer = cv2.imencode('.jpg', frame)
            if not ret:
                # Skip frames that fail to encode rather than streaming junk.
                continue
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n'
                   + buffer.tobytes() + b'\r\n')
    finally:
        # Fix: release the camera handle; the original leaked it when the
        # client disconnected or capture failed.
        cap.release()
@app.route('/')
def index():
    """Render the landing page."""
    # NOTE(review): template name was redacted in the source — confirm
    # 'index.html' exists under templates/.
    return render_template('index.html')
@app.route('/detection')
def detection():
    """Render the live-detection page (embeds the /video_feed stream)."""
    # NOTE(review): template name was redacted in the source — confirm
    # 'detection.html' exists under templates/.
    return render_template('detection.html')
@app.route('/video_feed')
def video_feed():
    """Stream the camera as multipart MJPEG (one part per frame)."""
    # Fix: the mimetype string was broken across lines in the source;
    # the boundary name must match the b'--frame' marker in gen_frames().
    return Response(gen_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/object_count')
def object_count_route():
    """Return {'count': N} for the most recently processed frame."""
    # Read-only access: no `global` declaration needed.
    return jsonify({'count': object_count})
@app.route('/detection_logs')
def detection_logs_route():
    """Return {'logs': [...]} — the last (up to 10) detection messages."""
    return jsonify({'logs': detection_logs})
@app.route('/toggle_detection')
def toggle_detection():
    """Enable/disable detection via ?enable=true|false (defaults to true)."""
    global detection_enabled
    detection_enabled = request.args.get('enable', 'true').lower() == 'true'
    return '', 204  # empty response, No Content
@app.route('/capture_frame')
def capture_frame():
    """Save the most recent raw camera frame to static/ and report its path."""
    if current_frame is not None:
        # Fix: make sure the output directory exists before writing.
        os.makedirs('static', exist_ok=True)
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f'static/captured_frame_{stamp}.jpg'
        cv2.imwrite(filename, current_frame)  # save the frame to disk
        # Fix: report the actual filename (the source returned a placeholder).
        return f"Frame captured and saved as {filename}"
    return "No frame to capture"
if __name__ == '__main__':
    # debug=True enables the reloader and debugger — development only;
    # never expose this in production.
    app.run(debug=True)