-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrealitimeDetection.py
More file actions
90 lines (64 loc) · 2.43 KB
/
realitimeDetection.py
File metadata and controls
90 lines (64 loc) · 2.43 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
# packages import
import time
import cv2
import streamlit as st
from ultralytics import YOLO

# Load the fine-tuned drowsiness classifier.
# NOTE(review): the original code first loaded "yolov8n.pt" and then
# immediately rebound `model` to the custom weights — the first load was
# dead code and wasted startup time, so it has been removed.
model = YOLO("runs/classify/train3/weights/best.pt")
def Detect_RealTime():
    """Stream webcam frames through the drowsiness classifier in Streamlit.

    Renders Start/Stop buttons, classifies each frame with the module-level
    ``model``, and fires an audible alarm (via ``alarm_user``) once the
    "drowsy" class probability stays above the threshold for 5 continuous
    seconds. Returns early with an error message if the camera cannot open.
    """
    st_frame = st.empty()  # container for the streamed frame

    # Initialize ALL session-state keys used below. The original only
    # initialized run_detection, so the first read of drowsy_start_time /
    # drowsy_warning_triggered raised an AttributeError on a fresh session.
    if "run_detection" not in st.session_state:
        st.session_state.run_detection = False
    if "drowsy_start_time" not in st.session_state:
        st.session_state.drowsy_start_time = None
    if "drowsy_warning_triggered" not in st.session_state:
        st.session_state.drowsy_warning_triggered = False

    # start button
    if st.button("Start Real-Time Detection", key="start_btn"):
        st.session_state.run_detection = True
    # stop button
    if st.button("Stop Detection", key="Stop_btn"):
        st.session_state.run_detection = False

    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        st.error("Camera Failed to Open!!")
        return

    # Guarantee the webcam is released even if the loop raises.
    try:
        while st.session_state.run_detection:
            ret, frame = cap.read()
            if not ret:
                st.warning("No frame captured. Camera Off?")
                break

            results = model(frame)
            # Classification head output; index 0 = awake, index 1 = drowsy
            # (assumed from the original variable names — TODO confirm the
            # class order against the training data).
            probs = results[0].probs.data.tolist()
            drowsy_prob = probs[1]

            # Drowsy-probability cutoff for starting/continuing the timer.
            threshold = 0.51
            is_drowsy_now = drowsy_prob >= threshold
            current_time = time.time()

            if is_drowsy_now:
                if st.session_state.drowsy_start_time is None:
                    st.session_state.drowsy_start_time = current_time
                else:
                    elapsed = current_time - st.session_state.drowsy_start_time
                    # Alarm once after 5 continuous seconds of drowsiness;
                    # the flag stops it re-firing every frame.
                    if elapsed >= 5 and not st.session_state.drowsy_warning_triggered:
                        st.warning("Wake Up!!!!")
                        alarm_user()
                        st.session_state.drowsy_warning_triggered = True
            else:
                # Reset timer and alarm flag as soon as the subject is awake.
                st.session_state.drowsy_start_time = None
                st.session_state.drowsy_warning_triggered = False

            annotated = results[0].plot()
            # Convert BGR (OpenCV) to RGB for Streamlit
            rgb = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
            st_frame.image(rgb, channels='RGB')
    finally:
        cap.release()
def alarm_user():
    """Play the alarm sound in the browser.

    Renders a Streamlit audio widget and additionally injects an autoplaying
    ``<audio>`` tag, because ``st.audio`` alone does not auto-start playback.
    A missing sound file is reported with ``st.error`` instead of crashing
    the detection loop.
    """
    # Local import kept deliberately: only needed when the alarm fires.
    import base64

    audio_path = "Apple_alarm.mp3"
    try:
        with open(audio_path, 'rb') as f:
            audio_bytes = f.read()
    except FileNotFoundError:
        # Original code let this propagate and killed the detection loop.
        st.error(f"Alarm sound file not found: {audio_path}")
        return

    st.audio(audio_bytes, format='audio/mp3')

    encoded = base64.b64encode(audio_bytes).decode()
    audio_html = f"""
    <audio autoplay>
        <source src="data:audio/mp3;base64,{encoded}" type="audio/mp3">
    </audio>
    """
    st.markdown(audio_html, unsafe_allow_html=True)