# model_integrated.py
# import time
# import datetime as dt
# import asyncio
# import cv2
# import dlib
# import imutils
# import websockets
# from imutils import face_utils
# from imutils.video import VideoStream
# from scipy.spatial import distance as dist
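#
# # Note: assure_path_exists() is called below but not defined in this file; the real helper
# # presumably lives elsewhere in the repository. A minimal stand-in (an assumption) is
# # sketched here so the script is self-contained:
# import os
#
# def assure_path_exists(path):
#     # Create the directory (and any missing parents) if it does not exist yet
#     os.makedirs(os.path.dirname(path) or path, exist_ok=True)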
# def eye_aspect_ratio(eye):
#     # Vertical eye landmark distances
#     A = dist.euclidean(eye[1], eye[5])
#     B = dist.euclidean(eye[2], eye[4])
#     # Horizontal eye landmark distance
#     C = dist.euclidean(eye[0], eye[3])
#     # The EAR equation
#     EAR = (A + B) / (2.0 * C)
#     return EAR
# def mouth_aspect_ratio(mouth):
#     # Vertical distances between the inner-lip landmarks
#     A = dist.euclidean(mouth[13], mouth[19])
#     B = dist.euclidean(mouth[14], mouth[18])
#     C = dist.euclidean(mouth[15], mouth[17])
#     # MAR is the average of the three vertical distances
#     MAR = (A + B + C) / 3.0
#     return MAR
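#
# # Illustrative only (not part of the original pipeline): EAR computed on synthetic,
# # hypothetical landmark coordinates, showing why a threshold of about 0.3 separates
# # open eyes from (nearly) closed ones.
# # open_eye   = [(0, 0), (1, 1.0), (2, 1.0), (3, 0), (2, -1.0), (1, -1.0)]
# # closed_eye = [(0, 0), (1, 0.1), (2, 0.1), (3, 0), (2, -0.1), (1, -0.1)]
# # eye_aspect_ratio(open_eye)    # ~0.67, comfortably above EAR_THRESHOLD = 0.3
# # eye_aspect_ratio(closed_eye)  # ~0.07, counted towards a blink / drowsiness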
# # All eye and mouth aspect ratios, together with their timestamps
# ear_list = []
# total_ear = []
# mar_list = []
# total_mar = []
# ts = []
# total_ts = []
# async def send_message(websocket, message):
#     await websocket.send(message)
# async def detect_faces_and_send():
#     # Connect to the WebSocket server that receives the drowsiness alerts
#     async with websockets.connect('ws://localhost:5000') as websocket:
#         # Threshold for the EAR value, below which a frame is regarded as a blink
#         EAR_THRESHOLD = 0.3
#         # Number of consecutive frames the eyes must stay closed to count as drowsiness
#         CONSECUTIVE_FRAMES = 20
#         # Threshold for the MAR value, above which the mouth is considered to be yawning
#         MAR_THRESHOLD = 14
#         # Initialize two counters
#         BLINK_COUNT = 0
#         FRAME_COUNT = 0
#         # Initialize dlib's face detector model as 'detector' and the landmark predictor model
#         # as 'predictor' (shape_predictor_68_face_landmarks.dat must be present in the working directory)
#         print("[INFO] Loading the predictor.....")
#         detector = dlib.get_frontal_face_detector()
#         predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
#         # Grab the indexes of the facial landmarks for the left eye, right eye and mouth respectively
#         (lstart, lend) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
#         (rstart, rend) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
#         (mstart, mend) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
#         # Now start the video stream and allow the camera to warm up
#         # (change usePiCamera / the source if you need a different camera)
#         print("[INFO] Loading Camera.....")
#         vs = VideoStream(usePiCamera=False).start()
#         time.sleep(2)
#         assure_path_exists("dataset/")
#         count_sleep = 0
#         count_yawn = 0
#         while True:
#             # Extract a frame
#             frame = vs.read()
#             cv2.putText(frame, "PRESS 'q' TO EXIT", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 3)
#             # Resize the frame
#             frame = imutils.resize(frame, width=500)
#             # Convert the frame to grayscale
#             gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#             # Detect faces
#             rects = detector(frame, 1)
#             # Loop over all the face detections and apply the predictor
#             for (i, rect) in enumerate(rects):
#                 shape = predictor(gray, rect)
#                 # Convert it to a (68, 2) size numpy array
#                 shape = face_utils.shape_to_np(shape)
#                 # Draw a rectangle over the detected face
#                 (x, y, w, h) = face_utils.rect_to_bb(rect)
#                 cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
#                 # Put a label
#                 cv2.putText(frame, "Driver", (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
#                 leftEye = shape[lstart:lend]
#                 rightEye = shape[rstart:rend]
#                 mouth = shape[mstart:mend]
#                 # Compute the EAR for both eyes
#                 leftEAR = eye_aspect_ratio(leftEye)
#                 rightEAR = eye_aspect_ratio(rightEye)
#                 # Take the average of both EARs
#                 EAR = (leftEAR + rightEAR) / 2.0
#                 # Live data, later written to CSV
#                 ear_list.append(EAR)
#                 ts.append(dt.datetime.now().strftime('%H:%M:%S.%f'))
#                 # Compute the convex hull for both eyes and then visualize it
#                 leftEyeHull = cv2.convexHull(leftEye)
#                 rightEyeHull = cv2.convexHull(rightEye)
#                 # Draw the contours
#                 cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
#                 cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
#                 cv2.drawContours(frame, [mouth], -1, (0, 255, 0), 1)
#                 MAR = mouth_aspect_ratio(mouth)
#                 mar_list.append(MAR / 10)
#                 # If EAR < EAR_THRESHOLD a blink is taking place, so count the number of
#                 # consecutive frames for which the eyes remain closed
#                 if EAR < EAR_THRESHOLD:
#                     FRAME_COUNT += 1
#                     cv2.drawContours(frame, [leftEyeHull], -1, (0, 0, 255), 1)
#                     cv2.drawContours(frame, [rightEyeHull], -1, (0, 0, 255), 1)
#                     if FRAME_COUNT >= CONSECUTIVE_FRAMES:
#                         count_sleep += 1
#                         await send_message(websocket, "Sleepy detected!")
#                         # Add the frame to the dataset as proof of drowsy driving
#                         # cv2.imwrite("dataset/frame_sleep%d.jpg" % count_sleep, frame)
#                         # playsound('sound files/alarm.mp3')
#                         # send_notice()
#                         # cv2.putText(frame, "DROWSINESS ALERT!", (270, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
#                 else:
#                     # if FRAME_COUNT >= CONSECUTIVE_FRAMES:
#                     #     playsound('sound files/warning.mp3')
#                     FRAME_COUNT = 0
#                 # cv2.putText(frame, "EAR: {:.2f}".format(EAR), (300, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
#                 # Check if the person is yawning
#                 if MAR > MAR_THRESHOLD:
#                     count_yawn += 1
#                     cv2.drawContours(frame, [mouth], -1, (0, 0, 255), 1)
#                     cv2.putText(frame, "DROWSINESS ALERT!", (270, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
#                     await send_message(websocket, "Sleepy detected!")
#                     # Add the frame to the dataset as proof of drowsy driving
#                     # cv2.imwrite("dataset/frame_yawn%d.jpg" % count_yawn, frame)
#                     # playsound('sound files/alarm.mp3')
#                     # playsound('sound files/warning_yawn.mp3')
#             # Show the annotated frame and exit the loop when 'q' is pressed
#             cv2.imshow("Output", frame)
#             if cv2.waitKey(1) & 0xFF == ord('q'):
#                 break
#         # total data collection for plotting
#         cv2.destroyAllWindows()
#         vs.stop()
# # Run the asyncio event loop
# asyncio.run(detect_faces_and_send())
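#
# # The alerts above are pushed to ws://localhost:5000, which is assumed to be served by a
# # separate process; that server is not part of this file. As a rough, hypothetical sketch,
# # a minimal receiver using the same websockets library could look like the following
# # (on older websockets versions the handler also takes a `path` argument):
# #
# # import asyncio
# # import websockets
# #
# # async def handle_alerts(websocket):
# #     # Print every "Sleepy detected!" message sent by detect_faces_and_send()
# #     async for message in websocket:
# #         print("Alert received:", message)
# #
# # async def main():
# #     async with websockets.serve(handle_alerts, "localhost", 5000):
# #         await asyncio.Future()  # run forever
# #
# # asyncio.run(main())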