-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathold_logic.py
144 lines (108 loc) · 4.58 KB
/
old_logic.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
# This module is an integrated approach to face detection in video: for each
# frame, faces are extracted and compared at the same time, so there is less
# burden on RAM and no need to store them on disk as images or npz files.
import numpy as np
from PIL import Image
from cv2 import cv2
from mtcnn_cv2 import MTCNN
from numpy import asarray
import rec_by_deepface as df
def compare_faces(pic_face, vid_url):
    """Scan a video for spans of frames containing a face that matches *pic_face*.

    Parameters
    ----------
    pic_face : array-like
        Reference face image handed to the deepface verifier.
    vid_url : str
        Path containing the marker "InVidett"; the real video path is
        everything after the last occurrence of that marker, minus one
        leading separator character.

    Returns
    -------
    list[int]
        Flat list of alternating (first_frame, last_frame) values marking
        the frame spans in which the reference face was recognised.
    """
    # Extract the real video path from the app-specific URL format.
    vid_url = vid_url.rsplit("InVidett", 1)[1][1:]

    cap = cv2.VideoCapture(vid_url)
    if not cap.isOpened():
        # VideoCapture does not raise on a bad path — it simply opens nothing.
        # The original bare `except` around cap.read() could never fire.
        print("URL/selection for the video file is not valid:", vid_url)
        return []

    # Create the detector ONCE. Instantiating MTCNN per frame (as the
    # original did) reloads the model on every iteration and dominates runtime.
    detector = MTCNN()

    countframes = 1
    # Span-tracking state:
    first_marked_frame = 0   # frame where the current match span started (0 = no open span)
    last_marked_frame = 0    # frame where the current match span ended
    not_matched = 0          # consecutive detected-but-unrecognised faces
    outer_no_face = 0        # consecutive processed frames with no face at all
    match = 0                # matches seen in the current span
    tracked_list = []        # output: flattened (first, last) pairs

    while True:
        ret, frame = cap.read()
        countframes += 1
        if not ret or frame is None:
            # End of stream (or decode failure) — explicit check instead of
            # catching the TypeError from slicing None.
            print("video file has no more frames, total frames= ", countframes)
            break

        # Speed-up: process only every 5th frame (~6 fps from a 30 fps source).
        if countframes % 5 != 0:
            continue

        # OpenCV yields BGR; the detector prefers RGB.
        rgb_frame = frame[:, :, ::-1]
        result = detector.detect_faces(rgb_frame)
        print(result)

        if result:
            outer_no_face = 0
            frame_array = np.array(rgb_frame)
            # Check every detected face in this frame until one matches.
            for face in result:
                # Bounding box is top-left corner plus width/height.
                x1, y1, width, height = face['box']
                x2, y2 = x1 + width, y1 + height
                # Crop the face and resize to the 160x160 input Facenet expects.
                face_img = Image.fromarray(frame_array[y1:y2, x1:x2])
                frame_face = asarray(face_img.resize((160, 160)))
                # deepface comparison against the reference face.
                recognised = df.verify(pic_face, frame_face, "Facenet")
                if recognised:
                    match += 1
                    if match == 1:
                        first_marked_frame = countframes
                        print("First Match at frame: ", countframes)
                    not_matched = 0
                    print("Match is True for frame: ", first_marked_frame, "to ", countframes)
                    break  # one recognised face per frame is enough
                else:
                    not_matched += 1
                    if not_matched == 6:
                        # 6 consecutive misses at 1-in-5 sampling ≈ 30 source
                        # frames; close the span retroactively.
                        last_marked_frame = countframes - 30
                        print("Last Match at frame: ", countframes - 30)
                        # Only record a span if one was actually open —
                        # the original could append a bogus first frame of 0.
                        if first_marked_frame > 0 and last_marked_frame > 0:
                            tracked_list.append(first_marked_frame)
                            tracked_list.append(last_marked_frame)
                        match = 0
                        first_marked_frame = 0
                        last_marked_frame = 0
        else:
            outer_no_face += 1
            if outer_no_face == 6:
                # ~30 source frames with no face at all: close any open span.
                if first_marked_frame > 0:
                    last_marked_frame = countframes - 30
                    tracked_list.append(first_marked_frame)
                    tracked_list.append(last_marked_frame)
                    match = 0
                    first_marked_frame = 0
                    last_marked_frame = 0
                    print("Last Matched frame: ", countframes - 30)

        print("Total frames processed: ", countframes)

    # Release the capture handle before returning.
    cap.release()
    return tracked_list