-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
293 lines (259 loc) · 10.3 KB
/
main.py
File metadata and controls
293 lines (259 loc) · 10.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
import os
import sys
import time
import socket
import json
import logging
from argparse import ArgumentParser
from pathlib import Path

import cv2
import numpy as np

from lib.face_detector import FaceDetector
from lib.gaze_estimator import GazeEstimator
from lib.head_pose_estimator import HeadposeEstimator
from lib.input_feeder import InputFeeder
from lib.landmark_detector import LandmarkDetector
from lib.mouse_controller import MouseController

# Configure the root logger: every record of level DEBUG and above is
# written to main.log, which is truncated on each run (filemode='w').
logging.basicConfig(filename="main.log",
                    format='%(asctime)s %(message)s',
                    filemode='w')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def build_argparser():
    """
    Build the command line argument parser for the gaze-driven mouse app.

    Required arguments are the four model XML paths and the input source;
    optional arguments select the device, detection threshold and preview.

    Return:
        parser (ArgumentParser): configured argument parser
    """
    parser = ArgumentParser()
    parser.add_argument("-ftm",
                        "--face_det_m",
                        required=True,
                        type=str,
                        help="Path to an xml file of Face Detection Model.")
    parser.add_argument("-ldm",
                        "--lmar_det_m",
                        required=True,
                        type=str,
                        help="Path to an xml file of Landmark Detection model")
    parser.add_argument(
        "-hem",
        "--h_pose_m",
        required=True,
        type=str,
        help="Path to an xml file of Head Pose Estimation model.")
    parser.add_argument("-gem",
                        "--g_est_m",
                        required=True,
                        type=str,
                        help="Path to an xml file of Gaze Estimation Model.")
    parser.add_argument("-i",
                        "--input",
                        required=True,
                        type=str,
                        help="Path to image or video file")
    parser.add_argument("-d",
                        "--device",
                        type=str,
                        default="CPU",
                        help="Specify the target device to infer on: "
                        "CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
                        "will look for a suitable plugin for device "
                        "specified (CPU by default)")
    parser.add_argument(
        "-pt",
        "--prob_threshold",
        type=float,
        default=0.5,
        # BUGFIX: the two adjacent string literals previously joined as
        # "filtering(0.5 by default)" with no separating space.
        help="Probability threshold for face detections filtering "
        "(0.5 by default)")
    parser.add_argument(
        "-pr",
        "--preview",
        action='store_true',
        help="Use this flag if you want to preview visualizations on person face")
    return parser
def infer_on_stream(args) -> None:
    """
    Handle the input stream and perform inference frame by frame.

    Steps:
        Check that all input files exist and the input format is supported
        Load all four models (face, landmarks, head pose, gaze)
        For every frame:
            Detect and crop the face
            Detect landmarks and crop the left and right eye
            Estimate head pose on the cropped face
            Estimate gaze from both eyes and the head pose
            Move the mouse pointer according to the estimated gaze

    Parameters:
        args: parsed command line arguments (see build_argparser)
    Returns:
        None
    """
    # Check if all input files are present on disk.
    # "CAM" is a pseudo-path meaning the webcam, so it is exempt.
    for _ in [
            args.face_det_m, args.lmar_det_m, args.h_pose_m, args.g_est_m,
            args.input
    ]:
        if not Path(_).is_file() and str(_).upper() != "CAM":
            error_message = "This file is not Present: \"{}\" Check the file please".\
                format(_)
            logger.error(error_message)
            # Abort: a missing model/input file makes any inference impossible.
            sys.exit(error_message)
        else:
            logger.info(
                "input files: {} is available on specified path".format(_))
    ### Handle the input stream ###
    # extension of input file (lowercased for case-insensitive matching)
    input_extension = os.path.splitext(args.input)[1].lower()
    # extensions accepted as video / image input
    supported_vid_exts = ['.mp4', '.mpeg', '.avi', '.mkv']
    supported_img_exts = [
        ".bmp", ".dib", ".jpeg", ".jp2", ".jpg", ".jpe", ".png", ".pbm",
        ".pgm", ".ppm", ".sr", ".ras", ".tiff", ".tif"
    ]
    # if input is camera
    if args.input.upper() == 'CAM':
        input_type = "cam"
    # if input is video
    elif input_extension in supported_vid_exts:
        input_type = "video"
    # if input is image
    elif input_extension in supported_img_exts:
        input_type = "image"
    else:
        logger.error("Input file: {} is not Supported".format(args.input))
        sys.exit("FATAL ERROR : The format of your input file is not supported" \
            "\nsupported extensions are : " + ", "\
            .join(supported_img_exts + supported_vid_exts))
    ### Load All Models
    ## Load Face Detector Model
    face_detector = FaceDetector(args.face_det_m, args.device)
    face_detector.load_model()
    logger.info("Face Detection model loaded successfully")
    # Load Headpose Estimator Model
    headpose_estimator = HeadposeEstimator(args.h_pose_m, args.device)
    headpose_estimator.load_model()
    logger.info("Headpose Estimator model loaded successfully")
    ## Load Landmark Detector Model
    landmark_detector = LandmarkDetector(args.lmar_det_m, args.device)
    landmark_detector.load_model()
    logger.info("Landmark Detector model loaded successfully")
    ## Load Gaze Estimation Model
    gaze_estimator = GazeEstimator(args.g_est_m, args.device)
    gaze_estimator.load_model()
    logger.info("Gaze Estimation model loaded successfully")
    ### Initialize Input Feeder with the detected input type
    input_feeder = InputFeeder(input_type, args.input)
    # presumably load_data returns the frame (width, height) — used below
    # to scale detection coordinates back to the original frame size
    (initial_w, initial_h) = input_feeder.load_data()
    logger.info("Input Feeder loaded successfully")
    # number of frames processed so far (for logging only)
    f_count = 0
    ### Move Mouse to the center before the loop starts
    mouse_controler = MouseController("medium", "fast")
    mouse_controler.move_to_center()
    ### Iterate through input file frame by frame
    ### see `InputFeeder.next_batch` method for more detail
    for ret, frame in input_feeder.next_batch():
        # break if no next frame present (end of stream)
        if not ret:
            break
        f_count += 1
        logger.info("Processing Frame: {}".format(f_count))
        ### Detect Face in Frame
        output = face_detector.predict(frame)
        ## Crop Face (filtered by the probability threshold)
        face, face_coords = face_detector.postprocess_output(
            output, args.prob_threshold, frame, initial_w, initial_h)
        # skip frame if face not found (empty/all-zero crop)
        if not np.any(face):
            print("Face Not found in Frame\tSkipping Frame")
            logger.warning("Face Not found in Frame\tSkipping Frame")
            continue
        ### Estimate HeadPose on the cropped face
        head_pose = headpose_estimator.predict(face)
        logger.info("Head Pose Estimation complete")
        ### Detect Face Landmarks on the cropped face
        landmarks = landmark_detector.predict(face)
        logger.info("Face Landmarks detected")
        ## Crop left and right Eye from the face using the landmarks
        left_eye, left_eye_coords, right_eye, right_eye_coords = landmark_detector.postprocess_output(
            landmarks, face)
        ## Skip frame if any eye crop came back with a zero dimension
        if 0 in left_eye.shape or 0 in right_eye.shape:
            print("Issue in Eye Cropping. \nSkipping this Frame ...")
            logger.warning("Issue in Eye Cropping. \nSkipping this Frame ...")
            continue
        logger.info("Both Eyes cropped successfuly")
        ### Estimate Gaze from both eye crops and the head pose
        gaze = gaze_estimator.predict(left_eye, right_eye, head_pose)
        logger.info("Gaze Estimated successfully")
        ## Convert the gaze vector into mouse coords (x, y)
        mouse_coords = gaze_estimator.preprocess_output(gaze, head_pose)
        logger.info("New mouse coordinates: {}".format(mouse_coords))
        # Show Preview of input with drawn predictions (opt-in via --preview)
        if (args.preview):
            # helper: draw a rectangle around one eye in the full frame
            def rectange_eyes(frame, face_coords, eye_coords):
                """Draw bounding box around Eye"""
                # Eye coordinates are relative to the cropped face, so they
                # are offset by the face's top-left corner in the frame.
                eye_start = (
                    (face_coords[0][0] + eye_coords[0][0]),  # x_min + x_min
                    (face_coords[0][1] + eye_coords[0][1]))  # y_min + y_min
                eye_end = (
                    (face_coords[0][0] + eye_coords[1][0]),  # x_min + x_max
                    (face_coords[0][1] + eye_coords[1][1]))  # y_min + y_max
                return cv2.rectangle(frame, eye_start, eye_end, (0, 0, 255), 2)
            # draw box around face
            image = cv2.rectangle(frame, face_coords[0], face_coords[1],
                                  (0, 0, 255), 2)
            # draw box around left eye
            image = rectange_eyes(image, face_coords, left_eye_coords)
            # draw box around right eye
            image = rectange_eyes(image, face_coords, right_eye_coords)
            # show head pose values (yaw/pitch/roll) on image
            cv2.putText(
                image,
                "Head Pose: Yaw: {:.2f}, Pitch: {:.2f}, Role: {:.2f}".format(
                    head_pose["yaw"],
                    head_pose["pitch"],
                    head_pose["role"],
                ), (40, 40), cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 255, 0), 1)
            # show gaze vector values on image
            cv2.putText(
                image,
                "Gaze: X-axis: {:.2f}, Y-axis: {:.2f}, Z-axis: {:.2f}".format(
                    gaze[0][0], gaze[0][1], gaze[0][2]), (40, 70),
                cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 255, 0), 1)
            cv2.imshow('Preview | press q to close', image)
            # break loop if q is pressed on output window
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        print("New mouse coordinates: {}\n\n".format(mouse_coords))
        ### Move Mouse to the estimated position
        mouse_controler.move(mouse_coords[0], mouse_coords[1])
        # go to next frame
    ### Processing Complete delete resources
    print("Input File is complete \nClearing Resources")
    logger.info("Input File is complete Clearing Resources")
    input_feeder.close()
    # Drop model references so the heavy inference resources can be reclaimed.
    del face_detector
    del landmark_detector
    del headpose_estimator
    del gaze_estimator
    logger.info("Most Heavy Resources are Free now")
def main():
    """
    Program entry point.

    Parses the command line arguments and runs the inference loop on the
    requested input stream, logging start and completion.

    Returns:
        None
    """
    logger.info("Starting Program")
    # Parse command line arguments into a namespace
    cli_args = build_argparser().parse_args()
    # Run the full per-frame inference pipeline on the input stream
    infer_on_stream(cli_args)
    logger.info("Every Thing Complete Exiting Program")


if __name__ == "__main__":
    main()