forked from isyedahmed531/Automated-Interview-project
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: app.py
More file actions
285 lines (214 loc) · 7.49 KB
/
app.py
File metadata and controls
285 lines (214 loc) · 7.49 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
from flask import (
Flask,
jsonify,
render_template,
send_from_directory,
Response,
request,
)
import os
import cv2
import pyaudio
import wave
import threading
from merge import merge_video_and_audio
from trim import read_timestamps_from_file, trim_video_and_extract_audio
from process import process_files
import warnings
warnings.filterwarnings(
"ignore", category=UserWarning, module="multiprocessing.resource_tracker"
)
app = Flask(__name__)

# Shared camera handle; opened lazily by start_video_recording()/genFrames().
# (The original assigned `cap = None` twice; the duplicate is removed.)
cap = None

# Recording state shared across the recorder threads and the Flask routes.
is_recording = False
output_file = "output.mp4"  # The filename for the recorded video in MP4 format

# OpenCV setup for video recording.
frame_width, frame_height = 640, 480
video_writer = cv2.VideoWriter()  # placeholder; re-created per recording

# PyAudio setup for audio recording.
audio_frames = []        # raw PCM chunks accumulated while recording
audio_channels = 1       # mono capture
audio_rate = 44100       # sample rate in Hz
audio_chunk_size = 1024  # frames per buffer read
def start_video_recording():
    """Record webcam frames to `output_file` until `is_recording` is cleared.

    Runs in a background thread. Opens the shared camera lazily, writes
    frames at a nominal 20 FPS, then releases the camera and writer when
    recording stops.
    """
    global is_recording, video_writer, cap
    # Open the camera (if not already opened).
    if cap is None:
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)
    # 'mp4v' FourCC selects an MPEG-4 codec suitable for .mp4 containers.
    codec = cv2.VideoWriter_fourcc(*"mp4v")
    video_writer = cv2.VideoWriter(
        output_file, codec, 20.0, (frame_width, frame_height)
    )
    # Capture loop: runs until the route handler clears the flag.
    while is_recording:
        ret, frame = cap.read()
        if ret:
            video_writer.write(frame)
    cap.release()
    video_writer.release()
    # BUGFIX: reset the handle so the next recording (or genFrames) reopens
    # the camera instead of reading from a released capture object.
    cap = None
# ... (other functions remain the same)
def start_audio_recording():
    """Capture microphone audio into `audio_frames` while recording is active.

    Runs in a background thread alongside start_video_recording().
    """
    global is_recording, audio_frames
    pa = pyaudio.PyAudio()
    stream = pa.open(
        format=pyaudio.paInt16,
        channels=audio_channels,
        rate=audio_rate,
        input=True,
        frames_per_buffer=audio_chunk_size,
    )
    # Accumulate raw PCM chunks until the recording flag is cleared.
    while is_recording:
        chunk = stream.read(audio_chunk_size)
        audio_frames.append(chunk)
    # Tear down the audio stream.
    stream.stop_stream()
    stream.close()
    pa.terminate()
def save_recordings():
    """Write the captured audio frames to a WAV file next to the video.

    The WAV filename is derived from `output_file` by swapping the ``.mp4``
    extension for ``.wav``. Clears `audio_frames` afterwards so a later
    recording starts fresh.
    """
    global audio_frames, output_file
    wav_path = output_file.replace(".mp4", ".wav")
    # Context manager guarantees the file is closed even if a write fails
    # (the original left the handle open on error).
    with wave.open(wav_path, "wb") as wf:
        wf.setnchannels(audio_channels)
        wf.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
        wf.setframerate(audio_rate)
        wf.writeframes(b"".join(audio_frames))
    # Clear audio frames for future recordings.
    audio_frames = []
@app.route("/")
def index():
    """Serve the landing page with the start/stop recording controls."""
    return render_template("index.html")
@app.route("/video_page")
def changePage():
    """Render the interview video page for a given interview code/candidate."""
    code = request.args.get("interviewCode")
    email = request.args.get("candidateEmail")
    print("interview_code:", code, ", ", "candidate_email:", email)
    return render_template(
        "video.html", interview_code=code, candidate_email=email
    )
@app.route("/interview_details")
def interviewPage():
    """Serve the interview-details entry page."""
    return render_template("interview_details.html")
@app.route("/start_recording")
def start_recording():
    """Kick off the video and audio recorder threads.

    No-op if a recording is already in progress. Thread handles are kept
    at module level so stop_recording() can join them before saving
    (the original discarded them, leaving no way to wait for the loops).
    """
    global is_recording, video_thread, audio_thread
    if not is_recording:
        is_recording = True
        video_thread = threading.Thread(target=start_video_recording)
        audio_thread = threading.Thread(target=start_audio_recording)
        video_thread.start()
        audio_thread.start()
    return "Recording started."
@app.route("/stop_recording")
def stop_recording():
    """Stop the recorder threads and persist the captured audio.

    BUGFIX: the original called save_recordings() immediately after
    clearing the flag, racing with the audio thread still appending to
    `audio_frames`. Join the recorder threads first, when their handles
    exist (guarded via globals() so this is safe even if they were
    started without being stored).
    """
    global is_recording
    if is_recording:
        is_recording = False
        for name in ("video_thread", "audio_thread"):
            worker = globals().get(name)
            if worker is not None:
                # Bounded join so a wedged camera can't hang the request.
                worker.join(timeout=5)
        save_recordings()
    return "Recording stopped and saved."
@app.route("/save_seconds_array", methods=["POST"])
def save_seconds_array():
    """Persist the question-timestamp array posted by the frontend."""
    payload = request.get_json()
    # Reject anything that is not a non-empty JSON list.
    if not (payload and isinstance(payload, list)):
        return jsonify({"error": "Invalid data format"}), 400
    print(payload)  # the secondsArray received from the frontend
    # One timestamp per line; read back later by the trimming step.
    with open("seconds_array.txt", "w") as fh:
        fh.writelines(str(entry) + "\n" for entry in payload)
    return jsonify({"message": "Data received and saved successfully!"}), 200
# Expected answers for the current interview, set by /save-answers and
# consumed by /analyze_results.
answers = []


@app.route("/save-answers", methods=["POST"])
def save_answers():
    """Store the candidate's expected answers posted by the frontend."""
    global answers
    payload = request.get_json()
    answers = payload.get("answers", [])
    print(answers)
    return jsonify({"message": "Answers received and saved successfully"})
@app.route("/analyze_results")
def analyze_results():
    """Merge, trim, and analyze the recorded interview; return JSON results.

    Pipeline:
      1. Mux the recorded MP4 video and WAV audio into one file.
      2. Cut that file at the timestamps saved by /save_seconds_array.
      3. Run emotion/answer analysis over the per-question segments.

    Returns a JSON object with ``emotion`` and ``answers_result`` keys.
    """
    video_path = output_file
    audio_path = output_file.replace(".mp4", ".wav")
    merged_output = "merged_output.mp4"

    # 1. Merge audio and video into one file.
    merge_video_and_audio(video_path, audio_path, merged_output)

    # 2. Trim the merged recording into per-question segments.
    timestamps = read_timestamps_from_file("seconds_array.txt")
    trim_video_and_extract_audio(merged_output, timestamps, "output_folder")

    # 3. Score the segments against the expected answers.
    correct_answers = answers
    print("correct_answers", correct_answers)
    emotions, answers_result = process_files(correct_answers)

    all_result = {"emotion": emotions, "answers_result": answers_result}
    print("ans results: ", answers_result)
    print("ans results: ", emotions)
    print(all_result)
    # The original branched on X-Requested-With but both branches returned
    # the same jsonify(all_result); the dead branch is collapsed here.
    return jsonify(all_result)
@app.route("/video_frame")
def video():
    """Stream live camera frames as a multipart MJPEG response."""
    frame_stream = genFrames()
    return Response(
        frame_stream, mimetype="multipart/x-mixed-replace; boundary=frame"
    )
# Route to render the index.html file
# NOTE(review): this duplicates the "/" rule already registered by index()
# above. With Flask/Werkzeug the earlier-registered rule takes precedence,
# so this endpoint appears unreachable — confirm and remove one of the two.
@app.route("/")
def render_index():
    return render_template("index.html")
# Route to serve specific HTML files dynamically
@app.route("/<page_name>")
def serve_page(page_name):
    """Render the named template, rejecting anything that looks like a path.

    ``page_name`` comes straight from the URL, so refuse path separators
    and parent-directory components before handing it to the template
    loader (defense-in-depth against path traversal).
    """
    if "/" in page_name or "\\" in page_name or ".." in page_name:
        return "Not Found", 404
    return render_template(f"{page_name}")
# Route to serve static files (CSS, JS, images, etc.)
# NOTE(review): this rule shadows Flask's built-in /static handler, and it
# serves from "<parent of cwd>/your_static_folder" — "your_static_folder"
# looks like a leftover template placeholder. Confirm the intended
# directory; as written this route likely 404s for every real asset.
@app.route("/static/<path:filename>")
def serve_static(filename):
    root_dir = os.path.dirname(os.getcwd())
    return send_from_directory(os.path.join(root_dir, "your_static_folder"), filename)
def genFrames():
    """Yield JPEG-encoded camera frames as multipart MJPEG chunks.

    Opens the shared camera lazily. Ends the stream when the camera stops
    delivering frames instead of busy-spinning forever (the original's
    ``else: pass`` looped hot on failure, and a bare ``except: pass``
    swallowed every encode error).
    """
    global cap
    if cap is None:
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)
        # Give the camera time to warm up before the first read.
        import time

        time.sleep(2)
    while True:
        success, frame = cap.read()
        if not success:
            # Camera gone or read failed: end the generator (and hence the
            # HTTP stream) rather than spinning in a tight loop.
            break
        # Mirror horizontally so the preview behaves like a mirror.
        ok, buffer = cv2.imencode(".jpg", cv2.flip(frame, 1))
        if not ok:
            continue  # skip frames that fail to encode
        payload = buffer.tobytes()
        yield (
            b"--frame\r\n" b"Content-Type: image/jpeg\r\n\r\n" + payload + b"\r\n"
        )
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger and reloader —
    # appropriate for local development only; it must not run in production.
    app.run(debug=True)