# mmesa-gitex/app/video_processing.py
import cv2
import numpy as np
import torch
from PIL import Image
import mediapipe as mp
# Grad-CAM overlay helper; used below but missing from the original imports.
from pytorch_grad_cam.utils.image import show_cam_on_image

from app.model import pth_model_static, pth_model_dynamic, cam, pth_processing
from app.face_utils import get_box, display_info
from app.config import config_data
from app.plot import statistics_plot
from .au_processing import features_to_au_intensities, au_statistics_plot

mp_face_mesh = mp.solutions.face_mesh
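
# Pipeline: MediaPipe FaceMesh locates the face in each frame; the crop feeds
# the static model (features and AU intensities) and a Grad-CAM heatmap, and a
# sliding window of static features feeds the dynamic model, which yields
# per-frame probabilities over 7 classes.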
def preprocess_video_and_predict(video):
    cap = cv2.VideoCapture(video)
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = np.round(cap.get(cv2.CAP_PROP_FPS))

    # Writers for the cropped-face video and the Grad-CAM heatmap video.
    path_save_video_face = 'result_face.mp4'
    vid_writer_face = cv2.VideoWriter(path_save_video_face, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224))

    path_save_video_hm = 'result_hm.mp4'
    vid_writer_hm = cv2.VideoWriter(path_save_video_hm, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224))

    # Sliding window of static-model features for the dynamic model, per-frame
    # results, and the most recent outputs for reuse on downsampled frames.
    lstm_features = []
    count_frame = 1
    count_face = 0
    probs = []
    frames = []
    au_intensities_list = []
    last_output = None
    last_heatmap = None
    last_au_intensities = None
    cur_face = None
    with mp_face_mesh.FaceMesh(
            max_num_faces=1,
            refine_landmarks=False,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5) as face_mesh:

        while cap.isOpened():
            _, frame = cap.read()
            if frame is None:
                break

            # MediaPipe expects RGB; mark the frame read-only during detection.
            frame_copy = frame.copy()
            frame_copy.flags.writeable = False
            frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
            results = face_mesh.process(frame_copy)
            frame_copy.flags.writeable = True
            if results.multi_face_landmarks:
                for fl in results.multi_face_landmarks:
                    startX, startY, endX, endY = get_box(fl, w, h)
                    cur_face = frame_copy[startY:endY, startX:endX]

                    # Run the models only on every FRAME_DOWNSAMPLING-th face;
                    # intermediate frames reuse the most recent results.
                    if count_face % config_data.FRAME_DOWNSAMPLING == 0:
                        cur_face_copy = pth_processing(Image.fromarray(cur_face))
                        with torch.no_grad():
                            features = torch.nn.functional.relu(
                                pth_model_static.extract_features(cur_face_copy)).detach().numpy()
                            au_intensities = features_to_au_intensities(pth_model_static(cur_face_copy))

                        # Grad-CAM heatmap overlaid on the normalized face crop.
                        grayscale_cam = cam(input_tensor=cur_face_copy)
                        grayscale_cam = grayscale_cam[0, :]
                        cur_face_hm = cv2.resize(cur_face, (224, 224), interpolation=cv2.INTER_AREA)
                        cur_face_hm = np.float32(cur_face_hm) / 255
                        heatmap = show_cam_on_image(cur_face_hm, grayscale_cam, use_rgb=False)
                        last_heatmap = heatmap
                        last_au_intensities = au_intensities

                        # Maintain a 10-step feature window: seed it with the
                        # first features, then shift by one step per update.
                        if len(lstm_features) == 0:
                            lstm_features = [features] * 10
                        else:
                            lstm_features = lstm_features[1:] + [features]

                        lstm_f = torch.from_numpy(np.vstack(lstm_features))
                        lstm_f = torch.unsqueeze(lstm_f, 0)
                        with torch.no_grad():
                            output = pth_model_dynamic(lstm_f).detach().numpy()
                        last_output = output

                        if count_face == 0:
                            count_face += 1
                    else:
                        # Downsampled frame: reuse the last results, or emit
                        # NaNs if nothing has been computed yet.
                        if last_output is not None:
                            output = last_output
                            heatmap = last_heatmap
                            au_intensities = last_au_intensities
                        else:
                            output = np.full((1, 7), np.nan)
                            au_intensities = np.full(24, np.nan)

                    probs.append(output[0])
                    frames.append(count_frame)
                    au_intensities_list.append(au_intensities)
            else:
                # Face lost: reset the feature window and log NaN rows so the
                # plots stay aligned with the frame index.
                if last_output is not None:
                    lstm_features = []
                    empty = np.full(7, np.nan)
                    probs.append(empty)
                    frames.append(count_frame)
                    au_intensities_list.append(np.full(24, np.nan))
            # Write the annotated face crop and heatmap for this frame.
            if cur_face is not None:
                heatmap_f = display_info(heatmap, 'Frame: {}'.format(count_frame), box_scale=.3)

                cur_face = cv2.cvtColor(cur_face, cv2.COLOR_RGB2BGR)
                cur_face = cv2.resize(cur_face, (224, 224), interpolation=cv2.INTER_AREA)
                cur_face = display_info(cur_face, 'Frame: {}'.format(count_frame), box_scale=.3)
                vid_writer_face.write(cur_face)
                vid_writer_hm.write(heatmap_f)

            count_frame += 1
            if count_face != 0:
                count_face += 1

        vid_writer_face.release()
        vid_writer_hm.release()
    stat = statistics_plot(frames, probs)
    au_stat = au_statistics_plot(frames, au_intensities_list)

    if not stat or not au_stat:
        return None, None, None, None, None

    return video, path_save_video_face, path_save_video_hm, stat, au_stat
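
# Usage sketch (hypothetical caller; 'input.mp4' is a placeholder path, not a
# file shipped with the repo):
#
#     video, face_path, hm_path, stat, au_stat = preprocess_video_and_predict('input.mp4')
#
# face_path and hm_path point to the rendered result videos; stat and au_stat
# are the figures produced by statistics_plot and au_statistics_plot.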