assile committed
Commit f00c22f · verified · 1 Parent(s): 21c844e

Update run.py

Files changed (1):
  1. run.py +65 -50
run.py CHANGED
@@ -3,86 +3,101 @@ import cv2
  import numpy as np
  from insightface.app import FaceAnalysis
  import tempfile
- from moviepy.editor import VideoFileClip
+ import os

  # Initialize the model
  face_swapper = FaceAnalysis(name="buffalo_l")
  face_swapper.prepare(ctx_id=0, det_size=(640, 640))

- def process_video(source_img, target_video):
+ def swap_face(source_face, target_face, frame, blend_factor=0.7):
+     """Swap the face, with an adjustable blending intensity."""
+     src_emb = source_face.normed_embedding
+     tgt_bbox = target_face.bbox.astype(int)
+
+     # Get the source face (sized to the target box to keep the dimensions)
+     h, w = frame.shape[:2]
+     src_face_img = source_face.img
+     resized_face = cv2.resize(src_face_img, (tgt_bbox[2]-tgt_bbox[0], tgt_bbox[3]-tgt_bbox[1]))
+
+     # Create a soft mask
+     mask = np.zeros_like(resized_face)
+     center = (mask.shape[1]//2, mask.shape[0]//2)
+     radius = int(min(mask.shape) * 0.45)
+     cv2.circle(mask, center, radius, (255,255,255), -1)
+     mask = cv2.GaussianBlur(mask, (15,15), 5)
+
+     # Position the face
+     center = ((tgt_bbox[0]+tgt_bbox[2])//2, (tgt_bbox[1]+tgt_bbox[3])//2)
+
+     # Apply seamlessClone
+     result = cv2.seamlessClone(
+         resized_face, frame, mask, center, cv2.NORMAL_CLONE
+     )
+
+     # Blend with the original according to blend_factor
+     blended = cv2.addWeighted(frame, 1-blend_factor, result, blend_factor, 0)
+
+     return blended
+
+
+ def process_video(source_img, target_video, face_index_source=0, face_index_target=0, blend_factor=0.8):
      try:
-         # Temporary files
          temp_output = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)

-         # Process the video
-         source_face = face_swapper.get(np.array(source_img))[0]
-
+         # Load the source face
+         source_faces = face_swapper.get(np.array(source_img))
+         if not source_faces:
+             raise ValueError("Aucun visage trouvé sur l'image source.")
+         source_face = source_faces[face_index_source]
+
          cap = cv2.VideoCapture(target_video)
          fps = cap.get(cv2.CAP_PROP_FPS)
          frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
          frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-         out = cv2.VideoWriter(temp_output.name,
-                               cv2.VideoWriter_fourcc(*'mp4v'),
-                               fps,
-                               (frame_width, frame_height))
-
+
+         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+         out = cv2.VideoWriter(temp_output.name, fourcc, fps, (frame_width, frame_height))
+
          while cap.isOpened():
              ret, frame = cap.read()
              if not ret:
                  break
-
+
              target_faces = face_swapper.get(frame)
-             if target_faces:
-                 frame = swap_face(source_face, target_faces[0], frame)
-
+             if target_faces and len(target_faces) > face_index_target:
+                 frame = swap_face(source_face, target_faces[face_index_target], frame, blend_factor)
+
              out.write(frame)
-
+
          cap.release()
          out.release()
-
+
          return temp_output.name
-
+
      except Exception as e:
          print(f"ERREUR: {str(e)}")
          return None

- def swap_face(source_face, target_face, frame):
-     # Professional swap algorithm
-     src_bbox = source_face.bbox.astype(int)
-     tgt_bbox = target_face.bbox.astype(int)
-
-     # Extract and resize the source face
-     src_face = cv2.resize(source_face.img[tgt_bbox[1]:tgt_bbox[3], tgt_bbox[0]:tgt_bbox[2]],
-                           (tgt_bbox[2]-tgt_bbox[0], tgt_bbox[3]-tgt_bbox[1]))
-
-     # Realistic blending
-     mask = np.zeros_like(src_face)
-     cv2.circle(mask,
-                (mask.shape[1]//2, mask.shape[0]//2),
-                min(mask.shape)//2,
-                (255,255,255), -1)
-
-     center = ((tgt_bbox[0]+tgt_bbox[2])//2, (tgt_bbox[1]+tgt_bbox[3])//2)
-     output = cv2.seamlessClone(
-         src_face, frame, mask, center, cv2.NORMAL_CLONE)
-
-     return output

- # Interface optimized for video
+ # Gradio interface
  with gr.Blocks() as app:
-     gr.Markdown("## 🎬 VideoFaceSwap Pro")
-
+     gr.Markdown("## 🎬 VideoFaceSwap Pro - Échangez des visages en vidéo avec précision")
+
      with gr.Row():
-         src_img = gr.Image(label="Visage source", type="numpy")
-         tgt_video = gr.Video(label="Vidéo cible")
-
-     btn = gr.Button("Traiter la vidéo", variant="primary")
-     output_video = gr.Video(label="Résultat")
-
+         src_img = gr.Image(label="Visage Source", type="numpy")
+         tgt_video = gr.Video(label="Vidéo Cible")
+
+     with gr.Row():
+         face_index_source = gr.Slider(0, 5, value=0, step=1, label="Index Visage Source")
+         face_index_target = gr.Slider(0, 5, value=0, step=1, label="Index Visage à Remplacer")
+         blend_factor = gr.Slider(0.0, 1.0, value=0.8, label="Intensité du mélange")
+
+     btn = gr.Button("🔁 Traiter la Vidéo", variant="primary")
+     output_video = gr.Video(label="Vidéo Résultat")
+
      btn.click(
          fn=process_video,
-         inputs=[src_img, tgt_video],
+         inputs=[src_img, tgt_video, face_index_source, face_index_target, blend_factor],
          outputs=output_video
      )
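
The new swap_face composites the resized source patch into each frame through a blurred circular mask with cv2.seamlessClone, then softens the result with cv2.addWeighted. The snippet below is a minimal standalone sketch of that compositing step; the image paths and the target box coordinates are hypothetical placeholders rather than part of the commit, and the box is assumed to lie fully inside the frame.

import cv2
import numpy as np

# Hypothetical inputs: any BGR images readable by OpenCV.
frame = cv2.imread("target_frame.jpg")        # destination frame (placeholder path)
patch = cv2.imread("source_face_crop.jpg")    # cropped source face (placeholder path)

# Hypothetical target box (x1, y1, x2, y2); it must lie fully inside the frame.
x1, y1, x2, y2 = 100, 80, 260, 260
patch = cv2.resize(patch, (x2 - x1, y2 - y1))

# Soft circular mask over the patch, as in the committed swap_face.
mask = np.zeros_like(patch)
center_local = (mask.shape[1] // 2, mask.shape[0] // 2)
radius = int(min(mask.shape[:2]) * 0.45)
cv2.circle(mask, center_local, radius, (255, 255, 255), -1)
mask = cv2.GaussianBlur(mask, (15, 15), 5)

# seamlessClone pastes the patch at a centre given in destination coordinates.
center_dst = ((x1 + x2) // 2, (y1 + y2) // 2)
cloned = cv2.seamlessClone(patch, frame, mask, center_dst, cv2.NORMAL_CLONE)

# Extra blend with the untouched frame, like blend_factor in the commit.
blend_factor = 0.8
result = cv2.addWeighted(frame, 1 - blend_factor, cloned, blend_factor, 0)
cv2.imwrite("swapped_frame.jpg", result)

Lowering blend_factor keeps more of the original frame; dropping the cv2.addWeighted pass leaves the raw Poisson-blended output of seamlessClone.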