Ruihang committed on
Commit
857bc90
·
1 Parent(s): 25daebd
Files changed (42)
  1. README.md +136 -3
  2. assets/construction.png +3 -0
  3. assets/statistics_1.png +3 -0
  4. assets/statistics_2.png +3 -0
  5. assets/wan-move-logo.png +3 -0
  6. bench.py +228 -0
  7. en.tar.gz +3 -0
  8. utils/.DS_Store +0 -0
  9. utils/__init__.py +8 -0
  10. utils/__pycache__/__init__.cpython-310.pyc +0 -0
  11. utils/__pycache__/__init__.cpython-312.pyc +0 -0
  12. utils/__pycache__/__init__.cpython-39.pyc +0 -0
  13. utils/__pycache__/clip.cpython-310.pyc +0 -0
  14. utils/__pycache__/clip.cpython-312.pyc +0 -0
  15. utils/__pycache__/clip.cpython-39.pyc +0 -0
  16. utils/__pycache__/epe.cpython-310.pyc +0 -0
  17. utils/__pycache__/epe.cpython-312.pyc +0 -0
  18. utils/__pycache__/epe.cpython-39.pyc +0 -0
  19. utils/__pycache__/fid.cpython-310.pyc +0 -0
  20. utils/__pycache__/fid.cpython-312.pyc +0 -0
  21. utils/__pycache__/fid.cpython-39.pyc +0 -0
  22. utils/__pycache__/fvd.cpython-310.pyc +0 -0
  23. utils/__pycache__/fvd.cpython-312.pyc +0 -0
  24. utils/__pycache__/lpips.cpython-310.pyc +0 -0
  25. utils/__pycache__/lpips.cpython-312.pyc +0 -0
  26. utils/__pycache__/pytorch_i3d.cpython-310.pyc +0 -0
  27. utils/__pycache__/pytorch_i3d.cpython-312.pyc +0 -0
  28. utils/__pycache__/ssim_psnr.cpython-310.pyc +0 -0
  29. utils/__pycache__/ssim_psnr.cpython-312.pyc +0 -0
  30. utils/__pycache__/video.cpython-310.pyc +0 -0
  31. utils/__pycache__/video.cpython-312.pyc +0 -0
  32. utils/clip.py +29 -0
  33. utils/epe.py +45 -0
  34. utils/fid.py +16 -0
  35. utils/fvd.py +142 -0
  36. utils/lpips.py +13 -0
  37. utils/pytorch_i3d.py +372 -0
  38. utils/ssim_psnr.py +140 -0
  39. utils/video.py +51 -0
  40. utils/weights/.DS_Store +0 -0
  41. utils/weights/i3d_pretrained_400.pt +3 -0
  42. zh.tar.gz +3 -0
README.md CHANGED
@@ -1,3 +1,136 @@
1
- ---
2
- license: mit
3
- ---
1
+ ---
2
+ license: mit
3
+ task_categories:
4
+ - image-to-video
5
+ tags:
6
+ - video-generation
7
+ - motion-control
8
+ - point-trajectory
9
+ ---
10
+
11
+ # MoveBench of Wan-Move
12
+
13
+ <p align="center">
14
+ <img src="assets/wan-move-logo.png" alt="Wan-Move logo" style="width: 100%; min-width: 300px; display: block; margin: auto;">
15
+ </p>
16
+
17
+ # Wan-Move: Motion-controllable Video Generation via Latent Trajectory Guidance
18
+
19
+ [![Paper](https://img.shields.io/badge/ArXiv-Paper-brown)](https://arxiv.org/abs/xx)
20
+ [![Code](https://img.shields.io/badge/GitHub-Code-blue)](https://github.com/ali-vilab/Wan-Move)
21
+ [![Model](https://img.shields.io/badge/HuggingFace-Model-yellow)](https://huggingface.co/Ruihang/Wan-Move-14B-480P)
22
+ [![Model](https://img.shields.io/badge/ModelScope-Model-violet)](https://www.modelscope.cn/models/Ruihang/Wan-Move-14B-480P)
23
+ [![Model](https://img.shields.io/badge/HuggingFace-MoveBench-cyan)](https://huggingface.co/Ruihang/MoveBench)
24
+ [![Video](https://img.shields.io/badge/YouTube-Video-red)](https://www.youtube.com/watch?v=_5Cy7Z2NQJQ)
25
+ [![Website](https://img.shields.io/badge/Demo-Page-brown)](https://ruihang-chu.github.io/Wan-Move.html)
26
+
27
+
28
+
29
+ ## MoveBench: A Comprehensive and Well-Curated Benchmark to Assess Motion Control in Videos
30
+
31
+
32
+ MoveBench evaluates fine-grained, point-level motion control in generated videos. We categorize the video library from [Pexels](https://www.pexels.com/videos/) into 54 content categories with 10-25 videos each, yielding 1018 cases that ensure broad scenario coverage. All clips are 5 seconds long to support evaluation of long-range dynamics. Every clip is paired with detailed motion annotations for a single object, and an additional 192 clips carry motion annotations for multiple objects. We ensure annotation quality with an interactive labeling pipeline that combines annotation precision with automated scalability.
33
+
34
+ We welcome everyone to use it!
35
+
36
+
37
+
38
+
39
+ ## Statistics
40
+
41
+ <p align="center" style="border-radius: 10px">
42
+ <img src="assets/construction.png" width="100%" alt="logo"/>
43
+ <strong>The construction pipeline of MoveBench</strong>
44
+ </p>
45
+
46
+ <p align="center" style="border-radius: 10px">
47
+ <img src="assets/statistics_1.png" width="100%" alt="logo"/>
48
+ <strong>Balanced number of samples per video category</strong>
49
+ </p>
50
+
51
+ <p align="center" style="border-radius: 10px">
52
+ <img src="assets/statistics_2.png" width="100%" alt="logo"/>
53
+ <strong>Comparison with related benchmarks </strong>
54
+ </p>
55
+
56
+ ## Download
57
+
58
+
59
+ Download MoveBench from Hugging Face:
60
+ ``` sh
61
+ huggingface-cli download Ruihang/MoveBench --local-dir ./MoveBench
62
+ ```
63
+
64
+ Extract the two archives:
65
+ ``` sh
66
+ tar -xzvf en.tar.gz
67
+ tar -xzvf zh.tar.gz
68
+ ```
69
+
70
+ The file structure will be:
71
+
72
+ ```
73
+ MoveBench
74
+ ├── en # English version
75
+ │ ├── single_track.txt
76
+ │ ├── multi_track.txt
77
+ │ ├── first_frame
78
+ │ │ ├── Pexels_videoid_0.jpg
79
+ │ │ ├── Pexels_videoid_1.jpg
80
+ │ │ ├── ...
81
+ │ ├── video
82
+ │ │ ├── Pexels_videoid_0.mp4
83
+ │ │ ├── Pexels_videoid_1.mp4
84
+ │ │ ├── ...
85
+ │ ├── track
86
+ │ │ ├── single
87
+ │ │ │ ├── Pexels_videoid_0_tracks.npy
88
+ │ │ │ ├── Pexels_videoid_0_visibility.npy
89
+ │ │ │ ├── ...
90
+ │ │ ├── multi
91
+ │ │ │ ├── Pexels_videoid_0_tracks.npy
92
+ │ │ │ ├── Pexels_videoid_0_visibility.npy
93
+ │ │ │ ├── ...
94
+ ├── zh # Chinese version
95
+ │ ├── single_track.txt
96
+ │ ├── multi_track.txt
97
+ │ ├── first_frame
98
+ │ │ ├── Pexels_videoid_0.jpg
99
+ │ │ ├── Pexels_videoid_1.jpg
100
+ │ │ ├── ...
101
+ │ ├── video
102
+ │ │ ├── Pexels_videoid_0.mp4
103
+ │ │ ├── Pexels_videoid_1.mp4
104
+ │ │ ├── ...
105
+ │ ├── track
106
+ │ │ ├── single
107
+ │ │ │ ├── Pexels_videoid_0_tracks.npy
108
+ │ │ │ ├── Pexels_videoid_0_visibility.npy
109
+ │ │ │ ├── ...
110
+ │ │ ├── multi
111
+ │ │ │ ├── Pexels_videoid_0_tracks.npy
112
+ │ │ │ ├── Pexels_videoid_0_visibility.npy
113
+ │ │ │ ├── ...
114
+ ├── bench.py # Evaluation script
115
+ ├── utils # Evaluation code modules
116
+ ```
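Once extracted, the track annotations can be inspected directly with NumPy. The snippet below is only a minimal sketch: the exact array shapes and the prompt-file layout are not documented above, so it loads one hypothetical case and prints whatever it finds rather than assuming a specific format.

```python
import numpy as np

# Hypothetical single-object case; substitute any file name that exists under track/single.
tracks = np.load("MoveBench/en/track/single/Pexels_videoid_0_tracks.npy")
visibility = np.load("MoveBench/en/track/single/Pexels_videoid_0_visibility.npy")
print("tracks:", tracks.shape, tracks.dtype)            # per-frame point trajectories
print("visibility:", visibility.shape, visibility.dtype)  # per-frame visibility flags

# Assuming the prompt files store one case per line.
with open("MoveBench/en/single_track.txt", encoding="utf-8") as f:
    prompts = f.read().splitlines()
print("cases:", len(prompts))
```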
117
+
118
+
119
+ For evaluation, please refer to the [Wan-Move](https://github.com/ali-vilab/Wan-Move) codebase. Enjoy it!
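As a rough sketch (assuming your generated clips mirror the ground-truth layout and share the same file names), the bundled `bench.py` can also be run directly:

```sh
# gt/en       - ground-truth MoveBench clips
# results/en  - your generated clips with matching names
# Adjust raw_root / gen_root near the top of bench.py to pick the en or zh split.
python bench.py
```

The metrics it computes (PSNR, SSIM, LPIPS, CLIP-I, EPE, FID, FVD) are written to `evaluation_results.json`.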
120
+
121
+ <!--
122
+ ## Citation
123
+ If you find our work helpful, please cite us.
124
+
125
+ ```
126
+ @article{wan2025,
127
+ title={Wan: Open and Advanced Large-Scale Video Generative Models},
128
+ author={Team Wan and Ang Wang and Baole Ai and Bin Wen and Chaojie Mao and Chen-Wei Xie and Di Chen and Feiwu Yu and Haiming Zhao and Jianxiao Yang and Jianyuan Zeng and Jiayu Wang and Jingfeng Zhang and Jingren Zhou and Jinkai Wang and Jixuan Chen and Kai Zhu and Kang Zhao and Keyu Yan and Lianghua Huang and Mengyang Feng and Ningyi Zhang and Pandeng Li and Pingyu Wu and Ruihang Chu and Ruili Feng and Shiwei Zhang and Siyang Sun and Tao Fang and Tianxing Wang and Tianyi Gui and Tingyu Weng and Tong Shen and Wei Lin and Wei Wang and Wei Wang and Wenmeng Zhou and Wente Wang and Wenting Shen and Wenyuan Yu and Xianzhong Shi and Xiaoming Huang and Xin Xu and Yan Kou and Yangyu Lv and Yifei Li and Yijing Liu and Yiming Wang and Yingya Zhang and Yitong Huang and Yong Li and You Wu and Yu Liu and Yulin Pan and Yun Zheng and Yuntao Hong and Yupeng Shi and Yutong Feng and Zeyinzi Jiang and Zhen Han and Zhi-Fan Wu and Ziyu Liu},
129
+ journal = {arXiv preprint arXiv:2503.20314},
130
+ year={2025}
131
+ }
132
+ ``` -->
133
+
134
+
135
+ ## Contact Us
136
+ If you would like to leave a message for our research team, feel free to drop us an [Email]([email protected]).
assets/construction.png ADDED

Git LFS Details

  • SHA256: 2563c105ed5d0f525790304b90480de63fc95b1d83840230113fad452bfa8145
  • Pointer size: 131 Bytes
  • Size of remote file: 805 kB
assets/statistics_1.png ADDED

Git LFS Details

  • SHA256: b3bb7f86bb6eceadfc989078da3829cf881d068cbe554f428baad580cafca142
  • Pointer size: 131 Bytes
  • Size of remote file: 610 kB
assets/statistics_2.png ADDED

Git LFS Details

  • SHA256: e7d491b07b6f12dd8b36885707c9347c1943b9a4620f1cdd4b267944190d70e1
  • Pointer size: 131 Bytes
  • Size of remote file: 240 kB
assets/wan-move-logo.png ADDED

Git LFS Details

  • SHA256: 109437f755a07178e989a68a38d393479b9cd7c64a195fe435569af49e219b3b
  • Pointer size: 131 Bytes
  • Size of remote file: 175 kB
bench.py ADDED
@@ -0,0 +1,228 @@
1
+ import os
2
+ os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
3
+ import re
4
+ import json
5
+
6
+ import numpy as np
7
+ from PIL import Image
8
+ import torch
9
+ from torchvision import transforms
10
+ from tqdm import tqdm
11
+
12
+ from utils import (
13
+ calculate_psnr,
14
+ calculate_ssim,
15
+ calculate_fvd,
16
+ calculate_epe,
17
+ calculate_lpips,
18
+ calculate_fid,
19
+ calculate_clip_I,
20
+ save_video_frames,
21
+ preprocess
22
+ )
23
+
24
+ device = "cuda" if torch.cuda.is_available() else "cpu"
25
+
26
+ def preprocess_in_chunks(all_raw_videos, all_gen_videos, batch_size, target_resolution=(224, 224)):
27
+
28
+ processed_raw_chunks = []
29
+ processed_gen_chunks = []
30
+
31
+ for i in range(0, len(all_raw_videos), batch_size):
32
+ raw_chunk_videos = torch.cat(all_raw_videos[i:i + batch_size], dim=0) # (batch_size * T, C, H, W)
33
+ gen_chunk_videos = torch.cat(all_gen_videos[i:i + batch_size], dim=0)
34
+
35
+ raw_chunk_processed = preprocess(raw_chunk_videos, target_resolution) # returns (batch_size, C, T, H', W')
36
+ gen_chunk_processed = preprocess(gen_chunk_videos, target_resolution) # same as above
37
+
38
+ processed_raw_chunks.append(raw_chunk_processed)
39
+ processed_gen_chunks.append(gen_chunk_processed)
40
+
41
+ processed_raw = torch.cat(processed_raw_chunks, dim=0)
42
+ processed_gen = torch.cat(processed_gen_chunks, dim=0)
43
+
44
+ return processed_raw, processed_gen
45
+
46
+ class NumpyEncoder(json.JSONEncoder):
47
+ """ Custom encoder for numpy data types """
48
+ def default(self, obj):
49
+ if isinstance(obj, np.integer):
50
+ return int(obj)
51
+ elif isinstance(obj, np.floating):
52
+ return float(obj)
53
+ elif isinstance(obj, np.ndarray):
54
+ return obj.tolist()
55
+ return super().default(obj)
56
+
57
+ def get_min_max_frame(frames_dir):
58
+ frame_pattern = re.compile(r'^(.*?)_frame_(\d+)\.png$')
59
+ max_frames = {}
60
+
61
+ for filename in os.listdir(frames_dir):
62
+ if not filename.endswith('.png'):
63
+ continue
64
+ match = frame_pattern.match(filename)
65
+ if not match:
66
+ continue
67
+ video_name, frame_num = match.groups()
68
+ frame_num = int(frame_num)
69
+ current_max = max_frames.get(video_name, -1)
70
+ if frame_num > current_max:
71
+ max_frames[video_name] = frame_num
72
+
73
+ return min(max_frames.values()) if max_frames else 0
74
+
75
+ def main():
76
+ # raw_root = "gt/en"
77
+ # gen_root = "results/en"
78
+ raw_root = "gt/zh"
79
+ gen_root = "results/zh"
80
+
81
+ raw_frame_dir = f"{raw_root}_frames"
82
+ gen_frame_dir = f"{gen_root}_frames"
83
+
84
+ if not os.path.exists(raw_frame_dir):
85
+ raw_frame_num = save_video_frames(raw_root, raw_frame_dir)
86
+ else:
87
+ raw_frame_num = get_min_max_frame(raw_frame_dir)
88
+
89
+ if not os.path.exists(gen_frame_dir):
90
+ gen_frame_num = save_video_frames(gen_root, gen_frame_dir)
91
+ else:
92
+ gen_frame_num = get_min_max_frame(gen_frame_dir)
93
+
94
+ print(f"Evaluating with frame count: {gen_frame_num}")
95
+ assert gen_frame_num <= raw_frame_num, "Generated frames exceed raw frames count"
96
+
97
+ video_names = sorted([name for name in os.listdir(gen_root) if name.endswith('.mp4')])
98
+
99
+ scores = {
100
+ "clip": [],
101
+ "epe": [],
102
+ "lpips": [],
103
+ "ssim": [],
104
+ "psnr": [],
105
+ }
106
+ all_raw_videos, all_gen_videos = [], []
107
+
108
+ with torch.no_grad():
109
+ progress_bar = tqdm(video_names, desc="Processing videos")
110
+
111
+ for video_name in progress_bar:
112
+ base_name = video_name.replace(".mp4", "")
113
+ clip, lpips, ssim, psnr = [], [], [], []
114
+ raw_video, gen_video = [], []
115
+
116
+ for frame_idx in range(gen_frame_num):
117
+ # for frame_idx in range(16):
118
+ raw_path = f"{raw_frame_dir}/{base_name}_frame_{frame_idx}.png"
119
+ gen_path = f"{gen_frame_dir}/{base_name}_frame_{frame_idx}.png"
120
+
121
+ try:
122
+ raw_img = Image.open(raw_path)
123
+ gen_img = Image.open(gen_path)
124
+ except FileNotFoundError:
125
+ break
126
+
127
+
128
+ # Align the size
129
+ if raw_img.size != gen_img.size:
130
+ gen_img = gen_img.resize(raw_img.size)
131
+
132
+ # Calculate metrics
133
+ clip.append(calculate_clip_I(raw_img, gen_img))
134
+
135
+ raw_tensor = transforms.ToTensor()(raw_img).unsqueeze(0)
136
+ gen_tensor = transforms.ToTensor()(gen_img).unsqueeze(0)
137
+
138
+ raw_video.append(raw_tensor)
139
+ gen_video.append(gen_tensor)
140
+
141
+ psnr.append(calculate_psnr(raw_tensor, gen_tensor).item())
142
+ ssim.append(calculate_ssim(raw_tensor, gen_tensor).item())
143
+ lpips.append(calculate_lpips(
144
+ raw_tensor.sub(0.5).div(0.5),
145
+ gen_tensor.sub(0.5).div(0.5)
146
+ ).item())
147
+
148
+ if not raw_video:
149
+ continue
150
+
151
+ # Process video-level metrics
152
+ raw_video = torch.cat(raw_video)
153
+ gen_video = torch.cat(gen_video)
154
+ all_raw_videos.append(raw_video.unsqueeze(0))
155
+ all_gen_videos.append(gen_video.unsqueeze(0))
156
+
157
+ epe = calculate_epe(raw_video, gen_video).item()
158
+
159
+ scores["clip"].append(np.mean(clip))
160
+ scores["epe"].append(epe)
161
+ scores["lpips"].append(np.mean(lpips))
162
+ scores["ssim"].append(np.mean(ssim))
163
+ scores["psnr"].append(np.mean(psnr))
164
+
165
+ # Update progress_bar
166
+ current_means = {
167
+ k: round(np.mean(v), 2)
168
+ for k, v in scores.items()
169
+ if isinstance(v, list) and len(v) > 0
170
+ }
171
+ progress_bar.set_postfix(current_means)
172
+
173
+ # FID
174
+ try:
175
+ fid = calculate_fid(raw_frame_dir, gen_frame_dir)
176
+ except Exception as e:
177
+ print(f"[WARN] FID calculation failed: {e}")
178
+ else:
179
+ scores["fid"] = fid
180
+
181
+ # FVD
182
+ processed_raw_chunks = []
183
+ processed_gen_chunks = []
184
+
185
+ batch_size = 20
186
+ TARGET_RESOLUTION = (224, 224)
187
+
188
+
189
+ for i in tqdm(range(0, len(all_raw_videos), batch_size)):
190
+
191
+ raw_chunk_videos = torch.cat(all_raw_videos[i:i + batch_size]).mul(255).clamp(0, 255).byte().numpy()
192
+ gen_chunk_videos = torch.cat(all_gen_videos[i:i + batch_size]).mul(255).clamp(0, 255).byte().numpy()
193
+ raw_chunk_videos = raw_chunk_videos.transpose(0, 1, 3, 4, 2) # [N, T, H, W, C]
194
+ gen_chunk_videos = gen_chunk_videos.transpose(0, 1, 3, 4, 2)
195
+
196
+ raw_chunk_processed = preprocess(raw_chunk_videos, TARGET_RESOLUTION)
197
+ gen_chunk_processed = preprocess(gen_chunk_videos, TARGET_RESOLUTION)
198
+
199
+ processed_raw_chunks.append(raw_chunk_processed)
200
+ processed_gen_chunks.append(gen_chunk_processed)
201
+
202
+ all_raw = torch.cat(processed_raw_chunks, dim=0)
203
+ all_gen = torch.cat(processed_gen_chunks, dim=0)
204
+
205
+ fvd = calculate_fvd(all_raw, all_gen)
206
+ scores["fvd"] = fvd
207
+
208
+
209
+ # Generate final results
210
+ final_scores = {
211
+ k: np.mean(v) if isinstance(v, list) else v
212
+ for k, v in scores.items()
213
+ }
214
+
215
+ print("\nEvaluation Results:")
216
+ for k, v in final_scores.items():
217
+ print(f"{k.upper():<8}: {v:.4f}")
218
+
219
+ results = {
220
+ "raw_scores": scores,
221
+ "final_scores": final_scores
222
+ }
223
+ with open("evaluation_results.json", "w") as f:
224
+ json.dump(results, f, indent=4, cls=NumpyEncoder)
225
+ print("\nResults saved to evaluation_results.json")
226
+
227
+ if __name__ == "__main__":
228
+ main()
en.tar.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d0196c7c5c51fd83b777e57db679ed340d11ba46bf3892e8f38dcd33a0eef67
3
+ size 666265297
utils/.DS_Store ADDED
Binary file (6.15 kB). View file
 
utils/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ from .clip import calculate_clip_I
2
+ from .epe import calculate_epe
3
+ from .fid import calculate_fid
4
+ from .fvd import calculate_fvd, preprocess
5
+ from .lpips import calculate_lpips
6
+ from .ssim_psnr import calculate_ssim, calculate_psnr
7
+
8
+ from .video import save_video_frames
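The helpers exported here can also be used outside `bench.py`. Below is a minimal, hypothetical example; the frame paths are placeholders, LPIPS expects inputs normalized to [-1, 1] (mirroring how `bench.py` calls it), and importing `utils` loads the CLIP, LPIPS, RAFT, and I3D models, so a CUDA device is expected.

```python
import torch
from PIL import Image
from torchvision import transforms

from utils import calculate_psnr, calculate_ssim, calculate_lpips, calculate_clip_I

# Placeholder frame pair; point these at frames extracted by save_video_frames.
raw_img = Image.open("gt/en_frames/Pexels_videoid_0_frame_0.png")
gen_img = Image.open("results/en_frames/Pexels_videoid_0_frame_0.png")

raw = transforms.ToTensor()(raw_img).unsqueeze(0)  # [1, 3, H, W], values in [0, 1]
gen = transforms.ToTensor()(gen_img).unsqueeze(0)

print("PSNR :", calculate_psnr(raw, gen))
print("SSIM :", calculate_ssim(raw, gen))
print("LPIPS:", calculate_lpips(raw.sub(0.5).div(0.5), gen.sub(0.5).div(0.5)))
print("CLIP :", calculate_clip_I(raw_img, gen_img))
```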
utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (516 Bytes). View file
 
utils/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (519 Bytes). View file
 
utils/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (488 Bytes). View file
 
utils/__pycache__/clip.cpython-310.pyc ADDED
Binary file (1.02 kB). View file
 
utils/__pycache__/clip.cpython-312.pyc ADDED
Binary file (1.81 kB). View file
 
utils/__pycache__/clip.cpython-39.pyc ADDED
Binary file (1.08 kB). View file
 
utils/__pycache__/epe.cpython-310.pyc ADDED
Binary file (1.41 kB). View file
 
utils/__pycache__/epe.cpython-312.pyc ADDED
Binary file (2.58 kB). View file
 
utils/__pycache__/epe.cpython-39.pyc ADDED
Binary file (1.31 kB). View file
 
utils/__pycache__/fid.cpython-310.pyc ADDED
Binary file (567 Bytes). View file
 
utils/__pycache__/fid.cpython-312.pyc ADDED
Binary file (633 Bytes). View file
 
utils/__pycache__/fid.cpython-39.pyc ADDED
Binary file (560 Bytes). View file
 
utils/__pycache__/fvd.cpython-310.pyc ADDED
Binary file (4.94 kB). View file
 
utils/__pycache__/fvd.cpython-312.pyc ADDED
Binary file (8.21 kB). View file
 
utils/__pycache__/lpips.cpython-310.pyc ADDED
Binary file (444 Bytes). View file
 
utils/__pycache__/lpips.cpython-312.pyc ADDED
Binary file (622 Bytes). View file
 
utils/__pycache__/pytorch_i3d.cpython-310.pyc ADDED
Binary file (9.7 kB). View file
 
utils/__pycache__/pytorch_i3d.cpython-312.pyc ADDED
Binary file (16.4 kB). View file
 
utils/__pycache__/ssim_psnr.cpython-310.pyc ADDED
Binary file (3.8 kB). View file
 
utils/__pycache__/ssim_psnr.cpython-312.pyc ADDED
Binary file (7.8 kB). View file
 
utils/__pycache__/video.cpython-310.pyc ADDED
Binary file (1.4 kB). View file
 
utils/__pycache__/video.cpython-312.pyc ADDED
Binary file (2.24 kB). View file
 
utils/clip.py ADDED
@@ -0,0 +1,29 @@
1
+
2
+ from tqdm import tqdm
3
+ from PIL import Image
4
+ import torch
5
+ import os
6
+ import numpy as np
7
+
8
+ from transformers import CLIPProcessor, CLIPModel
9
+
10
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
11
+
12
+ model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14").to(device)
13
+ processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
14
+
15
+ def calculate_clip_I(image1, image2):
16
+
17
+ inputs1 = processor(images=image1, return_tensors="pt").to(device)
18
+ inputs2 = processor(images=image2, return_tensors="pt").to(device)
19
+
20
+ with torch.no_grad():
21
+ image_features1 = model.get_image_features(**inputs1)
22
+ image_features2 = model.get_image_features(**inputs2)
23
+
24
+ image_features1 /= image_features1.norm(dim=-1, keepdim=True)
25
+ image_features2 /= image_features2.norm(dim=-1, keepdim=True)
26
+
27
+ similarity = torch.matmul(image_features1, image_features2.T).cpu().numpy()[0][0]
28
+
29
+ return similarity
utils/epe.py ADDED
@@ -0,0 +1,45 @@
1
+ import os
2
+ import numpy as np
3
+ import torch
4
+ import matplotlib.pyplot as plt
5
+ import torchvision.transforms.functional as F
6
+ torch.backends.cudnn.benchmark = True
7
+ torch.backends.cudnn.enabled = False
8
+ torch.backends.cudnn.deterministic = True
9
+
10
+ from torchvision.models.optical_flow import Raft_Large_Weights
11
+
12
+ weights = Raft_Large_Weights.DEFAULT
13
+ transforms = weights.transforms()
14
+
15
+
16
+ def preprocess(source_batch, target_batch):
17
+ source_batch = F.resize(source_batch, size=[480, 832], antialias=False)
18
+ target_batch = F.resize(target_batch, size=[480, 832], antialias=False)
19
+ return transforms(source_batch, target_batch)
20
+
21
+ from torchvision.models.optical_flow import raft_large
22
+
23
+ # If you can, run this on a GPU; it will be a lot faster.
24
+ device = "cuda" if torch.cuda.is_available() else "cpu"
25
+
26
+ model = raft_large(weights=Raft_Large_Weights.DEFAULT, progress=False).to(device)
27
+ model = model.eval()
28
+
29
+ def calculate_epe(img1_batch, img2_batch):
30
+ # img [N, C, H, W]
31
+
32
+ # first compute the optical flow for img1 and img2
33
+ img1_source, img1_target = preprocess(img1_batch[:-1], img1_batch[1:])
34
+ img2_source, img2_target = preprocess(img2_batch[:-1], img2_batch[1:])
35
+
36
+ # optical flow
37
+ img1_flows = model(img1_source.to(device).contiguous(), img1_target.to(device).contiguous())[-1] # [N, 2, H, W]
38
+ img2_flows = model(img2_source.to(device).contiguous(), img2_target.to(device).contiguous())[-1]
39
+
40
+ # epe
41
+ diff = img1_flows - img2_flows
42
+ epe = torch.norm(diff, p=2, dim=1)
43
+ mean_epe = epe.mean()
44
+
45
+ return mean_epe.cpu().numpy()
utils/fid.py ADDED
@@ -0,0 +1,16 @@
1
+ import torch
2
+ import torchvision
3
+ import torchvision.transforms as transforms
4
+ from pytorch_fid import fid_score
5
+
6
+
7
+ def calculate_fid(real_images_folder, generated_images_folder):
8
+
9
+ fid_value = fid_score.calculate_fid_given_paths(
10
+ paths=[real_images_folder, generated_images_folder],
11
+ batch_size=50,
12
+ device="cuda",
13
+ dims=2048,
14
+ )
15
+
16
+ return fid_value
utils/fvd.py ADDED
@@ -0,0 +1,142 @@
1
+ import argparse
2
+ import numpy as np
3
+
4
+ import torch
5
+ import torch.nn.functional as F
6
+ import torch.utils.data as data
7
+
8
+ from .pytorch_i3d import InceptionI3d
9
+ import os
10
+
11
+ from sklearn.metrics.pairwise import polynomial_kernel
12
+
13
+ MAX_BATCH = 10
14
+ FVD_SAMPLE_SIZE = 2048
15
+ TARGET_RESOLUTION = (224, 224)
16
+
17
+ def preprocess(videos, target_resolution):
18
+ # videos in {0, ..., 255} as np.uint8 array
19
+ b, t, h, w, c = videos.shape
20
+ all_frames = torch.FloatTensor(videos).flatten(end_dim=1) # (b * t, h, w, c)
21
+ all_frames = all_frames.permute(0, 3, 1, 2).contiguous() # (b * t, c, h, w)
22
+ resized_videos = F.interpolate(all_frames, size=target_resolution,
23
+ mode='bilinear', align_corners=False)
24
+ resized_videos = resized_videos.view(b, t, c, *target_resolution)
25
+ output_videos = resized_videos.transpose(1, 2).contiguous() # (b, c, t, *)
26
+ scaled_videos = 2. * output_videos / 255. - 1 # [-1, 1]
27
+ return scaled_videos
28
+
29
+ def get_fvd_logits(videos, i3d, device):
30
+ videos = preprocess(videos, TARGET_RESOLUTION)
31
+ embeddings = get_logits(i3d, videos, device)
32
+ return embeddings
33
+
34
+ def load_fvd_model(device):
35
+ i3d = InceptionI3d(400, in_channels=3).to(device)
36
+ current_dir = os.path.dirname(os.path.abspath(__file__))
37
+ i3d_path = os.path.join(current_dir, 'weights', 'i3d_pretrained_400.pt')
38
+ i3d.load_state_dict(torch.load(i3d_path, map_location=device))
39
+ i3d.eval()
40
+ return i3d
41
+
42
+
43
+ # https://github.com/tensorflow/gan/blob/de4b8da3853058ea380a6152bd3bd454013bf619/tensorflow_gan/python/eval/classifier_metrics.py#L161
44
+ def _symmetric_matrix_square_root(mat, eps=1e-10):
45
+ u, s, v = torch.svd(mat)
46
+ si = torch.where(s < eps, s, torch.sqrt(s))
47
+ return torch.matmul(torch.matmul(u, torch.diag(si)), v.t())
48
+
49
+ # https://github.com/tensorflow/gan/blob/de4b8da3853058ea380a6152bd3bd454013bf619/tensorflow_gan/python/eval/classifier_metrics.py#L400
50
+ def trace_sqrt_product(sigma, sigma_v):
51
+ sqrt_sigma = _symmetric_matrix_square_root(sigma)
52
+ sqrt_a_sigmav_a = torch.matmul(sqrt_sigma, torch.matmul(sigma_v, sqrt_sigma))
53
+ return torch.trace(_symmetric_matrix_square_root(sqrt_a_sigmav_a))
54
+
55
+ # https://discuss.pytorch.org/t/covariance-and-gradient-support/16217/2
56
+ def cov(m, rowvar=False):
57
+ '''Estimate a covariance matrix given data.
58
+
59
+ Covariance indicates the level to which two variables vary together.
60
+ If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
61
+ then the covariance matrix element `C_{ij}` is the covariance of
62
+ `x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
63
+
64
+ Args:
65
+ m: A 1-D or 2-D array containing multiple variables and observations.
66
+ Each row of `m` represents a variable, and each column a single
67
+ observation of all those variables.
68
+ rowvar: If `rowvar` is True, then each row represents a
69
+ variable, with observations in the columns. Otherwise, the
70
+ relationship is transposed: each column represents a variable,
71
+ while the rows contain observations.
72
+
73
+ Returns:
74
+ The covariance matrix of the variables.
75
+ '''
76
+ if m.dim() > 2:
77
+ raise ValueError('m has more than 2 dimensions')
78
+ if m.dim() < 2:
79
+ m = m.view(1, -1)
80
+ if not rowvar and m.size(0) != 1:
81
+ m = m.t()
82
+
83
+ fact = 1.0 / (m.size(1) - 1) # unbiased estimate
84
+ m_center = m - torch.mean(m, dim=1, keepdim=True)
85
+ mt = m_center.t() # if complex: mt = m.t().conj()
86
+ return fact * m_center.matmul(mt).squeeze()
87
+
88
+
89
+ def frechet_distance(x1, x2):
90
+ x1 = x1.flatten(start_dim=1)
91
+ x2 = x2.flatten(start_dim=1)
92
+ m, m_w = x1.mean(dim=0), x2.mean(dim=0)
93
+ sigma, sigma_w = cov(x1, rowvar=False), cov(x2, rowvar=False)
94
+
95
+ sqrt_trace_component = trace_sqrt_product(sigma, sigma_w)
96
+ trace = torch.trace(sigma + sigma_w) - 2.0 * sqrt_trace_component
97
+
98
+ mean = torch.sum((m - m_w) ** 2)
99
+ fd = trace + mean
100
+ return fd
101
+
102
+
103
+ def polynomial_mmd(X, Y):
104
+ m = X.shape[0]
105
+ n = Y.shape[0]
106
+ # compute kernels
107
+ K_XX = polynomial_kernel(X)
108
+ K_YY = polynomial_kernel(Y)
109
+ K_XY = polynomial_kernel(X, Y)
110
+ # compute mmd distance
111
+ K_XX_sum = (K_XX.sum() - np.diagonal(K_XX).sum()) / (m * (m - 1))
112
+ K_YY_sum = (K_YY.sum() - np.diagonal(K_YY).sum()) / (n * (n - 1))
113
+ K_XY_sum = K_XY.sum() / (m * n)
114
+ mmd = K_XX_sum + K_YY_sum - 2 * K_XY_sum
115
+ return mmd
116
+
117
+
118
+
119
+ def get_logits(i3d, videos, device):
120
+ # assert videos.shape[0] % MAX_BATCH == 0
121
+ with torch.no_grad():
122
+ logits = []
123
+ for i in range(0, videos.shape[0], MAX_BATCH):
124
+ batch = videos[i:i + MAX_BATCH].to(device)
125
+ logits.append(i3d(batch))
126
+ logits = torch.cat(logits, dim=0)
127
+ return logits
128
+
129
+
130
+ # def compute_fvd(real, samples, i3d, device=torch.device('cpu')):
131
+ def compute_fvd(real, samples, i3d, device=torch.device('cuda')):
132
+ # real, samples are (N, T, H, W, C) numpy arrays in np.uint8
133
+ # real, samples = preprocess(real, (224, 224)), preprocess(samples, (224, 224))
134
+ first_embed = get_logits(i3d, real, device)
135
+ second_embed = get_logits(i3d, samples, device)
136
+
137
+ return frechet_distance(first_embed, second_embed)
138
+
139
+ i3d = load_fvd_model(device=torch.device('cuda'))
140
+
141
+ def calculate_fvd(real, samples):
142
+ return compute_fvd(real, samples, i3d, device=torch.device('cuda')).cpu().numpy()
utils/lpips.py ADDED
@@ -0,0 +1,13 @@
1
+ import torch
2
+ import numpy as np
3
+ import lpips
4
+ loss_fn_vgg = lpips.LPIPS(net='vgg') # closer to "traditional" perceptual loss, when used for optimization
5
+
6
+
7
+ # img0 = torch.zeros(1,3,64,64) # image should be RGB, IMPORTANT: normalized to [-1,1]
8
+ # img1 = torch.zeros(1,3,64,64)
9
+ # d = loss_fn_vgg(img0, img1)
10
+
11
+ def calculate_lpips(img1, img2):
12
+ lpips_score = loss_fn_vgg(img1, img2).cpu().numpy()
13
+ return np.squeeze(lpips_score)
utils/pytorch_i3d.py ADDED
@@ -0,0 +1,372 @@
1
+ # https://github.com/piergiaj/pytorch-i3d
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from torch.autograd import Variable
6
+
7
+ import numpy as np
8
+
9
+ import os
10
+ import sys
11
+ from collections import OrderedDict
12
+
13
+
14
+ class MaxPool3dSamePadding(nn.MaxPool3d):
15
+
16
+ def compute_pad(self, dim, s):
17
+ if s % self.stride[dim] == 0:
18
+ return max(self.kernel_size[dim] - self.stride[dim], 0)
19
+ else:
20
+ return max(self.kernel_size[dim] - (s % self.stride[dim]), 0)
21
+
22
+ def forward(self, x):
23
+ # compute 'same' padding
24
+ (batch, channel, t, h, w) = x.size()
25
+ #print t,h,w
26
+ out_t = np.ceil(float(t) / float(self.stride[0]))
27
+ out_h = np.ceil(float(h) / float(self.stride[1]))
28
+ out_w = np.ceil(float(w) / float(self.stride[2]))
29
+ #print out_t, out_h, out_w
30
+ pad_t = self.compute_pad(0, t)
31
+ pad_h = self.compute_pad(1, h)
32
+ pad_w = self.compute_pad(2, w)
33
+ #print pad_t, pad_h, pad_w
34
+
35
+ pad_t_f = pad_t // 2
36
+ pad_t_b = pad_t - pad_t_f
37
+ pad_h_f = pad_h // 2
38
+ pad_h_b = pad_h - pad_h_f
39
+ pad_w_f = pad_w // 2
40
+ pad_w_b = pad_w - pad_w_f
41
+
42
+ pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b)
43
+ #print x.size()
44
+ #print pad
45
+ x = F.pad(x, pad)
46
+ return super(MaxPool3dSamePadding, self).forward(x)
47
+
48
+
49
+ class Unit3D(nn.Module):
50
+
51
+ def __init__(self, in_channels,
52
+ output_channels,
53
+ kernel_shape=(1, 1, 1),
54
+ stride=(1, 1, 1),
55
+ padding=0,
56
+ activation_fn=F.relu,
57
+ use_batch_norm=True,
58
+ use_bias=False,
59
+ name='unit_3d'):
60
+
61
+ """Initializes Unit3D module."""
62
+ super(Unit3D, self).__init__()
63
+
64
+ self._output_channels = output_channels
65
+ self._kernel_shape = kernel_shape
66
+ self._stride = stride
67
+ self._use_batch_norm = use_batch_norm
68
+ self._activation_fn = activation_fn
69
+ self._use_bias = use_bias
70
+ self.name = name
71
+ self.padding = padding
72
+
73
+ self.conv3d = nn.Conv3d(in_channels=in_channels,
74
+ out_channels=self._output_channels,
75
+ kernel_size=self._kernel_shape,
76
+ stride=self._stride,
77
+ padding=0, # we always want padding to be 0 here. We will dynamically pad based on input size in forward function
78
+ bias=self._use_bias)
79
+
80
+ if self._use_batch_norm:
81
+ self.bn = nn.BatchNorm3d(self._output_channels, eps=1e-5, momentum=0.001)
82
+
83
+ def compute_pad(self, dim, s):
84
+ if s % self._stride[dim] == 0:
85
+ return max(self._kernel_shape[dim] - self._stride[dim], 0)
86
+ else:
87
+ return max(self._kernel_shape[dim] - (s % self._stride[dim]), 0)
88
+
89
+
90
+ def forward(self, x):
91
+ # compute 'same' padding
92
+ (batch, channel, t, h, w) = x.size()
93
+ #print t,h,w
94
+ out_t = np.ceil(float(t) / float(self._stride[0]))
95
+ out_h = np.ceil(float(h) / float(self._stride[1]))
96
+ out_w = np.ceil(float(w) / float(self._stride[2]))
97
+ #print out_t, out_h, out_w
98
+ pad_t = self.compute_pad(0, t)
99
+ pad_h = self.compute_pad(1, h)
100
+ pad_w = self.compute_pad(2, w)
101
+ #print pad_t, pad_h, pad_w
102
+
103
+ pad_t_f = pad_t // 2
104
+ pad_t_b = pad_t - pad_t_f
105
+ pad_h_f = pad_h // 2
106
+ pad_h_b = pad_h - pad_h_f
107
+ pad_w_f = pad_w // 2
108
+ pad_w_b = pad_w - pad_w_f
109
+
110
+ pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b)
111
+ #print x.size()
112
+ #print pad
113
+ x = F.pad(x, pad)
114
+ #print x.size()
115
+
116
+ x = self.conv3d(x)
117
+ if self._use_batch_norm:
118
+ x = self.bn(x)
119
+ if self._activation_fn is not None:
120
+ x = self._activation_fn(x)
121
+ return x
122
+
123
+
124
+
125
+ class InceptionModule(nn.Module):
126
+ def __init__(self, in_channels, out_channels, name):
127
+ super(InceptionModule, self).__init__()
128
+
129
+ self.b0 = Unit3D(in_channels=in_channels, output_channels=out_channels[0], kernel_shape=[1, 1, 1], padding=0,
130
+ name=name+'/Branch_0/Conv3d_0a_1x1')
131
+ self.b1a = Unit3D(in_channels=in_channels, output_channels=out_channels[1], kernel_shape=[1, 1, 1], padding=0,
132
+ name=name+'/Branch_1/Conv3d_0a_1x1')
133
+ self.b1b = Unit3D(in_channels=out_channels[1], output_channels=out_channels[2], kernel_shape=[3, 3, 3],
134
+ name=name+'/Branch_1/Conv3d_0b_3x3')
135
+ self.b2a = Unit3D(in_channels=in_channels, output_channels=out_channels[3], kernel_shape=[1, 1, 1], padding=0,
136
+ name=name+'/Branch_2/Conv3d_0a_1x1')
137
+ self.b2b = Unit3D(in_channels=out_channels[3], output_channels=out_channels[4], kernel_shape=[3, 3, 3],
138
+ name=name+'/Branch_2/Conv3d_0b_3x3')
139
+ self.b3a = MaxPool3dSamePadding(kernel_size=[3, 3, 3],
140
+ stride=(1, 1, 1), padding=0)
141
+ self.b3b = Unit3D(in_channels=in_channels, output_channels=out_channels[5], kernel_shape=[1, 1, 1], padding=0,
142
+ name=name+'/Branch_3/Conv3d_0b_1x1')
143
+ self.name = name
144
+
145
+ def forward(self, x):
146
+ b0 = self.b0(x)
147
+ b1 = self.b1b(self.b1a(x))
148
+ b2 = self.b2b(self.b2a(x))
149
+ b3 = self.b3b(self.b3a(x))
150
+ return torch.cat([b0,b1,b2,b3], dim=1)
151
+
152
+
153
+ class InceptionI3d(nn.Module):
154
+ """Inception-v1 I3D architecture.
155
+ The model is introduced in:
156
+ Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
157
+ Joao Carreira, Andrew Zisserman
158
+ https://arxiv.org/pdf/1705.07750v1.pdf.
159
+ See also the Inception architecture, introduced in:
160
+ Going deeper with convolutions
161
+ Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
162
+ Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
163
+ http://arxiv.org/pdf/1409.4842v1.pdf.
164
+ """
165
+
166
+ # Endpoints of the model in order. During construction, all the endpoints up
167
+ # to a designated `final_endpoint` are returned in a dictionary as the
168
+ # second return value.
169
+ VALID_ENDPOINTS = (
170
+ 'Conv3d_1a_7x7',
171
+ 'MaxPool3d_2a_3x3',
172
+ 'Conv3d_2b_1x1',
173
+ 'Conv3d_2c_3x3',
174
+ 'MaxPool3d_3a_3x3',
175
+ 'Mixed_3b',
176
+ 'Mixed_3c',
177
+ 'MaxPool3d_4a_3x3',
178
+ 'Mixed_4b',
179
+ 'Mixed_4c',
180
+ 'Mixed_4d',
181
+ 'Mixed_4e',
182
+ 'Mixed_4f',
183
+ 'MaxPool3d_5a_2x2',
184
+ 'Mixed_5b',
185
+ 'Mixed_5c',
186
+ 'Logits',
187
+ 'Predictions',
188
+ )
189
+
190
+ FEAT_ENDPOINTS = (
191
+ 'Conv3d_1a_7x7',
192
+ 'Conv3d_2c_3x3',
193
+ 'Mixed_3c',
194
+ 'Mixed_4f',
195
+ 'Mixed_5c',
196
+ )
197
+ def __init__(self,
198
+ num_classes=400,
199
+ spatial_squeeze=True,
200
+ final_endpoint='Logits',
201
+ name='inception_i3d',
202
+ in_channels=3,
203
+ dropout_keep_prob=0.5,
204
+ is_coinrun=False,
205
+ ):
206
+ """Initializes I3D model instance.
207
+ Args:
208
+ num_classes: The number of outputs in the logit layer (default 400, which
209
+ matches the Kinetics dataset).
210
+ spatial_squeeze: Whether to squeeze the spatial dimensions for the logits
211
+ before returning (default True).
212
+ final_endpoint: The model contains many possible endpoints.
213
+ `final_endpoint` specifies the last endpoint for the model to be built
214
+ up to. In addition to the output at `final_endpoint`, all the outputs
215
+ at endpoints up to `final_endpoint` will also be returned, in a
216
+ dictionary. `final_endpoint` must be one of
217
+ InceptionI3d.VALID_ENDPOINTS (default 'Logits').
218
+ name: A string (optional). The name of this module.
219
+ Raises:
220
+ ValueError: if `final_endpoint` is not recognized.
221
+ """
222
+
223
+ if final_endpoint not in self.VALID_ENDPOINTS:
224
+ raise ValueError('Unknown final endpoint %s' % final_endpoint)
225
+
226
+ super(InceptionI3d, self).__init__()
227
+ self._num_classes = num_classes
228
+ self._spatial_squeeze = spatial_squeeze
229
+ self._final_endpoint = final_endpoint
230
+ self.logits = None
231
+ self.is_coinrun = is_coinrun
232
+
233
+ if self._final_endpoint not in self.VALID_ENDPOINTS:
234
+ raise ValueError('Unknown final endpoint %s' % self._final_endpoint)
235
+
236
+ self.end_points = {}
237
+ end_point = 'Conv3d_1a_7x7'
238
+ self.end_points[end_point] = Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[7, 7, 7],
239
+ stride=(1 if is_coinrun else 2, 2, 2), padding=(3,3,3), name=name+end_point)
240
+ if self._final_endpoint == end_point: return
241
+
242
+ end_point = 'MaxPool3d_2a_3x3'
243
+ self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
244
+ padding=0)
245
+ if self._final_endpoint == end_point: return
246
+
247
+ end_point = 'Conv3d_2b_1x1'
248
+ self.end_points[end_point] = Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0,
249
+ name=name+end_point)
250
+ if self._final_endpoint == end_point: return
251
+
252
+ end_point = 'Conv3d_2c_3x3'
253
+ self.end_points[end_point] = Unit3D(in_channels=64, output_channels=192, kernel_shape=[3, 3, 3], padding=1,
254
+ name=name+end_point)
255
+ if self._final_endpoint == end_point: return
256
+
257
+ end_point = 'MaxPool3d_3a_3x3'
258
+ self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
259
+ padding=0)
260
+ if self._final_endpoint == end_point: return
261
+
262
+ end_point = 'Mixed_3b'
263
+ self.end_points[end_point] = InceptionModule(192, [64,96,128,16,32,32], name+end_point)
264
+ if self._final_endpoint == end_point: return
265
+
266
+ end_point = 'Mixed_3c'
267
+ self.end_points[end_point] = InceptionModule(256, [128,128,192,32,96,64], name+end_point)
268
+ if self._final_endpoint == end_point: return
269
+
270
+ end_point = 'MaxPool3d_4a_3x3'
271
+ self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1 if is_coinrun else 3, 3, 3], stride=(1 if is_coinrun else 2, 2, 2),
272
+ padding=0)
273
+ if self._final_endpoint == end_point: return
274
+
275
+ end_point = 'Mixed_4b'
276
+ self.end_points[end_point] = InceptionModule(128+192+96+64, [192,96,208,16,48,64], name+end_point)
277
+ if self._final_endpoint == end_point: return
278
+
279
+ end_point = 'Mixed_4c'
280
+ self.end_points[end_point] = InceptionModule(192+208+48+64, [160,112,224,24,64,64], name+end_point)
281
+ if self._final_endpoint == end_point: return
282
+
283
+ end_point = 'Mixed_4d'
284
+ self.end_points[end_point] = InceptionModule(160+224+64+64, [128,128,256,24,64,64], name+end_point)
285
+ if self._final_endpoint == end_point: return
286
+
287
+ end_point = 'Mixed_4e'
288
+ self.end_points[end_point] = InceptionModule(128+256+64+64, [112,144,288,32,64,64], name+end_point)
289
+ if self._final_endpoint == end_point: return
290
+
291
+ end_point = 'Mixed_4f'
292
+ self.end_points[end_point] = InceptionModule(112+288+64+64, [256,160,320,32,128,128], name+end_point)
293
+ if self._final_endpoint == end_point: return
294
+
295
+ end_point = 'MaxPool3d_5a_2x2'
296
+ self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[2, 2, 2], stride=(1 if is_coinrun else 2, 2, 2),
297
+ padding=0)
298
+ if self._final_endpoint == end_point: return
299
+
300
+ end_point = 'Mixed_5b'
301
+ self.end_points[end_point] = InceptionModule(256+320+128+128, [256,160,320,32,128,128], name+end_point)
302
+ if self._final_endpoint == end_point: return
303
+
304
+ end_point = 'Mixed_5c'
305
+ self.end_points[end_point] = InceptionModule(256+320+128+128, [384,192,384,48,128,128], name+end_point)
306
+ if self._final_endpoint == end_point: return
307
+
308
+ end_point = 'Logits'
309
+ self.avg_pool = nn.AvgPool3d(kernel_size=[1, 8, 8] if is_coinrun else [2, 7, 7],
310
+ stride=(1, 1, 1))
311
+ self.dropout = nn.Dropout(dropout_keep_prob)
312
+ self.logits = Unit3D(in_channels=384+384+128+128, output_channels=self._num_classes,
313
+ kernel_shape=[1, 1, 1],
314
+ padding=0,
315
+ activation_fn=None,
316
+ use_batch_norm=False,
317
+ use_bias=True,
318
+ name='logits')
319
+
320
+ self.build()
321
+
322
+
323
+ def replace_logits(self, num_classes):
324
+ self._num_classes = num_classes
325
+ self.logits = Unit3D(in_channels=384+384+128+128, output_channels=self._num_classes,
326
+ kernel_shape=[1, 1, 1],
327
+ padding=0,
328
+ activation_fn=None,
329
+ use_batch_norm=False,
330
+ use_bias=True,
331
+ name='logits')
332
+
333
+
334
+ def build(self):
335
+ for k in self.end_points.keys():
336
+ self.add_module(k, self.end_points[k])
337
+
338
+ def forward(self, x):
339
+ for end_point in self.VALID_ENDPOINTS:
340
+ if end_point in self.end_points:
341
+ x = self._modules[end_point](x) # use _modules to work with dataparallel
342
+
343
+ x = self.logits(self.dropout(self.avg_pool(x)))
344
+ if self._spatial_squeeze:
345
+ logits = x.squeeze(3).squeeze(3)
346
+ logits = logits.mean(dim=2)
347
+ # logits is batch X time X classes, which is what we want to work with
348
+ return logits
349
+
350
+
351
+ def extract_features(self, x):
352
+ for end_point in self.VALID_ENDPOINTS:
353
+ if end_point in self.end_points:
354
+ x = self._modules[end_point](x)
355
+ return self.avg_pool(x)
356
+
357
+
358
+ def extract_pre_pool_features(self, x):
359
+ for end_point in self.VALID_ENDPOINTS:
360
+ if end_point in self.end_points:
361
+ x = self._modules[end_point](x)
362
+ return x
363
+
364
+
365
+ def extract_features_multiscale(self, x):
366
+ xs = []
367
+ for end_point in self.VALID_ENDPOINTS:
368
+ if end_point in self.end_points:
369
+ x = self._modules[end_point](x)
370
+ if end_point in self.FEAT_ENDPOINTS:
371
+ xs.append(x)
372
+ return xs
utils/ssim_psnr.py ADDED
@@ -0,0 +1,140 @@
1
+ import torch
2
+ import torch.nn.functional as F
3
+ from math import exp
4
+
5
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
6
+
7
+ def gaussian(window_size, sigma):
8
+ gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
9
+ return gauss/gauss.sum()
10
+
11
+
12
+ def create_window(window_size, channel=1):
13
+ _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
14
+ _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0).to(device)
15
+ window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
16
+ return window
17
+
18
+
19
+ def create_window_3d(window_size, channel=1):
20
+ _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
21
+ _2D_window = _1D_window.mm(_1D_window.t())
22
+ _3D_window = _2D_window.unsqueeze(2) @ (_1D_window.t())
23
+ window = _3D_window.expand(1, channel, window_size, window_size, window_size).contiguous().to(device)
24
+ return window
25
+
26
+
27
+ def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
28
+ if val_range is None:
29
+ if torch.max(img1) > 128:
30
+ max_val = 255
31
+ else:
32
+ max_val = 1
33
+
34
+ if torch.min(img1) < -0.5:
35
+ min_val = -1
36
+ else:
37
+ min_val = 0
38
+ L = max_val - min_val
39
+ else:
40
+ L = val_range
41
+
42
+ padd = 0
43
+ (_, channel, height, width) = img1.size()
44
+ if window is None:
45
+ real_size = min(window_size, height, width)
46
+ window = create_window(real_size, channel=channel).to(img1.device)
47
+
48
+ mu1 = F.conv2d(F.pad(img1, (5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=channel)
49
+ mu2 = F.conv2d(F.pad(img2, (5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=channel)
50
+
51
+ mu1_sq = mu1.pow(2)
52
+ mu2_sq = mu2.pow(2)
53
+ mu1_mu2 = mu1 * mu2
54
+
55
+ sigma1_sq = F.conv2d(F.pad(img1 * img1, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu1_sq
56
+ sigma2_sq = F.conv2d(F.pad(img2 * img2, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu2_sq
57
+ sigma12 = F.conv2d(F.pad(img1 * img2, (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu1_mu2
58
+
59
+ C1 = (0.01 * L) ** 2
60
+ C2 = (0.03 * L) ** 2
61
+
62
+ v1 = 2.0 * sigma12 + C2
63
+ v2 = sigma1_sq + sigma2_sq + C2
64
+ cs = torch.mean(v1 / v2)
65
+
66
+ ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
67
+
68
+ if size_average:
69
+ ret = ssim_map.mean()
70
+ else:
71
+ ret = ssim_map.mean(1).mean(1).mean(1)
72
+
73
+ if full:
74
+ return ret, cs
75
+ return ret
76
+
77
+
78
+ def calculate_ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
79
+ if val_range is None:
80
+ if torch.max(img1) > 128:
81
+ max_val = 255
82
+ else:
83
+ max_val = 1
84
+
85
+ if torch.min(img1) < -0.5:
86
+ min_val = -1
87
+ else:
88
+ min_val = 0
89
+ L = max_val - min_val
90
+ else:
91
+ L = val_range
92
+
93
+ padd = 0
94
+ (_, _, height, width) = img1.size()
95
+ if window is None:
96
+ real_size = min(window_size, height, width)
97
+ window = create_window_3d(real_size, channel=1).to(img1.device)
98
+
99
+ img1 = img1.unsqueeze(1)
100
+ img2 = img2.unsqueeze(1)
101
+
102
+ mu1 = F.conv3d(F.pad(img1, (5, 5, 5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=1)
103
+ mu2 = F.conv3d(F.pad(img2, (5, 5, 5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=1)
104
+
105
+ mu1_sq = mu1.pow(2)
106
+ mu2_sq = mu2.pow(2)
107
+ mu1_mu2 = mu1 * mu2
108
+
109
+ sigma1_sq = F.conv3d(F.pad(img1 * img1, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu1_sq
110
+ sigma2_sq = F.conv3d(F.pad(img2 * img2, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu2_sq
111
+ sigma12 = F.conv3d(F.pad(img1 * img2, (5, 5, 5, 5, 5, 5), 'replicate'), window, padding=padd, groups=1) - mu1_mu2
112
+
113
+ C1 = (0.01 * L) ** 2
114
+ C2 = (0.03 * L) ** 2
115
+
116
+ v1 = 2.0 * sigma12 + C2
117
+ v2 = sigma1_sq + sigma2_sq + C2
118
+ cs = torch.mean(v1 / v2)
119
+
120
+ ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
121
+
122
+ if size_average:
123
+ ret = ssim_map.mean()
124
+ else:
125
+ ret = ssim_map.mean(1).mean(1).mean(1)
126
+
127
+ if full:
128
+ return ret, cs
129
+ return ret.detach().cpu().numpy()
130
+
131
+
132
+
133
+ def calculate_psnr(img1, img2):
134
+ psnr = -10 * torch.log10(((img1 - img2) * (img1 - img2)).mean())
135
+ return psnr.detach().cpu().numpy()
136
+
137
+
138
+ def calculate_ie(img1, img2):
139
+ ie = torch.abs(torch.round(img1 * 255.0) - torch.round(img2 * 255.0)).mean()
140
+ return ie.detach().cpu().numpy()
utils/video.py ADDED
@@ -0,0 +1,51 @@
1
+ import os
2
+ import cv2
3
+ from tqdm import tqdm
4
+
5
+ def save_video_frames(input_dir, output_dir):
6
+
7
+ os.makedirs(output_dir, exist_ok=True)
8
+
9
+ dir_frame_idx = None
10
+ for filename in tqdm(os.listdir(input_dir)):
11
+ if filename.lower().endswith('.mp4'):
12
+ video_path = os.path.join(input_dir, filename)
13
+
14
+ base_name = os.path.splitext(filename)[0]
15
+
16
+ cap = cv2.VideoCapture(video_path)
17
+ if not cap.isOpened():
18
+ print(f"Warning: Cannot open {video_path}")
19
+ continue
20
+
21
+ frame_idx = 0
22
+ while True:
23
+ ret, frame = cap.read()
24
+
25
+ if not ret:
26
+ break
27
+
28
+ output_filename = f"{base_name}_frame_{frame_idx}.png"
29
+ output_path = os.path.join(output_dir, output_filename)
30
+
31
+ if not cv2.imwrite(output_path, frame):
32
+ print(f"Warnning: Cannot write {output_path}")
33
+
34
+ frame_idx += 1
35
+
36
+ cap.release()
37
+
38
+ if dir_frame_idx is None:
39
+ dir_frame_idx = frame_idx
40
+ else:
41
+ if dir_frame_idx != frame_idx:
42
+ print(f"Warning: {video_path} has {frame_idx} frames, but {dir_frame_idx} frames in {input_dir}")
43
+ dir_frame_idx = min(dir_frame_idx, frame_idx)
44
+
45
+ return dir_frame_idx
46
+
47
+ if __name__ == "__main__":
48
+ save_video_frames(
49
+ input_dir="aaa",
50
+ output_dir="bbb"
51
+ )
utils/weights/.DS_Store ADDED
Binary file (6.15 kB). View file
 
utils/weights/i3d_pretrained_400.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55095f049e706479d48e221adcdb145b2b9dc930ba28b081ed72367ffaa32343
3
+ size 50939526
zh.tar.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:65989a137e1f7f227f2cd0161288529d00521663077706aac521498ca2bec49c
3
+ size 666270320