import os
import subprocess
import glob
import sys
from tqdm import tqdm

# --- Path configuration ---
# Directory containing the build_dataset.py script.
# Adjust this to the actual location of build_dataset.py on your machine.
BUILD_SCRIPT_DIR = r"D:\DiffSingerDatasets\MakeDiffSinger\acoustic_forced_alignment"
BUILD_SCRIPT_PATH = os.path.join(BUILD_SCRIPT_DIR, "build_dataset.py")

# Full path to the add_ph_num.py script.
# *** Change this to the correct add_ph_num.py path ***
ADD_SCRIPT_PATH = r"D:\DiffSingerDatasets\MakeDiffSinger\variance-temp-solution\add_ph_num.py"

# Full path to the batch_infer.py script
BATCH_INFER_SCRIPT_PATH = r"D:\DiffSingerDatasets\MakeDiffSinger\SOME\batch_infer.py"

# Root directory of the pre-processed singer data (each singer dir contains wav/ and TextGrid/)
SOURCE_PROCESSED_ROOT = r"D:\DiffSingerDatasets\m4singer_processed"

# Root directory of the output dataset
OUTPUT_DATASET_ROOT = r"D:\DiffSingerDatasets\m4singer_dataset"

# Dictionary file path (used by add_ph_num.py)
DICTIONARY_PATH = r"D:\DiffSingerDatasets\SOFA\dictionary\opencpop-extension.txt"

# Pretrained model checkpoint path (used by batch_infer.py)
MODEL_CKPT_PATH = r"D:\DiffSingerDatasets\MakeDiffSinger\SOME\pretrained\0119_continuous256_5spk\model_ckpt_steps_100000_simplified.ckpt"

# Name of the CSV file to process (generated by build_dataset.py, modified by add_ph_num.py)
CSV_FILENAME = "transcriptions.csv"

# Full path of the Python interpreter currently running this script
PYTHON_EXECUTABLE = sys.executable
print(f"Using Python executable: {PYTHON_EXECUTABLE}")
print("-" * 50)  # Use a longer separator


# --- Helper function: run an external command ---
def _run_command(command, description, cwd=None):
    """Run a subprocess command, print its output, and report success."""
    print(f"\nRunning: {description}")
    print(f"Command: {' '.join(command)}")
    if cwd:
        print(f"Working Directory: {cwd}")
    print("-" * 20)  # Separator before command output
    try:
        # Execute the command
        result = subprocess.run(command, capture_output=True, text=True, check=False, cwd=cwd)
        # Print output
        if result.stdout:
            print("--- STDOUT ---")
            print(result.stdout)
        if result.stderr:
            print("--- STDERR ---")
            print(result.stderr)
        # Check return code
        if result.returncode != 0:
            print(f"!!! Command failed: {description} exited with code {result.returncode}")
            return False, result.returncode
        else:
            print(f"--- Command successful: {description} ---")
            return True, result.returncode
    except FileNotFoundError:
        print(f"!!! Error: Script or executable not found: {command[0]}")
        print("Please check if the script paths and Python executable path are correct.")
        return False, -1  # Indicate script not found
    except Exception as e:
        print(f"!!! An unexpected error occurred while running {description}: {e}")
        return False, -2  # Indicate other general error


# --- Main processing function: handle a single singer ---
def process_single_singer(singer_dir, output_root, dictionary_path, model_ckpt_path, python_exec_path):
    r"""
    Run the full dataset-building pipeline for a single singer directory.

    Args:
        singer_dir (str): Source data directory of the singer
            (e.g., D:\DiffSingerDatasets\m4singer_processed\Alto-1)
        output_root (str): Root directory of the output dataset
            (e.g., D:\DiffSingerDatasets\m4singer_dataset)
        dictionary_path (str): Path to the dictionary file
        model_ckpt_path (str): Path to the model checkpoint file
        python_exec_path (str): Python interpreter used to run the external scripts
    """
    singer_name = os.path.basename(singer_dir)  # Use the directory name as the singer name (e.g., Alto-1)
    source_wav_dir = os.path.join(singer_dir, "wav")
    source_tg_dir = os.path.join(singer_dir, "TextGrid")

    # Build the output dataset directory name, following the {voice part}-{number}-MAN pattern.
    # singer_name is already in the Alto-1 / Bass-2 form, so simply append -MAN.
    target_dataset_name = f"{singer_name}-MAN"
    target_dataset_path = os.path.join(output_root, target_dataset_name)

    print(f"\n{'='*60}")  # Major separator for each singer
    print(f"=== Processing Singer: {singer_name} ===")
    print(f"  Source Wavs: {source_wav_dir}")
    print(f"  Source TextGrids: {source_tg_dir}")
    print(f"  Target Dataset Dir: {target_dataset_path}")
    print(f"{'='*60}\n")

    # 1. Run build_dataset.py
    print("\n--- Step 1: Running build_dataset.py ---")
    build_command = [
        python_exec_path,
        BUILD_SCRIPT_PATH,
        "--wavs", source_wav_dir,
        "--tg", source_tg_dir,
        "--dataset", target_dataset_path
    ]
    success, _ = _run_command(build_command, "build_dataset.py")
    if not success:
        print(f"\n!!! Skipping add_ph_num.py and batch_infer.py for {singer_name} due to build_dataset.py failure.")
        return  # Stop processing this singer

    # 2. Run add_ph_num.py
    print("\n--- Step 2: Running add_ph_num.py ---")
    csv_path = os.path.join(target_dataset_path, CSV_FILENAME)
    if not os.path.exists(csv_path):
        print(f"\n!!! Error: {CSV_FILENAME} not found at {csv_path} after build_dataset.py. "
              f"Skipping add_ph_num.py and batch_infer.py for {singer_name}.")
        return  # Stop processing this singer
    add_command = [
        python_exec_path,
        ADD_SCRIPT_PATH,
        csv_path,  # The script expects the CSV path as the first positional argument
        "--dictionary", dictionary_path
    ]
    success, _ = _run_command(add_command, "add_ph_num.py")
    if not success:
        print(f"\n!!! Skipping batch_infer.py for {singer_name} due to add_ph_num.py failure.")
        return  # Stop processing this singer

    # 3. Run batch_infer.py
    print("\n--- Step 3: Running batch_infer.py (Pitch Inference) ---")
    # Check that the model checkpoint exists before attempting inference
    if not os.path.exists(model_ckpt_path):
        print(f"\n!!! Error: Model checkpoint not found at {model_ckpt_path}. Skipping batch_infer.py for {singer_name}.")
        return  # Stop processing this singer
    infer_command = [
        python_exec_path,
        BATCH_INFER_SCRIPT_PATH,
        "--model", model_ckpt_path,
        "--dataset", target_dataset_path,  # Pass the dataset directory containing the updated CSV
        "--overwrite"
    ]
    success, _ = _run_command(infer_command, "batch_infer.py")
    if not success:
        print(f"\n!!! Pitch inference failed for {singer_name}.")
    else:
        print(f"\n=== Successfully processed all steps for Singer: {singer_name} ===")


if __name__ == "__main__":
    # Check that all required scripts and files exist
    if not os.path.exists(BUILD_SCRIPT_PATH):
        print(f"Error: build_dataset.py not found at {BUILD_SCRIPT_PATH}. Exiting.")
        sys.exit(1)
    if not os.path.exists(ADD_SCRIPT_PATH):
        print(f"Error: add_ph_num.py not found at {ADD_SCRIPT_PATH}. Exiting.")
        sys.exit(1)
    if not os.path.exists(BATCH_INFER_SCRIPT_PATH):
        print(f"Error: batch_infer.py not found at {BATCH_INFER_SCRIPT_PATH}. Exiting.")
        sys.exit(1)
    if not os.path.exists(DICTIONARY_PATH):
        print(f"Error: Dictionary file not found at {DICTIONARY_PATH}. Exiting.")
        sys.exit(1)
    # The model checkpoint is checked again inside the processing function since it is the last step,
    # but an upfront warning is still useful.
    if not os.path.exists(MODEL_CKPT_PATH):
        print(f"Warning: Model checkpoint not found at {MODEL_CKPT_PATH}. Pitch inference step will be skipped for all singers.")

    # Find all singer directories
    singer_directories = glob.glob(os.path.join(SOURCE_PROCESSED_ROOT, "*"))
    # Keep only entries that are actually directories
    singer_directories = [d for d in singer_directories if os.path.isdir(d)]

    if not singer_directories:
        print(f"No singer directories found in {SOURCE_PROCESSED_ROOT}. Please check the path.")
    else:
        print(f"Found {len(singer_directories)} singer directories to process.")
        print("-" * 50)

        # Wrap the loop with tqdm to show a progress bar.
        # leave=True keeps the final bar after completion, showing the total count.
        for singer_dir in tqdm(singer_directories, desc="Overall Dataset Building", leave=True):
            # Run the combined processing function for each singer
            process_single_singer(singer_dir, OUTPUT_DATASET_ROOT, DICTIONARY_PATH, MODEL_CKPT_PATH, PYTHON_EXECUTABLE)
            print("\n" + "=" * 60 + "\n")  # Separator between singers

        print("\n" + "=" * 60)
        print("=== Finished processing all singers ===")
        print("=" * 60)