Umadevi0305 committed
Commit 84c2b2a · verified · 1 Parent(s): ba1b981

Upload 9 files

Files changed (9)
  1. .gitignore +171 -0
  2. .gitmodules +3 -0
  3. .pre-commit-config.yaml +17 -0
  4. Dockerfile +30 -0
  5. F5-TTS-main.zip +3 -0
  6. LICENSE +21 -0
  7. README.md +261 -13
  8. pyproject.toml +64 -0
  9. ruff.toml +10 -0
.gitignore ADDED
@@ -0,0 +1,171 @@
+ # Customed
+ .vscode/
+ tests/
+ runs/
+ data/
+ ckpts/
+ wandb/
+ results/
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
.gitmodules ADDED
@@ -0,0 +1,3 @@
+ [submodule "src/third_party/BigVGAN"]
+     path = src/third_party/BigVGAN
+     url = https://github.com/NVIDIA/BigVGAN.git
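
The BigVGAN submodule is only needed when BigVGAN is used as the vocoder; a minimal checkout, mirroring the commands in the Dockerfile and README below, looks like:

```bash
# Clone F5-TTS and pull the BigVGAN submodule (only needed for the BigVGAN vocoder)
git clone https://github.com/SWivid/F5-TTS.git
cd F5-TTS
git submodule update --init --recursive
```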
.pre-commit-config.yaml ADDED
@@ -0,0 +1,17 @@
+ repos:
+   - repo: https://github.com/astral-sh/ruff-pre-commit
+     # Ruff version.
+     rev: v0.11.2
+     hooks:
+       - id: ruff
+         name: ruff linter
+         args: [--fix]
+       - id: ruff-format
+         name: ruff formatter
+       - id: ruff
+         name: ruff sorter
+         args: [--select, I, --fix]
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v5.0.0
+     hooks:
+       - id: check-yaml
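
To run these hooks locally, the usual pre-commit workflow applies (the same commands appear in the Development section of the README below):

```bash
pip install pre-commit
pre-commit install          # run the hooks automatically on every commit
pre-commit run --all-files  # or run them once over the whole repository
```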
Dockerfile ADDED
@@ -0,0 +1,30 @@
+ FROM pytorch/pytorch:2.4.0-cuda12.4-cudnn9-devel
+
+ USER root
+
+ ARG DEBIAN_FRONTEND=noninteractive
+
+ LABEL github_repo="https://github.com/SWivid/F5-TTS"
+
+ RUN set -x \
+     && apt-get update \
+     && apt-get -y install wget curl man git less openssl libssl-dev unzip unar build-essential aria2 tmux vim \
+     && apt-get install -y openssh-server sox libsox-fmt-all libsox-fmt-mp3 libsndfile1-dev ffmpeg \
+     && apt-get install -y librdmacm1 libibumad3 librdmacm-dev libibverbs1 libibverbs-dev ibverbs-utils ibverbs-providers \
+     && rm -rf /var/lib/apt/lists/* \
+     && apt-get clean
+
+ WORKDIR /workspace
+
+ RUN git clone https://github.com/SWivid/F5-TTS.git \
+     && cd F5-TTS \
+     && git submodule update --init --recursive \
+     && pip install -e . --no-cache-dir
+
+ ENV SHELL=/bin/bash
+
+ VOLUME /root/.cache/huggingface/hub/
+
+ EXPOSE 7860
+
+ WORKDIR /workspace/F5-TTS
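
A local build-and-run cycle for this Dockerfile, assuming the `f5tts:v1` tag used in the README's Docker section, would look roughly like:

```bash
# Build the image from this Dockerfile
docker build -t f5tts:v1 .

# Launch the Gradio web UI on port 7860, caching model downloads in a named volume
docker run --rm -it --gpus=all \
    --mount 'type=volume,source=f5-tts,target=/root/.cache/huggingface/hub/' \
    -p 7860:7860 f5tts:v1 f5-tts_infer-gradio --host 0.0.0.0
```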
F5-TTS-main.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c8a5e583db3435098285e15cb2bfb675c87cc816d3ddd62b2986728959cd5a2
+ size 1407856
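
This entry is a Git LFS pointer rather than the archive itself (roughly 1.4 MB); if the actual zip is needed after cloning the repository, fetching it would typically be:

```bash
# One-time Git LFS setup, then download the real file behind the pointer
git lfs install
git lfs pull
```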
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 Yushen CHEN
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md CHANGED
@@ -1,13 +1,261 @@
- ---
- title: Audio F5TTS
- emoji: 📉
- colorFrom: gray
- colorTo: green
- sdk: gradio
- sdk_version: 5.43.1
- app_file: app.py
- pinned: false
- short_description: open-source Text-to-Speech (TTS) model.
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching
+
+ [![python](https://img.shields.io/badge/Python-3.10-brightgreen)](https://github.com/SWivid/F5-TTS)
+ [![arXiv](https://img.shields.io/badge/arXiv-2410.06885-b31b1b.svg?logo=arXiv)](https://arxiv.org/abs/2410.06885)
+ [![demo](https://img.shields.io/badge/GitHub-Demo%20page-orange.svg)](https://swivid.github.io/F5-TTS/)
+ [![hfspace](https://img.shields.io/badge/🤗-Space%20demo-yellow)](https://huggingface.co/spaces/mrfakename/E2-F5-TTS)
+ [![msspace](https://img.shields.io/badge/🤖-Space%20demo-blue)](https://modelscope.cn/studios/modelscope/E2-F5-TTS)
+ [![lab](https://img.shields.io/badge/X--LANCE-Lab-grey?labelColor=lightgrey)](https://x-lance.sjtu.edu.cn/)
+ [![lab](https://img.shields.io/badge/Peng%20Cheng-Lab-grey?labelColor=lightgrey)](https://www.pcl.ac.cn)
+ <!-- <img src="https://github.com/user-attachments/assets/12d7749c-071a-427c-81bf-b87b91def670" alt="Watermark" style="width: 40px; height: auto"> -->
+
+ **F5-TTS**: Diffusion Transformer with ConvNeXt V2, offering faster training and inference.
+
+ **E2 TTS**: Flat-UNet Transformer, the closest reproduction of the [paper](https://arxiv.org/abs/2406.18009).
+
+ **Sway Sampling**: Inference-time flow-step sampling strategy that greatly improves performance.
+
+ ### Thanks to all the contributors!
+
+ ## News
+ - **2025/03/12**: 🔥 F5-TTS v1 base model with better training and inference performance. [A few demos](https://swivid.github.io/F5-TTS_updates).
+ - **2024/10/08**: F5-TTS & E2 TTS base models on [🤗 Hugging Face](https://huggingface.co/SWivid/F5-TTS), [🤖 Model Scope](https://www.modelscope.cn/models/SWivid/F5-TTS_Emilia-ZH-EN), [🟣 Wisemodel](https://wisemodel.cn/models/SJTU_X-LANCE/F5-TTS_Emilia-ZH-EN).
+
+ ## Installation
+
+ ### Create a separate environment if needed
+
+ ```bash
+ # Create a python 3.10 conda env (you could also use virtualenv)
+ conda create -n f5-tts python=3.10
+ conda activate f5-tts
+ ```
+
+ ### Install PyTorch matched to your device
+
+ <details>
+ <summary>NVIDIA GPU</summary>
+
+ > ```bash
+ > # Install pytorch with your CUDA version, e.g.
+ > pip install torch==2.4.0+cu124 torchaudio==2.4.0+cu124 --extra-index-url https://download.pytorch.org/whl/cu124
+ > ```
+
+ </details>
+
+ <details>
+ <summary>AMD GPU</summary>
+
+ > ```bash
+ > # Install pytorch with your ROCm version (Linux only), e.g.
+ > pip install torch==2.5.1+rocm6.2 torchaudio==2.5.1+rocm6.2 --extra-index-url https://download.pytorch.org/whl/rocm6.2
+ > ```
+
+ </details>
+
+ <details>
+ <summary>Intel GPU</summary>
+
+ > ```bash
+ > # Install pytorch with your XPU version, e.g.
+ > # Intel® Deep Learning Essentials or Intel® oneAPI Base Toolkit must be installed
+ > pip install torch torchaudio --index-url https://download.pytorch.org/whl/test/xpu
+ >
+ > # Intel GPU support is also available through IPEX (Intel® Extension for PyTorch)
+ > # IPEX does not require the Intel® Deep Learning Essentials or Intel® oneAPI Base Toolkit
+ > # See: https://pytorch-extension.intel.com/installation?request=platform
+ > ```
+
+ </details>
+
+ <details>
+ <summary>Apple Silicon</summary>
+
+ > ```bash
+ > # Install the stable pytorch, e.g.
+ > pip install torch torchaudio
+ > ```
+
+ </details>
+
+ ### Then choose one of the options below:
+
+ > ### 1. As a pip package (if just for inference)
+ >
+ > ```bash
+ > pip install f5-tts
+ > ```
+ >
+ > ### 2. Local editable install (if also doing training or finetuning)
+ >
+ > ```bash
+ > git clone https://github.com/SWivid/F5-TTS.git
+ > cd F5-TTS
+ > # git submodule update --init --recursive # (optional, if using BigVGAN as vocoder)
+ > pip install -e .
+ > ```
+
+ ### Docker usage is also available
+ ```bash
+ # Build from Dockerfile
+ docker build -t f5tts:v1 .
+
+ # Run from GitHub Container Registry
+ docker container run --rm -it --gpus=all --mount 'type=volume,source=f5-tts,target=/root/.cache/huggingface/hub/' -p 7860:7860 ghcr.io/swivid/f5-tts:main
+
+ # Quickstart if you want to just run the web interface (not CLI)
+ docker container run --rm -it --gpus=all --mount 'type=volume,source=f5-tts,target=/root/.cache/huggingface/hub/' -p 7860:7860 ghcr.io/swivid/f5-tts:main f5-tts_infer-gradio --host 0.0.0.0
+ ```
+
+ ### Runtime
+
+ Deployment solution with Triton and TensorRT-LLM.
+
+ #### Benchmark Results
+ Decoding on a single L20 GPU, using 26 different prompt_audio & target_text pairs, with 16 NFE steps.
+
+ | Model | Concurrency | Avg Latency | RTF | Mode |
+ |---------------------|----------------|-------------|--------|-----------------|
+ | F5-TTS Base (Vocos) | 2 | 253 ms | 0.0394 | Client-Server |
+ | F5-TTS Base (Vocos) | 1 (Batch_size) | - | 0.0402 | Offline TRT-LLM |
+ | F5-TTS Base (Vocos) | 1 (Batch_size) | - | 0.1467 | Offline Pytorch |
+
+ See [detailed instructions](src/f5_tts/runtime/triton_trtllm/README.md) for more information.
+
+
+ ## Inference
+
+ - To achieve the desired performance, take a moment to read the [detailed guidance](src/f5_tts/infer).
+ - Searching the [issues](https://github.com/SWivid/F5-TTS/issues?q=is%3Aissue) for keywords related to the problem you encounter is also very helpful.
+
+ ### 1. Gradio App
+
+ Currently supported features:
+
+ - Basic TTS with Chunk Inference
+ - Multi-Style / Multi-Speaker Generation
+ - Voice Chat powered by Qwen2.5-3B-Instruct
+ - [Custom inference with more language support](src/f5_tts/infer/SHARED.md)
+
+ ```bash
+ # Launch a Gradio app (web interface)
+ f5-tts_infer-gradio
+
+ # Specify the port/host
+ f5-tts_infer-gradio --port 7860 --host 0.0.0.0
+
+ # Launch a share link
+ f5-tts_infer-gradio --share
+ ```
+
+ <details>
+ <summary>NVIDIA device docker compose file example</summary>
+
+ ```yaml
+ services:
+   f5-tts:
+     image: ghcr.io/swivid/f5-tts:main
+     ports:
+       - "7860:7860"
+     environment:
+       GRADIO_SERVER_PORT: 7860
+     entrypoint: ["f5-tts_infer-gradio", "--port", "7860", "--host", "0.0.0.0"]
+     deploy:
+       resources:
+         reservations:
+           devices:
+             - driver: nvidia
+               count: 1
+               capabilities: [gpu]
+
+ volumes:
+   f5-tts:
+     driver: local
+ ```
+
+ </details>
+
+ ### 2. CLI Inference
+
+ ```bash
+ # Run with flags
+ # Leaving --ref_text "" will have an ASR model transcribe the reference audio (extra GPU memory usage)
+ f5-tts_infer-cli --model F5TTS_v1_Base \
+     --ref_audio "provide_prompt_wav_path_here.wav" \
+     --ref_text "The content, subtitle or transcription of reference audio." \
+     --gen_text "Some text you want the TTS model to generate for you."
+
+ # Run with the default settings from src/f5_tts/infer/examples/basic/basic.toml
+ f5-tts_infer-cli
+ # Or with your own .toml file
+ f5-tts_infer-cli -c custom.toml
+
+ # Multi-voice. See src/f5_tts/infer/README.md
+ f5-tts_infer-cli -c src/f5_tts/infer/examples/multi/story.toml
+ ```
+
+
+ ## Training
+
+ ### 1. With Hugging Face Accelerate
+
+ Refer to [training & finetuning guidance](src/f5_tts/train) for best practices.
+
+ ### 2. With Gradio App
+
+ ```bash
+ # Quick start with Gradio web interface
+ f5-tts_finetune-gradio
+ ```
+
+ Read [training & finetuning guidance](src/f5_tts/train) for more instructions.
+
+
+ ## [Evaluation](src/f5_tts/eval)
+
+
+ ## Development
+
+ Use pre-commit to ensure code quality (it will run linters and formatters automatically):
+
+ ```bash
+ pip install pre-commit
+ pre-commit install
+ ```
+
+ When making a pull request, run the checks over all files before each commit:
+
+ ```bash
+ pre-commit run --all-files
+ ```
+
+ Note: Some model components have linting exceptions for E722 to accommodate tensor notation.
+
+
+ ## Acknowledgements
+
+ - [E2-TTS](https://arxiv.org/abs/2406.18009) brilliant work, simple and effective
+ - [Emilia](https://arxiv.org/abs/2407.05361), [WenetSpeech4TTS](https://arxiv.org/abs/2406.05763), [LibriTTS](https://arxiv.org/abs/1904.02882), [LJSpeech](https://keithito.com/LJ-Speech-Dataset/) valuable datasets
+ - [lucidrains](https://github.com/lucidrains) initial CFM structure, with [bfs18](https://github.com/bfs18) for discussion
+ - [SD3](https://arxiv.org/abs/2403.03206) & [Hugging Face diffusers](https://github.com/huggingface/diffusers) DiT and MMDiT code structure
+ - [torchdiffeq](https://github.com/rtqichen/torchdiffeq) as ODE solver, [Vocos](https://huggingface.co/charactr/vocos-mel-24khz) and [BigVGAN](https://github.com/NVIDIA/BigVGAN) as vocoders
+ - [FunASR](https://github.com/modelscope/FunASR), [faster-whisper](https://github.com/SYSTRAN/faster-whisper), [UniSpeech](https://github.com/microsoft/UniSpeech), [SpeechMOS](https://github.com/tarepan/SpeechMOS) for evaluation tools
+ - [ctc-forced-aligner](https://github.com/MahmoudAshraf97/ctc-forced-aligner) for speech edit test
+ - [mrfakename](https://x.com/realmrfakename) huggingface space demo ~
+ - [f5-tts-mlx](https://github.com/lucasnewman/f5-tts-mlx/tree/main) implementation with the MLX framework by [Lucas Newman](https://github.com/lucasnewman)
+ - [F5-TTS-ONNX](https://github.com/DakeQQ/F5-TTS-ONNX) ONNX Runtime version by [DakeQQ](https://github.com/DakeQQ)
+ - [Yuekai Zhang](https://github.com/yuekaizhang) Triton and TensorRT-LLM support ~
+
+ ## Citation
+ If our work and codebase are useful for you, please cite as:
+ ```
+ @article{chen-etal-2024-f5tts,
+   title={F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching},
+   author={Yushen Chen and Zhikang Niu and Ziyang Ma and Keqi Deng and Chunhui Wang and Jian Zhao and Kai Yu and Xie Chen},
+   journal={arXiv preprint arXiv:2410.06885},
+   year={2024},
+ }
+ ```
+ ## License
+
+ Our code is released under the MIT License. The pre-trained models are licensed under the CC-BY-NC license because the training data, Emilia, is an in-the-wild dataset. Sorry for any inconvenience this may cause.
pyproject.toml ADDED
@@ -0,0 +1,64 @@
+ [build-system]
+ requires = ["setuptools >= 61.0", "setuptools-scm>=8.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "f5-tts"
+ version = "1.1.7"
+ description = "F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching"
+ readme = "README.md"
+ license = {text = "MIT License"}
+ classifiers = [
+     "License :: OSI Approved :: MIT License",
+     "Operating System :: OS Independent",
+     "Programming Language :: Python :: 3",
+ ]
+ dependencies = [
+     "accelerate>=0.33.0",
+     "bitsandbytes>0.37.0; platform_machine != 'arm64' and platform_system != 'Darwin'",
+     "cached_path",
+     "click",
+     "datasets",
+     "ema_pytorch>=0.5.2",
+     "gradio>=3.45.2",
+     "hydra-core>=1.3.0",
+     "jieba",
+     "librosa",
+     "matplotlib",
+     "numpy<=1.26.4",
+     "pydantic<=2.10.6",
+     "pydub",
+     "pypinyin",
+     "safetensors",
+     "soundfile",
+     "tomli",
+     "torch>=2.0.0",
+     "torchaudio>=2.0.0",
+     "torchdiffeq",
+     "tqdm>=4.65.0",
+     "transformers",
+     "transformers_stream_generator",
+     "unidecode",
+     "vocos",
+     "wandb",
+     "x_transformers>=1.31.14",
+ ]
+
+ [project.optional-dependencies]
+ eval = [
+     "faster_whisper==0.10.1",
+     "funasr",
+     "jiwer",
+     "modelscope",
+     "zhconv",
+     "zhon",
+ ]
+
+ [project.urls]
+ Homepage = "https://github.com/SWivid/F5-TTS"
+
+ [project.scripts]
+ "f5-tts_infer-cli" = "f5_tts.infer.infer_cli:main"
+ "f5-tts_infer-gradio" = "f5_tts.infer.infer_gradio:main"
+ "f5-tts_finetune-cli" = "f5_tts.train.finetune_cli:main"
+ "f5-tts_finetune-gradio" = "f5_tts.train.finetune_gradio:main"
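
The `eval` extras above are only needed for the evaluation scripts, and the `[project.scripts]` entries are what provide the `f5-tts_*` commands used throughout the README. With a local checkout, the extras can be pulled in via pip's standard extras syntax:

```bash
# Editable install plus the evaluation-only dependencies (faster_whisper, funasr, jiwer, ...)
pip install -e .[eval]
```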
ruff.toml ADDED
@@ -0,0 +1,10 @@
+ line-length = 120
+ target-version = "py310"
+
+ [lint]
+ # Only ignore variables with names starting with "_".
+ dummy-variable-rgx = "^_.*$"
+
+ [lint.isort]
+ force-single-line = false
+ lines-after-imports = 2
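
These settings are picked up by the ruff hooks in .pre-commit-config.yaml above; running the same checks by hand would look like:

```bash
ruff check --fix .             # lint, as in the "ruff linter" hook
ruff check --select I --fix .  # sort imports, as in the "ruff sorter" hook
ruff format .                  # format, as in the "ruff formatter" hook
```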