Update app.py
app.py CHANGED

@@ -3,8 +3,19 @@ import torchaudio
 from torchaudio.transforms import Resample
 import torch
 import gradio as gr
-def separate(inst,audio_path,progress=gr.Progress(True)):
-
+def separate(audio_path:str,inst_no:int,):
+    """
+    Separate audio into instrument tracks.
+
+    Args:
+        audio_path (str): Path to input audio.
+        inst_no (int): Number of instruments to separate.
+
+    Returns:
+        tuple: Up to 5 MP3 file paths for separated tracks.
+    """
+    progress=gr.Progress(True)
+    model = Splitter(inst_no)
     wav, sr = torchaudio.load(audio_path)
     target_sr = 44100
     if sr != target_sr:
@@ -14,7 +25,7 @@ def separate(inst,audio_path,progress=gr.Progress(True)):
     with torch.no_grad():
         results = model.forward(wav)
         for i in results:
-            torchaudio.save(f"{i}.mp3", results[i], sr
+            torchaudio.save(f"{i}.mp3", results[i], sr)
     return tuple([i+".mp3" for i in results] + [None for _ in range(5-len(results))])
 
-gr.Interface(separate, [gr.Dropdown([2,4,5]
+gr.Interface(separate, [gr.Audio(type="filepath"),gr.Dropdown([2,4,5],2)], [gr.Audio(type="filepath"), gr.Audio(type="filepath"),gr.Audio(type="filepath"),gr.Audio(type="filepath"),gr.Audio(type="filepath")]).launch(mcp_server=True)
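
For context, below is a minimal sketch of how app.py could read after this commit. The Splitter class, the file's first two lines, and the resampling block between the two hunks are not visible in the diff, so the stub Splitter and the Resample lines are assumptions; gr.Dropdown is written with keyword arguments and the five audio outputs with a comprehension purely for brevity. Only the separate() body, its docstring, and the gr.Interface(...).launch(mcp_server=True) call mirror the added lines.

# Sketch only: `Splitter`, the file's first imports, and the resampling block
# are not shown in the diff; the stand-ins below are assumptions.
import torch
import torchaudio
import gradio as gr
from torchaudio.transforms import Resample


class Splitter:
    # Hypothetical stand-in for the real separation model loaded by the Space.
    def __init__(self, inst_no: int):
        self.stems = ["vocals", "drums", "bass", "other", "piano"][:inst_no]

    def forward(self, wav: torch.Tensor) -> dict:
        # The real model returns one waveform per stem; this stub echoes the input.
        return {stem: wav for stem in self.stems}


def separate(audio_path: str, inst_no: int):
    """Separate audio into instrument tracks and return up to 5 MP3 paths."""
    progress = gr.Progress(True)   # as in the diff; not used further here
    model = Splitter(inst_no)
    wav, sr = torchaudio.load(audio_path)
    target_sr = 44100
    if sr != target_sr:
        # Assumed content of the context lines elided between the two hunks.
        wav = Resample(orig_freq=sr, new_freq=target_sr)(wav)
        sr = target_sr
    with torch.no_grad():
        results = model.forward(wav)
    for stem in results:
        torchaudio.save(f"{stem}.mp3", results[stem], sr)
    return tuple([stem + ".mp3" for stem in results]
                 + [None for _ in range(5 - len(results))])


gr.Interface(
    separate,
    [gr.Audio(type="filepath"), gr.Dropdown(choices=[2, 4, 5], value=2)],
    [gr.Audio(type="filepath") for _ in range(5)],
).launch(mcp_server=True)

Note that saving with sr after the assumed sr = target_sr reassignment writes the MP3s at 44.1 kHz; if the real elided block does not reassign sr, the files would be written at the input's original sample rate.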