shethjenil committed
Commit 63efc13 · verified · 1 Parent(s): a274d42

Update app.py

Files changed (1):
  1. app.py (+20 -20)
app.py CHANGED
@@ -1,20 +1,20 @@
- from spleeter import Splitter
- import torchaudio
- from torchaudio.transforms import Resample
- import torch
- import gradio as gr
- def separate(audio_path):
-     model = Splitter(2)
-     wav, sr = torchaudio.load(audio_path)
-     target_sr = 44100
-     if sr != target_sr:
-         resampler = Resample(sr, target_sr)
-         wav = resampler(wav)
-         sr = target_sr
-     with torch.no_grad():
-         results = model.forward(wav)
-     torchaudio.save("vocals.mp3", results['vocals'], sr, format="mp3")
-     torchaudio.save("accompaniment.mp3", results['accompaniment'], sr, format="mp3")
-     return "vocals.mp3", "accompaniment.mp3"
-
- gr.Interface(separate, gr.Audio(type="filepath"), [gr.Audio(type="filepath"), gr.Audio(type="filepath")]).launch()
 
+ from spleeter import Splitter
+ import torchaudio
+ from torchaudio.transforms import Resample
+ import torch
+ import gradio as gr
+ def separate(inst, audio_path, progress=gr.Progress(True)):
+     model = Splitter(inst)
+     wav, sr = torchaudio.load(audio_path)
+     target_sr = 44100
+     if sr != target_sr:
+         resampler = Resample(sr, target_sr)
+         wav = resampler(wav)
+         sr = target_sr
+     with torch.no_grad():
+         results = model.forward(wav)
+     for i in results:
+         torchaudio.save(f"{i}.mp3", results[i], sr, format="mp3")
+     return tuple([i + ".mp3" for i in results] + [None for _ in range(5 - len(results))])
+
+ gr.Interface(separate, [gr.Dropdown([2, 4, 5]), gr.Audio(type="filepath")], [gr.Audio(type="filepath"), gr.Audio(type="filepath"), gr.Audio(type="filepath"), gr.Audio(type="filepath"), gr.Audio(type="filepath")]).launch()
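The updated separate() takes the stem count from a dropdown (2, 4, or 5) and always returns five output slots, padding unused ones with None. A minimal sketch of calling it directly is below; it assumes separate() is already defined in the current session (importing app.py as-is would trigger launch()), that "song.mp3" is a real local file, and that the stem names follow Spleeter's usual 4-stem convention.

# Hypothetical direct call to the updated separate() from the diff above.
# "song.mp3" is a placeholder path; the stem names are assumed, not guaranteed.
outputs = separate(4, "song.mp3")   # 4-stem separation
# outputs is a 5-tuple padded with None, e.g.
# ('vocals.mp3', 'drums.mp3', 'bass.mp3', 'other.mp3', None)
print([p for p in outputs if p is not None])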