Spaces: Runtime error
update the app.py
app.py CHANGED
@@ -2,11 +2,14 @@ import gradio as gr
 from transformers import WhisperProcessor, WhisperForConditionalGeneration
 import torch
 import torchaudio
+import spaces
+
+# Initialize devices
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # Load model and processor
 processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2")
 model = WhisperForConditionalGeneration.from_pretrained("aiola/whisper-ner-v1")
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model = model.to(device)
 
 def unify_ner_text(text, symbols_to_replace=("/", " ", ":", "_")):
@@ -16,7 +19,7 @@ def unify_ner_text(text, symbols_to_replace=("/", " ", ":", "_")):
         text = text.replace(symbol, "-")
     return text.lower()
 
-
+@spaces.GPU  # This decorator ensures your function can use GPU on Hugging Face Spaces
 def transcribe_and_recognize_entities(audio_file, prompt):
     target_sample_rate = 16000
     signal, sampling_rate = torchaudio.load(audio_file)
@@ -25,12 +28,9 @@ def transcribe_and_recognize_entities(audio_file, prompt):
     if signal.ndim == 2:
         signal = torch.mean(signal, dim=0)
 
-    signal = signal.cpu()  # Ensure signal is on CPU for processing
     input_features = processor(signal, sampling_rate=target_sample_rate, return_tensors="pt").input_features
     input_features = input_features.to(device)
 
-
-    # Split the prompt into individual NER types and process each one
     ner_types = prompt.split(',')
     processed_ner_types = [unify_ner_text(ner_type.strip()) for ner_type in ner_types]
     prompt = ", ".join(processed_ner_types)
@@ -43,31 +43,21 @@ def transcribe_and_recognize_entities(audio_file, prompt):
         input_features,
         max_new_tokens=256,
         prompt_ids=prompt_ids,
-        language='en',
+        language='en',
         generation_config=model.generation_config,
     )
-    # slice only the output without the prompt itself at the start.
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
-
-    # Determine the length of the prompt in the transcription
     prompt_length_in_transcription = len(prompt)
-
-    # Slice the transcription to remove the prompt itself from the output
-    transcription = transcription[prompt_length_in_transcription + 1:]
+    transcription = transcription[prompt_length_in_transcription + 1:]  # Remove the prompt
 
     return transcription
 
-# Define Gradio interface
 iface = gr.Interface(
     fn=transcribe_and_recognize_entities,
-    inputs=[
-        gr.Audio(label="Upload Audio", type="filepath"),
-        gr.Textbox(label="Entity Recognition Prompt"),
-    ],
+    inputs=[gr.Audio(label="Upload Audio", type="filepath"), gr.Textbox(label="Entity Recognition Prompt")],
     outputs=gr.Textbox(label="Transcription and Entities"),
     title="Whisper-NER Demo",
     description="Upload an audio file and enter entities to identify. The model will transcribe the audio and recognize entities."
 )
 
-# iface.launch()
 iface.launch(share=True)
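
Note: the substantive change in this commit is the @spaces.GPU decorator plus creating the device handle before the model is moved to it. On Hugging Face Spaces ZeroGPU hardware, a GPU is attached only while a decorated function runs, which fits the Space's earlier runtime error. A minimal sketch of the pattern, with a hypothetical Linear layer standing in for the Whisper-NER model:

import spaces
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = torch.nn.Linear(4, 4).to(device)  # module-level setup, mirroring app.py

@spaces.GPU  # GPU is attached only for the duration of each decorated call
def infer(x):
    # Run the model on its device and hand the result back on CPU.
    return model(x.to(device)).cpu()

# Example call: infer(torch.randn(2, 4))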
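
Note: unify_ner_text (its loop body falls between the hunks shown) normalizes each entity label before the labels are rejoined into the prompt. From the visible lines, its behavior should be:

# Each of "/", " ", ":", "_" is replaced by "-", then the result is lowercased.
unify_ner_text("Person Name")  # -> "person-name"
unify_ner_text("DATE/TIME")    # -> "date-time"
unify_ner_text("org:company")  # -> "org-company"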
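
Note: the prompt-stripping step assumes the decoded text starts with the processed prompt followed by exactly one separator character. A toy illustration with a made-up transcription:

prompt = "person, company"                         # after unify_ner_text processing
transcription = "person, company hello from john"  # hypothetical decoder output
transcription = transcription[len(prompt) + 1:]    # drop the prompt plus one character
print(transcription)                               # -> "hello from john"

This is brittle if the decoder does not echo the prompt verbatim, but it matches the slicing logic retained in the commit.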