from transformers_js_py import import_transformers_js, read_audio
import gradio as gr
# NOTE: top-level `await` is only valid here because this script runs inside
# an async Pyodide/Gradio-Lite context (the `transformers_js_py` bridge
# implies an in-browser runtime) — it is not runnable under plain CPython.
transformers = await import_transformers_js()
pipeline = transformers.pipeline
# Load a browser-side ASR pipeline backed by the Whisper-tiny English model.
pipe = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en')
async def asr(audio_path):
    """Transcribe the audio file at *audio_path* and return the recognized text.

    The waveform is decoded at 16 kHz (the rate the Whisper pipeline expects)
    and fed to the module-level `pipe` ASR pipeline.
    """
    waveform = read_audio(audio_path, 16000)
    transcription = await pipe(waveform)
    return transcription["text"]
# Wire the transcription coroutine into a minimal audio-in / text-out UI.
demo = gr.Interface(
    fn=asr,
    inputs=gr.Audio(type="filepath"),
    outputs=gr.Text(),
    examples=[["jfk.wav"]],
)
demo.launch()
            
            
            
transformers_js_py
numpy
scipy