Saskw2010 committed
Commit 0626129 · verified · 1 Parent(s): d62adc1

Update app.py

Files changed (1)
  1. app.py +16 -24
app.py CHANGED
@@ -3,43 +3,34 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 
 INFO = """
-# Inference Provider (fal-ai)
-This Space calls **Wan-AI/Wan2.2-T2V-A14B-Diffusers** through the **fal-ai** provider.
-Click *Sign in* (top-left) so your Hugging Face token is used for the call.
+### Wan2.2 T2V via Inference Providers (fal-ai)
+- Click **Sign in** (left) or set a Space secret `HF_TOKEN`.
+- This Space exposes an HTTP API at `/api/predict`.
 """
 
 def t2v(prompt: str, fps: int = 12, request: gr.Request | None = None):
-    """
-    Returns a local MP4 path for Gradio to serve.
-    Authentication:
-    - If the user clicked 'Sign in', Gradio attaches `Authorization: Bearer hf_...`
-      to the request; we reuse that token with InferenceClient.
-    - Fallback: HF_TOKEN env var (useful for private Spaces / testing).
-    """
-    # extract Bearer token if present
+    # 1) get token from Login or Space secret
     token = None
     if request and "authorization" in request.headers:
         auth = request.headers["authorization"]
         if isinstance(auth, str) and auth.lower().startswith("bearer "):
             token = auth.split(" ", 1)[1]
     token = token or os.getenv("HF_TOKEN")
-
     if not token:
-        raise gr.Error("No token found. Click 'Sign in' or set HF_TOKEN env var in the Space.")
+        raise gr.Error("No token found. Click 'Sign in' OR set a Space secret HF_TOKEN.")
 
-    client = InferenceClient(provider="fal-ai", api_key=token)  # SDK routes to the provider
-    # NOTE: Providers usually return raw bytes for text-to-video
+    # 2) call provider-backed model
+    client = InferenceClient(provider="fal-ai", api_key=token)
     video_bytes = client.text_to_video(
         prompt,
         model="Wan-AI/Wan2.2-T2V-A14B-Diffusers",
     )
 
-    # save to a temp file for Gradio to serve
+    # 3) save to temp mp4 for Gradio to serve
     tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
     tmp.write(video_bytes)
-    tmp.flush()
-    tmp.close()
-    return tmp.name  # Gradio will map this to /file=... URL
+    tmp.flush(); tmp.close()
+    return tmp.name
 
 with gr.Blocks(fill_height=True) as demo:
     with gr.Sidebar():
@@ -48,10 +39,11 @@ with gr.Blocks(fill_height=True) as demo:
     gr.Interface(
         fn=t2v,
         inputs=[gr.Textbox(label="Prompt"), gr.Slider(4, 24, value=12, step=1, label="FPS")],
-        outputs=gr.Video(label="Output"),
-        title="Wan2.2 T2V via fal-ai",
-        api_name="predict",  # <= creates /api/predict
-        allow_flagging="never",
+        outputs=gr.Video(label="Video"),
+        api_name="predict",  # <-- creates /api/predict
+        title="Wan2.2 Text-to-Video"
     )
 
-demo.launch()
+if __name__ == "__main__":
+    # Disable SSR to avoid the "Stopping Node.js server..." loop
+    demo.launch(ssr_mode=False)
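
Note: the api_name="predict" argument is what creates the /api/predict endpoint advertised in INFO. A minimal client-side sketch with gradio_client; the Space ID below is a placeholder, and hf_token is only needed for private or gated Spaces:

from gradio_client import Client

# placeholder Space ID and token; substitute this Space's real "user/name"
client = Client("Saskw2010/<space-name>", hf_token="hf_...")
result = client.predict(
    "a red panda surfing a wave, cinematic",  # prompt
    12,                                       # fps
    api_name="/predict",
)
print(result)  # gradio_client downloads the MP4; the exact return shape can vary by gradio version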
 
 
 
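
The same provider call also works outside the Space. A minimal standalone sketch, assuming a valid token is set in the HF_TOKEN environment variable:

import os
from huggingface_hub import InferenceClient

client = InferenceClient(provider="fal-ai", api_key=os.environ["HF_TOKEN"])
video_bytes = client.text_to_video(
    "a red panda surfing a wave, cinematic",
    model="Wan-AI/Wan2.2-T2V-A14B-Diffusers",
)
with open("out.mp4", "wb") as f:
    f.write(video_bytes)  # the provider returns raw MP4 bytes, as the app assumes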