PRANJAL KAR commited on
Commit
cc205fe
·
1 Parent(s): c658d07
Files changed (2) hide show
  1. Dockerfile +1 -1
  2. test.py +43 -0
Dockerfile CHANGED
@@ -23,4 +23,4 @@ EXPOSE 7860
23
  # ENV GRADIO_SERVER_PORT 7860
24
 
25
  # Run your script when the container launches
26
- CMD ["python", "app.py"]
 
23
  # ENV GRADIO_SERVER_PORT 7860
24
 
25
  # Run your script when the container launches
26
+ CMD ["python", "test.py"]
test.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use a pipeline as a high-level helper
2
+ import transformers
3
+ import torch
4
+ from transformers import pipeline
5
+ import gradio as gr
6
+ from transformers import AutoTokenizer, AutoModelForCausalLM
7
+
8
# Load the tokenizer and model once, then build a single shared
# text-generation pipeline.
#
# Fix: the original created TWO independent pipelines for the same
# checkpoint — `pipeline("text-generation", model="tiiuae/falcon-180B")`
# followed by a second `transformers.pipeline(...)` — which loads the
# 180B-parameter model twice. We now load once and alias `pipe` to it
# so both module-level names remain available to callers.
#
# NOTE(review): falcon-180B requires hundreds of GB of weights; confirm
# the deployment hardware can actually host it.
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-180B")
model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-180B")

# Intentionally shadows the imported `pipeline` function (kept for
# backward compatibility with code that reads this module-level name).
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,  # halves memory vs. float32
    trust_remote_code=True,
    device_map="auto",  # shard across available devices automatically
)

# Backward-compatible alias for the previously duplicated pipeline.
pipe = pipeline
23
+
24
def generate_headlines(topic):
    """Generate up to five news-style headlines about *topic*.

    Uses the module-level text-generation ``pipeline`` with sampling
    (top-k) to produce five candidate sequences, then joins their
    generated texts into a single newline-separated string.

    Parameters:
        topic: Subject to embed in the prompt.

    Returns:
        One string containing the generated headlines, one per line.
    """
    prompt = f"Create at most 5 headlines that highlight {topic}. The headlines should be concise, attention-grabbing, and suitable for use in a news video."
    outputs = pipeline(
        prompt,
        max_length=200,
        do_sample=True,
        top_k=10,
        num_return_sequences=5,
        eos_token_id=tokenizer.eos_token_id,
    )
    lines = []
    for item in outputs:
        lines.append(item['generated_text'])
    return "\n".join(lines)
35
+
36
# Gradio UI: one textbox in, generated headlines (plain text) out.
# Fix: `gr.inputs.Textbox` was deprecated in Gradio 3.0 and removed in
# 4.x; input components now live at the top level as `gr.Textbox`.
iface = gr.Interface(
    fn=generate_headlines,
    inputs=gr.Textbox(placeholder="Enter the topic"),
    outputs="text",
    examples=[["Climate Change"], ["AI Innovations"], ["Space Exploration"]],
)

# Launch the web server (the Dockerfile exposes port 7860, Gradio's default).
iface.launch()