enacimie committed on
Commit
194c005
·
verified ·
1 Parent(s): 496f987

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -3,7 +3,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
  import torch
4
 
5
  # Load model and tokenizer
6
- model_name = "LiquidAI/LFM2-350M"
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
  model = AutoModelForCausalLM.from_pretrained(model_name)
9
 
@@ -51,9 +51,9 @@ def chat_function(message, history):
51
  return response
52
 
53
  # Gradio Interface
54
- with gr.Blocks(title="LFM2-350M Chat") as demo:
55
- gr.Markdown("# 🤖 LFM2-350M Simple Chat")
56
- gr.Markdown("A minimal chat interface using `LiquidAI/LFM2-350M`. Optimized for clean single-turn responses.")
57
 
58
  chatbot = gr.Chatbot(height=400)
59
  msg = gr.Textbox(label="Type your message", placeholder="Say something...")
 
3
  import torch
4
 
5
  # Load model and tokenizer
6
+ model_name = "baidu/ERNIE-4.5-0.3B-PT"
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
  model = AutoModelForCausalLM.from_pretrained(model_name)
9
 
 
51
  return response
52
 
53
  # Gradio Interface
54
+ with gr.Blocks(title="baidu/ERNIE-4.5-0.3B-PT Chat") as demo:
55
+ gr.Markdown("# 🤖 baidu/ERNIE-4.5-0.3B-PT Simple Chat")
56
+ gr.Markdown("A minimal chat interface using `baidu/ERNIE-4.5-0.3B-PT`. Optimized for clean single-turn responses.")
57
 
58
  chatbot = gr.Chatbot(height=400)
59
  msg = gr.Textbox(label="Type your message", placeholder="Say something...")