DSDUDEd committed on
Commit
fe20e3e
·
verified ·
1 Parent(s): 3284c8c

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -0
app.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
3
+ import torch
4
+ import threading
5
+
6
14
+
15
+ # Streaming response function
16
+ def chat_with_dudea(message, history):
17
+ messages = [{"role": "system", "content": "You are DUDEAIBeta1.1, a fun and smart AI assistant."}]
18
+ for user_msg, bot_msg in history:
19
+ messages.append({"role": "user", "content": user_msg})
20
+ if bot_msg:
21
+ messages.append({"role": "assistant", "content": bot_msg})
22
+ messages.append({"role": "user", "content": message})
23
+
24
+ inputs = tokenizer.apply_chat_template(
25
+ messages,
26
+ add_generation_prompt=True,
27
+ tokenize=True,
28
+ return_dict=True,
29
+ return_tensors="pt"
30
+ ).to(model.device)
31
+
32
+ streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
33
+ generation_kwargs = dict(inputs, max_new_tokens=250, streamer=streamer)
34
+ thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
35
+ thread.start()
36
+
37
+ partial_response = ""
38
+ for new_text in streamer:
39
+ partial_response += new_text
40
+ yield partial_response
41
+
42
+ # Custom UI
43
+ with gr.Blocks(css="""
44
+ #chatbox {height: 600px; overflow-y: auto;}
45
+ .message {display: flex; align-items: flex-start; margin: 10px;}
46
+ .user {flex-direction: row-reverse;}
47
+ .avatar {width: 50px; height: 50px; border-radius: 50%; margin: 5px;}
48
+ .bubble {padding: 12px 16px; border-radius: 16px; max-width: 70%;}
49
+ .user .bubble {background-color: #4f46e5; color: white; border-bottom-right-radius: 4px;}
50
+ .bot .bubble {background-color: #f3f4f6; color: black; border-bottom-left-radius: 4px;}
51
+ """) as demo:
52
+ gr.Markdown(
53
+ "<h1 style='text-align: center;'>🤖 DUDEAIBeta1.1</h1>"
54
+ "<p style='text-align:center;'>Next-gen AI with live typing ✨</p>"
55
+ )
56
+
57
+ # 👇 put your avatar files in the Space repo (user.png, ai.png)
58
+ chatbot = gr.Chatbot(
59
+ elem_id="chatbox",
60
+ avatar_images=("user.png", "ai.png") # first is USER, second is AI
61
+ )
62
+
63
+ msg = gr.Textbox(placeholder="Type your message...", container=False)
64
+ clear = gr.Button("🧹 Clear Chat")
65
+
66
+ def respond(message, history):
67
+ response_stream = chat_with_dudea(message, history)
68
+ history.append((message, ""))
69
+ for partial in response_stream:
70
+ history[-1] = (message, partial)
71
+ yield "", history
72
+
73
+ msg.submit(respond, [msg, chatbot], [msg, chatbot])
74
+ clear.click(lambda: None, None, chatbot, queue=False)
75
+
76
+ demo.launch()