Ryan-PC committed
Commit e679ee3 (verified) · Parent(s): 8924056

Update app.py

Files changed (1)
  1. app.py +10 -30
app.py CHANGED
@@ -3,7 +3,7 @@ from huggingface_hub import InferenceClient
 
 def respond(
     message,
-    history: list[dict[str, str]],
+    history: list[list[str]],
     system_message,
     max_tokens,
     temperature,
@@ -12,11 +12,17 @@ def respond(
 ):
     client = InferenceClient(
         token=hf_token.token,
-        model="DeepHat/DeepHat-V1-7B"  # <-- correct model
+        model="DeepHat/DeepHat-V1-7B"
     )
 
+    # Build the conversation history correctly
     messages = [{"role": "system", "content": system_message}]
-    messages.extend(history)
+
+    for user_msg, bot_msg in history:
+        messages.append({"role": "user", "content": user_msg})
+        messages.append({"role": "assistant", "content": bot_msg})
+
+    # Append the new user message
     messages.append({"role": "user", "content": message})
 
     response = ""
@@ -34,30 +40,4 @@ def respond(
         token = choices[0].delta.content
 
         response += token
-        yield response
-
-
-chatbot = gr.ChatInterface(
-    respond,
-    type="messages",
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-with gr.Blocks() as demo:
-    with gr.Sidebar():
-        gr.LoginButton()
-    chatbot.render()
-
-if __name__ == "__main__":
-    demo.launch()
+        yield response
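
In short, the commit switches `respond` from a list of message dicts to Gradio's tuple-format history (a list of `[user, assistant]` pairs), rebuilds the OpenAI-style `messages` list in a loop, and drops the Gradio UI wiring from this part of app.py. Below is a minimal sketch of a full app driven by the rewritten function, assuming the standard Hugging Face Gradio chat template for everything the diff does not show: the `top_p` and `hf_token` parameters, the `chat_completion` streaming call, and the re-added `gr.ChatInterface` block are assumptions for illustration, not part of this commit. Note that `type="messages"` (present in the removed block) is omitted, since with it Gradio would pass dict-format history rather than the pairs this loop expects.

# Sketch only: the respond() body follows the diff above; the hf_token
# parameter, the chat_completion call, and the ChatInterface wiring are
# assumptions based on the standard Hugging Face Gradio chat template.
import gradio as gr
from huggingface_hub import InferenceClient


def respond(message, history: list[list[str]], system_message,
            max_tokens, temperature, top_p, hf_token: gr.OAuthToken):
    client = InferenceClient(token=hf_token.token, model="DeepHat/DeepHat-V1-7B")

    # Rebuild the OpenAI-style message list from the tuple-format history.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    # Stream tokens back to the UI as they arrive.
    response = ""
    for chunk in client.chat_completion(
        messages, max_tokens=max_tokens, stream=True,
        temperature=temperature, top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
            yield response


# Without type="messages", gr.ChatInterface passes history as [user, bot]
# pairs, which is what the rewritten loop above expects.
chatbot = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05,
                  label="Top-p (nucleus sampling)"),
    ],
)

with gr.Blocks() as demo:
    with gr.Sidebar():
        gr.LoginButton()
    chatbot.render()

if __name__ == "__main__":
    demo.launch()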