ProPerNounpYK committed on
Commit
038a804
·
verified ·
1 Parent(s): 8de9cf0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -16
app.py CHANGED
@@ -1,21 +1,26 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
 
3
 
4
  """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
8
 
 
 
 
9
 
10
  def respond(
11
  message,
12
  history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
 
15
  temperature,
16
- top_p,
17
  ):
18
- messages = [{"role": "system", "content": system_message}]
19
 
20
  for val in history:
21
  if val[0]:
@@ -27,12 +32,12 @@ def respond(
27
 
28
  response = ""
29
 
30
- for message in client.chat_completion(
31
  messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
  temperature=temperature,
35
- top_p=top_p,
36
  ):
37
  token = message.choices[0].delta.content
38
 
@@ -40,24 +45,27 @@ def respond(
40
  yield response
41
 
42
  """
43
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
44
  """
 
 
45
  demo = gr.ChatInterface(
46
  respond,
47
- additional_inputs=[
48
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
49
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
50
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
51
  gr.Slider(
52
  minimum=0.1,
53
  maximum=1.0,
54
  value=0.95,
55
  step=0.05,
56
- label="Top-p (nucleus sampling)",
57
  ),
 
58
  ],
59
  )
60
 
61
 
62
- if __name__ == "__main__":
63
  demo.launch()
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
+ import pandas as pd
4
 
5
  """
6
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
7
  """
8
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
9
 
10
def loadprompts():
    """Load the prompt table shipped alongside the app.

    Reads ``prompts.csv`` from the current working directory.

    Returns:
        pd.DataFrame: one row per prompt, columns as defined by the CSV header.

    Raises:
        FileNotFoundError: if ``prompts.csv`` is not present.
    """
    # pandas' CSV reader is spelled `read_csv`; `readcsv` does not exist.
    prompts = pd.read_csv("prompts.csv")
    return prompts
13
 
14
  def respond(
15
  message,
16
  history: list[tuple[str, str]],
17
+ prompts: pd.DataFrame,
18
+ systemmessage,
19
+ maxtokens,
20
  temperature,
21
+ topp,
22
  ):
23
+ messages = [{"role": "system", "content": systemmessage}]
24
 
25
  for val in history:
26
  if val[0]:
 
32
 
33
  response = ""
34
 
35
+ for message in client.chat_completion(
36
  messages,
37
+ max_tokens=maxtokens,
38
+ stream=True,
39
  temperature=temperature,
40
+ top_p=topp,
41
  ):
42
  token = message.choices[0].delta.content
43
 
 
45
  yield response
46
 
47
  """
48
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
49
  """
50
# Load the prompt table once at startup so the UI can display it.
prompts = loadprompts()

# Chat UI: wires `respond` to a ChatInterface with the standard sampling
# controls plus a read-only view of the loaded prompts table.
# Restored tokens corrupted in this revision: `additional_inputs`,
# `gr.Textbox`, `gr.Dataframe`, and the "Temperature" / "Top-p" labels.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
        gr.Dataframe(value=prompts, label="Prompts"),
    ],
)
68
 
69
 
70
# Standard script entry-point guard (this revision had the dunders stripped:
# `if name == "main"` would raise NameError). Launch the Gradio app only
# when the file is executed directly, not when imported.
if __name__ == "__main__":
    demo.launch()