pradeei committed on
Commit
194af3a
·
verified ·
1 Parent(s): 1c41095

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +208 -77
app.py CHANGED
@@ -1,77 +1,208 @@
1
- # Import necessary libraries
2
-
3
- import streamlit as st
4
- import os
5
- from openai import OpenAI
6
- import json
7
-
8
- def clear_chat():
9
- st.session_state.messages = []
10
-
11
- st.title("Intel® AI for Enterprise Inference")
12
- st.header("LLM chatbot")
13
-
14
- with st.sidebar:
15
- api_key = st.session_state.api_key = st.secrets["openai_apikey"] #Enter openai_api key under "Secrets " in HF settings
16
- base_url = st.session_state.base_url = os.environ.get("base_url") #Enter base_url under "Variables" in HF settings
17
- client = OpenAI(api_key=api_key, base_url=base_url)
18
- models = client.models.list()
19
- model_names = sorted([model.id for model in models]) # Extract 'id' from each model object
20
- default_model_name = "meta-llama/Llama-3.3-70B-Instruct" # Replace with your desired default model name
21
-
22
- # Use st.session_state to persist the selected model
23
- if "selected_model" not in st.session_state:
24
- st.session_state.selected_model = default_model_name if default_model_name in model_names else model_names[0]
25
-
26
- # Create the selectbox without the `index` parameter
27
- modelname = st.selectbox(
28
- "Select an LLM model (running on Intel® Gaudi®). Hosted on Denvr Dataworks.",
29
- model_names,
30
- key="selected_model", # This ties the widget to st.session_state["selected_model"]
31
- )
32
- st.write(f"You selected: {modelname}")
33
- st.button("Start New Chat", on_click=clear_chat)
34
-
35
- st.markdown("---") # Add a horizontal line for separation
36
- st.markdown(
37
- """
38
- Check the latest models hosted on [Denvr Dataworks](https://www.denvrdata.com/intel), and get your own OpenAI-compatible API key.
39
-
40
- Come and chat with other AI developers on [Intel’s DevHub Discord server](https://discord.gg/kfJ3NKEw5t).
41
- """
42
- )
43
-
44
- try:
45
- if "messages" not in st.session_state:
46
- st.session_state.messages = []
47
-
48
- for message in st.session_state.messages:
49
- with st.chat_message(message["role"]):
50
- st.markdown(message["content"])
51
-
52
- if prompt := st.chat_input("What is up?"):
53
- st.session_state.messages.append({"role": "user", "content": prompt})
54
- with st.chat_message("user"):
55
- st.markdown(prompt)
56
-
57
- with st.chat_message("assistant"):
58
- try:
59
- stream = client.chat.completions.create(
60
- model=modelname,
61
- messages=[
62
- {"role": m["role"], "content": m["content"]}
63
- for m in st.session_state.messages
64
- ],
65
- max_tokens=4096,
66
- stream=True,
67
- )
68
- response = st.write_stream(stream)
69
- except Exception as e:
70
- st.error(f"An error occurred while generating the response: {e}")
71
- response = "An error occurred while generating the response."
72
-
73
- st.session_state.messages.append({"role": "assistant", "content": response})
74
- except KeyError as e:
75
- st.error(f"Key error: {e}")
76
- except Exception as e:
77
- st.error(f"An unexpected error occurred: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import streamlit
3
+ as st
4
+ import os
5
+ from openai
6
+ import OpenAI
7
+ import json
8
+
9
def clear_chat():
    """Wipe the stored conversation so the next rerun starts a fresh chat."""
    st.session_state["messages"] = []
11
+
12
def initialize_provider_settings(provider_choice):
    """Configure API settings based on provider selection.

    Returns a dict with "api_key_source", "base_url_source" and
    "fallback_model" for the chosen provider, or an empty dict when the
    provider name is unknown.
    """
    denvr_settings = {
        "api_key_source": st.secrets.get("openai_apikey", ""),
        "base_url_source": os.environ.get("base_url", ""),
        "fallback_model": "meta-llama/Llama-3.3-70B-Instruct",
    }
    ibm_settings = {
        "api_key_source": os.environ.get("ibm_openai_apikey", ""),
        "base_url_source": os.environ.get("ibm_base_url", ""),
        "fallback_model": None,
    }
    registry = {
        "Denvr Dataworks": denvr_settings,
        "IBM": ibm_settings,
    }
    return registry.get(provider_choice, {})
33
+
34
st.title("Intel® AI for Enterprise Inference")
st.header("LLM chatbot")

with st.sidebar:
    # Provider selection dropdown
    available_providers = ["Denvr Dataworks", "IBM"]

    # Seed the widget's session-state key once so the first provider is the
    # default on the initial run; later reruns keep the user's choice.
    if "current_provider_choice" not in st.session_state:
        st.session_state.current_provider_choice = available_providers[0]

    provider_selection = st.selectbox(
        "Choose AI Provider:",
        available_providers,
        key="current_provider_choice"
    )

    # Get provider-specific settings (API key, base URL, preferred model).
    provider_settings = initialize_provider_settings(provider_selection)

    # Validate required credentials; st.stop() aborts this script run early.
    if not provider_settings.get("api_key_source") or not provider_settings.get("base_url_source"):
        st.error(f"Configuration missing for {provider_selection}. Check environment variables.")
        st.stop()

    # Setup OpenAI client
    try:
        api_client = OpenAI(
            api_key=provider_settings["api_key_source"],
            base_url=provider_settings["base_url_source"]
        )
        available_models = api_client.models.list()
        model_list = sorted([m.id for m in available_models])

        # Handle model selection with provider switching: (re)pick a default
        # model whenever this provider has no stored choice yet, or the user
        # just switched providers (tracked via "last_provider").
        session_key = f"model_for_{provider_selection}"
        if session_key not in st.session_state or st.session_state.get("last_provider") != provider_selection:
            preferred_model = provider_settings.get("fallback_model")
            if preferred_model and preferred_model in model_list:
                st.session_state[session_key] = preferred_model
            elif model_list:
                st.session_state[session_key] = model_list[0]
            st.session_state.last_provider = provider_selection

        # NOTE(review): this empty-list check runs after the default-model
        # seeding above; with no models it simply seeds nothing, then stops.
        if not model_list:
            st.error(f"No models found for {provider_selection}")
            st.stop()

        # Model selection interface; the widget persists its value under
        # session_key, pre-seeded above.
        chosen_model = st.selectbox(
            f"Available models from {provider_selection}:",
            model_list,
            key=session_key,
        )
        st.info(f"Active model: {chosen_model}")

    except Exception as connection_error:
        # Any failure to reach the endpoint or list models aborts this run.
        st.error(f"Connection failed for {provider_selection}: {connection_error}")
        st.stop()

    st.button("Reset Conversation", on_click=clear_chat)

    st.markdown("---")

    # Display provider-specific information
    if provider_selection == "Denvr Dataworks":
        st.markdown(
            """
            **Denvr Dataworks Integration**

            Visit [Denvr Dataworks](https://www.denvrdata.com/intel) for model information and API access.

            Join the community: [Intel's DevHub Discord](https://discord.gg/kfJ3NKEw5t)
            """
        )
    elif provider_selection == "IBM":
        st.markdown(
            """
            **IBM AI Services**

            Connected to IBM's AI infrastructure. Ensure your credentials are properly configured.
            """
        )
146
+
147
+
148
+ # Main chat interface
149
+
150
+ try:
151
+ if"messages"notin st.session_state:
152
+ st.session_state.messages = []
153
+
154
+
155
+ # Display conversation history
156
+
157
+ for msg
158
+ in st.session_state.messages:
159
+ with st.chat_message(msg["role"]):
160
+ st.markdown(msg["content"])
161
+
162
+
163
+ # Handle new user input
164
+
165
+ if user_input := st.chat_input("Enter your message..."):
166
+ st.session_state.messages.append({"role":
167
+ "user",
168
+ "content": user_input})
169
+ with st.chat_message("user"):
170
+ st.markdown(user_input)
171
+
172
+
173
+ # Generate AI response
174
+
175
+ with st.chat_message("assistant"):
176
+ try:
177
+ response_stream = api_client.chat.completions.create(
178
+ model=chosen_model,
179
+ messages=[
180
+ {"role": msg["role"],
181
+ "content": msg["content"]}
182
+ for msg
183
+ in st.session_state.messages
184
+ ],
185
+ max_tokens=4096,
186
+ stream=True,
187
+ )
188
+ ai_response = st.write_stream(response_stream)
189
+ except Exception
190
+ as generation_error:
191
+ st.error(f"Response generation failed:
192
+ {generation_error}")
193
+ ai_response = "Unable to generate response due to an error."
194
+
195
+ st.session_state.messages.append({"role":
196
+ "assistant",
197
+ "content": ai_response})
198
+
199
+ except KeyError
200
+ as key_err:
201
+ st.error(f"Configuration key error:
202
+ {key_err}")
203
+ except Exception
204
+ as general_err:
205
+ st.error(f"Unexpected error occurred:
206
+ {general_err}")
207
+
208
+