Upload folder using huggingface_hub
- app.py +36 -8
- chat_utils.py +60 -1
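
In short: this commit caps a conversation at MAX_MESSAGES_IN_CONVERSATION = 5 follow-ups. init_session() moves from app.py into chat_utils.py, which gains a limited_chat_wrapper that warns one message before the cap and silently starts a fresh thread once it is exceeded; app.py wires that wrapper into the ChatInterface, round-trips thread_id and message_count through additional_inputs/additional_outputs, shows a remaining-follow-ups counter, and adds a ❓ Quiz example prompt.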
app.py CHANGED

```diff
@@ -5,7 +5,6 @@ import asyncio
 import logging
 import time
 import traceback
-import uuid
 from html import escape
 import gradio as gr
 from dotenv import load_dotenv
@@ -13,7 +12,12 @@ from langchain_core.messages.ai import AIMessageChunk, AIMessage
 from langchain_core.messages.system import SystemMessage
 from langchain_core.messages.tool import ToolMessage
 
-from chat_utils import …
+from chat_utils import (
+    MAX_MESSAGES_IN_CONVERSATION,
+    chat_wrapper,
+    init_session,
+    limited_chat_wrapper,
+)
 from config import SanatanConfig
 from db import SanatanDatabase
 from drive_downloader import ZipDownloader
@@ -55,16 +59,13 @@ def init():
     downloader.unzip(zip_path, extract_to="./")
 
 
-def init_session():
-    return str(uuid.uuid4())
-
-
 def render_message_with_tooltip(content: str, max_chars=200):
     short = escape(content[:max_chars]) + ("…" if len(content) > max_chars else "")
     return f"<div title='{escape(content)}'>{short}</div>"
 
 
 # UI Elements
+message_count = gr.State(0)
 thread_id = gr.State(init_session)
 
 supported_scriptures = "\n - ".join(
@@ -213,6 +214,7 @@ with gr.Blocks(
         "📜 Explore Ashwar": "From the same azhwar as the above pasuram, show me other pasurams",
         "🏛️ Another divya desam (same āzhwār)": "show pasuram from another divya desam by the same azhwar",
         "👤 Another āzhwār (same divya desam)": "show pasuram from the same divya desam by another azhwar",
+        "❓ Quiz": "Pick any pasuram. Frame a question to ask me related to that pasuram based on its explanatory notes and word by word meanings. Output ONLY the pasuram title, the verse number, the question you framed and the answer to that question.",
     }
 
     chatbot = gr.Chatbot(
@@ -233,15 +235,23 @@
     )
     chatInterface = gr.ChatInterface(
         title="Sanatan-AI",
-        fn=…
-        additional_inputs=[…
+        fn=limited_chat_wrapper,
+        additional_inputs=[
+            thread_id,
+            debug_checkbox,
+            preferred_language,
+            message_count,
+        ],
         additional_inputs_accordion=additional_inputs,
+        additional_outputs=[thread_id, message_count],
         chatbot=chatbot,
         textbox=message_textbox,
         type="messages",
     )
 
     with gr.Column(visible=False) as followup_examples:
+        with gr.Row():
+            followup_count_textbox = gr.Markdown(container=False, show_label=False)
         with gr.Row():
             gr.Examples(
                 label="Quick Navigation Follow-ups",
@@ -286,3 +296,21 @@
         inputs=[gr.State(False)],
         outputs=[followup_examples],
     )
+
+    def update_followup_counter(count):
+        remaining_followups = MAX_MESSAGES_IN_CONVERSATION - count
+        no_more_followups = False
+        if remaining_followups > 1:
+            text = f"✨ `{remaining_followups}` more follow-ups to go."
+        elif remaining_followups == 1:
+            text = "🌟 Just one more follow-up to go!"
+        else:
+            text = "✅ That was the last follow-up."
+            no_more_followups = True
+        return gr.update(value=text), gr.update(visible=not no_more_followups)
+
+    message_count.change(
+        update_followup_counter,
+        inputs=[message_count],
+        outputs=[followup_count_textbox, followup_examples],
+    )
```
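The wiring above leans on ChatInterface's additional_inputs / additional_outputs: Gradio passes the current values of the listed state components into the fn as extra positional arguments and writes whatever the fn returns (or yields) after the chat response back into them, so thread_id and message_count persist across turns without globals. Here is a minimal, self-contained sketch of that round-trip, assuming a recent Gradio version in which ChatInterface accepts additional_outputs (as this commit relies on); echo_with_count is a hypothetical stand-in for limited_chat_wrapper:

```python
# Sketch of the ChatInterface state round-trip (hypothetical echo fn).
import uuid

import gradio as gr


def init_session():
    return str(uuid.uuid4())


def echo_with_count(message, history, thread_id, count):
    # additional_inputs arrive after (message, history); the values returned
    # after the response are written back into additional_outputs.
    count += 1
    return f"[{thread_id[:8]} / msg {count}] {message}", thread_id, count


with gr.Blocks() as demo:
    thread_id = gr.State(init_session)  # callable default: fresh UUID per session
    count = gr.State(0)
    gr.ChatInterface(
        fn=echo_with_count,
        additional_inputs=[thread_id, count],
        additional_outputs=[thread_id, count],
        type="messages",
    )

if __name__ == "__main__":
    demo.launch()
```

Because the real fn is an async generator, the extra outputs are re-emitted on every yield, which is why limited_chat_wrapper below yields (chunk, thread_id, count) triples throughout.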
chat_utils.py CHANGED

```diff
@@ -1,3 +1,4 @@
+import uuid
 import json
 import random
 import asyncio
@@ -58,6 +59,7 @@ thinking_verbs = [
 
 graph = generate_graph()
 
+
 def add_node_to_tree(
     node_tree: list[str], node_label: str, tooltip: str = "no arguments to show"
 ) -> list[str]:
@@ -114,6 +116,7 @@ def chat(debug_mode, message, history, thread_id, preferred_language="English"):
     )
     return response["messages"][-1].content
 
+
 async def chat_streaming(
     debug_mode: bool, message, history, thread_id, preferred_language="English"
 ):
@@ -284,4 +287,60 @@ async def chat_streaming(
             "\nhere is what I got so far ...\n"
             f"\n{streamed_response}"
         )
-        return
+        return
+
+
+def init_session():
+    return str(uuid.uuid4())
+
+
+MAX_MESSAGES_IN_CONVERSATION = 5
+
+
+async def limited_chat_wrapper(
+    message, history, thread_id, debug, preferred_language, count
+):
+    # increment **after processing the message**
+    count += 1
+
+    # warn before reset
+    if count == MAX_MESSAGES_IN_CONVERSATION - 1:
+        yield [
+            {
+                "role": "system",
+                "content": "⚠️ You are allowed to ask one more follow-up. The next question will be considered a new conversation. Please wait ... processing your request ...",
+            }
+        ], thread_id, count
+        await asyncio.sleep(1)
+
+    # reset
+    if count > MAX_MESSAGES_IN_CONVERSATION:
+        thread_id = init_session()
+        history = []
+        count = 1
+        yield [
+            {
+                "role": "system",
+                "content": "🔄 This is now considered a new question. Don't worry, your message shall still be processed! If I am giving irrelevant responses, you know why :-)",
+            }
+        ], thread_id, count
+        await asyncio.sleep(1)
+
+    # normal flow: stream from your original chat_wrapper
+    final_chunk = []
+    async for chunk in chat_wrapper(
+        message, history, thread_id, debug, preferred_language
+    ):
+        yield chunk, thread_id, count
+        final_chunk = chunk
+
+    # Simulating LLM Response
+    # for i in range(5):
+    #     final_chunk += [{
+    #         "role": "assistant",
+    #         "content": f"Simulated LLM output {i+1}",
+    #     }]
+    #     yield final_chunk, thread_id, count
+    #     await asyncio.sleep(0.25)
+
+    yield final_chunk, thread_id, count
```
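A note on the counting: count is incremented at the top of the wrapper (the comment notwithstanding), so with the cap at 5 the ⚠️ warning fires on the user's fourth message and the sixth message triggers the 🔄 reset to a new thread_id with count back at 1; update_followup_counter in app.py then renders MAX_MESSAGES_IN_CONVERSATION - count as the remaining budget. The generator can also be exercised outside Gradio to watch this happen. A sketch, assuming chat_utils (and the graph it builds at import time) loads in your environment; history is left empty since only the counter behaviour is being observed:

```python
# Drive limited_chat_wrapper directly and watch the cap/reset kick in.
import asyncio

from chat_utils import init_session, limited_chat_wrapper


async def main():
    thread_id, count = init_session(), 0
    for turn in range(7):  # two turns past the cap of 5
        async for chunks, thread_id, count in limited_chat_wrapper(
            f"question {turn + 1}", [], thread_id, False, "English", count
        ):
            pass  # in the UI, every yield repaints the chat with `chunks`
        print(f"turn {turn + 1}: count={count} thread={thread_id[:8]}")


asyncio.run(main())
```

Expected trace: the thread prefix is stable for turns 1 through 5, then changes at turn 6, where count drops back to 1.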