import gradio as gr
import os
from openai import OpenAI
import time


def get_client(api_key):
    """Build an OpenAI-compatible client pointed at the ZenMux endpoint."""
    return OpenAI(
        base_url="https://zenmux.ai/api/v1",
        api_key=api_key,
    )


def stream_response(messages, api_key, model="inclusionai/ring-1t"):
    """Stream responses from the ZenMux API.

    Generator: yields the *accumulated* assistant text after each received
    chunk so the caller can progressively update the UI. On any failure it
    yields a single error string instead of raising, so the app stays up.
    """
    try:
        client = get_client(api_key)
        # Re-shape history into the minimal {role, content} dicts the API expects.
        api_messages = [
            {"role": msg["role"], "content": msg["content"]}
            for msg in messages
        ]
        stream = client.chat.completions.create(
            model=model,
            messages=api_messages,
            stream=True,
        )
        response_text = ""
        for chunk in stream:
            # delta.content is None on role/finish chunks; skip those.
            if chunk.choices[0].delta.content:
                response_text += chunk.choices[0].delta.content
                yield response_text
    except Exception as e:
        # Surface the failure in the chat window rather than crashing.
        yield f"Error: {str(e)}"


def _extract_text(message):
    """Return plain text from a MultimodalTextbox payload (dict) or a str.

    gr.MultimodalTextbox delivers {"text": ..., "files": [...]}; the original
    code appended the raw payload as message content, which breaks the API
    call. Accepts a bare string too, for robustness.
    """
    if isinstance(message, dict):
        return (message.get("text") or "").strip()
    return (message or "").strip()


def chat_interface(message, history, api_key, model):
    """Handle one chat turn: append the user message, stream the reply.

    Generator: yields the updated ``history`` (messages format) after every
    streamed chunk so the Chatbot component renders progressively.
    """
    text = _extract_text(message)

    if not api_key:
        # BUG FIX: this function is a generator, so the original bare
        # `return history` never delivered this warning to the UI —
        # the value of `return` in a generator is discarded. Yield it.
        history.append({
            "role": "assistant",
            "content": "Please enter your ZenMux API key first.",
        })
        yield history
        return

    if not text:
        # Ignore empty submissions instead of sending a blank prompt.
        yield history
        return

    history.append({"role": "user", "content": text})

    full_response = ""
    for response in stream_response(history, api_key, model):
        full_response = response
        # Update the trailing assistant message in place while streaming.
        if history and history[-1]["role"] == "assistant":
            history[-1]["content"] = response
        else:
            history.append({"role": "assistant", "content": response})
        yield history

    # Ensure the final text is set even if the stream produced no chunks.
    if history and history[-1]["role"] == "assistant":
        history[-1]["content"] = full_response
    else:
        history.append({"role": "assistant", "content": full_response})
    yield history


def clear_history():
    """Reset the chat history to an empty message list."""
    return []


def clear_input():
    """Clear the multimodal input box after a message is sent.

    Returning None (not "") resets a MultimodalTextbox, whose value is a
    dict, not a plain string.
    """
    return None


# Custom CSS for a modern look
css = """
.gradio-container { max-width: 900px !important; margin: auto !important; }
.main-header {
    text-align: center; margin-bottom: 2rem; padding: 1.5rem;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    border-radius: 15px; color: white;
    box-shadow: 0 4px 15px rgba(0,0,0,0.1);
}
.main-header h1 { margin: 0; font-size: 2.5rem; font-weight: 700; }
.main-header p { margin: 0.5rem 0 0 0; opacity: 0.9; font-size: 1.1rem; }
.api-section {
    background: #f8f9fa; padding: 1.5rem; border-radius: 10px;
    margin-bottom: 1.5rem; border: 1px solid #e9ecef;
}
.chat-container { border: 1px solid #e9ecef; border-radius: 10px; overflow: hidden; }
.message.user {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
}
.message.assistant { background: #f8f9fa; color: #333; }
.message:hover { transform: translateY(-1px); box-shadow: 0 2px 8px rgba(0,0,0,0.1); }
.footer { text-align: center; margin-top: 2rem; padding: 1rem; color: #666; font-size: 0.9rem; }
.footer a { color: #667eea; text-decoration: none; font-weight: 600; }
.footer a:hover { text-decoration: underline; }
"""

# Create the Gradio interface
with gr.Blocks(css=css, title="ZenMux Chat Interface") as demo:
    # Header
    with gr.Row():
        gr.HTML(
            """
            <div class="main-header">
                <h1>🤖 ZenMux Chat</h1>
                <p>Powered by InclusionAI Ring-1T • Built with anycoder</p>
            </div>
            """
        )

    # API Key Section
    with gr.Row():
        with gr.Column():
            gr.HTML(
                """
                <div class="api-section">
                    <h3>🔑 API Configuration</h3>
                    <p>Enter your ZenMux API key to start chatting. Get your key from
                    <a href="https://zenmux.ai" target="_blank">zenmux.ai</a></p>
                </div>
                """
            )
            api_key_input = gr.Textbox(
                label="ZenMux API Key",
                type="password",
                placeholder="Enter your ZenMux API key here...",
                scale=4,
            )
            model_selector = gr.Dropdown(
                choices=[
                    "inclusionai/ring-1t",
                    "inclusionai/ring-1t-turbo",
                    "inclusionai/ring-1t-lite",
                ],
                value="inclusionai/ring-1t",
                label="Model",
                scale=1,
            )

    # Chat Interface
    with gr.Row():
        chatbot = gr.Chatbot(
            type="messages",
            height=500,
            show_copy_button=True,
            bubble_full_width=False,
            placeholder="Start a conversation by typing a message below...",
        )

    # Input Section
    with gr.Row():
        msg_input = gr.MultimodalTextbox(
            placeholder="Type your message here...",
            show_label=False,
            scale=4,
        )
        with gr.Column(scale=1):
            submit_btn = gr.Button("Send", variant="primary", size="lg")
            clear_btn = gr.Button("Clear", variant="secondary", size="lg")

    # Footer (the CSS defines .footer styling; content reconstructed minimally)
    gr.HTML(
        """
        <div class="footer">
            Powered by <a href="https://zenmux.ai" target="_blank">ZenMux</a>
        </div>
        """
    )

    # Event handlers. Chaining clear_input with .then() (instead of binding a
    # second independent listener on the same event) guarantees the input is
    # cleared only after the send has been dispatched.
    msg_input.submit(
        chat_interface,
        [msg_input, chatbot, api_key_input, model_selector],
        chatbot,
    ).then(clear_input, outputs=msg_input)

    submit_btn.click(
        chat_interface,
        [msg_input, chatbot, api_key_input, model_selector],
        chatbot,
    ).then(clear_input, outputs=msg_input)

    clear_btn.click(clear_history, outputs=chatbot)

    # Examples: a MultimodalTextbox takes dict-shaped values, not bare strings.
    gr.Examples(
        examples=[
            [{"text": "What is artificial intelligence?"}],
            [{"text": "Explain quantum computing in simple terms"}],
            [{"text": "Write a short story about a robot discovering emotions"}],
            [{"text": "What are the benefits of renewable energy?"}],
            [{"text": "Help me understand machine learning concepts"}],
        ],
        inputs=msg_input,
        label="Example Prompts",
    )

if __name__ == "__main__":
    demo.launch()