🤖 ZenMux Chat
Powered by InclusionAI Ring-1T • Built with anycoder
import gradio as gr
import os
from openai import OpenAI
import time


def get_client(api_key):
    """Build an OpenAI-compatible client pointed at the ZenMux gateway."""
    return OpenAI(
        base_url="https://zenmux.ai/api/v1",
        api_key=api_key,
    )


def stream_response(messages, api_key, model="inclusionai/ring-1t"):
    """Stream a chat completion from the ZenMux API.

    Args:
        messages: list of {"role": ..., "content": ...} dicts (OpenAI chat format).
        api_key: ZenMux API key used to authenticate the request.
        model: model identifier passed through to the API.

    Yields:
        The accumulated response text after each received chunk; on any
        failure, yields a single "Error: ..." string instead of raising.
    """
    try:
        client = get_client(api_key)

        # Copy only the fields the API expects, dropping any extra keys
        # (e.g. Gradio metadata) that may be present on history entries.
        api_messages = [
            {"role": msg["role"], "content": msg["content"]}
            for msg in messages
        ]

        stream = client.chat.completions.create(
            model=model,
            messages=api_messages,
            stream=True,
        )

        # Accumulate deltas so each yield is the full text so far.
        response_text = ""
        for chunk in stream:
            # Guard: some streaming chunks (e.g. final/usage chunks) carry an
            # empty `choices` list, and delta.content may be None.
            if chunk.choices and chunk.choices[0].delta.content:
                response_text += chunk.choices[0].delta.content
                yield response_text

    except Exception as e:
        # Surface the failure in the chat window rather than crashing the UI.
        yield f"Error: {str(e)}"


def chat_interface(message, history, api_key, model):
    """Generator handling one chat turn; yields updated history for Gradio.

    Args:
        message: the user's new message text.
        history: mutable list of {"role", "content"} dicts (messages format).
        api_key: ZenMux API key; if empty, a prompt to supply one is shown.
        model: model identifier forwarded to `stream_response`.

    Yields:
        `history` after each streamed update, so the UI renders progressively.
    """
    if not api_key:
        history.append({"role": "assistant", "content": "Please enter your ZenMux API key first."})
        # BUGFIX: this function is a generator, so a bare `return history`
        # never surfaces the message — it must be yielded first.
        yield history
        return

    # Add user message to history before querying the model.
    history.append({"role": "user", "content": message})

    # Stream the assistant response, updating the last history entry in place.
    full_response = ""
    for response in stream_response(history, api_key, model):
        full_response = response
        if history and history[-1]["role"] == "assistant":
            history[-1]["content"] = response
        else:
            history.append({"role": "assistant", "content": response})
        yield history

    # Ensure the final accumulated text is what remains in history
    # (covers the case where the stream produced no chunks at all).
    if history and history[-1]["role"] == "assistant":
        history[-1]["content"] = full_response
    else:
        history.append({"role": "assistant", "content": full_response})
    yield history


def clear_history():
    """Reset the chat: return an empty message list."""
    return []


# Custom CSS for a modern look
css = """
.gradio-container { max-width: 900px !important; margin: auto !important; }
.main-header { text-align: center; margin-bottom: 2rem; padding: 1.5rem; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 15px; color: white; box-shadow: 0 4px 15px rgba(0,0,0,0.1); }
.main-header h1 { margin: 0; font-size: 2.5rem; font-weight: 700; }
.main-header p { margin: 0.5rem 0 0 0; opacity: 0.9; font-size: 1.1rem; }
.api-section { background: #f8f9fa; padding: 1.5rem; border-radius: 10px; margin-bottom: 1.5rem; border: 1px solid #e9ecef; }
.chat-container { border: 1px solid #e9ecef; border-radius: 10px; overflow: hidden; }
.message.user { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; }
.message.assistant { background: #f8f9fa; color: #333; }
.message:hover { transform: translateY(-1px); box-shadow: 0 2px 8px rgba(0,0,0,0.1); }
.footer { text-align: center; margin-top: 2rem; padding: 1rem; color: #666; font-size: 0.9rem; }
.footer a { color: #667eea; text-decoration: none; font-weight: 600; }
.footer a:hover { text-decoration: underline; }
"""

# NOTE(review): the original file continued here with
# `with gr.Blocks(css=css, title="ZenMux Chat Interface") as demo:` and the
# header `gr.HTML("""...` markup, but this capture is truncated mid-string —
# the UI layout and event wiring must be restored from the original source.
Powered by InclusionAI Ring-1T • Built with anycoder
Enter your ZenMux API key to start chatting. Get your key from zenmux.ai