"""TheWizard — a Gradio front end for a free Hugging Face code model."""

import os

import gradio as gr
import requests

# Hugging Face token, supplied via Space Secrets.
HF_TOKEN = os.getenv("HF_API_TOKEN")
MODEL_ID = "bigcode/starcoder2-3b"  # Free multi-language code model
# Serverless Inference API endpoint. NOTE(review): the previous URL
# (router.huggingface.co/models/...) is not a valid route and 404s;
# model routes live under api-inference.huggingface.co.
API_URL = f"https://api-inference.huggingface.co/models/{MODEL_ID}"

LANGUAGES = ["Python", "C", "C++", "Java", "JavaScript", "Solidity", "SQL", "Bash"]


def code_assistant(prompt, language="Python"):
    """Generate code, explanations, or fixes for *prompt* in *language*.

    Calls the Hugging Face serverless Inference API and returns a Markdown
    string: either the model output wrapped in a fenced code block, or a
    human-readable error message (never raises to the Gradio layer).
    """
    if not HF_TOKEN:
        return "Error: Hugging Face API token not set. Please add HF_API_TOKEN in Space Secrets and restart the Space."

    # Prepend language info to the prompt for multi-language generation.
    full_prompt = f"# Language: {language}\n{prompt}"

    headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json",
    }
    payload = {
        "inputs": full_prompt,
        "options": {"use_cache": False},  # optional: always get a fresh generation
    }

    try:
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        data = response.json()
    except (requests.RequestException, ValueError) as e:
        # Network failure/timeout, or a non-JSON (e.g. HTML error page) body.
        return f"Error: {e}"

    # The Inference API returns a LIST of generations on success; a dict is
    # either an error payload or (for some backends) a single generation.
    if isinstance(data, list) and data and isinstance(data[0], dict) and "generated_text" in data[0]:
        text = data[0]["generated_text"]
    elif isinstance(data, dict) and "generated_text" in data:
        text = data["generated_text"]
    elif isinstance(data, dict) and "error" in data:
        return f"Error from model: {data['error']}"
    else:
        # Unexpected shape — surface it verbatim rather than crash.
        text = str(data)

    # Wrap in a Markdown code block so Gradio renders it with highlighting.
    if not text.startswith("```"):
        text = f"```{language.lower()}\n{text}\n```"
    return text


# Gradio UI
iface = gr.Interface(
    fn=code_assistant,
    inputs=[
        gr.Textbox(label="Your Prompt", placeholder="Write, explain, or debug code..."),
        # Default matches the function's own default so UI and API agree.
        gr.Dropdown(LANGUAGES, value="Python", label="Language"),
    ],
    outputs=gr.Markdown(label="Output"),
    title="TheWizard 💬 — Free Code Assistant",
    description="Ask for code, explanations, or debugging in multiple languages using free HF models.",
)

# Guard the launch so importing this module (e.g. for tests) has no side effects.
if __name__ == "__main__":
    iface.launch()