"""VexaAI Model-Platform: a self-hosted FastAPI server for NVIDIA Nemotron-Nano-9B-v2."""

import os

import torch
import uvicorn
from fastapi import FastAPI, HTTPException
from huggingface_hub import login
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Authenticate with the Hugging Face Hub if a token is provided
# (needed when the model weights are gated or private).
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    login(hf_token)

app = FastAPI(
    title="VexaAI Model-Platform: NVIDIA Nemotron-Nano-9B-v2",
    description="Self-hosted AI model NVIDIA Nemotron-Nano-9B-v2, powered by VexaAI.",
    version="0.9",
)

model_name = "nvidia/NVIDIA-Nemotron-Nano-9B-v2"

# 4-bit NF4 quantization with double quantization shrinks the 9B model's
# memory footprint; compute runs in bfloat16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
)
model.eval()


class GenerateRequest(BaseModel):
    prompt: str
    max_new_tokens: int = 512
    temperature: float = 0.7


@app.post("/generate")
async def generate_text(request: GenerateRequest):
    try:
        inputs = tokenizer(request.prompt, return_tensors="pt").to(model.device)
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=request.max_new_tokens,
                temperature=request.temperature,
                do_sample=True,
                eos_token_id=tokenizer.eos_token_id,
                pad_token_id=tokenizer.eos_token_id,
            )
        # Slice off the prompt at the token level so only newly generated text
        # is decoded; string-level slicing can mismatch when the tokenizer
        # normalizes whitespace or special characters during decoding.
        new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
        generated_text = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
        return {"generated_text": generated_text}
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"VexaAI Model-Platform: generation error: {e}",
        )


@app.get("/")
async def root():
    return {"message": "To start generating text, use /generate."}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
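
# --- Example usage ---------------------------------------------------------
# A minimal client sketch for the /generate endpoint, assuming the server is
# reachable at localhost:7860 and the optional `requests` package is
# installed (neither assumption is required by the server above).
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:7860/generate",
#       json={
#           "prompt": "Summarize 4-bit quantization in one sentence.",
#           "max_new_tokens": 128,
#           "temperature": 0.7,
#       },
#       timeout=300,
#   )
#   resp.raise_for_status()
#   print(resp.json()["generated_text"])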