#!/bin/bash
# Qwen3-4B Tool Calling Installation Script
# This script installs all dependencies and sets up the environment.
#
# Steps:
#   1. Verify python3 and pip3 are on PATH.
#   2. Install Python dependencies from requirements.txt.
#   3. Warn (non-fatally) if the GGUF model file is not present locally.
#   4. Mark the helper scripts executable.

# Fail fast: abort on unhandled errors, unset variables, and pipeline failures.
set -euo pipefail

echo "🚀 Qwen3-4B Tool Calling Installation"
echo "====================================="

# Check if Python is available
if ! command -v python3 &> /dev/null; then
    echo "❌ Python3 not found. Please install Python 3.8+ first."
    exit 1
fi
echo "✅ Python3 found: $(python3 --version)"

# Check if pip is available
if ! command -v pip3 &> /dev/null; then
    echo "❌ pip3 not found. Please install pip first."
    exit 1
fi
echo "✅ pip3 found: $(pip3 --version)"

# Install Python dependencies.
# Check the command's status directly (SC2181) instead of inspecting $? after.
echo "📦 Installing Python dependencies..."
if ! pip3 install -r requirements.txt; then
    echo "❌ Failed to install Python dependencies"
    exit 1
fi
echo "✅ Python dependencies installed successfully"

# Check if model file exists; missing model is a warning, not an error,
# since the user can download it afterwards.
if [ ! -f "Qwen3-4B-Function-Calling-Pro.gguf" ]; then
    echo "⚠️ Model file not found: Qwen3-4B-Function-Calling-Pro.gguf"
    echo "📥 Please download the model file from:"
    echo " https://huggingface.co/Manojb/qwen3-4b-toolcall-gguf-llamacpp-codex"
    echo ""
    echo "💡 You can download it with:"
    echo " huggingface-cli download Manojb/qwen3-4b-toolcall-gguf-llamacpp-codex Qwen3-4B-Function-Calling-Pro.gguf"
else
    echo "✅ Model file found: Qwen3-4B-Function-Calling-Pro.gguf"
fi

# Make scripts executable. Guard each chmod so a missing optional script
# does not abort the installation under 'set -e'.
for script in run_model.sh quick_start.py codex_integration.py; do
    if [ -f "$script" ]; then
        chmod +x "$script"
    fi
done
echo "✅ Scripts made executable"

echo ""
echo "🎉 Installation complete!"
echo ""
echo "📚 Usage:"
echo " ./run_model.sh # Run interactively"
echo " ./run_model.sh server # Start Codex server"
echo " python3 quick_start.py # Quick start demo"
echo " python3 codex_integration.py # Codex integration demo"
echo ""
echo "🔌 For Codex integration:"
echo " 1. Start server: ./run_model.sh server"
echo " 2. Configure Codex with: http://localhost:8000"
echo " 3. Model: Qwen3-4B-Function-Calling-Pro"
echo ""
echo "🐳 For Docker deployment:"
echo " docker-compose up -d"
echo ""
echo "Happy coding! 🚀"