| """ | |
| Test LLM Service | |
| Tests for LLM integration and service functionality. | |
| """ | |
| from unittest.mock import Mock, patch | |
| import pytest | |
| import requests | |
| from src.llm.llm_service import LLMConfig, LLMResponse, LLMService | |

class TestLLMConfig:
    """Test LLMConfig dataclass."""

    def test_llm_config_creation(self):
        """Test basic LLMConfig creation."""
        config = LLMConfig(
            provider="openrouter",
            api_key="test-key",
            model_name="test-model",
            base_url="https://test.com",
        )

        assert config.provider == "openrouter"
        assert config.api_key == "test-key"
        assert config.model_name == "test-model"
        assert config.base_url == "https://test.com"
        assert config.max_tokens == 1000  # Default value
        assert config.temperature == 0.1  # Default value
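
    # A minimal sketch of overriding the defaults, assuming max_tokens and
    # temperature are ordinary constructor kwargs (implied by the asserted
    # defaults above); adjust if the dataclass restricts them.
    def test_llm_config_custom_values(self):
        """Test LLMConfig creation with explicit max_tokens and temperature."""
        config = LLMConfig(
            provider="openrouter",
            api_key="test-key",
            model_name="test-model",
            base_url="https://test.com",
            max_tokens=2048,
            temperature=0.7,
        )

        assert config.max_tokens == 2048
        assert config.temperature == 0.7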

class TestLLMResponse:
    """Test LLMResponse dataclass."""

    def test_llm_response_creation(self):
        """Test basic LLMResponse creation."""
        response = LLMResponse(
            content="Test response",
            provider="openrouter",
            model="test-model",
            usage={"tokens": 100},
            response_time=1.5,
            success=True,
        )

        assert response.content == "Test response"
        assert response.provider == "openrouter"
        assert response.model == "test-model"
        assert response.usage == {"tokens": 100}
        assert response.response_time == 1.5
        assert response.success is True
        assert response.error_message is None
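
    # A sketch of the failure shape, assuming error_message is an ordinary
    # constructor kwarg (the field exists, since its default is asserted
    # above); adjust if failed responses are built differently.
    def test_llm_response_failure_creation(self):
        """Test LLMResponse creation for a failed request."""
        response = LLMResponse(
            content="",
            provider="openrouter",
            model="test-model",
            usage={},
            response_time=0.2,
            success=False,
            error_message="Request timed out",
        )

        assert response.success is False
        assert response.error_message == "Request timed out"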

class TestLLMService:
    """Test LLMService functionality."""

    def test_initialization_with_configs(self):
        """Test LLMService initialization with configurations."""
        config = LLMConfig(
            provider="openrouter",
            api_key="test-key",
            model_name="test-model",
            base_url="https://test.com",
        )
        service = LLMService([config])

        assert len(service.configs) == 1
        assert service.configs[0] == config
        assert service.current_config_index == 0

    def test_initialization_empty_configs_raises_error(self):
        """Test that empty configs raise ValueError."""
        with pytest.raises(ValueError, match="At least one LLM configuration must be provided"):
            LLMService([])

    # The env var names below are assumptions inferred from the asserted key
    # values; adjust them if the service reads different variables.
    @patch.dict("os.environ", {"OPENROUTER_API_KEY": "test-openrouter-key"}, clear=True)
    def test_from_environment_with_openrouter_key(self):
        """Test creating service from environment with OpenRouter key."""
        service = LLMService.from_environment()

        assert len(service.configs) >= 1
        openrouter_config = next(
            (config for config in service.configs if config.provider == "openrouter"),
            None,
        )
        assert openrouter_config is not None
        assert openrouter_config.api_key == "test-openrouter-key"

    @patch.dict("os.environ", {"GROQ_API_KEY": "test-groq-key"}, clear=True)
    def test_from_environment_with_groq_key(self):
        """Test creating service from environment with Groq key."""
        service = LLMService.from_environment()

        assert len(service.configs) >= 1
        groq_config = next((config for config in service.configs if config.provider == "groq"), None)
        assert groq_config is not None
        assert groq_config.api_key == "test-groq-key"

    @patch.dict("os.environ", {}, clear=True)
    def test_from_environment_no_keys_raises_error(self):
        """Test that no environment keys raise ValueError."""
        with pytest.raises(ValueError, match="No LLM API keys found in environment"):
            LLMService.from_environment()
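
    # A sketch combining both providers, assuming the same env var names as
    # in the tests above.
    @patch.dict(
        "os.environ",
        {"OPENROUTER_API_KEY": "test-openrouter-key", "GROQ_API_KEY": "test-groq-key"},
        clear=True,
    )
    def test_from_environment_with_both_keys(self):
        """Test that both providers are configured when both keys are set."""
        service = LLMService.from_environment()

        providers = {config.provider for config in service.configs}
        assert {"openrouter", "groq"} <= providers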

    # The patch target assumes the service calls requests.post directly;
    # adjust it if the module imports post under a different name.
    @patch("requests.post")
    def test_successful_response_generation(self, mock_post):
        """Test successful response generation."""
        # Mock successful API response
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {
            "choices": [{"message": {"content": "Test response content"}}],
            "usage": {"prompt_tokens": 50, "completion_tokens": 20},
        }
        mock_response.raise_for_status = Mock()
        mock_post.return_value = mock_response

        config = LLMConfig(
            provider="openrouter",
            api_key="test-key",
            model_name="test-model",
            base_url="https://api.openrouter.ai/api/v1",
        )
        service = LLMService([config])
        result = service.generate_response("Test prompt")

        assert result.success is True
        assert result.content == "Test response content"
        assert result.provider == "openrouter"
        assert result.model == "test-model"
        assert result.usage == {"prompt_tokens": 50, "completion_tokens": 20}
        assert result.response_time > 0

        # Verify API call
        mock_post.assert_called_once()
        args, kwargs = mock_post.call_args
        assert args[0] == "https://api.openrouter.ai/api/v1/chat/completions"
        assert kwargs["json"]["model"] == "test-model"
        assert kwargs["json"]["messages"][0]["content"] == "Test prompt"

    @patch("requests.post")
    def test_api_error_handling(self, mock_post):
        """Test handling of API errors."""
        # Mock API error
        mock_post.side_effect = requests.exceptions.RequestException("API Error")

        config = LLMConfig(
            provider="openrouter",
            api_key="test-key",
            model_name="test-model",
            base_url="https://api.openrouter.ai/api/v1",
        )
        service = LLMService([config])
        result = service.generate_response("Test prompt")

        assert result.success is False
        assert "API Error" in result.error_message
        assert result.content == ""
        assert result.provider == "none"  # When all providers fail, provider is "none"

    @patch("requests.post")
    def test_fallback_to_second_provider(self, mock_post):
        """Test fallback to second provider when first fails."""
        # First provider fails 3 times (1 attempt + 2 retries), then the
        # second provider succeeds
        first_error = requests.exceptions.RequestException("First provider error")
        second_response = Mock()
        second_response.status_code = 200
        second_response.json.return_value = {
            "choices": [{"message": {"content": "Second provider response"}}],
            "usage": {},
        }
        second_response.raise_for_status = Mock()
        mock_post.side_effect = [first_error, first_error, first_error, second_response]

        config1 = LLMConfig(
            provider="openrouter",
            api_key="key1",
            model_name="model1",
            base_url="https://api1.com",
        )
        config2 = LLMConfig(
            provider="groq",
            api_key="key2",
            model_name="model2",
            base_url="https://api2.com",
        )
        service = LLMService([config1, config2])
        result = service.generate_response("Test prompt")

        assert result.success is True
        assert result.content == "Second provider response"
        assert result.provider == "groq"
        assert mock_post.call_count == 4  # 3 failed attempts on first provider + 1 success on second

    @patch("requests.post")
    def test_all_providers_fail(self, mock_post):
        """Test when all providers fail."""
        mock_post.side_effect = requests.exceptions.RequestException("All providers down")

        config1 = LLMConfig(provider="provider1", api_key="key1", model_name="model1", base_url="url1")
        config2 = LLMConfig(provider="provider2", api_key="key2", model_name="model2", base_url="url2")
        service = LLMService([config1, config2])
        result = service.generate_response("Test prompt")

        assert result.success is False
        assert "All providers failed" in result.error_message
        assert result.provider == "none"
        assert result.model == "none"

    @patch("requests.post")
    def test_retry_logic(self, mock_post):
        """Test retry logic for failed requests."""
        # First call fails, second succeeds
        second_response = Mock()
        second_response.status_code = 200
        second_response.json.return_value = {
            "choices": [{"message": {"content": "Success after retry"}}],
            "usage": {},
        }
        second_response.raise_for_status = Mock()
        mock_post.side_effect = [
            requests.exceptions.RequestException("Temporary error"),
            second_response,
        ]

        config = LLMConfig(
            provider="openrouter",
            api_key="test-key",
            model_name="test-model",
            base_url="https://api.openrouter.ai/api/v1",
        )
        service = LLMService([config])
        result = service.generate_response("Test prompt", max_retries=1)

        assert result.success is True
        assert result.content == "Success after retry"
        assert mock_post.call_count == 2

    def test_get_available_providers(self):
        """Test getting list of available providers."""
        config1 = LLMConfig(provider="openrouter", api_key="key1", model_name="model1", base_url="url1")
        config2 = LLMConfig(provider="groq", api_key="key2", model_name="model2", base_url="url2")
        service = LLMService([config1, config2])

        providers = service.get_available_providers()
        assert providers == ["openrouter", "groq"]

    @patch("requests.post")
    def test_health_check(self, mock_post):
        """Test health check functionality."""
        # Mock successful health check
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {
            "choices": [{"message": {"content": "OK"}}],
            "usage": {},
        }
        mock_response.raise_for_status = Mock()
        mock_post.return_value = mock_response

        config = LLMConfig(
            provider="openrouter",
            api_key="test-key",
            model_name="test-model",
            base_url="https://api.openrouter.ai/api/v1",
        )
        service = LLMService([config])
        health_status = service.health_check()

        assert "openrouter" in health_status
        assert health_status["openrouter"]["status"] == "healthy"
        assert health_status["openrouter"]["model"] == "test-model"
        assert health_status["openrouter"]["response_time"] > 0

    @patch("requests.post")
    def test_openrouter_specific_headers(self, mock_post):
        """Test that OpenRouter-specific headers are added."""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {
            "choices": [{"message": {"content": "Test"}}],
            "usage": {},
        }
        mock_response.raise_for_status = Mock()
        mock_post.return_value = mock_response

        config = LLMConfig(
            provider="openrouter",
            api_key="test-key",
            model_name="test-model",
            base_url="https://api.openrouter.ai/api/v1",
        )
        service = LLMService([config])
        service.generate_response("Test")

        # Check headers
        args, kwargs = mock_post.call_args
        headers = kwargs["headers"]
        assert "HTTP-Referer" in headers
        assert "X-Title" in headers
        assert headers["HTTP-Referer"] == "https://github.com/sethmcknight/msse-ai-engineering"