Tobias Pasquale committed
Commit f35ca9e · 1 Parent(s): a3b3a5c
fix: correct test import paths and LLM service test expectations
- Fix chat endpoint tests to patch correct import paths (src.* instead of app.*)
- Fix LLM service error handling test to expect 'none' provider when all fail
- Fix LLM service fallback test to account for proper retry behavior (3 attempts per provider)
- All previously failing tests now pass: chat endpoint tests and LLM service tests
- tests/test_chat_endpoint.py +24 -24
- tests/test_llm/test_llm_service.py +8 -10
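
A note on the import-path fix: unittest.mock.patch resolves its string target by importing that dotted path at test time and swapping the attribute there, so the target must name the module path the code under test actually uses. If the project's packages live under src/ rather than app/, an app.* target never intercepts anything. A minimal, self-contained sketch of the principle (json.dumps stands in for the repo's services; it is illustrative only):

    # Illustrative only: patch() imports the dotted path and replaces the
    # attribute there, so the path must match what the code under test uses.
    from unittest.mock import patch

    with patch("json.dumps") as mock_dumps:
        mock_dumps.return_value = "stubbed"
        import json
        assert json.dumps({}) == "stubbed"   # the mock is in effect

    # A target like "app.llm.llm_service.LLMService" would fail to resolve
    # (or intercept nothing) if the package actually lives under "src." --
    # the failure mode the commit message describes.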
tests/test_chat_endpoint.py
CHANGED
@@ -21,12 +21,12 @@ class TestChatEndpoint:
     """Test cases for the /chat endpoint"""
 
     @patch.dict(os.environ, {"OPENROUTER_API_KEY": "test_key"})
-    @patch("app.rag.rag_pipeline.RAGPipeline")
-    @patch("app.rag.response_formatter.ResponseFormatter")
-    @patch("app.llm.llm_service.LLMService")
-    @patch("app.search.search_service.SearchService")
-    @patch("app.vector_store.vector_db.VectorDatabase")
-    @patch("app.embedding.embedding_service.EmbeddingService")
+    @patch("src.rag.rag_pipeline.RAGPipeline")
+    @patch("src.rag.response_formatter.ResponseFormatter")
+    @patch("src.llm.llm_service.LLMService")
+    @patch("src.search.search_service.SearchService")
+    @patch("src.vector_store.vector_db.VectorDatabase")
+    @patch("src.embedding.embedding_service.EmbeddingService")
     def test_chat_endpoint_valid_request(
         self,
         mock_embedding,
@@ -90,12 +90,12 @@ class TestChatEndpoint:
         assert "citations" in data
 
     @patch.dict(os.environ, {"OPENROUTER_API_KEY": "test_key"})
-    @patch("app.rag.rag_pipeline.RAGPipeline")
-    @patch("app.rag.response_formatter.ResponseFormatter")
-    @patch("app.llm.llm_service.LLMService")
-    @patch("app.search.search_service.SearchService")
-    @patch("app.vector_store.vector_db.VectorDatabase")
-    @patch("app.embedding.embedding_service.EmbeddingService")
+    @patch("src.rag.rag_pipeline.RAGPipeline")
+    @patch("src.rag.response_formatter.ResponseFormatter")
+    @patch("src.llm.llm_service.LLMService")
+    @patch("src.search.search_service.SearchService")
+    @patch("src.vector_store.vector_db.VectorDatabase")
+    @patch("src.embedding.embedding_service.EmbeddingService")
     def test_chat_endpoint_minimal_request(
         self,
         mock_embedding,
@@ -205,12 +205,12 @@ class TestChatEndpoint:
         assert "LLM service configuration error" in data["message"]
 
     @patch.dict(os.environ, {"OPENROUTER_API_KEY": "test_key"})
-    @patch("app.rag.rag_pipeline.RAGPipeline")
-    @patch("app.rag.response_formatter.ResponseFormatter")
-    @patch("app.llm.llm_service.LLMService")
-    @patch("app.search.search_service.SearchService")
-    @patch("app.vector_store.vector_db.VectorDatabase")
-    @patch("app.embedding.embedding_service.EmbeddingService")
+    @patch("src.rag.rag_pipeline.RAGPipeline")
+    @patch("src.rag.response_formatter.ResponseFormatter")
+    @patch("src.llm.llm_service.LLMService")
+    @patch("src.search.search_service.SearchService")
+    @patch("src.vector_store.vector_db.VectorDatabase")
+    @patch("src.embedding.embedding_service.EmbeddingService")
     def test_chat_endpoint_with_conversation_id(
         self,
         mock_embedding,
@@ -259,12 +259,12 @@ class TestChatEndpoint:
         assert data["status"] == "success"
 
     @patch.dict(os.environ, {"OPENROUTER_API_KEY": "test_key"})
-    @patch("app.rag.rag_pipeline.RAGPipeline")
-    @patch("app.rag.response_formatter.ResponseFormatter")
-    @patch("app.llm.llm_service.LLMService")
-    @patch("app.search.search_service.SearchService")
-    @patch("app.vector_store.vector_db.VectorDatabase")
-    @patch("app.embedding.embedding_service.EmbeddingService")
+    @patch("src.rag.rag_pipeline.RAGPipeline")
+    @patch("src.rag.response_formatter.ResponseFormatter")
+    @patch("src.llm.llm_service.LLMService")
+    @patch("src.search.search_service.SearchService")
+    @patch("src.vector_store.vector_db.VectorDatabase")
+    @patch("src.embedding.embedding_service.EmbeddingService")
     def test_chat_endpoint_with_debug(
         self,
         mock_embedding,
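
One detail that makes the hunks above easier to read: stacked @patch decorators apply bottom-up, so the lowest decorator (EmbeddingService) supplies the first mock parameter after self, which is why each parameter list starts with mock_embedding. A standard-library-only illustration (demo and the os targets are placeholders, not repo code):

    from unittest.mock import patch

    @patch("os.getcwd")   # applied second -> bound to the second mock argument
    @patch("os.getpid")   # applied first  -> bound to the first mock argument
    def demo(mock_getpid, mock_getcwd):
        import os
        mock_getpid.return_value = 1234
        mock_getcwd.return_value = "/tmp"
        return os.getpid(), os.getcwd()

    print(demo())  # -> (1234, '/tmp')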
tests/test_llm/test_llm_service.py
CHANGED
@@ -167,18 +167,14 @@ class TestLLMService:
         assert result.success is False
         assert "API Error" in result.error_message
         assert result.content == ""
-        assert result.provider == "
+        assert result.provider == "none"  # When all providers fail, provider is "none"
 
     @patch("requests.post")
     def test_fallback_to_second_provider(self, mock_post):
         """Test fallback to second provider when first fails."""
-        # Mock first provider failing, second succeeding
-        first_call = Mock()
-        first_call.side_effect = requests.exceptions.RequestException(
-            "First provider error"
-        )
+        # Mock first provider failing 3 times (1 attempt + 2 retries), second succeeding
+        first_error = requests.exceptions.RequestException("First provider error")
 
-        second_call = Mock()
         second_response = Mock()
         second_response.status_code = 200
         second_response.json.return_value = {
@@ -186,9 +182,9 @@ class TestLLMService:
             "usage": {},
         }
         second_response.raise_for_status = Mock()
-        second_call.return_value = second_response
 
-
+        # First provider fails 3 times, then second provider succeeds
+        mock_post.side_effect = [first_error, first_error, first_error, second_response]
 
         config1 = LLMConfig(
             provider="openrouter",
@@ -209,7 +205,9 @@ class TestLLMService:
         assert result.success is True
         assert result.content == "Second provider response"
         assert result.provider == "groq"
-        assert
+        assert (
+            mock_post.call_count == 4
+        )  # 3 failed attempts on first provider + 1 success on second
 
     @patch("requests.post")
     def test_all_providers_fail(self, mock_post):
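
For the arithmetic behind call_count == 4: per the commit message, the service makes 3 attempts per provider before falling back. A rough sketch of such a loop — generate_with_fallback, the config dict, and the response shape are all assumptions for illustration, not the repo's actual LLMService:

    import requests

    def generate_with_fallback(prompt, configs, attempts_per_provider=3):
        """Try providers in order, 3 attempts each; provider is 'none' if all fail."""
        last_error = None
        for config in configs:
            for _ in range(attempts_per_provider):
                try:
                    resp = requests.post(config["url"], json={"prompt": prompt}, timeout=30)
                    resp.raise_for_status()
                    content = resp.json()["choices"][0]["message"]["content"]
                    return {"success": True, "provider": config["provider"], "content": content}
                except requests.exceptions.RequestException as exc:
                    last_error = exc  # retry this provider, then move to the next
        # No provider produced a response: report provider "none".
        return {"success": False, "provider": "none", "content": "",
                "error_message": str(last_error)}

Against the mock sequence [first_error, first_error, first_error, second_response], such a loop issues exactly 3 failing posts to the first provider and 1 successful post to the second, which is the call_count == 4 the test now asserts.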