Skip to content
This repository was archived by the owner on Jun 5, 2025. It is now read-only.

Commit 649c1b4

Browse files
Fixed unit tests
1 parent 6faac37 commit 649c1b4

File tree

8 files changed

+61
-171
lines changed

8 files changed

+61
-171
lines changed

scripts/import_packages.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
import os
44
import shutil
55

6-
76
import weaviate
87
from weaviate.classes.config import DataType, Property
98
from weaviate.embedded import EmbeddedOptions

src/codegate/llm_utils/llmclient.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@ async def _complete_litellm(
132132
temperature=request["temperature"],
133133
base_url=base_url,
134134
response_format=request["response_format"],
135-
)
135+
)
136136
content = response["choices"][0]["message"]["content"]
137137

138138
# Clean up code blocks if present

src/codegate/providers/ollama/adapter.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ def normalize(self, data: Dict) -> ChatCompletionRequest:
1919
"""
2020
# Make a copy of the data to avoid modifying the original and normalize the message content
2121
normalized_data = self._normalize_content_messages(data)
22+
normalized_data["model"] = data.get("model", "").strip()
2223
normalized_data["options"] = data.get("options", {})
2324
# In Ollama force the stream to be True. Continue is not setting this parameter and
2425
# most of our functionality is for streaming completions.

src/codegate/providers/ollama/completion_handler.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -45,13 +45,16 @@ async def execute_completion(
4545
) -> Union[ChatResponse, GenerateResponse]:
4646
"""Stream response directly from Ollama API."""
4747
if is_fim_request:
48-
prompt = request["messages"][0].content
49-
response = self.client.generate(model=request["model"], prompt=prompt, stream=stream)
48+
prompt = request["messages"][0]["content"]
49+
response = self.client.generate(
50+
model=request["model"], prompt=prompt, stream=stream, options=request["options"]
51+
)
5052
else:
5153
response = self.client.chat(
5254
model=request["model"],
5355
messages=request["messages"],
5456
stream=stream,
57+
options=request["options"],
5558
)
5659
return response
5760

src/codegate/storage/storage_engine.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,6 @@ def __del__(self):
6767
except Exception as e:
6868
logger.error(f"Failed to close client: {str(e)}")
6969

70-
7170
def get_client(self, data_path):
7271
try:
7372
# Configure Weaviate logging

tests/providers/ollama/test_ollama_adapter.py

Lines changed: 0 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -18,21 +18,6 @@ def test_normalize_ollama_input():
1818
normalized = normalizer.normalize(data)
1919
assert normalized["model"] == "codellama:7b-instruct" # Space removed
2020

21-
# Test base URL handling
22-
data = {"model": "llama2", "base_url": "http://localhost:11434"}
23-
normalized = normalizer.normalize(data)
24-
assert normalized["base_url"] == "http://localhost:11434/api"
25-
26-
# Test base URL already has /api
27-
data = {"model": "llama2", "base_url": "http://localhost:11434/api"}
28-
normalized = normalizer.normalize(data)
29-
assert normalized["base_url"] == "http://localhost:11434/api"
30-
31-
# Test base URL with trailing slash
32-
data = {"model": "llama2", "base_url": "http://localhost:11434/"}
33-
normalized = normalizer.normalize(data)
34-
assert normalized["base_url"] == "http://localhost:11434/api"
35-
3621

3722
def test_normalize_native_ollama_input():
3823
"""Test input normalization for native Ollama API requests."""
@@ -58,7 +43,6 @@ def test_normalize_native_ollama_input():
5843
"base_url": "http://localhost:11434",
5944
}
6045
normalized = normalizer.normalize(data)
61-
assert normalized["base_url"] == "http://localhost:11434/api"
6246

6347

6448
def test_normalize_ollama_message_format():
@@ -128,11 +112,6 @@ def test_normalize_ollama_output():
128112
"""Test output normalization for Ollama."""
129113
normalizer = OllamaOutputNormalizer()
130114

131-
# Test streaming response passthrough
132-
response = {"message": {"role": "assistant", "content": "test"}}
133-
normalized = normalizer.normalize_streaming(response)
134-
assert normalized == response
135-
136115
# Test regular response passthrough
137116
response = {"message": {"role": "assistant", "content": "test"}}
138117
normalized = normalizer.normalize(response)
@@ -147,8 +126,3 @@ def test_normalize_ollama_output():
147126
response = {"message": {"role": "assistant", "content": "test"}}
148127
denormalized = normalizer.denormalize(response)
149128
assert denormalized == response
150-
151-
# Test streaming denormalize passthrough
152-
response = {"message": {"role": "assistant", "content": "test"}}
153-
denormalized = normalizer.denormalize_streaming(response)
154-
assert denormalized == response
Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
from unittest.mock import AsyncMock, MagicMock
2+
3+
import pytest
4+
from litellm import ChatCompletionRequest
5+
from ollama import ChatResponse, GenerateResponse, Message
6+
7+
from codegate.providers.ollama.completion_handler import OllamaShim
8+
9+
10+
@pytest.fixture
11+
def mock_client():
12+
client = MagicMock()
13+
client.generate = AsyncMock(return_value=GenerateResponse(response="FIM response"))
14+
client.chat = AsyncMock(
15+
return_value=ChatResponse(message=Message(content="Chat response", role="assistant"))
16+
)
17+
return client
18+
19+
20+
@pytest.fixture
21+
def handler(mock_client):
22+
ollama_shim = OllamaShim()
23+
ollama_shim.client = mock_client
24+
return ollama_shim
25+
26+
27+
@pytest.fixture
28+
def chat_request():
29+
return ChatCompletionRequest(
30+
model="test-model", messages=[{"role": "user", "content": "Hello"}], options={}
31+
)
32+
33+
34+
@pytest.mark.asyncio
35+
async def test_execute_completion_is_fim_request(handler, chat_request):
36+
chat_request["messages"][0]["content"] = "FIM prompt"
37+
await handler.execute_completion(chat_request, api_key=None, stream=False, is_fim_request=True)
38+
handler.client.generate.assert_called_once_with(
39+
model=chat_request["model"],
40+
prompt="FIM prompt",
41+
stream=False,
42+
options=chat_request["options"],
43+
)
44+
45+
46+
@pytest.mark.asyncio
47+
async def test_execute_completion_not_is_fim_request(handler, chat_request):
48+
await handler.execute_completion(chat_request, api_key=None, stream=False, is_fim_request=False)
49+
handler.client.chat.assert_called_once_with(
50+
model=chat_request["model"],
51+
messages=chat_request["messages"],
52+
stream=False,
53+
options=chat_request["options"],
54+
)

tests/providers/ollama/test_ollama_provider.py

Lines changed: 0 additions & 140 deletions
This file was deleted.

0 commit comments

Comments
 (0)