This repository was archived by the owner on Jun 5, 2025. It is now read-only.

feat: add support for aider #543

Merged
merged 4 commits, Jan 13, 2025
1 change: 1 addition & 0 deletions .gitignore
@@ -49,3 +49,4 @@ sqlite_data/vectordb.db

# certificate directory
*certs/
.aider*
9 changes: 9 additions & 0 deletions README.md
@@ -67,6 +67,14 @@ With Continue, you can choose from several leading AI model providers:

🔮 Many more on the way!

- **[Aider](https://aider.chat)**

With Aider, you can choose from two leading AI model providers:

- 💻 Local LLMs with [Ollama](https://ollama.com/)
- 🧠 [OpenAI API](https://openai.com/api/)


### Privacy first

Unlike E.T., your code never phones home! 🛸 CodeGate is designed with privacy
@@ -84,6 +92,7 @@ Check out the quickstart guides to get up and running quickly!
- [Quickstart guide for GitHub Copilot with VS Code](https://docs.codegate.ai/quickstart)
- [Quickstart guide for Continue with VS Code and Ollama](https://docs.codegate.ai/quickstart-continue)


## 🎯 Usage

### IDE integration
8 changes: 5 additions & 3 deletions src/codegate/pipeline/base.py
@@ -253,9 +253,11 @@ def get_latest_user_messages(request: ChatCompletionRequest) -> str:

 for message in reversed(request.get("messages", [])):
     if message["role"] == "user":
-        latest_user_messages += "\n" + message["content"]
-    else:
-        break
+        # stop at the first user message found; skip earlier non-user messages
+        message_str = message.get("content", "")
+        if message_str:
+            latest_user_messages += "\n" + str(message_str)
+        break
 
 return latest_user_messages
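
With this change the helper walks the conversation backwards, skips over non-user messages, and stops at the first user message it finds, tolerating empty or non-string content. A standalone sketch of the new behavior (re-implemented here for illustration, outside the pipeline):

def latest_user_message(messages: list[dict]) -> str:
    """Mirror of the updated logic: return only the newest user message."""
    result = ""
    for message in reversed(messages):
        if message["role"] == "user":
            content = message.get("content", "")
            if content:
                result += "\n" + str(content)
            break
    return result

history = [
    {"role": "user", "content": "first question"},
    {"role": "assistant", "content": "an answer"},
    {"role": "user", "content": "follow-up question"},
]
print(latest_user_message(history))  # "\nfollow-up question"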

@@ -59,7 +59,6 @@ async def process(
"""
Use RAG DB to add context to the user request
"""

# Get the latest user messages
user_messages = self.get_latest_user_messages(request)
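
For orientation, a rough sketch of what a retrieval step like this one does with that message text. The vector-store API and message shapes below are assumptions for illustration, not CodeGate's actual interfaces:

async def add_context(request: dict, vectordb) -> dict:
    """Hypothetical sketch of a RAG context step (not CodeGate's real code)."""
    # Use the newest user turn as the retrieval query
    user_turns = [m for m in request.get("messages", []) if m["role"] == "user"]
    query = str(user_turns[-1].get("content", "")) if user_turns else ""
    if not query:
        return request

    # Hypothetical vector-store call returning the best-matching snippets
    snippets = await vectordb.search(query, top_k=3)
    if snippets:
        context_block = "\n".join(s["text"] for s in snippets)
        # Prepend retrieved context so the model sees it before the conversation
        request["messages"].insert(
            0, {"role": "system", "content": "Context:\n" + context_block}
        )
    return request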

2 changes: 2 additions & 0 deletions src/codegate/pipeline/output.py
@@ -27,6 +27,8 @@ class OutputPipelineContext:
snippets: List[CodeSnippet] = field(default_factory=list)
# Store all content that has been processed by the pipeline
processed_content: List[str] = field(default_factory=list)
# buffer for potentially partial marker prefixes carried across streamed chunks
prefix_buffer: str = ""


class OutputPipelineStep(ABC):
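
The new prefix_buffer field exists because streaming responses arrive in arbitrary slices, so a redaction marker can straddle a chunk boundary. A tiny illustration of the failure mode it guards against (the marker string here is made up):

marker = "REDACTED<"  # illustrative value, not the pipeline's real marker
chunks = ["The key is REDA", "CTED<uuid-1234>."]

print(any(marker in c for c in chunks))  # False: no single chunk contains it
print(marker in "".join(chunks))         # True: it only appears across chunks
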
13 changes: 9 additions & 4 deletions src/codegate/pipeline/secrets/secrets.py
@@ -280,7 +280,7 @@ async def process(
if "content" in message and message["content"]:
# Protect the text
protected_string, redacted_count = self._redact_text(
message["content"], secrets_manager, session_id, context
str(message["content"]), secrets_manager, session_id, context
)
new_request["messages"][i]["content"] = protected_string

@@ -389,12 +389,17 @@ async def process_chunk(
     return [chunk]
 
 # If we have a partial marker at the end, keep buffering
-if self.marker_start in buffered_content or self._is_partial_marker_prefix(
-    buffered_content
-):
+if self.marker_start in buffered_content:
+    context.prefix_buffer = ""
     return []
 
+if self._is_partial_marker_prefix(buffered_content):
+    context.prefix_buffer += buffered_content
+    return []
+
 # No markers or partial markers, let pipeline handle the chunk normally
+chunk.choices[0].delta.content = context.prefix_buffer + chunk.choices[0].delta.content
+context.prefix_buffer = ""
 return [chunk]
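
To make the three branches concrete, here is a simplified standalone mirror of the logic above. The marker value and the (text, buffer) return shape are illustrative; the real step operates on streaming chunk objects:

MARKER_START = "REDACTED<"  # illustrative; the real marker belongs to the secrets step

def is_partial_marker_prefix(text: str) -> bool:
    # True when some suffix of `text` could be the beginning of the marker
    return any(MARKER_START.startswith(text[i:]) for i in range(len(text)))

def handle_chunk(buffered_content: str, prefix_buffer: str) -> tuple:
    """Return (text_to_emit or None, new_prefix_buffer)."""
    if MARKER_START in buffered_content:
        # Case 1: a full marker is present, so keep buffering and emit nothing
        return None, ""
    if is_partial_marker_prefix(buffered_content):
        # Case 2: this might be the start of a marker, so hold it back for now
        return None, prefix_buffer + buffered_content
    # Case 3: definitely not a marker, so flush any held-back text
    return prefix_buffer + buffered_content, ""

print(handle_chunk("key: REDA", ""))          # (None, 'key: REDA'): held back
print(handle_chunk(" is safe", "key: REDA"))  # ('key: REDA is safe', ''): flushed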


26 changes: 26 additions & 0 deletions src/codegate/providers/ollama/provider.py
@@ -45,6 +45,32 @@ def _setup_routes(self):
"""
Sets up Ollama API routes.
"""
@self.router.get(f"/{self.provider_route_name}/api/tags")
async def get_tags(request: Request):
    """
    Special route for /api/tags that responds outside of the pipeline.
    Tags are used to get the list of local models:
    https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models
    """
    async with httpx.AsyncClient() as client:
        response = await client.get(f"{self.base_url}/api/tags")
        return response.json()

@self.router.post(f"/{self.provider_route_name}/api/show")
async def show_model(request: Request):
    """
    Special route for /api/show that responds outside of the pipeline.
    /api/show is used to get detailed information about a model:
    https://github.com/ollama/ollama/blob/main/docs/api.md#show-model-information
    """
    body = await request.body()
    async with httpx.AsyncClient() as client:
        response = await client.post(
            f"{self.base_url}/api/show",
            content=body,
            headers={"Content-Type": "application/json"},
        )
        return response.json()

# Native Ollama API routes
@self.router.post(f"/{self.provider_route_name}/api/chat")
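
Together these routes let a client such as aider discover and inspect models through CodeGate without entering the completion pipeline. A quick way to exercise them once the server is running; the port, route prefix, and model name below are assumptions based on a typical local setup:

import asyncio

import httpx

async def main() -> None:
    base = "http://localhost:8989/ollama"  # assumed CodeGate address and provider route
    async with httpx.AsyncClient() as client:
        tags = await client.get(f"{base}/api/tags")
        print(tags.json())  # models available locally through Ollama

        show = await client.post(f"{base}/api/show", json={"model": "qwen2.5-coder"})
        print(show.json())  # details for one model (illustrative model name)

asyncio.run(main())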