Skip to content

Commit ac30a9b

Browse files
patched.codes[bot]
authored
Log analysis (#1280)
* sav init * sav * complete log analysis * update lock file * split file view from code edit tools * fix tests * pydantic ai model * add v2 versioning * add forgotten v2 * lint and fix * update * update lock fi;e * bump version * fix typo * apply sync hack * finalise * Patched patchwork/steps/CallShell/README.md (#1284) Co-authored-by: patched.codes[bot] <298395+patched.codes[bot]@users.noreply.github.com> * Patched patchwork/steps/AgenticLLM/README.md (#1285) Co-authored-by: patched.codes[bot] <298395+patched.codes[bot]@users.noreply.github.com> * Patched patchwork/steps/CallSQL/README.md (#1286) Co-authored-by: patched.codes[bot] <298395+patched.codes[bot]@users.noreply.github.com> * Patched patchwork/steps/SendEmail/README.md (#1298) Co-authored-by: patched.codes[bot] <298395+patched.codes[bot]@users.noreply.github.com> * Patched patchwork/steps/AgenticLLMV2/README.md (#1329) Co-authored-by: patched.codes[bot] <298395+patched.codes[bot]@users.noreply.github.com> * Patched patchwork/steps/ReadEmail/README.md (#1330) Co-authored-by: patched.codes[bot] <298395+patched.codes[bot]@users.noreply.github.com> * finalise log analysis --------- Co-authored-by: Patched <[email protected]> Co-authored-by: patched.codes[bot] <298395+patched.codes[bot]@users.noreply.github.com>
1 parent 4cc8d3e commit ac30a9b

40 files changed

+1921
-476
lines changed

patchwork/app.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,12 +8,11 @@
88
from collections import deque
99
from contextlib import nullcontext
1010
from pathlib import Path
11-
from typing import Any
1211

1312
import click
1413
import yaml
1514
from click import echo
16-
from typing_extensions import Iterable
15+
from typing_extensions import Any, Iterable
1716

1817
from patchwork.common.client.patched import PatchedClient
1918
from patchwork.common.constants import PROMPT_TEMPLATE_FILE_KEY

patchwork/common/client/llm/aio.py

Lines changed: 63 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,11 @@
1010
ChatCompletionToolParam,
1111
completion_create_params,
1212
)
13-
from typing_extensions import Dict, Iterable, List, Optional, Union
13+
from pydantic_ai.messages import ModelMessage, ModelResponse
14+
from pydantic_ai.models import ModelRequestParameters, StreamedResponse
15+
from pydantic_ai.settings import ModelSettings
16+
from pydantic_ai.usage import Usage
17+
from typing_extensions import AsyncIterator, Dict, Iterable, List, Optional, Union
1418

1519
from patchwork.common.client.llm.anthropic import AnthropicLlmClient
1620
from patchwork.common.client.llm.google import GoogleLlmClient
@@ -32,6 +36,64 @@ def __init__(self, *clients: LlmClient):
3236
except Exception:
3337
pass
3438

39+
def __get_model(self, model_settings: ModelSettings | None) -> Optional[str]:
40+
if model_settings is None:
41+
raise ValueError("Model settings cannot be None")
42+
model_name = model_settings.get("model")
43+
if model_name is None:
44+
raise ValueError("Model must be set cannot be None")
45+
46+
return model_name
47+
48+
async def request(
49+
self,
50+
messages: list[ModelMessage],
51+
model_settings: ModelSettings | None,
52+
model_request_parameters: ModelRequestParameters,
53+
) -> tuple[ModelResponse, Usage]:
54+
model = self.__get_model(model_settings)
55+
if model is None:
56+
raise ValueError("Model cannot be unset")
57+
58+
for client in self.__clients:
59+
if client.is_model_supported(model):
60+
return await client.request(messages, model_settings, model_request_parameters)
61+
62+
client_names = [client.__class__.__name__ for client in self.__original_clients]
63+
raise ValueError(
64+
f"Model {model} is not supported by {client_names} clients. "
65+
f"Please ensure that the respective API keys are correct."
66+
)
67+
68+
async def request_stream(
69+
self,
70+
messages: list[ModelMessage],
71+
model_settings: ModelSettings | None,
72+
model_request_parameters: ModelRequestParameters,
73+
) -> AsyncIterator[StreamedResponse]:
74+
model = self.__get_model(model_settings)
75+
if model is None:
76+
raise ValueError("Model cannot be unset")
77+
78+
for client in self.__clients:
79+
if client.is_model_supported(model):
80+
yield client.request(messages, model_settings, model_request_parameters)
81+
return
82+
83+
client_names = [client.__class__.__name__ for client in self.__original_clients]
84+
raise ValueError(
85+
f"Model {model} is not supported by {client_names} clients. "
86+
f"Please ensure that the respective API keys are correct."
87+
)
88+
89+
@property
90+
def model_name(self) -> str:
91+
return "Undetermined"
92+
93+
@property
94+
def system(self) -> str:
95+
return next(iter(self.__clients)).system
96+
3597
def get_models(self) -> set[str]:
3698
return self.__supported_models
3799

patchwork/common/client/llm/anthropic.py

Lines changed: 50 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
import json
44
import time
5-
from functools import lru_cache
5+
from functools import cached_property, lru_cache
66
from pathlib import Path
77

88
from anthropic import Anthropic
@@ -15,13 +15,18 @@
1515
ChatCompletionToolParam,
1616
completion_create_params,
1717
)
18-
from openai.types.chat.chat_completion import Choice, CompletionUsage
18+
from openai.types.chat.chat_completion import Choice
1919
from openai.types.chat.chat_completion_message_tool_call import (
2020
ChatCompletionMessageToolCall,
2121
Function,
2222
)
2323
from openai.types.completion_usage import CompletionUsage
24-
from typing_extensions import Dict, Iterable, List, Optional, Union
24+
from pydantic_ai.messages import ModelMessage, ModelResponse
25+
from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse
26+
from pydantic_ai.models.anthropic import AnthropicModel
27+
from pydantic_ai.settings import ModelSettings
28+
from pydantic_ai.usage import Usage
29+
from typing_extensions import AsyncIterator, Dict, Iterable, List, Optional, Union
2530

2631
from patchwork.common.client.llm.protocol import NOT_GIVEN, LlmClient, NotGiven
2732

@@ -74,7 +79,46 @@ class AnthropicLlmClient(LlmClient):
7479
__100k_models = {"claude-2.0", "claude-instant-1.2"}
7580

7681
def __init__(self, api_key: str):
77-
self.client = Anthropic(api_key=api_key)
82+
self.__api_key = api_key
83+
84+
@cached_property
85+
def __client(self):
86+
return Anthropic(api_key=self.__api_key)
87+
88+
def __get_pydantic_model(self, model_settings: ModelSettings | None) -> Model:
89+
if model_settings is None:
90+
raise ValueError("Model settings cannot be None")
91+
model_name = model_settings.get("model")
92+
if model_name is None:
93+
raise ValueError("Model must be set cannot be None")
94+
95+
return AnthropicModel(model_name, api_key=self.__api_key)
96+
97+
async def request(
98+
self,
99+
messages: list[ModelMessage],
100+
model_settings: ModelSettings | None,
101+
model_request_parameters: ModelRequestParameters,
102+
) -> tuple[ModelResponse, Usage]:
103+
model = self.__get_pydantic_model(model_settings)
104+
return await model.request(messages, model_settings, model_request_parameters)
105+
106+
async def request_stream(
107+
self,
108+
messages: list[ModelMessage],
109+
model_settings: ModelSettings | None,
110+
model_request_parameters: ModelRequestParameters,
111+
) -> AsyncIterator[StreamedResponse]:
112+
model = self.__get_pydantic_model(model_settings)
113+
yield model.request_stream(messages, model_settings, model_request_parameters)
114+
115+
@property
116+
def model_name(self) -> str:
117+
return "Undetermined"
118+
119+
@property
120+
def system(self) -> str:
121+
return "anthropic"
78122

79123
def __get_model_limit(self, model: str) -> int:
80124
# it is observed that the count tokens is not accurate, so we are using a safety margin
@@ -250,7 +294,7 @@ def is_prompt_supported(
250294
for k, v in input_kwargs.items()
251295
if k in {"messages", "model", "system", "tool_choice", "tools", "beta"}
252296
}
253-
message_token_count = self.client.beta.messages.count_tokens(**count_token_input_kwargs)
297+
message_token_count = self.__client.beta.messages.count_tokens(**count_token_input_kwargs)
254298
return model_limit - message_token_count.input_tokens
255299

256300
def truncate_messages(
@@ -295,5 +339,5 @@ def chat_completion(
295339
top_p=top_p,
296340
)
297341

298-
response = self.client.messages.create(**input_kwargs)
342+
response = self.__client.messages.create(**input_kwargs)
299343
return _anthropic_to_openai_response(model, response)

patchwork/common/client/llm/google.py

Lines changed: 50 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,21 @@
2525
)
2626
from openai.types.chat.chat_completion import ChatCompletion, Choice
2727
from pydantic import BaseModel
28-
from typing_extensions import Any, Dict, Iterable, List, Optional, Type, Union
28+
from pydantic_ai.messages import ModelMessage, ModelResponse
29+
from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse
30+
from pydantic_ai.models.gemini import GeminiModel
31+
from pydantic_ai.settings import ModelSettings
32+
from pydantic_ai.usage import Usage
33+
from typing_extensions import (
34+
Any,
35+
AsyncIterator,
36+
Dict,
37+
Iterable,
38+
List,
39+
Optional,
40+
Type,
41+
Union,
42+
)
2943

3044
from patchwork.common.client.llm.protocol import NOT_GIVEN, LlmClient, NotGiven
3145
from patchwork.common.client.llm.utils import json_schema_to_model
@@ -48,6 +62,41 @@ def __init__(self, api_key: str):
4862
def __get_models_info(self) -> list[Model]:
4963
return list(self.client.models.list())
5064

65+
def __get_pydantic_model(self, model_settings: ModelSettings | None) -> Model:
66+
if model_settings is None:
67+
raise ValueError("Model settings cannot be None")
68+
model_name = model_settings.get("model")
69+
if model_name is None:
70+
raise ValueError("Model must be set cannot be None")
71+
72+
return GeminiModel(model_name, api_key=self.__api_key)
73+
74+
async def request(
75+
self,
76+
messages: list[ModelMessage],
77+
model_settings: ModelSettings | None,
78+
model_request_parameters: ModelRequestParameters,
79+
) -> tuple[ModelResponse, Usage]:
80+
model = self.__get_pydantic_model(model_settings)
81+
return await model.request(messages, model_settings, model_request_parameters)
82+
83+
async def request_stream(
84+
self,
85+
messages: list[ModelMessage],
86+
model_settings: ModelSettings | None,
87+
model_request_parameters: ModelRequestParameters,
88+
) -> AsyncIterator[StreamedResponse]:
89+
model = self.__get_pydantic_model(model_settings)
90+
yield model.request_stream(messages, model_settings, model_request_parameters)
91+
92+
@property
93+
def model_name(self) -> str:
94+
return "Undetermined"
95+
96+
@property
97+
def system(self) -> str:
98+
return "google-gla"
99+
51100
def __get_model_limits(self, model: str) -> int:
52101
for model_info in self.__get_models_info():
53102
if model_info.name == f"{self.__MODEL_PREFIX}{model}" and model_info.input_token_limit is not None:

patchwork/common/client/llm/openai_.py

Lines changed: 52 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
from __future__ import annotations
22

33
import functools
4+
from functools import cached_property
45
from pathlib import Path
56

67
import tiktoken
@@ -12,7 +13,12 @@
1213
ChatCompletionToolParam,
1314
completion_create_params,
1415
)
15-
from typing_extensions import Dict, Iterable, List, Optional, Union
16+
from pydantic_ai.messages import ModelMessage, ModelResponse
17+
from pydantic_ai.models import Model, ModelRequestParameters, StreamedResponse
18+
from pydantic_ai.models.openai import OpenAIModel
19+
from pydantic_ai.settings import ModelSettings
20+
from pydantic_ai.usage import Usage
21+
from typing_extensions import AsyncIterator, Dict, Iterable, List, Optional, Union
1622

1723
from patchwork.common.client.llm.protocol import NOT_GIVEN, LlmClient, NotGiven
1824
from patchwork.logger import logger
@@ -42,20 +48,59 @@ class OpenAiLlmClient(LlmClient):
4248
}
4349

4450
def __init__(self, api_key: str, base_url=None, **kwargs):
45-
self.api_key = api_key
46-
self.base_url = base_url
47-
self.client = OpenAI(api_key=api_key, base_url=base_url, **kwargs)
51+
self.__api_key = api_key
52+
self.__base_url = base_url
53+
self.__kwargs = kwargs
54+
55+
@cached_property
56+
def __client(self) -> OpenAI:
57+
return OpenAI(api_key=self.__api_key, base_url=self.__base_url, **self.__kwargs)
58+
59+
def __get_pydantic_model(self, model_settings: ModelSettings | None) -> Model:
60+
if model_settings is None:
61+
raise ValueError("Model settings cannot be None")
62+
model_name = model_settings.get("model")
63+
if model_name is None:
64+
raise ValueError("Model must be set cannot be None")
65+
66+
return OpenAIModel(model_name, base_url=self.__base_url, api_key=self.__api_key)
67+
68+
async def request(
69+
self,
70+
messages: list[ModelMessage],
71+
model_settings: ModelSettings | None,
72+
model_request_parameters: ModelRequestParameters,
73+
) -> tuple[ModelResponse, Usage]:
74+
model = self.__get_pydantic_model(model_settings)
75+
return await model.request(messages, model_settings, model_request_parameters)
76+
77+
async def request_stream(
78+
self,
79+
messages: list[ModelMessage],
80+
model_settings: ModelSettings | None,
81+
model_request_parameters: ModelRequestParameters,
82+
) -> AsyncIterator[StreamedResponse]:
83+
model = self.__get_pydantic_model(model_settings)
84+
yield model.request_stream(messages, model_settings, model_request_parameters)
85+
86+
@property
87+
def model_name(self) -> str:
88+
return "Undetermined"
89+
90+
@property
91+
def system(self) -> str | None:
92+
return "openai"
4893

4994
def __is_not_openai_url(self):
5095
# Some providers/apis only implement the chat completion endpoint.
5196
# We mainly use this to skip using the model endpoints.
52-
return self.base_url is not None and self.base_url != "https://api.openai.com/v1"
97+
return self.__base_url is not None and self.__base_url != "https://api.openai.com/v1"
5398

5499
def get_models(self) -> set[str]:
55100
if self.__is_not_openai_url():
56101
return set()
57102

58-
return _cached_list_models_from_openai(self.api_key)
103+
return _cached_list_models_from_openai(self.__api_key)
59104

60105
def is_model_supported(self, model: str) -> bool:
61106
# might not implement model endpoint
@@ -147,4 +192,4 @@ def chat_completion(
147192
top_p=top_p,
148193
)
149194

150-
return self.client.chat.completions.create(**NotGiven.remove_not_given(input_kwargs))
195+
return self.__client.chat.completions.create(**NotGiven.remove_not_given(input_kwargs))

0 commit comments

Comments
 (0)