Merged
6 changes: 4 additions & 2 deletions patchwork/common/client/llm/utils.py
@@ -1,6 +1,8 @@
from __future__ import annotations

import json
import random
import string

from openai.lib._parsing._completions import type_to_response_format_param
from openai.types.chat.completion_create_params import ResponseFormat
@@ -114,5 +116,5 @@ def example_dict_to_base_model(example_data: dict) -> Type[BaseModel]:

field = Field(**field_kwargs)
base_model_field_defs[example_data_key] = (value_typing, field)

return create_model("ResponseFormat", **base_model_field_defs)
random_suffix = "".join(random.choice(string.ascii_lowercase) for _ in range(4))
return create_model(f"ResponseFormat_{random_suffix}", **base_model_field_defs)
42 changes: 29 additions & 13 deletions patchwork/common/multiturn_strategy/agentic_strategy_v2.py
@@ -1,21 +1,23 @@
from __future__ import annotations

import asyncio
import json
import logging
import sys

from pydantic import BaseModel
from pydantic_ai import Agent
from pydantic_ai.models.anthropic import AnthropicModel
from pydantic_ai.result import RunResult
from pydantic_ai.agent import AgentRunResult
from typing_extensions import Any, Dict, Optional, Union

from patchwork.common.client.llm.protocol import LlmClient
from patchwork.common.client.llm.utils import example_json_to_base_model
from patchwork.common.tools import Tool
from patchwork.common.utils.utils import mustache_render

_COMPLETION_FLAG_ATTRIBUTE = "is_task_completed"
_MESSAGE_ATTRIBUTE = "message"
DEFAULT_AGENT_EXAMPLE_JSON = f'{{"{_MESSAGE_ATTRIBUTE}":"message", "{_COMPLETION_FLAG_ATTRIBUTE}": false}}'


class AgentConfig(BaseModel):
@@ -25,15 +27,23 @@ class Config:
name: str
tool_set: Dict[str, Tool]
system_prompt: str = ""
example_json: Union[
str, Dict[str, Any]
] = f'{{"{_MESSAGE_ATTRIBUTE}":"message", "{_COMPLETION_FLAG_ATTRIBUTE}": false}}'
example_json: Union[str, Dict[str, Any]] = DEFAULT_AGENT_EXAMPLE_JSON

def model_post_init(self, __context: Any) -> None:
if self.example_json == DEFAULT_AGENT_EXAMPLE_JSON:
return

wanted = json.loads(self.example_json)
default_wanted = json.loads(DEFAULT_AGENT_EXAMPLE_JSON)
default_wanted.update(wanted)
self.example_json = json.dumps(default_wanted)


class AgenticStrategyV2:
def __init__(
self,
api_key: str,
model: str,
llm_client: LlmClient,
template_data: dict[str, str],
system_prompt_template: str,
user_prompt_template: str,
@@ -44,25 +54,30 @@ def __init__(
self.__limit = limit
self.__template_data = template_data
self.__user_prompt_template = user_prompt_template
model = AnthropicModel("claude-3-5-sonnet-latest", api_key=api_key)
self.__summariser = Agent(
model,
llm_client,
system_prompt=mustache_render(system_prompt_template, self.__template_data),
result_type=example_json_to_base_model(example_json),
model_settings=dict(parallel_tool_calls=False),
model_settings=dict(
parallel_tool_calls=False,
model=model,
),
)
self.__agents = []
for agent_config in agent_configs:
tools = []
for tool in agent_config.tool_set.values():
tools.append(tool.to_pydantic_ai_function_tool())
agent = Agent(
model,
llm_client,
name=agent_config.name,
system_prompt=mustache_render(agent_config.system_prompt, self.__template_data),
tools=tools,
result_type=example_json_to_base_model(agent_config.example_json),
model_settings=dict(parallel_tool_calls=False),
model_settings=dict(
parallel_tool_calls=False,
model=model,
),
)

self.__agents.append(agent)
@@ -89,7 +104,7 @@ def execute(self, limit: Optional[int] = None) -> dict:
message_history = None
agent_output = None
for i in range(limit or self.__limit or sys.maxsize):
agent_output: RunResult[Any] = loop.run_until_complete(
agent_output: AgentRunResult[Any] = loop.run_until_complete(
agent.run(user_message, message_history=message_history)
)
message_history = agent_output.all_messages()
@@ -107,10 +122,11 @@ def execute(self, limit: Optional[int] = None) -> dict:
return dict()

if len(agents_result) == 1:
history = next(v for _, v in agents_result.items()).all_messages()
final_result = loop.run_until_complete(
self.__summariser.run(
"From the actions taken by the assistant. Please give me the result.",
message_history=next(v for _, v in agents_result.items()).all_messages(),
message_history=history,
)
)
else:
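Note: the new model_post_init merges a caller-supplied example_json over the default keys, so message and is_task_completed always survive. A minimal sketch of that merge, assuming AgentConfig is importable and using an empty tool_set for brevity:

from patchwork.common.multiturn_strategy.agentic_strategy_v2 import AgentConfig

# Caller-provided keys are layered on top of the defaults, so the
# completion flag and message fields remain in the merged example.
cfg = AgentConfig(name="Assistant", tool_set={}, example_json='{"summary": "text"}')
print(cfg.example_json)
# '{"message": "message", "is_task_completed": false, "summary": "text"}'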
46 changes: 46 additions & 0 deletions patchwork/common/tools/github_tool.py
@@ -0,0 +1,46 @@
from __future__ import annotations

import os
import subprocess

from patchwork.common.tools.tool import Tool


class GitHubTool(Tool, tool_name="github_tool"):
Contributor
abc_register=False?

Contributor Author
It's alright since Tool.get_tools will exclude it when gh_token is not provided.

def __init__(self, path: str, gh_token: str):
super().__init__()
self.path = path
self.gh_token = gh_token

@property
def json_schema(self) -> dict:
return {
"name": "github_tool",
"description": """\
Access to the GitHub CLI, the command is also `gh` all args provided are used as is
""",
"input_schema": {
"type": "object",
"properties": {
"args": {
"type": "array",
"items": {"type": "string"},
"description": "The args to run `gh` command with.",
}
},
"required": ["args"],
},
}

def execute(self, args: list[str]) -> str:
env = os.environ.copy()
env["GH_TOKEN"] = self.gh_token
p = subprocess.run(
["gh", *args],
env=env,
cwd=self.path,
text=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
return p.stdout
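Note: a minimal usage sketch for the new tool (assumes the gh CLI is installed and the token is valid; the pr list arguments are illustrative):

from patchwork.common.tools.github_tool import GitHubTool

tool = GitHubTool(path=".", gh_token="<gh token>")
# Runs `gh pr list --limit 5` in the given path with GH_TOKEN set and
# returns the combined stdout/stderr as a single string.
output = tool.execute(["pr", "list", "--limit", "5"])
print(output)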
2 changes: 1 addition & 1 deletion patchwork/common/tools/grep_tool.py
@@ -103,7 +103,7 @@ def execute(self, pattern: Optional[str] = None, depth: int = 1, is_case_sensiti


class FindTextTool(Tool, tool_name="find_text"):
__CHAR_LIMIT = 200
__CHAR_LIMIT = 400
__CHAR_LIMIT_TEXT = "<Too many characters>"

def __init__(self, path: Path | str, **kwargs):
28 changes: 25 additions & 3 deletions patchwork/common/tools/tool.py
@@ -1,22 +1,29 @@
import functools
from abc import ABC, abstractmethod

from pydantic_ai.tools import RunContext
from pydantic_ai.tools import Tool as PydanticTool
from pydantic_ai.tools import ToolDefinition
from typing_extensions import Type

from patchwork.logger import logger


class Tool(ABC):
__internal_map: dict[str, Type["Tool"]] = dict()

def __init_subclass__(cls, tool_name=None, abc_register=True, **kwargs):
def __init_subclass__(cls, tool_name=None, abc_register=True, tool_logging=True, **kwargs):
cls_name = tool_name or cls.__name__
cls.name = cls_name

if tool_logging:
setattr(cls, "execute", Tool.__execute_logging_wrapper(cls.__dict__["execute"]))

if not abc_register:
return

cls_name = tool_name or cls.__name__
if cls_name in cls.__internal_map.keys():
raise ValueError(f"Duplicate subclass name for class {cls.__name__}: {cls_name}")
cls.name = cls_name
Tool.__internal_map[cls_name] = cls

@property
@@ -55,3 +62,18 @@ async def _prep(ctx: RunContext[None], tool_def: ToolDefinition) -> ToolDefiniti
return PydanticTool(
self.execute, prepare=_prep, name=self.name, description=self.json_schema.get("description", "")
)

@staticmethod
def __execute_logging_wrapper(func):
@functools.wraps(func)
def execute_logging_wrapper(self, *args, **kwargs):
arg_text = ""
if len(args) > 0:
arg_text += f"args: {args}"
if len(kwargs) > 0:
arg_text += f"kwargs: {kwargs}"

logger.info(f"Executing Tool: {self.name} with {arg_text}")
return func(self, *args, **kwargs)

return execute_logging_wrapper
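Note: a minimal sketch of the effect of the logging wrapper (EchoTool is hypothetical and only for illustration; abc_register=False keeps it out of the shared registry, while tool_logging is left at its default so execute gets wrapped):

from patchwork.common.tools.tool import Tool

class EchoTool(Tool, tool_name="echo_tool", abc_register=False):
    @property
    def json_schema(self) -> dict:
        return {"name": "echo_tool", "description": "Echo the given text."}

    def execute(self, text: str) -> str:
        return text

EchoTool().execute("hello")
# expected log line: Executing Tool: echo_tool with args: ('hello',)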
12 changes: 9 additions & 3 deletions patchwork/logger.py
@@ -31,6 +31,13 @@
logger.addHandler(__noop)


def evict_null_handler():
global logger, __noop

warnings.simplefilter("ignore")
logger.removeHandler(__noop)


class TerminalHandler(RichHandler):
def __init__(self, log_level: str):
super().__init__(
@@ -137,10 +144,9 @@ def inner(record: logging.LogRecord) -> bool:


def init_cli_logger(log_level: str) -> logging.Logger:
global logger, __noop
global logger

warnings.simplefilter("ignore")
logger.removeHandler(__noop)
evict_null_handler()

if not os.path.exists(HOME_FOLDER): # Check if HOME_FOLDER exists at this point
os.makedirs(HOME_FOLDER)
4 changes: 3 additions & 1 deletion patchwork/steps/AgenticLLMV2/AgenticLLMV2.py
@@ -1,5 +1,6 @@
from pathlib import Path

from patchwork.common.client.llm.aio import AioLlmClient
from patchwork.common.multiturn_strategy.agentic_strategy_v2 import (
AgentConfig,
AgenticStrategyV2,
@@ -17,7 +18,8 @@ def __init__(self, inputs):
base_path = str(Path.cwd())
self.conversation_limit = int(inputs.get("max_agent_calls", 1))
self.agentic_strategy = AgenticStrategyV2(
api_key=inputs.get("anthropic_api_key"),
model="claude-3-7-sonnet-latest",
llm_client=AioLlmClient.create_aio_client(inputs),
template_data=inputs.get("prompt_value", {}),
system_prompt_template=inputs.get("system_prompt", "Summarise from our previous conversation"),
user_prompt_template=inputs.get("user_prompt"),
9 changes: 3 additions & 6 deletions patchwork/steps/BrowserUse/BrowserUse.py
@@ -40,9 +40,7 @@ def init_browser():
os.makedirs(downloads_path)

context_config = BrowserContextConfig(save_downloads_path=downloads_path)
config = BrowserConfig(
headless=True, disable_security=True, new_context_config=context_config
)
config = BrowserConfig(headless=True, disable_security=True, new_context_config=context_config)
controller = Controller()

# Register custom action to upload files to web elements
@@ -124,6 +122,7 @@ class BrowserUse(Step, input_class=BrowserUseInputs, output_class=BrowserUseOutp
This class provides a high-level interface for executing browser-based tasks
using various LLM providers (Google, OpenAI, Anthropic) to control the browser.
"""

required_keys = {"task"}

def __init__(self, inputs):
@@ -142,9 +141,7 @@ def __init__(self, inputs):
if "google_api_key" in self.inputs:
from langchain_google_genai import ChatGoogleGenerativeAI

self.llm = ChatGoogleGenerativeAI(
model="gemini-2.0-flash", google_api_key=self.inputs["google_api_key"]
)
self.llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", google_api_key=self.inputs["google_api_key"])
elif "openai_api_key" in self.inputs:
from langchain_openai import ChatOpenAI

4 changes: 1 addition & 3 deletions patchwork/steps/BrowserUse/typed.py
@@ -10,9 +10,7 @@ class BrowserUseInputs(TypedDict, total=False):
str,
StepTypeConfig(is_config=True, or_op=["google_api_key", "anthropic_api_key"]),
]
anthropic_api_key: Annotated[
str, StepTypeConfig(is_config=True, or_op=["google_api_key", "openai_api_key"])
]
anthropic_api_key: Annotated[str, StepTypeConfig(is_config=True, or_op=["google_api_key", "openai_api_key"])]
google_api_key: Annotated[
str,
StepTypeConfig(is_config=True, or_op=["openai_api_key", "anthropic_api_key"]),
47 changes: 47 additions & 0 deletions patchwork/steps/GitHubAgent/GitHubAgent.py
@@ -0,0 +1,47 @@
from pathlib import Path

from patchwork.common.client.llm.aio import AioLlmClient
from patchwork.common.multiturn_strategy.agentic_strategy_v2 import (
AgentConfig,
AgenticStrategyV2,
)
from patchwork.common.tools.github_tool import GitHubTool
from patchwork.step import Step
from patchwork.steps.GitHubAgent.typed import GitHubAgentInputs, GitHubAgentOutputs


class GitHubAgent(Step, input_class=GitHubAgentInputs, output_class=GitHubAgentOutputs):
def __init__(self, inputs):
super().__init__(inputs)
base_path = inputs.get("base_path", str(Path.cwd()))
task = inputs["task"]
self.agentic_strategy = AgenticStrategyV2(
model="claude-3-7-sonnet-latest",
llm_client=AioLlmClient.create_aio_client(inputs),
template_data=dict(),
system_prompt_template=f"""\
Please summarise the conversation given and provide the result in the structure that is asked of you.
""",
user_prompt_template=f"""\
Please help me with the following task using the GitHub CLI. You should not do anything extra.
Please take note of any requirements to the data required to fetch.

{task}
""",
agent_configs=[
AgentConfig(
name="Assistant",
tool_set=dict(github_tool=GitHubTool(base_path, inputs["github_api_token"])),
system_prompt="""\
You are a senior software developer helping the program manager to obtain some data from GitHub.
You can access github through the `gh` CLI app.
Your `gh` app has already been authenticated.
""",
)
],
example_json=inputs.get("example_json"),
)

def run(self) -> dict:
result = self.agentic_strategy.execute(limit=10)
return {**result, **self.agentic_strategy.usage()}
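Note: a minimal usage sketch for the new step (the task, token, and example_json values are placeholders; any one of the provider keys listed in GitHubAgentInputs can be supplied):

from patchwork.steps.GitHubAgent.GitHubAgent import GitHubAgent

step = GitHubAgent({
    "task": "List the five most recently updated open issues in this repository.",
    "github_api_token": "<gh token>",
    "anthropic_api_key": "<anthropic key>",
    "example_json": '{"summary": "text of the summary"}',
})
result = step.run()  # structured agent output merged with request/response token counts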
Empty file.
27 changes: 27 additions & 0 deletions patchwork/steps/GitHubAgent/typed.py
@@ -0,0 +1,27 @@
from typing_extensions import Annotated, Any, Dict, List, TypedDict

from patchwork.common.utils.step_typing import StepTypeConfig


class GitHubAgentInputs(TypedDict, total=False):
base_path: str
prompt_value: Dict[str, Any]
system_prompt: str
user_prompt: str
max_llm_calls: Annotated[int, StepTypeConfig(is_config=True)]
openai_api_key: Annotated[
str, StepTypeConfig(is_config=True, or_op=["patched_api_key", "google_api_key", "anthropic_api_key"])
]
anthropic_api_key: Annotated[
str, StepTypeConfig(is_config=True, or_op=["patched_api_key", "google_api_key", "openai_api_key"])
]
google_api_key: Annotated[
str, StepTypeConfig(is_config=True, or_op=["patched_api_key", "openai_api_key", "anthropic_api_key"])
]


class GitHubAgentOutputs(TypedDict):
conversation_history: List[Dict]
tool_records: List[Dict]
request_tokens: int
response_tokens: int