
Commit 7693d11

chore: Release 2.0.7 (#4679)
# Changelog

## New Features:
- **LlamaCpp Model:** Added a new LlamaCpp model class.

## Improvements:
- **AgentOS CORS Middleware:** AgentOS now automatically merges an existing CORSMiddleware `allow_origins` list with the origins required for the AgentOS UI to function.
- **Update `create_agent_run` and `File.valid_mime_types`:**
  1. Metadata about files injected into tools (e.g. filename and content type) was previously unavailable, so `create_agent_run` now includes it.
  2. Added `application/json` and `application/vnd.openxmlformats-officedocument.wordprocessingml.document` to `File.valid_mime_types`.

## Bug Fixes:
- **`input_schema` in UI breaking workflows:** For workflow configs like [this](https://gist.github.com/ysolanky/54b5361be9908a22f5b93149bd0b7acc), where the steps are not wrapped in the `Step` class, the UI did not show the `input_schema` form.
- **Improve functionality of the `send_media_to_model` flag:**
  - The `send_media_to_model` flag covers the case where an image passed to the agent should be accessed by a tool but not sent to the model.
  - When a tool (custom or DALL-E) generates an image and the model is not multimodal, the tool-generated media is now appended to the model as a user message; the same `send_media_to_model` flag can be used to withhold it, since a non-multimodal model would otherwise raise an error. See the usage sketch below.
1 parent 93fb56b commit 7693d11
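The `send_media_to_model` behavior is easiest to see in context. A minimal sketch, assuming the flag is exposed on `Agent` as `send_media_to_model` and that the tool reads the image from disk itself; the tool name, file path, and prompt here are illustrative, not taken from this commit:

```python
from agno.agent import Agent
from agno.media import Image
from agno.models.openai import OpenAIChat


def count_red_pixels(image_path: str) -> str:
    """Hypothetical tool that opens the image itself; the model only needs the path."""
    return f"Counted the red pixels in {image_path}"


# Assumption: `send_media_to_model` is an Agent-level flag. With it set to False,
# neither the input image nor any tool-generated image is appended to the model's
# messages, which avoids errors when the underlying model is not multimodal.
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    tools=[count_red_pixels],
    send_media_to_model=False,
)

agent.print_response(
    "How many red pixels are in the attached image?",
    images=[Image(filepath="tmp/photo.jpg")],  # illustrative local file
)
```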

File tree

15 files changed (+389, -425 lines)


cookbook/agent_os/mcp/mcp_tools_advanced_example.py

Lines changed: 2 additions & 1 deletion
@@ -4,12 +4,13 @@
 AgentOS handles the lifespan of the MCPTools internally.
 """

+from os import getenv
+
 from agno.agent import Agent
 from agno.db.sqlite import SqliteDb
 from agno.models.anthropic import Claude
 from agno.os import AgentOS
 from agno.tools.mcp import MCPTools  # noqa: F401
-from os import getenv

 # Setup the database
 db = SqliteDb(db_file="tmp/agentos.db")

libs/agno/agno/knowledge/chunking/semantic.py

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
-from typing import Any, Dict, List, Optional
 import inspect
+from typing import Any, Dict, List, Optional

 from agno.knowledge.chunking.strategy import ChunkingStrategy
 from agno.knowledge.document.base import Document

libs/agno/agno/models/base.py

Lines changed: 23 additions & 5 deletions
@@ -302,7 +302,11 @@ def response(

         if any(msg.images or msg.videos or msg.audio or msg.files for msg in function_call_results):
             # Handle function call media
-            self._handle_function_call_media(messages=messages, function_call_results=function_call_results, send_media_to_model=send_media_to_model)
+            self._handle_function_call_media(
+                messages=messages,
+                function_call_results=function_call_results,
+                send_media_to_model=send_media_to_model,
+            )

         for function_call_result in function_call_results:
             function_call_result.log(metrics=True)
@@ -443,7 +447,11 @@ async def aresponse(

         if any(msg.images or msg.videos or msg.audio or msg.files for msg in function_call_results):
             # Handle function call media
-            self._handle_function_call_media(messages=messages, function_call_results=function_call_results, send_media_to_model=send_media_to_model)
+            self._handle_function_call_media(
+                messages=messages,
+                function_call_results=function_call_results,
+                send_media_to_model=send_media_to_model,
+            )

         for function_call_result in function_call_results:
             function_call_result.log(metrics=True)
@@ -781,7 +789,11 @@ def response_stream(

         # Handle function call media
         if any(msg.images or msg.videos or msg.audio for msg in function_call_results):
-            self._handle_function_call_media(messages=messages, function_call_results=function_call_results, send_media_to_model=send_media_to_model)
+            self._handle_function_call_media(
+                messages=messages,
+                function_call_results=function_call_results,
+                send_media_to_model=send_media_to_model,
+            )

         for function_call_result in function_call_results:
             function_call_result.log(metrics=True)
@@ -941,7 +953,11 @@ async def aresponse_stream(

         # Handle function call media
         if any(msg.images or msg.videos or msg.audio for msg in function_call_results):
-            self._handle_function_call_media(messages=messages, function_call_results=function_call_results, send_media_to_model=send_media_to_model)
+            self._handle_function_call_media(
+                messages=messages,
+                function_call_results=function_call_results,
+                send_media_to_model=send_media_to_model,
+            )

         for function_call_result in function_call_results:
             function_call_result.log(metrics=True)
@@ -1718,7 +1734,9 @@ def format_function_call_results(
         if len(function_call_results) > 0:
             messages.extend(function_call_results)

-    def _handle_function_call_media(self, messages: List[Message], function_call_results: List[Message], send_media_to_model: bool = True) -> None:
+    def _handle_function_call_media(
+        self, messages: List[Message], function_call_results: List[Message], send_media_to_model: bool = True
+    ) -> None:
         """
         Handle media artifacts from function calls by adding follow-up user messages for generated media if needed.
         """

libs/agno/agno/os/utils.py

Lines changed: 3 additions & 1 deletion
@@ -132,7 +132,9 @@ def process_document(file: UploadFile) -> Optional[FileMedia]:
         content = file.file.read()
         if not content:
             raise HTTPException(status_code=400, detail="Empty file")
-        return FileMedia(content=content, filename=file.filename, format=extract_format(file), mime_type=file.content_type)
+        return FileMedia(
+            content=content, filename=file.filename, format=extract_format(file), mime_type=file.content_type
+        )
     except Exception as e:
         logger.error(f"Error processing document {file.filename}: {e}")
         return None
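This ties into the changelog item about file metadata and the expanded `File.valid_mime_types`: `application/json` and the `.docx` MIME type are now accepted. A rough sketch, assuming `File` lives in `agno.media` and accepts `filepath` and `mime_type` the same way `FileMedia` is constructed above:

```python
from agno.media import File

# application/json is now in File.valid_mime_types, so a JSON document can be
# attached to a run; its filename and content type are surfaced to tools.
report = File(filepath="tmp/report.json", mime_type="application/json")  # illustrative paths
notes = File(
    filepath="tmp/notes.docx",
    mime_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
)
```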

libs/agno/agno/team/team.py

Lines changed: 9 additions & 10 deletions
@@ -5090,20 +5090,19 @@ def get_team_history(num_chats: Optional[int] = None) -> str:
             import json

             history: List[Dict[str, Any]] = []
-            if session is not None:
-                all_chats = self.get_messages_for_session(session_id=session.session_id)

-                if len(all_chats) == 0:
-                    return ""
+            all_chats = session.get_messages_from_last_n_runs(
+                team_id=self.id,
+            )

-                for chat in all_chats[::-1]:  # type: ignore
-                    history.insert(0, chat.to_dict())  # type: ignore
+            if len(all_chats) == 0:
+                return ""

-                if num_chats is not None:
-                    history = history[:num_chats]
+            for chat in all_chats[::-1]:  # type: ignore
+                history.insert(0, chat.to_dict())  # type: ignore

-            else:
-                return ""
+            if num_chats is not None:
+                history = history[:num_chats]

             return json.dumps(history)

libs/agno/agno/tools/decorator.py

Lines changed: 4 additions & 2 deletions
@@ -250,8 +250,10 @@ async def async_gen_wrapper(*args: Any, **kwargs: Any) -> Any:
         if kwargs.get("stop_after_tool_call") is True:
             if "show_result" not in kwargs or kwargs.get("show_result") is None:
                 tool_config["show_result"] = True
-
-        return Function(**tool_config)
+        function = Function(**tool_config)
+        # Determine parameters for the function
+        function.process_entrypoint()
+        return function

     # Handle both @tool and @tool() cases
     if len(args) == 1 and callable(args[0]) and not kwargs:
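With this change the decorator resolves the tool's parameter schema eagerly via `process_entrypoint()` instead of returning a bare `Function`. A small sketch of the decorator it affects; the example tool and the printed check are illustrative, not from this commit:

```python
from agno.tools.decorator import tool


@tool(stop_after_tool_call=True)
def get_the_weather(city: str) -> str:
    """Return a canned weather report for the given city."""
    return f"It is currently 70 degrees and cloudy in {city}"


# The decorator now returns a Function whose parameters were derived from the
# signature at decoration time (via process_entrypoint), not lazily later.
print(type(get_the_weather).__name__)  # -> "Function"
```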

libs/agno/agno/tools/mcp.py

Lines changed: 1 addition & 1 deletion
@@ -102,7 +102,7 @@ def __init__(
         transport: Literal["stdio", "sse", "streamable-http"] = "stdio",
         server_params: Optional[Union[StdioServerParameters, SSEClientParams, StreamableHTTPClientParams]] = None,
         session: Optional[ClientSession] = None,
-        timeout_seconds: int = 5,
+        timeout_seconds: int = 10,
         client=None,
         include_tools: Optional[list[str]] = None,
         exclude_tools: Optional[list[str]] = None,
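The default `timeout_seconds` doubles from 5 to 10. A minimal sketch of relying on or overriding the new default, with an illustrative stdio MCP server command (not part of this commit):

```python
from mcp import StdioServerParameters

from agno.tools.mcp import MCPTools

# Illustrative stdio MCP server; any command speaking MCP over stdio would do.
server_params = StdioServerParameters(
    command="npx",
    args=["-y", "@modelcontextprotocol/server-filesystem", "."],
)

# timeout_seconds now defaults to 10; pass it explicitly for slower servers.
mcp_tools = MCPTools(transport="stdio", server_params=server_params, timeout_seconds=30)
```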

libs/agno/agno/tools/memori.py

Lines changed: 1 addition & 53 deletions
@@ -1,7 +1,6 @@
 import json
 from typing import Any, Dict, List, Optional

-from agno.agent import Agent
 from agno.tools.toolkit import Toolkit
 from agno.utils.log import log_debug, log_error, log_info, log_warning

@@ -122,7 +121,6 @@ def __init__(

     def search_memory(
         self,
-        agent: Agent,
         query: str,
         limit: Optional[int] = None,
     ) -> str:
@@ -180,7 +178,7 @@ def search_memory(
             log_error(f"Error searching memory: {e}")
             return json.dumps({"success": False, "error": f"Memory search error: {str(e)}"})

-    def record_conversation(self, agent: Agent, content: str) -> str:
+    def record_conversation(self, content: str) -> str:
         """
         Add important information or facts to memory.

@@ -222,7 +220,6 @@ def record_conversation(self, agent: Agent, content: str) -> str:

     def get_memory_stats(
         self,
-        agent: Agent,
     ) -> str:
         """
         Get statistics about the memory system.
@@ -340,52 +337,3 @@ def disable_memory_system(self) -> bool:
         except Exception as e:
             log_error(f"Failed to disable memory system: {e}")
             return False
-
-
-def create_memori_search_tool(memori_toolkit: MemoriTools):
-    """
-    Create a standalone memory search function for use with Agno agents.
-
-    This is a convenience function that creates a memory search tool similar
-    to the pattern shown in the Memori example code.
-
-    Args:
-        memori_toolkit: An initialized MemoriTools instance
-
-    Returns:
-        Callable: A memory search function that can be used as an agent tool
-
-    Example:
-        ```python
-        memori_tools = MemoriTools(database_connect="sqlite:///memory.db")
-        search_tool = create_memori_search_tool(memori_tools)
-
-        agent = Agent(
-            model=OpenAIChat(),
-            tools=[search_tool],
-            description="Agent with memory search capability"
-        )
-        ```
-    """
-
-    def search_memory(query: str) -> str:
-        """
-        Search the agent's memory for past conversations and information.
-
-        Args:
-            query: What to search for in memory
-
-        Returns:
-            str: Search results or error message
-        """
-        try:
-            if not query.strip():
-                return "Please provide a search query"
-
-            result = memori_toolkit._memory_tool.execute(query=query.strip())
-            return str(result) if result else "No relevant memories found"
-
-        except Exception as e:
-            return f"Memory search error: {str(e)}"
-
-    return search_memory

libs/agno/pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [project]
 name = "agno"
-version = "2.0.6"
+version = "2.0.7"
 description = "Agno: a lightweight library for building Multi-Agent Systems"
 requires-python = ">=3.7,<4"
 readme = "README.md"

libs/agno/tests/integration/agent/test_user_confirmation_flows.py

Lines changed: 0 additions & 74 deletions
@@ -218,42 +218,6 @@ def get_the_weather(city: str):
     assert len(session_from_db.runs[0].messages) == 5, [m.role for m in session_from_db.runs[0].messages]


-@pytest.mark.flaky(reruns=2, reason="Asserting against a generated response makes this flaky")
-def test_tool_call_requires_confirmation_stream(shared_db):
-    @tool(requires_confirmation=True)
-    def get_the_weather(city: str):
-        return f"It is currently 70 degrees and cloudy in {city}"
-
-    agent = Agent(
-        model=OpenAIChat(id="gpt-4o-mini"),
-        tools=[get_the_weather],
-        db=shared_db,
-        markdown=True,
-        telemetry=False,
-    )
-
-    found_confirmation = False
-    for response in agent.run("What is the weather in Tokyo?", stream=True):
-        if response.is_paused:
-            assert response.tools is not None
-            assert response.tools[0].requires_confirmation
-            assert response.tools[0].tool_name == "get_the_weather"
-            assert response.tools[0].tool_args == {"city": "Tokyo"}
-            # Mark the tool as confirmed
-            response.tools[0].confirmed = True
-            found_confirmation = True
-
-    assert found_confirmation, "No tools were found to require confirmation"
-    run_response = agent.get_last_run_output()
-
-    found_confirmation = False
-    for response in agent.continue_run(run_response, stream=True):
-        if response.is_paused:
-            found_confirmation = True
-
-    assert found_confirmation is False, "Some tools still require confirmation"
-
-
 @pytest.mark.asyncio
 async def test_tool_call_requires_confirmation_async(shared_db):
     @tool(requires_confirmation=True)
@@ -282,44 +246,6 @@ async def get_the_weather(city: str):
     assert response.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"


-@pytest.mark.asyncio
-@pytest.mark.flaky(reruns=2, reason="Async makes this test flaky")
-async def test_tool_call_requires_confirmation_stream_async(shared_db):
-    @tool(requires_confirmation=True)
-    async def get_the_weather(city: str):
-        return f"It is currently 70 degrees and cloudy in {city}"
-
-    agent = Agent(
-        model=OpenAIChat(id="gpt-4o-mini"),
-        tools=[get_the_weather],
-        db=shared_db,
-        markdown=True,
-        telemetry=False,
-    )
-
-    found_confirmation = False
-    async for response in agent.arun("What is the weather in Tokyo?", stream=True):
-        if response.is_paused:
-            assert response.tools[0].requires_confirmation
-            assert response.tools[0].tool_name == "get_the_weather"
-            assert response.tools[0].tool_args == {"city": "Tokyo"}
-
-            # Mark the tool as confirmed
-            for tool_response in response.tools:
-                if tool_response.requires_confirmation:
-                    tool_response.confirmed = True
-            found_confirmation = True
-    assert found_confirmation, "No tools were found to require confirmation"
-
-    run_response = agent.get_last_run_output()
-
-    found_confirmation = False
-    async for response in agent.acontinue_run(run_response, stream=True):
-        if response.is_paused:
-            found_confirmation = True
-    assert found_confirmation is False, "Some tools still require confirmation"
-
-
 def test_tool_call_multiple_requires_confirmation(shared_db):
     @tool(requires_confirmation=True)
     def get_the_weather(city: str):
