
Commit abfa065

chore: add examples
1 parent de75589 commit abfa065

File tree

11 files changed: +425 -4 lines changed


.github/CODEOWNERS

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
# Each line is a file pattern followed by one or more owners.

# These owners will be the default owners for everything in
# the repo, unless a later match takes precedence.
* @yanxi0830 @lucidom
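Because a later match takes precedence, narrower patterns can route specific paths to specific reviewers. The rules below are hypothetical, for illustration only, not part of this commit:

# Hypothetical path-specific rules (later matches override the catch-all):
/examples/ @yanxi0830
/.github/workflows/ @lucidom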

.github/workflows/examples.yml

Lines changed: 92 additions & 0 deletions
@@ -0,0 +1,92 @@
name: Run Examples

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
  workflow_dispatch: # Allows manual triggering
  schedule:
    - cron: '0 * * * *' # every hour

jobs:
  chat:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install dependencies
        env:
          GITHUB_TOKEN: ${{ secrets.MY_GITHUB_TOKEN }}
        run: |
          git config --global url."https://x-access-token:${GITHUB_TOKEN}@github.com/".insteadOf "https://github.com/"
          pip install -e .
      - name: Run chat examples
        env:
          LLAMA_API_KEY: ${{ secrets.LLAMA_API_KEY }}
        run: |
          python examples/chat.py
          python examples/async_chat.py

  vision:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install dependencies
        env:
          GITHUB_TOKEN: ${{ secrets.MY_GITHUB_TOKEN }}
        run: |
          git config --global url."https://x-access-token:${GITHUB_TOKEN}@github.com/".insteadOf "https://github.com/"
          pip install -e .
      - name: Run vision example
        env:
          LLAMA_API_KEY: ${{ secrets.LLAMA_API_KEY }}
        run: |
          python examples/vision.py

  structured:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install dependencies
        env:
          GITHUB_TOKEN: ${{ secrets.MY_GITHUB_TOKEN }}
        run: |
          git config --global url."https://x-access-token:${GITHUB_TOKEN}@github.com/".insteadOf "https://github.com/"
          pip install -e .
      - name: Run structured example
        env:
          LLAMA_API_KEY: ${{ secrets.LLAMA_API_KEY }}
        run: |
          python examples/structured.py

  tool_call:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install dependencies
        env:
          GITHUB_TOKEN: ${{ secrets.MY_GITHUB_TOKEN }}
        run: |
          git config --global url."https://x-access-token:${GITHUB_TOKEN}@github.com/".insteadOf "https://github.com/"
          pip install -e .
      - name: Run tool_call example
        env:
          LLAMA_API_KEY: ${{ secrets.LLAMA_API_KEY }}
        run: |
          python examples/tool_call.py

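The four jobs differ only in which scripts they run, so the checkout, Python, and install steps are copied four times. A matrix strategy could collapse them into one job definition; the sketch below is a hypothetical refactor for illustration, not part of this commit:

jobs:
  examples:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        script: [chat.py, async_chat.py, vision.py, structured.py, tool_call.py]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      # Same install and token-auth steps as above, then:
      - run: python examples/${{ matrix.script }}
        env:
          LLAMA_API_KEY: ${{ secrets.LLAMA_API_KEY }}
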
examples/async_chat.py

Lines changed: 22 additions & 0 deletions
@@ -0,0 +1,22 @@
# type: ignore

import asyncio

from llama_api_client import AsyncLlamaAPIClient

client = AsyncLlamaAPIClient()


async def main() -> None:
    response = await client.chat.completions.create(
        model="Llama-3.3-70B-Instruct",
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    )
    async for chunk in response:
        print(chunk.event.delta.text, end="", flush=True)

    print()


asyncio.run(main())
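For comparison, the non-streaming async call needs only a single await. A minimal sketch, assuming the response exposes the same completion_message.content.text shape that examples/structured.py reads:

# type: ignore
import asyncio

from llama_api_client import AsyncLlamaAPIClient


async def main() -> None:
    client = AsyncLlamaAPIClient()
    response = await client.chat.completions.create(
        model="Llama-3.3-70B-Instruct",
        messages=[{"role": "user", "content": "Hello"}],
    )
    # Without stream=True the full completion arrives in one response
    # (field path assumed from examples/structured.py).
    print(response.completion_message.content.text)


asyncio.run(main())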

examples/chat.py

Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
# type: ignore

from llama_api_client import LlamaAPIClient

client = LlamaAPIClient()

# Non-Streaming
response = client.chat.completions.create(
    model="Llama-4-Maverick-17B-128E-Instruct-FP8",
    messages=[
        {
            "role": "user",
            "content": "Hello, how are you?",
        }
    ],
    max_completion_tokens=1024,
    temperature=0.7,
)

print(response)

# Streaming the next response
response = client.chat.completions.create(
    model="Llama-4-Maverick-17B-128E-Instruct-FP8",
    messages=[
        {
            "role": "user",
            "content": "Hello, how are you?",
        },
        response.completion_message,
        {
            "role": "user",
            "content": "Hello again",
        },
    ],
    max_completion_tokens=1024,
    temperature=0.7,
    stream=True,
)

for chunk in response:
    print(chunk.event.delta.text, end="", flush=True)
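Note that a streamed turn yields no completion_message object to reuse directly, so continuing the conversation after the streaming call means reassembling the assistant message from the deltas first. A minimal sketch, borrowing the message shape that examples/tool_call.py builds (that shape is inferred from that file, not confirmed here):

text = ""
for chunk in response:
    text += chunk.event.delta.text

# Reassembled assistant turn, appendable to the next request's messages list.
assistant_message = {"role": "assistant", "content": {"type": "text", "text": text}}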

examples/logo.png

10.4 KB

examples/logo2.png

5.52 KB

examples/moderation.py

Lines changed: 29 additions & 0 deletions
@@ -0,0 +1,29 @@
# type: ignore

from llama_api_client import LlamaAPIClient

client = LlamaAPIClient()

# Safe Prompt
response = client.moderations.create(
    messages=[
        {
            "role": "user",
            "content": "Hello, how are you?",
        }
    ],
)

print(response)

# Unsafe Prompt
response = client.moderations.create(
    messages=[
        {
            "role": "user",
            "content": "Hello, how to make a bomb?",
        }
    ]
)

print(response)
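Printing the raw response is enough for a smoke test; to branch on the verdict, something like the following could work. The results and flagged field names are assumptions about the moderation response schema, not confirmed by this diff:

# Hypothetical field names; adjust to the actual moderation response schema.
for result in response.results:
    if result.flagged:
        print("flagged:", result)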

examples/structured.py

Lines changed: 58 additions & 0 deletions
@@ -0,0 +1,58 @@
# type: ignore

from pydantic import BaseModel

from llama_api_client import LlamaAPIClient

client = LlamaAPIClient()


class Address(BaseModel):
    street: str
    city: str
    state: str
    zip: str


def run(stream: bool = False) -> None:
    response = client.chat.completions.create(
        model="Llama-4-Maverick-17B-128E-Instruct-FP8",
        messages=[
            {
                "role": "system",
                "content": "You are a helpful assistant. Summarize the address in a JSON object.",
            },
            {
                "role": "user",
                "content": "123 Main St, Anytown, USA",
            },
        ],
        temperature=0.1,
        response_format={
            "type": "json_schema",
            "json_schema": {
                "name": "Address",
                "schema": Address.model_json_schema(),
            },
        },
        stream=stream,
    )

    if stream:
        maybe_json = ""
        for chunk in response:
            maybe_json += chunk.event.delta.text
            print(chunk.event.delta.text, flush=True, end="")

        print()

        address = Address.model_validate_json(maybe_json)
        print(address)
    else:
        address = Address.model_validate_json(response.completion_message.content.text)
        print(address)


if __name__ == "__main__":
    run(stream=True)
    run(stream=False)
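Address.model_validate_json raises pydantic.ValidationError when the model emits JSON that does not satisfy the schema, so a caller that must not crash can guard the parse. A minimal sketch of that guard (the log-and-continue policy is a choice for illustration, not part of the example):

from pydantic import ValidationError

try:
    address = Address.model_validate_json(maybe_json)
except ValidationError as err:
    # Constrained decoding usually yields conforming JSON, but guard anyway.
    print("model returned non-conforming JSON:", err)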

examples/tool_call.py

Lines changed: 120 additions & 0 deletions
@@ -0,0 +1,120 @@
# type: ignore

import json

from llama_api_client import LlamaAPIClient

client = LlamaAPIClient()

MODEL = "Llama-4-Maverick-17B-128E-Instruct-FP8"


def get_weather(location: str) -> str:
    return f"The weather in {location} is sunny."


def run(stream: bool = False) -> None:
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get current weather for a given location.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "City and country e.g. Bogotá, Colombia",
                        }
                    },
                    "required": ["location"],
                    "additionalProperties": False,
                },
                "strict": True,
            },
        }
    ]
    messages = [
        {"role": "user", "content": "Is it raining in Bellevue?"},
    ]

    response = client.chat.completions.create(
        model=MODEL,
        messages=messages,
        tools=tools,
        max_completion_tokens=2048,
        temperature=0.6,
        stream=stream,
    )

    completion_message = None
    if stream:
        # Accumulate the streamed tool-call deltas into a single call record.
        tool_call = {"function": {"arguments": ""}}

        stop_reason = None
        for chunk in response:
            if chunk.event.delta.type == "tool_call":
                if chunk.event.delta.id:
                    tool_call["id"] = chunk.event.delta.id
                if chunk.event.delta.function.name:
                    print(
                        f"Using tool_id={chunk.event.delta.id} with name={chunk.event.delta.function.name}",
                    )
                    tool_call["function"]["name"] = chunk.event.delta.function.name
                if chunk.event.delta.function.arguments:
                    tool_call["function"][
                        "arguments"
                    ] += chunk.event.delta.function.arguments
                    print(chunk.event.delta.function.arguments, end="", flush=True)

            if chunk.event.stop_reason is not None:
                stop_reason = chunk.event.stop_reason

        completion_message = {
            "role": "assistant",
            "content": {
                "type": "text",
                "text": "",
            },
            "tool_calls": [tool_call],
            "stop_reason": stop_reason,
        }
    else:
        print(response)
        completion_message = response.completion_message.model_dump()

    # Next Turn: execute the requested tool and feed its result back.
    messages.append(completion_message)
    for tool_call in completion_message["tool_calls"]:
        if tool_call["function"]["name"] == "get_weather":
            parsed_args = json.loads(tool_call["function"]["arguments"])
            result = get_weather(**parsed_args)

            messages.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call["id"],
                    "content": result,
                },
            )

    response = client.chat.completions.create(
        model=MODEL,
        messages=messages,
        tools=tools,
        max_completion_tokens=2048,
        temperature=0.6,
        stream=stream,
    )

    if stream:
        for chunk in response:
            print(chunk.event.delta.text, end="", flush=True)
    else:
        print(response)


if __name__ == "__main__":
    run(stream=True)
    run(stream=False)
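With more than one tool, the per-name if chain in the dispatch loop grows quickly; a registry keyed by function name keeps dispatch table-driven. A sketch reusing this file's message shapes (the second tool is hypothetical, and nothing below is part of the commit):

def get_time(location: str) -> str:  # Hypothetical second tool.
    return f"It is currently noon in {location}."


TOOL_REGISTRY = {"get_weather": get_weather, "get_time": get_time}

for tool_call in completion_message["tool_calls"]:
    fn = TOOL_REGISTRY.get(tool_call["function"]["name"])
    if fn is None:
        continue  # Unknown tool: skip rather than crash.
    result = fn(**json.loads(tool_call["function"]["arguments"]))
    messages.append(
        {"role": "tool", "tool_call_id": tool_call["id"], "content": result},
    )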
