Commit 8adfb2c

Merge pull request #241 from restackio/bumpDemo92
Bump 0.0.94
2 parents: 55d8704 + 8028707

7 files changed: +73, -64 lines

production_demo/pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -7,7 +7,7 @@ requires-python = ">=3.10,<3.14"
 readme = "README.md"
 dependencies = [
     "pydantic>=2.10.6",
-    "restack-ai==0.0.91",
+    "restack-ai==0.0.94",
     "watchfiles>=1.0.4",
     "python-dotenv==1.0.1",
     "openai>=1.61.0",

production_demo/src/functions/evaluate.py

Lines changed: 2 additions & 2 deletions

@@ -1,4 +1,4 @@
-from restack_ai.function import function, FunctionFailure, log
+from restack_ai.function import function, NonRetryableError, log
 from openai import OpenAI
 from pydantic import BaseModel
 
@@ -11,7 +11,7 @@ async def llm_evaluate(input: EvaluateInput) -> str:
         client = OpenAI(base_url="http://192.168.205.1:1234/v1/",api_key="llmstudio")
     except Exception as e:
         log.error(f"Failed to create LLM client {e}")
-        raise FunctionFailure(f"Failed to create OpenAI client {e}", non_retryable=True) from e
+        raise NonRetryableError(message=f"Failed to create OpenAI client {e}") from e
 
     prompt = (
         f"Evaluate the following joke for humor, creativity, and originality. "

production_demo/src/functions/function.py

Lines changed: 2 additions & 2 deletions

@@ -1,4 +1,4 @@
-from restack_ai.function import function, log, FunctionFailure
+from restack_ai.function import function, log, RetryableError
 
 tries = 0
 
@@ -14,7 +14,7 @@ async def example_function(input: ExampleFunctionInput) -> str:
 
     if tries == 0:
         tries += 1
-        raise FunctionFailure(message="Simulated failure", non_retryable=False)
+        raise RetryableError(message="Simulated failure")
 
     log.info("example function started", input=input)
     return f"Hello, {input.name}!"

production_demo/src/functions/generate.py

Lines changed: 3 additions & 3 deletions

@@ -1,4 +1,4 @@
-from restack_ai.function import function, FunctionFailure, log
+from restack_ai.function import function, NonRetryableError, log
 from openai import OpenAI
 
 from pydantic import BaseModel
@@ -10,10 +10,10 @@ class GenerateInput(BaseModel):
 async def llm_generate(input: GenerateInput) -> str:
 
     try:
-        client = OpenAI(base_url="http://192.168.205.1:1234/v1/",api_key="llmstudio")
+        client = OpenAI(base_url="http://192.168.178.57:1234/v1/",api_key="llmstudio")
     except Exception as e:
         log.error(f"Failed to create LLM client {e}")
-        raise FunctionFailure(f"Failed to create OpenAI client {e}", non_retryable=True) from e
+        raise NonRetryableError(message=f"Failed to create OpenAI client {e}") from e
 
     try:
         response = client.chat.completions.create(

production_demo/src/services.py

Lines changed: 2 additions & 4 deletions

@@ -8,21 +8,19 @@
 from src.functions.function import example_function
 from src.functions.generate import llm_generate
 from src.functions.evaluate import llm_evaluate
-
 from src.workflows.workflow import ExampleWorkflow, ChildWorkflow
+
 import webbrowser
 
 
 async def main():
-
     await asyncio.gather(
         client.start_service(
             workflows=[ExampleWorkflow, ChildWorkflow],
             functions=[example_function],
             options=ServiceOptions(
                 max_concurrent_workflow_runs=1000
             )
-
         ),
         client.start_service(
             task_queue="llm",
@@ -31,7 +29,7 @@ async def main():
                 rate_limit=1,
                 max_concurrent_function_runs=1
             )
-        )
+        ),
     )
 
 def run_services():
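
Beyond the whitespace cleanup, the added trailing comma closes the second start_service call consistently with the first. Pieced together, main() registers two services in parallel, roughly as sketched below; the construction of client, the import of ServiceOptions, and the functions list of the "llm" service sit outside these hunks and are therefore assumptions.

# Condensed sketch of main() after this commit; client, ServiceOptions and the
# llm-queue functions list come from parts of services.py not shown in the diff.
async def main():
    await asyncio.gather(
        # Default service: both workflows plus the simple example function.
        client.start_service(
            workflows=[ExampleWorkflow, ChildWorkflow],
            functions=[example_function],
            options=ServiceOptions(
                max_concurrent_workflow_runs=1000
            )
        ),
        # Dedicated "llm" task queue, throttled to one run at a time.
        client.start_service(
            task_queue="llm",
            functions=[llm_generate, llm_evaluate],  # assumed; these are imported above, but the list itself is not in the hunks
            options=ServiceOptions(
                rate_limit=1,
                max_concurrent_function_runs=1
            )
        ),
    )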

production_demo/src/workflows/child.py

Lines changed: 29 additions & 23 deletions

@@ -1,6 +1,6 @@
 from datetime import timedelta
 from pydantic import BaseModel, Field
-from restack_ai.workflow import workflow, import_functions, log
+from restack_ai.workflow import workflow, import_functions, log, NonRetryableError, RetryPolicy
 
 with import_functions():
     from src.functions.function import example_function, ExampleFunctionInput
@@ -14,28 +14,34 @@ class ChildWorkflowInput(BaseModel):
 class ChildWorkflow:
     @workflow.run
     async def run(self, input: ChildWorkflowInput):
+
         log.info("ChildWorkflow started")
-        await workflow.step(example_function, input=ExampleFunctionInput(name='John Doe'), start_to_close_timeout=timedelta(minutes=2))
-
-        await workflow.sleep(1)
-
-        generated_text = await workflow.step(
-            function=llm_generate,
-            function_input=GenerateInput(prompt=input.prompt),
-            task_queue="llm",
-            start_to_close_timeout=timedelta(minutes=2)
-        )
-
-        evaluation = await workflow.step(
-            function=llm_evaluate,
-            function_input=EvaluateInput(generated_text=generated_text),
-            task_queue="llm",
-            start_to_close_timeout=timedelta(minutes=5)
-        )
-
-        return {
-            "generated_text": generated_text,
-            "evaluation": evaluation
-        }
+
+        try:
+            await workflow.step(function=example_function, function_input=ExampleFunctionInput(name='John Doe'), start_to_close_timeout=timedelta(minutes=2), retry_policy=RetryPolicy(maximum_attempts=3))
+
+            await workflow.sleep(1)
+
+            generated_text = await workflow.step(
+                function=llm_generate,
+                function_input=GenerateInput(prompt=input.prompt),
+                task_queue="llm",
+                start_to_close_timeout=timedelta(minutes=2)
+            )
+
+            evaluation = await workflow.step(
+                function=llm_evaluate,
+                function_input=EvaluateInput(generated_text=generated_text),
+                task_queue="llm",
+                start_to_close_timeout=timedelta(minutes=5)
+            )
+
+            return {
+                "generated_text": generated_text,
+                "evaluation": evaluation
+            }
+        except Exception as e:
+            log.error(f"ChildWorkflow failed {e}")
+            raise NonRetryableError(message=f"ChildWorkflow failed {e}") from e
 
 
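The retry_policy argument added here is what pairs with the new error classes: on a reasonable reading, example_function's simulated RetryableError is retried up to the configured attempts, while a NonRetryableError (as raised in the functions above) fails the step without further retries. Pulled out of the long line above and reflowed, the new step call is:

# First step of ChildWorkflow.run as introduced in this commit, reformatted for readability;
# RetryPolicy(maximum_attempts=3) caps automatic retries of the simulated failure.
await workflow.step(
    function=example_function,
    function_input=ExampleFunctionInput(name='John Doe'),
    start_to_close_timeout=timedelta(minutes=2),
    retry_policy=RetryPolicy(maximum_attempts=3),
)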

production_demo/src/workflows/workflow.py

Lines changed: 34 additions & 29 deletions

@@ -1,7 +1,7 @@
 import asyncio
 from datetime import timedelta
 from pydantic import BaseModel, Field
-from restack_ai.workflow import workflow, log, workflow_info, import_functions
+from restack_ai.workflow import workflow, log, workflow_info, import_functions, NonRetryableError
 from .child import ChildWorkflow, ChildWorkflowInput
 
 with import_functions():
@@ -14,34 +14,39 @@ class ExampleWorkflowInput(BaseModel):
 class ExampleWorkflow:
     @workflow.run
     async def run(self, input: ExampleWorkflowInput):
-        # use the parent run id to create child workflow ids
-        parent_workflow_id = workflow_info().workflow_id
-
-        tasks = []
-        for i in range(input.amount):
-            log.info(f"Queue ChildWorkflow {i+1} for execution")
-            task = workflow.child_execute(
-                workflow=ChildWorkflow,
-                workflow_id=f"{parent_workflow_id}-child-execute-{i+1}",
-                input=ChildWorkflowInput(name=f"child workflow {i+1}")
+
+        try:
+            # use the parent run id to create child workflow ids
+            parent_workflow_id = workflow_info().workflow_id
+
+            tasks = []
+            for i in range(input.amount):
+                log.info(f"Queue ChildWorkflow {i+1} for execution")
+                task = workflow.child_execute(
+                    workflow=ChildWorkflow,
+                    workflow_id=f"{parent_workflow_id}-child-execute-{i+1}",
+                    workflow_input=ChildWorkflowInput(prompt="Generate a random joke in max 20 words."),
+                )
+                tasks.append(task)
+
+            # Run all child workflows in parallel and wait for their results
+            results = await asyncio.gather(*tasks)
+
+            for i, result in enumerate(results, start=1):
+                log.info(f"ChildWorkflow {i} completed", result=result)
+
+            generated_text = await workflow.step(
+                function=llm_generate,
+                function_input=GenerateInput(prompt=f"Give me the top 3 unique jokes according to the results. {results}"),
+                task_queue="llm",
+                start_to_close_timeout=timedelta(minutes=2)
             )
-            tasks.append(task)
 
-        # Run all child workflows in parallel and wait for their results
-        results = await asyncio.gather(*tasks)
-
-        for i, result in enumerate(results, start=1):
-            log.info(f"ChildWorkflow {i} completed", result=result)
-
-        generated_text = await workflow.step(
-            function=llm_generate,
-            function_input=GenerateInput(prompt=f"Give me the top 3 unique jokes according to the results. {results}"),
-            task_queue="llm",
-            start_to_close_timeout=timedelta(minutes=2)
-        )
-
-        return {
-            "top_jokes": generated_text,
-            "results": results
-        }
+            return {
+                "top_jokes": generated_text,
+                "results": results
+            }
 
+        except Exception as e:
+            log.error(f"ExampleWorkflow failed {e}")
+            raise NonRetryableError(message=f"ExampleWorkflow failed {e}") from e
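
The restructured ExampleWorkflow keeps the same fan-out/fan-in shape as before, now wrapped in try/except and fed a fixed joke prompt per child. Stripped of logging and error handling, the core pattern from the hunk above is equivalent to this compact sketch (a list comprehension instead of the explicit loop; the behaviour is otherwise unchanged):

# Fan-out: one ChildWorkflow per requested amount, ids derived from the parent run id.
parent_workflow_id = workflow_info().workflow_id
tasks = [
    workflow.child_execute(
        workflow=ChildWorkflow,
        workflow_id=f"{parent_workflow_id}-child-execute-{i+1}",
        workflow_input=ChildWorkflowInput(prompt="Generate a random joke in max 20 words."),
    )
    for i in range(input.amount)
]
# Fan-in: run the children in parallel; gather returns their results in queue order.
results = await asyncio.gather(*tasks)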
