diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 75629f2..64c8364 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -2,4 +2,4 @@
 # These owners will be the default owners for everything in
 # the repo. Unless a later match takes precedence,
-* @yanxi0830
+* @yanxi0830
\ No newline at end of file
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3b286e5..eb09427 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,18 +1,23 @@
 name: CI
 on:
   push:
-    branches:
-      - main
+    branches-ignore:
+      - 'generated'
+      - 'codegen/**'
+      - 'integrated/**'
+      - 'stl-preview-head/**'
+      - 'stl-preview-base/**'
   pull_request:
-    branches:
-      - main
-      - next
+    branches-ignore:
+      - 'stl-preview-head/**'
+      - 'stl-preview-base/**'
 
 jobs:
   lint:
+    timeout-minutes: 10
     name: lint
-    runs-on: ubuntu-latest
-
+    runs-on: ${{ github.repository == 'stainless-sdks/llama-api-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+    if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
     steps:
       - uses: actions/checkout@v4
 
@@ -30,10 +35,49 @@ jobs:
       - name: Run lints
         run: ./scripts/lint
 
+  build:
+    if: github.repository == 'stainless-sdks/llama-api-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork)
+    timeout-minutes: 10
+    name: build
+    permissions:
+      contents: read
+      id-token: write
+    runs-on: depot-ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install Rye
+        run: |
+          curl -sSf https://rye.astral.sh/get | bash
+          echo "$HOME/.rye/shims" >> $GITHUB_PATH
+        env:
+          RYE_VERSION: '0.44.0'
+          RYE_INSTALL_OPTION: '--yes'
+
+      - name: Install dependencies
+        run: rye sync --all-features
+
+      - name: Run build
+        run: rye build
+
+      - name: Get GitHub OIDC Token
+        id: github-oidc
+        uses: actions/github-script@v6
+        with:
+          script: core.setOutput('github_token', await core.getIDToken());
+
+      - name: Upload tarball
+        env:
+          URL: https://pkg.stainless.com/s
+          AUTH: ${{ steps.github-oidc.outputs.github_token }}
+          SHA: ${{ github.sha }}
+        run: ./scripts/utils/upload-artifact.sh
+
   test:
+    timeout-minutes: 10
     name: test
-    runs-on: ubuntu-latest
-
+    runs-on: ${{ github.repository == 'stainless-sdks/llama-api-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+    if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
     steps:
       - uses: actions/checkout@v4
 
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
new file mode 100644
index 0000000..24fe5b9
--- /dev/null
+++ b/.github/workflows/publish-pypi.yml
@@ -0,0 +1,31 @@
+# This workflow is triggered when a GitHub release is created.
+# It can also be run manually to re-publish to PyPI in case it failed for some reason.
+# You can run this workflow by navigating to https://www.github.com/meta-llama/llama-api-python/actions/workflows/publish-pypi.yml
+name: Publish PyPI
+on:
+  workflow_dispatch:
+
+  release:
+    types: [published]
+
+jobs:
+  publish:
+    name: publish
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install Rye
+        run: |
+          curl -sSf https://rye.astral.sh/get | bash
+          echo "$HOME/.rye/shims" >> $GITHUB_PATH
+        env:
+          RYE_VERSION: '0.44.0'
+          RYE_INSTALL_OPTION: '--yes'
+
+      - name: Publish to PyPI
+        run: |
+          bash ./bin/publish-pypi
+        env:
+          PYPI_TOKEN: ${{ secrets.LLAMA_API_CLIENT_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
new file mode 100644
index 0000000..0de5113
--- /dev/null
+++ b/.github/workflows/release-doctor.yml
@@ -0,0 +1,21 @@
+name: Release Doctor
+on:
+  pull_request:
+    branches:
+      - main
+  workflow_dispatch:
+
+jobs:
+  release_doctor:
+    name: release doctor
+    runs-on: ubuntu-latest
+    if: github.repository == 'meta-llama/llama-api-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Check release environment
+        run: |
+          bash ./bin/check-release-environment
+        env:
+          PYPI_TOKEN: ${{ secrets.LLAMA_API_CLIENT_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
new file mode 100644
index 0000000..10f3091
--- /dev/null
+++ b/.release-please-manifest.json
@@ -0,0 +1,3 @@
+{
+  ".": "0.2.0"
+}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index b2c07e8..66293d0 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 4
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/meta%2Fllama-api-bfa0267b010dcc4b39e62dfbd698ac6f9421f3212c44b3408b9b154bd6c67a8b.yml
 openapi_spec_hash: 7f424537bc7ea7638e3934ef721b8d71
-config_hash: 3ae62c8625d97ed8a867ab369f591724
+config_hash: d121aca03b5b9ad503ffce2b0860b0d6
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..f9c7e8b
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,31 @@
+# Changelog
+
+## 0.2.0 (2025-07-15)
+
+Full Changelog: [v0.1.2...v0.2.0](https://github.com/meta-llama/llama-api-python/compare/v0.1.2...v0.2.0)
+
+### Features
+
+* clean up environment call outs ([4afbd01](https://github.com/meta-llama/llama-api-python/commit/4afbd01ed735b93d8b4c8c282881f2b78673995c))
+
+
+### Bug Fixes
+
+* **client:** don't send Content-Type header on GET requests ([efec88a](https://github.com/meta-llama/llama-api-python/commit/efec88aa519948ea58ee629507cd91e9af90c1c8))
+* **parsing:** correctly handle nested discriminated unions ([b627686](https://github.com/meta-llama/llama-api-python/commit/b6276863bea64a7127cdb71b6fbb02534d2e762b))
+
+
+### Chores
+
+* add examples ([abfa065](https://github.com/meta-llama/llama-api-python/commit/abfa06572191caeaa33603c846d5953aa453521e))
+* **internal:** bump pinned h11 dep ([d40e1b1](https://github.com/meta-llama/llama-api-python/commit/d40e1b1d736ec5e5fe7e3c65ace9c5d65d038081))
+* **package:** mark python 3.13 as supported ([ef5bc36](https://github.com/meta-llama/llama-api-python/commit/ef5bc36693fa419e3d865e97cae97e7f5df19b1a))
+* **readme:** fix version rendering on pypi ([786f9fb](https://github.com/meta-llama/llama-api-python/commit/786f9fbdb75e54ceac9eaf00d4c4d7002ed97a94))
+* sync repo ([7e697f6](https://github.com/meta-llama/llama-api-python/commit/7e697f6550485728ee00d4fd18800a90fb3592ab))
+* update SDK settings ([de22c0e](https://github.com/meta-llama/llama-api-python/commit/de22c0ece778c938f75e4717baf3e628c7a45087))
+
+
+### Documentation
+
+* code of conduct ([efe1af2](https://github.com/meta-llama/llama-api-python/commit/efe1af28fb893fa657394504dc8c513b20ac589a))
+* readme and license ([d53eafd](https://github.com/meta-llama/llama-api-python/commit/d53eafd104749e9483015676fba150091e754928))
diff --git a/README.md b/README.md
index c998965..01651c9 100644
--- a/README.md
+++ b/README.md
@@ -1,15 +1,17 @@
 # Llama API Client Python API library
-[![PyPI version](https://img.shields.io/pypi/v/llama_api_client.svg)](https://pypi.org/project/llama_api_client/)
+
+[![PyPI version](https://img.shields.io/pypi/v/llama_api_client.svg?label=pypi%20(stable))](https://pypi.org/project/llama_api_client/)
 
 The Llama API Client Python library provides convenient access to the Llama API Client REST API from any Python 3.8+ application. The library includes type definitions for all request params and response fields, and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx).
+It is generated with [Stainless](https://www.stainless.com/).
 
 ## Documentation
 
-The REST API documentation can be found on [https://llama.developer.meta.com/docs](https://llama.developer.meta.com/docs). The full API of this library can be found in [api.md](api.md).
+The REST API documentation can be found on [llama.developer.meta.com](https://llama.developer.meta.com/docs). The full API of this library can be found in [api.md](api.md).
 
 ## Installation
 
@@ -78,6 +80,45 @@ asyncio.run(main())
 
 Functionality between the synchronous and asynchronous clients is otherwise identical.
 
+### With aiohttp
+
+By default, the async client uses `httpx` for HTTP requests. However, for improved concurrency performance you may also use `aiohttp` as the HTTP backend.
+
+You can enable this by installing `aiohttp`:
+
+```sh
+# install from the production repo
+pip install 'llama_api_client[aiohttp] @ git+ssh://git@github.com/meta-llama/llama-api-python.git'
+```
+
+Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`:
+
+```python
+import asyncio
+from llama_api_client import DefaultAioHttpClient
+from llama_api_client import AsyncLlamaAPIClient
+
+
+async def main() -> None:
+    async with AsyncLlamaAPIClient(
+        api_key="My API Key",
+        http_client=DefaultAioHttpClient(),
+    ) as client:
+        create_chat_completion_response = await client.chat.completions.create(
+            messages=[
+                {
+                    "content": "string",
+                    "role": "user",
+                }
+            ],
+            model="model",
+        )
+        print(create_chat_completion_response.completion_message)
+
+
+asyncio.run(main())
+```
+
 ## Streaming responses
 
 We provide support for streaming responses using Server Side Events (SSE).
@@ -212,7 +253,7 @@ client.with_options(max_retries=5).chat.completions.create(
 ### Timeouts
 
 By default requests time out after 1 minute. You can configure this with a `timeout` option,
-which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object:
+which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object:
 
 ```python
 from llama_api_client import LlamaAPIClient
@@ -288,7 +329,7 @@ response = client.chat.completions.with_raw_response.create(
 print(response.headers.get('X-My-Header'))
 
 completion = response.parse()  # get the object that `chat.completions.create()` would have returned
-print(completion.completion_message)
+print(completion.id)
 ```
 
 These methods return an [`APIResponse`](https://github.com/meta-llama/llama-api-python/tree/main/src/llama_api_client/_response.py) object.
@@ -427,4 +468,4 @@ Python 3.8 or higher.
 See [the contributing documentation](./CONTRIBUTING.md).
 
 ## License
-Llama API Python SDK is MIT licensed, as found in the LICENSE file.
+Llama API Python SDK is MIT licensed, as found in the LICENSE file.
\ No newline at end of file
diff --git a/bin/check-release-environment b/bin/check-release-environment
new file mode 100644
index 0000000..b845b0f
--- /dev/null
+++ b/bin/check-release-environment
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+errors=()
+
+if [ -z "${PYPI_TOKEN}" ]; then
+  errors+=("The PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.")
+fi
+
+lenErrors=${#errors[@]}
+
+if [[ lenErrors -gt 0 ]]; then
+  echo -e "Found the following errors in the release environment:\n"
+
+  for error in "${errors[@]}"; do
+    echo -e "- $error\n"
+  done
+
+  exit 1
+fi
+
+echo "The environment is ready to push releases!"
diff --git a/pyproject.toml b/pyproject.toml
index c0a6bf2..f25ab95 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "llama_api_client"
-version = "0.1.2"
+version = "0.2.0"
 description = "The official Python library for the llama-api-client API"
 dynamic = ["readme"]
 license = "MIT"
@@ -24,6 +24,7 @@ classifiers = [
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
+    "Programming Language :: Python :: 3.13",
     "Operating System :: OS Independent",
     "Operating System :: POSIX",
     "Operating System :: MacOS",
@@ -38,7 +39,7 @@ Homepage = "https://github.com/meta-llama/llama-api-python"
 Repository = "https://github.com/meta-llama/llama-api-python"
 
 [project.optional-dependencies]
-aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.6"]
+aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.8"]
 
 [tool.rye]
 managed = true
diff --git a/release-please-config.json b/release-please-config.json
new file mode 100644
index 0000000..e5fde35
--- /dev/null
+++ b/release-please-config.json
@@ -0,0 +1,66 @@
+{
+  "packages": {
+    ".": {}
+  },
+  "$schema": "https://raw.githubusercontent.com/stainless-api/release-please/main/schemas/config.json",
+  "include-v-in-tag": true,
+  "include-component-in-tag": false,
+  "versioning": "prerelease",
+  "prerelease": true,
+  "bump-minor-pre-major": true,
+  "bump-patch-for-minor-pre-major": false,
+  "pull-request-header": "Automated Release PR",
+  "pull-request-title-pattern": "release: ${version}",
+  "changelog-sections": [
+    {
+      "type": "feat",
+      "section": "Features"
+    },
+    {
+      "type": "fix",
+      "section": "Bug Fixes"
+    },
+    {
+      "type": "perf",
+      "section": "Performance Improvements"
+    },
+    {
+      "type": "revert",
+      "section": "Reverts"
+    },
+    {
+      "type": "chore",
+      "section": "Chores"
+    },
+    {
+      "type": "docs",
+      "section": "Documentation"
+    },
+    {
+      "type": "style",
+      "section": "Styles"
+    },
+    {
+      "type": "refactor",
+      "section": "Refactors"
+    },
+    {
+      "type": "test",
+      "section": "Tests",
+      "hidden": true
+    },
+    {
+      "type": "build",
+      "section": "Build System"
+    },
+    {
+      "type": "ci",
+      "section": "Continuous Integration",
+      "hidden": true
+    }
+  ],
+  "release-type": "python",
+  "extra-files": [
+    "src/llama_api_client/_version.py"
+  ]
+}
\ No newline at end of file
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 7d3944d..ab012fd 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -48,15 +48,15 @@ filelock==3.12.4
 frozenlist==1.6.2
     # via aiohttp
     # via aiosignal
-h11==0.14.0
+h11==0.16.0
     # via httpcore
-httpcore==1.0.2
+httpcore==1.0.9
     # via httpx
 httpx==0.28.1
     # via httpx-aiohttp
     # via llama-api-client
     # via respx
-httpx-aiohttp==0.1.6
+httpx-aiohttp==0.1.8
     # via llama-api-client
 idna==3.4
     # via anyio
diff --git a/requirements.lock b/requirements.lock
index 8dca115..78b24f2 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -36,14 +36,14 @@ exceptiongroup==1.2.2
 frozenlist==1.6.2
     # via aiohttp
     # via aiosignal
-h11==0.14.0
+h11==0.16.0
    # via httpcore
-httpcore==1.0.2
+httpcore==1.0.9
    # via httpx
 httpx==0.28.1
    # via httpx-aiohttp
    # via llama-api-client
-httpx-aiohttp==0.1.6
+httpx-aiohttp==0.1.8
    # via llama-api-client
 idna==3.4
    # via anyio
diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh
index 8079ae7..1bfcdc2 100755
--- a/scripts/utils/upload-artifact.sh
+++ b/scripts/utils/upload-artifact.sh
@@ -1,7 +1,9 @@
 #!/usr/bin/env bash
 set -exuo pipefail
 
-RESPONSE=$(curl -X POST "$URL" \
+FILENAME=$(basename dist/*.whl)
+
+RESPONSE=$(curl -X POST "$URL?filename=$FILENAME" \
   -H "Authorization: Bearer $AUTH" \
   -H "Content-Type: application/json")
 
@@ -12,13 +14,13 @@ if [[ "$SIGNED_URL" == "null" ]]; then
   exit 1
 fi
 
-UPLOAD_RESPONSE=$(tar -cz . | curl -v -X PUT \
-  -H "Content-Type: application/gzip" \
-  --data-binary @- "$SIGNED_URL" 2>&1)
+UPLOAD_RESPONSE=$(curl -v -X PUT \
+  -H "Content-Type: binary/octet-stream" \
+  --data-binary "@dist/$FILENAME" "$SIGNED_URL" 2>&1)
 
 if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then
   echo -e "\033[32mUploaded build to Stainless storage.\033[0m"
-  echo -e "\033[32mInstallation: pip install --pre 'https://pkg.stainless.com/s/llama-api-python/$SHA'\033[0m"
+  echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/llama-api-python/$SHA/$FILENAME'\033[0m"
 else
   echo -e "\033[31mFailed to upload artifact.\033[0m"
   exit 1
diff --git a/src/llama_api_client/_base_client.py b/src/llama_api_client/_base_client.py
index cc9ca21..2d0c0e6 100644
--- a/src/llama_api_client/_base_client.py
+++ b/src/llama_api_client/_base_client.py
@@ -529,6 +529,15 @@ def _build_request(
             # work around https://github.com/encode/httpx/discussions/2880
             kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")}
 
+        is_body_allowed = options.method.lower() != "get"
+
+        if is_body_allowed:
+            kwargs["json"] = json_data if is_given(json_data) else None
+            kwargs["files"] = files
+        else:
+            headers.pop("Content-Type", None)
+            kwargs.pop("data", None)
+
         # TODO: report this error to httpx
         return self._client.build_request(  # pyright: ignore[reportUnknownMemberType]
             headers=headers,
@@ -540,8 +549,6 @@
             # so that passing a `TypedDict` doesn't cause an error.
            # https://github.com/microsoft/pyright/issues/3526#event-6715453066
             params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None,
-            json=json_data if is_given(json_data) else None,
-            files=files,
             **kwargs,
         )
 
diff --git a/src/llama_api_client/_models.py b/src/llama_api_client/_models.py
index 4f21498..528d568 100644
--- a/src/llama_api_client/_models.py
+++ b/src/llama_api_client/_models.py
@@ -2,9 +2,10 @@
 
 import os
 import inspect
-from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast
+from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast
 from datetime import date, datetime
 from typing_extensions import (
+    List,
     Unpack,
     Literal,
     ClassVar,
@@ -366,7 +367,7 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object:
     if type_ is None:
         raise RuntimeError(f"Unexpected field type is None for {key}")
 
-    return construct_type(value=value, type_=type_)
+    return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None))
 
 
 def is_basemodel(type_: type) -> bool:
@@ -420,7 +421,7 @@ def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T:
     return cast(_T, construct_type(value=value, type_=type_))
 
 
-def construct_type(*, value: object, type_: object) -> object:
+def construct_type(*, value: object, type_: object, metadata: Optional[List[Any]] = None) -> object:
     """Loose coercion to the expected type with construction of nested values.
 
     If the given value does not match the expected type then it is returned as-is.
@@ -438,8 +439,10 @@ def construct_type(*, value: object, type_: object) -> object:
         type_ = type_.__value__  # type: ignore[unreachable]
 
     # unwrap `Annotated[T, ...]` -> `T`
-    if is_annotated_type(type_):
-        meta: tuple[Any, ...] = get_args(type_)[1:]
+    if metadata is not None:
+        meta: tuple[Any, ...] = tuple(metadata)
+    elif is_annotated_type(type_):
+        meta = get_args(type_)[1:]
         type_ = extract_type_arg(type_, 0)
     else:
         meta = tuple()
diff --git a/src/llama_api_client/_version.py b/src/llama_api_client/_version.py
index e73d415..37004fb 100644
--- a/src/llama_api_client/_version.py
+++ b/src/llama_api_client/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "llama_api_client"
-__version__ = "0.1.2"
+__version__ = "0.2.0"  # x-release-please-version
diff --git a/src/llama_api/lib/.keep b/src/llama_api_client/lib/.keep
similarity index 100%
rename from src/llama_api/lib/.keep
rename to src/llama_api_client/lib/.keep
diff --git a/src/llama_api_client/resources/chat/completions.py b/src/llama_api_client/resources/chat/completions.py
index 431e369..279b03c 100644
--- a/src/llama_api_client/resources/chat/completions.py
+++ b/src/llama_api_client/resources/chat/completions.py
@@ -298,8 +298,6 @@ def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> CreateChatCompletionResponse | Stream[CreateChatCompletionResponseStreamChunk]:
-        if stream:
-            extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
         return self._post(
             "/chat/completions",
             body=maybe_transform(
@@ -601,8 +599,6 @@ async def create(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> CreateChatCompletionResponse | AsyncStream[CreateChatCompletionResponseStreamChunk]:
-        if stream:
-            extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
         return await self._post(
             "/chat/completions",
             body=await async_maybe_transform(
diff --git a/tests/test_client.py b/tests/test_client.py
index 7537a2b..528de81 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -196,6 +196,7 @@ def test_copy_signature(self) -> None:
             copy_param = copy_signature.parameters.get(name)
             assert copy_param is not None, f"copy() signature is missing the {name} param"
 
+    @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12")
     def test_copy_build_request(self) -> None:
         options = FinalRequestOptions(method="get", url="/foo")
 
@@ -468,7 +469,7 @@ def test_request_extra_query(self) -> None:
     def test_multipart_repeating_array(self, client: LlamaAPIClient) -> None:
         request = client._build_request(
             FinalRequestOptions.construct(
-                method="get",
+                method="post",
                 url="/foo",
                 headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"},
                 json_data={"array": ["foo", "bar"]},
@@ -1048,6 +1049,7 @@ def test_copy_signature(self) -> None:
             copy_param = copy_signature.parameters.get(name)
             assert copy_param is not None, f"copy() signature is missing the {name} param"
 
+    @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12")
     def test_copy_build_request(self) -> None:
         options = FinalRequestOptions(method="get", url="/foo")
 
@@ -1320,7 +1322,7 @@ def test_request_extra_query(self) -> None:
     def test_multipart_repeating_array(self, async_client: AsyncLlamaAPIClient) -> None:
         request = async_client._build_request(
             FinalRequestOptions.construct(
-                method="get",
+                method="post",
                 url="/foo",
                 headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"},
                 json_data={"array": ["foo", "bar"]},
diff --git a/tests/test_models.py b/tests/test_models.py
index 6300be0..64bac16 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -889,3 +889,48 @@ class ModelB(BaseModel):
     )
 
     assert isinstance(m, ModelB)
+
+
+def test_nested_discriminated_union() -> None:
+    class InnerType1(BaseModel):
+        type: Literal["type_1"]
+
+    class InnerModel(BaseModel):
+        inner_value: str
+
+    class InnerType2(BaseModel):
+        type: Literal["type_2"]
+        some_inner_model: InnerModel
+
+    class Type1(BaseModel):
+        base_type: Literal["base_type_1"]
+        value: Annotated[
+            Union[
+                InnerType1,
+                InnerType2,
+            ],
+            PropertyInfo(discriminator="type"),
+        ]
+
+    class Type2(BaseModel):
+        base_type: Literal["base_type_2"]
+
+    T = Annotated[
+        Union[
+            Type1,
+            Type2,
+        ],
+        PropertyInfo(discriminator="base_type"),
+    ]
+
+    model = construct_type(
+        type_=T,
+        value={
+            "base_type": "base_type_1",
+            "value": {
+                "type": "type_2",
+            },
+        },
+    )
+    assert isinstance(model, Type1)
+    assert isinstance(model.value, InnerType2)