From 48f4cca7563d7e658824e40893068ae155ef75d4 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 13 Jan 2026 18:34:15 +0000
Subject: [PATCH 01/23] feat(client): add support for binary request streaming
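
Adds a `content` parameter to the low-level HTTP verb helpers (`post`,
`put`, `patch`, `delete`) so raw bytes, byte arrays, file objects, and
(async) byte iterators can be sent as the request body without JSON or
multipart encoding. Passing raw bytes via `body` still works but now
emits a DeprecationWarning, and `content` is mutually exclusive with
both `body` and `files`.

A minimal usage sketch, mirroring the new tests (the `/upload` path and
API key are placeholders, not a documented endpoint):

    import httpx
    from dedalus_labs import Dedalus

    client = Dedalus(api_key="my-api-key")

    # Bytes are sent as-is; iterators and file objects stream lazily.
    response = client.post(
        "/upload",
        content=b"Hello, this is a test file.",
        cast_to=httpx.Response,
        options={"headers": {"Content-Type": "application/octet-stream"}},
    )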
---
src/dedalus_labs/_base_client.py | 145 ++++++++++++++++++++++--
src/dedalus_labs/_models.py | 17 ++-
src/dedalus_labs/_types.py | 9 ++
tests/test_client.py | 187 ++++++++++++++++++++++++++++++-
4 files changed, 344 insertions(+), 14 deletions(-)
diff --git a/src/dedalus_labs/_base_client.py b/src/dedalus_labs/_base_client.py
index c8d001a..42f1a46 100644
--- a/src/dedalus_labs/_base_client.py
+++ b/src/dedalus_labs/_base_client.py
@@ -9,6 +9,7 @@
import inspect
import logging
import platform
+import warnings
import email.utils
from types import TracebackType
from random import random
@@ -51,9 +52,11 @@
ResponseT,
AnyMapping,
PostParser,
+ BinaryTypes,
RequestFiles,
HttpxSendArgs,
RequestOptions,
+ AsyncBinaryTypes,
HttpxRequestFiles,
ModelBuilderProtocol,
not_given,
@@ -477,8 +480,19 @@ def _build_request(
retries_taken: int = 0,
) -> httpx.Request:
if log.isEnabledFor(logging.DEBUG):
- log.debug("Request options: %s", model_dump(options, exclude_unset=True))
-
+ log.debug(
+ "Request options: %s",
+ model_dump(
+ options,
+ exclude_unset=True,
+ # Pydantic v1 can't dump every type we support in content, so we exclude it for now.
+ exclude={
+ "content",
+ }
+ if PYDANTIC_V1
+ else {},
+ ),
+ )
kwargs: dict[str, Any] = {}
json_data = options.json_data
@@ -532,7 +546,13 @@ def _build_request(
is_body_allowed = options.method.lower() != "get"
if is_body_allowed:
- if isinstance(json_data, bytes):
+ if options.content is not None and json_data is not None:
+ raise TypeError("Passing both `content` and `json_data` is not supported")
+ if options.content is not None and files is not None:
+ raise TypeError("Passing both `content` and `files` is not supported")
+ if options.content is not None:
+ kwargs["content"] = options.content
+ elif isinstance(json_data, bytes):
kwargs["content"] = json_data
else:
kwargs["json"] = json_data if is_given(json_data) else None
@@ -1194,6 +1214,7 @@ def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: Literal[False] = False,
@@ -1206,6 +1227,7 @@ def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: Literal[True],
@@ -1219,6 +1241,7 @@ def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: bool,
@@ -1231,13 +1254,25 @@ def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: bool = False,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="post", url=path, json_data=body, files=to_httpx_files(files), **options
+ method="post", url=path, json_data=body, content=content, files=to_httpx_files(files), **options
)
return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
@@ -1247,11 +1282,23 @@ def patch(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="patch", url=path, json_data=body, files=to_httpx_files(files), **options
+ method="patch", url=path, json_data=body, content=content, files=to_httpx_files(files), **options
)
return self.request(cast_to, opts)
@@ -1261,11 +1308,23 @@ def put(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="put", url=path, json_data=body, files=to_httpx_files(files), **options
+ method="put", url=path, json_data=body, content=content, files=to_httpx_files(files), **options
)
return self.request(cast_to, opts)
@@ -1275,9 +1334,19 @@ def delete(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: BinaryTypes | None = None,
options: RequestOptions = {},
) -> ResponseT:
- opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options)
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, content=content, **options)
return self.request(cast_to, opts)
def get_api_list(
@@ -1717,6 +1786,7 @@ async def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: Literal[False] = False,
@@ -1729,6 +1799,7 @@ async def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: Literal[True],
@@ -1742,6 +1813,7 @@ async def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: bool,
@@ -1754,13 +1826,25 @@ async def post(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: bool = False,
stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="post", url=path, json_data=body, files=await async_to_httpx_files(files), **options
+ method="post", url=path, json_data=body, content=content, files=await async_to_httpx_files(files), **options
)
return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
@@ -1770,11 +1854,28 @@ async def patch(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="patch", url=path, json_data=body, files=await async_to_httpx_files(files), **options
+ method="patch",
+ url=path,
+ json_data=body,
+ content=content,
+ files=await async_to_httpx_files(files),
+ **options,
)
return await self.request(cast_to, opts)
@@ -1784,11 +1885,23 @@ async def put(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if files is not None and content is not None:
+ raise TypeError("Passing both `files` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
opts = FinalRequestOptions.construct(
- method="put", url=path, json_data=body, files=await async_to_httpx_files(files), **options
+ method="put", url=path, json_data=body, content=content, files=await async_to_httpx_files(files), **options
)
return await self.request(cast_to, opts)
@@ -1798,9 +1911,19 @@ async def delete(
*,
cast_to: Type[ResponseT],
body: Body | None = None,
+ content: AsyncBinaryTypes | None = None,
options: RequestOptions = {},
) -> ResponseT:
- opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options)
+ if body is not None and content is not None:
+ raise TypeError("Passing both `body` and `content` is not supported")
+ if isinstance(body, bytes):
+ warnings.warn(
+ "Passing raw bytes as `body` is deprecated and will be removed in a future version. "
+ "Please pass raw bytes via the `content` parameter instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, content=content, **options)
return await self.request(cast_to, opts)
def get_api_list(
diff --git a/src/dedalus_labs/_models.py b/src/dedalus_labs/_models.py
index 75cad7f..e7642c8 100644
--- a/src/dedalus_labs/_models.py
+++ b/src/dedalus_labs/_models.py
@@ -3,7 +3,20 @@
import os
import inspect
import weakref
-from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Type,
+ Union,
+ Generic,
+ TypeVar,
+ Callable,
+ Iterable,
+ Optional,
+ AsyncIterable,
+ cast,
+)
from datetime import date, datetime
from typing_extensions import (
List,
@@ -788,6 +801,7 @@ class FinalRequestOptionsInput(TypedDict, total=False):
timeout: float | Timeout | None
files: HttpxRequestFiles | None
idempotency_key: str
+ content: Union[bytes, bytearray, IO[bytes], Iterable[bytes], AsyncIterable[bytes], None]
json_data: Body
extra_json: AnyMapping
follow_redirects: bool
@@ -806,6 +820,7 @@ class FinalRequestOptions(pydantic.BaseModel):
post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
follow_redirects: Union[bool, None] = None
+ content: Union[bytes, bytearray, IO[bytes], Iterable[bytes], AsyncIterable[bytes], None] = None
# It should be noted that we cannot use `json` here as that would override
# a BaseModel method in an incompatible fashion.
json_data: Union[Body, None] = None
diff --git a/src/dedalus_labs/_types.py b/src/dedalus_labs/_types.py
index c7221e6..f9b4340 100644
--- a/src/dedalus_labs/_types.py
+++ b/src/dedalus_labs/_types.py
@@ -13,9 +13,11 @@
Mapping,
TypeVar,
Callable,
+ Iterable,
Iterator,
Optional,
Sequence,
+ AsyncIterable,
)
from typing_extensions import (
Set,
@@ -56,6 +58,13 @@
else:
Base64FileInput = Union[IO[bytes], PathLike]
FileContent = Union[IO[bytes], bytes, PathLike] # PathLike is not subscriptable in Python 3.8.
+
+
+# Used for sending raw binary data / streaming data in request bodies
+# e.g. for file uploads without multipart encoding
+BinaryTypes = Union[bytes, bytearray, IO[bytes], Iterable[bytes]]
+AsyncBinaryTypes = Union[bytes, bytearray, IO[bytes], AsyncIterable[bytes]]
+
FileTypes = Union[
# file (or bytes)
FileContent,
diff --git a/tests/test_client.py b/tests/test_client.py
index 7939a7d..dace038 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -8,10 +8,11 @@
import json
import asyncio
import inspect
+import dataclasses
import tracemalloc
-from typing import Any, Union, cast
+from typing import Any, Union, TypeVar, Callable, Iterable, Iterator, Optional, Coroutine, cast
from unittest import mock
-from typing_extensions import Literal
+from typing_extensions import Literal, AsyncIterator, override
import httpx
import pytest
@@ -37,6 +38,7 @@
from .utils import update_env
+T = TypeVar("T")
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
api_key = "My API Key"
@@ -51,6 +53,57 @@ def _low_retry_timeout(*_args: Any, **_kwargs: Any) -> float:
return 0.1
+def mirror_request_content(request: httpx.Request) -> httpx.Response:
+ return httpx.Response(200, content=request.content)
+
+
+# note: we can't use the httpx.MockTransport class as it consumes the request
+# body itself, which means we can't test that the body is read lazily
+class MockTransport(httpx.BaseTransport, httpx.AsyncBaseTransport):
+ def __init__(
+ self,
+ handler: Callable[[httpx.Request], httpx.Response]
+ | Callable[[httpx.Request], Coroutine[Any, Any, httpx.Response]],
+ ) -> None:
+ self.handler = handler
+
+ @override
+ def handle_request(
+ self,
+ request: httpx.Request,
+ ) -> httpx.Response:
+ assert not inspect.iscoroutinefunction(self.handler), "handler must not be a coroutine function"
+ assert inspect.isfunction(self.handler), "handler must be a function"
+ return self.handler(request)
+
+ @override
+ async def handle_async_request(
+ self,
+ request: httpx.Request,
+ ) -> httpx.Response:
+ assert inspect.iscoroutinefunction(self.handler), "handler must be a coroutine function"
+ return await self.handler(request)
+
+
+@dataclasses.dataclass
+class Counter:
+ value: int = 0
+
+
+def _make_sync_iterator(iterable: Iterable[T], counter: Optional[Counter] = None) -> Iterator[T]:
+ for item in iterable:
+ if counter:
+ counter.value += 1
+ yield item
+
+
+async def _make_async_iterator(iterable: Iterable[T], counter: Optional[Counter] = None) -> AsyncIterator[T]:
+ for item in iterable:
+ if counter:
+ counter.value += 1
+ yield item
+
+
def _get_open_connections(client: Dedalus | AsyncDedalus) -> int:
transport = client._client._transport
assert isinstance(transport, httpx.HTTPTransport) or isinstance(transport, httpx.AsyncHTTPTransport)
@@ -510,6 +563,70 @@ def test_multipart_repeating_array(self, client: Dedalus) -> None:
b"",
]
+ @pytest.mark.respx(base_url=base_url)
+ def test_binary_content_upload(self, respx_mock: MockRouter, client: Dedalus) -> None:
+ respx_mock.post("/upload").mock(side_effect=mirror_request_content)
+
+ file_content = b"Hello, this is a test file."
+
+ response = client.post(
+ "/upload",
+ content=file_content,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+
+ def test_binary_content_upload_with_iterator(self) -> None:
+ file_content = b"Hello, this is a test file."
+ counter = Counter()
+ iterator = _make_sync_iterator([file_content], counter=counter)
+
+ def mock_handler(request: httpx.Request) -> httpx.Response:
+ assert counter.value == 0, "the request body should not have been read"
+ return httpx.Response(200, content=request.read())
+
+ with Dedalus(
+ base_url=base_url,
+ api_key=api_key,
+ _strict_response_validation=True,
+ http_client=httpx.Client(transport=MockTransport(handler=mock_handler)),
+ ) as client:
+ response = client.post(
+ "/upload",
+ content=iterator,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+ assert counter.value == 1
+
+ @pytest.mark.respx(base_url=base_url)
+ def test_binary_content_upload_with_body_is_deprecated(self, respx_mock: MockRouter, client: Dedalus) -> None:
+ respx_mock.post("/upload").mock(side_effect=mirror_request_content)
+
+ file_content = b"Hello, this is a test file."
+
+ with pytest.deprecated_call(
+ match="Passing raw bytes as `body` is deprecated and will be removed in a future version. Please pass raw bytes via the `content` parameter instead."
+ ):
+ response = client.post(
+ "/upload",
+ body=file_content,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+
@pytest.mark.respx(base_url=base_url)
def test_basic_union_response(self, respx_mock: MockRouter, client: Dedalus) -> None:
class Model1(BaseModel):
@@ -1394,6 +1511,72 @@ def test_multipart_repeating_array(self, async_client: AsyncDedalus) -> None:
b"",
]
+ @pytest.mark.respx(base_url=base_url)
+ async def test_binary_content_upload(self, respx_mock: MockRouter, async_client: AsyncDedalus) -> None:
+ respx_mock.post("/upload").mock(side_effect=mirror_request_content)
+
+ file_content = b"Hello, this is a test file."
+
+ response = await async_client.post(
+ "/upload",
+ content=file_content,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+
+ async def test_binary_content_upload_with_asynciterator(self) -> None:
+ file_content = b"Hello, this is a test file."
+ counter = Counter()
+ iterator = _make_async_iterator([file_content], counter=counter)
+
+ async def mock_handler(request: httpx.Request) -> httpx.Response:
+ assert counter.value == 0, "the request body should not have been read"
+ return httpx.Response(200, content=await request.aread())
+
+ async with AsyncDedalus(
+ base_url=base_url,
+ api_key=api_key,
+ _strict_response_validation=True,
+ http_client=httpx.AsyncClient(transport=MockTransport(handler=mock_handler)),
+ ) as client:
+ response = await client.post(
+ "/upload",
+ content=iterator,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+ assert counter.value == 1
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_binary_content_upload_with_body_is_deprecated(
+ self, respx_mock: MockRouter, async_client: AsyncDedalus
+ ) -> None:
+ respx_mock.post("/upload").mock(side_effect=mirror_request_content)
+
+ file_content = b"Hello, this is a test file."
+
+ with pytest.deprecated_call(
+ match="Passing raw bytes as `body` is deprecated and will be removed in a future version. Please pass raw bytes via the `content` parameter instead."
+ ):
+ response = await async_client.post(
+ "/upload",
+ body=file_content,
+ cast_to=httpx.Response,
+ options={"headers": {"Content-Type": "application/octet-stream"}},
+ )
+
+ assert response.status_code == 200
+ assert response.request.headers["Content-Type"] == "application/octet-stream"
+ assert response.content == file_content
+
@pytest.mark.respx(base_url=base_url)
async def test_basic_union_response(self, respx_mock: MockRouter, async_client: AsyncDedalus) -> None:
class Model1(BaseModel):
From c72dfca95456904ce446548768b1262387686467 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 16 Jan 2026 18:31:26 +0000
Subject: [PATCH 02/23] chore(internal): update `actions/checkout` version
---
.github/workflows/ci.yml | 6 +++---
.github/workflows/publish-pypi.yml | 2 +-
.github/workflows/release-doctor.yml | 2 +-
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 169eadd..2313c1a 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -19,7 +19,7 @@ jobs:
runs-on: ${{ github.repository == 'stainless-sdks/dedalus-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Install uv
uses: astral-sh/setup-uv@v5
@@ -41,7 +41,7 @@ jobs:
id-token: write
runs-on: ${{ github.repository == 'stainless-sdks/dedalus-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Install uv
uses: astral-sh/setup-uv@v5
@@ -75,7 +75,7 @@ jobs:
runs-on: ${{ github.repository == 'stainless-sdks/dedalus-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Install uv
uses: astral-sh/setup-uv@v5
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
index 0523779..c3cd036 100644
--- a/.github/workflows/publish-pypi.yml
+++ b/.github/workflows/publish-pypi.yml
@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Install uv
uses: astral-sh/setup-uv@v5
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
index 6e22ada..86f7f56 100644
--- a/.github/workflows/release-doctor.yml
+++ b/.github/workflows/release-doctor.yml
@@ -13,7 +13,7 @@ jobs:
if: github.repository == 'dedalus-labs/dedalus-sdk-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Check release environment
run: |
From 38c637af8275faec50a08cd9c7cd7ebd5f47e78d Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 22 Jan 2026 03:58:37 +0000
Subject: [PATCH 03/23] fix(api): default auth server
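
Defaults the auth server base URL to https://as.dedaluslabs.ai when
DEDALUS_AS_URL is unset, raises the default retry count from 0 to 2
(with a longer backoff window), and adds a generated `ocr` resource for
the new `/v1/ocr` endpoint.

A minimal sketch of the new resource, following the generated tests
(the data URI value and API key are placeholders):

    from dedalus_labs import Dedalus

    client = Dedalus(api_key="my-api-key")

    # The document is passed as a data URI with base64-encoded content.
    result = client.ocr.process(
        document={"document_url": "data:application/pdf;base64,..."},
    )
    for page in result.pages:
        print(page.index, page.markdown)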
---
.stats.yml | 8 +-
README.md | 2 +-
api.md | 12 ++
src/dedalus_labs/_client.py | 43 +++-
src/dedalus_labs/_constants.py | 6 +-
src/dedalus_labs/resources/__init__.py | 14 ++
src/dedalus_labs/resources/ocr.py | 200 ++++++++++++++++++
src/dedalus_labs/types/__init__.py | 8 +-
.../chat/chat_completion_functions_param.py | 6 +-
.../types/chat/chat_completion_tool_param.py | 7 +-
.../types/chat/completion_create_params.py | 12 +-
src/dedalus_labs/types/ocr_document_param.py | 16 ++
src/dedalus_labs/types/ocr_page.py | 13 ++
src/dedalus_labs/types/ocr_process_params.py | 16 ++
src/dedalus_labs/types/ocr_response.py | 18 ++
.../types/shared/function_definition.py | 9 +-
.../shared/response_format_json_schema.py | 9 +-
.../shared_params/function_definition.py | 7 +-
.../response_format_json_schema.py | 7 +-
tests/api_resources/chat/test_completions.py | 16 +-
tests/api_resources/test_ocr.py | 116 ++++++++++
tests/test_client.py | 56 ++---
22 files changed, 512 insertions(+), 89 deletions(-)
create mode 100644 src/dedalus_labs/resources/ocr.py
create mode 100644 src/dedalus_labs/types/ocr_document_param.py
create mode 100644 src/dedalus_labs/types/ocr_page.py
create mode 100644 src/dedalus_labs/types/ocr_process_params.py
create mode 100644 src/dedalus_labs/types/ocr_response.py
create mode 100644 tests/api_resources/test_ocr.py
diff --git a/.stats.yml b/.stats.yml
index d40b845..1e99710 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 10
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/dedalus-labs%2Fdedalus-sdk-9543ba4968eb09fe1d5ccf3bcbc0acdc614a53401893cfb15f530d51d7fe952d.yml
-openapi_spec_hash: eebaaecfa11e98efa3c44d709c08cbd6
-config_hash: 1890670c4485d0ade7c70a0c8bd20423
+configured_endpoints: 11
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/dedalus-labs%2Fdedalus-sdk-2158e2dd12dc5bc533e872e1fa4a9bd1627c2f15b0e417aa4645554e045d7054.yml
+openapi_spec_hash: 30d4d077bf498b7634b3e14deb9d0a1d
+config_hash: 5324d9c636d34ebbadb48aca070564b8
diff --git a/README.md b/README.md
index 896e3c6..4639a23 100644
--- a/README.md
+++ b/README.md
@@ -299,7 +299,7 @@ Error codes are as follows:
### Retries
-Certain errors are automatically retried 0 times by default, with a short exponential backoff.
+Certain errors are automatically retried 2 times by default, with a short exponential backoff.
Connection errors (for example, due to a network connectivity problem), 408 Request Timeout, 409 Conflict,
429 Rate Limit, and >=500 Internal errors are all retried by default.
diff --git a/api.md b/api.md
index ffd2c65..887b0d5 100644
--- a/api.md
+++ b/api.md
@@ -92,6 +92,18 @@ Methods:
- client.images.edit(\*\*params) -> ImagesResponse
- client.images.generate(\*\*params) -> ImagesResponse
+# Ocr
+
+Types:
+
+```python
+from dedalus_labs.types import OcrDocument, OcrPage, OcrRequest, OcrResponse
+```
+
+Methods:
+
+- client.ocr.process(\*\*params) -> OcrResponse
+
# Chat
## Completions
diff --git a/src/dedalus_labs/_client.py b/src/dedalus_labs/_client.py
index 59086e8..70e3bf6 100644
--- a/src/dedalus_labs/_client.py
+++ b/src/dedalus_labs/_client.py
@@ -34,7 +34,8 @@
from .lib.mcp import prepare_mcp_request, prepare_mcp_request_sync
if TYPE_CHECKING:
- from .resources import chat, audio, images, models, embeddings
+ from .resources import ocr, chat, audio, images, models, embeddings
+ from .resources.ocr import OcrResource, AsyncOcrResource
from .resources.images import ImagesResource, AsyncImagesResource
from .resources.models import ModelsResource, AsyncModelsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
@@ -121,7 +122,7 @@ def __init__(
self.x_api_key = x_api_key
if as_base_url is None:
- as_base_url = os.environ.get("DEDALUS_AS_URL")
+ as_base_url = os.environ.get("DEDALUS_AS_URL") or "https://as.dedaluslabs.ai"
self.as_base_url = as_base_url
if dedalus_org_id is None:
@@ -211,6 +212,12 @@ def images(self) -> ImagesResource:
return ImagesResource(self)
+ @cached_property
+ def ocr(self) -> OcrResource:
+ from .resources.ocr import OcrResource
+
+ return OcrResource(self)
+
@cached_property
def chat(self) -> ChatResource:
from .resources.chat import ChatResource
@@ -435,7 +442,7 @@ def __init__(
self.x_api_key = x_api_key
if as_base_url is None:
- as_base_url = os.environ.get("DEDALUS_AS_URL")
+ as_base_url = os.environ.get("DEDALUS_AS_URL") or "https://as.dedaluslabs.ai"
self.as_base_url = as_base_url
if dedalus_org_id is None:
@@ -525,6 +532,12 @@ def images(self) -> AsyncImagesResource:
return AsyncImagesResource(self)
+ @cached_property
+ def ocr(self) -> AsyncOcrResource:
+ from .resources.ocr import AsyncOcrResource
+
+ return AsyncOcrResource(self)
+
@cached_property
def chat(self) -> AsyncChatResource:
from .resources.chat import AsyncChatResource
@@ -717,6 +730,12 @@ def images(self) -> images.ImagesResourceWithRawResponse:
return ImagesResourceWithRawResponse(self._client.images)
+ @cached_property
+ def ocr(self) -> ocr.OcrResourceWithRawResponse:
+ from .resources.ocr import OcrResourceWithRawResponse
+
+ return OcrResourceWithRawResponse(self._client.ocr)
+
@cached_property
def chat(self) -> chat.ChatResourceWithRawResponse:
from .resources.chat import ChatResourceWithRawResponse
@@ -754,6 +773,12 @@ def images(self) -> images.AsyncImagesResourceWithRawResponse:
return AsyncImagesResourceWithRawResponse(self._client.images)
+ @cached_property
+ def ocr(self) -> ocr.AsyncOcrResourceWithRawResponse:
+ from .resources.ocr import AsyncOcrResourceWithRawResponse
+
+ return AsyncOcrResourceWithRawResponse(self._client.ocr)
+
@cached_property
def chat(self) -> chat.AsyncChatResourceWithRawResponse:
from .resources.chat import AsyncChatResourceWithRawResponse
@@ -791,6 +816,12 @@ def images(self) -> images.ImagesResourceWithStreamingResponse:
return ImagesResourceWithStreamingResponse(self._client.images)
+ @cached_property
+ def ocr(self) -> ocr.OcrResourceWithStreamingResponse:
+ from .resources.ocr import OcrResourceWithStreamingResponse
+
+ return OcrResourceWithStreamingResponse(self._client.ocr)
+
@cached_property
def chat(self) -> chat.ChatResourceWithStreamingResponse:
from .resources.chat import ChatResourceWithStreamingResponse
@@ -828,6 +859,12 @@ def images(self) -> images.AsyncImagesResourceWithStreamingResponse:
return AsyncImagesResourceWithStreamingResponse(self._client.images)
+ @cached_property
+ def ocr(self) -> ocr.AsyncOcrResourceWithStreamingResponse:
+ from .resources.ocr import AsyncOcrResourceWithStreamingResponse
+
+ return AsyncOcrResourceWithStreamingResponse(self._client.ocr)
+
@cached_property
def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
from .resources.chat import AsyncChatResourceWithStreamingResponse
diff --git a/src/dedalus_labs/_constants.py b/src/dedalus_labs/_constants.py
index 733a5e9..6ddf2c7 100644
--- a/src/dedalus_labs/_constants.py
+++ b/src/dedalus_labs/_constants.py
@@ -7,8 +7,8 @@
# default timeout is 1 minute
DEFAULT_TIMEOUT = httpx.Timeout(timeout=60, connect=5.0)
-DEFAULT_MAX_RETRIES = 0
+DEFAULT_MAX_RETRIES = 2
DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=100, max_keepalive_connections=20)
-INITIAL_RETRY_DELAY = 0.1
-MAX_RETRY_DELAY = 3.0
+INITIAL_RETRY_DELAY = 0.5
+MAX_RETRY_DELAY = 8.0
diff --git a/src/dedalus_labs/resources/__init__.py b/src/dedalus_labs/resources/__init__.py
index a63a918..566bc5c 100644
--- a/src/dedalus_labs/resources/__init__.py
+++ b/src/dedalus_labs/resources/__init__.py
@@ -1,5 +1,13 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from .ocr import (
+ OcrResource,
+ AsyncOcrResource,
+ OcrResourceWithRawResponse,
+ AsyncOcrResourceWithRawResponse,
+ OcrResourceWithStreamingResponse,
+ AsyncOcrResourceWithStreamingResponse,
+)
from .chat import (
ChatResource,
AsyncChatResource,
@@ -66,6 +74,12 @@
"AsyncImagesResourceWithRawResponse",
"ImagesResourceWithStreamingResponse",
"AsyncImagesResourceWithStreamingResponse",
+ "OcrResource",
+ "AsyncOcrResource",
+ "OcrResourceWithRawResponse",
+ "AsyncOcrResourceWithRawResponse",
+ "OcrResourceWithStreamingResponse",
+ "AsyncOcrResourceWithStreamingResponse",
"ChatResource",
"AsyncChatResource",
"ChatResourceWithRawResponse",
diff --git a/src/dedalus_labs/resources/ocr.py b/src/dedalus_labs/resources/ocr.py
new file mode 100644
index 0000000..c0046f0
--- /dev/null
+++ b/src/dedalus_labs/resources/ocr.py
@@ -0,0 +1,200 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..types import ocr_process_params
+from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.ocr_response import OcrResponse
+from ..types.ocr_document_param import OcrDocumentParam
+
+__all__ = ["OcrResource", "AsyncOcrResource"]
+
+
+class OcrResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> OcrResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/dedalus-labs/dedalus-sdk-python#accessing-raw-response-data-eg-headers
+ """
+ return OcrResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> OcrResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/dedalus-labs/dedalus-sdk-python#with_streaming_response
+ """
+ return OcrResourceWithStreamingResponse(self)
+
+ def process(
+ self,
+ *,
+ document: OcrDocumentParam,
+ model: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ idempotency_key: str | None = None,
+ ) -> OcrResponse:
+ """
+ Process a document through Mistral OCR.
+
+ Extracts text from PDFs and images, returning markdown-formatted content.
+
+ Args:
+ document: Document input for OCR.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ return self._post(
+ "/v1/ocr",
+ body=maybe_transform(
+ {
+ "document": document,
+ "model": model,
+ },
+ ocr_process_params.OcrProcessParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=OcrResponse,
+ )
+
+
+class AsyncOcrResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncOcrResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/dedalus-labs/dedalus-sdk-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncOcrResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncOcrResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/dedalus-labs/dedalus-sdk-python#with_streaming_response
+ """
+ return AsyncOcrResourceWithStreamingResponse(self)
+
+ async def process(
+ self,
+ *,
+ document: OcrDocumentParam,
+ model: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ idempotency_key: str | None = None,
+ ) -> OcrResponse:
+ """
+ Process a document through Mistral OCR.
+
+ Extracts text from PDFs and images, returning markdown-formatted content.
+
+ Args:
+ document: Document input for OCR.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ return await self._post(
+ "/v1/ocr",
+ body=await async_maybe_transform(
+ {
+ "document": document,
+ "model": model,
+ },
+ ocr_process_params.OcrProcessParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=OcrResponse,
+ )
+
+
+class OcrResourceWithRawResponse:
+ def __init__(self, ocr: OcrResource) -> None:
+ self._ocr = ocr
+
+ self.process = to_raw_response_wrapper(
+ ocr.process,
+ )
+
+
+class AsyncOcrResourceWithRawResponse:
+ def __init__(self, ocr: AsyncOcrResource) -> None:
+ self._ocr = ocr
+
+ self.process = async_to_raw_response_wrapper(
+ ocr.process,
+ )
+
+
+class OcrResourceWithStreamingResponse:
+ def __init__(self, ocr: OcrResource) -> None:
+ self._ocr = ocr
+
+ self.process = to_streamed_response_wrapper(
+ ocr.process,
+ )
+
+
+class AsyncOcrResourceWithStreamingResponse:
+ def __init__(self, ocr: AsyncOcrResource) -> None:
+ self._ocr = ocr
+
+ self.process = async_to_streamed_response_wrapper(
+ ocr.process,
+ )
diff --git a/src/dedalus_labs/types/__init__.py b/src/dedalus_labs/types/__init__.py
index 40934bf..d0ea1fb 100644
--- a/src/dedalus_labs/types/__init__.py
+++ b/src/dedalus_labs/types/__init__.py
@@ -24,8 +24,12 @@
ResponseFormatJSONObject as ResponseFormatJSONObject,
ResponseFormatJSONSchema as ResponseFormatJSONSchema,
)
+from .ocr_page import OcrPage as OcrPage
+from .ocr_response import OcrResponse as OcrResponse
from .images_response import ImagesResponse as ImagesResponse
from .image_edit_params import ImageEditParams as ImageEditParams
+from .ocr_document_param import OcrDocumentParam as OcrDocumentParam
+from .ocr_process_params import OcrProcessParams as OcrProcessParams
from .list_models_response import ListModelsResponse as ListModelsResponse
from .image_generate_params import ImageGenerateParams as ImageGenerateParams
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
@@ -39,14 +43,10 @@
if _compat.PYDANTIC_V1:
chat.chat_completion.ChatCompletion.update_forward_refs() # type: ignore
shared.dedalus_model.DedalusModel.update_forward_refs() # type: ignore
- shared.function_definition.FunctionDefinition.update_forward_refs() # type: ignore
shared.mcp_tool_result.MCPToolResult.update_forward_refs() # type: ignore
shared.model_settings.ModelSettings.update_forward_refs() # type: ignore
- shared.response_format_json_schema.ResponseFormatJSONSchema.update_forward_refs() # type: ignore
else:
chat.chat_completion.ChatCompletion.model_rebuild(_parent_namespace_depth=0)
shared.dedalus_model.DedalusModel.model_rebuild(_parent_namespace_depth=0)
- shared.function_definition.FunctionDefinition.model_rebuild(_parent_namespace_depth=0)
shared.mcp_tool_result.MCPToolResult.model_rebuild(_parent_namespace_depth=0)
shared.model_settings.ModelSettings.model_rebuild(_parent_namespace_depth=0)
- shared.response_format_json_schema.ResponseFormatJSONSchema.model_rebuild(_parent_namespace_depth=0)
diff --git a/src/dedalus_labs/types/chat/chat_completion_functions_param.py b/src/dedalus_labs/types/chat/chat_completion_functions_param.py
index 0ebde92..2d101c7 100644
--- a/src/dedalus_labs/types/chat/chat_completion_functions_param.py
+++ b/src/dedalus_labs/types/chat/chat_completion_functions_param.py
@@ -2,6 +2,7 @@
from __future__ import annotations
+from typing import Dict
from typing_extensions import Required, TypedDict
__all__ = ["ChatCompletionFunctionsParam"]
@@ -29,7 +30,7 @@ class ChatCompletionFunctionsParam(TypedDict, total=False):
how to call the function.
"""
- parameters: "JSONObjectInput"
+ parameters: Dict[str, object]
"""The parameters the functions accepts, described as a JSON Schema object.
See the [guide](https://platform.openai.com/docs/guides/function-calling) for
@@ -39,6 +40,3 @@ class ChatCompletionFunctionsParam(TypedDict, total=False):
Omitting `parameters` defines a function with an empty parameter list.
"""
-
-
-from ..shared_params.json_object_input import JSONObjectInput
diff --git a/src/dedalus_labs/types/chat/chat_completion_tool_param.py b/src/dedalus_labs/types/chat/chat_completion_tool_param.py
index da10c13..3dc63cd 100644
--- a/src/dedalus_labs/types/chat/chat_completion_tool_param.py
+++ b/src/dedalus_labs/types/chat/chat_completion_tool_param.py
@@ -4,6 +4,8 @@
from typing_extensions import Literal, Required, TypedDict
+from ..shared_params.function_definition import FunctionDefinition
+
__all__ = ["ChatCompletionToolParam"]
@@ -15,7 +17,7 @@ class ChatCompletionToolParam(TypedDict, total=False):
- function (required): FunctionObject
"""
- function: Required["FunctionDefinition"]
+ function: Required[FunctionDefinition]
"""Schema for FunctionObject.
Fields:
@@ -28,6 +30,3 @@ class ChatCompletionToolParam(TypedDict, total=False):
type: Required[Literal["function"]]
"""The type of the tool. Currently, only `function` is supported."""
-
-
-from ..shared_params.function_definition import FunctionDefinition
diff --git a/src/dedalus_labs/types/chat/completion_create_params.py b/src/dedalus_labs/types/chat/completion_create_params.py
index 91eaec2..7958c39 100644
--- a/src/dedalus_labs/types/chat/completion_create_params.py
+++ b/src/dedalus_labs/types/chat/completion_create_params.py
@@ -13,11 +13,13 @@
from .tool_choice_tool_param import ToolChoiceToolParam
from .prediction_content_param import PredictionContentParam
from ..shared_params.credential import Credential
+from .chat_completion_tool_param import ChatCompletionToolParam
from .chat_completion_audio_param import ChatCompletionAudioParam
from .thinking_config_enabled_param import ThinkingConfigEnabledParam
from ..shared_params.mcp_credentials import MCPCredentials
from ..shared_params.mcp_server_spec import MCPServerSpec
from .thinking_config_disabled_param import ThinkingConfigDisabledParam
+from .chat_completion_functions_param import ChatCompletionFunctionsParam
from .chat_completion_tool_message_param import ChatCompletionToolMessageParam
from .chat_completion_user_message_param import ChatCompletionUserMessageParam
from ..shared_params.response_format_text import ResponseFormatText
@@ -26,6 +28,7 @@
from .chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam
from .chat_completion_developer_message_param import ChatCompletionDeveloperMessageParam
from ..shared_params.response_format_json_object import ResponseFormatJSONObject
+from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema
__all__ = [
"CompletionCreateParamsBase",
@@ -109,7 +112,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
function_call: Optional[str]
"""Wrapper for union variant: function call mode."""
- functions: Optional[Iterable["ChatCompletionFunctionsParam"]]
+ functions: Optional[Iterable[ChatCompletionFunctionsParam]]
"""Deprecated in favor of `tools`.
A list of functions the model may generate JSON inputs for.
@@ -383,7 +386,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
ChatCompletionFunctionMessageParam,
]
-ResponseFormat: TypeAlias = Union[ResponseFormatText, "ResponseFormatJSONSchema", ResponseFormatJSONObject]
+ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject]
class SafetySetting(TypedDict, total=False):
@@ -513,7 +516,7 @@ class ToolCustomToolChatCompletions(TypedDict, total=False):
"""The type of the custom tool. Always `custom`."""
-Tool: TypeAlias = Union["ChatCompletionToolParam", ToolCustomToolChatCompletions]
+Tool: TypeAlias = Union[ChatCompletionToolParam, ToolCustomToolChatCompletions]
class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
@@ -528,9 +531,6 @@ class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
-from .chat_completion_tool_param import ChatCompletionToolParam
from ..shared_params.dedalus_model import DedalusModel
-from .chat_completion_functions_param import ChatCompletionFunctionsParam
from ..shared_params.json_object_input import JSONObjectInput
from ..shared_params.dedalus_model_choice import DedalusModelChoice
-from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema
diff --git a/src/dedalus_labs/types/ocr_document_param.py b/src/dedalus_labs/types/ocr_document_param.py
new file mode 100644
index 0000000..1d0cbe5
--- /dev/null
+++ b/src/dedalus_labs/types/ocr_document_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["OcrDocumentParam"]
+
+
+class OcrDocumentParam(TypedDict, total=False):
+ """Document input for OCR."""
+
+ document_url: Required[str]
+ """Data URI with base64-encoded document"""
+
+ type: str
diff --git a/src/dedalus_labs/types/ocr_page.py b/src/dedalus_labs/types/ocr_page.py
new file mode 100644
index 0000000..41e0005
--- /dev/null
+++ b/src/dedalus_labs/types/ocr_page.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["OcrPage"]
+
+
+class OcrPage(BaseModel):
+ """Single page OCR result."""
+
+ index: int
+
+ markdown: str
diff --git a/src/dedalus_labs/types/ocr_process_params.py b/src/dedalus_labs/types/ocr_process_params.py
new file mode 100644
index 0000000..b671b2b
--- /dev/null
+++ b/src/dedalus_labs/types/ocr_process_params.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from .ocr_document_param import OcrDocumentParam
+
+__all__ = ["OcrProcessParams"]
+
+
+class OcrProcessParams(TypedDict, total=False):
+ document: Required[OcrDocumentParam]
+ """Document input for OCR."""
+
+ model: str
diff --git a/src/dedalus_labs/types/ocr_response.py b/src/dedalus_labs/types/ocr_response.py
new file mode 100644
index 0000000..93db8fc
--- /dev/null
+++ b/src/dedalus_labs/types/ocr_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+
+from .._models import BaseModel
+from .ocr_page import OcrPage
+
+__all__ = ["OcrResponse"]
+
+
+class OcrResponse(BaseModel):
+ """OCR response schema."""
+
+ model: str
+
+ pages: List[OcrPage]
+
+ usage: Optional[Dict[str, object]] = None
diff --git a/src/dedalus_labs/types/shared/function_definition.py b/src/dedalus_labs/types/shared/function_definition.py
index 5277262..97b3308 100644
--- a/src/dedalus_labs/types/shared/function_definition.py
+++ b/src/dedalus_labs/types/shared/function_definition.py
@@ -1,8 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from __future__ import annotations
-
-from typing import Optional
+from typing import Dict, Optional
from ..._models import BaseModel
@@ -32,7 +30,7 @@ class FunctionDefinition(BaseModel):
how to call the function.
"""
- parameters: Optional["JSONObjectInput"] = None
+ parameters: Optional[Dict[str, object]] = None
"""The parameters the functions accepts, described as a JSON Schema object.
See the [guide](https://platform.openai.com/docs/guides/function-calling) for
@@ -51,6 +49,3 @@ class FunctionDefinition(BaseModel):
`true`. Learn more about Structured Outputs in the
[function calling guide](https://platform.openai.com/docs/guides/function-calling).
"""
-
-
-from .json_object_input import JSONObjectInput
diff --git a/src/dedalus_labs/types/shared/response_format_json_schema.py b/src/dedalus_labs/types/shared/response_format_json_schema.py
index 68dc5a9..34a2276 100644
--- a/src/dedalus_labs/types/shared/response_format_json_schema.py
+++ b/src/dedalus_labs/types/shared/response_format_json_schema.py
@@ -1,8 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from __future__ import annotations
-
-from typing import Optional
+from typing import Dict, Optional
from typing_extensions import Literal
from pydantic import Field as FieldInfo
@@ -28,7 +26,7 @@ class JSONSchema(BaseModel):
how to respond in the format.
"""
- schema_: Optional["JSONObjectInput"] = FieldInfo(alias="schema", default=None)
+ schema_: Optional[Dict[str, object]] = FieldInfo(alias="schema", default=None)
"""
The schema for the response format, described as a JSON Schema object. Learn how
to build JSON schemas [here](https://json-schema.org/).
@@ -60,6 +58,3 @@ class ResponseFormatJSONSchema(BaseModel):
type: Literal["json_schema"]
"""The type of response format being defined. Always `json_schema`."""
-
-
-from .json_object_input import JSONObjectInput
diff --git a/src/dedalus_labs/types/shared_params/function_definition.py b/src/dedalus_labs/types/shared_params/function_definition.py
index 1511287..0c1b618 100644
--- a/src/dedalus_labs/types/shared_params/function_definition.py
+++ b/src/dedalus_labs/types/shared_params/function_definition.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Optional
+from typing import Dict, Optional
from typing_extensions import Required, TypedDict
__all__ = ["FunctionDefinition"]
@@ -31,7 +31,7 @@ class FunctionDefinition(TypedDict, total=False):
how to call the function.
"""
- parameters: "JSONObjectInput"
+ parameters: Dict[str, object]
"""The parameters the functions accepts, described as a JSON Schema object.
See the [guide](https://platform.openai.com/docs/guides/function-calling) for
@@ -50,6 +50,3 @@ class FunctionDefinition(TypedDict, total=False):
`true`. Learn more about Structured Outputs in the
[function calling guide](https://platform.openai.com/docs/guides/function-calling).
"""
-
-
-from .json_object_input import JSONObjectInput
diff --git a/src/dedalus_labs/types/shared_params/response_format_json_schema.py b/src/dedalus_labs/types/shared_params/response_format_json_schema.py
index 5ec2bc3..c38aac7 100644
--- a/src/dedalus_labs/types/shared_params/response_format_json_schema.py
+++ b/src/dedalus_labs/types/shared_params/response_format_json_schema.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Optional
+from typing import Dict, Optional
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ResponseFormatJSONSchema", "JSONSchema"]
@@ -24,7 +24,7 @@ class JSONSchema(TypedDict, total=False):
how to respond in the format.
"""
- schema: "JSONObjectInput"
+ schema: Dict[str, object]
"""
The schema for the response format, described as a JSON Schema object. Learn how
to build JSON schemas [here](https://json-schema.org/).
@@ -56,6 +56,3 @@ class ResponseFormatJSONSchema(TypedDict, total=False):
type: Required[Literal["json_schema"]]
"""The type of response format being defined. Always `json_schema`."""
-
-
-from .json_object_input import JSONObjectInput
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index d9c8383..c9ec34a 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -53,7 +53,7 @@ def test_method_create_with_all_params_overload_1(self, client: Dedalus) -> None
{
"name": "name",
"description": "description",
- "parameters": {"foo": "string"},
+ "parameters": {"foo": "bar"},
}
],
generation_config={"foo": "string"},
@@ -123,7 +123,7 @@ def test_method_create_with_all_params_overload_1(self, client: Dedalus) -> None
"function": {
"name": "name",
"description": "description",
- "parameters": {"foo": "string"},
+ "parameters": {"foo": "bar"},
"strict": True,
},
"type": "function",
@@ -200,7 +200,7 @@ def test_method_create_with_all_params_overload_2(self, client: Dedalus) -> None
{
"name": "name",
"description": "description",
- "parameters": {"foo": "string"},
+ "parameters": {"foo": "bar"},
}
],
generation_config={"foo": "string"},
@@ -269,7 +269,7 @@ def test_method_create_with_all_params_overload_2(self, client: Dedalus) -> None
"function": {
"name": "name",
"description": "description",
- "parameters": {"foo": "string"},
+ "parameters": {"foo": "bar"},
"strict": True,
},
"type": "function",
@@ -351,7 +351,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
{
"name": "name",
"description": "description",
- "parameters": {"foo": "string"},
+ "parameters": {"foo": "bar"},
}
],
generation_config={"foo": "string"},
@@ -421,7 +421,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
"function": {
"name": "name",
"description": "description",
- "parameters": {"foo": "string"},
+ "parameters": {"foo": "bar"},
"strict": True,
},
"type": "function",
@@ -498,7 +498,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
{
"name": "name",
"description": "description",
- "parameters": {"foo": "string"},
+ "parameters": {"foo": "bar"},
}
],
generation_config={"foo": "string"},
@@ -567,7 +567,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
"function": {
"name": "name",
"description": "description",
- "parameters": {"foo": "string"},
+ "parameters": {"foo": "bar"},
"strict": True,
},
"type": "function",
diff --git a/tests/api_resources/test_ocr.py b/tests/api_resources/test_ocr.py
new file mode 100644
index 0000000..0d0ee8f
--- /dev/null
+++ b/tests/api_resources/test_ocr.py
@@ -0,0 +1,116 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from dedalus_labs import Dedalus, AsyncDedalus
+from dedalus_labs.types import OcrResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestOcr:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_process(self, client: Dedalus) -> None:
+ ocr = client.ocr.process(
+ document={"document_url": "document_url"},
+ )
+ assert_matches_type(OcrResponse, ocr, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_process_with_all_params(self, client: Dedalus) -> None:
+ ocr = client.ocr.process(
+ document={
+ "document_url": "document_url",
+ "type": "type",
+ },
+ model="model",
+ )
+ assert_matches_type(OcrResponse, ocr, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_process(self, client: Dedalus) -> None:
+ response = client.ocr.with_raw_response.process(
+ document={"document_url": "document_url"},
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ ocr = response.parse()
+ assert_matches_type(OcrResponse, ocr, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_process(self, client: Dedalus) -> None:
+ with client.ocr.with_streaming_response.process(
+ document={"document_url": "document_url"},
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ ocr = response.parse()
+ assert_matches_type(OcrResponse, ocr, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncOcr:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_process(self, async_client: AsyncDedalus) -> None:
+ ocr = await async_client.ocr.process(
+ document={"document_url": "document_url"},
+ )
+ assert_matches_type(OcrResponse, ocr, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_process_with_all_params(self, async_client: AsyncDedalus) -> None:
+ ocr = await async_client.ocr.process(
+ document={
+ "document_url": "document_url",
+ "type": "type",
+ },
+ model="model",
+ )
+ assert_matches_type(OcrResponse, ocr, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_process(self, async_client: AsyncDedalus) -> None:
+ response = await async_client.ocr.with_raw_response.process(
+ document={"document_url": "document_url"},
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ ocr = await response.parse()
+ assert_matches_type(OcrResponse, ocr, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_process(self, async_client: AsyncDedalus) -> None:
+ async with async_client.ocr.with_streaming_response.process(
+ document={"document_url": "document_url"},
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ ocr = await response.parse()
+ assert_matches_type(OcrResponse, ocr, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/test_client.py b/tests/test_client.py
index dace038..f6f2c26 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -145,7 +145,7 @@ def test_copy_default_options(self, client: Dedalus) -> None:
# options that have a default are overridden correctly
copied = client.copy(max_retries=7)
assert copied.max_retries == 7
- assert client.max_retries == 0
+ assert client.max_retries == 2
copied2 = copied.copy(max_retries=6)
assert copied2.max_retries == 6
@@ -882,21 +882,21 @@ class Model(BaseModel):
"remaining_retries,retry_after,timeout",
[
[3, "20", 20],
- [3, "0", 0.1],
- [3, "-10", 0.1],
+ [3, "0", 0.5],
+ [3, "-10", 0.5],
[3, "60", 60],
- [3, "61", 0.1],
+ [3, "61", 0.5],
[3, "Fri, 29 Sep 2023 16:26:57 GMT", 20],
- [3, "Fri, 29 Sep 2023 16:26:37 GMT", 0.1],
- [3, "Fri, 29 Sep 2023 16:26:27 GMT", 0.1],
+ [3, "Fri, 29 Sep 2023 16:26:37 GMT", 0.5],
+ [3, "Fri, 29 Sep 2023 16:26:27 GMT", 0.5],
[3, "Fri, 29 Sep 2023 16:27:37 GMT", 60],
- [3, "Fri, 29 Sep 2023 16:27:38 GMT", 0.1],
- [3, "99999999999999999999999999999999999", 0.1],
- [3, "Zun, 29 Sep 2023 16:26:27 GMT", 0.1],
- [3, "", 0.1],
- [2, "", 0.1 * 2.0],
- [1, "", 0.1 * 4.0],
- [-1100, "", 3], # test large number potentially overflowing
+ [3, "Fri, 29 Sep 2023 16:27:38 GMT", 0.5],
+ [3, "99999999999999999999999999999999999", 0.5],
+ [3, "Zun, 29 Sep 2023 16:26:27 GMT", 0.5],
+ [3, "", 0.5],
+ [2, "", 0.5 * 2.0],
+ [1, "", 0.5 * 4.0],
+ [-1100, "", 8], # test large number potentially overflowing
],
)
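+    # The expected timeouts above follow the client's default backoff:
+    # roughly min(0.5 * 2**attempt, 8.0) seconds plus jitter, with a valid
+    # Retry-After header (0-60s) taking precedence. (Formula inferred from
+    # the parametrized cases, not from the implementation.)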
@mock.patch("time.time", mock.MagicMock(return_value=1696004797))
@@ -906,7 +906,7 @@ def test_parse_retry_after_header(
headers = httpx.Headers({"retry-after": retry_after})
options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
- assert calculated == pytest.approx(timeout, 0.1 * 0.875) # pyright: ignore[reportUnknownMemberType]
+ assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType]
@mock.patch("dedalus_labs._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
@@ -1089,7 +1089,7 @@ def test_copy_default_options(self, async_client: AsyncDedalus) -> None:
# options that have a default are overridden correctly
copied = async_client.copy(max_retries=7)
assert copied.max_retries == 7
- assert async_client.max_retries == 0
+ assert async_client.max_retries == 2
copied2 = copied.copy(max_retries=6)
assert copied2.max_retries == 6
@@ -1847,21 +1847,21 @@ class Model(BaseModel):
"remaining_retries,retry_after,timeout",
[
[3, "20", 20],
- [3, "0", 0.1],
- [3, "-10", 0.1],
+ [3, "0", 0.5],
+ [3, "-10", 0.5],
[3, "60", 60],
- [3, "61", 0.1],
+ [3, "61", 0.5],
[3, "Fri, 29 Sep 2023 16:26:57 GMT", 20],
- [3, "Fri, 29 Sep 2023 16:26:37 GMT", 0.1],
- [3, "Fri, 29 Sep 2023 16:26:27 GMT", 0.1],
+ [3, "Fri, 29 Sep 2023 16:26:37 GMT", 0.5],
+ [3, "Fri, 29 Sep 2023 16:26:27 GMT", 0.5],
[3, "Fri, 29 Sep 2023 16:27:37 GMT", 60],
- [3, "Fri, 29 Sep 2023 16:27:38 GMT", 0.1],
- [3, "99999999999999999999999999999999999", 0.1],
- [3, "Zun, 29 Sep 2023 16:26:27 GMT", 0.1],
- [3, "", 0.1],
- [2, "", 0.1 * 2.0],
- [1, "", 0.1 * 4.0],
- [-1100, "", 3], # test large number potentially overflowing
+ [3, "Fri, 29 Sep 2023 16:27:38 GMT", 0.5],
+ [3, "99999999999999999999999999999999999", 0.5],
+ [3, "Zun, 29 Sep 2023 16:26:27 GMT", 0.5],
+ [3, "", 0.5],
+ [2, "", 0.5 * 2.0],
+ [1, "", 0.5 * 4.0],
+ [-1100, "", 8], # test large number potentially overflowing
],
)
@mock.patch("time.time", mock.MagicMock(return_value=1696004797))
@@ -1871,7 +1871,7 @@ async def test_parse_retry_after_header(
headers = httpx.Headers({"retry-after": retry_after})
options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
calculated = async_client._calculate_retry_timeout(remaining_retries, options, headers)
- assert calculated == pytest.approx(timeout, 0.1 * 0.875) # pyright: ignore[reportUnknownMemberType]
+ assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType]
@mock.patch("dedalus_labs._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
From cf53a9e097577c01785d09ee45e6df4a3745cdec Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 23 Jan 2026 18:14:37 +0000
Subject: [PATCH 04/23] chore(ci): upgrade `actions/github-script`
---
.github/workflows/ci.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2313c1a..e7a016d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -57,7 +57,7 @@ jobs:
- name: Get GitHub OIDC Token
if: github.repository == 'stainless-sdks/dedalus-sdk-python'
id: github-oidc
- uses: actions/github-script@v6
+ uses: actions/github-script@v8
with:
script: core.setOutput('github_token', await core.getIDToken());
From e4e3619990099b43df78e88415e3627daf8c7425 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 28 Jan 2026 16:36:55 +0000
Subject: [PATCH 05/23] fix(docs): fix mcp installation instructions for remote
servers
---
README.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 4639a23..a1ef63c 100644
--- a/README.md
+++ b/README.md
@@ -13,8 +13,8 @@ It is generated with [Stainless](https://www.stainless.com/).
Use the Dedalus MCP Server to enable AI assistants to interact with this API, allowing them to explore endpoints, make test requests, and use documentation to help integrate this SDK into your application.
-[](https://cursor.com/en-US/install-mcp?name=dedalus-labs-mcp&config=eyJuYW1lIjoiZGVkYWx1cy1sYWJzLW1jcCIsInRyYW5zcG9ydCI6InNzZSIsInVybCI6Imh0dHBzOi8vZGVkYWx1cy1zZGsuc3RsbWNwLmNvbS9zc2UifQ)
-[](https://vscode.stainless.com/mcp/%7B%22name%22%3A%22dedalus-labs-mcp%22%2C%22type%22%3A%22sse%22%2C%22url%22%3A%22https%3A%2F%2Fdedalus-sdk.stlmcp.com%2Fsse%22%7D)
+[](https://cursor.com/en-US/install-mcp?name=dedalus-labs-mcp&config=eyJuYW1lIjoiZGVkYWx1cy1sYWJzLW1jcCIsInRyYW5zcG9ydCI6Imh0dHAiLCJ1cmwiOiJodHRwczovL2RlZGFsdXMtc2RrLnN0bG1jcC5jb20iLCJoZWFkZXJzIjp7IngtZGVkYWx1cy1hcGkta2V5IjoiTXkgQVBJIEtleSIsIngtYXBpLWtleSI6Ik15IFggQVBJIEtleSJ9fQ)
+[](https://vscode.stainless.com/mcp/%7B%22name%22%3A%22dedalus-labs-mcp%22%2C%22type%22%3A%22http%22%2C%22url%22%3A%22https%3A%2F%2Fdedalus-sdk.stlmcp.com%22%2C%22headers%22%3A%7B%22x-dedalus-api-key%22%3A%22My%20API%20Key%22%2C%22x-api-key%22%3A%22My%20X%20API%20Key%22%7D%7D)
> Note: You may need to set environment variables in your MCP client.
From 7e6716f61769038e7beb91eabed6d240c3443a9e Mon Sep 17 00:00:00 2001
From: Windsor
Date: Fri, 6 Feb 2026 18:47:53 -0800
Subject: [PATCH 06/23] feat(runner): dependency-aware parallel tool execution
---
src/dedalus_labs/lib/runner/_scheduler.py | 293 ++++++++++
src/dedalus_labs/lib/runner/core.py | 634 ++++++++++------------
tests/test_local_scheduler.py | 278 ++++++++++
3 files changed, 845 insertions(+), 360 deletions(-)
create mode 100644 src/dedalus_labs/lib/runner/_scheduler.py
create mode 100644 tests/test_local_scheduler.py
diff --git a/src/dedalus_labs/lib/runner/_scheduler.py b/src/dedalus_labs/lib/runner/_scheduler.py
new file mode 100644
index 0000000..fe94b3a
--- /dev/null
+++ b/src/dedalus_labs/lib/runner/_scheduler.py
@@ -0,0 +1,293 @@
+# ==============================================================================
+# © 2025 Dedalus Labs, Inc. and affiliates
+# Licensed under MIT
+# github.com/dedalus-labs/dedalus-sdk-python/LICENSE
+# ==============================================================================
+
+"""Dependency-aware local tool scheduler.
+
+When the API server returns ``pending_local_calls`` with dependency
+info, this module topo-sorts and executes them in parallel layers.
+Independent tools fire concurrently; dependent tools wait for their
+prerequisites.
+
+Falls back to sequential execution when the dependencies form a
+cycle (e.g. when the model hallucinated invalid deps).
+
+Functions:
+ execute_local_tools_async -- async path, uses asyncio.gather per layer
+ execute_local_tools_sync -- sync path, sequential within layers
+"""
+
+from __future__ import annotations
+
+import json
+import asyncio
+from typing import Any, Dict, List, Tuple
+
+from graphlib import CycleError, TopologicalSorter
+
+
+def _parse_pending_calls(
+ tool_calls: List[Dict[str, Any]],
+) -> Tuple[Dict[str, Dict[str, Any]], TopologicalSorter]:
+ """Parse pending local calls and build a TopologicalSorter.
+
+ Returns (calls_by_id, sorter). Each entry in calls_by_id has
+ the parsed function name and arguments ready for execution.
+
+ Raises CycleError if dependencies are cyclic.
+ """
+ calls_by_id: Dict[str, Dict[str, Any]] = {}
+ sorter: TopologicalSorter = TopologicalSorter()
+ known_ids = {tc["id"] for tc in tool_calls if "id" in tc}
+
+ for tc in tool_calls:
+ call_id = tc.get("id", "")
+ fn_name = tc["function"]["name"]
+ fn_args_str = tc["function"]["arguments"]
+
+ try:
+ fn_args = json.loads(fn_args_str) if isinstance(fn_args_str, str) else fn_args_str
+ except json.JSONDecodeError:
+ fn_args = {}
+
+ # Filter deps to only known ids in this batch.
+ raw_deps = tc.get("dependencies") or []
+ deps = [dep for dep in raw_deps if dep in known_ids and dep != call_id]
+
+ calls_by_id[call_id] = {"name": fn_name, "args": fn_args, "id": call_id}
+ sorter.add(call_id, *deps)
+
+ sorter.prepare()
+ return calls_by_id, sorter
+
+
+async def execute_local_tools_async(
+ tool_calls: List[Dict[str, Any]],
+ tool_handler: Any,
+ messages: List[Dict[str, Any]],
+ tool_results: List[Dict[str, Any]],
+ tools_called: List[str],
+ step: int,
+ *,
+ verbose: bool = False,
+) -> None:
+ """Execute local tool calls respecting dependencies (async).
+
+ Independent tools within the same topo layer fire concurrently
+ via asyncio.gather. Falls back to sequential on cycle.
+
+ The caller is responsible for appending the assistant message
+ (with tool_calls and any reasoning_content) before calling this.
+ """
+ if not tool_calls:
+ return
+
+ try:
+ calls_by_id, sorter = _parse_pending_calls(tool_calls)
+ except CycleError:
+        # Model returned cyclic deps; fall back to sequential execution.
+ await _execute_sequential_async(
+ tool_calls,
+ tool_handler,
+ messages,
+ tool_results,
+ tools_called,
+ step,
+ verbose,
+ )
+ return
+
+ # Drive the sorter layer by layer.
+ while sorter.is_active():
+ ready = list(sorter.get_ready())
+ if not ready:
+ break
+
+ if len(ready) == 1:
+ # Single tool: no gather overhead.
+ call_id = ready[0]
+ await _run_one_async(
+ calls_by_id[call_id],
+ tool_handler,
+ messages,
+ tool_results,
+ tools_called,
+ step,
+ verbose,
+ )
+ sorter.done(call_id)
+ else:
+ # Multiple independent tools: fire concurrently.
+ results = await asyncio.gather(
+ *[
+ _run_one_async(
+ calls_by_id[call_id],
+ tool_handler,
+ messages,
+ tool_results,
+ tools_called,
+ step,
+ verbose,
+ )
+ for call_id in ready
+ ],
+ return_exceptions=True,
+ )
+
+            for call_id, _result in zip(ready, results):
+                # Exceptions (if any) were already recorded in messages by
+                # _run_one_async; just mark the node done.
+                sorter.done(call_id)
+
+
+def execute_local_tools_sync(
+ tool_calls: List[Dict[str, Any]],
+ tool_handler: Any,
+ messages: List[Dict[str, Any]],
+ tool_results: List[Dict[str, Any]],
+ tools_called: List[str],
+ step: int,
+) -> None:
+ """Execute local tool calls respecting dependencies (sync).
+
+ Executes in topo order, one at a time. No parallelism in sync mode,
+ but ordering is correct.
+
+ The caller is responsible for appending the assistant message
+ (with tool_calls and any reasoning_content) before calling this.
+ """
+ if not tool_calls:
+ return
+
+ try:
+ calls_by_id, sorter = _parse_pending_calls(tool_calls)
+ except CycleError:
+ _execute_sequential_sync(
+ tool_calls,
+ tool_handler,
+ messages,
+ tool_results,
+ tools_called,
+ step,
+ )
+ return
+
+ while sorter.is_active():
+ ready = list(sorter.get_ready())
+ if not ready:
+ break
+ for call_id in ready:
+ _run_one_sync(
+ calls_by_id[call_id],
+ tool_handler,
+ messages,
+ tool_results,
+ tools_called,
+ step,
+ )
+ sorter.done(call_id)
+
+
+# --- Single tool execution ---
+
+
+async def _run_one_async(
+ call: Dict[str, Any],
+ tool_handler: Any,
+ messages: List[Dict[str, Any]],
+ tool_results: List[Dict[str, Any]],
+ tools_called: List[str],
+ step: int,
+ verbose: bool,
+) -> None:
+ """Execute a single tool call and record results."""
+ fn_name = call["name"]
+ fn_args = call["args"]
+ call_id = call["id"]
+
+ try:
+ result = await tool_handler.exec(fn_name, fn_args)
+ tool_results.append({"name": fn_name, "result": result, "step": step})
+ tools_called.append(fn_name)
+ messages.append({"role": "tool", "tool_call_id": call_id, "content": str(result)})
+ if verbose:
+ print(f" Tool {fn_name}: {str(result)[:50]}...") # noqa: T201
+ except Exception as e:
+ tool_results.append({"error": str(e), "name": fn_name, "step": step})
+ messages.append({"role": "tool", "tool_call_id": call_id, "content": f"Error: {e}"})
+ if verbose:
+ print(f" Tool {fn_name} failed: {e}") # noqa: T201
+
+
+def _run_one_sync(
+ call: Dict[str, Any],
+ tool_handler: Any,
+ messages: List[Dict[str, Any]],
+ tool_results: List[Dict[str, Any]],
+ tools_called: List[str],
+ step: int,
+) -> None:
+ """Execute a single tool call synchronously and record results."""
+ fn_name = call["name"]
+ fn_args = call["args"]
+ call_id = call["id"]
+
+ try:
+ result = tool_handler.exec_sync(fn_name, fn_args)
+ tool_results.append({"name": fn_name, "result": result, "step": step})
+ tools_called.append(fn_name)
+ messages.append({"role": "tool", "tool_call_id": call_id, "content": str(result)})
+ except Exception as e:
+ tool_results.append({"error": str(e), "name": fn_name, "step": step})
+ messages.append({"role": "tool", "tool_call_id": call_id, "content": f"Error: {e}"})
+
+
+# --- Sequential fallback ---
+
+
+async def _execute_sequential_async(
+ tool_calls: List[Dict[str, Any]],
+ tool_handler: Any,
+ messages: List[Dict[str, Any]],
+ tool_results: List[Dict[str, Any]],
+ tools_called: List[str],
+ step: int,
+ verbose: bool,
+) -> None:
+ """Fallback: execute all tools sequentially (no dependency ordering)."""
+ for tc in tool_calls:
+ fn_name = tc["function"]["name"]
+ fn_args_str = tc["function"]["arguments"]
+ call_id = tc.get("id", "")
+ try:
+ fn_args = json.loads(fn_args_str) if isinstance(fn_args_str, str) else fn_args_str
+ except json.JSONDecodeError:
+ fn_args = {}
+
+ call = {"name": fn_name, "args": fn_args, "id": call_id}
+ await _run_one_async(call, tool_handler, messages, tool_results, tools_called, step, verbose)
+
+
+def _execute_sequential_sync(
+ tool_calls: List[Dict[str, Any]],
+ tool_handler: Any,
+ messages: List[Dict[str, Any]],
+ tool_results: List[Dict[str, Any]],
+ tools_called: List[str],
+ step: int,
+) -> None:
+ """Fallback: execute all tools sequentially (no dependency ordering)."""
+ for tc in tool_calls:
+ fn_name = tc["function"]["name"]
+ fn_args_str = tc["function"]["arguments"]
+ call_id = tc.get("id", "")
+ try:
+ fn_args = json.loads(fn_args_str) if isinstance(fn_args_str, str) else fn_args_str
+ except json.JSONDecodeError:
+ fn_args = {}
+
+ call = {"name": fn_name, "args": fn_args, "id": call_id}
+ _run_one_sync(call, tool_handler, messages, tool_results, tools_called, step)
diff --git a/src/dedalus_labs/lib/runner/core.py b/src/dedalus_labs/lib/runner/core.py
index 8ed01a4..b02a557 100644
--- a/src/dedalus_labs/lib/runner/core.py
+++ b/src/dedalus_labs/lib/runner/core.py
@@ -6,31 +6,29 @@
from __future__ import annotations
-import json
import asyncio
import inspect
from typing import (
TYPE_CHECKING,
Any,
Dict,
+ Union,
Literal,
Callable,
Iterator,
Protocol,
- AsyncIterator,
Sequence,
- Union,
+ AsyncIterator,
)
-from dataclasses import field, asdict, dataclass
+from dataclasses import field, dataclass
if TYPE_CHECKING:
from ...types.shared.dedalus_model import DedalusModel
-from ..._client import Dedalus, AsyncDedalus
-
+from ..mcp import MCPServerProtocol, serialize_mcp_servers
from .types import Message, ToolCall, JsonValue, ToolResult, PolicyInput, PolicyContext
+from ..._client import Dedalus, AsyncDedalus
from ...types.shared import MCPToolResult
-from ..mcp import serialize_mcp_servers, MCPServerProtocol
# Type alias for mcp_servers parameter - accepts strings, server objects, or mixed lists
MCPServersInput = Union[
@@ -120,22 +118,17 @@ def exec_sync(self, name: str, args: Dict[str, JsonValue]) -> JsonValue:
@dataclass
class _ModelConfig:
- """Model configuration parameters."""
+ """Model routing info + passthrough API kwargs.
+
+ ``api_kwargs`` holds every parameter destined for the chat
+ completions API (temperature, reasoning_effort, thinking, etc.).
+ The runner doesn't interpret most of them — it just forwards
+ them to ``client.chat.completions.create(**api_kwargs)``.
+ """
id: str
- model_list: list[str] | None = None # Store the full model list when provided
- temperature: float | None = None
- max_tokens: int | None = None
- top_p: float | None = None
- frequency_penalty: float | None = None
- presence_penalty: float | None = None
- logit_bias: Dict[str, int] | None = None
- response_format: Dict[str, JsonValue] | type | None = None # Dict or Pydantic model
- agent_attributes: Dict[str, float] | None = None
- model_attributes: Dict[str, Dict[str, float]] | None = None
- tool_choice: str | Dict[str, JsonValue] | None = None
- guardrails: list[Dict[str, JsonValue]] | None = None
- handoff_config: Dict[str, JsonValue] | None = None
+ model_list: list[str] | None = None
+ api_kwargs: Dict[str, Any] = field(default_factory=dict)
@dataclass
@@ -184,6 +177,97 @@ def to_input_list(self) -> list[Message]:
return list(self.messages)
+def _collect_api_kwargs(**params: Any) -> Dict[str, Any]:
+ """Build API kwargs dict from explicit params, filtering out Nones."""
+ return {k: v for k, v in params.items() if v is not None}
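+
+# Illustrative (hypothetical values):
+#     _collect_api_kwargs(temperature=0.2, seed=None) == {"temperature": 0.2}
+# i.e. params left at None never reach the wire.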
+
+
+# Params that DedalusModel may carry and should be extracted.
+_MODEL_EXTRACT_PARAMS = (
+ "temperature",
+ "max_tokens",
+ "top_p",
+ "frequency_penalty",
+ "presence_penalty",
+ "logit_bias",
+ "tool_choice",
+ "reasoning_effort",
+ "thinking",
+ "n",
+ "stop",
+ "stream_options",
+ "logprobs",
+ "top_logprobs",
+ "seed",
+ "service_tier",
+ "parallel_tool_calls",
+ "user",
+ "max_completion_tokens",
+)
+
+
+def _extract_from_dedalus_model(
+ model_obj: Any,
+ api_kwargs: Dict[str, Any],
+) -> bool:
+ """Extract params from a DedalusModel into api_kwargs.
+
+ Explicit values already in api_kwargs take precedence.
+    Returns the model's ``stream`` flag (``False`` if unset).
+ """
+ for param in _MODEL_EXTRACT_PARAMS:
+ if param not in api_kwargs:
+ val = getattr(model_obj, param, None)
+ if val is not None:
+ api_kwargs[param] = val
+
+ # Dedalus-specific: attributes → agent_attributes
+ if "agent_attributes" not in api_kwargs:
+ attrs = getattr(model_obj, "attributes", None)
+ if attrs:
+ api_kwargs["agent_attributes"] = attrs
+
+ return getattr(model_obj, "stream", False)
+
+
+def _parse_model(
+ model: Any,
+ api_kwargs: Dict[str, Any],
+ stream: bool,
+) -> tuple[Any, list[Any], bool]:
+ """Parse model param into (model_name, model_list, stream).
+
+ Handles strings, DedalusModel objects, and lists of either.
+ Extracts model-embedded params into api_kwargs.
+ """
+ if isinstance(model, list):
+ if not model:
+ raise ValueError("model list cannot be empty")
+ model_name = None
+ model_list = []
+ for m in model:
+ if hasattr(m, "name"):
+ model_list.append(m.name)
+ if model_name is None:
+ model_name = m.name
+ model_stream = _extract_from_dedalus_model(m, api_kwargs)
+ if not stream:
+ stream = model_stream
+ else:
+ model_list.append(m)
+ if model_name is None:
+ model_name = m
+ return model_name, model_list, stream
+
+ if hasattr(model, "name"):
+ model_stream = _extract_from_dedalus_model(model, api_kwargs)
+ if not stream:
+ stream = model_stream
+ return model.name, [model.name], stream
+
+ return model, [model] if model else [], stream
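+
+
+# Rough sketch of the mapping (shapes follow the branches above; the
+# DedalusModel construction is a hypothetical illustration):
+#     _parse_model("model-a", {}, False)   -> ("model-a", ["model-a"], False)
+#     _parse_model(["a", "b"], {}, False)  -> ("a", ["a", "b"], False)
+#     _parse_model(DedalusModel(name="m", temperature=0.1), {}, False)
+#         -> ("m", ["m"], False), with {"temperature": 0.1} merged into api_kwargs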
+
+
class DedalusRunner:
"""Enhanced Dedalus client with tool execution capabilities."""
@@ -198,32 +282,71 @@ def run(
messages: list[Message] | None = None,
instructions: str | None = None,
model: str | list[str] | DedalusModel | list[DedalusModel] | None = None,
+ # --- Runner config ---
max_steps: int = 10,
mcp_servers: MCPServersInput = None,
- credentials: Sequence[Any] | None = None, # TODO: Loosely typed as `Any` for now
- temperature: float | None = None,
- max_tokens: int | None = None,
- top_p: float | None = None,
- frequency_penalty: float | None = None,
- presence_penalty: float | None = None,
- logit_bias: Dict[str, int] | None = None,
- response_format: Dict[str, JsonValue] | type | None = None,
+ credentials: Sequence[Any] | None = None,
stream: bool = False,
transport: Literal["http", "realtime"] = "http",
verbose: bool | None = None,
debug: bool | None = None,
on_tool_event: Callable[[Dict[str, JsonValue]], None] | None = None,
return_intent: bool = False,
- agent_attributes: Dict[str, float] | None = None,
- model_attributes: Dict[str, Dict[str, float]] | None = None,
- tool_choice: str | Dict[str, JsonValue] | None = None,
- guardrails: list[Dict[str, JsonValue]] | None = None,
- handoff_config: Dict[str, JsonValue] | None = None,
policy: PolicyInput = None,
available_models: list[str] | None = None,
strict_models: bool = True,
+ agent_attributes: Dict[str, float] | None = None,
+ audio: Dict[str, Any] | None = None,
+ cached_content: str | None = None,
+ deferred: bool | None = None,
+ frequency_penalty: float | None = None,
+ function_call: str | None = None,
+ generation_config: Dict[str, Any] | None = None,
+ guardrails: list[Dict[str, JsonValue]] | None = None,
+ handoff_config: Dict[str, JsonValue] | None = None,
+ logit_bias: Dict[str, int] | None = None,
+ logprobs: bool | None = None,
+ max_completion_tokens: int | None = None,
+ max_tokens: int | None = None,
+ metadata: Dict[str, Any] | None = None,
+ modalities: list[str] | None = None,
+ model_attributes: Dict[str, Dict[str, float]] | None = None,
+ n: int | None = None,
+ parallel_tool_calls: bool | None = None,
+ prediction: Dict[str, Any] | None = None,
+ presence_penalty: float | None = None,
+ prompt_cache_key: str | None = None,
+ prompt_cache_retention: str | None = None,
+ prompt_mode: str | None = None,
+ reasoning_effort: str | None = None,
+ response_format: Dict[str, JsonValue] | type | None = None,
+ safe_prompt: bool | None = None,
+ safety_identifier: str | None = None,
+ safety_settings: list[Dict[str, Any]] | None = None,
+ search_parameters: Dict[str, Any] | None = None,
+ seed: int | None = None,
+ service_tier: str | None = None,
+ stop: str | list[str] | None = None,
+ store: bool | None = None,
+ stream_options: Dict[str, Any] | None = None,
+ system_instruction: str | Dict[str, Any] | None = None,
+ temperature: float | None = None,
+ thinking: Dict[str, Any] | None = None,
+ tool_choice: str | Dict[str, JsonValue] | None = None,
+ tool_config: Dict[str, Any] | None = None,
+ top_k: int | None = None,
+ top_logprobs: int | None = None,
+ top_p: float | None = None,
+ user: str | None = None,
+ verbosity: str | None = None,
+ web_search_options: Dict[str, Any] | None = None,
):
- """Execute tools with unified async/sync + streaming/non-streaming logic."""
+ """Execute a tool-enabled conversation.
+
+ All parameters from the chat completions API are accepted and
+ forwarded to the server verbatim. See ``CompletionCreateParamsBase``
+ for full documentation of each parameter.
+ """
if not model:
raise ValueError("model must be provided")
@@ -233,7 +356,6 @@ def run(
msg = "tools must be a list of callable functions or None"
raise ValueError(msg)
- # Check for nested lists (common mistake: tools=[[]] instead of tools=[])
for i, tool in enumerate(tools):
if not callable(tool):
if isinstance(tool, list):
@@ -244,160 +366,64 @@ def run(
)
raise TypeError(msg)
- # Parse model to extract name and config
- model_name = None
- model_list = []
+ # Collect all API kwargs, filtering out Nones.
+ api_kwargs = _collect_api_kwargs(
+ agent_attributes=agent_attributes,
+ audio=audio,
+ cached_content=cached_content,
+ deferred=deferred,
+ frequency_penalty=frequency_penalty,
+ function_call=function_call,
+ generation_config=generation_config,
+ guardrails=guardrails,
+ handoff_config=handoff_config,
+ logit_bias=logit_bias,
+ logprobs=logprobs,
+ max_completion_tokens=max_completion_tokens,
+ max_tokens=max_tokens,
+ metadata=metadata,
+ modalities=modalities,
+ model_attributes=model_attributes,
+ n=n,
+ parallel_tool_calls=parallel_tool_calls,
+ prediction=prediction,
+ presence_penalty=presence_penalty,
+ prompt_cache_key=prompt_cache_key,
+ prompt_cache_retention=prompt_cache_retention,
+ prompt_mode=prompt_mode,
+ reasoning_effort=reasoning_effort,
+ response_format=response_format,
+ safe_prompt=safe_prompt,
+ safety_identifier=safety_identifier,
+ safety_settings=safety_settings,
+ search_parameters=search_parameters,
+ seed=seed,
+ service_tier=service_tier,
+ stop=stop,
+ store=store,
+ stream_options=stream_options,
+ system_instruction=system_instruction,
+ temperature=temperature,
+ thinking=thinking,
+ tool_choice=tool_choice,
+ tool_config=tool_config,
+ top_k=top_k,
+ top_logprobs=top_logprobs,
+ top_p=top_p,
+ user=user,
+ verbosity=verbosity,
+ web_search_options=web_search_options,
+ )
- if isinstance(model, list):
- if not model:
- raise ValueError("model list cannot be empty")
- # Handle list of DedalusModel or strings
- for m in model:
- if hasattr(m, "name"): # DedalusModel
- model_list.append(m.name)
- # Use config from first DedalusModel if params not explicitly set
- if model_name is None:
- model_name = m.name
- temperature = temperature if temperature is not None else getattr(m, "temperature", None)
- max_tokens = max_tokens if max_tokens is not None else getattr(m, "max_tokens", None)
- top_p = top_p if top_p is not None else getattr(m, "top_p", None)
- frequency_penalty = (
- frequency_penalty
- if frequency_penalty is not None
- else getattr(m, "frequency_penalty", None)
- )
- presence_penalty = (
- presence_penalty if presence_penalty is not None else getattr(m, "presence_penalty", None)
- )
- logit_bias = logit_bias if logit_bias is not None else getattr(m, "logit_bias", None)
-
- # Extract additional parameters from first DedalusModel
- stream = stream if stream is not False else getattr(m, "stream", False)
- tool_choice = tool_choice if tool_choice is not None else getattr(m, "tool_choice", None)
-
- # Extract Dedalus-specific extensions
- if hasattr(m, "attributes") and m.attributes:
- agent_attributes = agent_attributes if agent_attributes is not None else m.attributes
-
- # Check for unsupported parameters (only warn once for first model)
- unsupported_params = []
- if hasattr(m, "n") and m.n is not None:
- unsupported_params.append("n")
- if hasattr(m, "stop") and m.stop is not None:
- unsupported_params.append("stop")
- if hasattr(m, "stream_options") and m.stream_options is not None:
- unsupported_params.append("stream_options")
- if hasattr(m, "logprobs") and m.logprobs is not None:
- unsupported_params.append("logprobs")
- if hasattr(m, "top_logprobs") and m.top_logprobs is not None:
- unsupported_params.append("top_logprobs")
- if hasattr(m, "seed") and m.seed is not None:
- unsupported_params.append("seed")
- if hasattr(m, "service_tier") and m.service_tier is not None:
- unsupported_params.append("service_tier")
- if hasattr(m, "tools") and m.tools is not None:
- unsupported_params.append("tools")
- if hasattr(m, "parallel_tool_calls") and m.parallel_tool_calls is not None:
- unsupported_params.append("parallel_tool_calls")
- if hasattr(m, "user") and m.user is not None:
- unsupported_params.append("user")
- if hasattr(m, "max_completion_tokens") and m.max_completion_tokens is not None:
- unsupported_params.append("max_completion_tokens")
-
- if unsupported_params:
- import warnings
-
- warnings.warn(
- f"The following DedalusModel parameters are not yet supported and will be ignored: {', '.join(unsupported_params)}. "
- f"Support for these parameters is coming soon.",
- UserWarning,
- stacklevel=2,
- )
- else: # String
- model_list.append(m)
- if model_name is None:
- model_name = m
- elif hasattr(model, "name"): # Single DedalusModel
- model_name = model.name
- model_list = [model.name]
- # Extract config from DedalusModel if params not explicitly set
- temperature = temperature if temperature is not None else getattr(model, "temperature", None)
- max_tokens = max_tokens if max_tokens is not None else getattr(model, "max_tokens", None)
- top_p = top_p if top_p is not None else getattr(model, "top_p", None)
- frequency_penalty = (
- frequency_penalty if frequency_penalty is not None else getattr(model, "frequency_penalty", None)
- )
- presence_penalty = (
- presence_penalty if presence_penalty is not None else getattr(model, "presence_penalty", None)
- )
- logit_bias = logit_bias if logit_bias is not None else getattr(model, "logit_bias", None)
-
- # Extract additional supported parameters
- stream = stream if stream is not False else getattr(model, "stream", False)
- tool_choice = tool_choice if tool_choice is not None else getattr(model, "tool_choice", None)
-
- # Extract Dedalus-specific extensions
- if hasattr(model, "attributes") and model.attributes:
- agent_attributes = agent_attributes if agent_attributes is not None else model.attributes
- if hasattr(model, "metadata") and model.metadata:
- # metadata is stored but not yet fully utilized
- pass
-
- # Log warnings for unsupported parameters
- unsupported_params = []
- if hasattr(model, "n") and model.n is not None:
- unsupported_params.append("n")
- if hasattr(model, "stop") and model.stop is not None:
- unsupported_params.append("stop")
- if hasattr(model, "stream_options") and model.stream_options is not None:
- unsupported_params.append("stream_options")
- if hasattr(model, "logprobs") and model.logprobs is not None:
- unsupported_params.append("logprobs")
- if hasattr(model, "top_logprobs") and model.top_logprobs is not None:
- unsupported_params.append("top_logprobs")
- if hasattr(model, "seed") and model.seed is not None:
- unsupported_params.append("seed")
- if hasattr(model, "service_tier") and model.service_tier is not None:
- unsupported_params.append("service_tier")
- if hasattr(model, "tools") and model.tools is not None:
- unsupported_params.append("tools")
- if hasattr(model, "parallel_tool_calls") and model.parallel_tool_calls is not None:
- unsupported_params.append("parallel_tool_calls")
- if hasattr(model, "user") and model.user is not None:
- unsupported_params.append("user")
- if hasattr(model, "max_completion_tokens") and model.max_completion_tokens is not None:
- unsupported_params.append("max_completion_tokens")
-
- if unsupported_params:
- import warnings
-
- warnings.warn(
- f"The following DedalusModel parameters are not yet supported and will be ignored: {', '.join(unsupported_params)}. "
- f"Support for these parameters is coming soon.",
- UserWarning,
- stacklevel=2,
- )
- else: # Single string
- model_name = model
- model_list = [model] if model else []
+ # Parse model to extract name, list, and any model-embedded params.
+ model_name, model_list, stream = _parse_model(model, api_kwargs, stream)
available_models = model_list if available_models is None else available_models
model_config = _ModelConfig(
id=str(model_name),
- model_list=model_list, # Pass the full model list
- temperature=temperature,
- max_tokens=max_tokens,
- top_p=top_p,
- frequency_penalty=frequency_penalty,
- presence_penalty=presence_penalty,
- logit_bias=logit_bias,
- response_format=response_format,
- agent_attributes=agent_attributes,
- model_attributes=model_attributes,
- tool_choice=tool_choice,
- guardrails=guardrails,
- handoff_config=handoff_config,
+ model_list=model_list,
+ api_kwargs=api_kwargs,
)
# Serialize mcp_servers to wire format
@@ -740,65 +766,27 @@ async def _execute_streaming_async(
print(f" All tools are MCP, expecting streamed response")
# Don't break here - let the next iteration handle it
else:
- # We have at least one local tool
- # Filter to only include local tool calls in the assistant message
- local_only_tool_calls = [
+ # We have at least one local tool — delegate to scheduler.
+ local_only = [
tc for tc in tool_calls if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
]
- messages.append({"role": "assistant", "tool_calls": local_only_tool_calls})
- if exec_config.verbose:
- print(
- f" Added assistant message with {len(local_only_tool_calls)} local tool calls (filtered from {len(tool_calls)} total)"
- )
-
- # Execute only local tools
- for tc in tool_calls:
- fn_name = tc["function"]["name"]
- fn_args_str = tc["function"]["arguments"]
-
- if fn_name in getattr(tool_handler, "_funcs", {}):
- # Local tool
- try:
- fn_args = json.loads(fn_args_str)
- except json.JSONDecodeError:
- fn_args = {}
-
- try:
- result = await tool_handler.exec(fn_name, fn_args)
- messages.append(
- {
- "role": "tool",
- "tool_call_id": tc["id"],
- "content": str(result),
- }
- )
- if exec_config.verbose:
- print(f" Executed local tool {fn_name}: {str(result)[:50]}...")
- except Exception as e:
- messages.append(
- {
- "role": "tool",
- "tool_call_id": tc["id"],
- "content": f"Error: {str(e)}",
- }
- )
- if exec_config.verbose:
- print(f" Error executing local tool {fn_name}: {e}")
- else:
- # MCP tool - DON'T add any message
- # The API server should handle this
- if exec_config.verbose:
- print(f" MCP tool {fn_name} - skipping (server will handle)")
+ messages.append({"role": "assistant", "tool_calls": local_only})
+
+ from ._scheduler import execute_local_tools_async
+
+ await execute_local_tools_async(
+ local_only,
+ tool_handler,
+ messages,
+                            [],  # tool_results (discarded on the streaming path)
+                            [],  # tools_called (discarded on the streaming path)
+ steps,
+ verbose=exec_config.verbose,
+ )
if exec_config.verbose:
print(f" Messages after tool execution: {len(messages)}")
- # Only continue if we have NO MCP tools
- if not mcp_names:
- print(f" No MCP tools, continuing loop to step {steps + 1}...")
- else:
- print(f" MCP tools present, expecting response in next iteration")
-
# Continue loop only if we need another response
if exec_config.verbose:
print(f" Tool processing complete")
@@ -1070,65 +1058,26 @@ def _execute_streaming_sync(
print(f" All tools are MCP, expecting streamed response")
# Don't break here - let the next iteration handle it
else:
- # We have at least one local tool
- # Filter to only include local tool calls in the assistant message
- local_only_tool_calls = [
+ # We have at least one local tool — delegate to scheduler.
+ local_only = [
tc for tc in tool_calls if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
]
- messages.append({"role": "assistant", "tool_calls": local_only_tool_calls})
- if exec_config.verbose:
- print(
- f" Added assistant message with {len(local_only_tool_calls)} local tool calls (filtered from {len(tool_calls)} total)"
- )
+ messages.append({"role": "assistant", "tool_calls": local_only})
- # Execute only local tools
- for tc in tool_calls:
- fn_name = tc["function"]["name"]
- fn_args_str = tc["function"]["arguments"]
-
- if fn_name in getattr(tool_handler, "_funcs", {}):
- # Local tool
- try:
- fn_args = json.loads(fn_args_str)
- except json.JSONDecodeError:
- fn_args = {}
-
- try:
- result = tool_handler.exec_sync(fn_name, fn_args)
- messages.append(
- {
- "role": "tool",
- "tool_call_id": tc["id"],
- "content": str(result),
- }
- )
- if exec_config.verbose:
- print(f" Executed local tool {fn_name}: {str(result)[:50]}...")
- except Exception as e:
- messages.append(
- {
- "role": "tool",
- "tool_call_id": tc["id"],
- "content": f"Error: {str(e)}",
- }
- )
- if exec_config.verbose:
- print(f" Error executing local tool {fn_name}: {e}")
- else:
- # MCP tool - DON'T add any message
- # The API server should handle this
- if exec_config.verbose:
- print(f" MCP tool {fn_name} - skipping (server will handle)")
+ from ._scheduler import execute_local_tools_sync
+
+ execute_local_tools_sync(
+ local_only,
+ tool_handler,
+ messages,
+                        [],  # tool_results (discarded on the streaming path)
+                        [],  # tools_called (discarded on the streaming path)
+ steps,
+ )
if exec_config.verbose:
print(f" Messages after tool execution: {len(messages)}")
- # Only continue if we have NO MCP tools
- if not mcp_names:
- print(f" No MCP tools, continuing loop to step {steps + 1}...")
- else:
- print(f" MCP tools present, expecting response in next iteration")
-
# Continue loop only if we need another response
if exec_config.verbose:
print(f" Tool processing complete")
@@ -1244,47 +1193,28 @@ async def _execute_tool_calls(
step: int,
verbose: bool = False,
):
- """Execute tool calls asynchronously."""
+ """Execute tool calls asynchronously with dependency-aware scheduling.
+
+ Independent tools fire concurrently. Dependent tools wait for
+ their prerequisites. Falls back to sequential on cyclic deps.
+ """
+ from ._scheduler import execute_local_tools_async
+
if verbose:
print(f" _execute_tool_calls: Processing {len(tool_calls)} tool calls")
- # Record single assistant message with ALL tool calls (OpenAI format)
+ # Record assistant message with all tool calls (OpenAI format).
messages.append({"role": "assistant", "tool_calls": list(tool_calls)})
- for i, tc in enumerate(tool_calls):
- fn_name = tc["function"]["name"]
- fn_args_str = tc["function"]["arguments"]
-
- if verbose:
- print(f" Tool {i + 1}/{len(tool_calls)}: {fn_name}")
-
- try:
- fn_args = json.loads(fn_args_str)
- except json.JSONDecodeError:
- fn_args = {}
-
- try:
- result = await tool_handler.exec(fn_name, fn_args)
- tool_results.append({"name": fn_name, "result": result, "step": step})
- tools_called.append(fn_name)
- messages.append({"role": "tool", "tool_call_id": tc["id"], "content": str(result)})
-
- if verbose:
- print(f" Tool {fn_name} executed successfully: {str(result)[:50]}...")
- except Exception as e:
- error_result = {"error": str(e), "name": fn_name, "step": step}
- tool_results.append(error_result)
- messages.append(
- {
- "role": "tool",
- "tool_call_id": tc["id"],
- "content": f"Error: {str(e)}",
- }
- )
-
- if verbose:
- print(f" Tool {fn_name} failed with error: {e}")
- print(f" Error type: {type(e).__name__}")
+ await execute_local_tools_async(
+ tool_calls,
+ tool_handler,
+ messages,
+ tool_results,
+ tools_called,
+ step,
+ verbose=verbose,
+ )
def _execute_tool_calls_sync(
self,
@@ -1295,34 +1225,20 @@ def _execute_tool_calls_sync(
tools_called: list[str],
step: int,
):
- """Execute tool calls synchronously."""
- # Record single assistant message with ALL tool calls (OpenAI format)
- messages.append({"role": "assistant", "tool_calls": list(tool_calls)})
+ """Execute tool calls synchronously with dependency-aware ordering."""
+ from ._scheduler import execute_local_tools_sync
- for tc in tool_calls:
- fn_name = tc["function"]["name"]
- fn_args_str = tc["function"]["arguments"]
-
- try:
- fn_args = json.loads(fn_args_str)
- except json.JSONDecodeError:
- fn_args = {}
+ # Record assistant message with all tool calls (OpenAI format).
+ messages.append({"role": "assistant", "tool_calls": list(tool_calls)})
- try:
- result = tool_handler.exec_sync(fn_name, fn_args)
- tool_results.append({"name": fn_name, "result": result, "step": step})
- tools_called.append(fn_name)
- messages.append({"role": "tool", "tool_call_id": tc["id"], "content": str(result)})
- except Exception as e:
- error_result = {"error": str(e), "name": fn_name, "step": step}
- tool_results.append(error_result)
- messages.append(
- {
- "role": "tool",
- "tool_call_id": tc["id"],
- "content": f"Error: {str(e)}",
- }
- )
+ execute_local_tools_sync(
+ tool_calls,
+ tool_handler,
+ messages,
+ tool_results,
+ tools_called,
+ step,
+ )
def _accumulate_tool_calls(self, deltas, acc: list[ToolCall]) -> None:
"""Accumulate streaming tool call deltas."""
@@ -1350,17 +1266,15 @@ def _accumulate_tool_calls(self, deltas, acc: list[ToolCall]) -> None:
@staticmethod
def _mk_kwargs(mc: _ModelConfig) -> Dict[str, Any]:
- """Convert model config to kwargs for client call."""
+ """Convert model config to kwargs for the API call."""
from ..._utils import is_given
from ...lib._parsing import type_to_response_format_param
- d = asdict(mc)
- d.pop("id", None) # Remove id since it's passed separately
- d.pop("model_list", None) # Remove model_list since it's not an API parameter
+ kwargs = dict(mc.api_kwargs)
- # Convert Pydantic model to dict schema if needed
- if "response_format" in d and d["response_format"] is not None:
- converted = type_to_response_format_param(d["response_format"])
- d["response_format"] = converted if is_given(converted) else None
+ # Convert Pydantic model class to dict schema if needed.
+ if "response_format" in kwargs and kwargs["response_format"] is not None:
+ converted = type_to_response_format_param(kwargs["response_format"])
+ kwargs["response_format"] = converted if is_given(converted) else None
- return {k: v for k, v in d.items() if v is not None}
+ return {k: v for k, v in kwargs.items() if v is not None}
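+
+# Illustrative (hypothetical values): with mc.api_kwargs ==
+# {"temperature": 0.2, "response_format": MyModel} where MyModel is a
+# Pydantic class, temperature passes through unchanged and response_format
+# is rewritten to the dict schema produced by type_to_response_format_param.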
diff --git a/tests/test_local_scheduler.py b/tests/test_local_scheduler.py
new file mode 100644
index 0000000..99ba7d0
--- /dev/null
+++ b/tests/test_local_scheduler.py
@@ -0,0 +1,278 @@
+# ==============================================================================
+# © 2025 Dedalus Labs, Inc. and affiliates
+# Licensed under MIT
+# github.com/dedalus-labs/dedalus-sdk-python/LICENSE
+# ==============================================================================
+
+"""Tests for SDK-side dependency-aware local tool scheduler."""
+
+from __future__ import annotations
+
+import asyncio
+import json
+import time
+from typing import Any, Dict
+from unittest.mock import AsyncMock, MagicMock
+
+import pytest
+
+from dedalus_labs.lib.runner._scheduler import (
+ execute_local_tools_async,
+ execute_local_tools_sync,
+)
+
+
+# --- Test helpers ---
+
+
+def _make_tool_call(
+ call_id: str,
+ name: str,
+ args: Dict[str, Any] | None = None,
+ dependencies: list[str] | None = None,
+) -> Dict[str, Any]:
+ """Build a tool call dict matching the format from core.py."""
+ return {
+ "id": call_id,
+ "type": "function",
+ "function": {
+ "name": name,
+ "arguments": json.dumps(args or {}),
+ },
+ "dependencies": dependencies or [],
+ }
+
+
+def _make_async_handler(
+ results: Dict[str, Any] | None = None,
+ delay: float = 0.0,
+) -> AsyncMock:
+ """Create a mock tool handler with async exec."""
+ handler = MagicMock()
+ results = results or {}
+
+ async def exec_fn(name: str, args: Dict[str, Any]) -> Any:
+ if delay:
+ await asyncio.sleep(delay)
+ if name in results:
+ return results[name]
+ return f"result_{name}"
+
+ handler.exec = AsyncMock(side_effect=exec_fn)
+ return handler
+
+
+def _make_sync_handler(results: Dict[str, Any] | None = None) -> MagicMock:
+ """Create a mock tool handler with sync exec_sync."""
+ handler = MagicMock()
+ results = results or {}
+
+ def exec_sync_fn(name: str, args: Dict[str, Any]) -> Any:
+ if name in results:
+ return results[name]
+ return f"result_{name}"
+
+ handler.exec_sync = MagicMock(side_effect=exec_sync_fn)
+ return handler
+
+
+# --- Async tests ---
+
+
+@pytest.mark.asyncio
+async def test_async_independent_tools_run_in_parallel():
+ """Independent tools with no deps fire concurrently."""
+ handler = _make_async_handler(delay=0.05)
+ calls = [
+ _make_tool_call("a", "fetch_a"),
+ _make_tool_call("b", "fetch_b"),
+ _make_tool_call("c", "fetch_c"),
+ ]
+ messages: list = []
+ tool_results: list = []
+ tools_called: list = []
+
+ start = time.monotonic()
+ await execute_local_tools_async(calls, handler, messages, tool_results, tools_called, step=1)
+ elapsed = time.monotonic() - start
+
+ assert len(tools_called) == 3
+ # Parallel: should complete in ~1x delay, not 3x.
+ assert elapsed < 0.15
+
+
+@pytest.mark.asyncio
+async def test_async_chain_respects_ordering():
+ """b depends on a: a executes before b."""
+ execution_order: list[str] = []
+ handler = MagicMock()
+
+ async def tracking_exec(name: str, args: Dict[str, Any]) -> str:
+ execution_order.append(name)
+ return f"done_{name}"
+
+ handler.exec = AsyncMock(side_effect=tracking_exec)
+
+ calls = [
+ _make_tool_call("a", "fetch"),
+ _make_tool_call("b", "transform", dependencies=["a"]),
+ ]
+ messages: list = []
+ tool_results: list = []
+ tools_called: list = []
+
+ await execute_local_tools_async(calls, handler, messages, tool_results, tools_called, step=1)
+ assert execution_order == ["fetch", "transform"]
+
+
+@pytest.mark.asyncio
+async def test_async_diamond_dependency():
+ """Diamond: a,b independent, c depends on both."""
+ execution_order: list[str] = []
+ handler = MagicMock()
+
+ async def tracking_exec(name: str, args: Dict[str, Any]) -> str:
+ execution_order.append(name)
+ return f"done_{name}"
+
+ handler.exec = AsyncMock(side_effect=tracking_exec)
+
+ calls = [
+ _make_tool_call("a", "fetch_a"),
+ _make_tool_call("b", "fetch_b"),
+ _make_tool_call("c", "combine", dependencies=["a", "b"]),
+ ]
+ messages: list = []
+ tool_results: list = []
+ tools_called: list = []
+
+ await execute_local_tools_async(calls, handler, messages, tool_results, tools_called, step=1)
+ # a and b execute first (any order), then c.
+ assert execution_order[-1] == "combine"
+ assert set(execution_order[:2]) == {"fetch_a", "fetch_b"}
+
+
+@pytest.mark.asyncio
+async def test_async_cycle_falls_back_to_sequential():
+ """Cyclic deps don't crash. Falls back to sequential execution."""
+ handler = _make_async_handler()
+ calls = [
+ _make_tool_call("a", "fetch", dependencies=["b"]),
+ _make_tool_call("b", "fetch", dependencies=["a"]),
+ ]
+ messages: list = []
+ tool_results: list = []
+ tools_called: list = []
+
+ await execute_local_tools_async(calls, handler, messages, tool_results, tools_called, step=1)
+ assert len(tools_called) == 2
+
+
+@pytest.mark.asyncio
+async def test_async_records_messages_correctly():
+ """Tool results are recorded as messages in correct format."""
+ handler = _make_async_handler(results={"my_tool": "hello world"})
+ calls = [_make_tool_call("call_1", "my_tool", args={"x": 1})]
+ messages: list = []
+ tool_results: list = []
+ tools_called: list = []
+
+ await execute_local_tools_async(calls, handler, messages, tool_results, tools_called, step=1)
+
+ # Scheduler only appends tool result messages (assistant message is caller's job).
+ assert messages[0]["role"] == "tool"
+ assert messages[0]["tool_call_id"] == "call_1"
+ assert messages[0]["content"] == "hello world"
+
+
+@pytest.mark.asyncio
+async def test_async_tool_error_recorded():
+ """Tool execution errors are recorded in messages, not raised."""
+ handler = MagicMock()
+
+ async def failing_exec(name: str, args: Dict[str, Any]) -> str:
+ raise RuntimeError("boom")
+
+ handler.exec = AsyncMock(side_effect=failing_exec)
+ calls = [_make_tool_call("a", "bad_tool")]
+ messages: list = []
+ tool_results: list = []
+ tools_called: list = []
+
+ await execute_local_tools_async(calls, handler, messages, tool_results, tools_called, step=1)
+ assert "error" in tool_results[0]
+ assert messages[0]["content"] == "Error: boom"
+
+
+@pytest.mark.asyncio
+async def test_async_empty_calls():
+ """Empty call list is a no-op."""
+ handler = _make_async_handler()
+ messages: list = []
+ tool_results: list = []
+ tools_called: list = []
+
+ await execute_local_tools_async([], handler, messages, tool_results, tools_called, step=1)
+ assert messages == []
+
+
+@pytest.mark.asyncio
+async def test_async_unknown_deps_ignored():
+ """Dependencies referencing ids not in this batch are filtered."""
+ handler = _make_async_handler()
+ calls = [_make_tool_call("a", "fetch", dependencies=["nonexistent"])]
+ messages: list = []
+ tool_results: list = []
+ tools_called: list = []
+
+ await execute_local_tools_async(calls, handler, messages, tool_results, tools_called, step=1)
+ assert len(tools_called) == 1
+
+
+# --- Sync tests ---
+
+
+def test_sync_chain_respects_ordering():
+ """Sync path: b depends on a, executes in correct order."""
+ execution_order: list[str] = []
+ handler = MagicMock()
+
+ def tracking_sync(name: str, args: Dict[str, Any]) -> str:
+ execution_order.append(name)
+ return f"done_{name}"
+
+ handler.exec_sync = MagicMock(side_effect=tracking_sync)
+
+ calls = [
+ _make_tool_call("a", "fetch"),
+ _make_tool_call("b", "transform", dependencies=["a"]),
+ ]
+ messages: list = []
+ tool_results: list = []
+ tools_called: list = []
+
+ execute_local_tools_sync(calls, handler, messages, tool_results, tools_called, step=1)
+ assert execution_order == ["fetch", "transform"]
+
+
+def test_sync_cycle_falls_back():
+ """Sync path: cycle detected, falls back to sequential."""
+ handler = _make_sync_handler()
+ calls = [
+ _make_tool_call("a", "fetch", dependencies=["b"]),
+ _make_tool_call("b", "fetch", dependencies=["a"]),
+ ]
+ messages: list = []
+ tool_results: list = []
+ tools_called: list = []
+
+ execute_local_tools_sync(calls, handler, messages, tool_results, tools_called, step=1)
+ assert len(tools_called) == 2
+
+
+def test_sync_empty_calls():
+ """Sync path: empty call list is a no-op."""
+ handler = _make_sync_handler()
+ messages: list = []
+ execute_local_tools_sync([], handler, messages, [], [], step=1)
+ assert messages == []
From 288b70e22ee6e9af0593dc45ddac11ae6de78eb8 Mon Sep 17 00:00:00 2001
From: aryanma
Date: Fri, 6 Feb 2026 21:53:55 -0800
Subject: [PATCH 07/23] fix(runner): inject server tool results into
conversation for mixed tool calls
---
src/dedalus_labs/lib/runner/core.py | 1436 ++++++++++++++++++++++++++-
1 file changed, 1432 insertions(+), 4 deletions(-)
diff --git a/src/dedalus_labs/lib/runner/core.py b/src/dedalus_labs/lib/runner/core.py
index b02a557..c6a2ac4 100644
--- a/src/dedalus_labs/lib/runner/core.py
+++ b/src/dedalus_labs/lib/runner/core.py
@@ -1,3 +1,6 @@
+# ===========================================================================
+# BEGIN: feat/dep-graph-scheduler version (ACTIVE)
+# ===========================================================================
# ==============================================================================
# © 2025 Dedalus Labs, Inc. and affiliates
# Licensed under MIT
@@ -8,6 +11,7 @@
import asyncio
import inspect
+import json
from typing import (
TYPE_CHECKING,
Any,
@@ -685,6 +689,7 @@ async def _execute_streaming_async(
content_chunks = 0
tool_call_chunks = 0
finish_reason = None
+ mcp_tool_results_from_server: list = []
async for chunk in stream:
chunk_count += 1
if exec_config.verbose:
@@ -694,6 +699,12 @@ async def _execute_streaming_async(
meta = extra.get("x_dedalus_event") or extra.get("dedalus_event")
if isinstance(meta, dict) and meta.get("type") == "agent_updated":
print(f" [EVENT] agent_updated: agent={meta.get('agent')} model={meta.get('model')}")
+
+ # Collect MCP tool results emitted by the server
+ chunk_extra = getattr(chunk, "__pydantic_extra__", None) or {}
+ if isinstance(chunk_extra, dict) and "mcp_tool_results" in chunk_extra:
+ mcp_tool_results_from_server = chunk_extra["mcp_tool_results"]
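+            # (Assumed shape of mcp_tool_results, inferred from the call_id
+            #  matching later in this function:
+            #  [{"call_id": "...", "result": <json-serializable>}, ...])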
+
if hasattr(chunk, "choices") and chunk.choices:
choice = chunk.choices[0]
delta = choice.delta
@@ -766,11 +777,30 @@ async def _execute_streaming_async(
print(f" All tools are MCP, expecting streamed response")
# Don't break here - let the next iteration handle it
else:
- # We have at least one local tool — delegate to scheduler.
+ # Mixed local + server tools. Include ALL tool calls
+ # in the assistant message so the model sees the
+ # complete conversation on the next turn.
+ messages.append({"role": "assistant", "tool_calls": tool_calls})
+
+ # Inject server (MCP) tool results as tool messages
+ for tc in tool_calls:
+ tc_name = tc["function"]["name"]
+ if tc_name not in getattr(tool_handler, "_funcs", {}):
+ call_id = tc["id"]
+ result_data = next(
+ (r for r in mcp_tool_results_from_server if r.get("call_id") == call_id),
+ None,
+ )
+ if result_data:
+ content = json.dumps(result_data["result"]) if result_data.get("result") is not None else ""
+ messages.append({"role": "tool", "tool_call_id": call_id, "content": content})
+ elif exec_config.verbose:
+ print(f" Warning: no server result for MCP tool {tc_name} ({call_id[:8]}...)")
+
+ # Execute only local tools
local_only = [
tc for tc in tool_calls if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
]
- messages.append({"role": "assistant", "tool_calls": local_only})
from ._scheduler import execute_local_tools_async
@@ -974,9 +1004,16 @@ def _execute_streaming_sync(
tool_call_chunks = 0
finish_reason = None
accumulated_content = ""
+ mcp_tool_results_from_server: list = []
for chunk in stream:
chunk_count += 1
+
+ # Collect MCP tool results emitted by the server
+ chunk_extra = getattr(chunk, "__pydantic_extra__", None) or {}
+ if isinstance(chunk_extra, dict) and "mcp_tool_results" in chunk_extra:
+ mcp_tool_results_from_server = chunk_extra["mcp_tool_results"]
+
if hasattr(chunk, "choices") and chunk.choices:
choice = chunk.choices[0]
delta = choice.delta
@@ -1058,11 +1095,26 @@ def _execute_streaming_sync(
print(f" All tools are MCP, expecting streamed response")
# Don't break here - let the next iteration handle it
else:
- # We have at least one local tool — delegate to scheduler.
+ # Mixed local + server tools. Include ALL tool calls.
+ messages.append({"role": "assistant", "tool_calls": tool_calls})
+
+ # Inject server (MCP) tool results as tool messages
+ for tc in tool_calls:
+ tc_name = tc["function"]["name"]
+ if tc_name not in getattr(tool_handler, "_funcs", {}):
+ call_id = tc["id"]
+ result_data = next(
+ (r for r in mcp_tool_results_from_server if r.get("call_id") == call_id),
+ None,
+ )
+ if result_data:
+ content = json.dumps(result_data["result"]) if result_data.get("result") is not None else ""
+ messages.append({"role": "tool", "tool_call_id": call_id, "content": content})
+
+ # Execute only local tools
local_only = [
tc for tc in tool_calls if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
]
- messages.append({"role": "assistant", "tool_calls": local_only})
from ._scheduler import execute_local_tools_sync
@@ -1278,3 +1330,1379 @@ def _mk_kwargs(mc: _ModelConfig) -> Dict[str, Any]:
kwargs["response_format"] = converted if is_given(converted) else None
return {k: v for k, v in kwargs.items() if v is not None}
+
+# ===========================================================================
+# END: feat/dep-graph-scheduler version
+# ===========================================================================
+
+# ===========================================================================
+# BEGIN: production version (COMMENTED OUT)
+# ===========================================================================
+# # © 2025 Dedalus Labs, Inc. and affiliates
+# # Licensed under MIT
+# # github.com/dedalus-labs/dedalus-sdk-python/LICENSE
+#
+# from __future__ import annotations
+#
+# import json
+# import asyncio
+# import inspect
+# from typing import (
+# TYPE_CHECKING,
+# Any,
+# Dict,
+# Literal,
+# Callable,
+# Iterator,
+# Protocol,
+# AsyncIterator,
+# Sequence,
+# Union,
+# )
+# from dataclasses import field, asdict, dataclass
+#
+# if TYPE_CHECKING:
+# from ...types.shared.dedalus_model import DedalusModel
+#
+# from ..._client import Dedalus, AsyncDedalus
+#
+# from .types import Message, ToolCall, JsonValue, ToolResult, PolicyInput, PolicyContext
+# from ...types.shared import MCPToolResult
+# from ..mcp import serialize_mcp_servers, MCPServerProtocol
+#
+# # Type alias for mcp_servers parameter - accepts strings, server objects, or mixed lists
+# MCPServersInput = Union[
+# str, # Single slug or URL
+# MCPServerProtocol, # MCP server object
+# Sequence[Union[str, MCPServerProtocol, Dict[str, Any]]], # Mixed list
+# None,
+# ]
+# from ..utils._schemas import to_schema
+#
+#
+# def _process_policy(policy: PolicyInput, context: PolicyContext) -> Dict[str, JsonValue]:
+# """Process policy, handling all possible input types safely."""
+# if policy is None:
+# return {}
+#
+# if callable(policy):
+# try:
+# result = policy(context)
+# return result if isinstance(result, dict) else {}
+# except Exception:
+# return {}
+#
+# if isinstance(policy, dict):
+# try:
+# return dict(policy)
+# except Exception:
+# return {}
+#
+# return {}
+#
+#
+# def _extract_mcp_results(response: Any) -> list[MCPToolResult]:
+# """Extract MCP tool results from API response."""
+# mcp_results = getattr(response, "mcp_tool_results", None)
+# if not mcp_results:
+# return []
+# return [item if isinstance(item, MCPToolResult) else MCPToolResult.model_validate(item) for item in mcp_results]
+#
+#
+# class _ToolHandler(Protocol):
+# def schemas(self) -> list[Dict]: ...
+# async def exec(self, name: str, args: Dict[str, JsonValue]) -> JsonValue: ...
+# def exec_sync(self, name: str, args: Dict[str, JsonValue]) -> JsonValue: ...
+#
+#
+# class _FunctionToolHandler:
+# """Converts Python functions to tool handler via introspection."""
+#
+# def __init__(self, funcs: list[Callable[..., Any]]):
+# self._funcs = {f.__name__: f for f in funcs}
+#
+# def schemas(self) -> list[Dict]:
+# """Build OpenAI-compatible function schemas via introspection."""
+# out: list[Dict[str, Any]] = []
+# for fn in self._funcs.values():
+# try:
+# out.append(to_schema(fn))
+# except Exception:
+# continue
+# return out
+#
+# async def exec(self, name: str, args: Dict[str, JsonValue]) -> JsonValue:
+# """Execute tool by name with given args (async)."""
+# fn = self._funcs[name]
+# if inspect.iscoroutinefunction(fn):
+# return await fn(**args)
+# # asyncio.to_thread is Python 3.9+, use run_in_executor for 3.8 compat
+# loop = asyncio.get_event_loop()
+# # Use partial to properly pass keyword arguments
+# from functools import partial
+#
+# return await loop.run_in_executor(None, partial(fn, **args))
+#
+# def exec_sync(self, name: str, args: Dict[str, JsonValue]) -> JsonValue:
+# """Execute tool by name with given args (sync)."""
+# fn = self._funcs[name]
+# if inspect.iscoroutinefunction(fn):
+# loop = asyncio.new_event_loop()
+# asyncio.set_event_loop(loop)
+# try:
+# return loop.run_until_complete(fn(**args))
+# finally:
+# loop.close()
+# return fn(**args)
+#
+#
+# @dataclass
+# class _ModelConfig:
+# """Model configuration parameters."""
+#
+# id: str
+# model_list: list[str] | None = None # Store the full model list when provided
+# temperature: float | None = None
+# max_tokens: int | None = None
+# top_p: float | None = None
+# frequency_penalty: float | None = None
+# presence_penalty: float | None = None
+# logit_bias: Dict[str, int] | None = None
+# response_format: Dict[str, JsonValue] | type | None = None # Dict or Pydantic model
+# agent_attributes: Dict[str, float] | None = None
+# model_attributes: Dict[str, Dict[str, float]] | None = None
+# tool_choice: str | Dict[str, JsonValue] | None = None
+# guardrails: list[Dict[str, JsonValue]] | None = None
+# handoff_config: Dict[str, JsonValue] | None = None
+#
+#
+# @dataclass
+# class _ExecutionConfig:
+# """Configuration for tool execution behavior and policies."""
+#
+# mcp_servers: list[str | Dict[str, Any]] = field(default_factory=list) # Wire format
+# credentials: list[Any] | None = None # CredentialProtocol objects (not serialized)
+# max_steps: int = 10
+# stream: bool = False
+# transport: Literal["http", "realtime"] = "http"
+# verbose: bool = False
+# debug: bool = False
+# on_tool_event: Callable[[Dict[str, JsonValue]], None] | None = None
+# return_intent: bool = False
+# policy: PolicyInput = None
+# available_models: list[str] = field(default_factory=list)
+# strict_models: bool = True
+#
+#
+# @dataclass
+# class _RunResult:
+# """Result from a completed tool execution run."""
+#
+# final_output: str # Final text output from conversation
+# tool_results: list[ToolResult]
+# steps_used: int
+# messages: list[Message] = field(default_factory=list) # Full conversation history
+# intents: list[Dict[str, JsonValue]] | None = None
+# tools_called: list[str] = field(default_factory=list)
+# mcp_results: list[MCPToolResult] = field(default_factory=list)
+# """MCP tool results from server-side tool calls."""
+#
+# @property
+# def output(self) -> str:
+# """Alias for final_output."""
+# return self.final_output
+#
+# @property
+# def content(self) -> str:
+# """Alias for final_output."""
+# return self.final_output
+#
+# def to_input_list(self) -> list[Message]:
+# """Get the full conversation history for continuation."""
+# return list(self.messages)
+#
+#
+# class DedalusRunner:
+# """Enhanced Dedalus client with tool execution capabilities."""
+#
+# def __init__(self, client: Dedalus | AsyncDedalus, verbose: bool = False):
+# self.client = client
+# self.verbose = verbose
+#
+# def run(
+# self,
+# input: str | list[Message] | None = None,
+# tools: list[Callable] | None = None,
+# messages: list[Message] | None = None,
+# instructions: str | None = None,
+# model: str | list[str] | DedalusModel | list[DedalusModel] | None = None,
+# max_steps: int = 10,
+# mcp_servers: MCPServersInput = None,
+# credentials: Sequence[Any] | None = None, # TODO: Loosely typed as `Any` for now
+# temperature: float | None = None,
+# max_tokens: int | None = None,
+# top_p: float | None = None,
+# frequency_penalty: float | None = None,
+# presence_penalty: float | None = None,
+# logit_bias: Dict[str, int] | None = None,
+# response_format: Dict[str, JsonValue] | type | None = None,
+# stream: bool = False,
+# transport: Literal["http", "realtime"] = "http",
+# verbose: bool | None = None,
+# debug: bool | None = None,
+# on_tool_event: Callable[[Dict[str, JsonValue]], None] | None = None,
+# return_intent: bool = False,
+# agent_attributes: Dict[str, float] | None = None,
+# model_attributes: Dict[str, Dict[str, float]] | None = None,
+# tool_choice: str | Dict[str, JsonValue] | None = None,
+# guardrails: list[Dict[str, JsonValue]] | None = None,
+# handoff_config: Dict[str, JsonValue] | None = None,
+# policy: PolicyInput = None,
+# available_models: list[str] | None = None,
+# strict_models: bool = True,
+# ):
+# """Execute tools with unified async/sync + streaming/non-streaming logic."""
+# if not model:
+# raise ValueError("model must be provided")
+#
+# # Validate tools parameter
+# if tools is not None:
+# if not isinstance(tools, list):
+# msg = "tools must be a list of callable functions or None"
+# raise ValueError(msg)
+#
+# # Check for nested lists (common mistake: tools=[[]] instead of tools=[])
+# for i, tool in enumerate(tools):
+# if not callable(tool):
+# if isinstance(tool, list):
+# msg = f"tools[{i}] is a list, not a callable function. Did you mean to pass tools={tool} instead of tools=[{tool}]?"
+# raise TypeError(msg)
+# msg = (
+# f"tools[{i}] is not callable (got {type(tool).__name__}). All tools must be callable functions."
+# )
+# raise TypeError(msg)
+#
+# # Parse model to extract name and config
+# model_name = None
+# model_list = []
+#
+# if isinstance(model, list):
+# if not model:
+# raise ValueError("model list cannot be empty")
+# # Handle list of DedalusModel or strings
+# for m in model:
+# if hasattr(m, "name"): # DedalusModel
+# model_list.append(m.name)
+# # Use config from first DedalusModel if params not explicitly set
+# if model_name is None:
+# model_name = m.name
+# temperature = temperature if temperature is not None else getattr(m, "temperature", None)
+# max_tokens = max_tokens if max_tokens is not None else getattr(m, "max_tokens", None)
+# top_p = top_p if top_p is not None else getattr(m, "top_p", None)
+# frequency_penalty = (
+# frequency_penalty
+# if frequency_penalty is not None
+# else getattr(m, "frequency_penalty", None)
+# )
+# presence_penalty = (
+# presence_penalty if presence_penalty is not None else getattr(m, "presence_penalty", None)
+# )
+# logit_bias = logit_bias if logit_bias is not None else getattr(m, "logit_bias", None)
+#
+# # Extract additional parameters from first DedalusModel
+# stream = stream if stream is not False else getattr(m, "stream", False)
+# tool_choice = tool_choice if tool_choice is not None else getattr(m, "tool_choice", None)
+#
+# # Extract Dedalus-specific extensions
+# if hasattr(m, "attributes") and m.attributes:
+# agent_attributes = agent_attributes if agent_attributes is not None else m.attributes
+#
+# # Check for unsupported parameters (only warn once for first model)
+# unsupported_params = []
+# if hasattr(m, "n") and m.n is not None:
+# unsupported_params.append("n")
+# if hasattr(m, "stop") and m.stop is not None:
+# unsupported_params.append("stop")
+# if hasattr(m, "stream_options") and m.stream_options is not None:
+# unsupported_params.append("stream_options")
+# if hasattr(m, "logprobs") and m.logprobs is not None:
+# unsupported_params.append("logprobs")
+# if hasattr(m, "top_logprobs") and m.top_logprobs is not None:
+# unsupported_params.append("top_logprobs")
+# if hasattr(m, "seed") and m.seed is not None:
+# unsupported_params.append("seed")
+# if hasattr(m, "service_tier") and m.service_tier is not None:
+# unsupported_params.append("service_tier")
+# if hasattr(m, "tools") and m.tools is not None:
+# unsupported_params.append("tools")
+# if hasattr(m, "parallel_tool_calls") and m.parallel_tool_calls is not None:
+# unsupported_params.append("parallel_tool_calls")
+# if hasattr(m, "user") and m.user is not None:
+# unsupported_params.append("user")
+# if hasattr(m, "max_completion_tokens") and m.max_completion_tokens is not None:
+# unsupported_params.append("max_completion_tokens")
+#
+# if unsupported_params:
+# import warnings
+#
+# warnings.warn(
+# f"The following DedalusModel parameters are not yet supported and will be ignored: {', '.join(unsupported_params)}. "
+# f"Support for these parameters is coming soon.",
+# UserWarning,
+# stacklevel=2,
+# )
+# else: # String
+# model_list.append(m)
+# if model_name is None:
+# model_name = m
+# elif hasattr(model, "name"): # Single DedalusModel
+# model_name = model.name
+# model_list = [model.name]
+# # Extract config from DedalusModel if params not explicitly set
+# temperature = temperature if temperature is not None else getattr(model, "temperature", None)
+# max_tokens = max_tokens if max_tokens is not None else getattr(model, "max_tokens", None)
+# top_p = top_p if top_p is not None else getattr(model, "top_p", None)
+# frequency_penalty = (
+# frequency_penalty if frequency_penalty is not None else getattr(model, "frequency_penalty", None)
+# )
+# presence_penalty = (
+# presence_penalty if presence_penalty is not None else getattr(model, "presence_penalty", None)
+# )
+# logit_bias = logit_bias if logit_bias is not None else getattr(model, "logit_bias", None)
+#
+# # Extract additional supported parameters
+# stream = stream if stream is not False else getattr(model, "stream", False)
+# tool_choice = tool_choice if tool_choice is not None else getattr(model, "tool_choice", None)
+#
+# # Extract Dedalus-specific extensions
+# if hasattr(model, "attributes") and model.attributes:
+# agent_attributes = agent_attributes if agent_attributes is not None else model.attributes
+# if hasattr(model, "metadata") and model.metadata:
+# # metadata is stored but not yet fully utilized
+# pass
+#
+# # Log warnings for unsupported parameters
+# unsupported_params = []
+# if hasattr(model, "n") and model.n is not None:
+# unsupported_params.append("n")
+# if hasattr(model, "stop") and model.stop is not None:
+# unsupported_params.append("stop")
+# if hasattr(model, "stream_options") and model.stream_options is not None:
+# unsupported_params.append("stream_options")
+# if hasattr(model, "logprobs") and model.logprobs is not None:
+# unsupported_params.append("logprobs")
+# if hasattr(model, "top_logprobs") and model.top_logprobs is not None:
+# unsupported_params.append("top_logprobs")
+# if hasattr(model, "seed") and model.seed is not None:
+# unsupported_params.append("seed")
+# if hasattr(model, "service_tier") and model.service_tier is not None:
+# unsupported_params.append("service_tier")
+# if hasattr(model, "tools") and model.tools is not None:
+# unsupported_params.append("tools")
+# if hasattr(model, "parallel_tool_calls") and model.parallel_tool_calls is not None:
+# unsupported_params.append("parallel_tool_calls")
+# if hasattr(model, "user") and model.user is not None:
+# unsupported_params.append("user")
+# if hasattr(model, "max_completion_tokens") and model.max_completion_tokens is not None:
+# unsupported_params.append("max_completion_tokens")
+#
+# if unsupported_params:
+# import warnings
+#
+# warnings.warn(
+# f"The following DedalusModel parameters are not yet supported and will be ignored: {', '.join(unsupported_params)}. "
+# f"Support for these parameters is coming soon.",
+# UserWarning,
+# stacklevel=2,
+# )
+# else: # Single string
+# model_name = model
+# model_list = [model] if model else []
+#
+# available_models = model_list if available_models is None else available_models
+#
+# model_config = _ModelConfig(
+# id=str(model_name),
+# model_list=model_list, # Pass the full model list
+# temperature=temperature,
+# max_tokens=max_tokens,
+# top_p=top_p,
+# frequency_penalty=frequency_penalty,
+# presence_penalty=presence_penalty,
+# logit_bias=logit_bias,
+# response_format=response_format,
+# agent_attributes=agent_attributes,
+# model_attributes=model_attributes,
+# tool_choice=tool_choice,
+# guardrails=guardrails,
+# handoff_config=handoff_config,
+# )
+#
+# # Serialize mcp_servers to wire format
+# serialized_mcp_servers = serialize_mcp_servers(mcp_servers)
+#
+# exec_config = _ExecutionConfig(
+# mcp_servers=serialized_mcp_servers,
+# credentials=list(credentials) if credentials else None,
+# max_steps=max_steps,
+# stream=stream,
+# transport=transport,
+# verbose=verbose if verbose is not None else self.verbose,
+# debug=debug or False,
+# on_tool_event=on_tool_event,
+# return_intent=return_intent,
+# policy=policy,
+# available_models=available_models or [],
+# strict_models=strict_models,
+# )
+#
+# tool_handler = _FunctionToolHandler(list(tools or []))
+#
+# # Handle instructions and messages parameters
+# if instructions is not None and messages is not None:
+# # instructions overrides any existing system messages
+# conversation = [{"role": "system", "content": instructions}] + [
+# msg for msg in messages if msg.get("role") != "system"
+# ]
+# elif instructions is not None:
+# # Convert instructions to system message, optionally with user input
+# if input is not None:
+# if isinstance(input, str):
+# conversation = [
+# {"role": "system", "content": instructions},
+# {"role": "user", "content": input},
+# ]
+# else:
+# conversation = [{"role": "system", "content": instructions}] + list(input)
+# else:
+# conversation = [{"role": "system", "content": instructions}]
+# elif messages is not None:
+# conversation = messages
+# elif input is not None:
+# conversation = [{"role": "user", "content": input}] if isinstance(input, str) else input
+# else:
+# raise ValueError("Must provide one of: 'instructions', 'messages', or 'input'")
+#
+# return self._execute_conversation(conversation, tool_handler, model_config, exec_config)
+#
+# def _execute_conversation(
+# self,
+# messages: list[Message],
+# tool_handler: _ToolHandler,
+# model_config: _ModelConfig,
+# exec_config: _ExecutionConfig,
+# ):
+# """Execute conversation with unified logic for all client/streaming combinations."""
+# is_async = isinstance(self.client, AsyncDedalus)
+#
+# if is_async:
+# if exec_config.stream:
+# return self._execute_streaming_async(messages, tool_handler, model_config, exec_config)
+# else:
+# return self._execute_turns_async(messages, tool_handler, model_config, exec_config)
+# else:
+# if exec_config.stream:
+# return self._execute_streaming_sync(messages, tool_handler, model_config, exec_config)
+# else:
+# return self._execute_turns_sync(messages, tool_handler, model_config, exec_config)
+#
+# async def _execute_turns_async(
+# self,
+# messages: list[Message],
+# tool_handler: _ToolHandler,
+# model_config: _ModelConfig,
+# exec_config: _ExecutionConfig,
+# ) -> _RunResult:
+# """Execute async non-streaming conversation."""
+# messages = list(messages)
+# steps = 0
+# final_text = ""
+# tool_results: list[ToolResult] = []
+# tools_called: list[str] = []
+#
+# while steps < exec_config.max_steps:
+# steps += 1
+# if exec_config.verbose:
+# print(f"Step started: Step={steps}")
+# # Show what models are configured
+# if model_config.model_list and len(model_config.model_list) > 1:
+# print(f" Available models: {model_config.model_list}")
+# print(f" Primary model: {model_config.id}")
+# else:
+# print(f" Using model: {model_config.id}")
+#
+# # Apply policy and get model params
+# policy_result = self._apply_policy(
+# exec_config.policy,
+# {
+# "step": steps,
+# "messages": messages,
+# "model": model_config.id,
+# "mcp_servers": exec_config.mcp_servers,
+# "tools": list(getattr(tool_handler, "_funcs", {}).keys()),
+# "available_models": exec_config.available_models,
+# },
+# model_config,
+# exec_config,
+# )
+#
+# # Make model call
+# current_messages = self._build_messages(messages, policy_result["prepend"], policy_result["append"])
+#
+# response = await self.client.chat.completions.create(
+# model=policy_result["model"],
+# messages=current_messages,
+# tools=tool_handler.schemas() or None,
+# mcp_servers=policy_result["mcp_servers"],
+# credentials=exec_config.credentials,
+# **{**self._mk_kwargs(model_config), **policy_result["model_kwargs"]},
+# )
+#
+# if exec_config.verbose:
+# actual_model = policy_result["model"]
+# if isinstance(actual_model, list):
+# print(f" API called with model list: {actual_model}")
+# else:
+# print(f" API called with single model: {actual_model}")
+# print(f" Response received (server says model: {getattr(response, 'model', 'unknown')})")
+# print(f" Response type: {type(response).__name__}")
+# # Surface agent timeline if server included it
+# agents_used = getattr(response, "agents_used", None)
+# if not agents_used:
+# extra = getattr(response, "__pydantic_extra__", None)
+# if isinstance(extra, dict):
+# agents_used = extra.get("agents_used")
+# if agents_used:
+# print(f" [EVENT] agents_used: {agents_used}")
+#
+# # Check if we have tool calls
+# if not hasattr(response, "choices") or not response.choices:
+# final_text = ""
+# break
+#
+# message = response.choices[0].message
+# msg = vars(message) if hasattr(message, "__dict__") else message
+# tool_calls = msg.get("tool_calls")
+# content = msg.get("content", "")
+#
+# if exec_config.verbose:
+# print(f" Response content: {content[:100] if content else '(none)'}...")
+# if tool_calls:
+# call_names = []
+# for tc in tool_calls:
+# try:
+# if isinstance(tc, dict):
+# call_names.append(tc.get("function", {}).get("name", "?"))
+# else:
+# call_names.append(getattr(getattr(tc, "function", None), "name", "?"))
+# except Exception:
+# call_names.append("?")
+# print(f" Tool calls in response: {call_names}")
+#
+# if not tool_calls:
+# final_text = content or ""
+# # Add assistant response to conversation
+# if final_text:
+# messages.append({"role": "assistant", "content": final_text})
+# break
+#
+# # Execute tools
+# tool_calls = self._extract_tool_calls(response.choices[0])
+# if exec_config.verbose:
+# print(f" Extracted {len(tool_calls)} tool calls")
+# for tc in tool_calls:
+# print(f" - {tc.get('function', {}).get('name', '?')} (id: {tc.get('id', '?')})")
+# await self._execute_tool_calls(
+# tool_calls,
+# tool_handler,
+# messages,
+# tool_results,
+# tools_called,
+# steps,
+# verbose=exec_config.verbose,
+# )
+#
+# # Extract MCP tool executions from the last response
+# mcp_results = _extract_mcp_results(response)
+#
+# return _RunResult(
+# final_output=final_text,
+# tool_results=tool_results,
+# steps_used=steps,
+# tools_called=tools_called,
+# messages=messages,
+# mcp_results=mcp_results,
+# )
+#
+# async def _execute_streaming_async(
+# self,
+# messages: list[Message],
+# tool_handler: _ToolHandler,
+# model_config: _ModelConfig,
+# exec_config: _ExecutionConfig,
+# ) -> AsyncIterator[Any]:
+# """Execute async streaming conversation."""
+# messages = list(messages)
+# steps = 0
+#
+# while steps < exec_config.max_steps:
+# steps += 1
+# if exec_config.verbose:
+# print(f"Step started: Step={steps} (max_steps={exec_config.max_steps})")
+# print(f" Starting step {steps} with {len(messages)} messages in conversation")
+# print(f" Message history:")
+# for i, msg in enumerate(messages):
+# role = msg.get("role")
+# content = str(msg.get("content", ""))[:50] + "..." if msg.get("content") else ""
+# tool_info = ""
+# if msg.get("tool_calls"):
+# tool_names = [tc.get("function", {}).get("name", "?") for tc in msg.get("tool_calls", [])]
+# tool_info = f" [calling: {', '.join(tool_names)}]"
+# elif msg.get("tool_call_id"):
+# tool_info = f" [response to: {msg.get('tool_call_id')[:8]}...]"
+# print(f" [Message {i}] {role}: {content}{tool_info}")
+#
+# # Apply policy
+# policy_result = self._apply_policy(
+# exec_config.policy,
+# {
+# "step": steps,
+# "messages": messages,
+# "model": model_config.id,
+# "mcp_servers": exec_config.mcp_servers,
+# "tools": list(getattr(tool_handler, "_funcs", {}).keys()),
+# "available_models": exec_config.available_models,
+# },
+# model_config,
+# exec_config,
+# )
+#
+# # Stream model response
+# current_messages = self._build_messages(messages, policy_result["prepend"], policy_result["append"])
+#
+# # Suppress per-message debug; keep streaming minimal
+#
+# stream = await self.client.chat.completions.create(
+# model=policy_result["model"],
+# messages=current_messages,
+# tools=tool_handler.schemas() or None,
+# mcp_servers=policy_result["mcp_servers"],
+# credentials=exec_config.credentials,
+# stream=True,
+# **{**self._mk_kwargs(model_config), **policy_result["model_kwargs"]},
+# )
+#
+# tool_calls = []
+# chunk_count = 0
+# content_chunks = 0
+# tool_call_chunks = 0
+# finish_reason = None
+# async for chunk in stream:
+# chunk_count += 1
+# if exec_config.verbose:
+# # Only surface agent_updated metadata; suppress raw chunk spam
+# extra = getattr(chunk, "__pydantic_extra__", None)
+# if isinstance(extra, dict):
+# meta = extra.get("x_dedalus_event") or extra.get("dedalus_event")
+# if isinstance(meta, dict) and meta.get("type") == "agent_updated":
+# print(f" [EVENT] agent_updated: agent={meta.get('agent')} model={meta.get('model')}")
+# if hasattr(chunk, "choices") and chunk.choices:
+# choice = chunk.choices[0]
+# delta = choice.delta
+#
+# # Check finish reason
+# if hasattr(choice, "finish_reason") and choice.finish_reason:
+# finish_reason = choice.finish_reason
+# # suppress per-chunk finish_reason spam
+#
+# # Check for tool calls
+# if hasattr(delta, "tool_calls") and delta.tool_calls:
+# tool_call_chunks += 1
+# self._accumulate_tool_calls(delta.tool_calls, tool_calls)
+# # suppress per-chunk tool_call delta spam
+#
+# # Check for content
+# if hasattr(delta, "content") and delta.content:
+# content_chunks += 1
+# # suppress per-chunk content spam
+#
+# # Check for role (suppressed)
+# if hasattr(delta, "role") and delta.role:
+# pass
+#
+# yield chunk
+#
+# if exec_config.verbose:
+# # Keep a compact end-of-stream summary
+# names = [tc.get("function", {}).get("name", "unknown") for tc in tool_calls]
+# print(f" Stream summary: chunks={chunk_count} content={content_chunks} tool_calls={tool_call_chunks}")
+# if names:
+# print(f" Tools called this turn: {names}")
+#
+# # Execute any accumulated tool calls
+# if tool_calls:
+# if exec_config.verbose:
+# print(f" Processing {len(tool_calls)} tool calls")
+#
+# # Categorize tools
+# local_names = [
+# tc["function"]["name"]
+# for tc in tool_calls
+# if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
+# ]
+# mcp_names = [
+# tc["function"]["name"]
+# for tc in tool_calls
+# if tc["function"]["name"] not in getattr(tool_handler, "_funcs", {})
+# ]
+#
+# # Check if ALL tools are MCP tools (none are local)
+# all_mcp = all(tc["function"]["name"] not in getattr(tool_handler, "_funcs", {}) for tc in tool_calls)
+#
+# # Check if stream already contains content (MCP results)
+# has_streamed_content = content_chunks > 0
+#
+# if exec_config.verbose:
+# print(f" Local tools used: {local_names}")
+# print(f" Server tools used: {mcp_names}")
+#
+# # When MCP tools are involved and content was streamed, we're done
+# if mcp_names and has_streamed_content:
+# if exec_config.verbose:
+# print(f" MCP tools called and content streamed - response complete, breaking loop")
+# break
+#
+# if all_mcp:
+# # All tools are MCP - the response should be streamed
+# if exec_config.verbose:
+# print(f" All tools are MCP, expecting streamed response")
+# # Don't break here - let the next iteration handle it
+# else:
+# # We have at least one local tool
+# # Filter to only include local tool calls in the assistant message
+# local_only_tool_calls = [
+# tc for tc in tool_calls if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
+# ]
+# messages.append({"role": "assistant", "tool_calls": local_only_tool_calls})
+# if exec_config.verbose:
+# print(
+# f" Added assistant message with {len(local_only_tool_calls)} local tool calls (filtered from {len(tool_calls)} total)"
+# )
+#
+# # Execute only local tools
+# for tc in tool_calls:
+# fn_name = tc["function"]["name"]
+# fn_args_str = tc["function"]["arguments"]
+#
+# if fn_name in getattr(tool_handler, "_funcs", {}):
+# # Local tool
+# try:
+# fn_args = json.loads(fn_args_str)
+# except json.JSONDecodeError:
+# fn_args = {}
+#
+# try:
+# result = await tool_handler.exec(fn_name, fn_args)
+# messages.append(
+# {
+# "role": "tool",
+# "tool_call_id": tc["id"],
+# "content": str(result),
+# }
+# )
+# if exec_config.verbose:
+# print(f" Executed local tool {fn_name}: {str(result)[:50]}...")
+# except Exception as e:
+# messages.append(
+# {
+# "role": "tool",
+# "tool_call_id": tc["id"],
+# "content": f"Error: {str(e)}",
+# }
+# )
+# if exec_config.verbose:
+# print(f" Error executing local tool {fn_name}: {e}")
+# else:
+# # MCP tool - DON'T add any message
+# # The API server should handle this
+# if exec_config.verbose:
+# print(f" MCP tool {fn_name} - skipping (server will handle)")
+#
+# if exec_config.verbose:
+# print(f" Messages after tool execution: {len(messages)}")
+#
+# # Only continue if we have NO MCP tools
+# if not mcp_names:
+# print(f" No MCP tools, continuing loop to step {steps + 1}...")
+# else:
+# print(f" MCP tools present, expecting response in next iteration")
+#
+# # Continue loop only if we need another response
+# if exec_config.verbose:
+# print(f" Tool processing complete")
+# else:
+# if exec_config.verbose:
+# print(f" No tool calls found, breaking out of loop")
+# break
+#
+# if exec_config.verbose:
+# print(f"\n[DEBUG] Exited main loop after {steps} steps")
+#
+# def _execute_turns_sync(
+# self,
+# messages: list[Message],
+# tool_handler: _ToolHandler,
+# model_config: _ModelConfig,
+# exec_config: _ExecutionConfig,
+# ) -> _RunResult:
+# """Execute sync non-streaming conversation."""
+# messages = list(messages)
+# steps = 0
+# final_text = ""
+# tool_results: list[ToolResult] = []
+# tools_called: list[str] = []
+#
+# while steps < exec_config.max_steps:
+# steps += 1
+# if exec_config.verbose:
+# print(f"Step started: Step={steps}")
+# # Show what models are configured
+# if model_config.model_list and len(model_config.model_list) > 1:
+# print(f" Available models: {model_config.model_list}")
+# print(f" Primary model: {model_config.id}")
+# else:
+# print(f" Using model: {model_config.id}")
+#
+# # Apply policy
+# policy_result = self._apply_policy(
+# exec_config.policy,
+# {
+# "step": steps,
+# "messages": messages,
+# "model": model_config.id,
+# "mcp_servers": exec_config.mcp_servers,
+# "tools": list(getattr(tool_handler, "_funcs", {}).keys()),
+# "available_models": exec_config.available_models,
+# },
+# model_config,
+# exec_config,
+# )
+#
+# # Make model call
+# current_messages = self._build_messages(messages, policy_result["prepend"], policy_result["append"])
+#
+# if exec_config.verbose:
+# actual_model = policy_result["model"]
+# if isinstance(actual_model, list):
+# print(f" API called with model list: {actual_model}")
+# else:
+# print(f" API called with single model: {actual_model}")
+#
+# response = self.client.chat.completions.create(
+# model=policy_result["model"],
+# messages=current_messages,
+# tools=tool_handler.schemas() or None,
+# mcp_servers=policy_result["mcp_servers"],
+# credentials=exec_config.credentials,
+# **{**self._mk_kwargs(model_config), **policy_result["model_kwargs"]},
+# )
+#
+# if exec_config.verbose:
+# print(f" Response received (server says model: {getattr(response, 'model', 'unknown')})")
+# print(f" Response type: {type(response).__name__}")
+#
+# # Check if we have tool calls
+# if not hasattr(response, "choices") or not response.choices:
+# final_text = ""
+# break
+#
+# message = response.choices[0].message
+# msg = vars(message) if hasattr(message, "__dict__") else message
+# tool_calls = msg.get("tool_calls")
+# content = msg.get("content", "")
+#
+# if exec_config.verbose:
+# print(f" Response content: {content[:100] if content else '(none)'}...")
+# if tool_calls:
+# tool_names = [tc.get("function", {}).get("name", "?") for tc in tool_calls]
+# print(f" 🔧 Tool calls in response: {tool_names}")
+#
+# if not tool_calls:
+# final_text = content or ""
+# # Add assistant response to conversation
+# if final_text:
+# messages.append({"role": "assistant", "content": final_text})
+# break
+#
+# # Execute tools
+# tool_calls = self._extract_tool_calls(response.choices[0])
+# self._execute_tool_calls_sync(tool_calls, tool_handler, messages, tool_results, tools_called, steps)
+#
+# # Extract MCP tool executions from the last response
+# mcp_results = _extract_mcp_results(response)
+#
+# return _RunResult(
+# final_output=final_text,
+# tool_results=tool_results,
+# steps_used=steps,
+# tools_called=tools_called,
+# messages=messages,
+# mcp_results=mcp_results,
+# )
+#
+# def _execute_streaming_sync(
+# self,
+# messages: list[Message],
+# tool_handler: _ToolHandler,
+# model_config: _ModelConfig,
+# exec_config: _ExecutionConfig,
+# ) -> Iterator[Any]:
+# """Execute sync streaming conversation."""
+# messages = list(messages)
+# steps = 0
+#
+# while steps < exec_config.max_steps:
+# steps += 1
+# if exec_config.verbose:
+# print(f"Step started: Step={steps} (max_steps={exec_config.max_steps})")
+# print(f" Starting step {steps} with {len(messages)} messages in conversation")
+# print(f" Message history:")
+# for i, msg in enumerate(messages):
+# role = msg.get("role")
+# content = str(msg.get("content", ""))[:50] + "..." if msg.get("content") else ""
+# tool_info = ""
+# if msg.get("tool_calls"):
+# tool_names = [tc.get("function", {}).get("name", "?") for tc in msg.get("tool_calls", [])]
+# tool_info = f" [calling: {', '.join(tool_names)}]"
+# elif msg.get("tool_call_id"):
+# tool_info = f" [response to: {msg.get('tool_call_id')[:8]}...]"
+# print(f" [Message {i}] {role}: {content}{tool_info}")
+#
+# # Apply policy
+# policy_result = self._apply_policy(
+# exec_config.policy,
+# {
+# "step": steps,
+# "messages": messages,
+# "model": model_config.id,
+# "mcp_servers": exec_config.mcp_servers,
+# "tools": list(getattr(tool_handler, "_funcs", {}).keys()),
+# "available_models": exec_config.available_models,
+# },
+# model_config,
+# exec_config,
+# )
+#
+# # Stream model response
+# current_messages = self._build_messages(messages, policy_result["prepend"], policy_result["append"])
+#
+# if exec_config.verbose:
+# print(f" Messages being sent to API:")
+# for i, msg in enumerate(current_messages):
+# content_preview = str(msg.get("content", ""))[:100]
+# tool_call_info = ""
+# if msg.get("tool_calls"):
+# tool_names = [tc.get("function", {}).get("name", "unknown") for tc in msg.get("tool_calls", [])]
+# tool_call_info = f" tool_calls=[{', '.join(tool_names)}]"
+# print(f" [{i}] {msg.get('role')}: {content_preview}...{tool_call_info}")
+# print(f" MCP servers: {policy_result['mcp_servers']}")
+# print(f" Local tools available: {list(getattr(tool_handler, '_funcs', {}).keys())}")
+#
+# stream = self.client.chat.completions.create(
+# model=policy_result["model"],
+# messages=current_messages,
+# tools=tool_handler.schemas() or None,
+# mcp_servers=policy_result["mcp_servers"],
+# credentials=exec_config.credentials,
+# stream=True,
+# **{**self._mk_kwargs(model_config), **policy_result["model_kwargs"]},
+# )
+#
+# tool_calls = []
+# chunk_count = 0
+# content_chunks = 0
+# tool_call_chunks = 0
+# finish_reason = None
+# accumulated_content = ""
+#
+# for chunk in stream:
+# chunk_count += 1
+# if hasattr(chunk, "choices") and chunk.choices:
+# choice = chunk.choices[0]
+# delta = choice.delta
+#
+# # Check finish reason
+# if hasattr(choice, "finish_reason") and choice.finish_reason:
+# finish_reason = choice.finish_reason
+#
+# # Check for tool calls
+# if hasattr(delta, "tool_calls") and delta.tool_calls:
+# tool_call_chunks += 1
+# self._accumulate_tool_calls(delta.tool_calls, tool_calls)
+# if exec_config.verbose:
+# # Show tool calls in a more readable format
+# for tc_delta in delta.tool_calls:
+# if (
+# hasattr(tc_delta, "function")
+# and hasattr(tc_delta.function, "name")
+# and tc_delta.function.name
+# ):
+# print(f"-> Calling {tc_delta.function.name}")
+#
+# # Check for content
+# if hasattr(delta, "content") and delta.content:
+# content_chunks += 1
+# accumulated_content += delta.content
+#
+# yield chunk
+#
+# if exec_config.verbose:
+# if accumulated_content:
+# print() # New line after streamed content
+# if tool_calls:
+# print(f"\nReceived {len(tool_calls)} tool call(s)")
+# for i, tc in enumerate(tool_calls, 1):
+# tool_name = tc.get("function", {}).get("name", "unknown")
+# # Clean up the tool name for display
+# display_name = tool_name.replace("transfer_to_", "").replace("_", " ").title()
+# print(f" {i}. {display_name}")
+# else:
+# print(f"\n✓ Response complete ({content_chunks} content chunks)")
+#
+# # Execute any accumulated tool calls
+# if tool_calls:
+# if exec_config.verbose:
+# print(f" Processing {len(tool_calls)} tool calls")
+#
+# # Categorize tools
+# local_names = [
+# tc["function"]["name"]
+# for tc in tool_calls
+# if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
+# ]
+# mcp_names = [
+# tc["function"]["name"]
+# for tc in tool_calls
+# if tc["function"]["name"] not in getattr(tool_handler, "_funcs", {})
+# ]
+#
+# # Check if ALL tools are MCP tools (none are local)
+# all_mcp = all(tc["function"]["name"] not in getattr(tool_handler, "_funcs", {}) for tc in tool_calls)
+#
+# # Check if stream already contains content (MCP results)
+# has_streamed_content = content_chunks > 0
+#
+# if exec_config.verbose:
+# print(f" Local tools: {local_names}")
+# print(f" Server tools: {mcp_names}")
+#
+# # When MCP tools are involved and content was streamed, we're done
+# if mcp_names and has_streamed_content:
+# if exec_config.verbose:
+# print(f" MCP tools called and content streamed - response complete, breaking loop")
+# break
+#
+# if all_mcp:
+# # All tools are MCP - the response should be streamed
+# if exec_config.verbose:
+# print(f" All tools are MCP, expecting streamed response")
+# # Don't break here - let the next iteration handle it
+# else:
+# # We have at least one local tool
+# # Filter to only include local tool calls in the assistant message
+# local_only_tool_calls = [
+# tc for tc in tool_calls if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
+# ]
+# messages.append({"role": "assistant", "tool_calls": local_only_tool_calls})
+# if exec_config.verbose:
+# print(
+# f" Added assistant message with {len(local_only_tool_calls)} local tool calls (filtered from {len(tool_calls)} total)"
+# )
+#
+# # Execute only local tools
+# for tc in tool_calls:
+# fn_name = tc["function"]["name"]
+# fn_args_str = tc["function"]["arguments"]
+#
+# if fn_name in getattr(tool_handler, "_funcs", {}):
+# # Local tool
+# try:
+# fn_args = json.loads(fn_args_str)
+# except json.JSONDecodeError:
+# fn_args = {}
+#
+# try:
+# result = tool_handler.exec_sync(fn_name, fn_args)
+# messages.append(
+# {
+# "role": "tool",
+# "tool_call_id": tc["id"],
+# "content": str(result),
+# }
+# )
+# if exec_config.verbose:
+# print(f" Executed local tool {fn_name}: {str(result)[:50]}...")
+# except Exception as e:
+# messages.append(
+# {
+# "role": "tool",
+# "tool_call_id": tc["id"],
+# "content": f"Error: {str(e)}",
+# }
+# )
+# if exec_config.verbose:
+# print(f" Error executing local tool {fn_name}: {e}")
+# else:
+# # MCP tool - DON'T add any message
+# # The API server should handle this
+# if exec_config.verbose:
+# print(f" MCP tool {fn_name} - skipping (server will handle)")
+#
+# if exec_config.verbose:
+# print(f" Messages after tool execution: {len(messages)}")
+#
+# # Only continue if we have NO MCP tools
+# if not mcp_names:
+# print(f" No MCP tools, continuing loop to step {steps + 1}...")
+# else:
+# print(f" MCP tools present, expecting response in next iteration")
+#
+# # Continue loop only if we need another response
+# if exec_config.verbose:
+# print(f" Tool processing complete")
+# else:
+# if exec_config.verbose:
+# print(f" No tool calls found, breaking out of loop")
+# break
+#
+# def _apply_policy(
+# self,
+# policy: PolicyInput,
+# context: PolicyContext,
+# model_config: _ModelConfig,
+# exec_config: _ExecutionConfig,
+# ) -> Dict[str, Any]:
+# """Apply policy and return unified configuration."""
+# pol = _process_policy(policy, context)
+#
+# # Start with defaults
+# result = {
+# "model_id": model_config.id,
+# "model": model_config.model_list
+# if model_config.model_list
+# else model_config.id, # Use full list when available
+# "mcp_servers": list(exec_config.mcp_servers),
+# "model_kwargs": {},
+# "prepend": [],
+# "append": [],
+# }
+#
+# if pol:
+# # Handle model override
+# requested_model = pol.get("model")
+# if requested_model and exec_config.strict_models and exec_config.available_models:
+# if isinstance(requested_model, list):
+# # Filter to only available models
+# valid_models = [m for m in requested_model if m in exec_config.available_models]
+# if valid_models:
+# result["model"] = valid_models
+# result["model_id"] = str(valid_models[0])
+# elif exec_config.verbose:
+# print(f"[RUNNER] Policy requested unavailable models {requested_model}, ignoring")
+# elif requested_model not in exec_config.available_models:
+# if exec_config.verbose:
+# print(f"[RUNNER] Policy requested unavailable model '{requested_model}', ignoring")
+# else:
+# result["model_id"] = str(requested_model)
+# result["model"] = str(requested_model)
+# elif requested_model:
+# if isinstance(requested_model, list):
+# result["model"] = requested_model
+# result["model_id"] = str(requested_model[0]) if requested_model else result["model_id"]
+# else:
+# result["model_id"] = str(requested_model)
+# result["model"] = str(requested_model)
+#
+# # Handle other policy settings
+# result["mcp_servers"] = list(pol.get("mcp_servers", result["mcp_servers"]))
+# result["model_kwargs"] = dict(pol.get("model_settings", {}))
+# result["prepend"] = list(pol.get("message_prepend", []))
+# result["append"] = list(pol.get("message_append", []))
+#
+# # Handle max_steps update
+# if pol.get("max_steps") is not None:
+# try:
+# exec_config.max_steps = int(pol.get("max_steps"))
+# except Exception:
+# pass
+#
+# return result
+#
+# def _build_messages(self, messages: list[Message], prepend: list[Message], append: list[Message]) -> list[Message]:
+# """Build final message list with prepend/append."""
+# return (prepend + messages + append) if (prepend or append) else messages
+#
+# def _extract_tool_calls(self, choice) -> list[ToolCall]:
+# """Extract tool calls from response choice."""
+# if not hasattr(choice, "message"):
+# return []
+#
+# message = choice.message
+# msg = vars(message) if hasattr(message, "__dict__") else message
+# tool_calls = msg.get("tool_calls", [])
+#
+# if not tool_calls:
+# return []
+#
+# calls = []
+# for tc in tool_calls:
+# tc_dict = vars(tc) if hasattr(tc, "__dict__") else tc
+# fn = tc_dict.get("function", {})
+# fn_dict = vars(fn) if hasattr(fn, "__dict__") else fn
+#
+# calls.append(
+# {
+# "id": tc_dict.get("id", ""),
+# "type": tc_dict.get("type", "function"),
+# "function": {
+# "name": fn_dict.get("name", ""),
+# "arguments": fn_dict.get("arguments", "{}"),
+# },
+# }
+# )
+# return calls
+#
+# async def _execute_tool_calls(
+# self,
+# tool_calls: list[ToolCall],
+# tool_handler: _ToolHandler,
+# messages: list[Message],
+# tool_results: list[ToolResult],
+# tools_called: list[str],
+# step: int,
+# verbose: bool = False,
+# ):
+# """Execute tool calls asynchronously."""
+# if verbose:
+# print(f" _execute_tool_calls: Processing {len(tool_calls)} tool calls")
+#
+# # Record single assistant message with ALL tool calls (OpenAI format)
+# messages.append({"role": "assistant", "tool_calls": list(tool_calls)})
+#
+# for i, tc in enumerate(tool_calls):
+# fn_name = tc["function"]["name"]
+# fn_args_str = tc["function"]["arguments"]
+#
+# if verbose:
+# print(f" Tool {i + 1}/{len(tool_calls)}: {fn_name}")
+#
+# try:
+# fn_args = json.loads(fn_args_str)
+# except json.JSONDecodeError:
+# fn_args = {}
+#
+# try:
+# result = await tool_handler.exec(fn_name, fn_args)
+# tool_results.append({"name": fn_name, "result": result, "step": step})
+# tools_called.append(fn_name)
+# messages.append({"role": "tool", "tool_call_id": tc["id"], "content": str(result)})
+#
+# if verbose:
+# print(f" Tool {fn_name} executed successfully: {str(result)[:50]}...")
+# except Exception as e:
+# error_result = {"error": str(e), "name": fn_name, "step": step}
+# tool_results.append(error_result)
+# messages.append(
+# {
+# "role": "tool",
+# "tool_call_id": tc["id"],
+# "content": f"Error: {str(e)}",
+# }
+# )
+#
+# if verbose:
+# print(f" Tool {fn_name} failed with error: {e}")
+# print(f" Error type: {type(e).__name__}")
+#
+# def _execute_tool_calls_sync(
+# self,
+# tool_calls: list[ToolCall],
+# tool_handler: _ToolHandler,
+# messages: list[Message],
+# tool_results: list[ToolResult],
+# tools_called: list[str],
+# step: int,
+# ):
+# """Execute tool calls synchronously."""
+# # Record single assistant message with ALL tool calls (OpenAI format)
+# messages.append({"role": "assistant", "tool_calls": list(tool_calls)})
+#
+# for tc in tool_calls:
+# fn_name = tc["function"]["name"]
+# fn_args_str = tc["function"]["arguments"]
+#
+# try:
+# fn_args = json.loads(fn_args_str)
+# except json.JSONDecodeError:
+# fn_args = {}
+#
+# try:
+# result = tool_handler.exec_sync(fn_name, fn_args)
+# tool_results.append({"name": fn_name, "result": result, "step": step})
+# tools_called.append(fn_name)
+# messages.append({"role": "tool", "tool_call_id": tc["id"], "content": str(result)})
+# except Exception as e:
+# error_result = {"error": str(e), "name": fn_name, "step": step}
+# tool_results.append(error_result)
+# messages.append(
+# {
+# "role": "tool",
+# "tool_call_id": tc["id"],
+# "content": f"Error: {str(e)}",
+# }
+# )
+#
+# def _accumulate_tool_calls(self, deltas, acc: list[ToolCall]) -> None:
+# """Accumulate streaming tool call deltas."""
+# for delta in deltas:
+# index = getattr(delta, "index", 0)
+#
+# # Ensure we have enough entries in acc
+# while len(acc) <= index:
+# acc.append(
+# {
+# "id": "",
+# "type": "function",
+# "function": {"name": "", "arguments": ""},
+# }
+# )
+#
+# if hasattr(delta, "id") and delta.id:
+# acc[index]["id"] = delta.id
+# if hasattr(delta, "function"):
+# fn = delta.function
+# if hasattr(fn, "name") and fn.name:
+# acc[index]["function"]["name"] = fn.name
+# if hasattr(fn, "arguments") and fn.arguments:
+# acc[index]["function"]["arguments"] += fn.arguments
+#
+# @staticmethod
+# def _mk_kwargs(mc: _ModelConfig) -> Dict[str, Any]:
+# """Convert model config to kwargs for client call."""
+# from ..._utils import is_given
+# from ...lib._parsing import type_to_response_format_param
+#
+# d = asdict(mc)
+# d.pop("id", None) # Remove id since it's passed separately
+# d.pop("model_list", None) # Remove model_list since it's not an API parameter
+#
+# # Convert Pydantic model to dict schema if needed
+# if "response_format" in d and d["response_format"] is not None:
+# converted = type_to_response_format_param(d["response_format"])
+# d["response_format"] = converted if is_given(converted) else None
+#
+# return {k: v for k, v in d.items() if v is not None}
+#
+# ===========================================================================
+# END: production version
+# ===========================================================================
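Note: a minimal sketch of the server-result injection this patch adds to both streaming paths (the mcp_tool_results_from_server hunks near the top of the diff). Shapes follow what the diff shows: tool calls in OpenAI function-call format, server results as dicts like {"call_id": ..., "result": ...}. The helper name and the local_funcs mapping are stand-ins, not SDK API:

    import json

    def inject_server_results(messages, tool_calls, server_results, local_funcs):
        # Record the assistant turn with ALL tool calls, local and server-side
        # alike, so every later tool message has a matching tool_call_id.
        messages.append({"role": "assistant", "tool_calls": tool_calls})
        for tc in tool_calls:
            if tc["function"]["name"] in local_funcs:
                continue  # local tools run through the scheduler, not injected here
            call_id = tc["id"]
            # Pair the server-emitted result with its call by id.
            match = next((r for r in server_results if r.get("call_id") == call_id), None)
            if match is not None:
                content = json.dumps(match["result"]) if match.get("result") is not None else ""
                messages.append({"role": "tool", "tool_call_id": call_id, "content": content})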
From ad7379b033a1eb8216147f823d30c71fdbf815c4 Mon Sep 17 00:00:00 2001
From: aryanma
Date: Fri, 6 Feb 2026 22:10:27 -0800
Subject: [PATCH 08/23] fix(runner): skip early break when local tools need
execution alongside MCP
---
src/dedalus_labs/lib/runner/core.py | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/src/dedalus_labs/lib/runner/core.py b/src/dedalus_labs/lib/runner/core.py
index c6a2ac4..47aeaa2 100644
--- a/src/dedalus_labs/lib/runner/core.py
+++ b/src/dedalus_labs/lib/runner/core.py
@@ -765,10 +765,10 @@ async def _execute_streaming_async(
print(f" Local tools used: {local_names}")
print(f" Server tools used: {mcp_names}")
- # When MCP tools are involved and content was streamed, we're done
- if mcp_names and has_streamed_content:
+        # When ONLY MCP tools were called (no local ones) and content was streamed, we're done
+ if mcp_names and has_streamed_content and not local_names:
if exec_config.verbose:
- print(f" MCP tools called and content streamed - response complete, breaking loop")
+ print(f" MCP-only tools called and content streamed - response complete, breaking loop")
break
if all_mcp:
@@ -1083,10 +1083,10 @@ def _execute_streaming_sync(
print(f" Local tools: {local_names}")
print(f" Server tools: {mcp_names}")
- # When MCP tools are involved and content was streamed, we're done
- if mcp_names and has_streamed_content:
+        # When ONLY MCP tools were called (no local ones) and content was streamed, we're done
+ if mcp_names and has_streamed_content and not local_names:
if exec_config.verbose:
- print(f" MCP tools called and content streamed - response complete, breaking loop")
+ print(f" MCP-only tools called and content streamed - response complete, breaking loop")
break
if all_mcp:
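Note: condensed, the predicate this patch fixes looks like the sketch below; before the change the `not local_names` guard was absent, so a mixed turn that streamed any content broke out of the loop with local tools still pending. Names here are illustrative, not SDK API:

    def should_stop(tool_calls, local_funcs, content_chunks):
        names = [tc["function"]["name"] for tc in tool_calls]
        local_names = [n for n in names if n in local_funcs]
        mcp_names = [n for n in names if n not in local_funcs]
        # Stop only when server-side tools produced the streamed answer AND
        # there is no local work left to execute.
        return bool(mcp_names) and content_chunks > 0 and not local_names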
From 77e5958beb3699e3fd08f8f2fd0b6ecb2932d010 Mon Sep 17 00:00:00 2001
From: aryanma
Date: Fri, 6 Feb 2026 22:55:12 -0800
Subject: [PATCH 09/23] fix(runner): preserve thought_signature in tool call
accumulation and extraction
---
src/dedalus_labs/lib/runner/core.py | 25 +++++++++++++++----------
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/src/dedalus_labs/lib/runner/core.py b/src/dedalus_labs/lib/runner/core.py
index 47aeaa2..b0b3796 100644
--- a/src/dedalus_labs/lib/runner/core.py
+++ b/src/dedalus_labs/lib/runner/core.py
@@ -1223,16 +1223,18 @@ def _extract_tool_calls(self, choice) -> list[ToolCall]:
fn = tc_dict.get("function", {})
fn_dict = vars(fn) if hasattr(fn, "__dict__") else fn
- calls.append(
- {
- "id": tc_dict.get("id", ""),
- "type": tc_dict.get("type", "function"),
- "function": {
- "name": fn_dict.get("name", ""),
- "arguments": fn_dict.get("arguments", "{}"),
- },
- }
- )
+ tc_out: ToolCall = {
+ "id": tc_dict.get("id", ""),
+ "type": tc_dict.get("type", "function"),
+ "function": {
+ "name": fn_dict.get("name", ""),
+ "arguments": fn_dict.get("arguments", "{}"),
+ },
+ }
+ thought_sig = tc_dict.get("thought_signature")
+ if thought_sig:
+ tc_out["thought_signature"] = thought_sig
+ calls.append(tc_out)
return calls
async def _execute_tool_calls(
@@ -1315,6 +1317,9 @@ def _accumulate_tool_calls(self, deltas, acc: list[ToolCall]) -> None:
acc[index]["function"]["name"] = fn.name
if hasattr(fn, "arguments") and fn.arguments:
acc[index]["function"]["arguments"] += fn.arguments
+ thought_sig = getattr(delta, "thought_signature", None)
+ if thought_sig:
+ acc[index]["thought_signature"] = thought_sig
@staticmethod
def _mk_kwargs(mc: _ModelConfig) -> Dict[str, Any]:
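Note: for context, the accumulator this patch extends merges per-chunk deltas into complete tool calls; the new lines carry the provider's thought_signature through so it can be replayed with the call. A sketch under that reading (delta is whatever object the stream yields):

    def accumulate(delta, acc):
        index = getattr(delta, "index", 0)
        # Grow the accumulator until the delta's slot exists.
        while len(acc) <= index:
            acc.append({"id": "", "type": "function",
                        "function": {"name": "", "arguments": ""}})
        if getattr(delta, "id", None):
            acc[index]["id"] = delta.id
        fn = getattr(delta, "function", None)
        if fn is not None:
            if getattr(fn, "name", None):
                acc[index]["function"]["name"] = fn.name
            if getattr(fn, "arguments", None):
                acc[index]["function"]["arguments"] += fn.arguments
        # New in this patch: preserve the reasoning signature if the
        # provider attached one to the delta.
        sig = getattr(delta, "thought_signature", None)
        if sig:
            acc[index]["thought_signature"] = sig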
From 5d0ce6d829684e54dcbccbd6c006373fd0fd855b Mon Sep 17 00:00:00 2001
From: Windsor
Date: Sat, 7 Feb 2026 01:57:24 -0800
Subject: [PATCH 10/23] fix(runner): allow local tool execution in mixed
MCP+local scenarios
---
src/dedalus_labs/lib/runner/core.py | 40 ++++++++---------------------
1 file changed, 11 insertions(+), 29 deletions(-)
diff --git a/src/dedalus_labs/lib/runner/core.py b/src/dedalus_labs/lib/runner/core.py
index b02a557..94f0acf 100644
--- a/src/dedalus_labs/lib/runner/core.py
+++ b/src/dedalus_labs/lib/runner/core.py
@@ -754,19 +754,14 @@ async def _execute_streaming_async(
print(f" Local tools used: {local_names}")
print(f" Server tools used: {mcp_names}")
- # When MCP tools are involved and content was streamed, we're done
- if mcp_names and has_streamed_content:
+        # All tools are server-side and their results have already been streamed.
+ if all_mcp and has_streamed_content:
if exec_config.verbose:
- print(f" MCP tools called and content streamed - response complete, breaking loop")
+ print(f" All tools are MCP and content streamed, breaking loop")
break
- if all_mcp:
- # All tools are MCP - the response should be streamed
- if exec_config.verbose:
- print(f" All tools are MCP, expecting streamed response")
- # Don't break here - let the next iteration handle it
- else:
- # We have at least one local tool — delegate to scheduler.
+        # At least one local tool exists. Execute via the dependency-aware scheduler.
+ if not all_mcp:
local_only = [
tc for tc in tool_calls if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
]
@@ -786,10 +781,6 @@ async def _execute_streaming_async(
if exec_config.verbose:
print(f" Messages after tool execution: {len(messages)}")
-
- # Continue loop only if we need another response
- if exec_config.verbose:
- print(f" Tool processing complete")
else:
if exec_config.verbose:
print(f" No tool calls found, breaking out of loop")
@@ -1046,19 +1037,14 @@ def _execute_streaming_sync(
print(f" Local tools: {local_names}")
print(f" Server tools: {mcp_names}")
- # When MCP tools are involved and content was streamed, we're done
- if mcp_names and has_streamed_content:
+        # All tools are server-side and their results have already been streamed.
+ if all_mcp and has_streamed_content:
if exec_config.verbose:
- print(f" MCP tools called and content streamed - response complete, breaking loop")
+ print(f" All tools are MCP and content streamed, breaking loop")
break
- if all_mcp:
- # All tools are MCP - the response should be streamed
- if exec_config.verbose:
- print(f" All tools are MCP, expecting streamed response")
- # Don't break here - let the next iteration handle it
- else:
- # We have at least one local tool — delegate to scheduler.
+        # At least one local tool exists. Execute via the dependency-aware scheduler.
+ if not all_mcp:
local_only = [
tc for tc in tool_calls if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
]
@@ -1076,11 +1062,7 @@ def _execute_streaming_sync(
)
if exec_config.verbose:
- print(f" Messages after tool execution: {len(messages)}")
-
- # Continue loop only if we need another response
- if exec_config.verbose:
- print(f" Tool processing complete")
+ print(f" Messages after tool execution: {len(messages)}")
else:
if exec_config.verbose:
print(f" No tool calls found, breaking out of loop")
From 59350e37cdd0b825addb7c40c2be7887ed83586f Mon Sep 17 00:00:00 2001
From: aryanma
Date: Sat, 7 Feb 2026 02:50:45 -0800
Subject: [PATCH 11/23] chore(runner): strip commented-out production version
and banner comments from core.py
---
src/dedalus_labs/lib/runner/core.py | 1379 ---------------------------
1 file changed, 1379 deletions(-)
diff --git a/src/dedalus_labs/lib/runner/core.py b/src/dedalus_labs/lib/runner/core.py
index b0b3796..b91e0da 100644
--- a/src/dedalus_labs/lib/runner/core.py
+++ b/src/dedalus_labs/lib/runner/core.py
@@ -1,6 +1,3 @@
-# ===========================================================================
-# BEGIN: feat/dep-graph-scheduler version (ACTIVE)
-# ===========================================================================
# ==============================================================================
# © 2025 Dedalus Labs, Inc. and affiliates
# Licensed under MIT
@@ -1335,1379 +1332,3 @@ def _mk_kwargs(mc: _ModelConfig) -> Dict[str, Any]:
kwargs["response_format"] = converted if is_given(converted) else None
return {k: v for k, v in kwargs.items() if v is not None}
-
-# ===========================================================================
-# END: feat/dep-graph-scheduler version
-# ===========================================================================
-
-# ===========================================================================
-# BEGIN: production version (COMMENTED OUT)
-# ===========================================================================
-# # © 2025 Dedalus Labs, Inc. and affiliates
-# # Licensed under MIT
-# # github.com/dedalus-labs/dedalus-sdk-python/LICENSE
-#
-# from __future__ import annotations
-#
-# import json
-# import asyncio
-# import inspect
-# from typing import (
-# TYPE_CHECKING,
-# Any,
-# Dict,
-# Literal,
-# Callable,
-# Iterator,
-# Protocol,
-# AsyncIterator,
-# Sequence,
-# Union,
-# )
-# from dataclasses import field, asdict, dataclass
-#
-# if TYPE_CHECKING:
-# from ...types.shared.dedalus_model import DedalusModel
-#
-# from ..._client import Dedalus, AsyncDedalus
-#
-# from .types import Message, ToolCall, JsonValue, ToolResult, PolicyInput, PolicyContext
-# from ...types.shared import MCPToolResult
-# from ..mcp import serialize_mcp_servers, MCPServerProtocol
-#
-# # Type alias for mcp_servers parameter - accepts strings, server objects, or mixed lists
-# MCPServersInput = Union[
-# str, # Single slug or URL
-# MCPServerProtocol, # MCP server object
-# Sequence[Union[str, MCPServerProtocol, Dict[str, Any]]], # Mixed list
-# None,
-# ]
-# from ..utils._schemas import to_schema
-#
-#
-# def _process_policy(policy: PolicyInput, context: PolicyContext) -> Dict[str, JsonValue]:
-# """Process policy, handling all possible input types safely."""
-# if policy is None:
-# return {}
-#
-# if callable(policy):
-# try:
-# result = policy(context)
-# return result if isinstance(result, dict) else {}
-# except Exception:
-# return {}
-#
-# if isinstance(policy, dict):
-# try:
-# return dict(policy)
-# except Exception:
-# return {}
-#
-# return {}
-#
-#
-# def _extract_mcp_results(response: Any) -> list[MCPToolResult]:
-# """Extract MCP tool results from API response."""
-# mcp_results = getattr(response, "mcp_tool_results", None)
-# if not mcp_results:
-# return []
-# return [item if isinstance(item, MCPToolResult) else MCPToolResult.model_validate(item) for item in mcp_results]
-#
-#
-# class _ToolHandler(Protocol):
-# def schemas(self) -> list[Dict]: ...
-# async def exec(self, name: str, args: Dict[str, JsonValue]) -> JsonValue: ...
-# def exec_sync(self, name: str, args: Dict[str, JsonValue]) -> JsonValue: ...
-#
-#
-# class _FunctionToolHandler:
-# """Converts Python functions to tool handler via introspection."""
-#
-# def __init__(self, funcs: list[Callable[..., Any]]):
-# self._funcs = {f.__name__: f for f in funcs}
-#
-# def schemas(self) -> list[Dict]:
-# """Build OpenAI-compatible function schemas via introspection."""
-# out: list[Dict[str, Any]] = []
-# for fn in self._funcs.values():
-# try:
-# out.append(to_schema(fn))
-# except Exception:
-# continue
-# return out
-#
-# async def exec(self, name: str, args: Dict[str, JsonValue]) -> JsonValue:
-# """Execute tool by name with given args (async)."""
-# fn = self._funcs[name]
-# if inspect.iscoroutinefunction(fn):
-# return await fn(**args)
-# # asyncio.to_thread is Python 3.9+, use run_in_executor for 3.8 compat
-# loop = asyncio.get_event_loop()
-# # Use partial to properly pass keyword arguments
-# from functools import partial
-#
-# return await loop.run_in_executor(None, partial(fn, **args))
-#
-# def exec_sync(self, name: str, args: Dict[str, JsonValue]) -> JsonValue:
-# """Execute tool by name with given args (sync)."""
-# fn = self._funcs[name]
-# if inspect.iscoroutinefunction(fn):
-# loop = asyncio.new_event_loop()
-# asyncio.set_event_loop(loop)
-# try:
-# return loop.run_until_complete(fn(**args))
-# finally:
-# loop.close()
-# return fn(**args)
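A small sketch of how the handler is driven (the schema shape is assumed to follow the OpenAI function-tool format that `to_schema` targets):

```python
# Hypothetical tool; to_schema() introspects the signature to build its schema.
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

handler = _FunctionToolHandler([add])
handler.schemas()                                   # one OpenAI-style function schema
print(handler.exec_sync("add", {"a": 2, "b": 3}))   # -> 5
```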
-#
-#
-# @dataclass
-# class _ModelConfig:
-# """Model configuration parameters."""
-#
-# id: str
-# model_list: list[str] | None = None # Store the full model list when provided
-# temperature: float | None = None
-# max_tokens: int | None = None
-# top_p: float | None = None
-# frequency_penalty: float | None = None
-# presence_penalty: float | None = None
-# logit_bias: Dict[str, int] | None = None
-# response_format: Dict[str, JsonValue] | type | None = None # Dict or Pydantic model
-# agent_attributes: Dict[str, float] | None = None
-# model_attributes: Dict[str, Dict[str, float]] | None = None
-# tool_choice: str | Dict[str, JsonValue] | None = None
-# guardrails: list[Dict[str, JsonValue]] | None = None
-# handoff_config: Dict[str, JsonValue] | None = None
-#
-#
-# @dataclass
-# class _ExecutionConfig:
-# """Configuration for tool execution behavior and policies."""
-#
-# mcp_servers: list[str | Dict[str, Any]] = field(default_factory=list) # Wire format
-# credentials: list[Any] | None = None # CredentialProtocol objects (not serialized)
-# max_steps: int = 10
-# stream: bool = False
-# transport: Literal["http", "realtime"] = "http"
-# verbose: bool = False
-# debug: bool = False
-# on_tool_event: Callable[[Dict[str, JsonValue]], None] | None = None
-# return_intent: bool = False
-# policy: PolicyInput = None
-# available_models: list[str] = field(default_factory=list)
-# strict_models: bool = True
-#
-#
-# @dataclass
-# class _RunResult:
-# """Result from a completed tool execution run."""
-#
-# final_output: str # Final text output from conversation
-# tool_results: list[ToolResult]
-# steps_used: int
-# messages: list[Message] = field(default_factory=list) # Full conversation history
-# intents: list[Dict[str, JsonValue]] | None = None
-# tools_called: list[str] = field(default_factory=list)
-# mcp_results: list[MCPToolResult] = field(default_factory=list)
-# """MCP tool results from server-side tool calls."""
-#
-# @property
-# def output(self) -> str:
-# """Alias for final_output."""
-# return self.final_output
-#
-# @property
-# def content(self) -> str:
-# """Alias for final_output."""
-# return self.final_output
-#
-# def to_input_list(self) -> list[Message]:
-# """Get the full conversation history for continuation."""
-# return list(self.messages)
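Continuation then looks roughly like this (a sketch using the `DedalusRunner` defined below; the model id is a placeholder):

```python
from dedalus_labs import Dedalus

def shout(text: str) -> str:
    """Uppercase the given text."""
    return text.upper()

runner = DedalusRunner(Dedalus())
first = runner.run(input="Shout 'hello' for me.", tools=[shout], model="gpt-4o-mini")
# Feed the full history back in to continue the same conversation.
second = runner.run(
    messages=first.to_input_list() + [{"role": "user", "content": "Now do it again."}],
    tools=[shout],
    model="gpt-4o-mini",
)
```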
-#
-#
-# class DedalusRunner:
-# """Enhanced Dedalus client with tool execution capabilities."""
-#
-# def __init__(self, client: Dedalus | AsyncDedalus, verbose: bool = False):
-# self.client = client
-# self.verbose = verbose
-#
-# def run(
-# self,
-# input: str | list[Message] | None = None,
-# tools: list[Callable] | None = None,
-# messages: list[Message] | None = None,
-# instructions: str | None = None,
-# model: str | list[str] | DedalusModel | list[DedalusModel] | None = None,
-# max_steps: int = 10,
-# mcp_servers: MCPServersInput = None,
-# credentials: Sequence[Any] | None = None, # TODO: Loosely typed as `Any` for now
-# temperature: float | None = None,
-# max_tokens: int | None = None,
-# top_p: float | None = None,
-# frequency_penalty: float | None = None,
-# presence_penalty: float | None = None,
-# logit_bias: Dict[str, int] | None = None,
-# response_format: Dict[str, JsonValue] | type | None = None,
-# stream: bool = False,
-# transport: Literal["http", "realtime"] = "http",
-# verbose: bool | None = None,
-# debug: bool | None = None,
-# on_tool_event: Callable[[Dict[str, JsonValue]], None] | None = None,
-# return_intent: bool = False,
-# agent_attributes: Dict[str, float] | None = None,
-# model_attributes: Dict[str, Dict[str, float]] | None = None,
-# tool_choice: str | Dict[str, JsonValue] | None = None,
-# guardrails: list[Dict[str, JsonValue]] | None = None,
-# handoff_config: Dict[str, JsonValue] | None = None,
-# policy: PolicyInput = None,
-# available_models: list[str] | None = None,
-# strict_models: bool = True,
-# ):
-# """Execute tools with unified async/sync + streaming/non-streaming logic."""
-# if not model:
-# raise ValueError("model must be provided")
-#
-# # Validate tools parameter
-# if tools is not None:
-# if not isinstance(tools, list):
-# msg = "tools must be a list of callable functions or None"
-# raise ValueError(msg)
-#
-# # Check for nested lists (common mistake: tools=[[]] instead of tools=[])
-# for i, tool in enumerate(tools):
-# if not callable(tool):
-# if isinstance(tool, list):
-# msg = f"tools[{i}] is a list, not a callable function. Did you mean to pass tools={tool} instead of tools=[{tool}]?"
-# raise TypeError(msg)
-# msg = (
-# f"tools[{i}] is not callable (got {type(tool).__name__}). All tools must be callable functions."
-# )
-# raise TypeError(msg)
-#
-# # Parse model to extract name and config
-# model_name = None
-# model_list = []
-#
-# if isinstance(model, list):
-# if not model:
-# raise ValueError("model list cannot be empty")
-# # Handle list of DedalusModel or strings
-# for m in model:
-# if hasattr(m, "name"): # DedalusModel
-# model_list.append(m.name)
-# # Use config from first DedalusModel if params not explicitly set
-# if model_name is None:
-# model_name = m.name
-# temperature = temperature if temperature is not None else getattr(m, "temperature", None)
-# max_tokens = max_tokens if max_tokens is not None else getattr(m, "max_tokens", None)
-# top_p = top_p if top_p is not None else getattr(m, "top_p", None)
-# frequency_penalty = (
-# frequency_penalty
-# if frequency_penalty is not None
-# else getattr(m, "frequency_penalty", None)
-# )
-# presence_penalty = (
-# presence_penalty if presence_penalty is not None else getattr(m, "presence_penalty", None)
-# )
-# logit_bias = logit_bias if logit_bias is not None else getattr(m, "logit_bias", None)
-#
-# # Extract additional parameters from first DedalusModel
-# stream = stream if stream is not False else getattr(m, "stream", False)
-# tool_choice = tool_choice if tool_choice is not None else getattr(m, "tool_choice", None)
-#
-# # Extract Dedalus-specific extensions
-# if hasattr(m, "attributes") and m.attributes:
-# agent_attributes = agent_attributes if agent_attributes is not None else m.attributes
-#
-# # Check for unsupported parameters (only warn once for first model)
-# unsupported_params = []
-# if hasattr(m, "n") and m.n is not None:
-# unsupported_params.append("n")
-# if hasattr(m, "stop") and m.stop is not None:
-# unsupported_params.append("stop")
-# if hasattr(m, "stream_options") and m.stream_options is not None:
-# unsupported_params.append("stream_options")
-# if hasattr(m, "logprobs") and m.logprobs is not None:
-# unsupported_params.append("logprobs")
-# if hasattr(m, "top_logprobs") and m.top_logprobs is not None:
-# unsupported_params.append("top_logprobs")
-# if hasattr(m, "seed") and m.seed is not None:
-# unsupported_params.append("seed")
-# if hasattr(m, "service_tier") and m.service_tier is not None:
-# unsupported_params.append("service_tier")
-# if hasattr(m, "tools") and m.tools is not None:
-# unsupported_params.append("tools")
-# if hasattr(m, "parallel_tool_calls") and m.parallel_tool_calls is not None:
-# unsupported_params.append("parallel_tool_calls")
-# if hasattr(m, "user") and m.user is not None:
-# unsupported_params.append("user")
-# if hasattr(m, "max_completion_tokens") and m.max_completion_tokens is not None:
-# unsupported_params.append("max_completion_tokens")
-#
-# if unsupported_params:
-# import warnings
-#
-# warnings.warn(
-# f"The following DedalusModel parameters are not yet supported and will be ignored: {', '.join(unsupported_params)}. "
-# f"Support for these parameters is coming soon.",
-# UserWarning,
-# stacklevel=2,
-# )
-# else: # String
-# model_list.append(m)
-# if model_name is None:
-# model_name = m
-# elif hasattr(model, "name"): # Single DedalusModel
-# model_name = model.name
-# model_list = [model.name]
-# # Extract config from DedalusModel if params not explicitly set
-# temperature = temperature if temperature is not None else getattr(model, "temperature", None)
-# max_tokens = max_tokens if max_tokens is not None else getattr(model, "max_tokens", None)
-# top_p = top_p if top_p is not None else getattr(model, "top_p", None)
-# frequency_penalty = (
-# frequency_penalty if frequency_penalty is not None else getattr(model, "frequency_penalty", None)
-# )
-# presence_penalty = (
-# presence_penalty if presence_penalty is not None else getattr(model, "presence_penalty", None)
-# )
-# logit_bias = logit_bias if logit_bias is not None else getattr(model, "logit_bias", None)
-#
-# # Extract additional supported parameters
-# stream = stream if stream is not False else getattr(model, "stream", False)
-# tool_choice = tool_choice if tool_choice is not None else getattr(model, "tool_choice", None)
-#
-# # Extract Dedalus-specific extensions
-# if hasattr(model, "attributes") and model.attributes:
-# agent_attributes = agent_attributes if agent_attributes is not None else model.attributes
-# if hasattr(model, "metadata") and model.metadata:
-# # metadata is stored but not yet fully utilized
-# pass
-#
-# # Log warnings for unsupported parameters
-# unsupported_params = []
-# if hasattr(model, "n") and model.n is not None:
-# unsupported_params.append("n")
-# if hasattr(model, "stop") and model.stop is not None:
-# unsupported_params.append("stop")
-# if hasattr(model, "stream_options") and model.stream_options is not None:
-# unsupported_params.append("stream_options")
-# if hasattr(model, "logprobs") and model.logprobs is not None:
-# unsupported_params.append("logprobs")
-# if hasattr(model, "top_logprobs") and model.top_logprobs is not None:
-# unsupported_params.append("top_logprobs")
-# if hasattr(model, "seed") and model.seed is not None:
-# unsupported_params.append("seed")
-# if hasattr(model, "service_tier") and model.service_tier is not None:
-# unsupported_params.append("service_tier")
-# if hasattr(model, "tools") and model.tools is not None:
-# unsupported_params.append("tools")
-# if hasattr(model, "parallel_tool_calls") and model.parallel_tool_calls is not None:
-# unsupported_params.append("parallel_tool_calls")
-# if hasattr(model, "user") and model.user is not None:
-# unsupported_params.append("user")
-# if hasattr(model, "max_completion_tokens") and model.max_completion_tokens is not None:
-# unsupported_params.append("max_completion_tokens")
-#
-# if unsupported_params:
-# import warnings
-#
-# warnings.warn(
-# f"The following DedalusModel parameters are not yet supported and will be ignored: {', '.join(unsupported_params)}. "
-# f"Support for these parameters is coming soon.",
-# UserWarning,
-# stacklevel=2,
-# )
-# else: # Single string
-# model_name = model
-# model_list = [model] if model else []
-#
-# available_models = model_list if available_models is None else available_models
-#
-# model_config = _ModelConfig(
-# id=str(model_name),
-# model_list=model_list, # Pass the full model list
-# temperature=temperature,
-# max_tokens=max_tokens,
-# top_p=top_p,
-# frequency_penalty=frequency_penalty,
-# presence_penalty=presence_penalty,
-# logit_bias=logit_bias,
-# response_format=response_format,
-# agent_attributes=agent_attributes,
-# model_attributes=model_attributes,
-# tool_choice=tool_choice,
-# guardrails=guardrails,
-# handoff_config=handoff_config,
-# )
-#
-# # Serialize mcp_servers to wire format
-# serialized_mcp_servers = serialize_mcp_servers(mcp_servers)
-#
-# exec_config = _ExecutionConfig(
-# mcp_servers=serialized_mcp_servers,
-# credentials=list(credentials) if credentials else None,
-# max_steps=max_steps,
-# stream=stream,
-# transport=transport,
-# verbose=verbose if verbose is not None else self.verbose,
-# debug=debug or False,
-# on_tool_event=on_tool_event,
-# return_intent=return_intent,
-# policy=policy,
-# available_models=available_models or [],
-# strict_models=strict_models,
-# )
-#
-# tool_handler = _FunctionToolHandler(list(tools or []))
-#
-# # Handle instructions and messages parameters
-# if instructions is not None and messages is not None:
-# # instructions overrides any existing system messages
-# conversation = [{"role": "system", "content": instructions}] + [
-# msg for msg in messages if msg.get("role") != "system"
-# ]
-# elif instructions is not None:
-# # Convert instructions to system message, optionally with user input
-# if input is not None:
-# if isinstance(input, str):
-# conversation = [
-# {"role": "system", "content": instructions},
-# {"role": "user", "content": input},
-# ]
-# else:
-# conversation = [{"role": "system", "content": instructions}] + list(input)
-# else:
-# conversation = [{"role": "system", "content": instructions}]
-# elif messages is not None:
-# conversation = messages
-# elif input is not None:
-# conversation = [{"role": "user", "content": input}] if isinstance(input, str) else input
-# else:
-# raise ValueError("Must provide one of: 'instructions', 'messages', or 'input'")
-#
-# return self._execute_conversation(conversation, tool_handler, model_config, exec_config)
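The precedence above collapses to: `instructions` wins the system slot, `messages` wins the history, and bare `input` seeds a fresh conversation. Illustrated with placeholder values:

```python
from dedalus_labs import Dedalus

runner = DedalusRunner(Dedalus())  # DedalusRunner is defined in this module

# instructions + input -> system message followed by a user message
runner.run(instructions="Be terse.", input="hi", model="gpt-4o-mini")
# conversation sent: [{"role": "system", ...}, {"role": "user", "content": "hi"}]

# instructions + messages -> instructions replace any existing system messages
runner.run(
    instructions="Be verbose.",
    messages=[{"role": "system", "content": "old"}, {"role": "user", "content": "hi"}],
    model="gpt-4o-mini",
)
```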
-#
-# def _execute_conversation(
-# self,
-# messages: list[Message],
-# tool_handler: _ToolHandler,
-# model_config: _ModelConfig,
-# exec_config: _ExecutionConfig,
-# ):
-# """Execute conversation with unified logic for all client/streaming combinations."""
-# is_async = isinstance(self.client, AsyncDedalus)
-#
-# if is_async:
-# if exec_config.stream:
-# return self._execute_streaming_async(messages, tool_handler, model_config, exec_config)
-# else:
-# return self._execute_turns_async(messages, tool_handler, model_config, exec_config)
-# else:
-# if exec_config.stream:
-# return self._execute_streaming_sync(messages, tool_handler, model_config, exec_config)
-# else:
-# return self._execute_turns_sync(messages, tool_handler, model_config, exec_config)
-#
-# async def _execute_turns_async(
-# self,
-# messages: list[Message],
-# tool_handler: _ToolHandler,
-# model_config: _ModelConfig,
-# exec_config: _ExecutionConfig,
-# ) -> _RunResult:
-# """Execute async non-streaming conversation."""
-# messages = list(messages)
-# steps = 0
-# final_text = ""
-# tool_results: list[ToolResult] = []
-# tools_called: list[str] = []
-#
-# while steps < exec_config.max_steps:
-# steps += 1
-# if exec_config.verbose:
-# print(f"Step started: Step={steps}")
-# # Show what models are configured
-# if model_config.model_list and len(model_config.model_list) > 1:
-# print(f" Available models: {model_config.model_list}")
-# print(f" Primary model: {model_config.id}")
-# else:
-# print(f" Using model: {model_config.id}")
-#
-# # Apply policy and get model params
-# policy_result = self._apply_policy(
-# exec_config.policy,
-# {
-# "step": steps,
-# "messages": messages,
-# "model": model_config.id,
-# "mcp_servers": exec_config.mcp_servers,
-# "tools": list(getattr(tool_handler, "_funcs", {}).keys()),
-# "available_models": exec_config.available_models,
-# },
-# model_config,
-# exec_config,
-# )
-#
-# # Make model call
-# current_messages = self._build_messages(messages, policy_result["prepend"], policy_result["append"])
-#
-# response = await self.client.chat.completions.create(
-# model=policy_result["model"],
-# messages=current_messages,
-# tools=tool_handler.schemas() or None,
-# mcp_servers=policy_result["mcp_servers"],
-# credentials=exec_config.credentials,
-# **{**self._mk_kwargs(model_config), **policy_result["model_kwargs"]},
-# )
-#
-# if exec_config.verbose:
-# actual_model = policy_result["model"]
-# if isinstance(actual_model, list):
-# print(f" API called with model list: {actual_model}")
-# else:
-# print(f" API called with single model: {actual_model}")
-# print(f" Response received (server says model: {getattr(response, 'model', 'unknown')})")
-# print(f" Response type: {type(response).__name__}")
-# # Surface agent timeline if server included it
-# agents_used = getattr(response, "agents_used", None)
-# if not agents_used:
-# extra = getattr(response, "__pydantic_extra__", None)
-# if isinstance(extra, dict):
-# agents_used = extra.get("agents_used")
-# if agents_used:
-# print(f" [EVENT] agents_used: {agents_used}")
-#
-# # Check if we have tool calls
-# if not hasattr(response, "choices") or not response.choices:
-# final_text = ""
-# break
-#
-# message = response.choices[0].message
-# msg = vars(message) if hasattr(message, "__dict__") else message
-# tool_calls = msg.get("tool_calls")
-# content = msg.get("content", "")
-#
-# if exec_config.verbose:
-# print(f" Response content: {content[:100] if content else '(none)'}...")
-# if tool_calls:
-# call_names = []
-# for tc in tool_calls:
-# try:
-# if isinstance(tc, dict):
-# call_names.append(tc.get("function", {}).get("name", "?"))
-# else:
-# call_names.append(getattr(getattr(tc, "function", None), "name", "?"))
-# except Exception:
-# call_names.append("?")
-# print(f" Tool calls in response: {call_names}")
-#
-# if not tool_calls:
-# final_text = content or ""
-# # Add assistant response to conversation
-# if final_text:
-# messages.append({"role": "assistant", "content": final_text})
-# break
-#
-# # Execute tools
-# tool_calls = self._extract_tool_calls(response.choices[0])
-# if exec_config.verbose:
-# print(f" Extracted {len(tool_calls)} tool calls")
-# for tc in tool_calls:
-# print(f" - {tc.get('function', {}).get('name', '?')} (id: {tc.get('id', '?')})")
-# await self._execute_tool_calls(
-# tool_calls,
-# tool_handler,
-# messages,
-# tool_results,
-# tools_called,
-# steps,
-# verbose=exec_config.verbose,
-# )
-#
-# # Extract MCP tool executions from the last response
-# mcp_results = _extract_mcp_results(response)
-#
-# return _RunResult(
-# final_output=final_text,
-# tool_results=tool_results,
-# steps_used=steps,
-# tools_called=tools_called,
-# messages=messages,
-# mcp_results=mcp_results,
-# )
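With an `AsyncDedalus` client, `run` returns an awaitable, so the non-streaming path is consumed like this (a sketch; the model id is a placeholder):

```python
import asyncio

from dedalus_labs import AsyncDedalus

def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

async def main() -> None:
    runner = DedalusRunner(AsyncDedalus())
    result = await runner.run(input="What is 2 + 3?", tools=[add], model="gpt-4o-mini")
    print(result.final_output, result.tools_called, result.steps_used)

asyncio.run(main())
```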
-#
-# async def _execute_streaming_async(
-# self,
-# messages: list[Message],
-# tool_handler: _ToolHandler,
-# model_config: _ModelConfig,
-# exec_config: _ExecutionConfig,
-# ) -> AsyncIterator[Any]:
-# """Execute async streaming conversation."""
-# messages = list(messages)
-# steps = 0
-#
-# while steps < exec_config.max_steps:
-# steps += 1
-# if exec_config.verbose:
-# print(f"Step started: Step={steps} (max_steps={exec_config.max_steps})")
-# print(f" Starting step {steps} with {len(messages)} messages in conversation")
-#                 print("   Message history:")
-# for i, msg in enumerate(messages):
-# role = msg.get("role")
-# content = str(msg.get("content", ""))[:50] + "..." if msg.get("content") else ""
-# tool_info = ""
-# if msg.get("tool_calls"):
-# tool_names = [tc.get("function", {}).get("name", "?") for tc in msg.get("tool_calls", [])]
-# tool_info = f" [calling: {', '.join(tool_names)}]"
-# elif msg.get("tool_call_id"):
-# tool_info = f" [response to: {msg.get('tool_call_id')[:8]}...]"
-# print(f" [Message {i}] {role}: {content}{tool_info}")
-#
-# # Apply policy
-# policy_result = self._apply_policy(
-# exec_config.policy,
-# {
-# "step": steps,
-# "messages": messages,
-# "model": model_config.id,
-# "mcp_servers": exec_config.mcp_servers,
-# "tools": list(getattr(tool_handler, "_funcs", {}).keys()),
-# "available_models": exec_config.available_models,
-# },
-# model_config,
-# exec_config,
-# )
-#
-# # Stream model response
-# current_messages = self._build_messages(messages, policy_result["prepend"], policy_result["append"])
-#
-# # Suppress per-message debug; keep streaming minimal
-#
-# stream = await self.client.chat.completions.create(
-# model=policy_result["model"],
-# messages=current_messages,
-# tools=tool_handler.schemas() or None,
-# mcp_servers=policy_result["mcp_servers"],
-# credentials=exec_config.credentials,
-# stream=True,
-# **{**self._mk_kwargs(model_config), **policy_result["model_kwargs"]},
-# )
-#
-# tool_calls = []
-# chunk_count = 0
-# content_chunks = 0
-# tool_call_chunks = 0
-# finish_reason = None
-# async for chunk in stream:
-# chunk_count += 1
-# if exec_config.verbose:
-# # Only surface agent_updated metadata; suppress raw chunk spam
-# extra = getattr(chunk, "__pydantic_extra__", None)
-# if isinstance(extra, dict):
-# meta = extra.get("x_dedalus_event") or extra.get("dedalus_event")
-# if isinstance(meta, dict) and meta.get("type") == "agent_updated":
-# print(f" [EVENT] agent_updated: agent={meta.get('agent')} model={meta.get('model')}")
-# if hasattr(chunk, "choices") and chunk.choices:
-# choice = chunk.choices[0]
-# delta = choice.delta
-#
-# # Check finish reason
-# if hasattr(choice, "finish_reason") and choice.finish_reason:
-# finish_reason = choice.finish_reason
-# # suppress per-chunk finish_reason spam
-#
-# # Check for tool calls
-# if hasattr(delta, "tool_calls") and delta.tool_calls:
-# tool_call_chunks += 1
-# self._accumulate_tool_calls(delta.tool_calls, tool_calls)
-# # suppress per-chunk tool_call delta spam
-#
-# # Check for content
-# if hasattr(delta, "content") and delta.content:
-# content_chunks += 1
-# # suppress per-chunk content spam
-#
-# # Check for role (suppressed)
-# if hasattr(delta, "role") and delta.role:
-# pass
-#
-# yield chunk
-#
-# if exec_config.verbose:
-# # Keep a compact end-of-stream summary
-# names = [tc.get("function", {}).get("name", "unknown") for tc in tool_calls]
-# print(f" Stream summary: chunks={chunk_count} content={content_chunks} tool_calls={tool_call_chunks}")
-# if names:
-# print(f" Tools called this turn: {names}")
-#
-# # Execute any accumulated tool calls
-# if tool_calls:
-# if exec_config.verbose:
-# print(f" Processing {len(tool_calls)} tool calls")
-#
-# # Categorize tools
-# local_names = [
-# tc["function"]["name"]
-# for tc in tool_calls
-# if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
-# ]
-# mcp_names = [
-# tc["function"]["name"]
-# for tc in tool_calls
-# if tc["function"]["name"] not in getattr(tool_handler, "_funcs", {})
-# ]
-#
-# # Check if ALL tools are MCP tools (none are local)
-# all_mcp = all(tc["function"]["name"] not in getattr(tool_handler, "_funcs", {}) for tc in tool_calls)
-#
-# # Check if stream already contains content (MCP results)
-# has_streamed_content = content_chunks > 0
-#
-# if exec_config.verbose:
-# print(f" Local tools used: {local_names}")
-# print(f" Server tools used: {mcp_names}")
-#
-# # When MCP tools are involved and content was streamed, we're done
-# if mcp_names and has_streamed_content:
-# if exec_config.verbose:
-# print(f" MCP tools called and content streamed - response complete, breaking loop")
-# break
-#
-# if all_mcp:
-# # All tools are MCP - the response should be streamed
-# if exec_config.verbose:
-# print(f" All tools are MCP, expecting streamed response")
-# # Don't break here - let the next iteration handle it
-# else:
-# # We have at least one local tool
-# # Filter to only include local tool calls in the assistant message
-# local_only_tool_calls = [
-# tc for tc in tool_calls if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
-# ]
-# messages.append({"role": "assistant", "tool_calls": local_only_tool_calls})
-# if exec_config.verbose:
-# print(
-# f" Added assistant message with {len(local_only_tool_calls)} local tool calls (filtered from {len(tool_calls)} total)"
-# )
-#
-# # Execute only local tools
-# for tc in tool_calls:
-# fn_name = tc["function"]["name"]
-# fn_args_str = tc["function"]["arguments"]
-#
-# if fn_name in getattr(tool_handler, "_funcs", {}):
-# # Local tool
-# try:
-# fn_args = json.loads(fn_args_str)
-# except json.JSONDecodeError:
-# fn_args = {}
-#
-# try:
-# result = await tool_handler.exec(fn_name, fn_args)
-# messages.append(
-# {
-# "role": "tool",
-# "tool_call_id": tc["id"],
-# "content": str(result),
-# }
-# )
-# if exec_config.verbose:
-# print(f" Executed local tool {fn_name}: {str(result)[:50]}...")
-# except Exception as e:
-# messages.append(
-# {
-# "role": "tool",
-# "tool_call_id": tc["id"],
-# "content": f"Error: {str(e)}",
-# }
-# )
-# if exec_config.verbose:
-# print(f" Error executing local tool {fn_name}: {e}")
-# else:
-# # MCP tool - DON'T add any message
-# # The API server should handle this
-# if exec_config.verbose:
-# print(f" MCP tool {fn_name} - skipping (server will handle)")
-#
-# if exec_config.verbose:
-# print(f" Messages after tool execution: {len(messages)}")
-#
-# # Only continue if we have NO MCP tools
-# if not mcp_names:
-# print(f" No MCP tools, continuing loop to step {steps + 1}...")
-# else:
-# print(f" MCP tools present, expecting response in next iteration")
-#
-# # Continue loop only if we need another response
-# if exec_config.verbose:
-# print(f" Tool processing complete")
-# else:
-# if exec_config.verbose:
-# print(f" No tool calls found, breaking out of loop")
-# break
-#
-# if exec_config.verbose:
-# print(f"\n[DEBUG] Exited main loop after {steps} steps")
-#
-# def _execute_turns_sync(
-# self,
-# messages: list[Message],
-# tool_handler: _ToolHandler,
-# model_config: _ModelConfig,
-# exec_config: _ExecutionConfig,
-# ) -> _RunResult:
-# """Execute sync non-streaming conversation."""
-# messages = list(messages)
-# steps = 0
-# final_text = ""
-# tool_results: list[ToolResult] = []
-# tools_called: list[str] = []
-#
-# while steps < exec_config.max_steps:
-# steps += 1
-# if exec_config.verbose:
-# print(f"Step started: Step={steps}")
-# # Show what models are configured
-# if model_config.model_list and len(model_config.model_list) > 1:
-# print(f" Available models: {model_config.model_list}")
-# print(f" Primary model: {model_config.id}")
-# else:
-# print(f" Using model: {model_config.id}")
-#
-# # Apply policy
-# policy_result = self._apply_policy(
-# exec_config.policy,
-# {
-# "step": steps,
-# "messages": messages,
-# "model": model_config.id,
-# "mcp_servers": exec_config.mcp_servers,
-# "tools": list(getattr(tool_handler, "_funcs", {}).keys()),
-# "available_models": exec_config.available_models,
-# },
-# model_config,
-# exec_config,
-# )
-#
-# # Make model call
-# current_messages = self._build_messages(messages, policy_result["prepend"], policy_result["append"])
-#
-# if exec_config.verbose:
-# actual_model = policy_result["model"]
-# if isinstance(actual_model, list):
-# print(f" API called with model list: {actual_model}")
-# else:
-# print(f" API called with single model: {actual_model}")
-#
-# response = self.client.chat.completions.create(
-# model=policy_result["model"],
-# messages=current_messages,
-# tools=tool_handler.schemas() or None,
-# mcp_servers=policy_result["mcp_servers"],
-# credentials=exec_config.credentials,
-# **{**self._mk_kwargs(model_config), **policy_result["model_kwargs"]},
-# )
-#
-# if exec_config.verbose:
-# print(f" Response received (server says model: {getattr(response, 'model', 'unknown')})")
-# print(f" Response type: {type(response).__name__}")
-#
-# # Check if we have tool calls
-# if not hasattr(response, "choices") or not response.choices:
-# final_text = ""
-# break
-#
-# message = response.choices[0].message
-# msg = vars(message) if hasattr(message, "__dict__") else message
-# tool_calls = msg.get("tool_calls")
-# content = msg.get("content", "")
-#
-# if exec_config.verbose:
-# print(f" Response content: {content[:100] if content else '(none)'}...")
-# if tool_calls:
-# tool_names = [tc.get("function", {}).get("name", "?") for tc in tool_calls]
-# print(f" 🔧 Tool calls in response: {tool_names}")
-#
-# if not tool_calls:
-# final_text = content or ""
-# # Add assistant response to conversation
-# if final_text:
-# messages.append({"role": "assistant", "content": final_text})
-# break
-#
-# # Execute tools
-# tool_calls = self._extract_tool_calls(response.choices[0])
-# self._execute_tool_calls_sync(tool_calls, tool_handler, messages, tool_results, tools_called, steps)
-#
-# # Extract MCP tool executions from the last response
-# mcp_results = _extract_mcp_results(response)
-#
-# return _RunResult(
-# final_output=final_text,
-# tool_results=tool_results,
-# steps_used=steps,
-# tools_called=tools_called,
-# messages=messages,
-# mcp_results=mcp_results,
-# )
-#
-# def _execute_streaming_sync(
-# self,
-# messages: list[Message],
-# tool_handler: _ToolHandler,
-# model_config: _ModelConfig,
-# exec_config: _ExecutionConfig,
-# ) -> Iterator[Any]:
-# """Execute sync streaming conversation."""
-# messages = list(messages)
-# steps = 0
-#
-# while steps < exec_config.max_steps:
-# steps += 1
-# if exec_config.verbose:
-# print(f"Step started: Step={steps} (max_steps={exec_config.max_steps})")
-# print(f" Starting step {steps} with {len(messages)} messages in conversation")
-#                 print("   Message history:")
-# for i, msg in enumerate(messages):
-# role = msg.get("role")
-# content = str(msg.get("content", ""))[:50] + "..." if msg.get("content") else ""
-# tool_info = ""
-# if msg.get("tool_calls"):
-# tool_names = [tc.get("function", {}).get("name", "?") for tc in msg.get("tool_calls", [])]
-# tool_info = f" [calling: {', '.join(tool_names)}]"
-# elif msg.get("tool_call_id"):
-# tool_info = f" [response to: {msg.get('tool_call_id')[:8]}...]"
-# print(f" [Message {i}] {role}: {content}{tool_info}")
-#
-# # Apply policy
-# policy_result = self._apply_policy(
-# exec_config.policy,
-# {
-# "step": steps,
-# "messages": messages,
-# "model": model_config.id,
-# "mcp_servers": exec_config.mcp_servers,
-# "tools": list(getattr(tool_handler, "_funcs", {}).keys()),
-# "available_models": exec_config.available_models,
-# },
-# model_config,
-# exec_config,
-# )
-#
-# # Stream model response
-# current_messages = self._build_messages(messages, policy_result["prepend"], policy_result["append"])
-#
-# if exec_config.verbose:
-# print(f" Messages being sent to API:")
-# for i, msg in enumerate(current_messages):
-# content_preview = str(msg.get("content", ""))[:100]
-# tool_call_info = ""
-# if msg.get("tool_calls"):
-# tool_names = [tc.get("function", {}).get("name", "unknown") for tc in msg.get("tool_calls", [])]
-# tool_call_info = f" tool_calls=[{', '.join(tool_names)}]"
-# print(f" [{i}] {msg.get('role')}: {content_preview}...{tool_call_info}")
-# print(f" MCP servers: {policy_result['mcp_servers']}")
-# print(f" Local tools available: {list(getattr(tool_handler, '_funcs', {}).keys())}")
-#
-# stream = self.client.chat.completions.create(
-# model=policy_result["model"],
-# messages=current_messages,
-# tools=tool_handler.schemas() or None,
-# mcp_servers=policy_result["mcp_servers"],
-# credentials=exec_config.credentials,
-# stream=True,
-# **{**self._mk_kwargs(model_config), **policy_result["model_kwargs"]},
-# )
-#
-# tool_calls = []
-# chunk_count = 0
-# content_chunks = 0
-# tool_call_chunks = 0
-# finish_reason = None
-# accumulated_content = ""
-#
-# for chunk in stream:
-# chunk_count += 1
-# if hasattr(chunk, "choices") and chunk.choices:
-# choice = chunk.choices[0]
-# delta = choice.delta
-#
-# # Check finish reason
-# if hasattr(choice, "finish_reason") and choice.finish_reason:
-# finish_reason = choice.finish_reason
-#
-# # Check for tool calls
-# if hasattr(delta, "tool_calls") and delta.tool_calls:
-# tool_call_chunks += 1
-# self._accumulate_tool_calls(delta.tool_calls, tool_calls)
-# if exec_config.verbose:
-# # Show tool calls in a more readable format
-# for tc_delta in delta.tool_calls:
-# if (
-# hasattr(tc_delta, "function")
-# and hasattr(tc_delta.function, "name")
-# and tc_delta.function.name
-# ):
-# print(f"-> Calling {tc_delta.function.name}")
-#
-# # Check for content
-# if hasattr(delta, "content") and delta.content:
-# content_chunks += 1
-# accumulated_content += delta.content
-#
-# yield chunk
-#
-# if exec_config.verbose:
-# if accumulated_content:
-# print() # New line after streamed content
-# if tool_calls:
-# print(f"\nReceived {len(tool_calls)} tool call(s)")
-# for i, tc in enumerate(tool_calls, 1):
-# tool_name = tc.get("function", {}).get("name", "unknown")
-# # Clean up the tool name for display
-# display_name = tool_name.replace("transfer_to_", "").replace("_", " ").title()
-# print(f" {i}. {display_name}")
-# else:
-# print(f"\n✓ Response complete ({content_chunks} content chunks)")
-#
-# # Execute any accumulated tool calls
-# if tool_calls:
-# if exec_config.verbose:
-# print(f" Processing {len(tool_calls)} tool calls")
-#
-# # Categorize tools
-# local_names = [
-# tc["function"]["name"]
-# for tc in tool_calls
-# if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
-# ]
-# mcp_names = [
-# tc["function"]["name"]
-# for tc in tool_calls
-# if tc["function"]["name"] not in getattr(tool_handler, "_funcs", {})
-# ]
-#
-# # Check if ALL tools are MCP tools (none are local)
-# all_mcp = all(tc["function"]["name"] not in getattr(tool_handler, "_funcs", {}) for tc in tool_calls)
-#
-# # Check if stream already contains content (MCP results)
-# has_streamed_content = content_chunks > 0
-#
-# if exec_config.verbose:
-# print(f" Local tools: {local_names}")
-# print(f" Server tools: {mcp_names}")
-#
-# # When MCP tools are involved and content was streamed, we're done
-# if mcp_names and has_streamed_content:
-# if exec_config.verbose:
-# print(f" MCP tools called and content streamed - response complete, breaking loop")
-# break
-#
-# if all_mcp:
-# # All tools are MCP - the response should be streamed
-# if exec_config.verbose:
-# print(f" All tools are MCP, expecting streamed response")
-# # Don't break here - let the next iteration handle it
-# else:
-# # We have at least one local tool
-# # Filter to only include local tool calls in the assistant message
-# local_only_tool_calls = [
-# tc for tc in tool_calls if tc["function"]["name"] in getattr(tool_handler, "_funcs", {})
-# ]
-# messages.append({"role": "assistant", "tool_calls": local_only_tool_calls})
-# if exec_config.verbose:
-# print(
-# f" Added assistant message with {len(local_only_tool_calls)} local tool calls (filtered from {len(tool_calls)} total)"
-# )
-#
-# # Execute only local tools
-# for tc in tool_calls:
-# fn_name = tc["function"]["name"]
-# fn_args_str = tc["function"]["arguments"]
-#
-# if fn_name in getattr(tool_handler, "_funcs", {}):
-# # Local tool
-# try:
-# fn_args = json.loads(fn_args_str)
-# except json.JSONDecodeError:
-# fn_args = {}
-#
-# try:
-# result = tool_handler.exec_sync(fn_name, fn_args)
-# messages.append(
-# {
-# "role": "tool",
-# "tool_call_id": tc["id"],
-# "content": str(result),
-# }
-# )
-# if exec_config.verbose:
-# print(f" Executed local tool {fn_name}: {str(result)[:50]}...")
-# except Exception as e:
-# messages.append(
-# {
-# "role": "tool",
-# "tool_call_id": tc["id"],
-# "content": f"Error: {str(e)}",
-# }
-# )
-# if exec_config.verbose:
-# print(f" Error executing local tool {fn_name}: {e}")
-# else:
-# # MCP tool - DON'T add any message
-# # The API server should handle this
-# if exec_config.verbose:
-# print(f" MCP tool {fn_name} - skipping (server will handle)")
-#
-#             if exec_config.verbose:
-#                 print(f"   Messages after tool execution: {len(messages)}")
-#
-#                 # Only continue if we have NO MCP tools
-#                 if not mcp_names:
-#                     print(f"   No MCP tools, continuing loop to step {steps + 1}...")
-#                 else:
-#                     print(f"   MCP tools present, expecting response in next iteration")
-#
-# # Continue loop only if we need another response
-# if exec_config.verbose:
-# print(f" Tool processing complete")
-# else:
-# if exec_config.verbose:
-# print(f" No tool calls found, breaking out of loop")
-# break
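With a sync client and `stream=True`, `run` returns this generator, so callers iterate chunks directly (a sketch; the model id is a placeholder):

```python
from dedalus_labs import Dedalus

runner = DedalusRunner(Dedalus())
for chunk in runner.run(input="Stream a haiku.", model="gpt-4o-mini", stream=True):
    if getattr(chunk, "choices", None):
        delta = chunk.choices[0].delta
        if getattr(delta, "content", None):
            print(delta.content, end="", flush=True)
print()
```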
-#
-# def _apply_policy(
-# self,
-# policy: PolicyInput,
-# context: PolicyContext,
-# model_config: _ModelConfig,
-# exec_config: _ExecutionConfig,
-# ) -> Dict[str, Any]:
-# """Apply policy and return unified configuration."""
-# pol = _process_policy(policy, context)
-#
-# # Start with defaults
-# result = {
-# "model_id": model_config.id,
-# "model": model_config.model_list
-# if model_config.model_list
-# else model_config.id, # Use full list when available
-# "mcp_servers": list(exec_config.mcp_servers),
-# "model_kwargs": {},
-# "prepend": [],
-# "append": [],
-# }
-#
-# if pol:
-# # Handle model override
-# requested_model = pol.get("model")
-# if requested_model and exec_config.strict_models and exec_config.available_models:
-# if isinstance(requested_model, list):
-# # Filter to only available models
-# valid_models = [m for m in requested_model if m in exec_config.available_models]
-# if valid_models:
-# result["model"] = valid_models
-# result["model_id"] = str(valid_models[0])
-# elif exec_config.verbose:
-# print(f"[RUNNER] Policy requested unavailable models {requested_model}, ignoring")
-# elif requested_model not in exec_config.available_models:
-# if exec_config.verbose:
-# print(f"[RUNNER] Policy requested unavailable model '{requested_model}', ignoring")
-# else:
-# result["model_id"] = str(requested_model)
-# result["model"] = str(requested_model)
-# elif requested_model:
-# if isinstance(requested_model, list):
-# result["model"] = requested_model
-# result["model_id"] = str(requested_model[0]) if requested_model else result["model_id"]
-# else:
-# result["model_id"] = str(requested_model)
-# result["model"] = str(requested_model)
-#
-# # Handle other policy settings
-# result["mcp_servers"] = list(pol.get("mcp_servers", result["mcp_servers"]))
-# result["model_kwargs"] = dict(pol.get("model_settings", {}))
-# result["prepend"] = list(pol.get("message_prepend", []))
-# result["append"] = list(pol.get("message_append", []))
-#
-# # Handle max_steps update
-# if pol.get("max_steps") is not None:
-# try:
-# exec_config.max_steps = int(pol.get("max_steps"))
-# except Exception:
-# pass
-#
-# return result
-#
-# def _build_messages(self, messages: list[Message], prepend: list[Message], append: list[Message]) -> list[Message]:
-# """Build final message list with prepend/append."""
-# return (prepend + messages + append) if (prepend or append) else messages
-#
-# def _extract_tool_calls(self, choice) -> list[ToolCall]:
-# """Extract tool calls from response choice."""
-# if not hasattr(choice, "message"):
-# return []
-#
-# message = choice.message
-# msg = vars(message) if hasattr(message, "__dict__") else message
-# tool_calls = msg.get("tool_calls", [])
-#
-# if not tool_calls:
-# return []
-#
-# calls = []
-# for tc in tool_calls:
-# tc_dict = vars(tc) if hasattr(tc, "__dict__") else tc
-# fn = tc_dict.get("function", {})
-# fn_dict = vars(fn) if hasattr(fn, "__dict__") else fn
-#
-# calls.append(
-# {
-# "id": tc_dict.get("id", ""),
-# "type": tc_dict.get("type", "function"),
-# "function": {
-# "name": fn_dict.get("name", ""),
-# "arguments": fn_dict.get("arguments", "{}"),
-# },
-# }
-# )
-# return calls
-#
-# async def _execute_tool_calls(
-# self,
-# tool_calls: list[ToolCall],
-# tool_handler: _ToolHandler,
-# messages: list[Message],
-# tool_results: list[ToolResult],
-# tools_called: list[str],
-# step: int,
-# verbose: bool = False,
-# ):
-# """Execute tool calls asynchronously."""
-# if verbose:
-# print(f" _execute_tool_calls: Processing {len(tool_calls)} tool calls")
-#
-# # Record single assistant message with ALL tool calls (OpenAI format)
-# messages.append({"role": "assistant", "tool_calls": list(tool_calls)})
-#
-# for i, tc in enumerate(tool_calls):
-# fn_name = tc["function"]["name"]
-# fn_args_str = tc["function"]["arguments"]
-#
-# if verbose:
-# print(f" Tool {i + 1}/{len(tool_calls)}: {fn_name}")
-#
-# try:
-# fn_args = json.loads(fn_args_str)
-# except json.JSONDecodeError:
-# fn_args = {}
-#
-# try:
-# result = await tool_handler.exec(fn_name, fn_args)
-# tool_results.append({"name": fn_name, "result": result, "step": step})
-# tools_called.append(fn_name)
-# messages.append({"role": "tool", "tool_call_id": tc["id"], "content": str(result)})
-#
-# if verbose:
-# print(f" Tool {fn_name} executed successfully: {str(result)[:50]}...")
-# except Exception as e:
-# error_result = {"error": str(e), "name": fn_name, "step": step}
-# tool_results.append(error_result)
-# messages.append(
-# {
-# "role": "tool",
-# "tool_call_id": tc["id"],
-# "content": f"Error: {str(e)}",
-# }
-# )
-#
-# if verbose:
-# print(f" Tool {fn_name} failed with error: {e}")
-# print(f" Error type: {type(e).__name__}")
-#
-# def _execute_tool_calls_sync(
-# self,
-# tool_calls: list[ToolCall],
-# tool_handler: _ToolHandler,
-# messages: list[Message],
-# tool_results: list[ToolResult],
-# tools_called: list[str],
-# step: int,
-# ):
-# """Execute tool calls synchronously."""
-# # Record single assistant message with ALL tool calls (OpenAI format)
-# messages.append({"role": "assistant", "tool_calls": list(tool_calls)})
-#
-# for tc in tool_calls:
-# fn_name = tc["function"]["name"]
-# fn_args_str = tc["function"]["arguments"]
-#
-# try:
-# fn_args = json.loads(fn_args_str)
-# except json.JSONDecodeError:
-# fn_args = {}
-#
-# try:
-# result = tool_handler.exec_sync(fn_name, fn_args)
-# tool_results.append({"name": fn_name, "result": result, "step": step})
-# tools_called.append(fn_name)
-# messages.append({"role": "tool", "tool_call_id": tc["id"], "content": str(result)})
-# except Exception as e:
-# error_result = {"error": str(e), "name": fn_name, "step": step}
-# tool_results.append(error_result)
-# messages.append(
-# {
-# "role": "tool",
-# "tool_call_id": tc["id"],
-# "content": f"Error: {str(e)}",
-# }
-# )
-#
-# def _accumulate_tool_calls(self, deltas, acc: list[ToolCall]) -> None:
-# """Accumulate streaming tool call deltas."""
-# for delta in deltas:
-# index = getattr(delta, "index", 0)
-#
-# # Ensure we have enough entries in acc
-# while len(acc) <= index:
-# acc.append(
-# {
-# "id": "",
-# "type": "function",
-# "function": {"name": "", "arguments": ""},
-# }
-# )
-#
-# if hasattr(delta, "id") and delta.id:
-# acc[index]["id"] = delta.id
-# if hasattr(delta, "function"):
-# fn = delta.function
-# if hasattr(fn, "name") and fn.name:
-# acc[index]["function"]["name"] = fn.name
-# if hasattr(fn, "arguments") and fn.arguments:
-# acc[index]["function"]["arguments"] += fn.arguments
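To see the reassembly, a self-contained sketch that fakes two streamed deltas with `SimpleNamespace` (real deltas are SDK objects with the same attributes):

```python
from types import SimpleNamespace as NS

acc: list = []
first = [NS(index=0, id="call_1", function=NS(name="add", arguments='{"a": 2,'))]
rest = [NS(index=0, id=None, function=NS(name=None, arguments=' "b": 3}'))]
# self is unused by the method, so None suffices for this illustration
DedalusRunner._accumulate_tool_calls(None, first, acc)
DedalusRunner._accumulate_tool_calls(None, rest, acc)
assert acc[0]["function"]["arguments"] == '{"a": 2, "b": 3}'
```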
-#
-# @staticmethod
-# def _mk_kwargs(mc: _ModelConfig) -> Dict[str, Any]:
-# """Convert model config to kwargs for client call."""
-# from ..._utils import is_given
-# from ...lib._parsing import type_to_response_format_param
-#
-# d = asdict(mc)
-# d.pop("id", None) # Remove id since it's passed separately
-# d.pop("model_list", None) # Remove model_list since it's not an API parameter
-#
-# # Convert Pydantic model to dict schema if needed
-# if "response_format" in d and d["response_format"] is not None:
-# converted = type_to_response_format_param(d["response_format"])
-# d["response_format"] = converted if is_given(converted) else None
-#
-# return {k: v for k, v in d.items() if v is not None}
-#
-# ===========================================================================
-# END: production version
-# ===========================================================================
From 30a719572bb8087c9c87e980f4c9f65b95f8c1d0 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 7 Feb 2026 15:33:32 +0000
Subject: [PATCH 12/23] feat(client): add custom JSON encoder for extended type
support
---
src/dedalus_labs/_base_client.py | 7 +-
src/dedalus_labs/_compat.py | 7 +-
src/dedalus_labs/_utils/_json.py | 35 +++++++++
tests/test_utils/test_json.py | 126 +++++++++++++++++++++++++++++++
4 files changed, 169 insertions(+), 6 deletions(-)
create mode 100644 src/dedalus_labs/_utils/_json.py
create mode 100644 tests/test_utils/test_json.py
diff --git a/src/dedalus_labs/_base_client.py b/src/dedalus_labs/_base_client.py
index 42f1a46..fc60f6c 100644
--- a/src/dedalus_labs/_base_client.py
+++ b/src/dedalus_labs/_base_client.py
@@ -86,6 +86,7 @@
APIConnectionError,
APIResponseValidationError,
)
+from ._utils._json import openapi_dumps
log: logging.Logger = logging.getLogger(__name__)
@@ -554,8 +555,10 @@ def _build_request(
kwargs["content"] = options.content
elif isinstance(json_data, bytes):
kwargs["content"] = json_data
- else:
- kwargs["json"] = json_data if is_given(json_data) else None
+ elif not files:
+                # Don't set `content` when files are present: the body is sent as
+                # multipart/form-data, and httpx's `content` param overrides the other body arguments
+ kwargs["content"] = openapi_dumps(json_data) if is_given(json_data) and json_data is not None else None
kwargs["files"] = files
else:
headers.pop("Content-Type", None)
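After this hunk, body selection in `_build_request` follows a single precedence chain. A rough equivalent, wrapped as a standalone function (a sketch, not the SDK's actual code):

```python
from dedalus_labs._utils._json import openapi_dumps

def resolve_body(content, json_data, files):
    """Sketch of the precedence implemented above."""
    if content is not None:
        return {"content": content}                    # raw/streaming body wins
    if isinstance(json_data, bytes):
        return {"content": json_data}                  # pre-encoded JSON passes through
    if not files:
        return {"content": openapi_dumps(json_data)}   # extended-type encoder
    return {"files": files}                            # httpx builds multipart itself
```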
diff --git a/src/dedalus_labs/_compat.py b/src/dedalus_labs/_compat.py
index c6b9453..bf1f1b9 100644
--- a/src/dedalus_labs/_compat.py
+++ b/src/dedalus_labs/_compat.py
@@ -139,6 +139,7 @@ def model_dump(
exclude_defaults: bool = False,
warnings: bool = True,
mode: Literal["json", "python"] = "python",
+ by_alias: bool | None = None,
) -> dict[str, Any]:
if (not PYDANTIC_V1) or hasattr(model, "model_dump"):
return model.model_dump(
@@ -149,14 +150,12 @@ def model_dump(
exclude_defaults=exclude_defaults,
# warnings are not supported in Pydantic v1
warnings=True if PYDANTIC_V1 else warnings,
+ by_alias=by_alias,
)
return cast(
"dict[str, Any]",
model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
- by_alias=True,
- exclude=exclude,
- exclude_unset=exclude_unset,
- exclude_defaults=exclude_defaults,
+ exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, by_alias=bool(by_alias)
),
)
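The `by_alias` pass-through matters because the encoder below dumps models with their wire names. A quick illustration (Pydantic v2 shown):

```python
import pydantic

class User(pydantic.BaseModel):
    first_name: str = pydantic.Field(alias="firstName")

user = User(firstName="Ada")
print(user.model_dump(by_alias=True))  # {'firstName': 'Ada'}
print(user.model_dump())               # {'first_name': 'Ada'}
```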
diff --git a/src/dedalus_labs/_utils/_json.py b/src/dedalus_labs/_utils/_json.py
new file mode 100644
index 0000000..6058421
--- /dev/null
+++ b/src/dedalus_labs/_utils/_json.py
@@ -0,0 +1,35 @@
+import json
+from typing import Any
+from datetime import datetime
+from typing_extensions import override
+
+import pydantic
+
+from .._compat import model_dump
+
+
+def openapi_dumps(obj: Any) -> bytes:
+ """
+ Serialize an object to UTF-8 encoded JSON bytes.
+
+ Extends the standard json.dumps with support for additional types
+ commonly used in the SDK, such as `datetime`, `pydantic.BaseModel`, etc.
+ """
+ return json.dumps(
+ obj,
+ cls=_CustomEncoder,
+ # Uses the same defaults as httpx's JSON serialization
+ ensure_ascii=False,
+ separators=(",", ":"),
+ allow_nan=False,
+ ).encode()
+
+
+class _CustomEncoder(json.JSONEncoder):
+ @override
+ def default(self, o: Any) -> Any:
+ if isinstance(o, datetime):
+ return o.isoformat()
+ if isinstance(o, pydantic.BaseModel):
+ return model_dump(o, exclude_unset=True, mode="json", by_alias=True)
+ return super().default(o)
diff --git a/tests/test_utils/test_json.py b/tests/test_utils/test_json.py
new file mode 100644
index 0000000..e5237c8
--- /dev/null
+++ b/tests/test_utils/test_json.py
@@ -0,0 +1,126 @@
+from __future__ import annotations
+
+import datetime
+from typing import Union
+
+import pydantic
+
+from dedalus_labs import _compat
+from dedalus_labs._utils._json import openapi_dumps
+
+
+class TestOpenapiDumps:
+ def test_basic(self) -> None:
+ data = {"key": "value", "number": 42}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"key":"value","number":42}'
+
+ def test_datetime_serialization(self) -> None:
+ dt = datetime.datetime(2023, 1, 1, 12, 0, 0)
+ data = {"datetime": dt}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"datetime":"2023-01-01T12:00:00"}'
+
+ def test_pydantic_model_serialization(self) -> None:
+ class User(pydantic.BaseModel):
+ first_name: str
+ last_name: str
+ age: int
+
+ model_instance = User(first_name="John", last_name="Kramer", age=83)
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"first_name":"John","last_name":"Kramer","age":83}}'
+
+ def test_pydantic_model_with_default_values(self) -> None:
+ class User(pydantic.BaseModel):
+ name: str
+ role: str = "user"
+ active: bool = True
+ score: int = 0
+
+ model_instance = User(name="Alice")
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Alice"}}'
+
+ def test_pydantic_model_with_default_values_overridden(self) -> None:
+ class User(pydantic.BaseModel):
+ name: str
+ role: str = "user"
+ active: bool = True
+
+ model_instance = User(name="Bob", role="admin", active=False)
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Bob","role":"admin","active":false}}'
+
+ def test_pydantic_model_with_alias(self) -> None:
+ class User(pydantic.BaseModel):
+ first_name: str = pydantic.Field(alias="firstName")
+ last_name: str = pydantic.Field(alias="lastName")
+
+ model_instance = User(firstName="John", lastName="Doe")
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"firstName":"John","lastName":"Doe"}}'
+
+ def test_pydantic_model_with_alias_and_default(self) -> None:
+ class User(pydantic.BaseModel):
+ user_name: str = pydantic.Field(alias="userName")
+ user_role: str = pydantic.Field(default="member", alias="userRole")
+ is_active: bool = pydantic.Field(default=True, alias="isActive")
+
+ model_instance = User(userName="charlie")
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"userName":"charlie"}}'
+
+ model_with_overrides = User(userName="diana", userRole="admin", isActive=False)
+ data = {"model": model_with_overrides}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"userName":"diana","userRole":"admin","isActive":false}}'
+
+ def test_pydantic_model_with_nested_models_and_defaults(self) -> None:
+ class Address(pydantic.BaseModel):
+ street: str
+ city: str = "Unknown"
+
+ class User(pydantic.BaseModel):
+ name: str
+ address: Address
+ verified: bool = False
+
+ if _compat.PYDANTIC_V1:
+ # to handle forward references in Pydantic v1
+ User.update_forward_refs(**locals()) # type: ignore[reportDeprecated]
+
+ address = Address(street="123 Main St")
+ user = User(name="Diana", address=address)
+ data = {"user": user}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"user":{"name":"Diana","address":{"street":"123 Main St"}}}'
+
+ address_with_city = Address(street="456 Oak Ave", city="Boston")
+ user_verified = User(name="Eve", address=address_with_city, verified=True)
+ data = {"user": user_verified}
+ json_bytes = openapi_dumps(data)
+ assert (
+ json_bytes == b'{"user":{"name":"Eve","address":{"street":"456 Oak Ave","city":"Boston"},"verified":true}}'
+ )
+
+ def test_pydantic_model_with_optional_fields(self) -> None:
+ class User(pydantic.BaseModel):
+ name: str
+ email: Union[str, None]
+ phone: Union[str, None]
+
+ model_with_none = User(name="Eve", email=None, phone=None)
+ data = {"model": model_with_none}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Eve","email":null,"phone":null}}'
+
+ model_with_values = User(name="Frank", email="frank@example.com", phone=None)
+ data = {"model": model_with_values}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Frank","email":"frank@example.com","phone":null}}'
From 0ec49edae803773f99466df54aa6f67ce0453e32 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 3 Feb 2026 13:23:40 +0000
Subject: [PATCH 13/23] chore(ci): add missing environment
---
.github/workflows/publish-pypi.yml | 1 +
1 file changed, 1 insertion(+)
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
index c3cd036..2da411c 100644
--- a/.github/workflows/publish-pypi.yml
+++ b/.github/workflows/publish-pypi.yml
@@ -12,6 +12,7 @@ jobs:
publish:
name: publish
runs-on: ubuntu-latest
+ environment: production
steps:
- uses: actions/checkout@v6
From bf525727fbbb537225239ebcdf88c85c4e58d05d Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 7 Feb 2026 15:32:15 +0000
Subject: [PATCH 14/23] fix(api): add byok provider model
---
.stats.yml | 2 +-
api.md | 6 +-
src/dedalus_labs/_client.py | 40 +++++++-------
src/dedalus_labs/resources/__init__.py | 24 ++++----
src/dedalus_labs/resources/ocr.py | 58 ++++++++++----------
src/dedalus_labs/types/__init__.py | 8 +--
src/dedalus_labs/types/ocr_document_param.py | 4 +-
src/dedalus_labs/types/ocr_page.py | 4 +-
src/dedalus_labs/types/ocr_process_params.py | 8 +--
src/dedalus_labs/types/ocr_response.py | 8 +--
tests/api_resources/test_ocr.py | 22 ++++----
11 files changed, 93 insertions(+), 91 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 1e99710..0d4b32e 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 11
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/dedalus-labs%2Fdedalus-sdk-2158e2dd12dc5bc533e872e1fa4a9bd1627c2f15b0e417aa4645554e045d7054.yml
openapi_spec_hash: 30d4d077bf498b7634b3e14deb9d0a1d
-config_hash: 5324d9c636d34ebbadb48aca070564b8
+config_hash: c520c6ec7d9767224157d03e00c54985
diff --git a/api.md b/api.md
index 887b0d5..4a5e7e1 100644
--- a/api.md
+++ b/api.md
@@ -92,17 +92,17 @@ Methods:
- client.images.edit(\*\*params) -> ImagesResponse
- client.images.generate(\*\*params) -> ImagesResponse
-# Ocr
+# OCR
Types:
```python
-from dedalus_labs.types import OcrDocument, OcrPage, OcrRequest, OcrResponse
+from dedalus_labs.types import OCRDocument, OCRPage, OCRRequest, OCRResponse
```
Methods:
-- client.ocr.process(\*\*params) -> OcrResponse
+- client.ocr.process(\*\*params) -> OCRResponse
# Chat
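For downstream code, the rename is purely in class and type names; the resource accessor itself is unchanged. Roughly:

```python
from dedalus_labs import Dedalus
from dedalus_labs.types import OCRResponse  # previously OcrResponse

client = Dedalus()
ocr = client.ocr  # now an OCRResource (previously OcrResource)
```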
diff --git a/src/dedalus_labs/_client.py b/src/dedalus_labs/_client.py
index 70e3bf6..03c0ee4 100644
--- a/src/dedalus_labs/_client.py
+++ b/src/dedalus_labs/_client.py
@@ -35,7 +35,7 @@
if TYPE_CHECKING:
from .resources import ocr, chat, audio, images, models, embeddings
- from .resources.ocr import OcrResource, AsyncOcrResource
+ from .resources.ocr import OCRResource, AsyncOCRResource
from .resources.images import ImagesResource, AsyncImagesResource
from .resources.models import ModelsResource, AsyncModelsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
@@ -213,10 +213,10 @@ def images(self) -> ImagesResource:
return ImagesResource(self)
@cached_property
- def ocr(self) -> OcrResource:
- from .resources.ocr import OcrResource
+ def ocr(self) -> OCRResource:
+ from .resources.ocr import OCRResource
- return OcrResource(self)
+ return OCRResource(self)
@cached_property
def chat(self) -> ChatResource:
@@ -266,6 +266,7 @@ def default_headers(self) -> dict[str, str | Omit]:
"X-SDK-Version": "1.0.0",
"X-Provider": self.provider if self.provider is not None else Omit(),
"X-Provider-Key": self.provider_key if self.provider_key is not None else Omit(),
+ "X-Provider-Model": self.provider_model if self.provider_model is not None else Omit(),
**self._custom_headers,
}
@@ -533,10 +534,10 @@ def images(self) -> AsyncImagesResource:
return AsyncImagesResource(self)
@cached_property
- def ocr(self) -> AsyncOcrResource:
- from .resources.ocr import AsyncOcrResource
+ def ocr(self) -> AsyncOCRResource:
+ from .resources.ocr import AsyncOCRResource
- return AsyncOcrResource(self)
+ return AsyncOCRResource(self)
@cached_property
def chat(self) -> AsyncChatResource:
@@ -586,6 +587,7 @@ def default_headers(self) -> dict[str, str | Omit]:
"X-SDK-Version": "1.0.0",
"X-Provider": self.provider if self.provider is not None else Omit(),
"X-Provider-Key": self.provider_key if self.provider_key is not None else Omit(),
+ "X-Provider-Model": self.provider_model if self.provider_model is not None else Omit(),
**self._custom_headers,
}
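Presumably the client constructor gains a matching `provider_model` argument alongside `provider` and `provider_key` (an assumption: only the header wiring is visible in this hunk), so BYOK callers would pin a provider-side model like:

```python
from dedalus_labs import Dedalus

# `provider_model` is assumed here; values are placeholders.
client = Dedalus(
    provider="openai",
    provider_key="provider-api-key",
    provider_model="gpt-4o",  # emitted as the X-Provider-Model header
)
```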
@@ -731,10 +733,10 @@ def images(self) -> images.ImagesResourceWithRawResponse:
return ImagesResourceWithRawResponse(self._client.images)
@cached_property
- def ocr(self) -> ocr.OcrResourceWithRawResponse:
- from .resources.ocr import OcrResourceWithRawResponse
+ def ocr(self) -> ocr.OCRResourceWithRawResponse:
+ from .resources.ocr import OCRResourceWithRawResponse
- return OcrResourceWithRawResponse(self._client.ocr)
+ return OCRResourceWithRawResponse(self._client.ocr)
@cached_property
def chat(self) -> chat.ChatResourceWithRawResponse:
@@ -774,10 +776,10 @@ def images(self) -> images.AsyncImagesResourceWithRawResponse:
return AsyncImagesResourceWithRawResponse(self._client.images)
@cached_property
- def ocr(self) -> ocr.AsyncOcrResourceWithRawResponse:
- from .resources.ocr import AsyncOcrResourceWithRawResponse
+ def ocr(self) -> ocr.AsyncOCRResourceWithRawResponse:
+ from .resources.ocr import AsyncOCRResourceWithRawResponse
- return AsyncOcrResourceWithRawResponse(self._client.ocr)
+ return AsyncOCRResourceWithRawResponse(self._client.ocr)
@cached_property
def chat(self) -> chat.AsyncChatResourceWithRawResponse:
@@ -817,10 +819,10 @@ def images(self) -> images.ImagesResourceWithStreamingResponse:
return ImagesResourceWithStreamingResponse(self._client.images)
@cached_property
- def ocr(self) -> ocr.OcrResourceWithStreamingResponse:
- from .resources.ocr import OcrResourceWithStreamingResponse
+ def ocr(self) -> ocr.OCRResourceWithStreamingResponse:
+ from .resources.ocr import OCRResourceWithStreamingResponse
- return OcrResourceWithStreamingResponse(self._client.ocr)
+ return OCRResourceWithStreamingResponse(self._client.ocr)
@cached_property
def chat(self) -> chat.ChatResourceWithStreamingResponse:
@@ -860,10 +862,10 @@ def images(self) -> images.AsyncImagesResourceWithStreamingResponse:
return AsyncImagesResourceWithStreamingResponse(self._client.images)
@cached_property
- def ocr(self) -> ocr.AsyncOcrResourceWithStreamingResponse:
- from .resources.ocr import AsyncOcrResourceWithStreamingResponse
+ def ocr(self) -> ocr.AsyncOCRResourceWithStreamingResponse:
+ from .resources.ocr import AsyncOCRResourceWithStreamingResponse
- return AsyncOcrResourceWithStreamingResponse(self._client.ocr)
+ return AsyncOCRResourceWithStreamingResponse(self._client.ocr)
@cached_property
def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
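[Editor's note] Both the sync and async clients now emit `X-Provider-Model` alongside `X-Provider` and `X-Provider-Key`, using the same `Omit()` pattern so the header disappears when the value is unset. A hedged sketch of the effect (this hunk only shows `self.provider_model` being read in `default_headers`; accepting it as a constructor option is an assumption):

    from dedalus_labs import Dedalus

    # Hypothetical kwarg: the diff does not show where provider_model is set,
    # so the constructor option name is assumed here.
    client = Dedalus(provider_model="mistral-ocr-latest")

    # Resulting default headers, conceptually:
    #   X-Provider-Model: mistral-ocr-latest
    # With provider_model=None, the value maps to Omit() and is never sent.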
diff --git a/src/dedalus_labs/resources/__init__.py b/src/dedalus_labs/resources/__init__.py
index 566bc5c..8d5f63f 100644
--- a/src/dedalus_labs/resources/__init__.py
+++ b/src/dedalus_labs/resources/__init__.py
@@ -1,12 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .ocr import (
- OcrResource,
- AsyncOcrResource,
- OcrResourceWithRawResponse,
- AsyncOcrResourceWithRawResponse,
- OcrResourceWithStreamingResponse,
- AsyncOcrResourceWithStreamingResponse,
+ OCRResource,
+ AsyncOCRResource,
+ OCRResourceWithRawResponse,
+ AsyncOCRResourceWithRawResponse,
+ OCRResourceWithStreamingResponse,
+ AsyncOCRResourceWithStreamingResponse,
)
from .chat import (
ChatResource,
@@ -74,12 +74,12 @@
"AsyncImagesResourceWithRawResponse",
"ImagesResourceWithStreamingResponse",
"AsyncImagesResourceWithStreamingResponse",
- "OcrResource",
- "AsyncOcrResource",
- "OcrResourceWithRawResponse",
- "AsyncOcrResourceWithRawResponse",
- "OcrResourceWithStreamingResponse",
- "AsyncOcrResourceWithStreamingResponse",
+ "OCRResource",
+ "AsyncOCRResource",
+ "OCRResourceWithRawResponse",
+ "AsyncOCRResourceWithRawResponse",
+ "OCRResourceWithStreamingResponse",
+ "AsyncOCRResourceWithStreamingResponse",
"ChatResource",
"AsyncChatResource",
"ChatResourceWithRawResponse",
diff --git a/src/dedalus_labs/resources/ocr.py b/src/dedalus_labs/resources/ocr.py
index c0046f0..db4902c 100644
--- a/src/dedalus_labs/resources/ocr.py
+++ b/src/dedalus_labs/resources/ocr.py
@@ -16,36 +16,36 @@
async_to_streamed_response_wrapper,
)
from .._base_client import make_request_options
-from ..types.ocr_response import OcrResponse
-from ..types.ocr_document_param import OcrDocumentParam
+from ..types.ocr_response import OCRResponse
+from ..types.ocr_document_param import OCRDocumentParam
-__all__ = ["OcrResource", "AsyncOcrResource"]
+__all__ = ["OCRResource", "AsyncOCRResource"]
-class OcrResource(SyncAPIResource):
+class OCRResource(SyncAPIResource):
@cached_property
- def with_raw_response(self) -> OcrResourceWithRawResponse:
+ def with_raw_response(self) -> OCRResourceWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/dedalus-labs/dedalus-sdk-python#accessing-raw-response-data-eg-headers
"""
- return OcrResourceWithRawResponse(self)
+ return OCRResourceWithRawResponse(self)
@cached_property
- def with_streaming_response(self) -> OcrResourceWithStreamingResponse:
+ def with_streaming_response(self) -> OCRResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/dedalus-labs/dedalus-sdk-python#with_streaming_response
"""
- return OcrResourceWithStreamingResponse(self)
+ return OCRResourceWithStreamingResponse(self)
def process(
self,
*,
- document: OcrDocumentParam,
+ document: OCRDocumentParam,
model: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -54,7 +54,7 @@ def process(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
idempotency_key: str | None = None,
- ) -> OcrResponse:
+ ) -> OCRResponse:
"""
Process a document through Mistral OCR.
@@ -80,7 +80,7 @@ def process(
"document": document,
"model": model,
},
- ocr_process_params.OcrProcessParams,
+ ocr_process_params.OCRProcessParams,
),
options=make_request_options(
extra_headers=extra_headers,
@@ -89,34 +89,34 @@ def process(
timeout=timeout,
idempotency_key=idempotency_key,
),
- cast_to=OcrResponse,
+ cast_to=OCRResponse,
)
-class AsyncOcrResource(AsyncAPIResource):
+class AsyncOCRResource(AsyncAPIResource):
@cached_property
- def with_raw_response(self) -> AsyncOcrResourceWithRawResponse:
+ def with_raw_response(self) -> AsyncOCRResourceWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/dedalus-labs/dedalus-sdk-python#accessing-raw-response-data-eg-headers
"""
- return AsyncOcrResourceWithRawResponse(self)
+ return AsyncOCRResourceWithRawResponse(self)
@cached_property
- def with_streaming_response(self) -> AsyncOcrResourceWithStreamingResponse:
+ def with_streaming_response(self) -> AsyncOCRResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/dedalus-labs/dedalus-sdk-python#with_streaming_response
"""
- return AsyncOcrResourceWithStreamingResponse(self)
+ return AsyncOCRResourceWithStreamingResponse(self)
async def process(
self,
*,
- document: OcrDocumentParam,
+ document: OCRDocumentParam,
model: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -125,7 +125,7 @@ async def process(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
idempotency_key: str | None = None,
- ) -> OcrResponse:
+ ) -> OCRResponse:
"""
Process a document through Mistral OCR.
@@ -151,7 +151,7 @@ async def process(
"document": document,
"model": model,
},
- ocr_process_params.OcrProcessParams,
+ ocr_process_params.OCRProcessParams,
),
options=make_request_options(
extra_headers=extra_headers,
@@ -160,12 +160,12 @@ async def process(
timeout=timeout,
idempotency_key=idempotency_key,
),
- cast_to=OcrResponse,
+ cast_to=OCRResponse,
)
-class OcrResourceWithRawResponse:
- def __init__(self, ocr: OcrResource) -> None:
+class OCRResourceWithRawResponse:
+ def __init__(self, ocr: OCRResource) -> None:
self._ocr = ocr
self.process = to_raw_response_wrapper(
@@ -173,8 +173,8 @@ def __init__(self, ocr: OcrResource) -> None:
)
-class AsyncOcrResourceWithRawResponse:
- def __init__(self, ocr: AsyncOcrResource) -> None:
+class AsyncOCRResourceWithRawResponse:
+ def __init__(self, ocr: AsyncOCRResource) -> None:
self._ocr = ocr
self.process = async_to_raw_response_wrapper(
@@ -182,8 +182,8 @@ def __init__(self, ocr: AsyncOcrResource) -> None:
)
-class OcrResourceWithStreamingResponse:
- def __init__(self, ocr: OcrResource) -> None:
+class OCRResourceWithStreamingResponse:
+ def __init__(self, ocr: OCRResource) -> None:
self._ocr = ocr
self.process = to_streamed_response_wrapper(
@@ -191,8 +191,8 @@ def __init__(self, ocr: OcrResource) -> None:
)
-class AsyncOcrResourceWithStreamingResponse:
- def __init__(self, ocr: AsyncOcrResource) -> None:
+class AsyncOCRResourceWithStreamingResponse:
+ def __init__(self, ocr: AsyncOCRResource) -> None:
self._ocr = ocr
self.process = async_to_streamed_response_wrapper(
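[Editor's note] As the docstrings above describe, the raw and streaming wrappers expose the same `process` call; a minimal sketch against the renamed classes (assuming a configured client; the URL is a placeholder):

    client = Dedalus()
    doc = {"document_url": "https://example.com/sample.pdf"}

    # Raw response: body is read eagerly, then parsed into OCRResponse.
    response = client.ocr.with_raw_response.process(document=doc)
    ocr = response.parse()

    # Streaming response: the body is not read until parse() is called.
    with client.ocr.with_streaming_response.process(document=doc) as response:
        ocr = response.parse()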
diff --git a/src/dedalus_labs/types/__init__.py b/src/dedalus_labs/types/__init__.py
index d0ea1fb..b22264b 100644
--- a/src/dedalus_labs/types/__init__.py
+++ b/src/dedalus_labs/types/__init__.py
@@ -24,12 +24,12 @@
ResponseFormatJSONObject as ResponseFormatJSONObject,
ResponseFormatJSONSchema as ResponseFormatJSONSchema,
)
-from .ocr_page import OcrPage as OcrPage
-from .ocr_response import OcrResponse as OcrResponse
+from .ocr_page import OCRPage as OCRPage
+from .ocr_response import OCRResponse as OCRResponse
from .images_response import ImagesResponse as ImagesResponse
from .image_edit_params import ImageEditParams as ImageEditParams
-from .ocr_document_param import OcrDocumentParam as OcrDocumentParam
-from .ocr_process_params import OcrProcessParams as OcrProcessParams
+from .ocr_document_param import OCRDocumentParam as OCRDocumentParam
+from .ocr_process_params import OCRProcessParams as OCRProcessParams
from .list_models_response import ListModelsResponse as ListModelsResponse
from .image_generate_params import ImageGenerateParams as ImageGenerateParams
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
diff --git a/src/dedalus_labs/types/ocr_document_param.py b/src/dedalus_labs/types/ocr_document_param.py
index 1d0cbe5..278b22d 100644
--- a/src/dedalus_labs/types/ocr_document_param.py
+++ b/src/dedalus_labs/types/ocr_document_param.py
@@ -4,10 +4,10 @@
from typing_extensions import Required, TypedDict
-__all__ = ["OcrDocumentParam"]
+__all__ = ["OCRDocumentParam"]
-class OcrDocumentParam(TypedDict, total=False):
+class OCRDocumentParam(TypedDict, total=False):
"""Document input for OCR."""
document_url: Required[str]
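[Editor's note] `OCRDocumentParam` remains a `total=False` TypedDict whose only required key is `document_url`, so construction is unchanged by the rename:

    from dedalus_labs.types import OCRDocumentParam

    doc: OCRDocumentParam = {"document_url": "https://example.com/sample.pdf"}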
diff --git a/src/dedalus_labs/types/ocr_page.py b/src/dedalus_labs/types/ocr_page.py
index 41e0005..5dbab07 100644
--- a/src/dedalus_labs/types/ocr_page.py
+++ b/src/dedalus_labs/types/ocr_page.py
@@ -2,10 +2,10 @@
from .._models import BaseModel
-__all__ = ["OcrPage"]
+__all__ = ["OCRPage"]
-class OcrPage(BaseModel):
+class OCRPage(BaseModel):
"""Single page OCR result."""
index: int
diff --git a/src/dedalus_labs/types/ocr_process_params.py b/src/dedalus_labs/types/ocr_process_params.py
index b671b2b..f75ab33 100644
--- a/src/dedalus_labs/types/ocr_process_params.py
+++ b/src/dedalus_labs/types/ocr_process_params.py
@@ -4,13 +4,13 @@
from typing_extensions import Required, TypedDict
-from .ocr_document_param import OcrDocumentParam
+from .ocr_document_param import OCRDocumentParam
-__all__ = ["OcrProcessParams"]
+__all__ = ["OCRProcessParams"]
-class OcrProcessParams(TypedDict, total=False):
- document: Required[OcrDocumentParam]
+class OCRProcessParams(TypedDict, total=False):
+ document: Required[OCRDocumentParam]
"""Document input for OCR."""
model: str
diff --git a/src/dedalus_labs/types/ocr_response.py b/src/dedalus_labs/types/ocr_response.py
index 93db8fc..a0927b8 100644
--- a/src/dedalus_labs/types/ocr_response.py
+++ b/src/dedalus_labs/types/ocr_response.py
@@ -3,16 +3,16 @@
from typing import Dict, List, Optional
from .._models import BaseModel
-from .ocr_page import OcrPage
+from .ocr_page import OCRPage
-__all__ = ["OcrResponse"]
+__all__ = ["OCRResponse"]
-class OcrResponse(BaseModel):
+class OCRResponse(BaseModel):
"""OCR response schema."""
model: str
- pages: List[OcrPage]
+ pages: List[OCRPage]
usage: Optional[Dict[str, object]] = None
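[Editor's note] Consuming the renamed response models, using only the fields shown in these hunks (`OCRPage` exposes at least `index`; any other page fields are outside this diff). Continuing the earlier sketch:

    resp = client.ocr.process(
        document={"document_url": "https://example.com/sample.pdf"},
    )
    for page in resp.pages:       # List[OCRPage]
        print(page.index)
    if resp.usage is not None:    # Optional[Dict[str, object]]
        print(resp.usage)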
diff --git a/tests/api_resources/test_ocr.py b/tests/api_resources/test_ocr.py
index 0d0ee8f..ffc5dcb 100644
--- a/tests/api_resources/test_ocr.py
+++ b/tests/api_resources/test_ocr.py
@@ -9,12 +9,12 @@
from tests.utils import assert_matches_type
from dedalus_labs import Dedalus, AsyncDedalus
-from dedalus_labs.types import OcrResponse
+from dedalus_labs.types import OCRResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-class TestOcr:
+class TestOCR:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@pytest.mark.skip(reason="Prism tests are disabled")
@@ -23,7 +23,7 @@ def test_method_process(self, client: Dedalus) -> None:
ocr = client.ocr.process(
document={"document_url": "document_url"},
)
- assert_matches_type(OcrResponse, ocr, path=["response"])
+ assert_matches_type(OCRResponse, ocr, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
@@ -35,7 +35,7 @@ def test_method_process_with_all_params(self, client: Dedalus) -> None:
},
model="model",
)
- assert_matches_type(OcrResponse, ocr, path=["response"])
+ assert_matches_type(OCRResponse, ocr, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
@@ -47,7 +47,7 @@ def test_raw_response_process(self, client: Dedalus) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
ocr = response.parse()
- assert_matches_type(OcrResponse, ocr, path=["response"])
+ assert_matches_type(OCRResponse, ocr, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
@@ -59,12 +59,12 @@ def test_streaming_response_process(self, client: Dedalus) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
ocr = response.parse()
- assert_matches_type(OcrResponse, ocr, path=["response"])
+ assert_matches_type(OCRResponse, ocr, path=["response"])
assert cast(Any, response.is_closed) is True
-class TestAsyncOcr:
+class TestAsyncOCR:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@@ -75,7 +75,7 @@ async def test_method_process(self, async_client: AsyncDedalus) -> None:
ocr = await async_client.ocr.process(
document={"document_url": "document_url"},
)
- assert_matches_type(OcrResponse, ocr, path=["response"])
+ assert_matches_type(OCRResponse, ocr, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
@@ -87,7 +87,7 @@ async def test_method_process_with_all_params(self, async_client: AsyncDedalus)
},
model="model",
)
- assert_matches_type(OcrResponse, ocr, path=["response"])
+ assert_matches_type(OCRResponse, ocr, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
@@ -99,7 +99,7 @@ async def test_raw_response_process(self, async_client: AsyncDedalus) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
ocr = await response.parse()
- assert_matches_type(OcrResponse, ocr, path=["response"])
+ assert_matches_type(OCRResponse, ocr, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
@@ -111,6 +111,6 @@ async def test_streaming_response_process(self, async_client: AsyncDedalus) -> N
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
ocr = await response.parse()
- assert_matches_type(OcrResponse, ocr, path=["response"])
+ assert_matches_type(OCRResponse, ocr, path=["response"])
assert cast(Any, response.is_closed) is True
From 696aacfd4fae842fff7f564b2a58c65b902ebcc4 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 9 Feb 2026 14:36:24 +0000
Subject: [PATCH 15/23] chore(internal): bump dependencies
---
requirements-dev.lock | 20 +-
uv.lock | 982 +++++++++++++++++++++---------------------
2 files changed, 509 insertions(+), 493 deletions(-)
diff --git a/requirements-dev.lock b/requirements-dev.lock
index c459281..157e05f 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -3,13 +3,13 @@
-e .
annotated-types==0.7.0
# via pydantic
-anyio==4.12.0
+anyio==4.12.1
# via
# dedalus-labs
# httpx
backports-asyncio-runner==1.2.0 ; python_full_version < '3.11'
# via pytest-asyncio
-certifi==2025.11.12
+certifi==2026.1.4
# via
# httpcore
# httpx
@@ -36,11 +36,13 @@ idna==3.11
# via
# anyio
# httpx
-importlib-metadata==8.7.0
+importlib-metadata==8.7.1
iniconfig==2.1.0 ; python_full_version < '3.10'
# via pytest
iniconfig==2.3.0 ; python_full_version >= '3.10'
# via pytest
+jiter==0.13.0
+ # via dedalus-labs
markdown-it-py==3.0.0 ; python_full_version < '3.10'
# via rich
markdown-it-py==4.0.0 ; python_full_version >= '3.10'
@@ -50,11 +52,11 @@ mdurl==0.1.2
mypy==1.17.0
mypy-extensions==1.1.0
# via mypy
-nodeenv==1.9.1
+nodeenv==1.10.0
# via pyright
packaging==25.0
# via pytest
-pathspec==0.12.1
+pathspec==1.0.3
# via mypy
pluggy==1.6.0
# via pytest
@@ -71,7 +73,7 @@ pytest==8.4.2 ; python_full_version < '3.10'
# via
# pytest-asyncio
# pytest-xdist
-pytest==9.0.1 ; python_full_version >= '3.10'
+pytest==9.0.2 ; python_full_version >= '3.10'
# via
# pytest-asyncio
# pytest-xdist
@@ -82,14 +84,14 @@ python-dateutil==2.9.0.post0 ; python_full_version < '3.10'
# via time-machine
respx==0.22.0
rich==14.2.0
-ruff==0.14.7
+ruff==0.14.13
six==1.17.0 ; python_full_version < '3.10'
# via python-dateutil
sniffio==1.3.1
# via dedalus-labs
time-machine==2.19.0 ; python_full_version < '3.10'
-time-machine==3.1.0 ; python_full_version >= '3.10'
-tomli==2.3.0 ; python_full_version < '3.11'
+time-machine==3.2.0 ; python_full_version >= '3.10'
+tomli==2.4.0 ; python_full_version < '3.11'
# via
# mypy
# pytest
diff --git a/uv.lock b/uv.lock
index a3a5b7c..fb6d812 100644
--- a/uv.lock
+++ b/uv.lock
@@ -26,7 +26,7 @@ wheels = [
[[package]]
name = "aiohttp"
-version = "3.13.2"
+version = "3.13.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "aiohappyeyeballs" },
@@ -38,127 +38,127 @@ dependencies = [
{ name = "propcache" },
{ name = "yarl" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/1c/ce/3b83ebba6b3207a7135e5fcaba49706f8a4b6008153b4e30540c982fae26/aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca", size = 7837994, upload-time = "2025-10-28T20:59:39.937Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/50/42/32cf8e7704ceb4481406eb87161349abb46a57fee3f008ba9cb610968646/aiohttp-3.13.3.tar.gz", hash = "sha256:a949eee43d3782f2daae4f4a2819b2cb9b0c5d3b7f7a927067cc84dafdbb9f88", size = 7844556, upload-time = "2026-01-03T17:33:05.204Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/6d/34/939730e66b716b76046dedfe0842995842fa906ccc4964bba414ff69e429/aiohttp-3.13.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2372b15a5f62ed37789a6b383ff7344fc5b9f243999b0cd9b629d8bc5f5b4155", size = 736471, upload-time = "2025-10-28T20:55:27.924Z" },
- { url = "https://files.pythonhosted.org/packages/fd/cf/dcbdf2df7f6ca72b0bb4c0b4509701f2d8942cf54e29ca197389c214c07f/aiohttp-3.13.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7f8659a48995edee7229522984bd1009c1213929c769c2daa80b40fe49a180c", size = 493985, upload-time = "2025-10-28T20:55:29.456Z" },
- { url = "https://files.pythonhosted.org/packages/9d/87/71c8867e0a1d0882dcbc94af767784c3cb381c1c4db0943ab4aae4fed65e/aiohttp-3.13.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:939ced4a7add92296b0ad38892ce62b98c619288a081170695c6babe4f50e636", size = 489274, upload-time = "2025-10-28T20:55:31.134Z" },
- { url = "https://files.pythonhosted.org/packages/38/0f/46c24e8dae237295eaadd113edd56dee96ef6462adf19b88592d44891dc5/aiohttp-3.13.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6315fb6977f1d0dd41a107c527fee2ed5ab0550b7d885bc15fee20ccb17891da", size = 1668171, upload-time = "2025-10-28T20:55:36.065Z" },
- { url = "https://files.pythonhosted.org/packages/eb/c6/4cdfb4440d0e28483681a48f69841fa5e39366347d66ef808cbdadddb20e/aiohttp-3.13.2-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6e7352512f763f760baaed2637055c49134fd1d35b37c2dedfac35bfe5cf8725", size = 1636036, upload-time = "2025-10-28T20:55:37.576Z" },
- { url = "https://files.pythonhosted.org/packages/84/37/8708cf678628216fb678ab327a4e1711c576d6673998f4f43e86e9ae90dd/aiohttp-3.13.2-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e09a0a06348a2dd73e7213353c90d709502d9786219f69b731f6caa0efeb46f5", size = 1727975, upload-time = "2025-10-28T20:55:39.457Z" },
- { url = "https://files.pythonhosted.org/packages/e6/2e/3ebfe12fdcb9b5f66e8a0a42dffcd7636844c8a018f261efb2419f68220b/aiohttp-3.13.2-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a09a6d073fb5789456545bdee2474d14395792faa0527887f2f4ec1a486a59d3", size = 1815823, upload-time = "2025-10-28T20:55:40.958Z" },
- { url = "https://files.pythonhosted.org/packages/a1/4f/ca2ef819488cbb41844c6cf92ca6dd15b9441e6207c58e5ae0e0fc8d70ad/aiohttp-3.13.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b59d13c443f8e049d9e94099c7e412e34610f1f49be0f230ec656a10692a5802", size = 1669374, upload-time = "2025-10-28T20:55:42.745Z" },
- { url = "https://files.pythonhosted.org/packages/f8/fe/1fe2e1179a0d91ce09c99069684aab619bf2ccde9b20bd6ca44f8837203e/aiohttp-3.13.2-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:20db2d67985d71ca033443a1ba2001c4b5693fe09b0e29f6d9358a99d4d62a8a", size = 1555315, upload-time = "2025-10-28T20:55:44.264Z" },
- { url = "https://files.pythonhosted.org/packages/5a/2b/f3781899b81c45d7cbc7140cddb8a3481c195e7cbff8e36374759d2ab5a5/aiohttp-3.13.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:960c2fc686ba27b535f9fd2b52d87ecd7e4fd1cf877f6a5cba8afb5b4a8bd204", size = 1639140, upload-time = "2025-10-28T20:55:46.626Z" },
- { url = "https://files.pythonhosted.org/packages/72/27/c37e85cd3ece6f6c772e549bd5a253d0c122557b25855fb274224811e4f2/aiohttp-3.13.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:6c00dbcf5f0d88796151e264a8eab23de2997c9303dd7c0bf622e23b24d3ce22", size = 1645496, upload-time = "2025-10-28T20:55:48.933Z" },
- { url = "https://files.pythonhosted.org/packages/66/20/3af1ab663151bd3780b123e907761cdb86ec2c4e44b2d9b195ebc91fbe37/aiohttp-3.13.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fed38a5edb7945f4d1bcabe2fcd05db4f6ec7e0e82560088b754f7e08d93772d", size = 1697625, upload-time = "2025-10-28T20:55:50.377Z" },
- { url = "https://files.pythonhosted.org/packages/95/eb/ae5cab15efa365e13d56b31b0d085a62600298bf398a7986f8388f73b598/aiohttp-3.13.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:b395bbca716c38bef3c764f187860e88c724b342c26275bc03e906142fc5964f", size = 1542025, upload-time = "2025-10-28T20:55:51.861Z" },
- { url = "https://files.pythonhosted.org/packages/e9/2d/1683e8d67ec72d911397fe4e575688d2a9b8f6a6e03c8fdc9f3fd3d4c03f/aiohttp-3.13.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:204ffff2426c25dfda401ba08da85f9c59525cdc42bda26660463dd1cbcfec6f", size = 1714918, upload-time = "2025-10-28T20:55:53.515Z" },
- { url = "https://files.pythonhosted.org/packages/99/a2/ffe8e0e1c57c5e542d47ffa1fcf95ef2b3ea573bf7c4d2ee877252431efc/aiohttp-3.13.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:05c4dd3c48fb5f15db31f57eb35374cb0c09afdde532e7fb70a75aede0ed30f6", size = 1656113, upload-time = "2025-10-28T20:55:55.438Z" },
- { url = "https://files.pythonhosted.org/packages/0d/42/d511aff5c3a2b06c09d7d214f508a4ad8ac7799817f7c3d23e7336b5e896/aiohttp-3.13.2-cp310-cp310-win32.whl", hash = "sha256:e574a7d61cf10351d734bcddabbe15ede0eaa8a02070d85446875dc11189a251", size = 432290, upload-time = "2025-10-28T20:55:56.96Z" },
- { url = "https://files.pythonhosted.org/packages/8b/ea/1c2eb7098b5bad4532994f2b7a8228d27674035c9b3234fe02c37469ef14/aiohttp-3.13.2-cp310-cp310-win_amd64.whl", hash = "sha256:364f55663085d658b8462a1c3f17b2b84a5c2e1ba858e1b79bff7b2e24ad1514", size = 455075, upload-time = "2025-10-28T20:55:58.373Z" },
- { url = "https://files.pythonhosted.org/packages/35/74/b321e7d7ca762638cdf8cdeceb39755d9c745aff7a64c8789be96ddf6e96/aiohttp-3.13.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4647d02df098f6434bafd7f32ad14942f05a9caa06c7016fdcc816f343997dd0", size = 743409, upload-time = "2025-10-28T20:56:00.354Z" },
- { url = "https://files.pythonhosted.org/packages/99/3d/91524b905ec473beaf35158d17f82ef5a38033e5809fe8742e3657cdbb97/aiohttp-3.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e3403f24bcb9c3b29113611c3c16a2a447c3953ecf86b79775e7be06f7ae7ccb", size = 497006, upload-time = "2025-10-28T20:56:01.85Z" },
- { url = "https://files.pythonhosted.org/packages/eb/d3/7f68bc02a67716fe80f063e19adbd80a642e30682ce74071269e17d2dba1/aiohttp-3.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:43dff14e35aba17e3d6d5ba628858fb8cb51e30f44724a2d2f0c75be492c55e9", size = 493195, upload-time = "2025-10-28T20:56:03.314Z" },
- { url = "https://files.pythonhosted.org/packages/98/31/913f774a4708775433b7375c4f867d58ba58ead833af96c8af3621a0d243/aiohttp-3.13.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2a9ea08e8c58bb17655630198833109227dea914cd20be660f52215f6de5613", size = 1747759, upload-time = "2025-10-28T20:56:04.904Z" },
- { url = "https://files.pythonhosted.org/packages/e8/63/04efe156f4326f31c7c4a97144f82132c3bb21859b7bb84748d452ccc17c/aiohttp-3.13.2-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53b07472f235eb80e826ad038c9d106c2f653584753f3ddab907c83f49eedead", size = 1704456, upload-time = "2025-10-28T20:56:06.986Z" },
- { url = "https://files.pythonhosted.org/packages/8e/02/4e16154d8e0a9cf4ae76f692941fd52543bbb148f02f098ca73cab9b1c1b/aiohttp-3.13.2-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e736c93e9c274fce6419af4aac199984d866e55f8a4cec9114671d0ea9688780", size = 1807572, upload-time = "2025-10-28T20:56:08.558Z" },
- { url = "https://files.pythonhosted.org/packages/34/58/b0583defb38689e7f06798f0285b1ffb3a6fb371f38363ce5fd772112724/aiohttp-3.13.2-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ff5e771f5dcbc81c64898c597a434f7682f2259e0cd666932a913d53d1341d1a", size = 1895954, upload-time = "2025-10-28T20:56:10.545Z" },
- { url = "https://files.pythonhosted.org/packages/6b/f3/083907ee3437425b4e376aa58b2c915eb1a33703ec0dc30040f7ae3368c6/aiohttp-3.13.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3b6fb0c207cc661fa0bf8c66d8d9b657331ccc814f4719468af61034b478592", size = 1747092, upload-time = "2025-10-28T20:56:12.118Z" },
- { url = "https://files.pythonhosted.org/packages/ac/61/98a47319b4e425cc134e05e5f3fc512bf9a04bf65aafd9fdcda5d57ec693/aiohttp-3.13.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:97a0895a8e840ab3520e2288db7cace3a1981300d48babeb50e7425609e2e0ab", size = 1606815, upload-time = "2025-10-28T20:56:14.191Z" },
- { url = "https://files.pythonhosted.org/packages/97/4b/e78b854d82f66bb974189135d31fce265dee0f5344f64dd0d345158a5973/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9e8f8afb552297aca127c90cb840e9a1d4bfd6a10d7d8f2d9176e1acc69bad30", size = 1723789, upload-time = "2025-10-28T20:56:16.101Z" },
- { url = "https://files.pythonhosted.org/packages/ed/fc/9d2ccc794fc9b9acd1379d625c3a8c64a45508b5091c546dea273a41929e/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:ed2f9c7216e53c3df02264f25d824b079cc5914f9e2deba94155190ef648ee40", size = 1718104, upload-time = "2025-10-28T20:56:17.655Z" },
- { url = "https://files.pythonhosted.org/packages/66/65/34564b8765ea5c7d79d23c9113135d1dd3609173da13084830f1507d56cf/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:99c5280a329d5fa18ef30fd10c793a190d996567667908bef8a7f81f8202b948", size = 1785584, upload-time = "2025-10-28T20:56:19.238Z" },
- { url = "https://files.pythonhosted.org/packages/30/be/f6a7a426e02fc82781afd62016417b3948e2207426d90a0e478790d1c8a4/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ca6ffef405fc9c09a746cb5d019c1672cd7f402542e379afc66b370833170cf", size = 1595126, upload-time = "2025-10-28T20:56:20.836Z" },
- { url = "https://files.pythonhosted.org/packages/e5/c7/8e22d5d28f94f67d2af496f14a83b3c155d915d1fe53d94b66d425ec5b42/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:47f438b1a28e926c37632bff3c44df7d27c9b57aaf4e34b1def3c07111fdb782", size = 1800665, upload-time = "2025-10-28T20:56:22.922Z" },
- { url = "https://files.pythonhosted.org/packages/d1/11/91133c8b68b1da9fc16555706aa7276fdf781ae2bb0876c838dd86b8116e/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9acda8604a57bb60544e4646a4615c1866ee6c04a8edef9b8ee6fd1d8fa2ddc8", size = 1739532, upload-time = "2025-10-28T20:56:25.924Z" },
- { url = "https://files.pythonhosted.org/packages/17/6b/3747644d26a998774b21a616016620293ddefa4d63af6286f389aedac844/aiohttp-3.13.2-cp311-cp311-win32.whl", hash = "sha256:868e195e39b24aaa930b063c08bb0c17924899c16c672a28a65afded9c46c6ec", size = 431876, upload-time = "2025-10-28T20:56:27.524Z" },
- { url = "https://files.pythonhosted.org/packages/c3/63/688462108c1a00eb9f05765331c107f95ae86f6b197b865d29e930b7e462/aiohttp-3.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:7fd19df530c292542636c2a9a85854fab93474396a52f1695e799186bbd7f24c", size = 456205, upload-time = "2025-10-28T20:56:29.062Z" },
- { url = "https://files.pythonhosted.org/packages/29/9b/01f00e9856d0a73260e86dd8ed0c2234a466c5c1712ce1c281548df39777/aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b", size = 737623, upload-time = "2025-10-28T20:56:30.797Z" },
- { url = "https://files.pythonhosted.org/packages/5a/1b/4be39c445e2b2bd0aab4ba736deb649fabf14f6757f405f0c9685019b9e9/aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc", size = 492664, upload-time = "2025-10-28T20:56:32.708Z" },
- { url = "https://files.pythonhosted.org/packages/28/66/d35dcfea8050e131cdd731dff36434390479b4045a8d0b9d7111b0a968f1/aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7", size = 491808, upload-time = "2025-10-28T20:56:34.57Z" },
- { url = "https://files.pythonhosted.org/packages/00/29/8e4609b93e10a853b65f8291e64985de66d4f5848c5637cddc70e98f01f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb", size = 1738863, upload-time = "2025-10-28T20:56:36.377Z" },
- { url = "https://files.pythonhosted.org/packages/9d/fa/4ebdf4adcc0def75ced1a0d2d227577cd7b1b85beb7edad85fcc87693c75/aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3", size = 1700586, upload-time = "2025-10-28T20:56:38.034Z" },
- { url = "https://files.pythonhosted.org/packages/da/04/73f5f02ff348a3558763ff6abe99c223381b0bace05cd4530a0258e52597/aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f", size = 1768625, upload-time = "2025-10-28T20:56:39.75Z" },
- { url = "https://files.pythonhosted.org/packages/f8/49/a825b79ffec124317265ca7d2344a86bcffeb960743487cb11988ffb3494/aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6", size = 1867281, upload-time = "2025-10-28T20:56:41.471Z" },
- { url = "https://files.pythonhosted.org/packages/b9/48/adf56e05f81eac31edcfae45c90928f4ad50ef2e3ea72cb8376162a368f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e", size = 1752431, upload-time = "2025-10-28T20:56:43.162Z" },
- { url = "https://files.pythonhosted.org/packages/30/ab/593855356eead019a74e862f21523db09c27f12fd24af72dbc3555b9bfd9/aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7", size = 1562846, upload-time = "2025-10-28T20:56:44.85Z" },
- { url = "https://files.pythonhosted.org/packages/39/0f/9f3d32271aa8dc35036e9668e31870a9d3b9542dd6b3e2c8a30931cb27ae/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d", size = 1699606, upload-time = "2025-10-28T20:56:46.519Z" },
- { url = "https://files.pythonhosted.org/packages/2c/3c/52d2658c5699b6ef7692a3f7128b2d2d4d9775f2a68093f74bca06cf01e1/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b", size = 1720663, upload-time = "2025-10-28T20:56:48.528Z" },
- { url = "https://files.pythonhosted.org/packages/9b/d4/8f8f3ff1fb7fb9e3f04fcad4e89d8a1cd8fc7d05de67e3de5b15b33008ff/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8", size = 1737939, upload-time = "2025-10-28T20:56:50.77Z" },
- { url = "https://files.pythonhosted.org/packages/03/d3/ddd348f8a27a634daae39a1b8e291ff19c77867af438af844bf8b7e3231b/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16", size = 1555132, upload-time = "2025-10-28T20:56:52.568Z" },
- { url = "https://files.pythonhosted.org/packages/39/b8/46790692dc46218406f94374903ba47552f2f9f90dad554eed61bfb7b64c/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169", size = 1764802, upload-time = "2025-10-28T20:56:54.292Z" },
- { url = "https://files.pythonhosted.org/packages/ba/e4/19ce547b58ab2a385e5f0b8aa3db38674785085abcf79b6e0edd1632b12f/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248", size = 1719512, upload-time = "2025-10-28T20:56:56.428Z" },
- { url = "https://files.pythonhosted.org/packages/70/30/6355a737fed29dcb6dfdd48682d5790cb5eab050f7b4e01f49b121d3acad/aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e", size = 426690, upload-time = "2025-10-28T20:56:58.736Z" },
- { url = "https://files.pythonhosted.org/packages/0a/0d/b10ac09069973d112de6ef980c1f6bb31cb7dcd0bc363acbdad58f927873/aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45", size = 453465, upload-time = "2025-10-28T20:57:00.795Z" },
- { url = "https://files.pythonhosted.org/packages/bf/78/7e90ca79e5aa39f9694dcfd74f4720782d3c6828113bb1f3197f7e7c4a56/aiohttp-3.13.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7519bdc7dfc1940d201651b52bf5e03f5503bda45ad6eacf64dda98be5b2b6be", size = 732139, upload-time = "2025-10-28T20:57:02.455Z" },
- { url = "https://files.pythonhosted.org/packages/db/ed/1f59215ab6853fbaa5c8495fa6cbc39edfc93553426152b75d82a5f32b76/aiohttp-3.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:088912a78b4d4f547a1f19c099d5a506df17eacec3c6f4375e2831ec1d995742", size = 490082, upload-time = "2025-10-28T20:57:04.784Z" },
- { url = "https://files.pythonhosted.org/packages/68/7b/fe0fe0f5e05e13629d893c760465173a15ad0039c0a5b0d0040995c8075e/aiohttp-3.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5276807b9de9092af38ed23ce120539ab0ac955547b38563a9ba4f5b07b95293", size = 489035, upload-time = "2025-10-28T20:57:06.894Z" },
- { url = "https://files.pythonhosted.org/packages/d2/04/db5279e38471b7ac801d7d36a57d1230feeee130bbe2a74f72731b23c2b1/aiohttp-3.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1237c1375eaef0db4dcd7c2559f42e8af7b87ea7d295b118c60c36a6e61cb811", size = 1720387, upload-time = "2025-10-28T20:57:08.685Z" },
- { url = "https://files.pythonhosted.org/packages/31/07/8ea4326bd7dae2bd59828f69d7fdc6e04523caa55e4a70f4a8725a7e4ed2/aiohttp-3.13.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96581619c57419c3d7d78703d5b78c1e5e5fc0172d60f555bdebaced82ded19a", size = 1688314, upload-time = "2025-10-28T20:57:10.693Z" },
- { url = "https://files.pythonhosted.org/packages/48/ab/3d98007b5b87ffd519d065225438cc3b668b2f245572a8cb53da5dd2b1bc/aiohttp-3.13.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2713a95b47374169409d18103366de1050fe0ea73db358fc7a7acb2880422d4", size = 1756317, upload-time = "2025-10-28T20:57:12.563Z" },
- { url = "https://files.pythonhosted.org/packages/97/3d/801ca172b3d857fafb7b50c7c03f91b72b867a13abca982ed6b3081774ef/aiohttp-3.13.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:228a1cd556b3caca590e9511a89444925da87d35219a49ab5da0c36d2d943a6a", size = 1858539, upload-time = "2025-10-28T20:57:14.623Z" },
- { url = "https://files.pythonhosted.org/packages/f7/0d/4764669bdf47bd472899b3d3db91fffbe925c8e3038ec591a2fd2ad6a14d/aiohttp-3.13.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac6cde5fba8d7d8c6ac963dbb0256a9854e9fafff52fbcc58fdf819357892c3e", size = 1739597, upload-time = "2025-10-28T20:57:16.399Z" },
- { url = "https://files.pythonhosted.org/packages/c4/52/7bd3c6693da58ba16e657eb904a5b6decfc48ecd06e9ac098591653b1566/aiohttp-3.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f2bef8237544f4e42878c61cef4e2839fee6346dc60f5739f876a9c50be7fcdb", size = 1555006, upload-time = "2025-10-28T20:57:18.288Z" },
- { url = "https://files.pythonhosted.org/packages/48/30/9586667acec5993b6f41d2ebcf96e97a1255a85f62f3c653110a5de4d346/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:16f15a4eac3bc2d76c45f7ebdd48a65d41b242eb6c31c2245463b40b34584ded", size = 1683220, upload-time = "2025-10-28T20:57:20.241Z" },
- { url = "https://files.pythonhosted.org/packages/71/01/3afe4c96854cfd7b30d78333852e8e851dceaec1c40fd00fec90c6402dd2/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:bb7fb776645af5cc58ab804c58d7eba545a97e047254a52ce89c157b5af6cd0b", size = 1712570, upload-time = "2025-10-28T20:57:22.253Z" },
- { url = "https://files.pythonhosted.org/packages/11/2c/22799d8e720f4697a9e66fd9c02479e40a49de3de2f0bbe7f9f78a987808/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e1b4951125ec10c70802f2cb09736c895861cd39fd9dcb35107b4dc8ae6220b8", size = 1733407, upload-time = "2025-10-28T20:57:24.37Z" },
- { url = "https://files.pythonhosted.org/packages/34/cb/90f15dd029f07cebbd91f8238a8b363978b530cd128488085b5703683594/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:550bf765101ae721ee1d37d8095f47b1f220650f85fe1af37a90ce75bab89d04", size = 1550093, upload-time = "2025-10-28T20:57:26.257Z" },
- { url = "https://files.pythonhosted.org/packages/69/46/12dce9be9d3303ecbf4d30ad45a7683dc63d90733c2d9fe512be6716cd40/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe91b87fc295973096251e2d25a811388e7d8adf3bd2b97ef6ae78bc4ac6c476", size = 1758084, upload-time = "2025-10-28T20:57:28.349Z" },
- { url = "https://files.pythonhosted.org/packages/f9/c8/0932b558da0c302ffd639fc6362a313b98fdf235dc417bc2493da8394df7/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e0c8e31cfcc4592cb200160344b2fb6ae0f9e4effe06c644b5a125d4ae5ebe23", size = 1716987, upload-time = "2025-10-28T20:57:30.233Z" },
- { url = "https://files.pythonhosted.org/packages/5d/8b/f5bd1a75003daed099baec373aed678f2e9b34f2ad40d85baa1368556396/aiohttp-3.13.2-cp313-cp313-win32.whl", hash = "sha256:0740f31a60848d6edb296a0df827473eede90c689b8f9f2a4cdde74889eb2254", size = 425859, upload-time = "2025-10-28T20:57:32.105Z" },
- { url = "https://files.pythonhosted.org/packages/5d/28/a8a9fc6957b2cee8902414e41816b5ab5536ecf43c3b1843c10e82c559b2/aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a", size = 452192, upload-time = "2025-10-28T20:57:34.166Z" },
- { url = "https://files.pythonhosted.org/packages/9b/36/e2abae1bd815f01c957cbf7be817b3043304e1c87bad526292a0410fdcf9/aiohttp-3.13.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:2475391c29230e063ef53a66669b7b691c9bfc3f1426a0f7bcdf1216bdbac38b", size = 735234, upload-time = "2025-10-28T20:57:36.415Z" },
- { url = "https://files.pythonhosted.org/packages/ca/e3/1ee62dde9b335e4ed41db6bba02613295a0d5b41f74a783c142745a12763/aiohttp-3.13.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f33c8748abef4d8717bb20e8fb1b3e07c6adacb7fd6beaae971a764cf5f30d61", size = 490733, upload-time = "2025-10-28T20:57:38.205Z" },
- { url = "https://files.pythonhosted.org/packages/1a/aa/7a451b1d6a04e8d15a362af3e9b897de71d86feac3babf8894545d08d537/aiohttp-3.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ae32f24bbfb7dbb485a24b30b1149e2f200be94777232aeadba3eecece4d0aa4", size = 491303, upload-time = "2025-10-28T20:57:40.122Z" },
- { url = "https://files.pythonhosted.org/packages/57/1e/209958dbb9b01174870f6a7538cd1f3f28274fdbc88a750c238e2c456295/aiohttp-3.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d7f02042c1f009ffb70067326ef183a047425bb2ff3bc434ead4dd4a4a66a2b", size = 1717965, upload-time = "2025-10-28T20:57:42.28Z" },
- { url = "https://files.pythonhosted.org/packages/08/aa/6a01848d6432f241416bc4866cae8dc03f05a5a884d2311280f6a09c73d6/aiohttp-3.13.2-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:93655083005d71cd6c072cdab54c886e6570ad2c4592139c3fb967bfc19e4694", size = 1667221, upload-time = "2025-10-28T20:57:44.869Z" },
- { url = "https://files.pythonhosted.org/packages/87/4f/36c1992432d31bbc789fa0b93c768d2e9047ec8c7177e5cd84ea85155f36/aiohttp-3.13.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0db1e24b852f5f664cd728db140cf11ea0e82450471232a394b3d1a540b0f906", size = 1757178, upload-time = "2025-10-28T20:57:47.216Z" },
- { url = "https://files.pythonhosted.org/packages/ac/b4/8e940dfb03b7e0f68a82b88fd182b9be0a65cb3f35612fe38c038c3112cf/aiohttp-3.13.2-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b009194665bcd128e23eaddef362e745601afa4641930848af4c8559e88f18f9", size = 1838001, upload-time = "2025-10-28T20:57:49.337Z" },
- { url = "https://files.pythonhosted.org/packages/d7/ef/39f3448795499c440ab66084a9db7d20ca7662e94305f175a80f5b7e0072/aiohttp-3.13.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c038a8fdc8103cd51dbd986ecdce141473ffd9775a7a8057a6ed9c3653478011", size = 1716325, upload-time = "2025-10-28T20:57:51.327Z" },
- { url = "https://files.pythonhosted.org/packages/d7/51/b311500ffc860b181c05d91c59a1313bdd05c82960fdd4035a15740d431e/aiohttp-3.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:66bac29b95a00db411cd758fea0e4b9bdba6d549dfe333f9a945430f5f2cc5a6", size = 1547978, upload-time = "2025-10-28T20:57:53.554Z" },
- { url = "https://files.pythonhosted.org/packages/31/64/b9d733296ef79815226dab8c586ff9e3df41c6aff2e16c06697b2d2e6775/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4ebf9cfc9ba24a74cf0718f04aac2a3bbe745902cc7c5ebc55c0f3b5777ef213", size = 1682042, upload-time = "2025-10-28T20:57:55.617Z" },
- { url = "https://files.pythonhosted.org/packages/3f/30/43d3e0f9d6473a6db7d472104c4eff4417b1e9df01774cb930338806d36b/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a4b88ebe35ce54205c7074f7302bd08a4cb83256a3e0870c72d6f68a3aaf8e49", size = 1680085, upload-time = "2025-10-28T20:57:57.59Z" },
- { url = "https://files.pythonhosted.org/packages/16/51/c709f352c911b1864cfd1087577760ced64b3e5bee2aa88b8c0c8e2e4972/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:98c4fb90bb82b70a4ed79ca35f656f4281885be076f3f970ce315402b53099ae", size = 1728238, upload-time = "2025-10-28T20:57:59.525Z" },
- { url = "https://files.pythonhosted.org/packages/19/e2/19bd4c547092b773caeb48ff5ae4b1ae86756a0ee76c16727fcfd281404b/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:ec7534e63ae0f3759df3a1ed4fa6bc8f75082a924b590619c0dd2f76d7043caa", size = 1544395, upload-time = "2025-10-28T20:58:01.914Z" },
- { url = "https://files.pythonhosted.org/packages/cf/87/860f2803b27dfc5ed7be532832a3498e4919da61299b4a1f8eb89b8ff44d/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5b927cf9b935a13e33644cbed6c8c4b2d0f25b713d838743f8fe7191b33829c4", size = 1742965, upload-time = "2025-10-28T20:58:03.972Z" },
- { url = "https://files.pythonhosted.org/packages/67/7f/db2fc7618925e8c7a601094d5cbe539f732df4fb570740be88ed9e40e99a/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:88d6c017966a78c5265d996c19cdb79235be5e6412268d7e2ce7dee339471b7a", size = 1697585, upload-time = "2025-10-28T20:58:06.189Z" },
- { url = "https://files.pythonhosted.org/packages/0c/07/9127916cb09bb38284db5036036042b7b2c514c8ebaeee79da550c43a6d6/aiohttp-3.13.2-cp314-cp314-win32.whl", hash = "sha256:f7c183e786e299b5d6c49fb43a769f8eb8e04a2726a2bd5887b98b5cc2d67940", size = 431621, upload-time = "2025-10-28T20:58:08.636Z" },
- { url = "https://files.pythonhosted.org/packages/fb/41/554a8a380df6d3a2bba8a7726429a23f4ac62aaf38de43bb6d6cde7b4d4d/aiohttp-3.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:fe242cd381e0fb65758faf5ad96c2e460df6ee5b2de1072fe97e4127927e00b4", size = 457627, upload-time = "2025-10-28T20:58:11Z" },
- { url = "https://files.pythonhosted.org/packages/c7/8e/3824ef98c039d3951cb65b9205a96dd2b20f22241ee17d89c5701557c826/aiohttp-3.13.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f10d9c0b0188fe85398c61147bbd2a657d616c876863bfeff43376e0e3134673", size = 767360, upload-time = "2025-10-28T20:58:13.358Z" },
- { url = "https://files.pythonhosted.org/packages/a4/0f/6a03e3fc7595421274fa34122c973bde2d89344f8a881b728fa8c774e4f1/aiohttp-3.13.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e7c952aefdf2460f4ae55c5e9c3e80aa72f706a6317e06020f80e96253b1accd", size = 504616, upload-time = "2025-10-28T20:58:15.339Z" },
- { url = "https://files.pythonhosted.org/packages/c6/aa/ed341b670f1bc8a6f2c6a718353d13b9546e2cef3544f573c6a1ff0da711/aiohttp-3.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c20423ce14771d98353d2e25e83591fa75dfa90a3c1848f3d7c68243b4fbded3", size = 509131, upload-time = "2025-10-28T20:58:17.693Z" },
- { url = "https://files.pythonhosted.org/packages/7f/f0/c68dac234189dae5c4bbccc0f96ce0cc16b76632cfc3a08fff180045cfa4/aiohttp-3.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e96eb1a34396e9430c19d8338d2ec33015e4a87ef2b4449db94c22412e25ccdf", size = 1864168, upload-time = "2025-10-28T20:58:20.113Z" },
- { url = "https://files.pythonhosted.org/packages/8f/65/75a9a76db8364b5d0e52a0c20eabc5d52297385d9af9c35335b924fafdee/aiohttp-3.13.2-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:23fb0783bc1a33640036465019d3bba069942616a6a2353c6907d7fe1ccdaf4e", size = 1719200, upload-time = "2025-10-28T20:58:22.583Z" },
- { url = "https://files.pythonhosted.org/packages/f5/55/8df2ed78d7f41d232f6bd3ff866b6f617026551aa1d07e2f03458f964575/aiohttp-3.13.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2e1a9bea6244a1d05a4e57c295d69e159a5c50d8ef16aa390948ee873478d9a5", size = 1843497, upload-time = "2025-10-28T20:58:24.672Z" },
- { url = "https://files.pythonhosted.org/packages/e9/e0/94d7215e405c5a02ccb6a35c7a3a6cfff242f457a00196496935f700cde5/aiohttp-3.13.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0a3d54e822688b56e9f6b5816fb3de3a3a64660efac64e4c2dc435230ad23bad", size = 1935703, upload-time = "2025-10-28T20:58:26.758Z" },
- { url = "https://files.pythonhosted.org/packages/0b/78/1eeb63c3f9b2d1015a4c02788fb543141aad0a03ae3f7a7b669b2483f8d4/aiohttp-3.13.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7a653d872afe9f33497215745da7a943d1dc15b728a9c8da1c3ac423af35178e", size = 1792738, upload-time = "2025-10-28T20:58:29.787Z" },
- { url = "https://files.pythonhosted.org/packages/41/75/aaf1eea4c188e51538c04cc568040e3082db263a57086ea74a7d38c39e42/aiohttp-3.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:56d36e80d2003fa3fc0207fac644216d8532e9504a785ef9a8fd013f84a42c61", size = 1624061, upload-time = "2025-10-28T20:58:32.529Z" },
- { url = "https://files.pythonhosted.org/packages/9b/c2/3b6034de81fbcc43de8aeb209073a2286dfb50b86e927b4efd81cf848197/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:78cd586d8331fb8e241c2dd6b2f4061778cc69e150514b39a9e28dd050475661", size = 1789201, upload-time = "2025-10-28T20:58:34.618Z" },
- { url = "https://files.pythonhosted.org/packages/c9/38/c15dcf6d4d890217dae79d7213988f4e5fe6183d43893a9cf2fe9e84ca8d/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:20b10bbfbff766294fe99987f7bb3b74fdd2f1a2905f2562132641ad434dcf98", size = 1776868, upload-time = "2025-10-28T20:58:38.835Z" },
- { url = "https://files.pythonhosted.org/packages/04/75/f74fd178ac81adf4f283a74847807ade5150e48feda6aef024403716c30c/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9ec49dff7e2b3c85cdeaa412e9d438f0ecd71676fde61ec57027dd392f00c693", size = 1790660, upload-time = "2025-10-28T20:58:41.507Z" },
- { url = "https://files.pythonhosted.org/packages/e7/80/7368bd0d06b16b3aba358c16b919e9c46cf11587dc572091031b0e9e3ef0/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:94f05348c4406450f9d73d38efb41d669ad6cd90c7ee194810d0eefbfa875a7a", size = 1617548, upload-time = "2025-10-28T20:58:43.674Z" },
- { url = "https://files.pythonhosted.org/packages/7d/4b/a6212790c50483cb3212e507378fbe26b5086d73941e1ec4b56a30439688/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:fa4dcb605c6f82a80c7f95713c2b11c3b8e9893b3ebd2bc9bde93165ed6107be", size = 1817240, upload-time = "2025-10-28T20:58:45.787Z" },
- { url = "https://files.pythonhosted.org/packages/ff/f7/ba5f0ba4ea8d8f3c32850912944532b933acbf0f3a75546b89269b9b7dde/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf00e5db968c3f67eccd2778574cf64d8b27d95b237770aa32400bd7a1ca4f6c", size = 1762334, upload-time = "2025-10-28T20:58:47.936Z" },
- { url = "https://files.pythonhosted.org/packages/7e/83/1a5a1856574588b1cad63609ea9ad75b32a8353ac995d830bf5da9357364/aiohttp-3.13.2-cp314-cp314t-win32.whl", hash = "sha256:d23b5fe492b0805a50d3371e8a728a9134d8de5447dce4c885f5587294750734", size = 464685, upload-time = "2025-10-28T20:58:50.642Z" },
- { url = "https://files.pythonhosted.org/packages/9f/4d/d22668674122c08f4d56972297c51a624e64b3ed1efaa40187607a7cb66e/aiohttp-3.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:ff0a7b0a82a7ab905cbda74006318d1b12e37c797eb1b0d4eb3e316cf47f658f", size = 498093, upload-time = "2025-10-28T20:58:52.782Z" },
- { url = "https://files.pythonhosted.org/packages/04/4a/3da532fdf51b5e58fffa1a86d6569184cb1bf4bf81cd4434b6541a8d14fd/aiohttp-3.13.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7fbdf5ad6084f1940ce88933de34b62358d0f4a0b6ec097362dcd3e5a65a4989", size = 739009, upload-time = "2025-10-28T20:58:55.682Z" },
- { url = "https://files.pythonhosted.org/packages/89/74/fefa6f7939cdc1d77e5cad712004e675a8847dccc589dcc3abca7feaed73/aiohttp-3.13.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7c3a50345635a02db61792c85bb86daffac05330f6473d524f1a4e3ef9d0046d", size = 495308, upload-time = "2025-10-28T20:58:58.408Z" },
- { url = "https://files.pythonhosted.org/packages/4e/b4/a0638ae1f12d09a0dc558870968a2f19a1eba1b10ad0a85ef142ddb40b50/aiohttp-3.13.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e87dff73f46e969af38ab3f7cb75316a7c944e2e574ff7c933bc01b10def7f5", size = 490624, upload-time = "2025-10-28T20:59:00.479Z" },
- { url = "https://files.pythonhosted.org/packages/02/73/361cd4cac9d98a5a4183d1f26faf7b777330f8dba838c5aae2412862bdd0/aiohttp-3.13.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2adebd4577724dcae085665f294cc57c8701ddd4d26140504db622b8d566d7aa", size = 1662968, upload-time = "2025-10-28T20:59:03.105Z" },
- { url = "https://files.pythonhosted.org/packages/9e/93/ce2ca7584555a6c7dd78f2e6b539a96c5172d88815e13a05a576e14a5a22/aiohttp-3.13.2-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e036a3a645fe92309ec34b918394bb377950cbb43039a97edae6c08db64b23e2", size = 1627117, upload-time = "2025-10-28T20:59:05.274Z" },
- { url = "https://files.pythonhosted.org/packages/a6/42/7ee0e699111f5fc20a69b3203e8f5d5da0b681f270b90bc088d15e339980/aiohttp-3.13.2-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:23ad365e30108c422d0b4428cf271156dd56790f6dd50d770b8e360e6c5ab2e6", size = 1724037, upload-time = "2025-10-28T20:59:07.522Z" },
- { url = "https://files.pythonhosted.org/packages/66/88/67ad5ff11dd61dd1d7882cda39f085d5fca31cf7e2143f5173429d8a591e/aiohttp-3.13.2-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1f9b2c2d4b9d958b1f9ae0c984ec1dd6b6689e15c75045be8ccb4011426268ca", size = 1812899, upload-time = "2025-10-28T20:59:11.698Z" },
- { url = "https://files.pythonhosted.org/packages/60/1b/a46f6e1c2a347b9c7a789292279c159b327fadecbf8340f3b05fffff1151/aiohttp-3.13.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3a92cf4b9bea33e15ecbaa5c59921be0f23222608143d025c989924f7e3e0c07", size = 1660961, upload-time = "2025-10-28T20:59:14.425Z" },
- { url = "https://files.pythonhosted.org/packages/44/cc/1af9e466eafd9b5d8922238c69aaf95b656137add4c5db65f63ee129bf3c/aiohttp-3.13.2-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:070599407f4954021509193404c4ac53153525a19531051661440644728ba9a7", size = 1553851, upload-time = "2025-10-28T20:59:17.044Z" },
- { url = "https://files.pythonhosted.org/packages/e5/d1/9e5f4f40f9d0ee5668e9b5e7ebfb0eaf371cc09da03785decdc5da56f4b3/aiohttp-3.13.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:29562998ec66f988d49fb83c9b01694fa927186b781463f376c5845c121e4e0b", size = 1634260, upload-time = "2025-10-28T20:59:19.378Z" },
- { url = "https://files.pythonhosted.org/packages/83/2e/5d065091c4ae8b55a153f458f19308191bad3b62a89496aa081385486338/aiohttp-3.13.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4dd3db9d0f4ebca1d887d76f7cdbcd1116ac0d05a9221b9dad82c64a62578c4d", size = 1639499, upload-time = "2025-10-28T20:59:22.013Z" },
- { url = "https://files.pythonhosted.org/packages/a3/de/58ae6dc73691a51ff16f69a94d13657bf417456fa0fdfed2b59dd6b4c293/aiohttp-3.13.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d7bc4b7f9c4921eba72677cd9fedd2308f4a4ca3e12fab58935295ad9ea98700", size = 1694087, upload-time = "2025-10-28T20:59:24.773Z" },
- { url = "https://files.pythonhosted.org/packages/45/fe/4d9df516268867d83041b6c073ee15cd532dbea58b82d675a7e1cf2ec24c/aiohttp-3.13.2-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:dacd50501cd017f8cccb328da0c90823511d70d24a323196826d923aad865901", size = 1540532, upload-time = "2025-10-28T20:59:27.982Z" },
- { url = "https://files.pythonhosted.org/packages/24/e7/a802619308232499482bf30b3530efb5d141481cfd61850368350fb1acb5/aiohttp-3.13.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:8b2f1414f6a1e0683f212ec80e813f4abef94c739fd090b66c9adf9d2a05feac", size = 1710369, upload-time = "2025-10-28T20:59:30.363Z" },
- { url = "https://files.pythonhosted.org/packages/62/08/e8593f39f025efe96ef59550d17cf097222d84f6f84798bedac5bf037fce/aiohttp-3.13.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04c3971421576ed24c191f610052bcb2f059e395bc2489dd99e397f9bc466329", size = 1649296, upload-time = "2025-10-28T20:59:33.285Z" },
- { url = "https://files.pythonhosted.org/packages/e5/fd/ffbc1b6aa46fc6c284af4a438b2c7eab79af1c8ac4b6d2ced185c17f403e/aiohttp-3.13.2-cp39-cp39-win32.whl", hash = "sha256:9f377d0a924e5cc94dc620bc6366fc3e889586a7f18b748901cf016c916e2084", size = 432980, upload-time = "2025-10-28T20:59:35.515Z" },
- { url = "https://files.pythonhosted.org/packages/ad/a9/d47e7873175a4d8aed425f2cdea2df700b2dd44fac024ffbd83455a69a50/aiohttp-3.13.2-cp39-cp39-win_amd64.whl", hash = "sha256:9c705601e16c03466cb72011bd1af55d68fa65b045356d8f96c216e5f6db0fa5", size = 456021, upload-time = "2025-10-28T20:59:37.659Z" },
+ { url = "https://files.pythonhosted.org/packages/36/d6/5aec9313ee6ea9c7cde8b891b69f4ff4001416867104580670a31daeba5b/aiohttp-3.13.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d5a372fd5afd301b3a89582817fdcdb6c34124787c70dbcc616f259013e7eef7", size = 738950, upload-time = "2026-01-03T17:29:13.002Z" },
+ { url = "https://files.pythonhosted.org/packages/68/03/8fa90a7e6d11ff20a18837a8e2b5dd23db01aabc475aa9271c8ad33299f5/aiohttp-3.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:147e422fd1223005c22b4fe080f5d93ced44460f5f9c105406b753612b587821", size = 496099, upload-time = "2026-01-03T17:29:15.268Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/23/b81f744d402510a8366b74eb420fc0cc1170d0c43daca12d10814df85f10/aiohttp-3.13.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:859bd3f2156e81dd01432f5849fc73e2243d4a487c4fd26609b1299534ee1845", size = 491072, upload-time = "2026-01-03T17:29:16.922Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/e1/56d1d1c0dd334cd203dd97706ce004c1aa24b34a813b0b8daf3383039706/aiohttp-3.13.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dca68018bf48c251ba17c72ed479f4dafe9dbd5a73707ad8d28a38d11f3d42af", size = 1671588, upload-time = "2026-01-03T17:29:18.539Z" },
+ { url = "https://files.pythonhosted.org/packages/5f/34/8d7f962604f4bc2b4e39eb1220dac7d4e4cba91fb9ba0474b4ecd67db165/aiohttp-3.13.3-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fee0c6bc7db1de362252affec009707a17478a00ec69f797d23ca256e36d5940", size = 1640334, upload-time = "2026-01-03T17:29:21.028Z" },
+ { url = "https://files.pythonhosted.org/packages/94/1d/fcccf2c668d87337ddeef9881537baee13c58d8f01f12ba8a24215f2b804/aiohttp-3.13.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c048058117fd649334d81b4b526e94bde3ccaddb20463a815ced6ecbb7d11160", size = 1722656, upload-time = "2026-01-03T17:29:22.531Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/98/c6f3b081c4c606bc1e5f2ec102e87d6411c73a9ef3616fea6f2d5c98c062/aiohttp-3.13.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:215a685b6fbbfcf71dfe96e3eba7a6f58f10da1dfdf4889c7dd856abe430dca7", size = 1817625, upload-time = "2026-01-03T17:29:24.276Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/c0/cfcc3d2e11b477f86e1af2863f3858c8850d751ce8dc39c4058a072c9e54/aiohttp-3.13.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2c184bb1fe2cbd2cefba613e9db29a5ab559323f994b6737e370d3da0ac455", size = 1672604, upload-time = "2026-01-03T17:29:26.099Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/77/6b4ffcbcac4c6a5d041343a756f34a6dd26174ae07f977a64fe028dda5b0/aiohttp-3.13.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:75ca857eba4e20ce9f546cd59c7007b33906a4cd48f2ff6ccf1ccfc3b646f279", size = 1554370, upload-time = "2026-01-03T17:29:28.121Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/f0/e3ddfa93f17d689dbe014ba048f18e0c9f9b456033b70e94349a2e9048be/aiohttp-3.13.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81e97251d9298386c2b7dbeb490d3d1badbdc69107fb8c9299dd04eb39bddc0e", size = 1642023, upload-time = "2026-01-03T17:29:30.002Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/45/c14019c9ec60a8e243d06d601b33dcc4fd92379424bde3021725859d7f99/aiohttp-3.13.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c0e2d366af265797506f0283487223146af57815b388623f0357ef7eac9b209d", size = 1649680, upload-time = "2026-01-03T17:29:31.782Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/fd/09c9451dae5aa5c5ed756df95ff9ef549d45d4be663bafd1e4954fd836f0/aiohttp-3.13.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4e239d501f73d6db1522599e14b9b321a7e3b1de66ce33d53a765d975e9f4808", size = 1692407, upload-time = "2026-01-03T17:29:33.392Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/81/938bc2ec33c10efd6637ccb3d22f9f3160d08e8f3aa2587a2c2d5ab578eb/aiohttp-3.13.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:0db318f7a6f065d84cb1e02662c526294450b314a02bd9e2a8e67f0d8564ce40", size = 1543047, upload-time = "2026-01-03T17:29:34.855Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/23/80488ee21c8d567c83045e412e1d9b7077d27171591a4eb7822586e8c06a/aiohttp-3.13.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:bfc1cc2fe31a6026a8a88e4ecfb98d7f6b1fec150cfd708adbfd1d2f42257c29", size = 1715264, upload-time = "2026-01-03T17:29:36.389Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/83/259a8da6683182768200b368120ab3deff5370bed93880fb9a3a86299f34/aiohttp-3.13.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af71fff7bac6bb7508956696dce8f6eec2bbb045eceb40343944b1ae62b5ef11", size = 1657275, upload-time = "2026-01-03T17:29:38.162Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/4f/2c41f800a0b560785c10fb316216ac058c105f9be50bdc6a285de88db625/aiohttp-3.13.3-cp310-cp310-win32.whl", hash = "sha256:37da61e244d1749798c151421602884db5270faf479cf0ef03af0ff68954c9dd", size = 434053, upload-time = "2026-01-03T17:29:40.074Z" },
+ { url = "https://files.pythonhosted.org/packages/80/df/29cd63c7ecfdb65ccc12f7d808cac4fa2a19544660c06c61a4a48462de0c/aiohttp-3.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:7e63f210bc1b57ef699035f2b4b6d9ce096b5914414a49b0997c839b2bd2223c", size = 456687, upload-time = "2026-01-03T17:29:41.819Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/4c/a164164834f03924d9a29dc3acd9e7ee58f95857e0b467f6d04298594ebb/aiohttp-3.13.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5b6073099fb654e0a068ae678b10feff95c5cae95bbfcbfa7af669d361a8aa6b", size = 746051, upload-time = "2026-01-03T17:29:43.287Z" },
+ { url = "https://files.pythonhosted.org/packages/82/71/d5c31390d18d4f58115037c432b7e0348c60f6f53b727cad33172144a112/aiohttp-3.13.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cb93e166e6c28716c8c6aeb5f99dfb6d5ccf482d29fe9bf9a794110e6d0ab64", size = 499234, upload-time = "2026-01-03T17:29:44.822Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/c9/741f8ac91e14b1d2e7100690425a5b2b919a87a5075406582991fb7de920/aiohttp-3.13.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28e027cf2f6b641693a09f631759b4d9ce9165099d2b5d92af9bd4e197690eea", size = 494979, upload-time = "2026-01-03T17:29:46.405Z" },
+ { url = "https://files.pythonhosted.org/packages/75/b5/31d4d2e802dfd59f74ed47eba48869c1c21552c586d5e81a9d0d5c2ad640/aiohttp-3.13.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3b61b7169ababd7802f9568ed96142616a9118dd2be0d1866e920e77ec8fa92a", size = 1748297, upload-time = "2026-01-03T17:29:48.083Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/3e/eefad0ad42959f226bb79664826883f2687d602a9ae2941a18e0484a74d3/aiohttp-3.13.3-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:80dd4c21b0f6237676449c6baaa1039abae86b91636b6c91a7f8e61c87f89540", size = 1707172, upload-time = "2026-01-03T17:29:49.648Z" },
+ { url = "https://files.pythonhosted.org/packages/c5/3a/54a64299fac2891c346cdcf2aa6803f994a2e4beeaf2e5a09dcc54acc842/aiohttp-3.13.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:65d2ccb7eabee90ce0503c17716fc77226be026dcc3e65cce859a30db715025b", size = 1805405, upload-time = "2026-01-03T17:29:51.244Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/70/ddc1b7169cf64075e864f64595a14b147a895a868394a48f6a8031979038/aiohttp-3.13.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5b179331a481cb5529fca8b432d8d3c7001cb217513c94cd72d668d1248688a3", size = 1899449, upload-time = "2026-01-03T17:29:53.938Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/7e/6815aab7d3a56610891c76ef79095677b8b5be6646aaf00f69b221765021/aiohttp-3.13.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d4c940f02f49483b18b079d1c27ab948721852b281f8b015c058100e9421dd1", size = 1748444, upload-time = "2026-01-03T17:29:55.484Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/f2/073b145c4100da5511f457dc0f7558e99b2987cf72600d42b559db856fbc/aiohttp-3.13.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f9444f105664c4ce47a2a7171a2418bce5b7bae45fb610f4e2c36045d85911d3", size = 1606038, upload-time = "2026-01-03T17:29:57.179Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/c1/778d011920cae03ae01424ec202c513dc69243cf2db303965615b81deeea/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:694976222c711d1d00ba131904beb60534f93966562f64440d0c9d41b8cdb440", size = 1724156, upload-time = "2026-01-03T17:29:58.914Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/cb/3419eabf4ec1e9ec6f242c32b689248365a1cf621891f6f0386632525494/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:f33ed1a2bf1997a36661874b017f5c4b760f41266341af36febaf271d179f6d7", size = 1722340, upload-time = "2026-01-03T17:30:01.962Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/e5/76cf77bdbc435bf233c1f114edad39ed4177ccbfab7c329482b179cff4f4/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e636b3c5f61da31a92bf0d91da83e58fdfa96f178ba682f11d24f31944cdd28c", size = 1783041, upload-time = "2026-01-03T17:30:03.609Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/d4/dd1ca234c794fd29c057ce8c0566b8ef7fd6a51069de5f06fa84b9a1971c/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:5d2d94f1f5fcbe40838ac51a6ab5704a6f9ea42e72ceda48de5e6b898521da51", size = 1596024, upload-time = "2026-01-03T17:30:05.132Z" },
+ { url = "https://files.pythonhosted.org/packages/55/58/4345b5f26661a6180afa686c473620c30a66afdf120ed3dd545bbc809e85/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2be0e9ccf23e8a94f6f0650ce06042cefc6ac703d0d7ab6c7a917289f2539ad4", size = 1804590, upload-time = "2026-01-03T17:30:07.135Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/06/05950619af6c2df7e0a431d889ba2813c9f0129cec76f663e547a5ad56f2/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9af5e68ee47d6534d36791bbe9b646d2a7c7deb6fc24d7943628edfbb3581f29", size = 1740355, upload-time = "2026-01-03T17:30:09.083Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/80/958f16de79ba0422d7c1e284b2abd0c84bc03394fbe631d0a39ffa10e1eb/aiohttp-3.13.3-cp311-cp311-win32.whl", hash = "sha256:a2212ad43c0833a873d0fb3c63fa1bacedd4cf6af2fee62bf4b739ceec3ab239", size = 433701, upload-time = "2026-01-03T17:30:10.869Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/f2/27cdf04c9851712d6c1b99df6821a6623c3c9e55956d4b1e318c337b5a48/aiohttp-3.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:642f752c3eb117b105acbd87e2c143de710987e09860d674e068c4c2c441034f", size = 457678, upload-time = "2026-01-03T17:30:12.719Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/be/4fc11f202955a69e0db803a12a062b8379c970c7c84f4882b6da17337cc1/aiohttp-3.13.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b903a4dfee7d347e2d87697d0713be59e0b87925be030c9178c5faa58ea58d5c", size = 739732, upload-time = "2026-01-03T17:30:14.23Z" },
+ { url = "https://files.pythonhosted.org/packages/97/2c/621d5b851f94fa0bb7430d6089b3aa970a9d9b75196bc93bb624b0db237a/aiohttp-3.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a45530014d7a1e09f4a55f4f43097ba0fd155089372e105e4bff4ca76cb1b168", size = 494293, upload-time = "2026-01-03T17:30:15.96Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/43/4be01406b78e1be8320bb8316dc9c42dbab553d281c40364e0f862d5661c/aiohttp-3.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27234ef6d85c914f9efeb77ff616dbf4ad2380be0cda40b4db086ffc7ddd1b7d", size = 493533, upload-time = "2026-01-03T17:30:17.431Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/a8/5a35dc56a06a2c90d4742cbf35294396907027f80eea696637945a106f25/aiohttp-3.13.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d32764c6c9aafb7fb55366a224756387cd50bfa720f32b88e0e6fa45b27dcf29", size = 1737839, upload-time = "2026-01-03T17:30:19.422Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/62/4b9eeb331da56530bf2e198a297e5303e1c1ebdceeb00fe9b568a65c5a0c/aiohttp-3.13.3-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b1a6102b4d3ebc07dad44fbf07b45bb600300f15b552ddf1851b5390202ea2e3", size = 1703932, upload-time = "2026-01-03T17:30:21.756Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/f6/af16887b5d419e6a367095994c0b1332d154f647e7dc2bd50e61876e8e3d/aiohttp-3.13.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c014c7ea7fb775dd015b2d3137378b7be0249a448a1612268b5a90c2d81de04d", size = 1771906, upload-time = "2026-01-03T17:30:23.932Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/83/397c634b1bcc24292fa1e0c7822800f9f6569e32934bdeef09dae7992dfb/aiohttp-3.13.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2b8d8ddba8f95ba17582226f80e2de99c7a7948e66490ef8d947e272a93e9463", size = 1871020, upload-time = "2026-01-03T17:30:26Z" },
+ { url = "https://files.pythonhosted.org/packages/86/f6/a62cbbf13f0ac80a70f71b1672feba90fdb21fd7abd8dbf25c0105fb6fa3/aiohttp-3.13.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ae8dd55c8e6c4257eae3a20fd2c8f41edaea5992ed67156642493b8daf3cecc", size = 1755181, upload-time = "2026-01-03T17:30:27.554Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/87/20a35ad487efdd3fba93d5843efdfaa62d2f1479eaafa7453398a44faf13/aiohttp-3.13.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:01ad2529d4b5035578f5081606a465f3b814c542882804e2e8cda61adf5c71bf", size = 1561794, upload-time = "2026-01-03T17:30:29.254Z" },
+ { url = "https://files.pythonhosted.org/packages/de/95/8fd69a66682012f6716e1bc09ef8a1a2a91922c5725cb904689f112309c4/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bb4f7475e359992b580559e008c598091c45b5088f28614e855e42d39c2f1033", size = 1697900, upload-time = "2026-01-03T17:30:31.033Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/66/7b94b3b5ba70e955ff597672dad1691333080e37f50280178967aff68657/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c19b90316ad3b24c69cd78d5c9b4f3aa4497643685901185b65166293d36a00f", size = 1728239, upload-time = "2026-01-03T17:30:32.703Z" },
+ { url = "https://files.pythonhosted.org/packages/47/71/6f72f77f9f7d74719692ab65a2a0252584bf8d5f301e2ecb4c0da734530a/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:96d604498a7c782cb15a51c406acaea70d8c027ee6b90c569baa6e7b93073679", size = 1740527, upload-time = "2026-01-03T17:30:34.695Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/b4/75ec16cbbd5c01bdaf4a05b19e103e78d7ce1ef7c80867eb0ace42ff4488/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:084911a532763e9d3dd95adf78a78f4096cd5f58cdc18e6fdbc1b58417a45423", size = 1554489, upload-time = "2026-01-03T17:30:36.864Z" },
+ { url = "https://files.pythonhosted.org/packages/52/8f/bc518c0eea29f8406dcf7ed1f96c9b48e3bc3995a96159b3fc11f9e08321/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7a4a94eb787e606d0a09404b9c38c113d3b099d508021faa615d70a0131907ce", size = 1767852, upload-time = "2026-01-03T17:30:39.433Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/f2/a07a75173124f31f11ea6f863dc44e6f09afe2bca45dd4e64979490deab1/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:87797e645d9d8e222e04160ee32aa06bc5c163e8499f24db719e7852ec23093a", size = 1722379, upload-time = "2026-01-03T17:30:41.081Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/4a/1a3fee7c21350cac78e5c5cef711bac1b94feca07399f3d406972e2d8fcd/aiohttp-3.13.3-cp312-cp312-win32.whl", hash = "sha256:b04be762396457bef43f3597c991e192ee7da460a4953d7e647ee4b1c28e7046", size = 428253, upload-time = "2026-01-03T17:30:42.644Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/b7/76175c7cb4eb73d91ad63c34e29fc4f77c9386bba4a65b53ba8e05ee3c39/aiohttp-3.13.3-cp312-cp312-win_amd64.whl", hash = "sha256:e3531d63d3bdfa7e3ac5e9b27b2dd7ec9df3206a98e0b3445fa906f233264c57", size = 455407, upload-time = "2026-01-03T17:30:44.195Z" },
+ { url = "https://files.pythonhosted.org/packages/97/8a/12ca489246ca1faaf5432844adbfce7ff2cc4997733e0af120869345643a/aiohttp-3.13.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5dff64413671b0d3e7d5918ea490bdccb97a4ad29b3f311ed423200b2203e01c", size = 734190, upload-time = "2026-01-03T17:30:45.832Z" },
+ { url = "https://files.pythonhosted.org/packages/32/08/de43984c74ed1fca5c014808963cc83cb00d7bb06af228f132d33862ca76/aiohttp-3.13.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:87b9aab6d6ed88235aa2970294f496ff1a1f9adcd724d800e9b952395a80ffd9", size = 491783, upload-time = "2026-01-03T17:30:47.466Z" },
+ { url = "https://files.pythonhosted.org/packages/17/f8/8dd2cf6112a5a76f81f81a5130c57ca829d101ad583ce57f889179accdda/aiohttp-3.13.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:425c126c0dc43861e22cb1c14ba4c8e45d09516d0a3ae0a3f7494b79f5f233a3", size = 490704, upload-time = "2026-01-03T17:30:49.373Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/40/a46b03ca03936f832bc7eaa47cfbb1ad012ba1be4790122ee4f4f8cba074/aiohttp-3.13.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f9120f7093c2a32d9647abcaf21e6ad275b4fbec5b55969f978b1a97c7c86bf", size = 1720652, upload-time = "2026-01-03T17:30:50.974Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/7e/917fe18e3607af92657e4285498f500dca797ff8c918bd7d90b05abf6c2a/aiohttp-3.13.3-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:697753042d57f4bf7122cab985bf15d0cef23c770864580f5af4f52023a56bd6", size = 1692014, upload-time = "2026-01-03T17:30:52.729Z" },
+ { url = "https://files.pythonhosted.org/packages/71/b6/cefa4cbc00d315d68973b671cf105b21a609c12b82d52e5d0c9ae61d2a09/aiohttp-3.13.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6de499a1a44e7de70735d0b39f67c8f25eb3d91eb3103be99ca0fa882cdd987d", size = 1759777, upload-time = "2026-01-03T17:30:54.537Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/e3/e06ee07b45e59e6d81498b591fc589629be1553abb2a82ce33efe2a7b068/aiohttp-3.13.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:37239e9f9a7ea9ac5bf6b92b0260b01f8a22281996da609206a84df860bc1261", size = 1861276, upload-time = "2026-01-03T17:30:56.512Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/24/75d274228acf35ceeb2850b8ce04de9dd7355ff7a0b49d607ee60c29c518/aiohttp-3.13.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f76c1e3fe7d7c8afad7ed193f89a292e1999608170dcc9751a7462a87dfd5bc0", size = 1743131, upload-time = "2026-01-03T17:30:58.256Z" },
+ { url = "https://files.pythonhosted.org/packages/04/98/3d21dde21889b17ca2eea54fdcff21b27b93f45b7bb94ca029c31ab59dc3/aiohttp-3.13.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fc290605db2a917f6e81b0e1e0796469871f5af381ce15c604a3c5c7e51cb730", size = 1556863, upload-time = "2026-01-03T17:31:00.445Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/84/da0c3ab1192eaf64782b03971ab4055b475d0db07b17eff925e8c93b3aa5/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4021b51936308aeea0367b8f006dc999ca02bc118a0cc78c303f50a2ff6afb91", size = 1682793, upload-time = "2026-01-03T17:31:03.024Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/0f/5802ada182f575afa02cbd0ec5180d7e13a402afb7c2c03a9aa5e5d49060/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:49a03727c1bba9a97d3e93c9f93ca03a57300f484b6e935463099841261195d3", size = 1716676, upload-time = "2026-01-03T17:31:04.842Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/8c/714d53bd8b5a4560667f7bbbb06b20c2382f9c7847d198370ec6526af39c/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3d9908a48eb7416dc1f4524e69f1d32e5d90e3981e4e37eb0aa1cd18f9cfa2a4", size = 1733217, upload-time = "2026-01-03T17:31:06.868Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/79/e2176f46d2e963facea939f5be2d26368ce543622be6f00a12844d3c991f/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:2712039939ec963c237286113c68dbad80a82a4281543f3abf766d9d73228998", size = 1552303, upload-time = "2026-01-03T17:31:08.958Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/6a/28ed4dea1759916090587d1fe57087b03e6c784a642b85ef48217b0277ae/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:7bfdc049127717581866fa4708791220970ce291c23e28ccf3922c700740fdc0", size = 1763673, upload-time = "2026-01-03T17:31:10.676Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/35/4a3daeb8b9fab49240d21c04d50732313295e4bd813a465d840236dd0ce1/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8057c98e0c8472d8846b9c79f56766bcc57e3e8ac7bfd510482332366c56c591", size = 1721120, upload-time = "2026-01-03T17:31:12.575Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/9f/d643bb3c5fb99547323e635e251c609fbbc660d983144cfebec529e09264/aiohttp-3.13.3-cp313-cp313-win32.whl", hash = "sha256:1449ceddcdbcf2e0446957863af03ebaaa03f94c090f945411b61269e2cb5daf", size = 427383, upload-time = "2026-01-03T17:31:14.382Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/f1/ab0395f8a79933577cdd996dd2f9aa6014af9535f65dddcf88204682fe62/aiohttp-3.13.3-cp313-cp313-win_amd64.whl", hash = "sha256:693781c45a4033d31d4187d2436f5ac701e7bbfe5df40d917736108c1cc7436e", size = 453899, upload-time = "2026-01-03T17:31:15.958Z" },
+ { url = "https://files.pythonhosted.org/packages/99/36/5b6514a9f5d66f4e2597e40dea2e3db271e023eb7a5d22defe96ba560996/aiohttp-3.13.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:ea37047c6b367fd4bd632bff8077449b8fa034b69e812a18e0132a00fae6e808", size = 737238, upload-time = "2026-01-03T17:31:17.909Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/49/459327f0d5bcd8c6c9ca69e60fdeebc3622861e696490d8674a6d0cb90a6/aiohttp-3.13.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6fc0e2337d1a4c3e6acafda6a78a39d4c14caea625124817420abceed36e2415", size = 492292, upload-time = "2026-01-03T17:31:19.919Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/0b/b97660c5fd05d3495b4eb27f2d0ef18dc1dc4eff7511a9bf371397ff0264/aiohttp-3.13.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c685f2d80bb67ca8c3837823ad76196b3694b0159d232206d1e461d3d434666f", size = 493021, upload-time = "2026-01-03T17:31:21.636Z" },
+ { url = "https://files.pythonhosted.org/packages/54/d4/438efabdf74e30aeceb890c3290bbaa449780583b1270b00661126b8aae4/aiohttp-3.13.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e377758516d262bde50c2584fc6c578af272559c409eecbdd2bae1601184d6", size = 1717263, upload-time = "2026-01-03T17:31:23.296Z" },
+ { url = "https://files.pythonhosted.org/packages/71/f2/7bddc7fd612367d1459c5bcf598a9e8f7092d6580d98de0e057eb42697ad/aiohttp-3.13.3-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:34749271508078b261c4abb1767d42b8d0c0cc9449c73a4df494777dc55f0687", size = 1669107, upload-time = "2026-01-03T17:31:25.334Z" },
+ { url = "https://files.pythonhosted.org/packages/00/5a/1aeaecca40e22560f97610a329e0e5efef5e0b5afdf9f857f0d93839ab2e/aiohttp-3.13.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:82611aeec80eb144416956ec85b6ca45a64d76429c1ed46ae1b5f86c6e0c9a26", size = 1760196, upload-time = "2026-01-03T17:31:27.394Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/f8/0ff6992bea7bd560fc510ea1c815f87eedd745fe035589c71ce05612a19a/aiohttp-3.13.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2fff83cfc93f18f215896e3a190e8e5cb413ce01553901aca925176e7568963a", size = 1843591, upload-time = "2026-01-03T17:31:29.238Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/d1/e30e537a15f53485b61f5be525f2157da719819e8377298502aebac45536/aiohttp-3.13.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bbe7d4cecacb439e2e2a8a1a7b935c25b812af7a5fd26503a66dadf428e79ec1", size = 1720277, upload-time = "2026-01-03T17:31:31.053Z" },
+ { url = "https://files.pythonhosted.org/packages/84/45/23f4c451d8192f553d38d838831ebbc156907ea6e05557f39563101b7717/aiohttp-3.13.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b928f30fe49574253644b1ca44b1b8adbd903aa0da4b9054a6c20fc7f4092a25", size = 1548575, upload-time = "2026-01-03T17:31:32.87Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/ed/0a42b127a43712eda7807e7892c083eadfaf8429ca8fb619662a530a3aab/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7b5e8fe4de30df199155baaf64f2fcd604f4c678ed20910db8e2c66dc4b11603", size = 1679455, upload-time = "2026-01-03T17:31:34.76Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/b5/c05f0c2b4b4fe2c9d55e73b6d3ed4fd6c9dc2684b1d81cbdf77e7fad9adb/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:8542f41a62bcc58fc7f11cf7c90e0ec324ce44950003feb70640fc2a9092c32a", size = 1687417, upload-time = "2026-01-03T17:31:36.699Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/6b/915bc5dad66aef602b9e459b5a973529304d4e89ca86999d9d75d80cbd0b/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:5e1d8c8b8f1d91cd08d8f4a3c2b067bfca6ec043d3ff36de0f3a715feeedf926", size = 1729968, upload-time = "2026-01-03T17:31:38.622Z" },
+ { url = "https://files.pythonhosted.org/packages/11/3b/e84581290a9520024a08640b63d07673057aec5ca548177a82026187ba73/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:90455115e5da1c3c51ab619ac57f877da8fd6d73c05aacd125c5ae9819582aba", size = 1545690, upload-time = "2026-01-03T17:31:40.57Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/04/0c3655a566c43fd647c81b895dfe361b9f9ad6d58c19309d45cff52d6c3b/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:042e9e0bcb5fba81886c8b4fbb9a09d6b8a00245fd8d88e4d989c1f96c74164c", size = 1746390, upload-time = "2026-01-03T17:31:42.857Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/53/71165b26978f719c3419381514c9690bd5980e764a09440a10bb816ea4ab/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2eb752b102b12a76ca02dff751a801f028b4ffbbc478840b473597fc91a9ed43", size = 1702188, upload-time = "2026-01-03T17:31:44.984Z" },
+ { url = "https://files.pythonhosted.org/packages/29/a7/cbe6c9e8e136314fa1980da388a59d2f35f35395948a08b6747baebb6aa6/aiohttp-3.13.3-cp314-cp314-win32.whl", hash = "sha256:b556c85915d8efaed322bf1bdae9486aa0f3f764195a0fb6ee962e5c71ef5ce1", size = 433126, upload-time = "2026-01-03T17:31:47.463Z" },
+ { url = "https://files.pythonhosted.org/packages/de/56/982704adea7d3b16614fc5936014e9af85c0e34b58f9046655817f04306e/aiohttp-3.13.3-cp314-cp314-win_amd64.whl", hash = "sha256:9bf9f7a65e7aa20dd764151fb3d616c81088f91f8df39c3893a536e279b4b984", size = 459128, upload-time = "2026-01-03T17:31:49.2Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/2a/3c79b638a9c3d4658d345339d22070241ea341ed4e07b5ac60fb0f418003/aiohttp-3.13.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:05861afbbec40650d8a07ea324367cb93e9e8cc7762e04dd4405df99fa65159c", size = 769512, upload-time = "2026-01-03T17:31:51.134Z" },
+ { url = "https://files.pythonhosted.org/packages/29/b9/3e5014d46c0ab0db8707e0ac2711ed28c4da0218c358a4e7c17bae0d8722/aiohttp-3.13.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2fc82186fadc4a8316768d61f3722c230e2c1dcab4200d52d2ebdf2482e47592", size = 506444, upload-time = "2026-01-03T17:31:52.85Z" },
+ { url = "https://files.pythonhosted.org/packages/90/03/c1d4ef9a054e151cd7839cdc497f2638f00b93cbe8043983986630d7a80c/aiohttp-3.13.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0add0900ff220d1d5c5ebbf99ed88b0c1bbf87aa7e4262300ed1376a6b13414f", size = 510798, upload-time = "2026-01-03T17:31:54.91Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/76/8c1e5abbfe8e127c893fe7ead569148a4d5a799f7cf958d8c09f3eedf097/aiohttp-3.13.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:568f416a4072fbfae453dcf9a99194bbb8bdeab718e08ee13dfa2ba0e4bebf29", size = 1868835, upload-time = "2026-01-03T17:31:56.733Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/ac/984c5a6f74c363b01ff97adc96a3976d9c98940b8969a1881575b279ac5d/aiohttp-3.13.3-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:add1da70de90a2569c5e15249ff76a631ccacfe198375eead4aadf3b8dc849dc", size = 1720486, upload-time = "2026-01-03T17:31:58.65Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/9a/b7039c5f099c4eb632138728828b33428585031a1e658d693d41d07d89d1/aiohttp-3.13.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:10b47b7ba335d2e9b1239fa571131a87e2d8ec96b333e68b2a305e7a98b0bae2", size = 1847951, upload-time = "2026-01-03T17:32:00.989Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/02/3bec2b9a1ba3c19ff89a43a19324202b8eb187ca1e928d8bdac9bbdddebd/aiohttp-3.13.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3dd4dce1c718e38081c8f35f323209d4c1df7d4db4bab1b5c88a6b4d12b74587", size = 1941001, upload-time = "2026-01-03T17:32:03.122Z" },
+ { url = "https://files.pythonhosted.org/packages/37/df/d879401cedeef27ac4717f6426c8c36c3091c6e9f08a9178cc87549c537f/aiohttp-3.13.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34bac00a67a812570d4a460447e1e9e06fae622946955f939051e7cc895cfab8", size = 1797246, upload-time = "2026-01-03T17:32:05.255Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/15/be122de1f67e6953add23335c8ece6d314ab67c8bebb3f181063010795a7/aiohttp-3.13.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a19884d2ee70b06d9204b2727a7b9f983d0c684c650254679e716b0b77920632", size = 1627131, upload-time = "2026-01-03T17:32:07.607Z" },
+ { url = "https://files.pythonhosted.org/packages/12/12/70eedcac9134cfa3219ab7af31ea56bc877395b1ac30d65b1bc4b27d0438/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ca7f2bb6ba8348a3614c7918cc4bb73268c5ac2a207576b7afea19d3d9f64", size = 1795196, upload-time = "2026-01-03T17:32:09.59Z" },
+ { url = "https://files.pythonhosted.org/packages/32/11/b30e1b1cd1f3054af86ebe60df96989c6a414dd87e27ad16950eee420bea/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:b0d95340658b9d2f11d9697f59b3814a9d3bb4b7a7c20b131df4bcef464037c0", size = 1782841, upload-time = "2026-01-03T17:32:11.445Z" },
+ { url = "https://files.pythonhosted.org/packages/88/0d/d98a9367b38912384a17e287850f5695c528cff0f14f791ce8ee2e4f7796/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:a1e53262fd202e4b40b70c3aff944a8155059beedc8a89bba9dc1f9ef06a1b56", size = 1795193, upload-time = "2026-01-03T17:32:13.705Z" },
+ { url = "https://files.pythonhosted.org/packages/43/a5/a2dfd1f5ff5581632c7f6a30e1744deda03808974f94f6534241ef60c751/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:d60ac9663f44168038586cab2157e122e46bdef09e9368b37f2d82d354c23f72", size = 1621979, upload-time = "2026-01-03T17:32:15.965Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/f0/12973c382ae7c1cccbc4417e129c5bf54c374dfb85af70893646e1f0e749/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:90751b8eed69435bac9ff4e3d2f6b3af1f57e37ecb0fbeee59c0174c9e2d41df", size = 1822193, upload-time = "2026-01-03T17:32:18.219Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/5f/24155e30ba7f8c96918af1350eb0663e2430aad9e001c0489d89cd708ab1/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fc353029f176fd2b3ec6cfc71be166aba1936fe5d73dd1992ce289ca6647a9aa", size = 1769801, upload-time = "2026-01-03T17:32:20.25Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/f8/7314031ff5c10e6ece114da79b338ec17eeff3a079e53151f7e9f43c4723/aiohttp-3.13.3-cp314-cp314t-win32.whl", hash = "sha256:2e41b18a58da1e474a057b3d35248d8320029f61d70a37629535b16a0c8f3767", size = 466523, upload-time = "2026-01-03T17:32:22.215Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/63/278a98c715ae467624eafe375542d8ba9b4383a016df8fdefe0ae28382a7/aiohttp-3.13.3-cp314-cp314t-win_amd64.whl", hash = "sha256:44531a36aa2264a1860089ffd4dce7baf875ee5a6079d5fb42e261c704ef7344", size = 499694, upload-time = "2026-01-03T17:32:24.546Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/79/446655656861d3e7e2c32bfcf160c7aa9e9dc63776a691b124dba65cdd77/aiohttp-3.13.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:31a83ea4aead760dfcb6962efb1d861db48c34379f2ff72db9ddddd4cda9ea2e", size = 741433, upload-time = "2026-01-03T17:32:26.453Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/49/773c4b310b5140d2fb5e79bb0bf40b7b41dad80a288ca1a8759f5f72bda9/aiohttp-3.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:988a8c5e317544fdf0d39871559e67b6341065b87fceac641108c2096d5506b7", size = 497332, upload-time = "2026-01-03T17:32:28.37Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/31/1dcbc4b83a4e6f76a0ad883f07f21ffbfe29750c89db97381701508c9f45/aiohttp-3.13.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b174f267b5cfb9a7dba9ee6859cecd234e9a681841eb85068059bc867fb8f02", size = 492365, upload-time = "2026-01-03T17:32:30.234Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/b5/b50657496c8754482cd7964e50aaf3aa84b3db61ed45daec4c1aec5b94b4/aiohttp-3.13.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:947c26539750deeaee933b000fb6517cc770bbd064bad6033f1cff4803881e43", size = 1660440, upload-time = "2026-01-03T17:32:32.586Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/73/9b69e5139d89d75127569298931444ad78ea86a5befd5599780b1e9a6880/aiohttp-3.13.3-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9ebf57d09e131f5323464bd347135a88622d1c0976e88ce15b670e7ad57e4bd6", size = 1632740, upload-time = "2026-01-03T17:32:34.793Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/fe/3ea9b5af694b4e3aec0d0613a806132ca744747146fca68e96bf056f61a7/aiohttp-3.13.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4ae5b5a0e1926e504c81c5b84353e7a5516d8778fbbff00429fe7b05bb25cbce", size = 1719782, upload-time = "2026-01-03T17:32:37.737Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/c2/46b3b06e60851cbb71efb0f79a3267279cbef7b12c58e68a1e897f269cca/aiohttp-3.13.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2ba0eea45eb5cc3172dbfc497c066f19c41bac70963ea1a67d51fc92e4cf9a80", size = 1813527, upload-time = "2026-01-03T17:32:39.973Z" },
+ { url = "https://files.pythonhosted.org/packages/36/23/71ceb78c769ed65fe4c697692de232b63dab399210678d2b00961ccb0619/aiohttp-3.13.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bae5c2ed2eae26cc382020edad80d01f36cb8e746da40b292e68fec40421dc6a", size = 1661268, upload-time = "2026-01-03T17:32:42.082Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/8d/86e929523d955e85ebab7c0e2b9e0cb63604cfc27dc3280e10d0063cf682/aiohttp-3.13.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8a60e60746623925eab7d25823329941aee7242d559baa119ca2b253c88a7bd6", size = 1552742, upload-time = "2026-01-03T17:32:44.622Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/ea/3f5987cba1bab6bd151f0d97aa60f0ce04d3c83316692a6bb6ba2fb69f92/aiohttp-3.13.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e50a2e1404f063427c9d027378472316201a2290959a295169bcf25992d04558", size = 1632918, upload-time = "2026-01-03T17:32:46.749Z" },
+ { url = "https://files.pythonhosted.org/packages/be/2c/7e1e85121f2e31ee938cb83a8f32dfafd4908530c10fabd6d46761c12ac7/aiohttp-3.13.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:9a9dc347e5a3dc7dfdbc1f82da0ef29e388ddb2ed281bfce9dd8248a313e62b7", size = 1644446, upload-time = "2026-01-03T17:32:49.063Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/35/ce6133d423ad0e8ca976a7c848f7146bca3520eea4ccf6b95e2d077c9d20/aiohttp-3.13.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b46020d11d23fe16551466c77823df9cc2f2c1e63cc965daf67fa5eec6ca1877", size = 1689487, upload-time = "2026-01-03T17:32:51.113Z" },
+ { url = "https://files.pythonhosted.org/packages/50/f7/ff7a27c15603d460fd1366b3c22054f7ae4fa9310aca40b43bde35867fcd/aiohttp-3.13.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:69c56fbc1993fa17043e24a546959c0178fe2b5782405ad4559e6c13975c15e3", size = 1540715, upload-time = "2026-01-03T17:32:53.38Z" },
+ { url = "https://files.pythonhosted.org/packages/17/02/053f11346e5b962e6d8a1c4f8c70c29d5970a1b4b8e7894c68e12c27a57f/aiohttp-3.13.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b99281b0704c103d4e11e72a76f1b543d4946fea7dd10767e7e1b5f00d4e5704", size = 1711835, upload-time = "2026-01-03T17:32:56.088Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/71/9b9761ddf276fd6708d13720197cbac19b8d67ecfa9116777924056cfcaa/aiohttp-3.13.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:40c5e40ecc29ba010656c18052b877a1c28f84344825efa106705e835c28530f", size = 1649593, upload-time = "2026-01-03T17:32:58.181Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/72/5d817e9ea218acae12a5e3b9ad1178cf0c12fc3570c0b47eea2daf95f9ea/aiohttp-3.13.3-cp39-cp39-win32.whl", hash = "sha256:56339a36b9f1fc708260c76c87e593e2afb30d26de9ae1eb445b5e051b98a7a1", size = 434831, upload-time = "2026-01-03T17:33:00.577Z" },
+ { url = "https://files.pythonhosted.org/packages/39/cb/22659d9bf3149b7a2927bc2769cc9c8f8f5a80eba098398e03c199a43a85/aiohttp-3.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:c6b8568a3bb5819a0ad087f16d40e5a3fb6099f39ea1d5625a3edc1e923fc538", size = 457697, upload-time = "2026-01-03T17:33:03.167Z" },
]
[[package]]
@@ -185,16 +185,16 @@ wheels = [
[[package]]
name = "anyio"
-version = "4.12.0"
+version = "4.12.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "exceptiongroup", marker = "python_full_version < '3.11' or (extra == 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2')" },
{ name = "idna" },
{ name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2')" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/96/f0/5eb65b2bb0d09ac6776f2eb54adee6abe8228ea05b20a5ad0e4945de8aac/anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703", size = 228685, upload-time = "2026-01-06T11:45:21.246Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" },
+ { url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" },
]
[[package]]
@@ -226,11 +226,11 @@ wheels = [
[[package]]
name = "certifi"
-version = "2025.11.12"
+version = "2026.1.4"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/e0/2d/a891ca51311197f6ad14a7ef42e2399f36cf2f9bd44752b3dc4eab60fdc5/certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120", size = 154268, upload-time = "2026-01-04T02:42:41.825Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/ad/3cc14f097111b4de0040c83a525973216457bbeeb63739ef1ed275c1c021/certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c", size = 152900, upload-time = "2026-01-04T02:42:40.15Z" },
]
[[package]]
@@ -238,7 +238,8 @@ name = "cffi"
version = "2.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
- { name = "pycparser", marker = "implementation_name != 'PyPy' or (extra == 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2')" },
+ { name = "pycparser", version = "2.23", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.10' and implementation_name != 'PyPy') or (python_full_version >= '3.10' and extra == 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2') or (implementation_name == 'PyPy' and extra == 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2')" },
+ { name = "pycparser", version = "3.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.10' and implementation_name != 'PyPy') or (python_full_version < '3.10' and extra == 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2') or (implementation_name == 'PyPy' and extra == 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2')" },
]
sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" }
wheels = [
@@ -338,67 +339,62 @@ wheels = [
[[package]]
name = "cryptography"
-version = "46.0.3"
+version = "46.0.4"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "cffi", marker = "platform_python_implementation != 'PyPy' or (extra == 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2')" },
{ name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2')" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/9f/33/c00162f49c0e2fe8064a62cb92b93e50c74a72bc370ab92f86112b33ff62/cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1", size = 749258, upload-time = "2025-10-15T23:18:31.74Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/78/19/f748958276519adf6a0c1e79e7b8860b4830dda55ccdf29f2719b5fc499c/cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59", size = 749301, upload-time = "2026-01-28T00:24:37.379Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/1d/42/9c391dd801d6cf0d561b5890549d4b27bafcc53b39c31a817e69d87c625b/cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a", size = 7225004, upload-time = "2025-10-15T23:16:52.239Z" },
- { url = "https://files.pythonhosted.org/packages/1c/67/38769ca6b65f07461eb200e85fc1639b438bdc667be02cf7f2cd6a64601c/cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc", size = 4296667, upload-time = "2025-10-15T23:16:54.369Z" },
- { url = "https://files.pythonhosted.org/packages/5c/49/498c86566a1d80e978b42f0d702795f69887005548c041636df6ae1ca64c/cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d", size = 4450807, upload-time = "2025-10-15T23:16:56.414Z" },
- { url = "https://files.pythonhosted.org/packages/4b/0a/863a3604112174c8624a2ac3c038662d9e59970c7f926acdcfaed8d61142/cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb", size = 4299615, upload-time = "2025-10-15T23:16:58.442Z" },
- { url = "https://files.pythonhosted.org/packages/64/02/b73a533f6b64a69f3cd3872acb6ebc12aef924d8d103133bb3ea750dc703/cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849", size = 4016800, upload-time = "2025-10-15T23:17:00.378Z" },
- { url = "https://files.pythonhosted.org/packages/25/d5/16e41afbfa450cde85a3b7ec599bebefaef16b5c6ba4ec49a3532336ed72/cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8", size = 4984707, upload-time = "2025-10-15T23:17:01.98Z" },
- { url = "https://files.pythonhosted.org/packages/c9/56/e7e69b427c3878352c2fb9b450bd0e19ed552753491d39d7d0a2f5226d41/cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec", size = 4482541, upload-time = "2025-10-15T23:17:04.078Z" },
- { url = "https://files.pythonhosted.org/packages/78/f6/50736d40d97e8483172f1bb6e698895b92a223dba513b0ca6f06b2365339/cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91", size = 4299464, upload-time = "2025-10-15T23:17:05.483Z" },
- { url = "https://files.pythonhosted.org/packages/00/de/d8e26b1a855f19d9994a19c702fa2e93b0456beccbcfe437eda00e0701f2/cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e", size = 4950838, upload-time = "2025-10-15T23:17:07.425Z" },
- { url = "https://files.pythonhosted.org/packages/8f/29/798fc4ec461a1c9e9f735f2fc58741b0daae30688f41b2497dcbc9ed1355/cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926", size = 4481596, upload-time = "2025-10-15T23:17:09.343Z" },
- { url = "https://files.pythonhosted.org/packages/15/8d/03cd48b20a573adfff7652b76271078e3045b9f49387920e7f1f631d125e/cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71", size = 4426782, upload-time = "2025-10-15T23:17:11.22Z" },
- { url = "https://files.pythonhosted.org/packages/fa/b1/ebacbfe53317d55cf33165bda24c86523497a6881f339f9aae5c2e13e57b/cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac", size = 4698381, upload-time = "2025-10-15T23:17:12.829Z" },
- { url = "https://files.pythonhosted.org/packages/96/92/8a6a9525893325fc057a01f654d7efc2c64b9de90413adcf605a85744ff4/cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018", size = 3055988, upload-time = "2025-10-15T23:17:14.65Z" },
- { url = "https://files.pythonhosted.org/packages/7e/bf/80fbf45253ea585a1e492a6a17efcb93467701fa79e71550a430c5e60df0/cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb", size = 3514451, upload-time = "2025-10-15T23:17:16.142Z" },
- { url = "https://files.pythonhosted.org/packages/2e/af/9b302da4c87b0beb9db4e756386a7c6c5b8003cd0e742277888d352ae91d/cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c", size = 2928007, upload-time = "2025-10-15T23:17:18.04Z" },
- { url = "https://files.pythonhosted.org/packages/f5/e2/a510aa736755bffa9d2f75029c229111a1d02f8ecd5de03078f4c18d91a3/cryptography-46.0.3-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217", size = 7158012, upload-time = "2025-10-15T23:17:19.982Z" },
- { url = "https://files.pythonhosted.org/packages/73/dc/9aa866fbdbb95b02e7f9d086f1fccfeebf8953509b87e3f28fff927ff8a0/cryptography-46.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5", size = 4288728, upload-time = "2025-10-15T23:17:21.527Z" },
- { url = "https://files.pythonhosted.org/packages/c5/fd/bc1daf8230eaa075184cbbf5f8cd00ba9db4fd32d63fb83da4671b72ed8a/cryptography-46.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715", size = 4435078, upload-time = "2025-10-15T23:17:23.042Z" },
- { url = "https://files.pythonhosted.org/packages/82/98/d3bd5407ce4c60017f8ff9e63ffee4200ab3e23fe05b765cab805a7db008/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54", size = 4293460, upload-time = "2025-10-15T23:17:24.885Z" },
- { url = "https://files.pythonhosted.org/packages/26/e9/e23e7900983c2b8af7a08098db406cf989d7f09caea7897e347598d4cd5b/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459", size = 3995237, upload-time = "2025-10-15T23:17:26.449Z" },
- { url = "https://files.pythonhosted.org/packages/91/15/af68c509d4a138cfe299d0d7ddb14afba15233223ebd933b4bbdbc7155d3/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422", size = 4967344, upload-time = "2025-10-15T23:17:28.06Z" },
- { url = "https://files.pythonhosted.org/packages/ca/e3/8643d077c53868b681af077edf6b3cb58288b5423610f21c62aadcbe99f4/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7", size = 4466564, upload-time = "2025-10-15T23:17:29.665Z" },
- { url = "https://files.pythonhosted.org/packages/0e/43/c1e8726fa59c236ff477ff2b5dc071e54b21e5a1e51aa2cee1676f1c986f/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044", size = 4292415, upload-time = "2025-10-15T23:17:31.686Z" },
- { url = "https://files.pythonhosted.org/packages/42/f9/2f8fefdb1aee8a8e3256a0568cffc4e6d517b256a2fe97a029b3f1b9fe7e/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665", size = 4931457, upload-time = "2025-10-15T23:17:33.478Z" },
- { url = "https://files.pythonhosted.org/packages/79/30/9b54127a9a778ccd6d27c3da7563e9f2d341826075ceab89ae3b41bf5be2/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3", size = 4466074, upload-time = "2025-10-15T23:17:35.158Z" },
- { url = "https://files.pythonhosted.org/packages/ac/68/b4f4a10928e26c941b1b6a179143af9f4d27d88fe84a6a3c53592d2e76bf/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20", size = 4420569, upload-time = "2025-10-15T23:17:37.188Z" },
- { url = "https://files.pythonhosted.org/packages/a3/49/3746dab4c0d1979888f125226357d3262a6dd40e114ac29e3d2abdf1ec55/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de", size = 4681941, upload-time = "2025-10-15T23:17:39.236Z" },
- { url = "https://files.pythonhosted.org/packages/fd/30/27654c1dbaf7e4a3531fa1fc77986d04aefa4d6d78259a62c9dc13d7ad36/cryptography-46.0.3-cp314-cp314t-win32.whl", hash = "sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914", size = 3022339, upload-time = "2025-10-15T23:17:40.888Z" },
- { url = "https://files.pythonhosted.org/packages/f6/30/640f34ccd4d2a1bc88367b54b926b781b5a018d65f404d409aba76a84b1c/cryptography-46.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db", size = 3494315, upload-time = "2025-10-15T23:17:42.769Z" },
- { url = "https://files.pythonhosted.org/packages/ba/8b/88cc7e3bd0a8e7b861f26981f7b820e1f46aa9d26cc482d0feba0ecb4919/cryptography-46.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21", size = 2919331, upload-time = "2025-10-15T23:17:44.468Z" },
- { url = "https://files.pythonhosted.org/packages/fd/23/45fe7f376a7df8daf6da3556603b36f53475a99ce4faacb6ba2cf3d82021/cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936", size = 7218248, upload-time = "2025-10-15T23:17:46.294Z" },
- { url = "https://files.pythonhosted.org/packages/27/32/b68d27471372737054cbd34c84981f9edbc24fe67ca225d389799614e27f/cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683", size = 4294089, upload-time = "2025-10-15T23:17:48.269Z" },
- { url = "https://files.pythonhosted.org/packages/26/42/fa8389d4478368743e24e61eea78846a0006caffaf72ea24a15159215a14/cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d", size = 4440029, upload-time = "2025-10-15T23:17:49.837Z" },
- { url = "https://files.pythonhosted.org/packages/5f/eb/f483db0ec5ac040824f269e93dd2bd8a21ecd1027e77ad7bdf6914f2fd80/cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0", size = 4297222, upload-time = "2025-10-15T23:17:51.357Z" },
- { url = "https://files.pythonhosted.org/packages/fd/cf/da9502c4e1912cb1da3807ea3618a6829bee8207456fbbeebc361ec38ba3/cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc", size = 4012280, upload-time = "2025-10-15T23:17:52.964Z" },
- { url = "https://files.pythonhosted.org/packages/6b/8f/9adb86b93330e0df8b3dcf03eae67c33ba89958fc2e03862ef1ac2b42465/cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3", size = 4978958, upload-time = "2025-10-15T23:17:54.965Z" },
- { url = "https://files.pythonhosted.org/packages/d1/a0/5fa77988289c34bdb9f913f5606ecc9ada1adb5ae870bd0d1054a7021cc4/cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971", size = 4473714, upload-time = "2025-10-15T23:17:56.754Z" },
- { url = "https://files.pythonhosted.org/packages/14/e5/fc82d72a58d41c393697aa18c9abe5ae1214ff6f2a5c18ac470f92777895/cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac", size = 4296970, upload-time = "2025-10-15T23:17:58.588Z" },
- { url = "https://files.pythonhosted.org/packages/78/06/5663ed35438d0b09056973994f1aec467492b33bd31da36e468b01ec1097/cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04", size = 4940236, upload-time = "2025-10-15T23:18:00.897Z" },
- { url = "https://files.pythonhosted.org/packages/fc/59/873633f3f2dcd8a053b8dd1d38f783043b5fce589c0f6988bf55ef57e43e/cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506", size = 4472642, upload-time = "2025-10-15T23:18:02.749Z" },
- { url = "https://files.pythonhosted.org/packages/3d/39/8e71f3930e40f6877737d6f69248cf74d4e34b886a3967d32f919cc50d3b/cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963", size = 4423126, upload-time = "2025-10-15T23:18:04.85Z" },
- { url = "https://files.pythonhosted.org/packages/cd/c7/f65027c2810e14c3e7268353b1681932b87e5a48e65505d8cc17c99e36ae/cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4", size = 4686573, upload-time = "2025-10-15T23:18:06.908Z" },
- { url = "https://files.pythonhosted.org/packages/0a/6e/1c8331ddf91ca4730ab3086a0f1be19c65510a33b5a441cb334e7a2d2560/cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df", size = 3036695, upload-time = "2025-10-15T23:18:08.672Z" },
- { url = "https://files.pythonhosted.org/packages/90/45/b0d691df20633eff80955a0fc7695ff9051ffce8b69741444bd9ed7bd0db/cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f", size = 3501720, upload-time = "2025-10-15T23:18:10.632Z" },
- { url = "https://files.pythonhosted.org/packages/e8/cb/2da4cc83f5edb9c3257d09e1e7ab7b23f049c7962cae8d842bbef0a9cec9/cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372", size = 2918740, upload-time = "2025-10-15T23:18:12.277Z" },
- { url = "https://files.pythonhosted.org/packages/d9/cd/1a8633802d766a0fa46f382a77e096d7e209e0817892929655fe0586ae32/cryptography-46.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a23582810fedb8c0bc47524558fb6c56aac3fc252cb306072fd2815da2a47c32", size = 3689163, upload-time = "2025-10-15T23:18:13.821Z" },
- { url = "https://files.pythonhosted.org/packages/4c/59/6b26512964ace6480c3e54681a9859c974172fb141c38df11eadd8416947/cryptography-46.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e7aec276d68421f9574040c26e2a7c3771060bc0cff408bae1dcb19d3ab1e63c", size = 3429474, upload-time = "2025-10-15T23:18:15.477Z" },
- { url = "https://files.pythonhosted.org/packages/06/8a/e60e46adab4362a682cf142c7dcb5bf79b782ab2199b0dcb81f55970807f/cryptography-46.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7ce938a99998ed3c8aa7e7272dca1a610401ede816d36d0693907d863b10d9ea", size = 3698132, upload-time = "2025-10-15T23:18:17.056Z" },
- { url = "https://files.pythonhosted.org/packages/da/38/f59940ec4ee91e93d3311f7532671a5cef5570eb04a144bf203b58552d11/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:191bb60a7be5e6f54e30ba16fdfae78ad3a342a0599eb4193ba88e3f3d6e185b", size = 4243992, upload-time = "2025-10-15T23:18:18.695Z" },
- { url = "https://files.pythonhosted.org/packages/b0/0c/35b3d92ddebfdfda76bb485738306545817253d0a3ded0bfe80ef8e67aa5/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c70cc23f12726be8f8bc72e41d5065d77e4515efae3690326764ea1b07845cfb", size = 4409944, upload-time = "2025-10-15T23:18:20.597Z" },
- { url = "https://files.pythonhosted.org/packages/99/55/181022996c4063fc0e7666a47049a1ca705abb9c8a13830f074edb347495/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:9394673a9f4de09e28b5356e7fff97d778f8abad85c9d5ac4a4b7e25a0de7717", size = 4242957, upload-time = "2025-10-15T23:18:22.18Z" },
- { url = "https://files.pythonhosted.org/packages/ba/af/72cd6ef29f9c5f731251acadaeb821559fe25f10852f44a63374c9ca08c1/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:94cd0549accc38d1494e1f8de71eca837d0509d0d44bf11d158524b0e12cebf9", size = 4409447, upload-time = "2025-10-15T23:18:24.209Z" },
- { url = "https://files.pythonhosted.org/packages/0d/c3/e90f4a4feae6410f914f8ebac129b9ae7a8c92eb60a638012dde42030a9d/cryptography-46.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6b5063083824e5509fdba180721d55909ffacccc8adbec85268b48439423d78c", size = 3438528, upload-time = "2025-10-15T23:18:26.227Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/99/157aae7949a5f30d51fcb1a9851e8ebd5c74bf99b5285d8bb4b8b9ee641e/cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485", size = 7173686, upload-time = "2026-01-28T00:23:07.515Z" },
+ { url = "https://files.pythonhosted.org/packages/87/91/874b8910903159043b5c6a123b7e79c4559ddd1896e38967567942635778/cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc", size = 4275871, upload-time = "2026-01-28T00:23:09.439Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/35/690e809be77896111f5b195ede56e4b4ed0435b428c2f2b6d35046fbb5e8/cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0", size = 4423124, upload-time = "2026-01-28T00:23:11.529Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/5b/a26407d4f79d61ca4bebaa9213feafdd8806dc69d3d290ce24996d3cfe43/cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa", size = 4277090, upload-time = "2026-01-28T00:23:13.123Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/d8/4bb7aec442a9049827aa34cee1aa83803e528fa55da9a9d45d01d1bb933e/cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81", size = 4947652, upload-time = "2026-01-28T00:23:14.554Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/08/f83e2e0814248b844265802d081f2fac2f1cbe6cd258e72ba14ff006823a/cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255", size = 4455157, upload-time = "2026-01-28T00:23:16.443Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/05/19d849cf4096448779d2dcc9bb27d097457dac36f7273ffa875a93b5884c/cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e", size = 3981078, upload-time = "2026-01-28T00:23:17.838Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/89/f7bac81d66ba7cde867a743ea5b37537b32b5c633c473002b26a226f703f/cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c", size = 4276213, upload-time = "2026-01-28T00:23:19.257Z" },
+ { url = "https://files.pythonhosted.org/packages/da/9f/7133e41f24edd827020ad21b068736e792bc68eecf66d93c924ad4719fb3/cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32", size = 4912190, upload-time = "2026-01-28T00:23:21.244Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/f7/6d43cbaddf6f65b24816e4af187d211f0bc536a29961f69faedc48501d8e/cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616", size = 4454641, upload-time = "2026-01-28T00:23:22.866Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/4f/ebd0473ad656a0ac912a16bd07db0f5d85184924e14fc88feecae2492834/cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0", size = 4405159, upload-time = "2026-01-28T00:23:25.278Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/f7/7923886f32dc47e27adeff8246e976d77258fd2aa3efdd1754e4e323bf49/cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0", size = 4666059, upload-time = "2026-01-28T00:23:26.766Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/a7/0fca0fd3591dffc297278a61813d7f661a14243dd60f499a7a5b48acb52a/cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5", size = 3026378, upload-time = "2026-01-28T00:23:28.317Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/12/652c84b6f9873f0909374864a57b003686c642ea48c84d6c7e2c515e6da5/cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b", size = 3478614, upload-time = "2026-01-28T00:23:30.275Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/27/542b029f293a5cce59349d799d4d8484b3b1654a7b9a0585c266e974a488/cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908", size = 7116417, upload-time = "2026-01-28T00:23:31.958Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/f5/559c25b77f40b6bf828eabaf988efb8b0e17b573545edb503368ca0a2a03/cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da", size = 4264508, upload-time = "2026-01-28T00:23:34.264Z" },
+ { url = "https://files.pythonhosted.org/packages/49/a1/551fa162d33074b660dc35c9bc3616fefa21a0e8c1edd27b92559902e408/cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829", size = 4409080, upload-time = "2026-01-28T00:23:35.793Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/6a/4d8d129a755f5d6df1bbee69ea2f35ebfa954fa1847690d1db2e8bca46a5/cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2", size = 4270039, upload-time = "2026-01-28T00:23:37.263Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/f5/ed3fcddd0a5e39321e595e144615399e47e7c153a1fb8c4862aec3151ff9/cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085", size = 4926748, upload-time = "2026-01-28T00:23:38.884Z" },
+ { url = "https://files.pythonhosted.org/packages/43/ae/9f03d5f0c0c00e85ecb34f06d3b79599f20630e4db91b8a6e56e8f83d410/cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b", size = 4442307, upload-time = "2026-01-28T00:23:40.56Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/22/e0f9f2dae8040695103369cf2283ef9ac8abe4d51f68710bec2afd232609/cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd", size = 3959253, upload-time = "2026-01-28T00:23:42.827Z" },
+ { url = "https://files.pythonhosted.org/packages/01/5b/6a43fcccc51dae4d101ac7d378a8724d1ba3de628a24e11bf2f4f43cba4d/cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2", size = 4269372, upload-time = "2026-01-28T00:23:44.655Z" },
+ { url = "https://files.pythonhosted.org/packages/17/b7/0f6b8c1dd0779df2b526e78978ff00462355e31c0a6f6cff8a3e99889c90/cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e", size = 4891908, upload-time = "2026-01-28T00:23:46.48Z" },
+ { url = "https://files.pythonhosted.org/packages/83/17/259409b8349aa10535358807a472c6a695cf84f106022268d31cea2b6c97/cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f", size = 4441254, upload-time = "2026-01-28T00:23:48.403Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/fe/e4a1b0c989b00cee5ffa0764401767e2d1cf59f45530963b894129fd5dce/cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82", size = 4396520, upload-time = "2026-01-28T00:23:50.26Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/81/ba8fd9657d27076eb40d6a2f941b23429a3c3d2f56f5a921d6b936a27bc9/cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c", size = 4651479, upload-time = "2026-01-28T00:23:51.674Z" },
+ { url = "https://files.pythonhosted.org/packages/00/03/0de4ed43c71c31e4fe954edd50b9d28d658fef56555eba7641696370a8e2/cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061", size = 3001986, upload-time = "2026-01-28T00:23:53.485Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/70/81830b59df7682917d7a10f833c4dab2a5574cd664e86d18139f2b421329/cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7", size = 3468288, upload-time = "2026-01-28T00:23:55.09Z" },
+ { url = "https://files.pythonhosted.org/packages/56/f7/f648fdbb61d0d45902d3f374217451385edc7e7768d1b03ff1d0e5ffc17b/cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab", size = 7169583, upload-time = "2026-01-28T00:23:56.558Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/cc/8f3224cbb2a928de7298d6ed4790f5ebc48114e02bdc9559196bfb12435d/cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef", size = 4275419, upload-time = "2026-01-28T00:23:58.364Z" },
+ { url = "https://files.pythonhosted.org/packages/17/43/4a18faa7a872d00e4264855134ba82d23546c850a70ff209e04ee200e76f/cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d", size = 4419058, upload-time = "2026-01-28T00:23:59.867Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/64/6651969409821d791ba12346a124f55e1b76f66a819254ae840a965d4b9c/cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973", size = 4278151, upload-time = "2026-01-28T00:24:01.731Z" },
+ { url = "https://files.pythonhosted.org/packages/20/0b/a7fce65ee08c3c02f7a8310cc090a732344066b990ac63a9dfd0a655d321/cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4", size = 4939441, upload-time = "2026-01-28T00:24:03.175Z" },
+ { url = "https://files.pythonhosted.org/packages/db/a7/20c5701e2cd3e1dfd7a19d2290c522a5f435dd30957d431dcb531d0f1413/cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af", size = 4451617, upload-time = "2026-01-28T00:24:05.403Z" },
+ { url = "https://files.pythonhosted.org/packages/00/dc/3e16030ea9aa47b63af6524c354933b4fb0e352257c792c4deeb0edae367/cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263", size = 3977774, upload-time = "2026-01-28T00:24:06.851Z" },
+ { url = "https://files.pythonhosted.org/packages/42/c8/ad93f14118252717b465880368721c963975ac4b941b7ef88f3c56bf2897/cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095", size = 4277008, upload-time = "2026-01-28T00:24:08.926Z" },
+ { url = "https://files.pythonhosted.org/packages/00/cf/89c99698151c00a4631fbfcfcf459d308213ac29e321b0ff44ceeeac82f1/cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b", size = 4903339, upload-time = "2026-01-28T00:24:12.009Z" },
+ { url = "https://files.pythonhosted.org/packages/03/c3/c90a2cb358de4ac9309b26acf49b2a100957e1ff5cc1e98e6c4996576710/cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019", size = 4451216, upload-time = "2026-01-28T00:24:13.975Z" },
+ { url = "https://files.pythonhosted.org/packages/96/2c/8d7f4171388a10208671e181ca43cdc0e596d8259ebacbbcfbd16de593da/cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4", size = 4404299, upload-time = "2026-01-28T00:24:16.169Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/23/cbb2036e450980f65c6e0a173b73a56ff3bccd8998965dea5cc9ddd424a5/cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b", size = 4664837, upload-time = "2026-01-28T00:24:17.629Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/21/f7433d18fe6d5845329cbdc597e30caf983229c7a245bcf54afecc555938/cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc", size = 3009779, upload-time = "2026-01-28T00:24:20.198Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/6a/bd2e7caa2facffedf172a45c1a02e551e6d7d4828658c9a245516a598d94/cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976", size = 3466633, upload-time = "2026-01-28T00:24:21.851Z" },
+ { url = "https://files.pythonhosted.org/packages/59/e0/f9c6c53e1f2a1c2507f00f2faba00f01d2f334b35b0fbfe5286715da2184/cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b", size = 3476316, upload-time = "2026-01-28T00:24:24.144Z" },
+ { url = "https://files.pythonhosted.org/packages/27/7a/f8d2d13227a9a1a9fe9c7442b057efecffa41f1e3c51d8622f26b9edbe8f/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da", size = 4216693, upload-time = "2026-01-28T00:24:25.758Z" },
+ { url = "https://files.pythonhosted.org/packages/c5/de/3787054e8f7972658370198753835d9d680f6cd4a39df9f877b57f0dd69c/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80", size = 4382765, upload-time = "2026-01-28T00:24:27.577Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/5f/60e0afb019973ba6a0b322e86b3d61edf487a4f5597618a430a2a15f2d22/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822", size = 4216066, upload-time = "2026-01-28T00:24:29.056Z" },
+ { url = "https://files.pythonhosted.org/packages/81/8e/bf4a0de294f147fee66f879d9bae6f8e8d61515558e3d12785dd90eca0be/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947", size = 4382025, upload-time = "2026-01-28T00:24:30.681Z" },
+ { url = "https://files.pythonhosted.org/packages/79/f4/9ceb90cfd6a3847069b0b0b353fd3075dc69b49defc70182d8af0c4ca390/cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3", size = 3406043, upload-time = "2026-01-28T00:24:32.236Z" },
]

[[package]]
@@ -410,7 +406,7 @@ dependencies = [
{ name = "distro" },
{ name = "httpx" },
{ name = "jiter" },
- { name = "pydantic", version = "1.10.24", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'group-12-dedalus-labs-pydantic-v1'" },
+ { name = "pydantic", version = "1.10.26", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'group-12-dedalus-labs-pydantic-v1'" },
{ name = "pydantic", version = "2.12.5", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'group-12-dedalus-labs-pydantic-v2' or extra != 'group-12-dedalus-labs-pydantic-v1'" },
{ name = "sniffio" },
{ name = "typing-extensions" },
@@ -440,10 +436,10 @@ dev = [
{ name = "rich" },
{ name = "ruff" },
{ name = "time-machine", version = "2.19.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' or (extra == 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2')" },
- { name = "time-machine", version = "3.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' or (extra == 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2')" },
+ { name = "time-machine", version = "3.2.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' or (extra == 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2')" },
]
pydantic-v1 = [
- { name = "pydantic", version = "1.10.24", source = { registry = "https://pypi.org/simple" } },
+ { name = "pydantic", version = "1.10.26", source = { registry = "https://pypi.org/simple" } },
]
pydantic-v2 = [
{ name = "pydantic", version = "2.12.5", source = { registry = "https://pypi.org/simple" } },
@@ -699,15 +695,15 @@ wheels = [
[[package]]
name = "httpx-aiohttp"
-version = "0.1.9"
+version = "0.1.12"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "aiohttp" },
{ name = "httpx" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/d8/f2/9a86ce9bc48cf57dabb3a3160dfed26d8bbe5a2478a51f9d1dbf89f2f1fc/httpx_aiohttp-0.1.9.tar.gz", hash = "sha256:4ee8b22e6f2e7c80cd03be29eff98bfe7d89bd77f021ce0b578ee76b73b4bfe6", size = 206023, upload-time = "2025-10-15T08:52:57.475Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/63/2c/b894861cecf030fb45675ea24aa55b5722e97c602a163d872fca66c5a6d8/httpx_aiohttp-0.1.12.tar.gz", hash = "sha256:81feec51fd82c0ecfa0e9aaf1b1a6c2591260d5e2bcbeb7eb0277a78e610df2c", size = 275945, upload-time = "2025-12-12T10:12:15.283Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/a1/db/5cfa8254a86c34a1ab7fe0dbec9f81bb5ebd831cbdd65aa4be4f37027804/httpx_aiohttp-0.1.9-py3-none-any.whl", hash = "sha256:3dc2845568b07742588710fcf3d72db2cbcdf2acc93376edf85f789c4d8e5fda", size = 6180, upload-time = "2025-10-15T08:52:56.521Z" },
+ { url = "https://files.pythonhosted.org/packages/16/8d/85c9701e9af72ca132a1783e2a54364a90c6da832304416a30fc11196ab2/httpx_aiohttp-0.1.12-py3-none-any.whl", hash = "sha256:5b0eac39a7f360fa7867a60bcb46bb1024eada9c01cbfecdb54dc1edb3fb7141", size = 6367, upload-time = "2025-12-12T10:12:14.018Z" },
]

[[package]]
@@ -721,14 +717,14 @@ wheels = [
[[package]]
name = "importlib-metadata"
-version = "8.7.0"
+version = "8.7.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "zipp" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" },
]

[[package]]
@@ -760,111 +756,111 @@ wheels = [
[[package]]
name = "jiter"
-version = "0.12.0"
+version = "0.13.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/45/9d/e0660989c1370e25848bb4c52d061c71837239738ad937e83edca174c273/jiter-0.12.0.tar.gz", hash = "sha256:64dfcd7d5c168b38d3f9f8bba7fc639edb3418abcc74f22fdbe6b8938293f30b", size = 168294, upload-time = "2025-11-09T20:49:23.302Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/0d/5e/4ec91646aee381d01cdb9974e30882c9cd3b8c5d1079d6b5ff4af522439a/jiter-0.13.0.tar.gz", hash = "sha256:f2839f9c2c7e2dffc1bc5929a510e14ce0a946be9365fd1219e7ef342dae14f4", size = 164847, upload-time = "2026-02-02T12:37:56.441Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/3b/91/13cb9505f7be74a933f37da3af22e029f6ba64f5669416cb8b2774bc9682/jiter-0.12.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e7acbaba9703d5de82a2c98ae6a0f59ab9770ab5af5fa35e43a303aee962cf65", size = 316652, upload-time = "2025-11-09T20:46:41.021Z" },
- { url = "https://files.pythonhosted.org/packages/4e/76/4e9185e5d9bb4e482cf6dec6410d5f78dfeb374cfcecbbe9888d07c52daa/jiter-0.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:364f1a7294c91281260364222f535bc427f56d4de1d8ffd718162d21fbbd602e", size = 319829, upload-time = "2025-11-09T20:46:43.281Z" },
- { url = "https://files.pythonhosted.org/packages/86/af/727de50995d3a153138139f259baae2379d8cb0522c0c00419957bc478a6/jiter-0.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85ee4d25805d4fb23f0a5167a962ef8e002dbfb29c0989378488e32cf2744b62", size = 350568, upload-time = "2025-11-09T20:46:45.075Z" },
- { url = "https://files.pythonhosted.org/packages/6a/c1/d6e9f4b7a3d5ac63bcbdfddeb50b2dcfbdc512c86cffc008584fdc350233/jiter-0.12.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:796f466b7942107eb889c08433b6e31b9a7ed31daceaecf8af1be26fb26c0ca8", size = 369052, upload-time = "2025-11-09T20:46:46.818Z" },
- { url = "https://files.pythonhosted.org/packages/eb/be/00824cd530f30ed73fa8a4f9f3890a705519e31ccb9e929f1e22062e7c76/jiter-0.12.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:35506cb71f47dba416694e67af996bbdefb8e3608f1f78799c2e1f9058b01ceb", size = 481585, upload-time = "2025-11-09T20:46:48.319Z" },
- { url = "https://files.pythonhosted.org/packages/74/b6/2ad7990dff9504d4b5052eef64aa9574bd03d722dc7edced97aad0d47be7/jiter-0.12.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:726c764a90c9218ec9e4f99a33d6bf5ec169163f2ca0fc21b654e88c2abc0abc", size = 380541, upload-time = "2025-11-09T20:46:49.643Z" },
- { url = "https://files.pythonhosted.org/packages/b5/c7/f3c26ecbc1adbf1db0d6bba99192143d8fe8504729d9594542ecc4445784/jiter-0.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa47810c5565274810b726b0dc86d18dce5fd17b190ebdc3890851d7b2a0e74", size = 364423, upload-time = "2025-11-09T20:46:51.731Z" },
- { url = "https://files.pythonhosted.org/packages/18/51/eac547bf3a2d7f7e556927278e14c56a0604b8cddae75815d5739f65f81d/jiter-0.12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8ec0259d3f26c62aed4d73b198c53e316ae11f0f69c8fbe6682c6dcfa0fcce2", size = 389958, upload-time = "2025-11-09T20:46:53.432Z" },
- { url = "https://files.pythonhosted.org/packages/2c/1f/9ca592e67175f2db156cff035e0d817d6004e293ee0c1d73692d38fcb596/jiter-0.12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:79307d74ea83465b0152fa23e5e297149506435535282f979f18b9033c0bb025", size = 522084, upload-time = "2025-11-09T20:46:54.848Z" },
- { url = "https://files.pythonhosted.org/packages/83/ff/597d9cdc3028f28224f53e1a9d063628e28b7a5601433e3196edda578cdd/jiter-0.12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cf6e6dd18927121fec86739f1a8906944703941d000f0639f3eb6281cc601dca", size = 513054, upload-time = "2025-11-09T20:46:56.487Z" },
- { url = "https://files.pythonhosted.org/packages/24/6d/1970bce1351bd02e3afcc5f49e4f7ef3dabd7fb688f42be7e8091a5b809a/jiter-0.12.0-cp310-cp310-win32.whl", hash = "sha256:b6ae2aec8217327d872cbfb2c1694489057b9433afce447955763e6ab015b4c4", size = 206368, upload-time = "2025-11-09T20:46:58.638Z" },
- { url = "https://files.pythonhosted.org/packages/e3/6b/eb1eb505b2d86709b59ec06681a2b14a94d0941db091f044b9f0e16badc0/jiter-0.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:c7f49ce90a71e44f7e1aa9e7ec415b9686bbc6a5961e57eab511015e6759bc11", size = 204847, upload-time = "2025-11-09T20:47:00.295Z" },
- { url = "https://files.pythonhosted.org/packages/32/f9/eaca4633486b527ebe7e681c431f529b63fe2709e7c5242fc0f43f77ce63/jiter-0.12.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8f8a7e317190b2c2d60eb2e8aa835270b008139562d70fe732e1c0020ec53c9", size = 316435, upload-time = "2025-11-09T20:47:02.087Z" },
- { url = "https://files.pythonhosted.org/packages/10/c1/40c9f7c22f5e6ff715f28113ebaba27ab85f9af2660ad6e1dd6425d14c19/jiter-0.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2218228a077e784c6c8f1a8e5d6b8cb1dea62ce25811c356364848554b2056cd", size = 320548, upload-time = "2025-11-09T20:47:03.409Z" },
- { url = "https://files.pythonhosted.org/packages/6b/1b/efbb68fe87e7711b00d2cfd1f26bb4bfc25a10539aefeaa7727329ffb9cb/jiter-0.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9354ccaa2982bf2188fd5f57f79f800ef622ec67beb8329903abf6b10da7d423", size = 351915, upload-time = "2025-11-09T20:47:05.171Z" },
- { url = "https://files.pythonhosted.org/packages/15/2d/c06e659888c128ad1e838123d0638f0efad90cc30860cb5f74dd3f2fc0b3/jiter-0.12.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8f2607185ea89b4af9a604d4c7ec40e45d3ad03ee66998b031134bc510232bb7", size = 368966, upload-time = "2025-11-09T20:47:06.508Z" },
- { url = "https://files.pythonhosted.org/packages/6b/20/058db4ae5fb07cf6a4ab2e9b9294416f606d8e467fb74c2184b2a1eeacba/jiter-0.12.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3a585a5e42d25f2e71db5f10b171f5e5ea641d3aa44f7df745aa965606111cc2", size = 482047, upload-time = "2025-11-09T20:47:08.382Z" },
- { url = "https://files.pythonhosted.org/packages/49/bb/dc2b1c122275e1de2eb12905015d61e8316b2f888bdaac34221c301495d6/jiter-0.12.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd9e21d34edff5a663c631f850edcb786719c960ce887a5661e9c828a53a95d9", size = 380835, upload-time = "2025-11-09T20:47:09.81Z" },
- { url = "https://files.pythonhosted.org/packages/23/7d/38f9cd337575349de16da575ee57ddb2d5a64d425c9367f5ef9e4612e32e/jiter-0.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a612534770470686cd5431478dc5a1b660eceb410abade6b1b74e320ca98de6", size = 364587, upload-time = "2025-11-09T20:47:11.529Z" },
- { url = "https://files.pythonhosted.org/packages/f0/a3/b13e8e61e70f0bb06085099c4e2462647f53cc2ca97614f7fedcaa2bb9f3/jiter-0.12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3985aea37d40a908f887b34d05111e0aae822943796ebf8338877fee2ab67725", size = 390492, upload-time = "2025-11-09T20:47:12.993Z" },
- { url = "https://files.pythonhosted.org/packages/07/71/e0d11422ed027e21422f7bc1883c61deba2d9752b720538430c1deadfbca/jiter-0.12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b1207af186495f48f72529f8d86671903c8c10127cac6381b11dddc4aaa52df6", size = 522046, upload-time = "2025-11-09T20:47:14.6Z" },
- { url = "https://files.pythonhosted.org/packages/9f/59/b968a9aa7102a8375dbbdfbd2aeebe563c7e5dddf0f47c9ef1588a97e224/jiter-0.12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef2fb241de583934c9915a33120ecc06d94aa3381a134570f59eed784e87001e", size = 513392, upload-time = "2025-11-09T20:47:16.011Z" },
- { url = "https://files.pythonhosted.org/packages/ca/e4/7df62002499080dbd61b505c5cb351aa09e9959d176cac2aa8da6f93b13b/jiter-0.12.0-cp311-cp311-win32.whl", hash = "sha256:453b6035672fecce8007465896a25b28a6b59cfe8fbc974b2563a92f5a92a67c", size = 206096, upload-time = "2025-11-09T20:47:17.344Z" },
- { url = "https://files.pythonhosted.org/packages/bb/60/1032b30ae0572196b0de0e87dce3b6c26a1eff71aad5fe43dee3082d32e0/jiter-0.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:ca264b9603973c2ad9435c71a8ec8b49f8f715ab5ba421c85a51cde9887e421f", size = 204899, upload-time = "2025-11-09T20:47:19.365Z" },
- { url = "https://files.pythonhosted.org/packages/49/d5/c145e526fccdb834063fb45c071df78b0cc426bbaf6de38b0781f45d956f/jiter-0.12.0-cp311-cp311-win_arm64.whl", hash = "sha256:cb00ef392e7d684f2754598c02c409f376ddcef857aae796d559e6cacc2d78a5", size = 188070, upload-time = "2025-11-09T20:47:20.75Z" },
- { url = "https://files.pythonhosted.org/packages/92/c9/5b9f7b4983f1b542c64e84165075335e8a236fa9e2ea03a0c79780062be8/jiter-0.12.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:305e061fa82f4680607a775b2e8e0bcb071cd2205ac38e6ef48c8dd5ebe1cf37", size = 314449, upload-time = "2025-11-09T20:47:22.999Z" },
- { url = "https://files.pythonhosted.org/packages/98/6e/e8efa0e78de00db0aee82c0cf9e8b3f2027efd7f8a71f859d8f4be8e98ef/jiter-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5c1860627048e302a528333c9307c818c547f214d8659b0705d2195e1a94b274", size = 319855, upload-time = "2025-11-09T20:47:24.779Z" },
- { url = "https://files.pythonhosted.org/packages/20/26/894cd88e60b5d58af53bec5c6759d1292bd0b37a8b5f60f07abf7a63ae5f/jiter-0.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df37577a4f8408f7e0ec3205d2a8f87672af8f17008358063a4d6425b6081ce3", size = 350171, upload-time = "2025-11-09T20:47:26.469Z" },
- { url = "https://files.pythonhosted.org/packages/f5/27/a7b818b9979ac31b3763d25f3653ec3a954044d5e9f5d87f2f247d679fd1/jiter-0.12.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:75fdd787356c1c13a4f40b43c2156276ef7a71eb487d98472476476d803fb2cf", size = 365590, upload-time = "2025-11-09T20:47:27.918Z" },
- { url = "https://files.pythonhosted.org/packages/ba/7e/e46195801a97673a83746170b17984aa8ac4a455746354516d02ca5541b4/jiter-0.12.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1eb5db8d9c65b112aacf14fcd0faae9913d07a8afea5ed06ccdd12b724e966a1", size = 479462, upload-time = "2025-11-09T20:47:29.654Z" },
- { url = "https://files.pythonhosted.org/packages/ca/75/f833bfb009ab4bd11b1c9406d333e3b4357709ed0570bb48c7c06d78c7dd/jiter-0.12.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73c568cc27c473f82480abc15d1301adf333a7ea4f2e813d6a2c7d8b6ba8d0df", size = 378983, upload-time = "2025-11-09T20:47:31.026Z" },
- { url = "https://files.pythonhosted.org/packages/71/b3/7a69d77943cc837d30165643db753471aff5df39692d598da880a6e51c24/jiter-0.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4321e8a3d868919bcb1abb1db550d41f2b5b326f72df29e53b2df8b006eb9403", size = 361328, upload-time = "2025-11-09T20:47:33.286Z" },
- { url = "https://files.pythonhosted.org/packages/b0/ac/a78f90caf48d65ba70d8c6efc6f23150bc39dc3389d65bbec2a95c7bc628/jiter-0.12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a51bad79f8cc9cac2b4b705039f814049142e0050f30d91695a2d9a6611f126", size = 386740, upload-time = "2025-11-09T20:47:34.703Z" },
- { url = "https://files.pythonhosted.org/packages/39/b6/5d31c2cc8e1b6a6bcf3c5721e4ca0a3633d1ab4754b09bc7084f6c4f5327/jiter-0.12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2a67b678f6a5f1dd6c36d642d7db83e456bc8b104788262aaefc11a22339f5a9", size = 520875, upload-time = "2025-11-09T20:47:36.058Z" },
- { url = "https://files.pythonhosted.org/packages/30/b5/4df540fae4e9f68c54b8dab004bd8c943a752f0b00efd6e7d64aa3850339/jiter-0.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efe1a211fe1fd14762adea941e3cfd6c611a136e28da6c39272dbb7a1bbe6a86", size = 511457, upload-time = "2025-11-09T20:47:37.932Z" },
- { url = "https://files.pythonhosted.org/packages/07/65/86b74010e450a1a77b2c1aabb91d4a91dd3cd5afce99f34d75fd1ac64b19/jiter-0.12.0-cp312-cp312-win32.whl", hash = "sha256:d779d97c834b4278276ec703dc3fc1735fca50af63eb7262f05bdb4e62203d44", size = 204546, upload-time = "2025-11-09T20:47:40.47Z" },
- { url = "https://files.pythonhosted.org/packages/1c/c7/6659f537f9562d963488e3e55573498a442503ced01f7e169e96a6110383/jiter-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e8269062060212b373316fe69236096aaf4c49022d267c6736eebd66bbbc60bb", size = 205196, upload-time = "2025-11-09T20:47:41.794Z" },
- { url = "https://files.pythonhosted.org/packages/21/f4/935304f5169edadfec7f9c01eacbce4c90bb9a82035ac1de1f3bd2d40be6/jiter-0.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:06cb970936c65de926d648af0ed3d21857f026b1cf5525cb2947aa5e01e05789", size = 186100, upload-time = "2025-11-09T20:47:43.007Z" },
- { url = "https://files.pythonhosted.org/packages/3d/a6/97209693b177716e22576ee1161674d1d58029eb178e01866a0422b69224/jiter-0.12.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6cc49d5130a14b732e0612bc76ae8db3b49898732223ef8b7599aa8d9810683e", size = 313658, upload-time = "2025-11-09T20:47:44.424Z" },
- { url = "https://files.pythonhosted.org/packages/06/4d/125c5c1537c7d8ee73ad3d530a442d6c619714b95027143f1b61c0b4dfe0/jiter-0.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:37f27a32ce36364d2fa4f7fdc507279db604d27d239ea2e044c8f148410defe1", size = 318605, upload-time = "2025-11-09T20:47:45.973Z" },
- { url = "https://files.pythonhosted.org/packages/99/bf/a840b89847885064c41a5f52de6e312e91fa84a520848ee56c97e4fa0205/jiter-0.12.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbc0944aa3d4b4773e348cda635252824a78f4ba44328e042ef1ff3f6080d1cf", size = 349803, upload-time = "2025-11-09T20:47:47.535Z" },
- { url = "https://files.pythonhosted.org/packages/8a/88/e63441c28e0db50e305ae23e19c1d8fae012d78ed55365da392c1f34b09c/jiter-0.12.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da25c62d4ee1ffbacb97fac6dfe4dcd6759ebdc9015991e92a6eae5816287f44", size = 365120, upload-time = "2025-11-09T20:47:49.284Z" },
- { url = "https://files.pythonhosted.org/packages/0a/7c/49b02714af4343970eb8aca63396bc1c82fa01197dbb1e9b0d274b550d4e/jiter-0.12.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:048485c654b838140b007390b8182ba9774621103bd4d77c9c3f6f117474ba45", size = 479918, upload-time = "2025-11-09T20:47:50.807Z" },
- { url = "https://files.pythonhosted.org/packages/69/ba/0a809817fdd5a1db80490b9150645f3aae16afad166960bcd562be194f3b/jiter-0.12.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:635e737fbb7315bef0037c19b88b799143d2d7d3507e61a76751025226b3ac87", size = 379008, upload-time = "2025-11-09T20:47:52.211Z" },
- { url = "https://files.pythonhosted.org/packages/5f/c3/c9fc0232e736c8877d9e6d83d6eeb0ba4e90c6c073835cc2e8f73fdeef51/jiter-0.12.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e017c417b1ebda911bd13b1e40612704b1f5420e30695112efdbed8a4b389ed", size = 361785, upload-time = "2025-11-09T20:47:53.512Z" },
- { url = "https://files.pythonhosted.org/packages/96/61/61f69b7e442e97ca6cd53086ddc1cf59fb830549bc72c0a293713a60c525/jiter-0.12.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:89b0bfb8b2bf2351fba36bb211ef8bfceba73ef58e7f0c68fb67b5a2795ca2f9", size = 386108, upload-time = "2025-11-09T20:47:54.893Z" },
- { url = "https://files.pythonhosted.org/packages/e9/2e/76bb3332f28550c8f1eba3bf6e5efe211efda0ddbbaf24976bc7078d42a5/jiter-0.12.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:f5aa5427a629a824a543672778c9ce0c5e556550d1569bb6ea28a85015287626", size = 519937, upload-time = "2025-11-09T20:47:56.253Z" },
- { url = "https://files.pythonhosted.org/packages/84/d6/fa96efa87dc8bff2094fb947f51f66368fa56d8d4fc9e77b25d7fbb23375/jiter-0.12.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed53b3d6acbcb0fd0b90f20c7cb3b24c357fe82a3518934d4edfa8c6898e498c", size = 510853, upload-time = "2025-11-09T20:47:58.32Z" },
- { url = "https://files.pythonhosted.org/packages/8a/28/93f67fdb4d5904a708119a6ab58a8f1ec226ff10a94a282e0215402a8462/jiter-0.12.0-cp313-cp313-win32.whl", hash = "sha256:4747de73d6b8c78f2e253a2787930f4fffc68da7fa319739f57437f95963c4de", size = 204699, upload-time = "2025-11-09T20:47:59.686Z" },
- { url = "https://files.pythonhosted.org/packages/c4/1f/30b0eb087045a0abe2a5c9c0c0c8da110875a1d3be83afd4a9a4e548be3c/jiter-0.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:e25012eb0c456fcc13354255d0338cd5397cce26c77b2832b3c4e2e255ea5d9a", size = 204258, upload-time = "2025-11-09T20:48:01.01Z" },
- { url = "https://files.pythonhosted.org/packages/2c/f4/2b4daf99b96bce6fc47971890b14b2a36aef88d7beb9f057fafa032c6141/jiter-0.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:c97b92c54fe6110138c872add030a1f99aea2401ddcdaa21edf74705a646dd60", size = 185503, upload-time = "2025-11-09T20:48:02.35Z" },
- { url = "https://files.pythonhosted.org/packages/39/ca/67bb15a7061d6fe20b9b2a2fd783e296a1e0f93468252c093481a2f00efa/jiter-0.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:53839b35a38f56b8be26a7851a48b89bc47e5d88e900929df10ed93b95fea3d6", size = 317965, upload-time = "2025-11-09T20:48:03.783Z" },
- { url = "https://files.pythonhosted.org/packages/18/af/1788031cd22e29c3b14bc6ca80b16a39a0b10e611367ffd480c06a259831/jiter-0.12.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94f669548e55c91ab47fef8bddd9c954dab1938644e715ea49d7e117015110a4", size = 345831, upload-time = "2025-11-09T20:48:05.55Z" },
- { url = "https://files.pythonhosted.org/packages/05/17/710bf8472d1dff0d3caf4ced6031060091c1320f84ee7d5dcbed1f352417/jiter-0.12.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:351d54f2b09a41600ffea43d081522d792e81dcfb915f6d2d242744c1cc48beb", size = 361272, upload-time = "2025-11-09T20:48:06.951Z" },
- { url = "https://files.pythonhosted.org/packages/fb/f1/1dcc4618b59761fef92d10bcbb0b038b5160be653b003651566a185f1a5c/jiter-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2a5e90604620f94bf62264e7c2c038704d38217b7465b863896c6d7c902b06c7", size = 204604, upload-time = "2025-11-09T20:48:08.328Z" },
- { url = "https://files.pythonhosted.org/packages/d9/32/63cb1d9f1c5c6632a783c0052cde9ef7ba82688f7065e2f0d5f10a7e3edb/jiter-0.12.0-cp313-cp313t-win_arm64.whl", hash = "sha256:88ef757017e78d2860f96250f9393b7b577b06a956ad102c29c8237554380db3", size = 185628, upload-time = "2025-11-09T20:48:09.572Z" },
- { url = "https://files.pythonhosted.org/packages/a8/99/45c9f0dbe4a1416b2b9a8a6d1236459540f43d7fb8883cff769a8db0612d/jiter-0.12.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:c46d927acd09c67a9fb1416df45c5a04c27e83aae969267e98fba35b74e99525", size = 312478, upload-time = "2025-11-09T20:48:10.898Z" },
- { url = "https://files.pythonhosted.org/packages/4c/a7/54ae75613ba9e0f55fcb0bc5d1f807823b5167cc944e9333ff322e9f07dd/jiter-0.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:774ff60b27a84a85b27b88cd5583899c59940bcc126caca97eb2a9df6aa00c49", size = 318706, upload-time = "2025-11-09T20:48:12.266Z" },
- { url = "https://files.pythonhosted.org/packages/59/31/2aa241ad2c10774baf6c37f8b8e1f39c07db358f1329f4eb40eba179c2a2/jiter-0.12.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5433fab222fb072237df3f637d01b81f040a07dcac1cb4a5c75c7aa9ed0bef1", size = 351894, upload-time = "2025-11-09T20:48:13.673Z" },
- { url = "https://files.pythonhosted.org/packages/54/4f/0f2759522719133a9042781b18cc94e335b6d290f5e2d3e6899d6af933e3/jiter-0.12.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f8c593c6e71c07866ec6bfb790e202a833eeec885022296aff6b9e0b92d6a70e", size = 365714, upload-time = "2025-11-09T20:48:15.083Z" },
- { url = "https://files.pythonhosted.org/packages/dc/6f/806b895f476582c62a2f52c453151edd8a0fde5411b0497baaa41018e878/jiter-0.12.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:90d32894d4c6877a87ae00c6b915b609406819dce8bc0d4e962e4de2784e567e", size = 478989, upload-time = "2025-11-09T20:48:16.706Z" },
- { url = "https://files.pythonhosted.org/packages/86/6c/012d894dc6e1033acd8db2b8346add33e413ec1c7c002598915278a37f79/jiter-0.12.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:798e46eed9eb10c3adbbacbd3bdb5ecd4cf7064e453d00dbef08802dae6937ff", size = 378615, upload-time = "2025-11-09T20:48:18.614Z" },
- { url = "https://files.pythonhosted.org/packages/87/30/d718d599f6700163e28e2c71c0bbaf6dace692e7df2592fd793ac9276717/jiter-0.12.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3f1368f0a6719ea80013a4eb90ba72e75d7ea67cfc7846db2ca504f3df0169a", size = 364745, upload-time = "2025-11-09T20:48:20.117Z" },
- { url = "https://files.pythonhosted.org/packages/8f/85/315b45ce4b6ddc7d7fceca24068543b02bdc8782942f4ee49d652e2cc89f/jiter-0.12.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65f04a9d0b4406f7e51279710b27484af411896246200e461d80d3ba0caa901a", size = 386502, upload-time = "2025-11-09T20:48:21.543Z" },
- { url = "https://files.pythonhosted.org/packages/74/0b/ce0434fb40c5b24b368fe81b17074d2840748b4952256bab451b72290a49/jiter-0.12.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:fd990541982a24281d12b67a335e44f117e4c6cbad3c3b75c7dea68bf4ce3a67", size = 519845, upload-time = "2025-11-09T20:48:22.964Z" },
- { url = "https://files.pythonhosted.org/packages/e8/a3/7a7a4488ba052767846b9c916d208b3ed114e3eb670ee984e4c565b9cf0d/jiter-0.12.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:b111b0e9152fa7df870ecaebb0bd30240d9f7fff1f2003bcb4ed0f519941820b", size = 510701, upload-time = "2025-11-09T20:48:24.483Z" },
- { url = "https://files.pythonhosted.org/packages/c3/16/052ffbf9d0467b70af24e30f91e0579e13ded0c17bb4a8eb2aed3cb60131/jiter-0.12.0-cp314-cp314-win32.whl", hash = "sha256:a78befb9cc0a45b5a5a0d537b06f8544c2ebb60d19d02c41ff15da28a9e22d42", size = 205029, upload-time = "2025-11-09T20:48:25.749Z" },
- { url = "https://files.pythonhosted.org/packages/e4/18/3cf1f3f0ccc789f76b9a754bdb7a6977e5d1d671ee97a9e14f7eb728d80e/jiter-0.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:e1fe01c082f6aafbe5c8faf0ff074f38dfb911d53f07ec333ca03f8f6226debf", size = 204960, upload-time = "2025-11-09T20:48:27.415Z" },
- { url = "https://files.pythonhosted.org/packages/02/68/736821e52ecfdeeb0f024b8ab01b5a229f6b9293bbdb444c27efade50b0f/jiter-0.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:d72f3b5a432a4c546ea4bedc84cce0c3404874f1d1676260b9c7f048a9855451", size = 185529, upload-time = "2025-11-09T20:48:29.125Z" },
- { url = "https://files.pythonhosted.org/packages/30/61/12ed8ee7a643cce29ac97c2281f9ce3956eb76b037e88d290f4ed0d41480/jiter-0.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e6ded41aeba3603f9728ed2b6196e4df875348ab97b28fc8afff115ed42ba7a7", size = 318974, upload-time = "2025-11-09T20:48:30.87Z" },
- { url = "https://files.pythonhosted.org/packages/2d/c6/f3041ede6d0ed5e0e79ff0de4c8f14f401bbf196f2ef3971cdbe5fd08d1d/jiter-0.12.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a947920902420a6ada6ad51892082521978e9dd44a802663b001436e4b771684", size = 345932, upload-time = "2025-11-09T20:48:32.658Z" },
- { url = "https://files.pythonhosted.org/packages/d5/5d/4d94835889edd01ad0e2dbfc05f7bdfaed46292e7b504a6ac7839aa00edb/jiter-0.12.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:add5e227e0554d3a52cf390a7635edaffdf4f8fce4fdbcef3cc2055bb396a30c", size = 367243, upload-time = "2025-11-09T20:48:34.093Z" },
- { url = "https://files.pythonhosted.org/packages/fd/76/0051b0ac2816253a99d27baf3dda198663aff882fa6ea7deeb94046da24e/jiter-0.12.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f9b1cda8fcb736250d7e8711d4580ebf004a46771432be0ae4796944b5dfa5d", size = 479315, upload-time = "2025-11-09T20:48:35.507Z" },
- { url = "https://files.pythonhosted.org/packages/70/ae/83f793acd68e5cb24e483f44f482a1a15601848b9b6f199dacb970098f77/jiter-0.12.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deeb12a2223fe0135c7ff1356a143d57f95bbf1f4a66584f1fc74df21d86b993", size = 380714, upload-time = "2025-11-09T20:48:40.014Z" },
- { url = "https://files.pythonhosted.org/packages/b1/5e/4808a88338ad2c228b1126b93fcd8ba145e919e886fe910d578230dabe3b/jiter-0.12.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c596cc0f4cb574877550ce4ecd51f8037469146addd676d7c1a30ebe6391923f", size = 365168, upload-time = "2025-11-09T20:48:41.462Z" },
- { url = "https://files.pythonhosted.org/packages/0c/d4/04619a9e8095b42aef436b5aeb4c0282b4ff1b27d1db1508df9f5dc82750/jiter-0.12.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ab4c823b216a4aeab3fdbf579c5843165756bd9ad87cc6b1c65919c4715f783", size = 387893, upload-time = "2025-11-09T20:48:42.921Z" },
- { url = "https://files.pythonhosted.org/packages/17/ea/d3c7e62e4546fdc39197fa4a4315a563a89b95b6d54c0d25373842a59cbe/jiter-0.12.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e427eee51149edf962203ff8db75a7514ab89be5cb623fb9cea1f20b54f1107b", size = 520828, upload-time = "2025-11-09T20:48:44.278Z" },
- { url = "https://files.pythonhosted.org/packages/cc/0b/c6d3562a03fd767e31cb119d9041ea7958c3c80cb3d753eafb19b3b18349/jiter-0.12.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:edb868841f84c111255ba5e80339d386d937ec1fdce419518ce1bd9370fac5b6", size = 511009, upload-time = "2025-11-09T20:48:45.726Z" },
- { url = "https://files.pythonhosted.org/packages/aa/51/2cb4468b3448a8385ebcd15059d325c9ce67df4e2758d133ab9442b19834/jiter-0.12.0-cp314-cp314t-win32.whl", hash = "sha256:8bbcfe2791dfdb7c5e48baf646d37a6a3dcb5a97a032017741dea9f817dca183", size = 205110, upload-time = "2025-11-09T20:48:47.033Z" },
- { url = "https://files.pythonhosted.org/packages/b2/c5/ae5ec83dec9c2d1af805fd5fe8f74ebded9c8670c5210ec7820ce0dbeb1e/jiter-0.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2fa940963bf02e1d8226027ef461e36af472dea85d36054ff835aeed944dd873", size = 205223, upload-time = "2025-11-09T20:48:49.076Z" },
- { url = "https://files.pythonhosted.org/packages/97/9a/3c5391907277f0e55195550cf3fa8e293ae9ee0c00fb402fec1e38c0c82f/jiter-0.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:506c9708dd29b27288f9f8f1140c3cb0e3d8ddb045956d7757b1fa0e0f39a473", size = 185564, upload-time = "2025-11-09T20:48:50.376Z" },
- { url = "https://files.pythonhosted.org/packages/7d/da/3e1fbd1f03f89ff0b4469d481be0b5cf2880c8e7b56fd80303b3ab5ae52d/jiter-0.12.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c9d28b218d5f9e5f69a0787a196322a5056540cb378cac8ff542b4fa7219966c", size = 319378, upload-time = "2025-11-09T20:48:51.761Z" },
- { url = "https://files.pythonhosted.org/packages/c7/4e/e07d69285e9e19a153050a6d281d2f0968600753a8fed8a3a141d6ffc140/jiter-0.12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0ee12028daf8cfcf880dd492349a122a64f42c059b6c62a2b0c96a83a8da820", size = 312195, upload-time = "2025-11-09T20:48:53.547Z" },
- { url = "https://files.pythonhosted.org/packages/2d/82/1f1cb5231b36af9f3d6d5b6030e70110faf14fd143419fc5fe7d852e691a/jiter-0.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b135ebe757a82d67ed2821526e72d0acf87dd61f6013e20d3c45b8048af927b", size = 352777, upload-time = "2025-11-09T20:48:55.058Z" },
- { url = "https://files.pythonhosted.org/packages/6a/5e/728393bbbc99b31e8f7a4fdd8fa55e455a0a9648f79097d9088baf1f676f/jiter-0.12.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15d7fafb81af8a9e3039fc305529a61cd933eecee33b4251878a1c89859552a3", size = 370738, upload-time = "2025-11-09T20:48:56.632Z" },
- { url = "https://files.pythonhosted.org/packages/30/08/ac92f0df7b14ac82f2fe0a382a8000e600ab90af95798d4a7db0c1bd0736/jiter-0.12.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92d1f41211d8a8fe412faad962d424d334764c01dac6691c44691c2e4d3eedaf", size = 483744, upload-time = "2025-11-09T20:48:58.006Z" },
- { url = "https://files.pythonhosted.org/packages/7e/f4/dbfa4e759a2b82e969a14c3d0a91b176f1ed94717183a2f495cf94a651b9/jiter-0.12.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a64a48d7c917b8f32f25c176df8749ecf08cec17c466114727efe7441e17f6d", size = 382888, upload-time = "2025-11-09T20:48:59.471Z" },
- { url = "https://files.pythonhosted.org/packages/6c/d9/b86fff7f748b0bb54222a8f132ffaf4d1be56b4591fa76d3cfdd701a33e5/jiter-0.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:122046f3b3710b85de99d9aa2f3f0492a8233a2f54a64902b096efc27ea747b5", size = 366465, upload-time = "2025-11-09T20:49:01.408Z" },
- { url = "https://files.pythonhosted.org/packages/93/3c/1152d8b433317a568927e13c1b125c680e6c058ff5d304833be8469bd4f2/jiter-0.12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:27ec39225e03c32c6b863ba879deb427882f243ae46f0d82d68b695fa5b48b40", size = 392603, upload-time = "2025-11-09T20:49:02.784Z" },
- { url = "https://files.pythonhosted.org/packages/6e/92/ff19d8fb87f3f9438eb7464862c8d0126455bc046b345d59b21443640c62/jiter-0.12.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26b9e155ddc132225a39b1995b3b9f0fe0f79a6d5cbbeacf103271e7d309b404", size = 523780, upload-time = "2025-11-09T20:49:04.42Z" },
- { url = "https://files.pythonhosted.org/packages/87/3a/4260e2d84e4a293c36d2a8e8b8dcd69609c671f3bd310e4625359217c517/jiter-0.12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9ab05b7c58e29bb9e60b70c2e0094c98df79a1e42e397b9bb6eaa989b7a66dd0", size = 514874, upload-time = "2025-11-09T20:49:05.844Z" },
- { url = "https://files.pythonhosted.org/packages/2e/f7/574d2cb79e86feb035ade18c2254da71d04417555907c9df51dd6b183426/jiter-0.12.0-cp39-cp39-win32.whl", hash = "sha256:59f9f9df87ed499136db1c2b6c9efb902f964bed42a582ab7af413b6a293e7b0", size = 208329, upload-time = "2025-11-09T20:49:07.444Z" },
- { url = "https://files.pythonhosted.org/packages/05/ce/50725ec39782d8c973f19ae2d7dd3d192d01332c7cbde48c75e16a3e85a9/jiter-0.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:d3719596a1ebe7a48a498e8d5d0c4bf7553321d4c3eee1d620628d51351a3928", size = 206557, upload-time = "2025-11-09T20:49:08.888Z" },
- { url = "https://files.pythonhosted.org/packages/fe/54/5339ef1ecaa881c6948669956567a64d2670941925f245c434f494ffb0e5/jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:4739a4657179ebf08f85914ce50332495811004cc1747852e8b2041ed2aab9b8", size = 311144, upload-time = "2025-11-09T20:49:10.503Z" },
- { url = "https://files.pythonhosted.org/packages/27/74/3446c652bffbd5e81ab354e388b1b5fc1d20daac34ee0ed11ff096b1b01a/jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:41da8def934bf7bec16cb24bd33c0ca62126d2d45d81d17b864bd5ad721393c3", size = 305877, upload-time = "2025-11-09T20:49:12.269Z" },
- { url = "https://files.pythonhosted.org/packages/a1/f4/ed76ef9043450f57aac2d4fbeb27175aa0eb9c38f833be6ef6379b3b9a86/jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c44ee814f499c082e69872d426b624987dbc5943ab06e9bbaa4f81989fdb79e", size = 340419, upload-time = "2025-11-09T20:49:13.803Z" },
- { url = "https://files.pythonhosted.org/packages/21/01/857d4608f5edb0664aa791a3d45702e1a5bcfff9934da74035e7b9803846/jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd2097de91cf03eaa27b3cbdb969addf83f0179c6afc41bbc4513705e013c65d", size = 347212, upload-time = "2025-11-09T20:49:15.643Z" },
- { url = "https://files.pythonhosted.org/packages/cb/f5/12efb8ada5f5c9edc1d4555fe383c1fb2eac05ac5859258a72d61981d999/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:e8547883d7b96ef2e5fe22b88f8a4c8725a56e7f4abafff20fd5272d634c7ecb", size = 309974, upload-time = "2025-11-09T20:49:17.187Z" },
- { url = "https://files.pythonhosted.org/packages/85/15/d6eb3b770f6a0d332675141ab3962fd4a7c270ede3515d9f3583e1d28276/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:89163163c0934854a668ed783a2546a0617f71706a2551a4a0666d91ab365d6b", size = 304233, upload-time = "2025-11-09T20:49:18.734Z" },
- { url = "https://files.pythonhosted.org/packages/8c/3e/e7e06743294eea2cf02ced6aa0ff2ad237367394e37a0e2b4a1108c67a36/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d96b264ab7d34bbb2312dedc47ce07cd53f06835eacbc16dde3761f47c3a9e7f", size = 338537, upload-time = "2025-11-09T20:49:20.317Z" },
- { url = "https://files.pythonhosted.org/packages/2f/9c/6753e6522b8d0ef07d3a3d239426669e984fb0eba15a315cdbc1253904e4/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24e864cb30ab82311c6425655b0cdab0a98c5d973b065c66a3f020740c2324c", size = 346110, upload-time = "2025-11-09T20:49:21.817Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/5a/41da76c5ea07bec1b0472b6b2fdb1b651074d504b19374d7e130e0cdfb25/jiter-0.13.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2ffc63785fd6c7977defe49b9824ae6ce2b2e2b77ce539bdaf006c26da06342e", size = 311164, upload-time = "2026-02-02T12:35:17.688Z" },
+ { url = "https://files.pythonhosted.org/packages/40/cb/4a1bf994a3e869f0d39d10e11efb471b76d0ad70ecbfb591427a46c880c2/jiter-0.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4a638816427006c1e3f0013eb66d391d7a3acda99a7b0cf091eff4497ccea33a", size = 320296, upload-time = "2026-02-02T12:35:19.828Z" },
+ { url = "https://files.pythonhosted.org/packages/09/82/acd71ca9b50ecebadc3979c541cd717cce2fe2bc86236f4fa597565d8f1a/jiter-0.13.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19928b5d1ce0ff8c1ee1b9bdef3b5bfc19e8304f1b904e436caf30bc15dc6cf5", size = 352742, upload-time = "2026-02-02T12:35:21.258Z" },
+ { url = "https://files.pythonhosted.org/packages/71/03/d1fc996f3aecfd42eb70922edecfb6dd26421c874503e241153ad41df94f/jiter-0.13.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:309549b778b949d731a2f0e1594a3f805716be704a73bf3ad9a807eed5eb5721", size = 363145, upload-time = "2026-02-02T12:35:24.653Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/61/a30492366378cc7a93088858f8991acd7d959759fe6138c12a4644e58e81/jiter-0.13.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcdabaea26cb04e25df3103ce47f97466627999260290349a88c8136ecae0060", size = 487683, upload-time = "2026-02-02T12:35:26.162Z" },
+ { url = "https://files.pythonhosted.org/packages/20/4e/4223cffa9dbbbc96ed821c5aeb6bca510848c72c02086d1ed3f1da3d58a7/jiter-0.13.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a3a377af27b236abbf665a69b2bdd680e3b5a0bd2af825cd3b81245279a7606c", size = 373579, upload-time = "2026-02-02T12:35:27.582Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/c9/b0489a01329ab07a83812d9ebcffe7820a38163c6d9e7da644f926ff877c/jiter-0.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe49d3ff6db74321f144dff9addd4a5874d3105ac5ba7c5b77fac099cfae31ae", size = 362904, upload-time = "2026-02-02T12:35:28.925Z" },
+ { url = "https://files.pythonhosted.org/packages/05/af/53e561352a44afcba9a9bc67ee1d320b05a370aed8df54eafe714c4e454d/jiter-0.13.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2113c17c9a67071b0f820733c0893ed1d467b5fcf4414068169e5c2cabddb1e2", size = 392380, upload-time = "2026-02-02T12:35:30.385Z" },
+ { url = "https://files.pythonhosted.org/packages/76/2a/dd805c3afb8ed5b326c5ae49e725d1b1255b9754b1b77dbecdc621b20773/jiter-0.13.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ab1185ca5c8b9491b55ebf6c1e8866b8f68258612899693e24a92c5fdb9455d5", size = 517939, upload-time = "2026-02-02T12:35:31.865Z" },
+ { url = "https://files.pythonhosted.org/packages/20/2a/7b67d76f55b8fe14c937e7640389612f05f9a4145fc28ae128aaa5e62257/jiter-0.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9621ca242547edc16400981ca3231e0c91c0c4c1ab8573a596cd9bb3575d5c2b", size = 551696, upload-time = "2026-02-02T12:35:33.306Z" },
+ { url = "https://files.pythonhosted.org/packages/85/9c/57cdd64dac8f4c6ab8f994fe0eb04dc9fd1db102856a4458fcf8a99dfa62/jiter-0.13.0-cp310-cp310-win32.whl", hash = "sha256:a7637d92b1c9d7a771e8c56f445c7f84396d48f2e756e5978840ecba2fac0894", size = 204592, upload-time = "2026-02-02T12:35:34.58Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/38/f4f3ea5788b8a5bae7510a678cdc747eda0c45ffe534f9878ff37e7cf3b3/jiter-0.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c1b609e5cbd2f52bb74fb721515745b407df26d7b800458bd97cb3b972c29e7d", size = 206016, upload-time = "2026-02-02T12:35:36.435Z" },
+ { url = "https://files.pythonhosted.org/packages/71/29/499f8c9eaa8a16751b1c0e45e6f5f1761d180da873d417996cc7bddc8eef/jiter-0.13.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ea026e70a9a28ebbdddcbcf0f1323128a8db66898a06eaad3a4e62d2f554d096", size = 311157, upload-time = "2026-02-02T12:35:37.758Z" },
+ { url = "https://files.pythonhosted.org/packages/50/f6/566364c777d2ab450b92100bea11333c64c38d32caf8dc378b48e5b20c46/jiter-0.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66aa3e663840152d18cc8ff1e4faad3dd181373491b9cfdc6004b92198d67911", size = 319729, upload-time = "2026-02-02T12:35:39.246Z" },
+ { url = "https://files.pythonhosted.org/packages/73/dd/560f13ec5e4f116d8ad2658781646cca91b617ae3b8758d4a5076b278f70/jiter-0.13.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3524798e70655ff19aec58c7d05adb1f074fecff62da857ea9be2b908b6d701", size = 354766, upload-time = "2026-02-02T12:35:40.662Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/0d/061faffcfe94608cbc28a0d42a77a74222bdf5055ccdbe5fd2292b94f510/jiter-0.13.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec7e287d7fbd02cb6e22f9a00dd9c9cd504c40a61f2c61e7e1f9690a82726b4c", size = 362587, upload-time = "2026-02-02T12:35:42.025Z" },
+ { url = "https://files.pythonhosted.org/packages/92/c9/c66a7864982fd38a9773ec6e932e0398d1262677b8c60faecd02ffb67bf3/jiter-0.13.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:47455245307e4debf2ce6c6e65a717550a0244231240dcf3b8f7d64e4c2f22f4", size = 487537, upload-time = "2026-02-02T12:35:43.459Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/86/84eb4352cd3668f16d1a88929b5888a3fe0418ea8c1dfc2ad4e7bf6e069a/jiter-0.13.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ee9da221dca6e0429c2704c1b3655fe7b025204a71d4d9b73390c759d776d165", size = 373717, upload-time = "2026-02-02T12:35:44.928Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/09/9fe4c159358176f82d4390407a03f506a8659ed13ca3ac93a843402acecf/jiter-0.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24ab43126d5e05f3d53a36a8e11eb2f23304c6c1117844aaaf9a0aa5e40b5018", size = 362683, upload-time = "2026-02-02T12:35:46.636Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/5e/85f3ab9caca0c1d0897937d378b4a515cae9e119730563572361ea0c48ae/jiter-0.13.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9da38b4fedde4fb528c740c2564628fbab737166a0e73d6d46cb4bb5463ff411", size = 392345, upload-time = "2026-02-02T12:35:48.088Z" },
+ { url = "https://files.pythonhosted.org/packages/12/4c/05b8629ad546191939e6f0c2f17e29f542a398f4a52fb987bc70b6d1eb8b/jiter-0.13.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0b34c519e17658ed88d5047999a93547f8889f3c1824120c26ad6be5f27b6cf5", size = 517775, upload-time = "2026-02-02T12:35:49.482Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/88/367ea2eb6bc582c7052e4baf5ddf57ebe5ab924a88e0e09830dfb585c02d/jiter-0.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d2a6394e6af690d462310a86b53c47ad75ac8c21dc79f120714ea449979cb1d3", size = 551325, upload-time = "2026-02-02T12:35:51.104Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/12/fa377ffb94a2f28c41afaed093e0d70cfe512035d5ecb0cad0ae4792d35e/jiter-0.13.0-cp311-cp311-win32.whl", hash = "sha256:0f0c065695f616a27c920a56ad0d4fc46415ef8b806bf8fc1cacf25002bd24e1", size = 204709, upload-time = "2026-02-02T12:35:52.467Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/16/8e8203ce92f844dfcd3d9d6a5a7322c77077248dbb12da52d23193a839cd/jiter-0.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:0733312953b909688ae3c2d58d043aa040f9f1a6a75693defed7bc2cc4bf2654", size = 204560, upload-time = "2026-02-02T12:35:53.925Z" },
+ { url = "https://files.pythonhosted.org/packages/44/26/97cc40663deb17b9e13c3a5cf29251788c271b18ee4d262c8f94798b8336/jiter-0.13.0-cp311-cp311-win_arm64.whl", hash = "sha256:5d9b34ad56761b3bf0fbe8f7e55468704107608512350962d3317ffd7a4382d5", size = 189608, upload-time = "2026-02-02T12:35:55.304Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/30/7687e4f87086829955013ca12a9233523349767f69653ebc27036313def9/jiter-0.13.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0a2bd69fc1d902e89925fc34d1da51b2128019423d7b339a45d9e99c894e0663", size = 307958, upload-time = "2026-02-02T12:35:57.165Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/27/e57f9a783246ed95481e6749cc5002a8a767a73177a83c63ea71f0528b90/jiter-0.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f917a04240ef31898182f76a332f508f2cc4b57d2b4d7ad2dbfebbfe167eb505", size = 318597, upload-time = "2026-02-02T12:35:58.591Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/52/e5719a60ac5d4d7c5995461a94ad5ef962a37c8bf5b088390e6fad59b2ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1e2b199f446d3e82246b4fd9236d7cb502dc2222b18698ba0d986d2fecc6152", size = 348821, upload-time = "2026-02-02T12:36:00.093Z" },
+ { url = "https://files.pythonhosted.org/packages/61/db/c1efc32b8ba4c740ab3fc2d037d8753f67685f475e26b9d6536a4322bcdd/jiter-0.13.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04670992b576fa65bd056dbac0c39fe8bd67681c380cb2b48efa885711d9d726", size = 364163, upload-time = "2026-02-02T12:36:01.937Z" },
+ { url = "https://files.pythonhosted.org/packages/55/8a/fb75556236047c8806995671a18e4a0ad646ed255276f51a20f32dceaeec/jiter-0.13.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a1aff1fbdb803a376d4d22a8f63f8e7ccbce0b4890c26cc7af9e501ab339ef0", size = 483709, upload-time = "2026-02-02T12:36:03.41Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/16/43512e6ee863875693a8e6f6d532e19d650779d6ba9a81593ae40a9088ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b3fb8c2053acaef8580809ac1d1f7481a0a0bdc012fd7f5d8b18fb696a5a089", size = 370480, upload-time = "2026-02-02T12:36:04.791Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/4c/09b93e30e984a187bc8aaa3510e1ec8dcbdcd71ca05d2f56aac0492453aa/jiter-0.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdaba7d87e66f26a2c45d8cbadcbfc4bf7884182317907baf39cfe9775bb4d93", size = 360735, upload-time = "2026-02-02T12:36:06.994Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/1b/46c5e349019874ec5dfa508c14c37e29864ea108d376ae26d90bee238cd7/jiter-0.13.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7b88d649135aca526da172e48083da915ec086b54e8e73a425ba50999468cc08", size = 391814, upload-time = "2026-02-02T12:36:08.368Z" },
+ { url = "https://files.pythonhosted.org/packages/15/9e/26184760e85baee7162ad37b7912797d2077718476bf91517641c92b3639/jiter-0.13.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e404ea551d35438013c64b4f357b0474c7abf9f781c06d44fcaf7a14c69ff9e2", size = 513990, upload-time = "2026-02-02T12:36:09.993Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/34/2c9355247d6debad57a0a15e76ab1566ab799388042743656e566b3b7de1/jiter-0.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1f4748aad1b4a93c8bdd70f604d0f748cdc0e8744c5547798acfa52f10e79228", size = 548021, upload-time = "2026-02-02T12:36:11.376Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/4a/9f2c23255d04a834398b9c2e0e665382116911dc4d06b795710503cdad25/jiter-0.13.0-cp312-cp312-win32.whl", hash = "sha256:0bf670e3b1445fc4d31612199f1744f67f889ee1bbae703c4b54dc097e5dd394", size = 203024, upload-time = "2026-02-02T12:36:12.682Z" },
+ { url = "https://files.pythonhosted.org/packages/09/ee/f0ae675a957ae5a8f160be3e87acea6b11dc7b89f6b7ab057e77b2d2b13a/jiter-0.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:15db60e121e11fe186c0b15236bd5d18381b9ddacdcf4e659feb96fc6c969c92", size = 205424, upload-time = "2026-02-02T12:36:13.93Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/02/ae611edf913d3cbf02c97cdb90374af2082c48d7190d74c1111dde08bcdd/jiter-0.13.0-cp312-cp312-win_arm64.whl", hash = "sha256:41f92313d17989102f3cb5dd533a02787cdb99454d494344b0361355da52fcb9", size = 186818, upload-time = "2026-02-02T12:36:15.308Z" },
+ { url = "https://files.pythonhosted.org/packages/91/9c/7ee5a6ff4b9991e1a45263bfc46731634c4a2bde27dfda6c8251df2d958c/jiter-0.13.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1f8a55b848cbabf97d861495cd65f1e5c590246fabca8b48e1747c4dfc8f85bf", size = 306897, upload-time = "2026-02-02T12:36:16.748Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/02/be5b870d1d2be5dd6a91bdfb90f248fbb7dcbd21338f092c6b89817c3dbf/jiter-0.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f556aa591c00f2c45eb1b89f68f52441a016034d18b65da60e2d2875bbbf344a", size = 317507, upload-time = "2026-02-02T12:36:18.351Z" },
+ { url = "https://files.pythonhosted.org/packages/da/92/b25d2ec333615f5f284f3a4024f7ce68cfa0604c322c6808b2344c7f5d2b/jiter-0.13.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7e1d61da332ec412350463891923f960c3073cf1aae93b538f0bb4c8cd46efb", size = 350560, upload-time = "2026-02-02T12:36:19.746Z" },
+ { url = "https://files.pythonhosted.org/packages/be/ec/74dcb99fef0aca9fbe56b303bf79f6bd839010cb18ad41000bf6cc71eec0/jiter-0.13.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3097d665a27bc96fd9bbf7f86178037db139f319f785e4757ce7ccbf390db6c2", size = 363232, upload-time = "2026-02-02T12:36:21.243Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/37/f17375e0bb2f6a812d4dd92d7616e41917f740f3e71343627da9db2824ce/jiter-0.13.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d01ecc3a8cbdb6f25a37bd500510550b64ddf9f7d64a107d92f3ccb25035d0f", size = 483727, upload-time = "2026-02-02T12:36:22.688Z" },
+ { url = "https://files.pythonhosted.org/packages/77/d2/a71160a5ae1a1e66c1395b37ef77da67513b0adba73b993a27fbe47eb048/jiter-0.13.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed9bbc30f5d60a3bdf63ae76beb3f9db280d7f195dfcfa61af792d6ce912d159", size = 370799, upload-time = "2026-02-02T12:36:24.106Z" },
+ { url = "https://files.pythonhosted.org/packages/01/99/ed5e478ff0eb4e8aa5fd998f9d69603c9fd3f32de3bd16c2b1194f68361c/jiter-0.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98fbafb6e88256f4454de33c1f40203d09fc33ed19162a68b3b257b29ca7f663", size = 359120, upload-time = "2026-02-02T12:36:25.519Z" },
+ { url = "https://files.pythonhosted.org/packages/16/be/7ffd08203277a813f732ba897352797fa9493faf8dc7995b31f3d9cb9488/jiter-0.13.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5467696f6b827f1116556cb0db620440380434591e93ecee7fd14d1a491b6daa", size = 390664, upload-time = "2026-02-02T12:36:26.866Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/84/e0787856196d6d346264d6dcccb01f741e5f0bd014c1d9a2ebe149caf4f3/jiter-0.13.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2d08c9475d48b92892583df9da592a0e2ac49bcd41fae1fec4f39ba6cf107820", size = 513543, upload-time = "2026-02-02T12:36:28.217Z" },
+ { url = "https://files.pythonhosted.org/packages/65/50/ecbd258181c4313cf79bca6c88fb63207d04d5bf5e4f65174114d072aa55/jiter-0.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:aed40e099404721d7fcaf5b89bd3b4568a4666358bcac7b6b15c09fb6252ab68", size = 547262, upload-time = "2026-02-02T12:36:29.678Z" },
+ { url = "https://files.pythonhosted.org/packages/27/da/68f38d12e7111d2016cd198161b36e1f042bd115c169255bcb7ec823a3bf/jiter-0.13.0-cp313-cp313-win32.whl", hash = "sha256:36ebfbcffafb146d0e6ffb3e74d51e03d9c35ce7c625c8066cdbfc7b953bdc72", size = 200630, upload-time = "2026-02-02T12:36:31.808Z" },
+ { url = "https://files.pythonhosted.org/packages/25/65/3bd1a972c9a08ecd22eb3b08a95d1941ebe6938aea620c246cf426ae09c2/jiter-0.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:8d76029f077379374cf0dbc78dbe45b38dec4a2eb78b08b5194ce836b2517afc", size = 202602, upload-time = "2026-02-02T12:36:33.679Z" },
+ { url = "https://files.pythonhosted.org/packages/15/fe/13bd3678a311aa67686bb303654792c48206a112068f8b0b21426eb6851e/jiter-0.13.0-cp313-cp313-win_arm64.whl", hash = "sha256:bb7613e1a427cfcb6ea4544f9ac566b93d5bf67e0d48c787eca673ff9c9dff2b", size = 185939, upload-time = "2026-02-02T12:36:35.065Z" },
+ { url = "https://files.pythonhosted.org/packages/49/19/a929ec002ad3228bc97ca01dbb14f7632fffdc84a95ec92ceaf4145688ae/jiter-0.13.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fa476ab5dd49f3bf3a168e05f89358c75a17608dbabb080ef65f96b27c19ab10", size = 316616, upload-time = "2026-02-02T12:36:36.579Z" },
+ { url = "https://files.pythonhosted.org/packages/52/56/d19a9a194afa37c1728831e5fb81b7722c3de18a3109e8f282bfc23e587a/jiter-0.13.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade8cb6ff5632a62b7dbd4757d8c5573f7a2e9ae285d6b5b841707d8363205ef", size = 346850, upload-time = "2026-02-02T12:36:38.058Z" },
+ { url = "https://files.pythonhosted.org/packages/36/4a/94e831c6bf287754a8a019cb966ed39ff8be6ab78cadecf08df3bb02d505/jiter-0.13.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9950290340acc1adaded363edd94baebcee7dabdfa8bee4790794cd5cfad2af6", size = 358551, upload-time = "2026-02-02T12:36:39.417Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/ec/a4c72c822695fa80e55d2b4142b73f0012035d9fcf90eccc56bc060db37c/jiter-0.13.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2b4972c6df33731aac0742b64fd0d18e0a69bc7d6e03108ce7d40c85fd9e3e6d", size = 201950, upload-time = "2026-02-02T12:36:40.791Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/00/393553ec27b824fbc29047e9c7cd4a3951d7fbe4a76743f17e44034fa4e4/jiter-0.13.0-cp313-cp313t-win_arm64.whl", hash = "sha256:701a1e77d1e593c1b435315ff625fd071f0998c5f02792038a5ca98899261b7d", size = 185852, upload-time = "2026-02-02T12:36:42.077Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/f5/f1997e987211f6f9bd71b8083047b316208b4aca0b529bb5f8c96c89ef3e/jiter-0.13.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:cc5223ab19fe25e2f0bf2643204ad7318896fe3729bf12fde41b77bfc4fafff0", size = 308804, upload-time = "2026-02-02T12:36:43.496Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/8f/5482a7677731fd44881f0204981ce2d7175db271f82cba2085dd2212e095/jiter-0.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9776ebe51713acf438fd9b4405fcd86893ae5d03487546dae7f34993217f8a91", size = 318787, upload-time = "2026-02-02T12:36:45.071Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/b9/7257ac59778f1cd025b26a23c5520a36a424f7f1b068f2442a5b499b7464/jiter-0.13.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:879e768938e7b49b5e90b7e3fecc0dbec01b8cb89595861fb39a8967c5220d09", size = 353880, upload-time = "2026-02-02T12:36:47.365Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/87/719eec4a3f0841dad99e3d3604ee4cba36af4419a76f3cb0b8e2e691ad67/jiter-0.13.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:682161a67adea11e3aae9038c06c8b4a9a71023228767477d683f69903ebc607", size = 366702, upload-time = "2026-02-02T12:36:48.871Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/65/415f0a75cf6921e43365a1bc227c565cb949caca8b7532776e430cbaa530/jiter-0.13.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a13b68cd1cd8cc9de8f244ebae18ccb3e4067ad205220ef324c39181e23bbf66", size = 486319, upload-time = "2026-02-02T12:36:53.006Z" },
+ { url = "https://files.pythonhosted.org/packages/54/a2/9e12b48e82c6bbc6081fd81abf915e1443add1b13d8fc586e1d90bb02bb8/jiter-0.13.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87ce0f14c6c08892b610686ae8be350bf368467b6acd5085a5b65441e2bf36d2", size = 372289, upload-time = "2026-02-02T12:36:54.593Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/c1/e4693f107a1789a239c759a432e9afc592366f04e901470c2af89cfd28e1/jiter-0.13.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c365005b05505a90d1c47856420980d0237adf82f70c4aff7aebd3c1cc143ad", size = 360165, upload-time = "2026-02-02T12:36:56.112Z" },
+ { url = "https://files.pythonhosted.org/packages/17/08/91b9ea976c1c758240614bd88442681a87672eebc3d9a6dde476874e706b/jiter-0.13.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1317fdffd16f5873e46ce27d0e0f7f4f90f0cdf1d86bf6abeaea9f63ca2c401d", size = 389634, upload-time = "2026-02-02T12:36:57.495Z" },
+ { url = "https://files.pythonhosted.org/packages/18/23/58325ef99390d6d40427ed6005bf1ad54f2577866594bcf13ce55675f87d/jiter-0.13.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c05b450d37ba0c9e21c77fef1f205f56bcee2330bddca68d344baebfc55ae0df", size = 514933, upload-time = "2026-02-02T12:36:58.909Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/25/69f1120c7c395fd276c3996bb8adefa9c6b84c12bb7111e5c6ccdcd8526d/jiter-0.13.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:775e10de3849d0631a97c603f996f518159272db00fdda0a780f81752255ee9d", size = 548842, upload-time = "2026-02-02T12:37:00.433Z" },
+ { url = "https://files.pythonhosted.org/packages/18/05/981c9669d86850c5fbb0d9e62bba144787f9fba84546ba43d624ee27ef29/jiter-0.13.0-cp314-cp314-win32.whl", hash = "sha256:632bf7c1d28421c00dd8bbb8a3bac5663e1f57d5cd5ed962bce3c73bf62608e6", size = 202108, upload-time = "2026-02-02T12:37:01.718Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/96/cdcf54dd0b0341db7d25413229888a346c7130bd20820530905fdb65727b/jiter-0.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:f22ef501c3f87ede88f23f9b11e608581c14f04db59b6a801f354397ae13739f", size = 204027, upload-time = "2026-02-02T12:37:03.075Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/f9/724bcaaab7a3cd727031fe4f6995cb86c4bd344909177c186699c8dec51a/jiter-0.13.0-cp314-cp314-win_arm64.whl", hash = "sha256:07b75fe09a4ee8e0c606200622e571e44943f47254f95e2436c8bdcaceb36d7d", size = 187199, upload-time = "2026-02-02T12:37:04.414Z" },
+ { url = "https://files.pythonhosted.org/packages/62/92/1661d8b9fd6a3d7a2d89831db26fe3c1509a287d83ad7838831c7b7a5c7e/jiter-0.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:964538479359059a35fb400e769295d4b315ae61e4105396d355a12f7fef09f0", size = 318423, upload-time = "2026-02-02T12:37:05.806Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/3b/f77d342a54d4ebcd128e520fc58ec2f5b30a423b0fd26acdfc0c6fef8e26/jiter-0.13.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e104da1db1c0991b3eaed391ccd650ae8d947eab1480c733e5a3fb28d4313e40", size = 351438, upload-time = "2026-02-02T12:37:07.189Z" },
+ { url = "https://files.pythonhosted.org/packages/76/b3/ba9a69f0e4209bd3331470c723c2f5509e6f0482e416b612431a5061ed71/jiter-0.13.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e3a5f0cde8ff433b8e88e41aa40131455420fb3649a3c7abdda6145f8cb7202", size = 364774, upload-time = "2026-02-02T12:37:08.579Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/16/6cdb31fa342932602458dbb631bfbd47f601e03d2e4950740e0b2100b570/jiter-0.13.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57aab48f40be1db920a582b30b116fe2435d184f77f0e4226f546794cedd9cf0", size = 487238, upload-time = "2026-02-02T12:37:10.066Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/b1/956cc7abaca8d95c13aa8d6c9b3f3797241c246cd6e792934cc4c8b250d2/jiter-0.13.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7772115877c53f62beeb8fd853cab692dbc04374ef623b30f997959a4c0e7e95", size = 372892, upload-time = "2026-02-02T12:37:11.656Z" },
+ { url = "https://files.pythonhosted.org/packages/26/c4/97ecde8b1e74f67b8598c57c6fccf6df86ea7861ed29da84629cdbba76c4/jiter-0.13.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1211427574b17b633cfceba5040de8081e5abf114f7a7602f73d2e16f9fdaa59", size = 360309, upload-time = "2026-02-02T12:37:13.244Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/d7/eabe3cf46715854ccc80be2cd78dd4c36aedeb30751dbf85a1d08c14373c/jiter-0.13.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7beae3a3d3b5212d3a55d2961db3c292e02e302feb43fce6a3f7a31b90ea6dfe", size = 389607, upload-time = "2026-02-02T12:37:14.881Z" },
+ { url = "https://files.pythonhosted.org/packages/df/2d/03963fc0804e6109b82decfb9974eb92df3797fe7222428cae12f8ccaa0c/jiter-0.13.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e5562a0f0e90a6223b704163ea28e831bd3a9faa3512a711f031611e6b06c939", size = 514986, upload-time = "2026-02-02T12:37:16.326Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/6c/8c83b45eb3eb1c1e18d841fe30b4b5bc5619d781267ca9bc03e005d8fd0a/jiter-0.13.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:6c26a424569a59140fb51160a56df13f438a2b0967365e987889186d5fc2f6f9", size = 548756, upload-time = "2026-02-02T12:37:17.736Z" },
+ { url = "https://files.pythonhosted.org/packages/47/66/eea81dfff765ed66c68fd2ed8c96245109e13c896c2a5015c7839c92367e/jiter-0.13.0-cp314-cp314t-win32.whl", hash = "sha256:24dc96eca9f84da4131cdf87a95e6ce36765c3b156fc9ae33280873b1c32d5f6", size = 201196, upload-time = "2026-02-02T12:37:19.101Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/32/4ac9c7a76402f8f00d00842a7f6b83b284d0cf7c1e9d4227bc95aa6d17fa/jiter-0.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0a8d76c7524087272c8ae913f5d9d608bd839154b62c4322ef65723d2e5bb0b8", size = 204215, upload-time = "2026-02-02T12:37:20.495Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/8e/7def204fea9f9be8b3c21a6f2dd6c020cf56c7d5ff753e0e23ed7f9ea57e/jiter-0.13.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2c26cf47e2cad140fa23b6d58d435a7c0161f5c514284802f25e87fddfe11024", size = 187152, upload-time = "2026-02-02T12:37:22.124Z" },
+ { url = "https://files.pythonhosted.org/packages/41/95/8e6611379c9ce8534ff94dd800c50d6d0061b2c9ae6386fbcd86c7386f0a/jiter-0.13.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:4397ee562b9f69d283e5674445551b47a5e8076fdde75e71bfac5891113dc543", size = 313635, upload-time = "2026-02-02T12:37:23.545Z" },
+ { url = "https://files.pythonhosted.org/packages/70/ea/17db64dcaf84bbb187874232222030ea4d689e6008f93bda6e7c691bc4c7/jiter-0.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f90023f8f672e13ea1819507d2d21b9d2d1c18920a3b3a5f1541955a85b5504", size = 309761, upload-time = "2026-02-02T12:37:25.075Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/36/b2e2a7b12b94ecc7248acf2a8fe6288be893d1ebb9728655ceada22f00ad/jiter-0.13.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed0240dd1536a98c3ab55e929c60dfff7c899fecafcb7d01161b21a99fc8c363", size = 355245, upload-time = "2026-02-02T12:37:26.646Z" },
+ { url = "https://files.pythonhosted.org/packages/77/3f/5b159663c5be622daec20074c997bb66bc1fac63c167c02aef3df476fb32/jiter-0.13.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6207fc61c395b26fffdcf637a0b06b4326f35bfa93c6e92fe1a166a21aeb6731", size = 365842, upload-time = "2026-02-02T12:37:28.207Z" },
+ { url = "https://files.pythonhosted.org/packages/98/30/76a68fa2c9c815c6b7802a92fc354080d66ffba9acc4690fd85622f77ad4/jiter-0.13.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00203f47c214156df427b5989de74cb340c65c8180d09be1bf9de81d0abad599", size = 489223, upload-time = "2026-02-02T12:37:29.571Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/39/7c5cb85ccd71241513c878054c26a55828ccded6567d931a23ea4be73787/jiter-0.13.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c26ad6967c9dcedf10c995a21539c3aa57d4abad7001b7a84f621a263a6b605", size = 375762, upload-time = "2026-02-02T12:37:31.186Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/6a/381cd18d050b0102e60324e8d3f51f37ef02c56e9f4e5f0b7d26ba18958d/jiter-0.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a576f5dce9ac7de5d350b8e2f552cf364f32975ed84717c35379a51c7cb198bd", size = 364996, upload-time = "2026-02-02T12:37:32.931Z" },
+ { url = "https://files.pythonhosted.org/packages/37/1e/d66310f1f7085c13ea6f1119c9566ec5d2cfd1dc90df963118a6869247bb/jiter-0.13.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b22945be8425d161f2e536cdae66da300b6b000f1c0ba3ddf237d1bfd45d21b8", size = 395463, upload-time = "2026-02-02T12:37:34.446Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/ab/06ae77cb293f860b152c356c635c15aaa800ce48772865a41704d9fac80d/jiter-0.13.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6eeb7db8bc77dc20476bc2f7407a23dbe3d46d9cc664b166e3d474e1c1de4baa", size = 520944, upload-time = "2026-02-02T12:37:35.987Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/8e/57b49b20361c42a80d455a6d83cb38626204508cab4298d6a22880205319/jiter-0.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:19cd6f85e1dc090277c3ce90a5b7d96f32127681d825e71c9dce28788e39fc0c", size = 554955, upload-time = "2026-02-02T12:37:37.656Z" },
+ { url = "https://files.pythonhosted.org/packages/79/dd/113489973c3b4256e383321aea11bd57389e401912fa48eb145a99b38767/jiter-0.13.0-cp39-cp39-win32.whl", hash = "sha256:dc3ce84cfd4fa9628fe62c4f85d0d597a4627d4242cfafac32a12cc1455d00f7", size = 206876, upload-time = "2026-02-02T12:37:39.225Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/73/2bdfc7133c5ee0c8f18cfe4a7582f3cfbbf3ff672cec1b5f4ca67ff9d041/jiter-0.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:9ffda299e417dc83362963966c50cb76d42da673ee140de8a8ac762d4bb2378b", size = 206404, upload-time = "2026-02-02T12:37:40.632Z" },
+ { url = "https://files.pythonhosted.org/packages/79/b3/3c29819a27178d0e461a8571fb63c6ae38be6dc36b78b3ec2876bbd6a910/jiter-0.13.0-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b1cbfa133241d0e6bdab48dcdc2604e8ba81512f6bbd68ec3e8e1357dd3c316c", size = 307016, upload-time = "2026-02-02T12:37:42.755Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/ae/60993e4b07b1ac5ebe46da7aa99fdbb802eb986c38d26e3883ac0125c4e0/jiter-0.13.0-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:db367d8be9fad6e8ebbac4a7578b7af562e506211036cba2c06c3b998603c3d2", size = 305024, upload-time = "2026-02-02T12:37:44.774Z" },
+ { url = "https://files.pythonhosted.org/packages/77/fa/2227e590e9cf98803db2811f172b2d6460a21539ab73006f251c66f44b14/jiter-0.13.0-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45f6f8efb2f3b0603092401dc2df79fa89ccbc027aaba4174d2d4133ed661434", size = 339337, upload-time = "2026-02-02T12:37:46.668Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/92/015173281f7eb96c0ef580c997da8ef50870d4f7f4c9e03c845a1d62ae04/jiter-0.13.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:597245258e6ad085d064780abfb23a284d418d3e61c57362d9449c6c7317ee2d", size = 346395, upload-time = "2026-02-02T12:37:48.09Z" },
+ { url = "https://files.pythonhosted.org/packages/80/60/e50fa45dd7e2eae049f0ce964663849e897300433921198aef94b6ffa23a/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:3d744a6061afba08dd7ae375dcde870cffb14429b7477e10f67e9e6d68772a0a", size = 305169, upload-time = "2026-02-02T12:37:50.376Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/73/a009f41c5eed71c49bec53036c4b33555afcdee70682a18c6f66e396c039/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:ff732bd0a0e778f43d5009840f20b935e79087b4dc65bd36f1cd0f9b04b8ff7f", size = 303808, upload-time = "2026-02-02T12:37:52.092Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/10/528b439290763bff3d939268085d03382471b442f212dca4ff5f12802d43/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab44b178f7981fcaea7e0a5df20e773c663d06ffda0198f1a524e91b2fde7e59", size = 337384, upload-time = "2026-02-02T12:37:53.582Z" },
+ { url = "https://files.pythonhosted.org/packages/67/8a/a342b2f0251f3dac4ca17618265d93bf244a2a4d089126e81e4c1056ac50/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bb00b6d26db67a05fe3e12c76edc75f32077fb51deed13822dc648fa373bc19", size = 343768, upload-time = "2026-02-02T12:37:55.055Z" },
]
[[package]]
@@ -1121,11 +1117,11 @@ wheels = [
[[package]]
name = "nodeenv"
-version = "1.9.1"
+version = "1.10.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437, upload-time = "2024-06-04T18:44:11.171Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" },
+ { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" },
]
[[package]]
@@ -1139,11 +1135,11 @@ wheels = [
[[package]]
name = "pathspec"
-version = "0.12.1"
+version = "1.0.3"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/4c/b2/bb8e495d5262bfec41ab5cb18f522f1012933347fb5d9e62452d446baca2/pathspec-1.0.3.tar.gz", hash = "sha256:bac5cf97ae2c2876e2d25ebb15078eb04d76e4b98921ee31c6f85ade8b59444d", size = 130841, upload-time = "2026-01-09T15:46:46.009Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" },
+ { url = "https://files.pythonhosted.org/packages/32/2b/121e912bd60eebd623f873fd090de0e84f322972ab25a7f9044c056804ed/pathspec-1.0.3-py3-none-any.whl", hash = "sha256:e80767021c1cc524aa3fb14bedda9c34406591343cc42797b386ce7b9354fb6c", size = 55021, upload-time = "2026-01-09T15:46:44.652Z" },
]
[[package]]
@@ -1288,14 +1284,32 @@ wheels = [
name = "pycparser"
version = "2.23"
source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version < '3.10'",
+]
sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" },
]
+[[package]]
+name = "pycparser"
+version = "3.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.14' and extra != 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2'",
+ "python_full_version >= '3.10' and python_full_version < '3.14' and extra != 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2'",
+ "python_full_version >= '3.10' and extra == 'group-12-dedalus-labs-pydantic-v1' and extra != 'group-12-dedalus-labs-pydantic-v2'",
+ "python_full_version >= '3.10' and extra != 'group-12-dedalus-labs-pydantic-v1' and extra != 'group-12-dedalus-labs-pydantic-v2'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/1b/7d/92392ff7815c21062bea51aa7b87d45576f649f16458d78b7cf94b9ab2e6/pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29", size = 103492, upload-time = "2026-01-21T14:26:51.89Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/0c/c3/44f3fbbfa403ea2a7c779186dc20772604442dde72947e7d01069cbe98e3/pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992", size = 48172, upload-time = "2026-01-21T14:26:50.693Z" },
+]
+
[[package]]
name = "pydantic"
-version = "1.10.24"
+version = "1.10.26"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version >= '3.10'",
@@ -1304,44 +1318,39 @@ resolution-markers = [
dependencies = [
{ name = "typing-extensions", marker = "extra == 'group-12-dedalus-labs-pydantic-v1'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/ae/8d/7b346ed940c3e0f9eee7db9be37915a6dac0d9535d736e2ca47a81a066f3/pydantic-1.10.24.tar.gz", hash = "sha256:7e6d1af1bd3d2312079f28c9baf2aafb4a452a06b50717526e5ac562e37baa53", size = 357314, upload-time = "2025-09-25T01:36:33.065Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/7b/da/fd89f987a376c807cd81ea0eff4589aade783bbb702637b4734ef2c743a2/pydantic-1.10.26.tar.gz", hash = "sha256:8c6aa39b494c5af092e690127c283d84f363ac36017106a9e66cb33a22ac412e", size = 357906, upload-time = "2025-12-18T15:47:46.557Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/f9/6e/71eb0c860bf888e73243fbc22be79c47e68180b65b33036efb5a1f1085de/pydantic-1.10.24-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eef07ea2fba12f9188cfa2c50cb3eaa6516b56c33e2a8cc3cd288b4190ee6c0c", size = 2494239, upload-time = "2025-09-25T01:35:02.451Z" },
- { url = "https://files.pythonhosted.org/packages/fd/1f/a2c09049c2ec33b88b111aa99e4bbfe9e821914dcf2ce662e00fa1423fa8/pydantic-1.10.24-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5a42033fac69b9f1f867ecc3a2159f0e94dceb1abfc509ad57e9e88d49774683", size = 2302370, upload-time = "2025-09-25T01:35:05.386Z" },
- { url = "https://files.pythonhosted.org/packages/70/73/fde9af3a76cc5714880828eee50c0f7f1b263d2c77a74c65ba19325b4706/pydantic-1.10.24-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c626596c1b95dc6d45f7129f10b6743fbb50f29d942d25a22b2ceead670c067d", size = 2960499, upload-time = "2025-09-25T01:35:07.243Z" },
- { url = "https://files.pythonhosted.org/packages/1d/d5/9fd98afa478020e9bad54a9ec6e42ba71f8a1a7f6df4d12ce5be76b0a96a/pydantic-1.10.24-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8057172868b0d98f95e6fcddcc5f75d01570e85c6308702dd2c50ea673bc197b", size = 3031125, upload-time = "2025-09-25T01:35:09.048Z" },
- { url = "https://files.pythonhosted.org/packages/1c/99/2fc6df8644c096dc6e3347e1793868a758df874eaf5ba52ca8b5a80e42d8/pydantic-1.10.24-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:82f951210ebcdb778b1d93075af43adcd04e9ebfd4f44b1baa8eeb21fbd71e36", size = 3099888, upload-time = "2025-09-25T01:35:10.894Z" },
- { url = "https://files.pythonhosted.org/packages/71/71/2f4c115951140f525136089da491b0bb4b7d24de8d697913afedde3f326c/pydantic-1.10.24-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b66e4892d8ae005f436a5c5f1519ecf837574d8414b1c93860fb3c13943d9b37", size = 3038385, upload-time = "2025-09-25T01:35:12.744Z" },
- { url = "https://files.pythonhosted.org/packages/ec/6d/d3893a9b8479a0ea9357ba4a1eb84a5776a80705b5409bda4ad9e7ca0804/pydantic-1.10.24-cp310-cp310-win_amd64.whl", hash = "sha256:50d9f8a207c07f347d4b34806dc576872000d9a60fd481ed9eb78ea8512e0666", size = 2093504, upload-time = "2025-09-25T01:35:14.439Z" },
- { url = "https://files.pythonhosted.org/packages/bd/b5/1b49b94e99ae4cad5f034c4b33e9ab481e53238fb55b59ffed5c6e6ee4cf/pydantic-1.10.24-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:70152291488f8d2bbcf2027b5c28c27724c78a7949c91b466d28ad75d6d12702", size = 2526778, upload-time = "2025-09-25T01:35:16.448Z" },
- { url = "https://files.pythonhosted.org/packages/87/d8/63fb1850ca93511b324d709f1c5bd31131039f9b93d0bc2ae210285db6d1/pydantic-1.10.24-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:956b30638272c51c85caaff76851b60db4b339022c0ee6eca677c41e3646255b", size = 2307760, upload-time = "2025-09-25T01:35:18.234Z" },
- { url = "https://files.pythonhosted.org/packages/2a/b8/428453ce573b8898afaf39a5ce32f7dbacf54f8aad9ce9c0abf19a1cdb2c/pydantic-1.10.24-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bed9d6eea5fabbc6978c42e947190c7bd628ddaff3b56fc963fe696c3710ccd6", size = 2902586, upload-time = "2025-09-25T01:35:20.118Z" },
- { url = "https://files.pythonhosted.org/packages/96/e0/68b5eb3c26b5e7136a8946f00f6d2eb8ef2fde530fcf6b491c66e3989d0d/pydantic-1.10.24-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af8e2b3648128b8cadb1a71e2f8092a6f42d4ca123fad7a8d7ce6db8938b1db3", size = 2976378, upload-time = "2025-09-25T01:35:22.077Z" },
- { url = "https://files.pythonhosted.org/packages/93/8c/2c6c46b7bc265ba35bad019c63f77d9ef44fabc026353768d7e6ea16dd51/pydantic-1.10.24-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:076fff9da02ca716e4c8299c68512fdfbeac32fdefc9c160e6f80bdadca0993d", size = 3063515, upload-time = "2025-09-25T01:35:24.048Z" },
- { url = "https://files.pythonhosted.org/packages/84/be/a051e26eff43b6af69f968c1085cdf9069628a7c3614a9836d3ce71327e4/pydantic-1.10.24-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8f2447ca88a7e14fd4d268857521fb37535c53a367b594fa2d7c2551af905993", size = 2988590, upload-time = "2025-09-25T01:35:25.794Z" },
- { url = "https://files.pythonhosted.org/packages/da/d8/f1aca10d538a0f18d2c99f7e84d3bb5c4abb6bd499272d6c4fc21f39af30/pydantic-1.10.24-cp311-cp311-win_amd64.whl", hash = "sha256:58d42a7c344882c00e3bb7c6c8c6f62db2e3aafa671f307271c45ad96e8ccf7a", size = 2096524, upload-time = "2025-09-25T01:35:27.367Z" },
- { url = "https://files.pythonhosted.org/packages/79/4b/73b59168d0babc14fb40b56795bde269d15709ef33de888e12e4f0add5ea/pydantic-1.10.24-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:17e7610119483f03954569c18d4de16f4e92f1585f20975414033ac2d4a96624", size = 2533707, upload-time = "2025-09-25T01:35:28.953Z" },
- { url = "https://files.pythonhosted.org/packages/6d/36/18e6f421a23ddceecfc5d3800d0e86af05e85574aa9e88cc9e29222db066/pydantic-1.10.24-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e24435a9970dcb2b35648f2cf57505d4bd414fcca1a404c82e28d948183fe0a6", size = 2322935, upload-time = "2025-09-25T01:35:30.838Z" },
- { url = "https://files.pythonhosted.org/packages/05/14/4d3fc4bea30add2f2f3c287e931b276f7e304bcb322fe5b2c05a76ccdee7/pydantic-1.10.24-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a9e92b9c78d7f3cfa085c21c110e7000894446e24a836d006aabfc6ae3f1813", size = 2779568, upload-time = "2025-09-25T01:35:32.309Z" },
- { url = "https://files.pythonhosted.org/packages/15/32/5349a7b6675d4384f07f9d461d8230de877b2b913529aa20e659c84bab07/pydantic-1.10.24-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef14dfa7c98b314a3e449e92df6f1479cafe74c626952f353ff0176b075070de", size = 2829163, upload-time = "2025-09-25T01:35:34.294Z" },
- { url = "https://files.pythonhosted.org/packages/f0/00/4d93755e279e8975f7f33adc0af0e9d9aa0db58bcd9c807227d65b396311/pydantic-1.10.24-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52219b4e70c1db185cfd103a804e416384e1c8950168a2d4f385664c7c35d21a", size = 2912003, upload-time = "2025-09-25T01:35:35.935Z" },
- { url = "https://files.pythonhosted.org/packages/db/0c/c839c2a9cf14185c7b5dcc0959d3c3d4a00da400fe02565abf04a7dff6e0/pydantic-1.10.24-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5ce0986799248082e9a5a026c9b5d2f9fa2e24d2afb9b0eace9104334a58fdc1", size = 2859825, upload-time = "2025-09-25T01:35:37.657Z" },
- { url = "https://files.pythonhosted.org/packages/cb/0c/f0b8e35033322c176574d6f431455c8d6e3f63811a2c5a00c96b2b97a393/pydantic-1.10.24-cp312-cp312-win_amd64.whl", hash = "sha256:874a78e4ed821258295a472e325eee7de3d91ba7a61d0639ce1b0367a3c63d4c", size = 1969911, upload-time = "2025-09-25T01:35:39.479Z" },
- { url = "https://files.pythonhosted.org/packages/bd/56/9168c282af2bb8bdb102981a9ff0ed41ab4d3735a52b732b2d2ad0e14018/pydantic-1.10.24-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:265788a1120285c4955f8b3d52b3ea6a52c7a74db097c4c13a4d3567f0c6df3c", size = 2589497, upload-time = "2025-09-25T01:35:41.141Z" },
- { url = "https://files.pythonhosted.org/packages/9c/eb/eb4b0e2988a2889a1905c3196f859509e62c208830889d2382928b92fdd2/pydantic-1.10.24-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d255bebd927e5f1e026b32605684f7b6fc36a13e62b07cb97b29027b91657def", size = 2351231, upload-time = "2025-09-25T01:35:43.221Z" },
- { url = "https://files.pythonhosted.org/packages/1e/be/7451b633ffdc2d28de582a339af2275c3ffcca789dda97d8ac9133f0c616/pydantic-1.10.24-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6e45dbc79a44e34c2c83ef1fcb56ff663040474dcf4dfc452db24a1de0f7574", size = 2762972, upload-time = "2025-09-25T01:35:45.304Z" },
- { url = "https://files.pythonhosted.org/packages/9e/fb/5de3cfde0b808f2fa0538ec1f1c186f44d905ecbcc96ba22e2cac1f30b23/pydantic-1.10.24-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af31565b12a7db5bfa5fe8c3a4f8fda4d32f5c2929998b1b241f1c22e9ab6e69", size = 2801015, upload-time = "2025-09-25T01:35:46.774Z" },
- { url = "https://files.pythonhosted.org/packages/2f/6a/9b6b51d19d1af57e8864caff08ce5e8554388b91dc41987ce49315bce3e1/pydantic-1.10.24-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9c377fc30d9ca40dbff5fd79c5a5e1f0d6fff040fa47a18851bb6b0bd040a5d8", size = 2890844, upload-time = "2025-09-25T01:35:48.724Z" },
- { url = "https://files.pythonhosted.org/packages/27/ca/1ab6b16bd792c8a1fb54949d8b5eef8032d672932ca4afc3048e4febfcdc/pydantic-1.10.24-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b644d6f14b2ce617d6def21622f9ba73961a16b7dffdba7f6692e2f66fa05d00", size = 2850844, upload-time = "2025-09-25T01:35:50.279Z" },
- { url = "https://files.pythonhosted.org/packages/86/5f/fcc5635818113858a6b37099fed6b860a15b27bb1d0fb270ceb50d0a91b6/pydantic-1.10.24-cp313-cp313-win_amd64.whl", hash = "sha256:0cbbf306124ae41cc153fdc2559b37faa1bec9a23ef7b082c1756d1315ceffe6", size = 1971713, upload-time = "2025-09-25T01:35:52.027Z" },
- { url = "https://files.pythonhosted.org/packages/a9/29/62dd3ffcf7d003f53e834942e9651c2ddd9dc6fb59e6619317e0ed37cf6b/pydantic-1.10.24-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25fb9a69a21d711deb5acefdab9ff8fb49e6cc77fdd46d38217d433bff2e3de2", size = 2504290, upload-time = "2025-09-25T01:36:16.661Z" },
- { url = "https://files.pythonhosted.org/packages/f2/83/ef9c4be8e7fc96f52320296aed34f7cbe50fa0219833cc2756e611b644f2/pydantic-1.10.24-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6af36a8fb3072526b5b38d3f341b12d8f423188e7d185f130c0079fe02cdec7f", size = 2311007, upload-time = "2025-09-25T01:36:18.75Z" },
- { url = "https://files.pythonhosted.org/packages/1c/b7/ec7da8fbaac8c8100b05301a81fac6b2b7446961edb91bbef4b564834abf/pydantic-1.10.24-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fc35569dfd15d3b3fc06a22abee0a45fdde0784be644e650a8769cd0b2abd94", size = 2968514, upload-time = "2025-09-25T01:36:20.511Z" },
- { url = "https://files.pythonhosted.org/packages/49/84/9e218a35008fbc32dac2974a35a4bd88d7deb0f5b572cf46ccf003a06310/pydantic-1.10.24-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fac7fbcb65171959973f3136d0792c3d1668bc01fd414738f0898b01f692f1b4", size = 3039539, upload-time = "2025-09-25T01:36:24.359Z" },
- { url = "https://files.pythonhosted.org/packages/b0/2f/b13a8c2d641e3af3fbba136202a9808025ee7cde4b1326ce1aabd1c79d51/pydantic-1.10.24-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fc3f4a6544517380658b63b144c7d43d5276a343012913b7e5d18d9fba2f12bb", size = 3108949, upload-time = "2025-09-25T01:36:26.138Z" },
- { url = "https://files.pythonhosted.org/packages/1f/57/dccbf080b35b9797f4d477f4c59935e39e4493cd507f31b5ca5ee49c930d/pydantic-1.10.24-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:415c638ca5fd57b915a62dd38c18c8e0afe5adf5527be6f8ce16b4636b616816", size = 3049395, upload-time = "2025-09-25T01:36:27.782Z" },
- { url = "https://files.pythonhosted.org/packages/a2/ff/2a25855a1495fcbe1d3b8c782276994575e98ce2218dbf57c1f2eee7c894/pydantic-1.10.24-cp39-cp39-win_amd64.whl", hash = "sha256:a5bf94042efbc6ab56b18a5921f426ebbeefc04f554a911d76029e7be9057d01", size = 2100530, upload-time = "2025-09-25T01:36:29.932Z" },
- { url = "https://files.pythonhosted.org/packages/46/7f/a168d7077f85f85128aa5636abf13c804c06235c786f1881e659703899a4/pydantic-1.10.24-py3-none-any.whl", hash = "sha256:093768eba26db55a88b12f3073017e3fdee319ef60d3aef5c6c04a4e484db193", size = 166727, upload-time = "2025-09-25T01:36:31.732Z" },
+ { url = "https://files.pythonhosted.org/packages/71/08/2587a6d4314e7539eec84acd062cb7b037638edb57a0335d20e4c5b8878c/pydantic-1.10.26-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f7ae36fa0ecef8d39884120f212e16c06bb096a38f523421278e2f39c1784546", size = 2444588, upload-time = "2025-12-18T15:46:28.882Z" },
+ { url = "https://files.pythonhosted.org/packages/47/e6/10df5f08c105bcbb4adbee7d1108ff4b347702b110fed058f6a03f1c6b73/pydantic-1.10.26-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d95a76cf503f0f72ed7812a91de948440b2bf564269975738a4751e4fadeb572", size = 2255972, upload-time = "2025-12-18T15:46:31.72Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/7d/fdb961e7adc2c31f394feba6f560ef2c74c446f0285e2c2eb87d2b7206c7/pydantic-1.10.26-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a943ce8e00ad708ed06a1d9df5b4fd28f5635a003b82a4908ece6f24c0b18464", size = 2857175, upload-time = "2025-12-18T15:46:34Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/6c/f21e27dda475d4c562bd01b5874284dd3180f336c1e669413b743ca8b278/pydantic-1.10.26-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:465ad8edb29b15c10b779b16431fe8e77c380098badf6db367b7a1d3e572cf53", size = 2947001, upload-time = "2025-12-18T15:46:35.922Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/f6/27ea206232cbb6ec24dc4e4e8888a9a734f96a1eaf13504be4b30ef26aa7/pydantic-1.10.26-cp310-cp310-win_amd64.whl", hash = "sha256:80e6be6272839c8a7641d26ad569ab77772809dd78f91d0068dc0fc97f071945", size = 2066217, upload-time = "2025-12-18T15:46:37.614Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/c1/d521e64c8130e1ad9d22c270bed3fabcc0940c9539b076b639c88fd32a8d/pydantic-1.10.26-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:116233e53889bcc536f617e38c1b8337d7fa9c280f0fd7a4045947515a785637", size = 2428347, upload-time = "2025-12-18T15:46:39.41Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/08/f4b804a00c16e3ea994cb640a7c25c579b4f1fa674cde6a19fa0dfb0ae4f/pydantic-1.10.26-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c3cfdd361addb6eb64ccd26ac356ad6514cee06a61ab26b27e16b5ed53108f77", size = 2212605, upload-time = "2025-12-18T15:46:41.006Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/78/0df4b9efef29bbc5e39f247fcba99060d15946b4463d82a5589cf7923d71/pydantic-1.10.26-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0e4451951a9a93bf9a90576f3e25240b47ee49ab5236adccb8eff6ac943adf0f", size = 2753560, upload-time = "2025-12-18T15:46:43.215Z" },
+ { url = "https://files.pythonhosted.org/packages/68/66/6ab6c1d3a116d05d2508fce64f96e35242938fac07544d611e11d0d363a0/pydantic-1.10.26-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9858ed44c6bea5f29ffe95308db9e62060791c877766c67dd5f55d072c8612b5", size = 2859235, upload-time = "2025-12-18T15:46:45.112Z" },
+ { url = "https://files.pythonhosted.org/packages/61/4e/f1676bb0fcdf6ed2ce4670d7d1fc1d6c3a06d84497644acfbe02649503f1/pydantic-1.10.26-cp311-cp311-win_amd64.whl", hash = "sha256:ac1089f723e2106ebde434377d31239e00870a7563245072968e5af5cc4d33df", size = 2066646, upload-time = "2025-12-18T15:46:46.816Z" },
+ { url = "https://files.pythonhosted.org/packages/02/6c/cd97a5a776c4515e6ee2ae81c2f2c5be51376dda6c31f965d7746ce0019f/pydantic-1.10.26-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:468d5b9cacfcaadc76ed0a4645354ab6f263ec01a63fb6d05630ea1df6ae453f", size = 2433795, upload-time = "2025-12-18T15:46:49.321Z" },
+ { url = "https://files.pythonhosted.org/packages/47/12/de20affa30dcef728fcf9cc98e13ff4438c7a630de8d2f90eb38eba0891c/pydantic-1.10.26-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2c1b0b914be31671000ca25cf7ea17fcaaa68cfeadf6924529c5c5aa24b7ab1f", size = 2227387, upload-time = "2025-12-18T15:46:50.877Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/1d/9d65dcc5b8c17ba590f1f9f486e9306346831902318b7ee93f63516f4003/pydantic-1.10.26-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15b13b9f8ba8867095769e1156e0d7fbafa1f65b898dd40fd1c02e34430973cb", size = 2629594, upload-time = "2025-12-18T15:46:53.42Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/76/acb41409356789e23e1a7ef58f93821410c96409183ce314ddb58d97f23e/pydantic-1.10.26-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad7025ca324ae263d4313998e25078dcaec5f9ed0392c06dedb57e053cc8086b", size = 2745305, upload-time = "2025-12-18T15:46:55.987Z" },
+ { url = "https://files.pythonhosted.org/packages/22/72/a98c0c5e527a66057d969fedd61675223c7975ade61acebbca9f1abd6dc0/pydantic-1.10.26-cp312-cp312-win_amd64.whl", hash = "sha256:4482b299874dabb88a6c3759e3d85c6557c407c3b586891f7d808d8a38b66b9c", size = 1937647, upload-time = "2025-12-18T15:46:57.905Z" },
+ { url = "https://files.pythonhosted.org/packages/28/b9/17a5a5a421c23ac27486b977724a42c9d5f8b7f0f4aab054251066223900/pydantic-1.10.26-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1ae7913bb40a96c87e3d3f6fe4e918ef53bf181583de4e71824360a9b11aef1c", size = 2494599, upload-time = "2025-12-18T15:47:00.209Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/8e/6e3bd4241076cf227b443d7577245dd5d181ecf40b3182fcb908bc8c197d/pydantic-1.10.26-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8154c13f58d4de5d3a856bb6c909c7370f41fb876a5952a503af6b975265f4ba", size = 2254391, upload-time = "2025-12-18T15:47:02.268Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/30/a1c4092eda2145ecbead6c92db489b223e101e1ba0da82576d0cf73dd422/pydantic-1.10.26-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f8af0507bf6118b054a9765fb2e402f18a8b70c964f420d95b525eb711122d62", size = 2609445, upload-time = "2025-12-18T15:47:04.909Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/2a/0491f1729ee4b7b6bc859ec22f69752f0c09bee1b66ac6f5f701136f34c3/pydantic-1.10.26-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:dcb5a7318fb43189fde6af6f21ac7149c4bcbcfffc54bc87b5becddc46084847", size = 2732124, upload-time = "2025-12-18T15:47:07.464Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/56/b59f3b2f84e1df2b04ae768a1bb04d9f0288ff71b67cdcbb07683757b2c0/pydantic-1.10.26-cp313-cp313-win_amd64.whl", hash = "sha256:71cde228bc0600cf8619f0ee62db050d1880dcc477eba0e90b23011b4ee0f314", size = 1939888, upload-time = "2025-12-18T15:47:09.618Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/8b/0c3dc02d4b97790b0f199bf933f677c14e7be4a8d21307c5f2daae06aa41/pydantic-1.10.26-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:6b40730cc81d53d515dc0b8bb5c9b43fadb9bed46de4a3c03bd95e8571616dba", size = 2502689, upload-time = "2025-12-18T15:47:12.308Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/9d/d31aeea45542b2ae4b09ecba92b88aaba696b801c31919811aa979a1242d/pydantic-1.10.26-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c3bbb9c0eecdf599e4db9b372fa9cc55be12e80a0d9c6d307950a39050cb0e37", size = 2269494, upload-time = "2025-12-18T15:47:14.53Z" },
+ { url = "https://files.pythonhosted.org/packages/78/c1/3a4d069593283ca4dd0006039ba33644e21e432cddc09da706ac50441610/pydantic-1.10.26-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc2e3fe7bc4993626ef6b6fa855defafa1d6f8996aa1caef2deb83c5ac4d043a", size = 2620047, upload-time = "2025-12-18T15:47:17.089Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/0e/340c3d29197d99c15ab04093d43bb9c9d0fd17c2a34b80cb9d36ed732b09/pydantic-1.10.26-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:36d9e46b588aaeb1dcd2409fa4c467fe0b331f3cc9f227b03a7a00643704e962", size = 2747625, upload-time = "2025-12-18T15:47:19.21Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/58/f12ab3727339b172c830b32151919456b67787cdfe8808b2568b322fb15c/pydantic-1.10.26-cp314-cp314-win_amd64.whl", hash = "sha256:81ce3c8616d12a7be31b4aadfd3434f78f6b44b75adbfaec2fe1ad4f7f999b8c", size = 1976436, upload-time = "2025-12-18T15:47:21.384Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/8a/3a5a6267d5f03617b5c0f1985aa9fdfbafd33a50ef6dadd866a15ed4d123/pydantic-1.10.26-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:502b9d30d18a2dfaf81b7302f6ba0e5853474b1c96212449eb4db912cb604b7d", size = 2457039, upload-time = "2025-12-18T15:47:34.584Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/fa/343ac0db26918a033ac6256c036d72c3b6eb1196b7de622e2e8a94b19079/pydantic-1.10.26-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0d8f6087bf697dec3bf7ffcd7fe8362674f16519f3151789f33cbe8f1d19fc15", size = 2266441, upload-time = "2025-12-18T15:47:36.807Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/36/1ab48136578608dba2f2a62e452f3db2083b474d4e49be5749c6ae0c123c/pydantic-1.10.26-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dd40a99c358419910c85e6f5d22f9c56684c25b5e7abc40879b3b4a52f34ae90", size = 2869383, upload-time = "2025-12-18T15:47:38.883Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/25/41dbf1bffc31eb242cece8080561a4133eaeb513372dec36a84477a3fb71/pydantic-1.10.26-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ce3293b86ca9f4125df02ff0a70be91bc7946522467cbd98e7f1493f340616ba", size = 2963582, upload-time = "2025-12-18T15:47:40.854Z" },
+ { url = "https://files.pythonhosted.org/packages/61/2f/f072ae160a300c85eb9f059915101fd33dacf12d8df08c2b804acb3b95d1/pydantic-1.10.26-cp39-cp39-win_amd64.whl", hash = "sha256:1a4e3062b71ab1d5df339ba12c48f9ed5817c5de6cb92a961dd5c64bb32e7b96", size = 2075530, upload-time = "2025-12-18T15:47:43.181Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/98/556e82f00b98486def0b8af85da95e69d2be7e367cf2431408e108bc3095/pydantic-1.10.26-py3-none-any.whl", hash = "sha256:c43ad70dc3ce7787543d563792426a16fd7895e14be4b194b5665e36459dd917", size = 166975, upload-time = "2025-12-18T15:47:44.927Z" },
]
[[package]]
@@ -1508,11 +1517,11 @@ wheels = [
[[package]]
name = "pyjwt"
-version = "2.10.1"
+version = "2.11.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/5c/5a/b46fa56bf322901eee5b0454a34343cdbdae202cd421775a8ee4e42fd519/pyjwt-2.11.0.tar.gz", hash = "sha256:35f95c1f0fbe5d5ba6e43f00271c275f7a1a4db1dab27bf708073b75318ea623", size = 98019, upload-time = "2026-01-30T19:59:55.694Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/01/c26ce75ba460d5cd503da9e13b21a33804d38c2165dec7b716d06b13010c/pyjwt-2.11.0-py3-none-any.whl", hash = "sha256:94a6bde30eb5c8e04fee991062b534071fd1439ef58d2adc9ccb823e7bcd0469", size = 28224, upload-time = "2026-01-30T19:59:54.539Z" },
]
[package.optional-dependencies]
@@ -1669,28 +1678,28 @@ wheels = [
[[package]]
name = "ruff"
-version = "0.14.8"
+version = "0.14.13"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/ed/d9/f7a0c4b3a2bf2556cd5d99b05372c29980249ef71e8e32669ba77428c82c/ruff-0.14.8.tar.gz", hash = "sha256:774ed0dd87d6ce925e3b8496feb3a00ac564bea52b9feb551ecd17e0a23d1eed", size = 5765385, upload-time = "2025-12-04T15:06:17.669Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/50/0a/1914efb7903174b381ee2ffeebb4253e729de57f114e63595114c8ca451f/ruff-0.14.13.tar.gz", hash = "sha256:83cd6c0763190784b99650a20fec7633c59f6ebe41c5cc9d45ee42749563ad47", size = 6059504, upload-time = "2026-01-15T20:15:16.918Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/48/b8/9537b52010134b1d2b72870cc3f92d5fb759394094741b09ceccae183fbe/ruff-0.14.8-py3-none-linux_armv6l.whl", hash = "sha256:ec071e9c82eca417f6111fd39f7043acb53cd3fde9b1f95bbed745962e345afb", size = 13441540, upload-time = "2025-12-04T15:06:14.896Z" },
- { url = "https://files.pythonhosted.org/packages/24/00/99031684efb025829713682012b6dd37279b1f695ed1b01725f85fd94b38/ruff-0.14.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:8cdb162a7159f4ca36ce980a18c43d8f036966e7f73f866ac8f493b75e0c27e9", size = 13669384, upload-time = "2025-12-04T15:06:51.809Z" },
- { url = "https://files.pythonhosted.org/packages/72/64/3eb5949169fc19c50c04f28ece2c189d3b6edd57e5b533649dae6ca484fe/ruff-0.14.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2e2fcbefe91f9fad0916850edf0854530c15bd1926b6b779de47e9ab619ea38f", size = 12806917, upload-time = "2025-12-04T15:06:08.925Z" },
- { url = "https://files.pythonhosted.org/packages/c4/08/5250babb0b1b11910f470370ec0cbc67470231f7cdc033cee57d4976f941/ruff-0.14.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9d70721066a296f45786ec31916dc287b44040f553da21564de0ab4d45a869b", size = 13256112, upload-time = "2025-12-04T15:06:23.498Z" },
- { url = "https://files.pythonhosted.org/packages/78/4c/6c588e97a8e8c2d4b522c31a579e1df2b4d003eddfbe23d1f262b1a431ff/ruff-0.14.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2c87e09b3cd9d126fc67a9ecd3b5b1d3ded2b9c7fce3f16e315346b9d05cfb52", size = 13227559, upload-time = "2025-12-04T15:06:33.432Z" },
- { url = "https://files.pythonhosted.org/packages/23/ce/5f78cea13eda8eceac71b5f6fa6e9223df9b87bb2c1891c166d1f0dce9f1/ruff-0.14.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d62cb310c4fbcb9ee4ac023fe17f984ae1e12b8a4a02e3d21489f9a2a5f730c", size = 13896379, upload-time = "2025-12-04T15:06:02.687Z" },
- { url = "https://files.pythonhosted.org/packages/cf/79/13de4517c4dadce9218a20035b21212a4c180e009507731f0d3b3f5df85a/ruff-0.14.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:1af35c2d62633d4da0521178e8a2641c636d2a7153da0bac1b30cfd4ccd91344", size = 15372786, upload-time = "2025-12-04T15:06:29.828Z" },
- { url = "https://files.pythonhosted.org/packages/00/06/33df72b3bb42be8a1c3815fd4fae83fa2945fc725a25d87ba3e42d1cc108/ruff-0.14.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:25add4575ffecc53d60eed3f24b1e934493631b48ebbc6ebaf9d8517924aca4b", size = 14990029, upload-time = "2025-12-04T15:06:36.812Z" },
- { url = "https://files.pythonhosted.org/packages/64/61/0f34927bd90925880394de0e081ce1afab66d7b3525336f5771dcf0cb46c/ruff-0.14.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4c943d847b7f02f7db4201a0600ea7d244d8a404fbb639b439e987edcf2baf9a", size = 14407037, upload-time = "2025-12-04T15:06:39.979Z" },
- { url = "https://files.pythonhosted.org/packages/96/bc/058fe0aefc0fbf0d19614cb6d1a3e2c048f7dc77ca64957f33b12cfdc5ef/ruff-0.14.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb6e8bf7b4f627548daa1b69283dac5a296bfe9ce856703b03130732e20ddfe2", size = 14102390, upload-time = "2025-12-04T15:06:46.372Z" },
- { url = "https://files.pythonhosted.org/packages/af/a4/e4f77b02b804546f4c17e8b37a524c27012dd6ff05855d2243b49a7d3cb9/ruff-0.14.8-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:7aaf2974f378e6b01d1e257c6948207aec6a9b5ba53fab23d0182efb887a0e4a", size = 14230793, upload-time = "2025-12-04T15:06:20.497Z" },
- { url = "https://files.pythonhosted.org/packages/3f/52/bb8c02373f79552e8d087cedaffad76b8892033d2876c2498a2582f09dcf/ruff-0.14.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e5758ca513c43ad8a4ef13f0f081f80f08008f410790f3611a21a92421ab045b", size = 13160039, upload-time = "2025-12-04T15:06:49.06Z" },
- { url = "https://files.pythonhosted.org/packages/1f/ad/b69d6962e477842e25c0b11622548df746290cc6d76f9e0f4ed7456c2c31/ruff-0.14.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:f74f7ba163b6e85a8d81a590363bf71618847e5078d90827749bfda1d88c9cdf", size = 13205158, upload-time = "2025-12-04T15:06:54.574Z" },
- { url = "https://files.pythonhosted.org/packages/06/63/54f23da1315c0b3dfc1bc03fbc34e10378918a20c0b0f086418734e57e74/ruff-0.14.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:eed28f6fafcc9591994c42254f5a5c5ca40e69a30721d2ab18bb0bb3baac3ab6", size = 13469550, upload-time = "2025-12-04T15:05:59.209Z" },
- { url = "https://files.pythonhosted.org/packages/70/7d/a4d7b1961e4903bc37fffb7ddcfaa7beb250f67d97cfd1ee1d5cddb1ec90/ruff-0.14.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:21d48fa744c9d1cb8d71eb0a740c4dd02751a5de9db9a730a8ef75ca34cf138e", size = 14211332, upload-time = "2025-12-04T15:06:06.027Z" },
- { url = "https://files.pythonhosted.org/packages/5d/93/2a5063341fa17054e5c86582136e9895db773e3c2ffb770dde50a09f35f0/ruff-0.14.8-py3-none-win32.whl", hash = "sha256:15f04cb45c051159baebb0f0037f404f1dc2f15a927418f29730f411a79bc4e7", size = 13151890, upload-time = "2025-12-04T15:06:11.668Z" },
- { url = "https://files.pythonhosted.org/packages/02/1c/65c61a0859c0add13a3e1cbb6024b42de587456a43006ca2d4fd3d1618fe/ruff-0.14.8-py3-none-win_amd64.whl", hash = "sha256:9eeb0b24242b5bbff3011409a739929f497f3fb5fe3b5698aba5e77e8c833097", size = 14537826, upload-time = "2025-12-04T15:06:26.409Z" },
- { url = "https://files.pythonhosted.org/packages/6d/63/8b41cea3afd7f58eb64ac9251668ee0073789a3bc9ac6f816c8c6fef986d/ruff-0.14.8-py3-none-win_arm64.whl", hash = "sha256:965a582c93c63fe715fd3e3f8aa37c4b776777203d8e1d8aa3cc0c14424a4b99", size = 13634522, upload-time = "2025-12-04T15:06:43.212Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/ae/0deefbc65ca74b0ab1fd3917f94dc3b398233346a74b8bbb0a916a1a6bf6/ruff-0.14.13-py3-none-linux_armv6l.whl", hash = "sha256:76f62c62cd37c276cb03a275b198c7c15bd1d60c989f944db08a8c1c2dbec18b", size = 13062418, upload-time = "2026-01-15T20:14:50.779Z" },
+ { url = "https://files.pythonhosted.org/packages/47/df/5916604faa530a97a3c154c62a81cb6b735c0cb05d1e26d5ad0f0c8ac48a/ruff-0.14.13-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:914a8023ece0528d5cc33f5a684f5f38199bbb566a04815c2c211d8f40b5d0ed", size = 13442344, upload-time = "2026-01-15T20:15:07.94Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/f3/e0e694dd69163c3a1671e102aa574a50357536f18a33375050334d5cd517/ruff-0.14.13-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d24899478c35ebfa730597a4a775d430ad0d5631b8647a3ab368c29b7e7bd063", size = 12354720, upload-time = "2026-01-15T20:15:09.854Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/e8/67f5fcbbaee25e8fc3b56cc33e9892eca7ffe09f773c8e5907757a7e3bdb/ruff-0.14.13-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9aaf3870f14d925bbaf18b8a2347ee0ae7d95a2e490e4d4aea6813ed15ebc80e", size = 12774493, upload-time = "2026-01-15T20:15:20.908Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/ce/d2e9cb510870b52a9565d885c0d7668cc050e30fa2c8ac3fb1fda15c083d/ruff-0.14.13-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac5b7f63dd3b27cc811850f5ffd8fff845b00ad70e60b043aabf8d6ecc304e09", size = 12815174, upload-time = "2026-01-15T20:15:05.74Z" },
+ { url = "https://files.pythonhosted.org/packages/88/00/c38e5da58beebcf4fa32d0ddd993b63dfacefd02ab7922614231330845bf/ruff-0.14.13-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78d2b1097750d90ba82ce4ba676e85230a0ed694178ca5e61aa9b459970b3eb9", size = 13680909, upload-time = "2026-01-15T20:15:14.537Z" },
+ { url = "https://files.pythonhosted.org/packages/61/61/cd37c9dd5bd0a3099ba79b2a5899ad417d8f3b04038810b0501a80814fd7/ruff-0.14.13-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7d0bf87705acbbcb8d4c24b2d77fbb73d40210a95c3903b443cd9e30824a5032", size = 15144215, upload-time = "2026-01-15T20:15:22.886Z" },
+ { url = "https://files.pythonhosted.org/packages/56/8a/85502d7edbf98c2df7b8876f316c0157359165e16cdf98507c65c8d07d3d/ruff-0.14.13-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3eb5da8e2c9e9f13431032fdcbe7681de9ceda5835efee3269417c13f1fed5c", size = 14706067, upload-time = "2026-01-15T20:14:48.271Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/2f/de0df127feb2ee8c1e54354dc1179b4a23798f0866019528c938ba439aca/ruff-0.14.13-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:642442b42957093811cd8d2140dfadd19c7417030a7a68cf8d51fcdd5f217427", size = 14133916, upload-time = "2026-01-15T20:14:57.357Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/77/9b99686bb9fe07a757c82f6f95e555c7a47801a9305576a9c67e0a31d280/ruff-0.14.13-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4acdf009f32b46f6e8864af19cbf6841eaaed8638e65c8dac845aea0d703c841", size = 13859207, upload-time = "2026-01-15T20:14:55.111Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/46/2bdcb34a87a179a4d23022d818c1c236cb40e477faf0d7c9afb6813e5876/ruff-0.14.13-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:591a7f68860ea4e003917d19b5c4f5ac39ff558f162dc753a2c5de897fd5502c", size = 14043686, upload-time = "2026-01-15T20:14:52.841Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/a9/5c6a4f56a0512c691cf143371bcf60505ed0f0860f24a85da8bd123b2bf1/ruff-0.14.13-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:774c77e841cc6e046fc3e91623ce0903d1cd07e3a36b1a9fe79b81dab3de506b", size = 12663837, upload-time = "2026-01-15T20:15:18.921Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/bb/b920016ece7651fa7fcd335d9d199306665486694d4361547ccb19394c44/ruff-0.14.13-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:61f4e40077a1248436772bb6512db5fc4457fe4c49e7a94ea7c5088655dd21ae", size = 12805867, upload-time = "2026-01-15T20:14:59.272Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/b3/0bd909851e5696cd21e32a8fc25727e5f58f1934b3596975503e6e85415c/ruff-0.14.13-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6d02f1428357fae9e98ac7aa94b7e966fd24151088510d32cf6f902d6c09235e", size = 13208528, upload-time = "2026-01-15T20:15:03.732Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/3b/e2d94cb613f6bbd5155a75cbe072813756363eba46a3f2177a1fcd0cd670/ruff-0.14.13-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e399341472ce15237be0c0ae5fbceca4b04cd9bebab1a2b2c979e015455d8f0c", size = 13929242, upload-time = "2026-01-15T20:15:11.918Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/c5/abd840d4132fd51a12f594934af5eba1d5d27298a6f5b5d6c3be45301caf/ruff-0.14.13-py3-none-win32.whl", hash = "sha256:ef720f529aec113968b45dfdb838ac8934e519711da53a0456038a0efecbd680", size = 12919024, upload-time = "2026-01-15T20:14:43.647Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/55/6384b0b8ce731b6e2ade2b5449bf07c0e4c31e8a2e68ea65b3bafadcecc5/ruff-0.14.13-py3-none-win_amd64.whl", hash = "sha256:6070bd026e409734b9257e03e3ef18c6e1a216f0435c6751d7a8ec69cb59abef", size = 14097887, upload-time = "2026-01-15T20:15:01.48Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/e1/7348090988095e4e39560cfc2f7555b1b2a7357deba19167b600fdf5215d/ruff-0.14.13-py3-none-win_arm64.whl", hash = "sha256:7ab819e14f1ad9fe39f246cfcc435880ef7a9390d81a2b6ac7e01039083dd247", size = 13080224, upload-time = "2026-01-15T20:14:45.853Z" },
]
[[package]]
@@ -1815,7 +1824,7 @@ wheels = [
[[package]]
name = "time-machine"
-version = "3.1.0"
+version = "3.2.0"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version >= '3.14' and extra != 'group-12-dedalus-labs-pydantic-v1' and extra == 'group-12-dedalus-labs-pydantic-v2'",
@@ -1823,134 +1832,139 @@ resolution-markers = [
"python_full_version >= '3.10' and extra == 'group-12-dedalus-labs-pydantic-v1' and extra != 'group-12-dedalus-labs-pydantic-v2'",
"python_full_version >= '3.10' and extra != 'group-12-dedalus-labs-pydantic-v1' and extra != 'group-12-dedalus-labs-pydantic-v2'",
]
-sdist = { url = "https://files.pythonhosted.org/packages/17/bd/a1bb03eb39ce35c966f0dde6559df7348ca0580f7cd3a956fdd7ed0b5080/time_machine-3.1.0.tar.gz", hash = "sha256:90831c2cf9e18e4199abb85fafa0c0ca0c6c15d0894a03ef68d5005a796c4f27", size = 14436, upload-time = "2025-11-21T13:56:33.802Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/02/fc/37b02f6094dbb1f851145330460532176ed2f1dc70511a35828166c41e52/time_machine-3.2.0.tar.gz", hash = "sha256:a4ddd1cea17b8950e462d1805a42b20c81eb9aafc8f66b392dd5ce997e037d79", size = 14804, upload-time = "2025-12-17T23:33:02.599Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/0a/c9/0aaa082d6b5c489c22d9db025cbf17016062e953f70a7678d76b520f274f/time_machine-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e867178cc87490c578534832c29f048cc954b32a01681237e52ccda704baece5", size = 19046, upload-time = "2025-11-21T13:54:53.273Z" },
- { url = "https://files.pythonhosted.org/packages/30/0d/e825251028c68822a63478c7e44a0dca640daedb15f685a9a3973edf8ae8/time_machine-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d4d4b944e0197c7726844f452fcb3dc17991165e9b4fab779e505b095bb7363", size = 15027, upload-time = "2025-11-21T13:54:54.474Z" },
- { url = "https://files.pythonhosted.org/packages/01/ee/d1e041f85787b835ed70bc7c31000a03d197f831490304f2527e9b840507/time_machine-3.1.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9fc81014e6fc989b833e8cae3684b78b1e1f7c0d867c2fbfea785385a806ea6b", size = 32548, upload-time = "2025-11-21T13:54:55.868Z" },
- { url = "https://files.pythonhosted.org/packages/50/c0/12fc339c054e0db1da79c476bb1c3cc0b8796789e90eb8687af96bbdde9b/time_machine-3.1.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:aebd2a55f860b1ef28a2c1efe81454a6fa110ec1a6517c011760132f3cbf35fa", size = 34150, upload-time = "2025-11-21T13:54:56.955Z" },
- { url = "https://files.pythonhosted.org/packages/84/ab/40a2d31d4f742e41fc7f3703da672bbc25b505df3e7ab5df6c11a39e435e/time_machine-3.1.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:02e8018629e12e8064fee4f8fbbb8ae353f5907051965b5721ef189aeb7f833c", size = 35694, upload-time = "2025-11-21T13:54:58.053Z" },
- { url = "https://files.pythonhosted.org/packages/6f/1f/f31b604cb72af2c89311e8152bfe4e64a890785daeb19939bb841ed4cb77/time_machine-3.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2fbbcf9de9a1d3e94e8b6c41ac7e9da46948fbdf489dbc083ea6c28ed045a43a", size = 34500, upload-time = "2025-11-21T13:54:59.428Z" },
- { url = "https://files.pythonhosted.org/packages/4f/1d/dca59c5d54dd0777b342fa708ffb24e0c595b8c47106300bc154dbaa8d98/time_machine-3.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:358f0feead47ee74c7747cb5b7f73582cbde1c033c20f9350e58ab4389aa59ff", size = 32605, upload-time = "2025-11-21T13:55:00.877Z" },
- { url = "https://files.pythonhosted.org/packages/f0/af/033b7b29c9364f05e99b4f35152574edc21b2f038589dafd60a60945a017/time_machine-3.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f6d3f9e531127306610433fe62368b9e14621e336da2042d062565d655d97a02", size = 33699, upload-time = "2025-11-21T13:55:02.287Z" },
- { url = "https://files.pythonhosted.org/packages/4e/92/782c75e9f958e2cb33ba30f9a7adeb819811626eb3193e5ade4343ef197d/time_machine-3.1.0-cp310-cp310-win32.whl", hash = "sha256:bd4b4279938472ea18e5580330c10f8d49b8aec34e0df71be46e3be3b0f03f1d", size = 17054, upload-time = "2025-11-21T13:55:04.036Z" },
- { url = "https://files.pythonhosted.org/packages/ce/e6/cbdb32a72d2e122646cd3c62aed47dcddb366196798caa39043985d4e11d/time_machine-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:10a0d18b65af008e1cb60e0cc57594e67d3bbfee07204879f0e5864223dfd899", size = 17898, upload-time = "2025-11-21T13:55:05.069Z" },
- { url = "https://files.pythonhosted.org/packages/1f/d2/3663703fc694b07241673391c267629f881d4c025c392d7df09161031ac7/time_machine-3.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:53702d6e594fc395b66517bc75c3d2ad8bfbff1f693d09bb67f8178cdfd21cd5", size = 16630, upload-time = "2025-11-21T13:55:06.178Z" },
- { url = "https://files.pythonhosted.org/packages/67/2b/9f5cea745e6c704cbbd1b6c36e0c73ca3204160e9c79234d66f140b326f6/time_machine-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3b205f91d3907f6bd1747fe5484a3ed931e121e51fec32e4d6a8ee6eb41c37c3", size = 18741, upload-time = "2025-11-21T13:55:07.288Z" },
- { url = "https://files.pythonhosted.org/packages/76/17/7b5d94a119883b56b446980387e8ab83f37037db01696cf236cbc85807dc/time_machine-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7004f2fd396b6d40cbf7c21df82f838445d899a3ed2ecc5b1fb67eea7e3d2fa8", size = 14865, upload-time = "2025-11-21T13:55:08.304Z" },
- { url = "https://files.pythonhosted.org/packages/2c/1e/a6ff6587d520ac20bf7414faf8f53cf094dd9fe450acf3b0c85e0b332c8a/time_machine-3.1.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:68c35b3450c27cf0087c90ae872ab41ec8097b76b5fb6b07321fc5873e78f152", size = 30643, upload-time = "2025-11-21T13:55:09.8Z" },
- { url = "https://files.pythonhosted.org/packages/45/50/dcf4272d7f9a4690d9edd983b5690efa8df3cc7671ade9bcf3439adac278/time_machine-3.1.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2e28aed9da0182c10cb0344aa06fac585f394768c7d088bee781ad2779ea6fe0", size = 32226, upload-time = "2025-11-21T13:55:10.927Z" },
- { url = "https://files.pythonhosted.org/packages/dc/7c/b719ae736568b3f2c9bf8d3bc65bada96b04c9241c628fcb5ab0724a6928/time_machine-3.1.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9d7faced22388578dbed3b4be0603eb00f709857cd57b7b9738cd81fbaf326a9", size = 33883, upload-time = "2025-11-21T13:55:12.32Z" },
- { url = "https://files.pythonhosted.org/packages/e5/5c/f433fe44eadecfe21e4f2bf128d240c15d295592c877490a475960644281/time_machine-3.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2e8a78dd7afc3a081e208e0b8d1a8105cff95d96e4f79da746afdf05fb822e7a", size = 32769, upload-time = "2025-11-21T13:55:13.541Z" },
- { url = "https://files.pythonhosted.org/packages/09/0f/fbc0e1437801315d4bdd8c21f9ef0c51f005a327ab0289ca034658fe78a1/time_machine-3.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8b9dce925dae420d6b66dec4cae00785260dbededec5c89eaedbfba99a2be55b", size = 30765, upload-time = "2025-11-21T13:55:14.706Z" },
- { url = "https://files.pythonhosted.org/packages/3f/c0/87d23d5817d95ed07fe272bb7a5de91177bb18274718d318c73a4aa0a4c2/time_machine-3.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:17f0d30a05073cdee68162779870eadf5e7f091bc94ae96d90d8fddbb8020714", size = 31885, upload-time = "2025-11-21T13:55:16.56Z" },
- { url = "https://files.pythonhosted.org/packages/4f/d7/572e38dadab9efe6ec4fff6e063f488866121dc384873d5b04fc5855ca83/time_machine-3.1.0-cp311-cp311-win32.whl", hash = "sha256:9e836e4fa8cb58de80de863335f4566f896b4dcd69d8a400d705857ca8301872", size = 16935, upload-time = "2025-11-21T13:55:17.612Z" },
- { url = "https://files.pythonhosted.org/packages/00/a6/edc968e1429a14d28676adb596f42570aa42def63014ccd3ccaf8d279d43/time_machine-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:1d652f85cbd2fd41e4262c27515023cc216589ca0b4aebab458972cce8119cc1", size = 17779, upload-time = "2025-11-21T13:55:18.668Z" },
- { url = "https://files.pythonhosted.org/packages/a0/97/2025eea7792f1be50777d85a2e2974d4416698c0002c419a61fcc6222de8/time_machine-3.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:4be520b8ed752e788f57f72617f12b5bf5103e3db2b062e69b82e7f7f2977c4f", size = 16496, upload-time = "2025-11-21T13:55:19.753Z" },
- { url = "https://files.pythonhosted.org/packages/39/3d/412015d3e2f682548b7222367aa8d7b91d323145234d216847bc56c2d720/time_machine-3.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d3d6606ae81a29c925452f8b56115936aeda34c0a25d40af53cf1b29c66235ef", size = 18851, upload-time = "2025-11-21T13:55:21Z" },
- { url = "https://files.pythonhosted.org/packages/33/98/48f60552570d6d66773dcfbc1d7b8fb305d3e9ae0694dd249f1ae0bc3b77/time_machine-3.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0265df307778104c44d9114b55246a5b99da03f1dcb791305f9af21b0389ef7b", size = 14955, upload-time = "2025-11-21T13:55:22.408Z" },
- { url = "https://files.pythonhosted.org/packages/4d/7a/6fd1c47d3a1d87919d38f85c12db8f838298acb4ca3d6908f3288bcea0fd/time_machine-3.1.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:098da08900bdf6c6bd44b36ec06afd5b92c7a7140cd48c8686a79b6c6fef9da7", size = 32622, upload-time = "2025-11-21T13:55:23.541Z" },
- { url = "https://files.pythonhosted.org/packages/64/74/01641bd3d5f8c4f22710b7070d1dbeaeb501e8549e37419fc8b995bead32/time_machine-3.1.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:092f2e7a2526a02fcbd9c0a4165e0790350a13da4b01b6306b82e9580d83ae69", size = 34172, upload-time = "2025-11-21T13:55:24.638Z" },
- { url = "https://files.pythonhosted.org/packages/35/df/91f39b8bfe42c67dd3e66d8d2baa2c1b10126cc6e217fb3c7b1e777804c5/time_machine-3.1.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b64b6cada6dd2433cdaeda53dd940bdd579e40a8c92c5379527694570bb58b97", size = 35554, upload-time = "2025-11-21T13:55:25.841Z" },
- { url = "https://files.pythonhosted.org/packages/b0/6c/236434de77561003429e90300327b5ac6a6eeaa6d6c967282d28d1983232/time_machine-3.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2776e3300ef499541f9081b4a03ff1b3e7681e51a594572e1bf191773504bd21", size = 34261, upload-time = "2025-11-21T13:55:27.715Z" },
- { url = "https://files.pythonhosted.org/packages/97/76/32eea75715aefbd7ccfeea70285bb5400ecebd8dc3524b9c3491115e2504/time_machine-3.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:86e10914592a95e35edeb081975b6527919300bd1b65c04ee7f765db7bf2c1ad", size = 32485, upload-time = "2025-11-21T13:55:29.55Z" },
- { url = "https://files.pythonhosted.org/packages/b0/1b/91c529de2d2c6d7097692b4ae620cbe30bf24a4609d737b5f41d91a77bb0/time_machine-3.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c1362a6672252fb0d79da492dcf75eb7369b37fe595946ee3c2848df2fcc22e7", size = 33674, upload-time = "2025-11-21T13:55:30.636Z" },
- { url = "https://files.pythonhosted.org/packages/d6/af/345dfab6543e79151867daabbc4f4788ee10e408b8bd1361d066d0fea932/time_machine-3.1.0-cp312-cp312-win32.whl", hash = "sha256:50773648c69960e6e8497077875427aeb484d6a57a06399502cc125e349fca19", size = 16962, upload-time = "2025-11-21T13:55:31.73Z" },
- { url = "https://files.pythonhosted.org/packages/7c/9d/79a4ebed58d78cad85a5fc5c4ed4558d4d018d8a2bb7858ea02704b49be7/time_machine-3.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:20e0974e58a40a626d353d6132b7595de3fcb8deb72da4a762071b315cc95f6f", size = 17723, upload-time = "2025-11-21T13:55:32.76Z" },
- { url = "https://files.pythonhosted.org/packages/9a/a2/dd405133427dc47afd95618c3519854147408ed05deb209ba1b6b704689b/time_machine-3.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:da9bced71b5966312f13c423b5b124649f1b08a9400d95018eb6d23311b384b9", size = 16520, upload-time = "2025-11-21T13:55:33.972Z" },
- { url = "https://files.pythonhosted.org/packages/c7/27/8af0187b4f7c574d7c4d7e86dbaece47ac92666fda8717c787849bc48560/time_machine-3.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:99d91fc0d4d91be1e1ea92389e1e93b0f43bf60433409616cb43de69be6505a8", size = 18911, upload-time = "2025-11-21T13:55:35.056Z" },
- { url = "https://files.pythonhosted.org/packages/92/35/a948a07659d471be160c8500c2e82ca0576a067d52d86ebe7ef24ea8e141/time_machine-3.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ff18d291b7e681af3dc2f069a182a367baee510ab9800120a487a01d2ec929a9", size = 14983, upload-time = "2025-11-21T13:55:36.414Z" },
- { url = "https://files.pythonhosted.org/packages/4a/8c/0cc16dd1d058580c00ffa685401756bd6170efe4434d418b724e96e3a0ac/time_machine-3.1.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:8fedc78cab733dfe1afeec09bd841aea314249b316eb02f17ad2e617c400fa4d", size = 32709, upload-time = "2025-11-21T13:55:37.628Z" },
- { url = "https://files.pythonhosted.org/packages/20/34/f2f162c67854be20c34ed9c49474b6abd6427108b98c3452533e60ba2526/time_machine-3.1.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:62bb78376c4a13a0463b71bc9ffd81d9e3e6ed9efdbe500716d9c51ae5a2a60c", size = 34299, upload-time = "2025-11-21T13:55:38.999Z" },
- { url = "https://files.pythonhosted.org/packages/8b/2c/314fe33e24bbc46837643d5add7a9843c7cbd4b66f355a94e98c700ddcac/time_machine-3.1.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bc750d300bd8794a0581360632e20dd5896d21b5c1c6b74c7c01c72bebd65df2", size = 35699, upload-time = "2025-11-21T13:55:40.493Z" },
- { url = "https://files.pythonhosted.org/packages/ac/28/c2f5d93024ef712897352e3fb801425325adfb3b2c33d3ba7838c8ea5941/time_machine-3.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d23623c3223318cb4b43ad07e10b682821c59b6ab99cce3d9db0f80bc2206ead", size = 34359, upload-time = "2025-11-21T13:55:41.672Z" },
- { url = "https://files.pythonhosted.org/packages/25/c6/67a6abd6ab75a6c16275cd8b5bf13053f33fac1de83a5b8e569685d37005/time_machine-3.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:48a4ad44202dcafd302310739e086374c8e928d6604bb5812b1f5e755dbcd5e1", size = 32604, upload-time = "2025-11-21T13:55:42.878Z" },
- { url = "https://files.pythonhosted.org/packages/13/c7/1277ebfbcfaea02bbf01a69beac821a6543e1829a47bda52a020b3509ba2/time_machine-3.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8b6f8572c1e674d5ae43882ee9f39a03cea86c31cf9904ff3767905d41fc5e5a", size = 33779, upload-time = "2025-11-21T13:55:44.057Z" },
- { url = "https://files.pythonhosted.org/packages/6b/39/773a7456222b391e2f0bc6d049e00d99ea78b1e226b8b36c261e1235f36d/time_machine-3.1.0-cp313-cp313-win32.whl", hash = "sha256:8743edd11d3e2cb2d0244d4e83d96873fd96a375ba75364399f2f64fd95c7ec4", size = 16984, upload-time = "2025-11-21T13:55:45.144Z" },
- { url = "https://files.pythonhosted.org/packages/e9/95/94b9a839586eae1e3afcd575d1dabf81929e44e3886ad6d94deb5e2d5bda/time_machine-3.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:3a99c68a1cc962f76709c2b67efdcf4c97c9ad4a950f694cccb413ab378f9d94", size = 17727, upload-time = "2025-11-21T13:55:46.524Z" },
- { url = "https://files.pythonhosted.org/packages/1f/fd/f1fb569e8c7547c983b4e3259ee40684b0c4fdc882f36864d5eb05d71f72/time_machine-3.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:7be2af8b69892615d897b2c6b546093e45dba7d9cde6af64c17f1e5da4f38dbc", size = 16558, upload-time = "2025-11-21T13:55:47.922Z" },
- { url = "https://files.pythonhosted.org/packages/e9/15/ccb1178e3a0988c320075285fe7b5ab26e51b71b2e5e14eee7158bd04dd6/time_machine-3.1.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:56938d4a24435014d3d9141415aee81699cf1a5419462a24357c7e3181c67f06", size = 19593, upload-time = "2025-11-21T13:55:50.114Z" },
- { url = "https://files.pythonhosted.org/packages/6a/9e/495e1ae27227cc3fc20f5d9e9011c14a3bda515f0c98630b0d0e2c444c4a/time_machine-3.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ecdd9b98305cecfbff29ad9b663f38cbcf728ff023dc1db63cc94bd439890da5", size = 15252, upload-time = "2025-11-21T13:55:51.213Z" },
- { url = "https://files.pythonhosted.org/packages/d6/03/c905659d81c6b071cd8f2a6a6a23b1e25cd2a498167125b95e543fea7cec/time_machine-3.1.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e5f0dc8ba33fcd624d11dc4029fa3fd1712f96660ddc629ce61097c71d8f6400", size = 38810, upload-time = "2025-11-21T13:55:52.379Z" },
- { url = "https://files.pythonhosted.org/packages/42/26/393277a6f07472cdb56ee2d8e34f0bdc203f64c8857180b73c4ba9cf0d91/time_machine-3.1.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:812aad79acf4b3471d997d29a5e7010f0a914740b1fe5b6cefb81b462cb28824", size = 40358, upload-time = "2025-11-21T13:55:54.077Z" },
- { url = "https://files.pythonhosted.org/packages/86/70/0cc738ba7fdaf8d29acd128a124be00c781b33e3ea84f34211f5a2cff4c2/time_machine-3.1.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5b0218aa05865a16c000320cfdac921d0e02992ef51e711325bc366bacdc4aeb", size = 43118, upload-time = "2025-11-21T13:55:55.616Z" },
- { url = "https://files.pythonhosted.org/packages/dc/5a/6c42a046abfcb8996ef3239bbc1cfd7c0051dea166a0f9f01923d1eb1848/time_machine-3.1.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3544ab394f4149e8884996f1c5047d52dbb08bb2396109c530eee6ecffd6f4c4", size = 41321, upload-time = "2025-11-21T13:55:56.869Z" },
- { url = "https://files.pythonhosted.org/packages/36/3e/1123a93add930d4933ca4f1c3441f1832eba6b9e1b41b9ca3a5d3f9203c7/time_machine-3.1.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:acf41d44e526cae2f62c9c6ac95daef42bdbd7d95bdb3bb60e071b4b61110723", size = 38547, upload-time = "2025-11-21T13:55:58.065Z" },
- { url = "https://files.pythonhosted.org/packages/d1/c8/c98855aa75a6bc49c352e00396e545353db3e5d7c65a6eefca76366d9aac/time_machine-3.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bb7031a367209c223c41ab625172b38362e0ce07f13f1f1e7d75d5194fcdd0d7", size = 39421, upload-time = "2025-11-21T13:55:59.224Z" },
- { url = "https://files.pythonhosted.org/packages/aa/94/f1520be4f125489a9d327848048688c2c13c5705770b98caac63e35cc204/time_machine-3.1.0-cp313-cp313t-win32.whl", hash = "sha256:ecf49c418f854f42171b4f0859906a26ff56d73303dee2e83beb307747e11db1", size = 17436, upload-time = "2025-11-21T13:56:00.395Z" },
- { url = "https://files.pythonhosted.org/packages/be/2f/79e13c341b20e8ceb1629fb2e1ae36063c9dee42f3886be44a54867ad0dc/time_machine-3.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ee93cf4c11452bf8211bf12a926d6f5179c241558f6af30c2de2669bf26ba1c1", size = 18505, upload-time = "2025-11-21T13:56:01.54Z" },
- { url = "https://files.pythonhosted.org/packages/72/42/0cdb0b67d44ebfa47f4dbecb65d25522312ee772f59c4d63a0df0c895f34/time_machine-3.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:9c317eebaa0578a370a29f40152db0ac00bd34b387b54c95bf01fd123bca178d", size = 16852, upload-time = "2025-11-21T13:56:02.977Z" },
- { url = "https://files.pythonhosted.org/packages/f0/14/2f9b4c6ae63662827c48d81c445fedeba4733248a56640747c8e5be55870/time_machine-3.1.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:2b07055e5327e04d725557a07a69523d14d2d897877d90781b9c27c70bd8997c", size = 18899, upload-time = "2025-11-21T13:56:04.186Z" },
- { url = "https://files.pythonhosted.org/packages/6a/c2/09a91825ea60413316ece41e448d275a9a4b719bc92b35b6166013dc01bb/time_machine-3.1.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:b71948998e3f709bda9b600b0d250bb4ad677b28fac32475b6093aa5b9e8969f", size = 15027, upload-time = "2025-11-21T13:56:05.299Z" },
- { url = "https://files.pythonhosted.org/packages/f5/65/b737258b39b98406a3ed681cdc025fa788441221c5d24a59897a4752e413/time_machine-3.1.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:343dfb0663ccb1e5d5dc6dfb651b7b7233985c73b3a3f6af0fe58c9cf5b0f4ab", size = 32798, upload-time = "2025-11-21T13:56:06.521Z" },
- { url = "https://files.pythonhosted.org/packages/e6/e0/063edd2188a5c7e8f4b1a184dc9e87de955dcfd5cd8f706131739ff0685c/time_machine-3.1.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3df9b834ec2ee8536a398c15c70f5d54dfe3bbb34344f6549ba29acf80916948", size = 34385, upload-time = "2025-11-21T13:56:07.719Z" },
- { url = "https://files.pythonhosted.org/packages/84/e8/ead05dc304f973b01443829367be3c504f3ff86c394a3fec932c4d720f3f/time_machine-3.1.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9e1ee0725a016f69fa8f0f37d793ba6d2d3870b32e164650a6922caf065f2ce2", size = 35781, upload-time = "2025-11-21T13:56:08.931Z" },
- { url = "https://files.pythonhosted.org/packages/1b/5e/23303d6b13482436d6c37015d17142821adf8e47c1104c0a4c5fc0bdb173/time_machine-3.1.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ed552f135efabca895e678044ce1dbb693e6a399003606e9d6a413b2eaf48a51", size = 34447, upload-time = "2025-11-21T13:56:10.468Z" },
- { url = "https://files.pythonhosted.org/packages/30/be/5ace5c8e2dc0b6899c3e18ebf4301211a50e1addfcbecbf61a100a76ac03/time_machine-3.1.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:c93451242de81fe2a96f699dad97aa463250688b10d2e8a72e98208df9bd62b1", size = 32814, upload-time = "2025-11-21T13:56:12.072Z" },
- { url = "https://files.pythonhosted.org/packages/40/cb/b0212b98e422fbb08f7328aabe0c6f59e853146eb61337df8f497dd4a2ad/time_machine-3.1.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:010c2dd1f084eae4687021f7b7fd798abc7a8472f2e783919aafe7b8fe624c8b", size = 33864, upload-time = "2025-11-21T13:56:13.28Z" },
- { url = "https://files.pythonhosted.org/packages/28/53/08ad68c1971257ee0c9b9ec38b1f2ef88ae7565e0c7eb272f9ca3ff40152/time_machine-3.1.0-cp314-cp314-win32.whl", hash = "sha256:9b8e24de4ba47401dcec53733d98db9678a708f6bafb77a64e46636304eca64c", size = 17127, upload-time = "2025-11-21T13:56:14.414Z" },
- { url = "https://files.pythonhosted.org/packages/0c/36/95d5b7fff7e1506f9f4a481df3b17ebae0f3ab4a36669e6a93890df1da5f/time_machine-3.1.0-cp314-cp314-win_amd64.whl", hash = "sha256:6873c903c8de85884655afc49c8465136ea5d6c7500ad2bea31601cf6a48939f", size = 18006, upload-time = "2025-11-21T13:56:15.575Z" },
- { url = "https://files.pythonhosted.org/packages/d2/c8/b30211e36117b4436368927e46dcf1f785626069b11a12cc3ea150337136/time_machine-3.1.0-cp314-cp314-win_arm64.whl", hash = "sha256:9a74b2da34e7e6aaa2db011556f40f8ea26e89a3a1683ffad43ceca1789b8af0", size = 16633, upload-time = "2025-11-21T13:56:16.706Z" },
- { url = "https://files.pythonhosted.org/packages/fe/3d/eeb00cd285102e39cc4eeeb4e78cc1fcff8a89691bdc6a708d4c40fe38cc/time_machine-3.1.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:0c688897189b50f0820e2916579c7e88aefef2a86cd17aa05b5b7a6676dbd97e", size = 19578, upload-time = "2025-11-21T13:56:17.853Z" },
- { url = "https://files.pythonhosted.org/packages/3e/91/30710e1883a4c39b1367ef469d6fd18c791bec4ee8783a19af9ac82bc632/time_machine-3.1.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1715d221c4c49bd2a0bc73868d5543133ab15e02e0d9726d73d802ccf978e1c0", size = 15299, upload-time = "2025-11-21T13:56:18.955Z" },
- { url = "https://files.pythonhosted.org/packages/d4/7f/2311774df6d41dba3934494b6589195a726fec0753a4e8e8eba28e509327/time_machine-3.1.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:8cecc83df46ab1095f93ff42dceaddb313e12efd9135cd153d0021a98b570390", size = 38801, upload-time = "2025-11-21T13:56:20.357Z" },
- { url = "https://files.pythonhosted.org/packages/1b/74/5af7e7af3787333c927d860476b505ec8770412e2bb1ba4e2d00a3aa644a/time_machine-3.1.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:be63019454e0e30138bfe414f3dd5f626d32b8a265ea99bdc4b107867b68128a", size = 40357, upload-time = "2025-11-21T13:56:21.971Z" },
- { url = "https://files.pythonhosted.org/packages/10/1a/ebcecff1e57f52788989f0734a57eab5e045c9784cfd998040b8ba280f5b/time_machine-3.1.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ac1245d6a4a4ac40e308362e4236c1aad6ead836c97576c7e29167752a5283d0", size = 43101, upload-time = "2025-11-21T13:56:23.555Z" },
- { url = "https://files.pythonhosted.org/packages/99/b3/63883e2d8555358469da098dd1568ec8f6c9b6d7317796cfbf8bc5c59ab2/time_machine-3.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:9fde90d14b13396ecaa5ce2366f788f7c01b7bf4ac4246b798c622bc6369b861", size = 41305, upload-time = "2025-11-21T13:56:24.831Z" },
- { url = "https://files.pythonhosted.org/packages/04/a3/d7851676cf7a5d5451b73f271b6b7229688f403488a8dd111b5fe5fde7cf/time_machine-3.1.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:15d83cc338d02dc6e674e24ce40a8c311d75191c86014b0be455a4267f27f00e", size = 38534, upload-time = "2025-11-21T13:56:26.032Z" },
- { url = "https://files.pythonhosted.org/packages/13/dc/ced9245bc633f0c4790a57b3c6089a586f0a208b50f8ec7d001bf8254d49/time_machine-3.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cbbd90494a9bc6626e3180594246cb9557418e32f2cb2c40edf8526a182f5e31", size = 39430, upload-time = "2025-11-21T13:56:28.83Z" },
- { url = "https://files.pythonhosted.org/packages/67/24/bb20ff76ed4e8e09ab65910bf21a315dc7562c8be92250363b25f3ab1dd1/time_machine-3.1.0-cp314-cp314t-win32.whl", hash = "sha256:6c00758d155601d710fa036c8d24d5ad3fb28531933cf70343006cf2be93092a", size = 17674, upload-time = "2025-11-21T13:56:29.969Z" },
- { url = "https://files.pythonhosted.org/packages/2e/64/42573a6da9298efd68a831d4e9eabc8c9c0cac9305bc19bb24a4066bbba0/time_machine-3.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:e00239b54b05255862e0965e6ae67728e467727ca7dc23d9a6c5a51c7b5b01c8", size = 18792, upload-time = "2025-11-21T13:56:31.123Z" },
- { url = "https://files.pythonhosted.org/packages/35/10/09ad4e5ccc27224ed8377a6f3d191034242c404d0c1ad5f119d79fb18363/time_machine-3.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:8b47da89595dc3c7f0b52f1e3f3f8da8325037f3746e66d74bebac9f42f2a989", size = 16944, upload-time = "2025-11-21T13:56:32.254Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/31/6bf41cb4a326230518d9b76c910dfc11d4fc23444d1cbfdf2d7652bd99f4/time_machine-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:68142c070e78b62215d8029ec7394905083a4f9aacb0a2a11514ce70b5951b13", size = 19447, upload-time = "2025-12-17T23:31:30.181Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/14/d71ce771712e1cbfa15d8c24452225109262b16cb6caaf967e9f60662b67/time_machine-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:161bbd0648802ffdfcb4bb297ecb26b3009684a47d3a4dedb90bc549df4fa2ad", size = 15432, upload-time = "2025-12-17T23:31:31.381Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/d6/dcb43a11f8029561996fad58ff9d3dc5e6d7f32b74f0745a2965d7e4b4f3/time_machine-3.2.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1359ba8c258be695ba69253bc84db882fd616fe69b426cc6056536da2c7bf68e", size = 32956, upload-time = "2025-12-17T23:31:32.469Z" },
+ { url = "https://files.pythonhosted.org/packages/77/da/d802cd3c335c414f9b11b479f7459aa72df5de6485c799966cfdf8856d53/time_machine-3.2.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c85b169998ca2c24a78fb214586ec11c4cad56d9c38f55ad8326235cb481c884", size = 34556, upload-time = "2025-12-17T23:31:33.946Z" },
+ { url = "https://files.pythonhosted.org/packages/85/ee/51ad553514ab0b940c7c82c6e1519dd10fd06ac07b32039a1d153ef09c88/time_machine-3.2.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:65b9367cb8a10505bc8f67da0da514ba20fa816fc47e11f434f7c60350322b4c", size = 36101, upload-time = "2025-12-17T23:31:35.462Z" },
+ { url = "https://files.pythonhosted.org/packages/11/39/938b111b5bb85a2b07502d0f9d8a704fc75bd760d62e76bce23c89ed16c9/time_machine-3.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9faca6a0f1973d7df3233c951fc2a11ff0c54df74087d8aaf41ae3deb19d0893", size = 34905, upload-time = "2025-12-17T23:31:36.543Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/50/0951f73b23e76455de0b4a3a58ac5a24bd8d10489624b1c5e03f10c6fc0b/time_machine-3.2.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:213b1ada7f385d467e598999b642eda4a8e89ae10ad5dc4f5d8f672cbf604261", size = 33012, upload-time = "2025-12-17T23:31:37.967Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/95/5304912d3dcecc4e14ed222dbe0396352efdf8497534abc3c9edd67a7528/time_machine-3.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:160b6afd94c39855af04d39c58e4cf602406abd6d79427ab80e830ea71789cfb", size = 34104, upload-time = "2025-12-17T23:31:39.449Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/1c/af56518652ec7adac4ced193b7a42c4ff354fef28a412b3b5ffa5763aead/time_machine-3.2.0-cp310-cp310-win32.whl", hash = "sha256:c15d9ac257c78c124d112e4fc91fa9f3dcb004bdda913c19f0e7368d713cf080", size = 17468, upload-time = "2025-12-17T23:31:40.432Z" },
+ { url = "https://files.pythonhosted.org/packages/48/15/0213f00ca3cf6fe1c9fdbd7fd467e801052fc85534f30c0e4684bd474190/time_machine-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:3bf0f428487f93b8fe9d27aa01eccc817885da3290b467341b4a4a795e1d1891", size = 18313, upload-time = "2025-12-17T23:31:41.617Z" },
+ { url = "https://files.pythonhosted.org/packages/77/e4/811f96aa7a634b2b264d9a476f3400e710744dda503b4ad87a5c76db32c9/time_machine-3.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:347f6be2129fcd35b1c94b9387fcb2cbe7949b1e649228c5f22949a811b78976", size = 17037, upload-time = "2025-12-17T23:31:42.924Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/e1/03aae5fbaa53859f665094af696338fc7cae733d926a024af69982712350/time_machine-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c188a9dda9fcf975022f1b325b466651b96a4dfc223c523ed7ed8d979f9bf3e8", size = 19143, upload-time = "2025-12-17T23:31:44.258Z" },
+ { url = "https://files.pythonhosted.org/packages/75/8f/98cb17bebb52b22ff4ec26984dd44280f9c71353c3bae0640a470e6683e5/time_machine-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:17245f1cc2dd13f9d63a174be59bb2684a9e5e0a112ab707e37be92068cd655f", size = 15273, upload-time = "2025-12-17T23:31:45.246Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/2f/ca11e4a7897234bb9331fcc5f4ed4714481ba4012370cc79a0ae8c42ea0a/time_machine-3.2.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d9bd1de1996e76efd36ae15970206c5089fb3728356794455bd5cd8d392b5537", size = 31049, upload-time = "2025-12-17T23:31:46.613Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/ad/d17d83a59943094e6b6c6a3743caaf6811b12203c3e07a30cc7bcc2ab7ee/time_machine-3.2.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:98493cd50e8b7f941eab69b9e18e697ad69db1a0ec1959f78f3d7b0387107e5c", size = 32632, upload-time = "2025-12-17T23:31:47.72Z" },
+ { url = "https://files.pythonhosted.org/packages/71/50/d60576d047a0dfb5638cdfb335e9c3deb6e8528544fa0b3966a8480f72b7/time_machine-3.2.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:31f2a33d595d9f91eb9bc7f157f0dc5721f5789f4c4a9e8b852cdedb2a7d9b16", size = 34289, upload-time = "2025-12-17T23:31:48.913Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/fe/4afa602dbdebddde6d0ea4a7fe849e49b9bb85dc3fb415725a87ccb4b471/time_machine-3.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9f78ac4213c10fbc44283edd1a29cfb7d3382484f4361783ddc057292aaa1889", size = 33175, upload-time = "2025-12-17T23:31:50.611Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/87/c152e23977c1d7d7c94eb3ed3ea45cc55971796205125c6fdff40db2c60f/time_machine-3.2.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c1326b09e947b360926d529a96d1d9e126ce120359b63b506ecdc6ee20755c23", size = 31170, upload-time = "2025-12-17T23:31:51.645Z" },
+ { url = "https://files.pythonhosted.org/packages/80/af/54acf51d0f3ade3b51eab73df6192937c9a938753ef5456dff65eb8630be/time_machine-3.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9f2949f03d15264cc15c38918a2cda8966001f0f4ebe190cbfd9c56d91aed8ac", size = 32292, upload-time = "2025-12-17T23:31:52.803Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/bc/3745963f36e75661a807196428639327a366f4332f35f1f775c074d4062f/time_machine-3.2.0-cp311-cp311-win32.whl", hash = "sha256:6dfe48e0499e6e16751476b9799e67be7514e6ef04cdf39571ef95a279645831", size = 17349, upload-time = "2025-12-17T23:31:54.19Z" },
+ { url = "https://files.pythonhosted.org/packages/82/a2/057469232a99d1f5a0160ae7c5bae7b095c9168b333dd598fcbcfbc1c87b/time_machine-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:809bdf267a29189c304154873620fe0bcc0c9513295fa46b19e21658231c4915", size = 18191, upload-time = "2025-12-17T23:31:55.472Z" },
+ { url = "https://files.pythonhosted.org/packages/79/d8/bf9c8de57262ee7130d92a6ed49ed6a6e40a36317e46979428d373630c12/time_machine-3.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:a3f4c17fa90f54902a3f8692c75caf67be87edc3429eeb71cb4595da58198f8e", size = 16905, upload-time = "2025-12-17T23:31:56.658Z" },
+ { url = "https://files.pythonhosted.org/packages/71/8b/080c8eedcd67921a52ba5bd0e075362062509ab63c86fc1a0442fad241a6/time_machine-3.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:cc4bee5b0214d7dc4ebc91f4a4c600f1a598e9b5606ac751f42cb6f6740b1dbb", size = 19255, upload-time = "2025-12-17T23:31:58.057Z" },
+ { url = "https://files.pythonhosted.org/packages/66/17/0e5291e9eb705bf8a5a1305f826e979af307bbeb79def4ddbf4b3f9a81e0/time_machine-3.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3ca036304b4460ae2fdc1b52dd8b1fa7cf1464daa427fc49567413c09aa839c1", size = 15360, upload-time = "2025-12-17T23:31:59.048Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/e8/9ab87b71d2e2b62463b9b058b7ae7ac09fb57f8fcd88729dec169d304340/time_machine-3.2.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5442735b41d7a2abc2f04579b4ca6047ed4698a8338a4fec92c7c9423e7938cb", size = 33029, upload-time = "2025-12-17T23:32:00.413Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/26/b5ca19da6f25ea905b3e10a0ea95d697c1aeba0404803a43c68f1af253e6/time_machine-3.2.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:97da3e971e505cb637079fb07ab0bcd36e33279f8ecac888ff131f45ef1e4d8d", size = 34579, upload-time = "2025-12-17T23:32:01.431Z" },
+ { url = "https://files.pythonhosted.org/packages/79/ca/6ac7ad5f10ea18cc1d9de49716ba38c32132c7b64532430d92ef240c116b/time_machine-3.2.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3cdda6dee4966e38aeb487309bb414c6cb23a81fc500291c77a8fcd3098832e7", size = 35961, upload-time = "2025-12-17T23:32:02.521Z" },
+ { url = "https://files.pythonhosted.org/packages/33/67/390dd958bed395ab32d79a9fe61fe111825c0dd4ded54dbba7e867f171e6/time_machine-3.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:33d9efd302a6998bcc8baa4d84f259f8a4081105bd3d7f7af7f1d0abd3b1c8aa", size = 34668, upload-time = "2025-12-17T23:32:03.585Z" },
+ { url = "https://files.pythonhosted.org/packages/da/57/c88fff034a4e9538b3ae7c68c9cfb283670b14d17522c5a8bc17d29f9a4b/time_machine-3.2.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3a0b0a33971f14145853c9bd95a6ab0353cf7e0019fa2a7aa1ae9fddfe8eab50", size = 32891, upload-time = "2025-12-17T23:32:04.656Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/70/ebbb76022dba0fec8f9156540fc647e4beae1680c787c01b1b6200e56d70/time_machine-3.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2d0be9e5f22c38082d247a2cdcd8a936504e9db60b7b3606855fb39f299e9548", size = 34080, upload-time = "2025-12-17T23:32:06.146Z" },
+ { url = "https://files.pythonhosted.org/packages/db/9a/2ca9e7af3df540dc1c79e3de588adeddb7dcc2107829248e6969c4f14167/time_machine-3.2.0-cp312-cp312-win32.whl", hash = "sha256:3f74623648b936fdce5f911caf386c0a0b579456410975de8c0dfeaaffece1d8", size = 17371, upload-time = "2025-12-17T23:32:07.164Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/ce/21d23efc9c2151939af1b7ee4e60d86d661b74ef32b8eaa148f6fe8c899c/time_machine-3.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:34e26a41d994b5e4b205136a90e9578470386749cc9a2ecf51ca18f83ce25e23", size = 18132, upload-time = "2025-12-17T23:32:08.447Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/34/c2b70be483accf6db9e5d6c3139bce3c38fe51f898ccf64e8d3fe14fbf4d/time_machine-3.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:0615d3d82c418d6293f271c348945c5091a71f37e37173653d5c26d0e74b13a8", size = 16930, upload-time = "2025-12-17T23:32:09.477Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/cd/43ad5efc88298af3c59b66769cea7f055567a85071579ed40536188530c1/time_machine-3.2.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c421a8eb85a4418a7675a41bf8660224318c46cc62e4751c8f1ceca752059090", size = 19318, upload-time = "2025-12-17T23:32:10.518Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/f6/084010ef7f4a3f38b5a4900923d7c85b29e797655c4f6ee4ce54d903cca8/time_machine-3.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8f4e758f7727d0058c4950c66b58200c187072122d6f7a98b610530a4233ea7b", size = 15390, upload-time = "2025-12-17T23:32:11.625Z" },
+ { url = "https://files.pythonhosted.org/packages/25/aa/1cabb74134f492270dc6860cb7865859bf40ecf828be65972827646e91ad/time_machine-3.2.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:154bd3f75c81f70218b2585cc12b60762fb2665c507eec5ec5037d8756d9b4e0", size = 33115, upload-time = "2025-12-17T23:32:13.219Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/03/78c5d7dfa366924eb4dbfcc3fc917c39a4280ca234b12819cc1f16c03d88/time_machine-3.2.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d50cfe5ebea422c896ad8d278af9648412b7533b8ea6adeeee698a3fd9b1d3b7", size = 34705, upload-time = "2025-12-17T23:32:14.29Z" },
+ { url = "https://files.pythonhosted.org/packages/86/93/d5e877c24541f674c6869ff6e9c56833369796010190252e92c9d7ae5f0f/time_machine-3.2.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:636576501724bd6a9124e69d86e5aef263479e89ef739c5db361469f0463a0a1", size = 36104, upload-time = "2025-12-17T23:32:15.354Z" },
+ { url = "https://files.pythonhosted.org/packages/22/1c/d4bae72f388f67efc9609f89b012e434bb19d9549c7a7b47d6c7d9e5c55d/time_machine-3.2.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:40e6f40c57197fcf7ec32d2c563f4df0a82c42cdcc3cab27f688e98f6060df10", size = 34765, upload-time = "2025-12-17T23:32:16.434Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/c3/ac378cf301d527d8dfad2f0db6bad0dfb1ab73212eaa56d6b96ee5d9d20b/time_machine-3.2.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a1bcf0b846bbfc19a79bc19e3fa04d8c7b1e8101c1b70340ffdb689cd801ea53", size = 33010, upload-time = "2025-12-17T23:32:17.532Z" },
+ { url = "https://files.pythonhosted.org/packages/06/35/7ce897319accda7a6970b288a9a8c52d25227342a7508505a2b3d235b649/time_machine-3.2.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ae55a56c179f4fe7a62575ad5148b6ed82f6c7e5cf2f9a9ec65f2f5b067db5f5", size = 34185, upload-time = "2025-12-17T23:32:18.566Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/28/f922022269749cb02eee2b62919671153c4088994fa955a6b0e50327ff81/time_machine-3.2.0-cp313-cp313-win32.whl", hash = "sha256:a66fe55a107e46916007a391d4030479df8864ec6ad6f6a6528221befc5c886e", size = 17397, upload-time = "2025-12-17T23:32:19.605Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/dc/fd87cde397f4a7bea493152f0aca8fd569ec709cad9e0f2ca7011eb8c7f7/time_machine-3.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:30c9ce57165df913e4f74e285a8ab829ff9b7aa3e5ec0973f88f642b9a7b3d15", size = 18139, upload-time = "2025-12-17T23:32:20.991Z" },
+ { url = "https://files.pythonhosted.org/packages/75/81/b8ce58233addc5d7d54d2fabc49dcbc02d79e3f079d150aa1bec3d5275ef/time_machine-3.2.0-cp313-cp313-win_arm64.whl", hash = "sha256:89cad7e179e9bdcc84dcf09efe52af232c4cc7a01b3de868356bbd59d95bd9b8", size = 16964, upload-time = "2025-12-17T23:32:22.075Z" },
+ { url = "https://files.pythonhosted.org/packages/67/e7/487f0ba5fe6c58186a5e1af2a118dfa2c160fedb37ef53a7e972d410408e/time_machine-3.2.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:59d71545e62525a4b85b6de9ab5c02ee3c61110fd7f636139914a2335dcbfc9c", size = 20000, upload-time = "2025-12-17T23:32:23.058Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/17/eb2c0054c8d44dd42df84ccd434539249a9c7d0b8eb53f799be2102500ab/time_machine-3.2.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:999672c621c35362bc28e03ca0c7df21500195540773c25993421fd8d6cc5003", size = 15657, upload-time = "2025-12-17T23:32:24.125Z" },
+ { url = "https://files.pythonhosted.org/packages/43/21/93443b5d1dd850f8bb9442e90d817a9033dcce6bfbdd3aabbb9786251c80/time_machine-3.2.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5faf7397f0580c7b9d67288522c8d7863e85f0cffadc0f1fccdb2c3dfce5783e", size = 39216, upload-time = "2025-12-17T23:32:25.542Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/9e/18544cf8acc72bb1dc03762231c82ecc259733f4bb6770a7bbe5cd138603/time_machine-3.2.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d3dd886ec49f1fa5a00e844f5947e5c0f98ce574750c24b7424c6f77fc1c3e87", size = 40764, upload-time = "2025-12-17T23:32:26.643Z" },
+ { url = "https://files.pythonhosted.org/packages/27/f7/9fe9ce2795636a3a7467307af6bdf38bb613ddb701a8a5cd50ec713beb5e/time_machine-3.2.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:da0ecd96bc7bbe450acaaabe569d84e81688f1be8ad58d1470e42371d145fb53", size = 43526, upload-time = "2025-12-17T23:32:27.693Z" },
+ { url = "https://files.pythonhosted.org/packages/03/c1/a93e975ba9dec22e87ec92d18c28e67d36bd536f9119ffa439b2892b0c9c/time_machine-3.2.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:158220e946c1c4fb8265773a0282c88c35a7e3bb5d78e3561214e3b3231166f3", size = 41727, upload-time = "2025-12-17T23:32:28.985Z" },
+ { url = "https://files.pythonhosted.org/packages/5f/fb/e3633e5a6bbed1c76bb2e9810dabc2f8467532ffcd29b9aed404b473061a/time_machine-3.2.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c1aee29bc54356f248d5d7dfdd131e12ca825e850a08c0ebdb022266d073013", size = 38952, upload-time = "2025-12-17T23:32:30.031Z" },
+ { url = "https://files.pythonhosted.org/packages/82/3d/02e9fb2526b3d6b1b45bc8e4d912d95d1cd699d1a3f6df985817d37a0600/time_machine-3.2.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c8ed2224f09d25b1c2fc98683613aca12f90f682a427eabb68fc824d27014e4a", size = 39829, upload-time = "2025-12-17T23:32:31.075Z" },
+ { url = "https://files.pythonhosted.org/packages/85/c8/c14265212436da8e0814c45463987b3f57de3eca4de023cc2eabb0c62ef3/time_machine-3.2.0-cp313-cp313t-win32.whl", hash = "sha256:3498719f8dab51da76d29a20c1b5e52ee7db083dddf3056af7fa69c1b94e1fe6", size = 17852, upload-time = "2025-12-17T23:32:32.079Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/bc/8acb13cf6149f47508097b158a9a8bec9ec4530a70cb406124e8023581f5/time_machine-3.2.0-cp313-cp313t-win_amd64.whl", hash = "sha256:e0d90bee170b219e1d15e6a58164aa808f5170090e4f090bd0670303e34181b1", size = 18918, upload-time = "2025-12-17T23:32:33.106Z" },
+ { url = "https://files.pythonhosted.org/packages/24/87/c443ee508c2708fd2514ccce9052f5e48888783ce690506919629ebc8eb0/time_machine-3.2.0-cp313-cp313t-win_arm64.whl", hash = "sha256:051de220fdb6e20d648111bbad423d9506fdbb2e44d4429cef3dc0382abf1fc2", size = 17261, upload-time = "2025-12-17T23:32:34.446Z" },
+ { url = "https://files.pythonhosted.org/packages/61/70/b4b980d126ed155c78d1879c50d60c8dcbd47bd11cb14ee7be50e0dfc07f/time_machine-3.2.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:1398980c017fe5744d66f419e0115ee48a53b00b146d738e1416c225eb610b82", size = 19303, upload-time = "2025-12-17T23:32:35.796Z" },
+ { url = "https://files.pythonhosted.org/packages/73/73/eaa33603c69a68fe2b6f54f9dd75481693d62f1d29676531002be06e2d1c/time_machine-3.2.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:4f8f4e35f4191ef70c2ab8ff490761ee9051b891afce2bf86dde3918eb7b537b", size = 15431, upload-time = "2025-12-17T23:32:37.244Z" },
+ { url = "https://files.pythonhosted.org/packages/76/10/b81e138e86cc7bab40cdb59d294b341e172201f4a6c84bb0ec080407977a/time_machine-3.2.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:6db498686ecf6163c5aa8cf0bcd57bbe0f4081184f247edf3ee49a2612b584f9", size = 33206, upload-time = "2025-12-17T23:32:38.713Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/72/4deab446b579e8bd5dca91de98595c5d6bd6a17ce162abf5c5f2ce40d3d8/time_machine-3.2.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:027c1807efb74d0cd58ad16524dec94212fbe900115d70b0123399883657ac0f", size = 34792, upload-time = "2025-12-17T23:32:40.223Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/39/439c6b587ddee76d533fe972289d0646e0a5520e14dc83d0a30aeb5565f7/time_machine-3.2.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92432610c05676edd5e6946a073c6f0c926923123ce7caee1018dc10782c713d", size = 36187, upload-time = "2025-12-17T23:32:41.705Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/db/2da4368db15180989bab83746a857bde05ad16e78f326801c142bb747a06/time_machine-3.2.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c25586b62480eb77ef3d953fba273209478e1ef49654592cd6a52a68dfe56a67", size = 34855, upload-time = "2025-12-17T23:32:42.817Z" },
+ { url = "https://files.pythonhosted.org/packages/88/84/120a431fee50bc4c241425bee4d3a4910df4923b7ab5f7dff1bf0c772f08/time_machine-3.2.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:6bf3a2fa738d15e0b95d14469a0b8ea42635467408d8b490e263d5d45c9a177f", size = 33222, upload-time = "2025-12-17T23:32:43.94Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/ea/89cfda82bb8c57ff91bb9a26751aa234d6d90e9b4d5ab0ad9dce0f9f0329/time_machine-3.2.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:ce76b82276d7ad2a66cdc85dad4df19d1422b69183170a34e8fbc4c3f35502f7", size = 34270, upload-time = "2025-12-17T23:32:45.037Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/aa/235357da4f69a51a8d35fcbfcfa77cdc7dc24f62ae54025006570bda7e2d/time_machine-3.2.0-cp314-cp314-win32.whl", hash = "sha256:14d6778273c543441863dff712cd1d7803dee946b18de35921eb8df10714539d", size = 17544, upload-time = "2025-12-17T23:32:46.099Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/51/6c8405a7276be79693b792cff22ce41067ec05db26a7d02f2d5b06324434/time_machine-3.2.0-cp314-cp314-win_amd64.whl", hash = "sha256:cbf821da96dbc80d349fa9e7c36e670b41d68a878d28c8850057992fed430eef", size = 18423, upload-time = "2025-12-17T23:32:47.468Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/03/a3cf419e20c35fc203c6e4fed48b5b667c1a2b4da456d9971e605f73ecef/time_machine-3.2.0-cp314-cp314-win_arm64.whl", hash = "sha256:71c75d71f8e68abc8b669bca26ed2ddd558430a6c171e32b8620288565f18c0e", size = 17050, upload-time = "2025-12-17T23:32:48.91Z" },
+ { url = "https://files.pythonhosted.org/packages/86/a1/142de946dc4393f910bf4564b5c3ba819906e1f49b06c9cb557519c849e4/time_machine-3.2.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:4e374779021446fc2b5c29d80457ec9a3b1a5df043dc2aae07d7c1415d52323c", size = 19991, upload-time = "2025-12-17T23:32:49.933Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/62/7f17def6289901f94726921811a16b9adce46e666362c75d45730c60274f/time_machine-3.2.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:122310a6af9c36e9a636da32830e591e7923e8a07bdd0a43276c3a36c6821c90", size = 15707, upload-time = "2025-12-17T23:32:50.969Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/d3/3502fb9bd3acb159c18844b26c43220201a0d4a622c0c853785d07699a92/time_machine-3.2.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ba3eeb0f018cc362dd8128befa3426696a2e16dd223c3fb695fde184892d4d8c", size = 39207, upload-time = "2025-12-17T23:32:52.033Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/be/8b27f4aa296fda14a5a2ad7f588ddd450603c33415ab3f8e85b2f1a44678/time_machine-3.2.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:77d38ba664b381a7793f8786efc13b5004f0d5f672dae814430445b8202a67a6", size = 40764, upload-time = "2025-12-17T23:32:53.167Z" },
+ { url = "https://files.pythonhosted.org/packages/42/cd/fe4c4e5c8ab6d48fab3624c32be9116fb120173a35fe67e482e5cf68b3d2/time_machine-3.2.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f09abeb8f03f044d72712207e0489a62098ad3ad16dac38927fcf80baca4d6a7", size = 43508, upload-time = "2025-12-17T23:32:54.597Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/28/5a3ba2fce85b97655a425d6bb20a441550acd2b304c96b2c19d3839f721a/time_machine-3.2.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:6b28367ce4f73987a55e230e1d30a57a3af85da8eb1a140074eb6e8c7e6ef19f", size = 41712, upload-time = "2025-12-17T23:32:55.781Z" },
+ { url = "https://files.pythonhosted.org/packages/81/58/e38084be7fdabb4835db68a3a47e58c34182d79fc35df1ecbe0db2c5359f/time_machine-3.2.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:903c7751c904581da9f7861c3015bed7cdc40047321291d3694a3cdc783bbca3", size = 38939, upload-time = "2025-12-17T23:32:56.867Z" },
+ { url = "https://files.pythonhosted.org/packages/40/d0/ad3feb0a392ef4e0c08bc32024950373ddc0669002cbdcbb9f3bf0c2d114/time_machine-3.2.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:528217cad85ede5f85c8bc78b0341868d3c3cfefc6ecb5b622e1cacb6c73247b", size = 39837, upload-time = "2025-12-17T23:32:58.283Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/9e/5f4b2ea63b267bd78f3245e76f5528836611b5f2d30b5e7300a722fe4428/time_machine-3.2.0-cp314-cp314t-win32.whl", hash = "sha256:75724762ffd517e7e80aaec1fad1ff5a7414bd84e2b3ee7a0bacfeb67c14926e", size = 18091, upload-time = "2025-12-17T23:32:59.403Z" },
+ { url = "https://files.pythonhosted.org/packages/39/6f/456b1f4d2700ae02b19eba830f870596a4b89b74bac3b6c80666f1b108c5/time_machine-3.2.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2526abbd053c5bca898d1b3e7898eec34626b12206718d8c7ce88fd12c1c9c5c", size = 19208, upload-time = "2025-12-17T23:33:00.488Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/22/8063101427ecd3d2652aada4d21d0876b07a3dc789125bca2ee858fec3ed/time_machine-3.2.0-cp314-cp314t-win_arm64.whl", hash = "sha256:7f2fb6784b414edbe2c0b558bfaab0c251955ba27edd62946cce4a01675a992c", size = 17359, upload-time = "2025-12-17T23:33:01.54Z" },
]
[[package]]
name = "tomli"
-version = "2.3.0"
+version = "2.4.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size = 17477, upload-time = "2026-01-11T11:22:38.165Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" },
- { url = "https://files.pythonhosted.org/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size = 148084, upload-time = "2025-10-08T22:01:01.63Z" },
- { url = "https://files.pythonhosted.org/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" },
- { url = "https://files.pythonhosted.org/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" },
- { url = "https://files.pythonhosted.org/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" },
- { url = "https://files.pythonhosted.org/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" },
- { url = "https://files.pythonhosted.org/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" },
- { url = "https://files.pythonhosted.org/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" },
- { url = "https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" },
- { url = "https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" },
- { url = "https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" },
- { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" },
- { url = "https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" },
- { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" },
- { url = "https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" },
- { url = "https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" },
- { url = "https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" },
- { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" },
- { url = "https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" },
- { url = "https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" },
- { url = "https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" },
- { url = "https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" },
- { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" },
- { url = "https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = "2025-10-08T22:01:26.153Z" },
- { url = "https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" },
- { url = "https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" },
- { url = "https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" },
- { url = "https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" },
- { url = "https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" },
- { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" },
- { url = "https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" },
- { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" },
- { url = "https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" },
- { url = "https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" },
- { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" },
- { url = "https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" },
- { url = "https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" },
- { url = "https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" },
- { url = "https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" },
- { url = "https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" },
- { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size = 153663, upload-time = "2026-01-11T11:21:45.27Z" },
+ { url = "https://files.pythonhosted.org/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size = 148469, upload-time = "2026-01-11T11:21:46.873Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size = 236039, upload-time = "2026-01-11T11:21:48.503Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size = 243007, upload-time = "2026-01-11T11:21:49.456Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size = 240875, upload-time = "2026-01-11T11:21:50.755Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size = 246271, upload-time = "2026-01-11T11:21:51.81Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size = 96770, upload-time = "2026-01-11T11:21:52.647Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size = 107626, upload-time = "2026-01-11T11:21:53.459Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size = 94842, upload-time = "2026-01-11T11:21:54.831Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size = 154894, upload-time = "2026-01-11T11:21:56.07Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size = 149053, upload-time = "2026-01-11T11:21:57.467Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size = 243481, upload-time = "2026-01-11T11:21:58.661Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size = 251720, upload-time = "2026-01-11T11:22:00.178Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size = 247014, upload-time = "2026-01-11T11:22:01.238Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size = 251820, upload-time = "2026-01-11T11:22:02.727Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size = 97712, upload-time = "2026-01-11T11:22:03.777Z" },
+ { url = "https://files.pythonhosted.org/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", size = 108296, upload-time = "2026-01-11T11:22:04.86Z" },
+ { url = "https://files.pythonhosted.org/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size = 94553, upload-time = "2026-01-11T11:22:05.854Z" },
+ { url = "https://files.pythonhosted.org/packages/34/91/7f65f9809f2936e1f4ce6268ae1903074563603b2a2bd969ebbda802744f/tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0", size = 154915, upload-time = "2026-01-11T11:22:06.703Z" },
+ { url = "https://files.pythonhosted.org/packages/20/aa/64dd73a5a849c2e8f216b755599c511badde80e91e9bc2271baa7b2cdbb1/tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e", size = 149038, upload-time = "2026-01-11T11:22:07.56Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/8a/6d38870bd3d52c8d1505ce054469a73f73a0fe62c0eaf5dddf61447e32fa/tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4", size = 242245, upload-time = "2026-01-11T11:22:08.344Z" },
+ { url = "https://files.pythonhosted.org/packages/59/bb/8002fadefb64ab2669e5b977df3f5e444febea60e717e755b38bb7c41029/tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e", size = 250335, upload-time = "2026-01-11T11:22:09.951Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/3d/4cdb6f791682b2ea916af2de96121b3cb1284d7c203d97d92d6003e91c8d/tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c", size = 245962, upload-time = "2026-01-11T11:22:11.27Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/4a/5f25789f9a460bd858ba9756ff52d0830d825b458e13f754952dd15fb7bb/tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f", size = 250396, upload-time = "2026-01-11T11:22:12.325Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/2f/b73a36fea58dfa08e8b3a268750e6853a6aac2a349241a905ebd86f3047a/tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86", size = 97530, upload-time = "2026-01-11T11:22:13.865Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/af/ca18c134b5d75de7e8dc551c5234eaba2e8e951f6b30139599b53de9c187/tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87", size = 108227, upload-time = "2026-01-11T11:22:15.224Z" },
+ { url = "https://files.pythonhosted.org/packages/22/c3/b386b832f209fee8073c8138ec50f27b4460db2fdae9ffe022df89a57f9b/tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132", size = 94748, upload-time = "2026-01-11T11:22:16.009Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/c4/84047a97eb1004418bc10bdbcfebda209fca6338002eba2dc27cc6d13563/tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6", size = 154725, upload-time = "2026-01-11T11:22:17.269Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/5d/d39038e646060b9d76274078cddf146ced86dc2b9e8bbf737ad5983609a0/tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc", size = 148901, upload-time = "2026-01-11T11:22:18.287Z" },
+ { url = "https://files.pythonhosted.org/packages/73/e5/383be1724cb30f4ce44983d249645684a48c435e1cd4f8b5cded8a816d3c/tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66", size = 243375, upload-time = "2026-01-11T11:22:19.154Z" },
+ { url = "https://files.pythonhosted.org/packages/31/f0/bea80c17971c8d16d3cc109dc3585b0f2ce1036b5f4a8a183789023574f2/tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d", size = 250639, upload-time = "2026-01-11T11:22:20.168Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/8f/2853c36abbb7608e3f945d8a74e32ed3a74ee3a1f468f1ffc7d1cb3abba6/tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702", size = 246897, upload-time = "2026-01-11T11:22:21.544Z" },
+ { url = "https://files.pythonhosted.org/packages/49/f0/6c05e3196ed5337b9fe7ea003e95fd3819a840b7a0f2bf5a408ef1dad8ed/tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8", size = 254697, upload-time = "2026-01-11T11:22:23.058Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/f5/2922ef29c9f2951883525def7429967fc4d8208494e5ab524234f06b688b/tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776", size = 98567, upload-time = "2026-01-11T11:22:24.033Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/31/22b52e2e06dd2a5fdbc3ee73226d763b184ff21fc24e20316a44ccc4d96b/tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475", size = 108556, upload-time = "2026-01-11T11:22:25.378Z" },
+ { url = "https://files.pythonhosted.org/packages/48/3d/5058dff3255a3d01b705413f64f4306a141a8fd7a251e5a495e3f192a998/tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2", size = 96014, upload-time = "2026-01-11T11:22:26.138Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/4e/75dab8586e268424202d3a1997ef6014919c941b50642a1682df43204c22/tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9", size = 163339, upload-time = "2026-01-11T11:22:27.143Z" },
+ { url = "https://files.pythonhosted.org/packages/06/e3/b904d9ab1016829a776d97f163f183a48be6a4deb87304d1e0116a349519/tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0", size = 159490, upload-time = "2026-01-11T11:22:28.399Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/5a/fc3622c8b1ad823e8ea98a35e3c632ee316d48f66f80f9708ceb4f2a0322/tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df", size = 269398, upload-time = "2026-01-11T11:22:29.345Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/33/62bd6152c8bdd4c305ad9faca48f51d3acb2df1f8791b1477d46ff86e7f8/tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d", size = 276515, upload-time = "2026-01-11T11:22:30.327Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/ff/ae53619499f5235ee4211e62a8d7982ba9e439a0fb4f2f351a93d67c1dd2/tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f", size = 273806, upload-time = "2026-01-11T11:22:32.56Z" },
+ { url = "https://files.pythonhosted.org/packages/47/71/cbca7787fa68d4d0a9f7072821980b39fbb1b6faeb5f5cf02f4a5559fa28/tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b", size = 281340, upload-time = "2026-01-11T11:22:33.505Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/00/d595c120963ad42474cf6ee7771ad0d0e8a49d0f01e29576ee9195d9ecdf/tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087", size = 108106, upload-time = "2026-01-11T11:22:34.451Z" },
+ { url = "https://files.pythonhosted.org/packages/de/69/9aa0c6a505c2f80e519b43764f8b4ba93b5a0bbd2d9a9de6e2b24271b9a5/tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd", size = 120504, upload-time = "2026-01-11T11:22:35.764Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/9f/f1668c281c58cfae01482f7114a4b88d345e4c140386241a1a24dcc9e7bc/tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4", size = 99561, upload-time = "2026-01-11T11:22:36.624Z" },
+ { url = "https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477, upload-time = "2026-01-11T11:22:37.446Z" },
]
[[package]]
From 3e16d9887c409d153e70df9c1190e33eb5b585e6 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 10 Feb 2026 06:32:03 +0000
Subject: [PATCH 16/23] fix(api): narrow types
---
.stats.yml | 8 +-
api.md | 13 +
src/dedalus_labs/_client.py | 39 +-
src/dedalus_labs/resources/__init__.py | 14 +
src/dedalus_labs/resources/audio/speech.py | 42 +-
.../resources/chat/completions.py | 557 ++++++++++-----
src/dedalus_labs/resources/embeddings.py | 18 +-
src/dedalus_labs/resources/responses.py | 675 ++++++++++++++++++
src/dedalus_labs/types/__init__.py | 7 +-
.../types/audio/speech_create_params.py | 32 +-
.../audio/transcription_create_response.py | 12 +-
.../audio/translation_create_response.py | 12 +-
src/dedalus_labs/types/chat/audio_param.py | 2 +-
.../types/chat/chat_completion.py | 86 ++-
...chat_completion_assistant_message_param.py | 2 +-
.../types/chat/chat_completion_audio_param.py | 26 +-
.../types/chat/chat_completion_chunk.py | 4 +-
...chat_completion_content_part_file_param.py | 3 +-
...hat_completion_content_part_image_param.py | 4 +-
...mpletion_content_part_input_audio_param.py | 2 +-
...chat_completion_content_part_text_param.py | 3 +-
.../chat/chat_completion_functions_param.py | 3 +-
.../types/chat/chat_completion_message.py | 7 +-
.../types/chat/chat_completion_tool_param.py | 14 +-
.../types/chat/completion_create_params.py | 195 ++---
.../types/create_embedding_response.py | 2 +-
.../types/embedding_create_params.py | 11 +-
src/dedalus_labs/types/model.py | 4 +-
src/dedalus_labs/types/response.py | 145 ++++
.../types/response_create_params.py | 316 ++++++++
src/dedalus_labs/types/shared/__init__.py | 1 +
.../types/shared/function_definition.py | 39 +-
.../types/shared/mcp_tool_result.py | 12 +-
.../shared/response_format_json_schema.py | 4 +-
.../types/shared/voice_ids_or_custom_voice.py | 16 +
.../types/shared_params/__init__.py | 1 +
.../shared_params/function_definition.py | 38 +-
.../response_format_json_schema.py | 4 +-
.../voice_ids_or_custom_voice.py | 18 +
tests/api_resources/chat/test_completions.py | 52 +-
tests/api_resources/test_responses.py | 188 +++++
41 files changed, 2098 insertions(+), 533 deletions(-)
create mode 100644 src/dedalus_labs/resources/responses.py
create mode 100644 src/dedalus_labs/types/response.py
create mode 100644 src/dedalus_labs/types/response_create_params.py
create mode 100644 src/dedalus_labs/types/shared/voice_ids_or_custom_voice.py
create mode 100644 src/dedalus_labs/types/shared_params/voice_ids_or_custom_voice.py
create mode 100644 tests/api_resources/test_responses.py
diff --git a/.stats.yml b/.stats.yml
index 0d4b32e..9fe1fff 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 11
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/dedalus-labs%2Fdedalus-sdk-2158e2dd12dc5bc533e872e1fa4a9bd1627c2f15b0e417aa4645554e045d7054.yml
-openapi_spec_hash: 30d4d077bf498b7634b3e14deb9d0a1d
-config_hash: c520c6ec7d9767224157d03e00c54985
+configured_endpoints: 12
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/dedalus-labs%2Fdedalus-sdk-3330faa66a188880e45bcc35fa1b899365424c079dfd79b45c4da710eeccab10.yml
+openapi_spec_hash: 542937ce78a5c4ee4dbe54ab837151d2
+config_hash: 2b25f3d3742dd0d7790fd4339f500a29
diff --git a/api.md b/api.md
index 4a5e7e1..b610253 100644
--- a/api.md
+++ b/api.md
@@ -18,6 +18,7 @@ from dedalus_labs.types import (
ResponseFormatJSONSchema,
ResponseFormatText,
ToolChoice,
+ VoiceIDsOrCustomVoice,
)
```
@@ -104,6 +105,18 @@ Methods:
- client.ocr.process(\*\*params) -> OCRResponse
+# Responses
+
+Types:
+
+```python
+from dedalus_labs.types import Response, ResponseCreateParams
+```
+
+Methods:
+
+- client.responses.create(\*\*params) -> Response
+
# Chat
## Completions
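For orientation, a minimal usage sketch of the new resource as api.md now documents it. The client class name (`Dedalus`), the environment-based auth, and the `model`/`input` parameters are all assumptions not shown in this patch; the real field set lives in `response_create_params.py`.
```python
# Illustrative sketch, not part of the patch. Assumes the package exports a
# `Dedalus` client that reads its API key from the environment; the `model`
# and `input` parameter names are hypothetical, see response_create_params.py
# for the actual fields.
from dedalus_labs import Dedalus

client = Dedalus()

# The patch wires `client.responses` up alongside chat, audio, images, etc.
response = client.responses.create(
    model="gpt-5.1",       # hypothetical model id
    input="Say hello.",    # hypothetical parameter name
)
print(response)
```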
diff --git a/src/dedalus_labs/_client.py b/src/dedalus_labs/_client.py
index 03c0ee4..708dac5 100644
--- a/src/dedalus_labs/_client.py
+++ b/src/dedalus_labs/_client.py
@@ -34,11 +34,12 @@
from .lib.mcp import prepare_mcp_request, prepare_mcp_request_sync
if TYPE_CHECKING:
- from .resources import ocr, chat, audio, images, models, embeddings
+ from .resources import ocr, chat, audio, images, models, responses, embeddings
from .resources.ocr import OCRResource, AsyncOCRResource
from .resources.images import ImagesResource, AsyncImagesResource
from .resources.models import ModelsResource, AsyncModelsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
+ from .resources.responses import ResponsesResource, AsyncResponsesResource
from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource
from .resources.audio.audio import AudioResource, AsyncAudioResource
@@ -218,6 +219,12 @@ def ocr(self) -> OCRResource:
return OCRResource(self)
+ @cached_property
+ def responses(self) -> ResponsesResource:
+ from .resources.responses import ResponsesResource
+
+ return ResponsesResource(self)
+
@cached_property
def chat(self) -> ChatResource:
from .resources.chat import ChatResource
@@ -539,6 +546,12 @@ def ocr(self) -> AsyncOCRResource:
return AsyncOCRResource(self)
+ @cached_property
+ def responses(self) -> AsyncResponsesResource:
+ from .resources.responses import AsyncResponsesResource
+
+ return AsyncResponsesResource(self)
+
@cached_property
def chat(self) -> AsyncChatResource:
from .resources.chat import AsyncChatResource
@@ -738,6 +751,12 @@ def ocr(self) -> ocr.OCRResourceWithRawResponse:
return OCRResourceWithRawResponse(self._client.ocr)
+ @cached_property
+ def responses(self) -> responses.ResponsesResourceWithRawResponse:
+ from .resources.responses import ResponsesResourceWithRawResponse
+
+ return ResponsesResourceWithRawResponse(self._client.responses)
+
@cached_property
def chat(self) -> chat.ChatResourceWithRawResponse:
from .resources.chat import ChatResourceWithRawResponse
@@ -781,6 +800,12 @@ def ocr(self) -> ocr.AsyncOCRResourceWithRawResponse:
return AsyncOCRResourceWithRawResponse(self._client.ocr)
+ @cached_property
+ def responses(self) -> responses.AsyncResponsesResourceWithRawResponse:
+ from .resources.responses import AsyncResponsesResourceWithRawResponse
+
+ return AsyncResponsesResourceWithRawResponse(self._client.responses)
+
@cached_property
def chat(self) -> chat.AsyncChatResourceWithRawResponse:
from .resources.chat import AsyncChatResourceWithRawResponse
@@ -824,6 +849,12 @@ def ocr(self) -> ocr.OCRResourceWithStreamingResponse:
return OCRResourceWithStreamingResponse(self._client.ocr)
+ @cached_property
+ def responses(self) -> responses.ResponsesResourceWithStreamingResponse:
+ from .resources.responses import ResponsesResourceWithStreamingResponse
+
+ return ResponsesResourceWithStreamingResponse(self._client.responses)
+
@cached_property
def chat(self) -> chat.ChatResourceWithStreamingResponse:
from .resources.chat import ChatResourceWithStreamingResponse
@@ -867,6 +898,12 @@ def ocr(self) -> ocr.AsyncOCRResourceWithStreamingResponse:
return AsyncOCRResourceWithStreamingResponse(self._client.ocr)
+ @cached_property
+ def responses(self) -> responses.AsyncResponsesResourceWithStreamingResponse:
+ from .resources.responses import AsyncResponsesResourceWithStreamingResponse
+
+ return AsyncResponsesResourceWithStreamingResponse(self._client.responses)
+
@cached_property
def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
from .resources.chat import AsyncChatResourceWithStreamingResponse
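Each `cached_property` block above follows the same pattern: the submodule import is deferred until first attribute access, and the constructed resource is memoized. A self-contained sketch of the idea, with illustrative names:
```python
from functools import cached_property


class ResponsesResource:
    """Stand-in for the real resource class; holds the owning client."""

    def __init__(self, client: "Client") -> None:
        self._client = client


class Client:
    @cached_property
    def responses(self) -> ResponsesResource:
        # In the SDK, the `from .resources.responses import ...` line lives
        # here, so the submodule is only imported when first accessed;
        # cached_property then returns the same instance on every later read.
        return ResponsesResource(self)


client = Client()
assert client.responses is client.responses  # constructed once, then cached
```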
diff --git a/src/dedalus_labs/resources/__init__.py b/src/dedalus_labs/resources/__init__.py
index 8d5f63f..8db2b20 100644
--- a/src/dedalus_labs/resources/__init__.py
+++ b/src/dedalus_labs/resources/__init__.py
@@ -40,6 +40,14 @@
ModelsResourceWithStreamingResponse,
AsyncModelsResourceWithStreamingResponse,
)
+from .responses import (
+ ResponsesResource,
+ AsyncResponsesResource,
+ ResponsesResourceWithRawResponse,
+ AsyncResponsesResourceWithRawResponse,
+ ResponsesResourceWithStreamingResponse,
+ AsyncResponsesResourceWithStreamingResponse,
+)
from .embeddings import (
EmbeddingsResource,
AsyncEmbeddingsResource,
@@ -80,6 +88,12 @@
"AsyncOCRResourceWithRawResponse",
"OCRResourceWithStreamingResponse",
"AsyncOCRResourceWithStreamingResponse",
+ "ResponsesResource",
+ "AsyncResponsesResource",
+ "ResponsesResourceWithRawResponse",
+ "AsyncResponsesResourceWithRawResponse",
+ "ResponsesResourceWithStreamingResponse",
+ "AsyncResponsesResourceWithStreamingResponse",
"ChatResource",
"AsyncChatResource",
"ChatResourceWithRawResponse",
diff --git a/src/dedalus_labs/resources/audio/speech.py b/src/dedalus_labs/resources/audio/speech.py
index 67b033c..e43172e 100644
--- a/src/dedalus_labs/resources/audio/speech.py
+++ b/src/dedalus_labs/resources/audio/speech.py
@@ -51,10 +51,8 @@ def create(
self,
*,
input: str,
- model: Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]],
- voice: Union[
- str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]
- ],
+ model: Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts", "gpt-4o-mini-tts-2025-12-15"]],
+ voice: speech_create_params.Voice,
instructions: str | Omit = omit,
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | Omit = omit,
speed: float | Omit = omit,
@@ -79,14 +77,15 @@ def create(
Args:
input: The text to generate audio for. The maximum length is 4096 characters.
- model:
- One of the available [TTS models](https://platform.openai.com/docs/models#tts):
- `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+ model: One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd`,
+ `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
- voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
- `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
- `verse`. Previews of the voices are available in the
- [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
+ voice: The voice to use when generating the audio. Supported built-in voices are
+ `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
+ `shimmer`, `verse`, `marin`, and `cedar`. You may also provide a custom voice
+ object with an `id`, for example `{ "id": "voice_1234" }`. Previews of the
+ voices are available in the
+ [Text to speech guide](/docs/guides/text-to-speech#voice-options).
instructions: Control the voice of your generated audio with additional instructions. Does not
work with `tts-1` or `tts-1-hd`.
@@ -160,10 +159,8 @@ async def create(
self,
*,
input: str,
- model: Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]],
- voice: Union[
- str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]
- ],
+ model: Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts", "gpt-4o-mini-tts-2025-12-15"]],
+ voice: speech_create_params.Voice,
instructions: str | Omit = omit,
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | Omit = omit,
speed: float | Omit = omit,
@@ -188,14 +185,15 @@ async def create(
Args:
input: The text to generate audio for. The maximum length is 4096 characters.
- model:
- One of the available [TTS models](https://platform.openai.com/docs/models#tts):
- `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+ model: One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd`,
+ `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
- voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
- `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
- `verse`. Previews of the voices are available in the
- [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
+ voice: The voice to use when generating the audio. Supported built-in voices are
+ `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
+ `shimmer`, `verse`, `marin`, and `cedar`. You may also provide a custom voice
+ object with an `id`, for example `{ "id": "voice_1234" }`. Previews of the
+ voices are available in the
+ [Text to speech guide](/docs/guides/text-to-speech#voice-options).
instructions: Control the voice of your generated audio with additional instructions. Does not
work with `tts-1` or `tts-1-hd`.
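Per the updated docstring, `voice` now accepts either a built-in voice name or a custom voice object carrying an `id`. A sketch of both forms, assuming a configured client as in the earlier sketch (the `voice_1234` id is the docstring's own example):
```python
# Built-in voice, passed as a plain string.
client.audio.speech.create(
    model="gpt-4o-mini-tts",
    input="Hello there.",
    voice="marin",
)

# Custom voice, passed as an object with an `id`, which is the second form
# the new VoiceIdsOrCustomVoice type admits per the docstring above.
client.audio.speech.create(
    model="gpt-4o-mini-tts-2025-12-15",
    input="Hello there.",
    voice={"id": "voice_1234"},
)
```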
diff --git a/src/dedalus_labs/resources/chat/completions.py b/src/dedalus_labs/resources/chat/completions.py
index aeca9c7..e168d64 100644
--- a/src/dedalus_labs/resources/chat/completions.py
+++ b/src/dedalus_labs/resources/chat/completions.py
@@ -34,6 +34,7 @@
AsyncChatCompletionStreamManager,
)
from ...types.chat.prediction_content_param import PredictionContentParam
+from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam
from ...types.shared_params.json_object_input import JSONObjectInput
from ...types.chat.chat_completion_audio_param import ChatCompletionAudioParam
from ...types.chat.chat_completion_functions_param import ChatCompletionFunctionsParam
@@ -70,14 +71,18 @@ def create(
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
automatic_tool_execution: bool | Omit = omit,
cached_content: Optional[str] | Omit = omit,
+ correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
+ deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
generation_config: Optional[JSONObjectInput] | Omit = omit,
guardrails: Optional[Iterable[Dict[str, object]]] | Omit = omit,
handoff_config: Optional[Dict[str, object]] | Omit = omit,
+ handoff_mode: Optional[bool] | Omit = omit,
+ inference_geo: Optional[str] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
@@ -89,6 +94,7 @@ def create(
modalities: Optional[SequenceNotStr[str]] | Omit = omit,
model_attributes: Optional[Dict[str, Dict[str, float]]] | Omit = omit,
n: Optional[int] | Omit = omit,
+ output_config: Optional[JSONObjectInput] | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
prediction: Optional[PredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
@@ -103,6 +109,7 @@ def create(
search_parameters: Optional[JSONObjectInput] | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[str] | Omit = omit,
+ speed: Optional[Literal["standard", "fast"]] | Omit = omit,
stop: Union[SequenceNotStr[str], str, None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream: Optional[Literal[False]] | Omit = omit,
@@ -112,7 +119,7 @@ def create(
thinking: Optional[completion_create_params.Thinking] | Omit = omit,
tool_choice: Optional[completion_create_params.ToolChoice] | Omit = omit,
tool_config: Optional[JSONObjectInput] | Omit = omit,
- tools: Optional[Iterable[completion_create_params.Tool]] | Omit = omit,
+ tools: Optional[Iterable[ChatCompletionToolParam]] | Omit = omit,
top_k: Optional[int] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
@@ -136,7 +143,8 @@ def create(
Headers:
- Authorization: bearer key for the calling account.
- - Optional BYOK or provider headers if applicable.
+ - X-Provider / X-Provider-Key: optional headers for using your own provider API
+ key.
Behavior:
@@ -187,12 +195,11 @@ def create(
agent_attributes: Agent attributes. Values in [0.0, 1.0].
audio: Parameters for audio output. Required when audio output is requested with
- `modalities: ["audio"]`.
- [Learn more](https://platform.openai.com/docs/guides/audio).
+ `modalities: ["audio"]`. [Learn more](/docs/guides/audio).
Fields:
- - voice (required): VoiceIdsShared
+ - voice (required): VoiceIdsOrCustomVoice
- format (required): Literal["wav", "aac", "mp3", "flac", "opus", "pcm16"]
automatic_tool_execution: Execute tools server-side. If false, returns raw tool calls for manual handling.
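The documented `audio` fields (a required `voice`, now typed `VoiceIdsOrCustomVoice`, plus a required `format`) translate to a call shaped like the sketch below; the model name comes from the `modalities` docstring further down, and the message content is illustrative:
```python
completion = client.chat.completions.create(
    model="gpt-4o-audio-preview",
    modalities=["text", "audio"],
    # Both fields are required per the docstring; voice may be a built-in
    # name or a custom-voice object, as with audio.speech above.
    audio={"voice": "alloy", "format": "mp3"},
    messages=[{"role": "user", "content": "Read this aloud."}],
)
```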
@@ -201,17 +208,29 @@ def create(
[cached](https://ai.google.dev/gemini-api/docs/caching) to use as context to
serve the prediction. Format: `cachedContents/{cachedContent}`
+ correlation_id: Stable session ID for resuming a previous handoff. Returned by the server on
+ handoff; echo it on the next request to resume.
+
credentials: Credentials for MCP server authentication. Each credential is matched to servers
by connection name.
deferred: If set to `true`, the request returns a `request_id`. You can then get the
deferred response by GET `/v1/chat/deferred-completion/{request_id}`.
+ deferred_calls: Tier 2 stateless resumption. Deferred tool specs from a previous handoff
+ response, sent back verbatim so the server can resume without Redis.
+
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- function_call: Wrapper for union variant: function call mode.
+ function_call: Deprecated in favor of `tool_choice`. Controls which (if any) function is called
+ by the model. `none` means the model will not call a function and instead
+ generates a message. `auto` means the model can pick between generating a
+ message or calling a function. Specifying a particular function via
+ `{"name": "my_function"}` forces the model to call that function. `none` is the
+ default when no functions are present. `auto` is the default if functions are
+ present.
functions: Deprecated in favor of `tools`. A list of functions the model may generate JSON
inputs for.
@@ -222,6 +241,12 @@ def create(
handoff_config: Configuration for multi-model handoffs.
+ handoff_mode: Handoff control. None or omitted: auto-detect. true: structured handoff (SDK).
+ false: drop-in (LLM re-run for mixed turns).
+
+ inference_geo: Specifies the geographic region for inference processing. If not specified, the
+ workspace's `default_inference_geo` is used.
+
logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a
JSON object that maps tokens (specified by their token ID in the tokenizer) to
an associated bias value from -100 to 100. Mathematically, the bias is added to
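Read together, the new handoff parameters sketch a resume protocol: the server returns a `correlation_id` on handoff, echoing it resumes statefully (Tier 1), sending the deferred tool specs back via `deferred_calls` resumes statelessly (Tier 2), and `handoff_mode` forces or suppresses the structured path. A rough sketch follows; the response-side attribute names are assumptions, since the diff documents only the request parameters:
```python
first = client.chat.completions.create(
    model="gpt-5.1",
    messages=[{"role": "user", "content": "Start a multi-model task."}],
    handoff_mode=True,  # force structured handoff rather than auto-detect
)

# Tier 1: stateful resume, echoing the server-issued session id.
# (`correlation_id` as a *response* attribute is an assumption.)
client.chat.completions.create(
    model="gpt-5.1",
    messages=[{"role": "user", "content": "Continue."}],
    correlation_id=getattr(first, "correlation_id", None),
)

# Tier 2: stateless resume, sending the deferred tool specs back verbatim
# so the server can pick up without session storage.
client.chat.completions.create(
    model="gpt-5.1",
    messages=[{"role": "user", "content": "Continue."}],
    deferred_calls=getattr(first, "deferred_calls", None),
)
```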
@@ -253,8 +278,7 @@ def create(
modalities: Output types that you would like the model to generate. Most models are capable
of generating text, which is the default: `["text"]` The `gpt-4o-audio-preview`
- model can also be used to
- [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+ model can also be used to [generate audio](/docs/guides/audio). To request that
this model generate both text and audio responses, you can use:
`["text", "audio"]`
@@ -265,7 +289,7 @@ def create(
you will be charged based on the number of generated tokens across all of the
choices. Keep `n` as `1` to minimize costs.
- parallel_tool_calls: Whether to enable parallel tool calls (Anthropic uses inverted polarity)
+ parallel_tool_calls: Whether to enable parallel tool calls (Anthropic uses inverted polarity).
prediction: Static predicted output content, such as the content of a text file that is
being regenerated.
@@ -282,35 +306,34 @@ def create(
talk about new topics.
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
- hit rates. Replaces the `user` field.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ hit rates. Replaces the `user` field. [Learn more](/docs/guides/prompt-caching).
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
prompt caching, which keeps cached prefixes active for longer, up to a maximum
- of 24 hours.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ of 24 hours. [Learn more](/docs/guides/prompt-caching#prompt-cache-retention).
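# A minimal sketch of the caching knobs above: a stable per-end-user cache key
# (replacing `user`) plus extended 24-hour retention. The key value is a
# placeholder.
from dedalus_labs import Dedalus

client = Dedalus()
completion = client.chat.completions.create(
    model="example-model",
    messages=[{"role": "user", "content": "hi"}],
    prompt_cache_key="user-1234-hashed",  # hash end-user ids; avoid raw PII
    prompt_cache_retention="24h",         # keep cached prefixes up to 24 hours
)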
prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to
`reasoning` the system prompt for reasoning models will be used.
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- reasoning effort can result in faster responses and fewer tokens used on
- reasoning in a response. - `gpt-5.1` defaults to `none`, which does not perform
- reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`,
- `medium`, and `high`. Tool calls are supported for all reasoning values in
- gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort, and
- do not support `none`. - The `gpt-5-pro` model defaults to (and only supports)
- `high` reasoning effort.
+ supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ Reducing reasoning effort can result in faster responses and fewer tokens used
+ on reasoning in a response. - `gpt-5.1` defaults to `none`, which does not
+ perform reasoning. The supported reasoning values for `gpt-5.1` are `none`,
+ `low`, `medium`, and `high`. Tool calls are supported for all reasoning values
+ in gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort,
+ and do not support `none`. - The `gpt-5-pro` model defaults to (and only
+ supports) `high` reasoning effort. - `xhigh` is supported for all models after
+ `gpt-5.1-codex-max`.
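# A minimal sketch matching the rules above: gpt-5.1 defaults to "none", so
# reasoning must be requested explicitly, and "xhigh" only applies to models
# after gpt-5.1-codex-max.
from dedalus_labs import Dedalus

client = Dedalus()
completion = client.chat.completions.create(
    model="gpt-5.1",
    messages=[{"role": "user", "content": "Prove sqrt(2) is irrational."}],
    reasoning_effort="high",  # gpt-5.1 accepts none/low/medium/high
)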
response_format: An object specifying the format that the model must output. Setting to
`{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
which ensures the model will match your supplied JSON schema. Learn more in the
- [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
+ [Structured Outputs guide](/docs/guides/structured-outputs). Setting to
+ `{ "type": "json_object" }` enables the older JSON mode, which ensures the
+ message the model generates is valid JSON. Using `json_schema` is preferred for
+ models that support it.
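# A minimal sketch of Structured Outputs via `json_schema`, per the docstring
# above; the schema itself is a made-up example.
from dedalus_labs import Dedalus

client = Dedalus()
completion = client.chat.completions.create(
    model="example-model",
    messages=[{"role": "user", "content": "Which city is mentioned: 'I love Oslo'?"}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "city_extraction",
            "schema": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
                "additionalProperties": False,
            },
        },
    },
)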
safe_prompt: Whether to inject a safety prompt before all conversations.
@@ -318,7 +341,7 @@ def create(
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
safety_settings: Safety/content filtering settings (Google-specific)
@@ -329,12 +352,15 @@ def create(
service_tier: Service tier for request processing
+ speed: The inference speed mode for this request. `"fast"` enables high
+ output-tokens-per-second inference.
+
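# A minimal sketch of the new `speed` parameter above, typed below as
# Literal["standard", "fast"].
from dedalus_labs import Dedalus

client = Dedalus()
completion = client.chat.completions.create(
    model="example-model",
    messages=[{"role": "user", "content": "hi"}],
    speed="fast",  # high output-tokens-per-second inference
)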
stop: Sequences that stop generation
store: Whether or not to store the output of this chat completion request for use in
- our [model distillation](https://platform.openai.com/docs/guides/distillation)
- or [evals](https://platform.openai.com/docs/guides/evals) products. Supports
- text and image inputs. Note: image inputs over 8MB will be dropped.
+ our [model distillation](/docs/guides/distillation) or
+ [evals](/docs/guides/evals) products. Supports text and image inputs. Note:
+ image inputs over 8MB will be dropped.
stream: Enable streaming response
@@ -370,15 +396,14 @@ def create(
`prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
verbosity: Constrains the verbosity of the model's response. Lower values will result in
more concise responses, while higher values will result in more verbose
responses. Currently supported values are `low`, `medium`, and `high`.
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
- about the
- [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+ about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).
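# A minimal sketch of the web search tool above. An empty options object is
# shown as the smallest form; the accepted fields are not listed in this
# patch.
from dedalus_labs import Dedalus

client = Dedalus()
completion = client.chat.completions.create(
    model="example-model",
    messages=[{"role": "user", "content": "What happened in tech news today?"}],
    web_search_options={},
)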
extra_headers: Send extra headers
@@ -402,14 +427,18 @@ def create(
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
automatic_tool_execution: bool | Omit = omit,
cached_content: Optional[str] | Omit = omit,
+ correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
+ deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
generation_config: Optional[JSONObjectInput] | Omit = omit,
guardrails: Optional[Iterable[Dict[str, object]]] | Omit = omit,
handoff_config: Optional[Dict[str, object]] | Omit = omit,
+ handoff_mode: Optional[bool] | Omit = omit,
+ inference_geo: Optional[str] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
@@ -421,6 +450,7 @@ def create(
modalities: Optional[SequenceNotStr[str]] | Omit = omit,
model_attributes: Optional[Dict[str, Dict[str, float]]] | Omit = omit,
n: Optional[int] | Omit = omit,
+ output_config: Optional[JSONObjectInput] | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
prediction: Optional[PredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
@@ -435,6 +465,7 @@ def create(
search_parameters: Optional[JSONObjectInput] | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[str] | Omit = omit,
+ speed: Optional[Literal["standard", "fast"]] | Omit = omit,
stop: Union[SequenceNotStr[str], str, None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream_options: Optional[JSONObjectInput] | Omit = omit,
@@ -443,7 +474,7 @@ def create(
thinking: Optional[completion_create_params.Thinking] | Omit = omit,
tool_choice: Optional[completion_create_params.ToolChoice] | Omit = omit,
tool_config: Optional[JSONObjectInput] | Omit = omit,
- tools: Optional[Iterable[completion_create_params.Tool]] | Omit = omit,
+ tools: Optional[Iterable[ChatCompletionToolParam]] | Omit = omit,
top_k: Optional[int] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
@@ -467,7 +498,8 @@ def create(
Headers:
- Authorization: bearer key for the calling account.
- - Optional BYOK or provider headers if applicable.
+ - X-Provider / X-Provider-Key: optional headers for using your own provider API
+ key.
Behavior:
@@ -520,12 +552,11 @@ def create(
agent_attributes: Agent attributes. Values in [0.0, 1.0].
audio: Parameters for audio output. Required when audio output is requested with
- `modalities: ["audio"]`.
- [Learn more](https://platform.openai.com/docs/guides/audio).
+ `modalities: ["audio"]`. [Learn more](/docs/guides/audio).
Fields:
- - voice (required): VoiceIdsShared
+ - voice (required): VoiceIdsOrCustomVoice
- format (required): Literal["wav", "aac", "mp3", "flac", "opus", "pcm16"]
automatic_tool_execution: Execute tools server-side. If false, returns raw tool calls for manual handling.
@@ -534,17 +565,29 @@ def create(
[cached](https://ai.google.dev/gemini-api/docs/caching) to use as context to
serve the prediction. Format: `cachedContents/{cachedContent}`
+ correlation_id: Stable session ID for resuming a previous handoff. Returned by the server on
+ handoff; echo it on the next request to resume.
+
credentials: Credentials for MCP server authentication. Each credential is matched to servers
by connection name.
deferred: If set to `true`, the request returns a `request_id`. You can then get the
deferred response by GET `/v1/chat/deferred-completion/{request_id}`.
+ deferred_calls: Tier 2 stateless resumption. Deferred tool specs from a previous handoff
+ response, sent back verbatim so the server can resume without Redis.
+
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- function_call: Wrapper for union variant: function call mode.
+ function_call: Deprecated in favor of `tool_choice`. Controls which (if any) function is called
+ by the model. `none` means the model will not call a function and instead
+ generates a message. `auto` means the model can pick between generating a
+ message or calling a function. Specifying a particular function via
+ `{"name": "my_function"}` forces the model to call that function. `none` is the
+ default when no functions are present. `auto` is the default if functions are
+ present.
functions: Deprecated in favor of `tools`. A list of functions the model may generate JSON
inputs for.
@@ -555,6 +598,12 @@ def create(
handoff_config: Configuration for multi-model handoffs.
+ handoff_mode: Handoff control. `None` or omitted: auto-detect. `true`: structured handoff (SDK).
+ `false`: drop-in (LLM re-run for mixed turns).
+
+ inference_geo: Specifies the geographic region for inference processing. If not specified, the
+ workspace's `default_inference_geo` is used.
+
logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a
JSON object that maps tokens (specified by their token ID in the tokenizer) to
an associated bias value from -100 to 100. Mathematically, the bias is added to
@@ -586,8 +635,7 @@ def create(
modalities: Output types that you would like the model to generate. Most models are capable
of generating text, which is the default: `["text"]` The `gpt-4o-audio-preview`
- model can also be used to
- [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+ model can also be used to [generate audio](/docs/guides/audio). To request that
this model generate both text and audio responses, you can use:
`["text", "audio"]`
@@ -598,7 +646,7 @@ def create(
you will be charged based on the number of generated tokens across all of the
choices. Keep `n` as `1` to minimize costs.
- parallel_tool_calls: Whether to enable parallel tool calls (Anthropic uses inverted polarity)
+ parallel_tool_calls: Whether to enable parallel tool calls (Anthropic uses inverted polarity).
prediction: Static predicted output content, such as the content of a text file that is
being regenerated.
@@ -615,35 +663,34 @@ def create(
talk about new topics.
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
- hit rates. Replaces the `user` field.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ hit rates. Replaces the `user` field. [Learn more](/docs/guides/prompt-caching).
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
prompt caching, which keeps cached prefixes active for longer, up to a maximum
- of 24 hours.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ of 24 hours. [Learn more](/docs/guides/prompt-caching#prompt-cache-retention).
prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to
`reasoning` the system prompt for reasoning models will be used.
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- reasoning effort can result in faster responses and fewer tokens used on
- reasoning in a response. - `gpt-5.1` defaults to `none`, which does not perform
- reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`,
- `medium`, and `high`. Tool calls are supported for all reasoning values in
- gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort, and
- do not support `none`. - The `gpt-5-pro` model defaults to (and only supports)
- `high` reasoning effort.
+ supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ Reducing reasoning effort can result in faster responses and fewer tokens used
+ on reasoning in a response. - `gpt-5.1` defaults to `none`, which does not
+ perform reasoning. The supported reasoning values for `gpt-5.1` are `none`,
+ `low`, `medium`, and `high`. Tool calls are supported for all reasoning values
+ in gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort,
+ and do not support `none`. - The `gpt-5-pro` model defaults to (and only
+ supports) `high` reasoning effort. - `xhigh` is supported for all models after
+ `gpt-5.1-codex-max`.
response_format: An object specifying the format that the model must output. Setting to
`{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
which ensures the model will match your supplied JSON schema. Learn more in the
- [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
+ [Structured Outputs guide](/docs/guides/structured-outputs). Setting to
+ `{ "type": "json_object" }` enables the older JSON mode, which ensures the
+ message the model generates is valid JSON. Using `json_schema` is preferred for
+ models that support it.
safe_prompt: Whether to inject a safety prompt before all conversations.
@@ -651,7 +698,7 @@ def create(
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
safety_settings: Safety/content filtering settings (Google-specific)
@@ -662,12 +709,15 @@ def create(
service_tier: Service tier for request processing
+ speed: The inference speed mode for this request. `"fast"` enables high
+ output-tokens-per-second inference.
+
stop: Sequences that stop generation
store: Whether or not to store the output of this chat completion request for use in
- our [model distillation](https://platform.openai.com/docs/guides/distillation)
- or [evals](https://platform.openai.com/docs/guides/evals) products. Supports
- text and image inputs. Note: image inputs over 8MB will be dropped.
+ our [model distillation](/docs/guides/distillation) or
+ [evals](/docs/guides/evals) products. Supports text and image inputs. Note:
+ image inputs over 8MB will be dropped.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -701,15 +751,14 @@ def create(
`prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
verbosity: Constrains the verbosity of the model's response. Lower values will result in
more concise responses, while higher values will result in more verbose
responses. Currently supported values are `low`, `medium`, and `high`.
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
- about the
- [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+ about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).
extra_headers: Send extra headers
@@ -733,14 +782,18 @@ def create(
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
automatic_tool_execution: bool | Omit = omit,
cached_content: Optional[str] | Omit = omit,
+ correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
+ deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
generation_config: Optional[JSONObjectInput] | Omit = omit,
guardrails: Optional[Iterable[Dict[str, object]]] | Omit = omit,
handoff_config: Optional[Dict[str, object]] | Omit = omit,
+ handoff_mode: Optional[bool] | Omit = omit,
+ inference_geo: Optional[str] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
@@ -752,6 +805,7 @@ def create(
modalities: Optional[SequenceNotStr[str]] | Omit = omit,
model_attributes: Optional[Dict[str, Dict[str, float]]] | Omit = omit,
n: Optional[int] | Omit = omit,
+ output_config: Optional[JSONObjectInput] | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
prediction: Optional[PredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
@@ -766,6 +820,7 @@ def create(
search_parameters: Optional[JSONObjectInput] | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[str] | Omit = omit,
+ speed: Optional[Literal["standard", "fast"]] | Omit = omit,
stop: Union[SequenceNotStr[str], str, None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream_options: Optional[JSONObjectInput] | Omit = omit,
@@ -774,7 +829,7 @@ def create(
thinking: Optional[completion_create_params.Thinking] | Omit = omit,
tool_choice: Optional[completion_create_params.ToolChoice] | Omit = omit,
tool_config: Optional[JSONObjectInput] | Omit = omit,
- tools: Optional[Iterable[completion_create_params.Tool]] | Omit = omit,
+ tools: Optional[Iterable[ChatCompletionToolParam]] | Omit = omit,
top_k: Optional[int] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
@@ -798,7 +853,8 @@ def create(
Headers:
- Authorization: bearer key for the calling account.
- - Optional BYOK or provider headers if applicable.
+ - X-Provider / X-Provider-Key: optional headers for using your own provider API
+ key.
Behavior:
@@ -851,12 +907,11 @@ def create(
agent_attributes: Agent attributes. Values in [0.0, 1.0].
audio: Parameters for audio output. Required when audio output is requested with
- `modalities: ["audio"]`.
- [Learn more](https://platform.openai.com/docs/guides/audio).
+ `modalities: ["audio"]`. [Learn more](/docs/guides/audio).
Fields:
- - voice (required): VoiceIdsShared
+ - voice (required): VoiceIdsOrCustomVoice
- format (required): Literal["wav", "aac", "mp3", "flac", "opus", "pcm16"]
automatic_tool_execution: Execute tools server-side. If false, returns raw tool calls for manual handling.
@@ -865,17 +920,29 @@ def create(
[cached](https://ai.google.dev/gemini-api/docs/caching) to use as context to
serve the prediction. Format: `cachedContents/{cachedContent}`
+ correlation_id: Stable session ID for resuming a previous handoff. Returned by the server on
+ handoff; echo it on the next request to resume.
+
credentials: Credentials for MCP server authentication. Each credential is matched to servers
by connection name.
deferred: If set to `true`, the request returns a `request_id`. You can then get the
deferred response by GET `/v1/chat/deferred-completion/{request_id}`.
+ deferred_calls: Tier 2 stateless resumption. Deferred tool specs from a previous handoff
+ response, sent back verbatim so the server can resume without Redis.
+
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- function_call: Wrapper for union variant: function call mode.
+ function_call: Deprecated in favor of `tool_choice`. Controls which (if any) function is called
+ by the model. `none` means the model will not call a function and instead
+ generates a message. `auto` means the model can pick between generating a
+ message or calling a function. Specifying a particular function via
+ `{"name": "my_function"}` forces the model to call that function. `none` is the
+ default when no functions are present. `auto` is the default if functions are
+ present.
functions: Deprecated in favor of `tools`. A list of functions the model may generate JSON
inputs for.
@@ -886,6 +953,12 @@ def create(
handoff_config: Configuration for multi-model handoffs.
+ handoff_mode: Handoff control. `None` or omitted: auto-detect. `true`: structured handoff (SDK).
+ `false`: drop-in (LLM re-run for mixed turns).
+
+ inference_geo: Specifies the geographic region for inference processing. If not specified, the
+ workspace's `default_inference_geo` is used.
+
logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a
JSON object that maps tokens (specified by their token ID in the tokenizer) to
an associated bias value from -100 to 100. Mathematically, the bias is added to
@@ -917,8 +990,7 @@ def create(
modalities: Output types that you would like the model to generate. Most models are capable
of generating text, which is the default: `["text"]` The `gpt-4o-audio-preview`
- model can also be used to
- [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+ model can also be used to [generate audio](/docs/guides/audio). To request that
this model generate both text and audio responses, you can use:
`["text", "audio"]`
@@ -929,7 +1001,7 @@ def create(
you will be charged based on the number of generated tokens across all of the
choices. Keep `n` as `1` to minimize costs.
- parallel_tool_calls: Whether to enable parallel tool calls (Anthropic uses inverted polarity)
+ parallel_tool_calls: Whether to enable parallel tool calls (Anthropic uses inverted polarity).
prediction: Static predicted output content, such as the content of a text file that is
being regenerated.
@@ -946,35 +1018,34 @@ def create(
talk about new topics.
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
- hit rates. Replaces the `user` field.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ hit rates. Replaces the `user` field. [Learn more](/docs/guides/prompt-caching).
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
prompt caching, which keeps cached prefixes active for longer, up to a maximum
- of 24 hours.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ of 24 hours. [Learn more](/docs/guides/prompt-caching#prompt-cache-retention).
prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to
`reasoning` the system prompt for reasoning models will be used.
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- reasoning effort can result in faster responses and fewer tokens used on
- reasoning in a response. - `gpt-5.1` defaults to `none`, which does not perform
- reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`,
- `medium`, and `high`. Tool calls are supported for all reasoning values in
- gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort, and
- do not support `none`. - The `gpt-5-pro` model defaults to (and only supports)
- `high` reasoning effort.
+ supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ Reducing reasoning effort can result in faster responses and fewer tokens used
+ on reasoning in a response. - `gpt-5.1` defaults to `none`, which does not
+ perform reasoning. The supported reasoning values for `gpt-5.1` are `none`,
+ `low`, `medium`, and `high`. Tool calls are supported for all reasoning values
+ in gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort,
+ and do not support `none`. - The `gpt-5-pro` model defaults to (and only
+ supports) `high` reasoning effort. - `xhigh` is supported for all models after
+ `gpt-5.1-codex-max`.
response_format: An object specifying the format that the model must output. Setting to
`{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
which ensures the model will match your supplied JSON schema. Learn more in the
- [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
+ [Structured Outputs guide](/docs/guides/structured-outputs). Setting to
+ `{ "type": "json_object" }` enables the older JSON mode, which ensures the
+ message the model generates is valid JSON. Using `json_schema` is preferred for
+ models that support it.
safe_prompt: Whether to inject a safety prompt before all conversations.
@@ -982,7 +1053,7 @@ def create(
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
safety_settings: Safety/content filtering settings (Google-specific)
@@ -993,12 +1064,15 @@ def create(
service_tier: Service tier for request processing
+ speed: The inference speed mode for this request. `"fast"` enables high
+ output-tokens-per-second inference.
+
stop: Sequences that stop generation
store: Whether or not to store the output of this chat completion request for use in
- our [model distillation](https://platform.openai.com/docs/guides/distillation)
- or [evals](https://platform.openai.com/docs/guides/evals) products. Supports
- text and image inputs. Note: image inputs over 8MB will be dropped.
+ our [model distillation](/docs/guides/distillation) or
+ [evals](/docs/guides/evals) products. Supports text and image inputs. Note:
+ image inputs over 8MB will be dropped.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -1032,15 +1106,14 @@ def create(
`prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
verbosity: Constrains the verbosity of the model's response. Lower values will result in
more concise responses, while higher values will result in more verbose
responses. Currently supported values are `low`, `medium`, and `high`.
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
- about the
- [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+ about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).
extra_headers: Send extra headers
@@ -1063,14 +1136,18 @@ def create(
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
automatic_tool_execution: bool | Omit = omit,
cached_content: Optional[str] | Omit = omit,
+ correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
+ deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
generation_config: Optional[JSONObjectInput] | Omit = omit,
guardrails: Optional[Iterable[Dict[str, object]]] | Omit = omit,
handoff_config: Optional[Dict[str, object]] | Omit = omit,
+ handoff_mode: Optional[bool] | Omit = omit,
+ inference_geo: Optional[str] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
@@ -1082,6 +1159,7 @@ def create(
modalities: Optional[SequenceNotStr[str]] | Omit = omit,
model_attributes: Optional[Dict[str, Dict[str, float]]] | Omit = omit,
n: Optional[int] | Omit = omit,
+ output_config: Optional[JSONObjectInput] | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
prediction: Optional[PredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
@@ -1096,6 +1174,7 @@ def create(
search_parameters: Optional[JSONObjectInput] | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[str] | Omit = omit,
+ speed: Optional[Literal["standard", "fast"]] | Omit = omit,
stop: Union[SequenceNotStr[str], str, None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
@@ -1105,7 +1184,7 @@ def create(
thinking: Optional[completion_create_params.Thinking] | Omit = omit,
tool_choice: Optional[completion_create_params.ToolChoice] | Omit = omit,
tool_config: Optional[JSONObjectInput] | Omit = omit,
- tools: Optional[Iterable[completion_create_params.Tool]] | Omit = omit,
+ tools: Optional[Iterable[ChatCompletionToolParam]] | Omit = omit,
top_k: Optional[int] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
@@ -1144,14 +1223,18 @@ def create(
"audio": audio,
"automatic_tool_execution": automatic_tool_execution,
"cached_content": cached_content,
+ "correlation_id": correlation_id,
"credentials": credentials,
"deferred": deferred,
+ "deferred_calls": deferred_calls,
"frequency_penalty": frequency_penalty,
"function_call": function_call,
"functions": functions,
"generation_config": generation_config,
"guardrails": guardrails,
"handoff_config": handoff_config,
+ "handoff_mode": handoff_mode,
+ "inference_geo": inference_geo,
"logit_bias": logit_bias,
"logprobs": logprobs,
"max_completion_tokens": max_completion_tokens,
@@ -1163,6 +1246,7 @@ def create(
"modalities": modalities,
"model_attributes": model_attributes,
"n": n,
+ "output_config": output_config,
"parallel_tool_calls": parallel_tool_calls,
"prediction": prediction,
"presence_penalty": presence_penalty,
@@ -1177,6 +1261,7 @@ def create(
"search_parameters": search_parameters,
"seed": seed,
"service_tier": service_tier,
+ "speed": speed,
"stop": stop,
"store": store,
"stream": stream,
@@ -1549,14 +1634,18 @@ async def create(
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
automatic_tool_execution: bool | Omit = omit,
cached_content: Optional[str] | Omit = omit,
+ correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
+ deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
generation_config: Optional[JSONObjectInput] | Omit = omit,
guardrails: Optional[Iterable[Dict[str, object]]] | Omit = omit,
handoff_config: Optional[Dict[str, object]] | Omit = omit,
+ handoff_mode: Optional[bool] | Omit = omit,
+ inference_geo: Optional[str] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
@@ -1568,6 +1657,7 @@ async def create(
modalities: Optional[SequenceNotStr[str]] | Omit = omit,
model_attributes: Optional[Dict[str, Dict[str, float]]] | Omit = omit,
n: Optional[int] | Omit = omit,
+ output_config: Optional[JSONObjectInput] | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
prediction: Optional[PredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
@@ -1582,6 +1672,7 @@ async def create(
search_parameters: Optional[JSONObjectInput] | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[str] | Omit = omit,
+ speed: Optional[Literal["standard", "fast"]] | Omit = omit,
stop: Union[SequenceNotStr[str], str, None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream: Optional[Literal[False]] | Omit = omit,
@@ -1591,7 +1682,7 @@ async def create(
thinking: Optional[completion_create_params.Thinking] | Omit = omit,
tool_choice: Optional[completion_create_params.ToolChoice] | Omit = omit,
tool_config: Optional[JSONObjectInput] | Omit = omit,
- tools: Optional[Iterable[completion_create_params.Tool]] | Omit = omit,
+ tools: Optional[Iterable[ChatCompletionToolParam]] | Omit = omit,
top_k: Optional[int] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
@@ -1615,7 +1706,8 @@ async def create(
Headers:
- Authorization: bearer key for the calling account.
- - Optional BYOK or provider headers if applicable.
+ - X-Provider / X-Provider-Key: optional headers for using your own provider API
+ key.
Behavior:
@@ -1666,12 +1758,11 @@ async def create(
agent_attributes: Agent attributes. Values in [0.0, 1.0].
audio: Parameters for audio output. Required when audio output is requested with
- `modalities: ["audio"]`.
- [Learn more](https://platform.openai.com/docs/guides/audio).
+ `modalities: ["audio"]`. [Learn more](/docs/guides/audio).
Fields:
- - voice (required): VoiceIdsShared
+ - voice (required): VoiceIdsOrCustomVoice
- format (required): Literal["wav", "aac", "mp3", "flac", "opus", "pcm16"]
automatic_tool_execution: Execute tools server-side. If false, returns raw tool calls for manual handling.
@@ -1680,17 +1771,29 @@ async def create(
[cached](https://ai.google.dev/gemini-api/docs/caching) to use as context to
serve the prediction. Format: `cachedContents/{cachedContent}`
+ correlation_id: Stable session ID for resuming a previous handoff. Returned by the server on
+ handoff; echo it on the next request to resume.
+
credentials: Credentials for MCP server authentication. Each credential is matched to servers
by connection name.
deferred: If set to `true`, the request returns a `request_id`. You can then get the
deferred response by GET `/v1/chat/deferred-completion/{request_id}`.
+ deferred_calls: Tier 2 stateless resumption. Deferred tool specs from a previous handoff
+ response, sent back verbatim so the server can resume without Redis.
+
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- function_call: Wrapper for union variant: function call mode.
+ function_call: Deprecated in favor of `tool_choice`. Controls which (if any) function is called
+ by the model. `none` means the model will not call a function and instead
+ generates a message. `auto` means the model can pick between generating a
+ message or calling a function. Specifying a particular function via
+ `{"name": "my_function"}` forces the model to call that function. `none` is the
+ default when no functions are present. `auto` is the default if functions are
+ present.
functions: Deprecated in favor of `tools`. A list of functions the model may generate JSON
inputs for.
@@ -1701,6 +1804,12 @@ async def create(
handoff_config: Configuration for multi-model handoffs.
+ handoff_mode: Handoff control. `None` or omitted: auto-detect. `true`: structured handoff (SDK).
+ `false`: drop-in (LLM re-run for mixed turns).
+
+ inference_geo: Specifies the geographic region for inference processing. If not specified, the
+ workspace's `default_inference_geo` is used.
+
logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a
JSON object that maps tokens (specified by their token ID in the tokenizer) to
an associated bias value from -100 to 100. Mathematically, the bias is added to
@@ -1732,8 +1841,7 @@ async def create(
modalities: Output types that you would like the model to generate. Most models are capable
of generating text, which is the default: `["text"]` The `gpt-4o-audio-preview`
- model can also be used to
- [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+ model can also be used to [generate audio](/docs/guides/audio). To request that
this model generate both text and audio responses, you can use:
`["text", "audio"]`
@@ -1744,7 +1852,7 @@ async def create(
you will be charged based on the number of generated tokens across all of the
choices. Keep `n` as `1` to minimize costs.
- parallel_tool_calls: Whether to enable parallel tool calls (Anthropic uses inverted polarity)
+ parallel_tool_calls: Whether to enable parallel tool calls (Anthropic uses inverted polarity).
prediction: Static predicted output content, such as the content of a text file that is
being regenerated.
@@ -1761,35 +1869,34 @@ async def create(
talk about new topics.
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
- hit rates. Replaces the `user` field.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ hit rates. Replaces the `user` field. [Learn more](/docs/guides/prompt-caching).
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
prompt caching, which keeps cached prefixes active for longer, up to a maximum
- of 24 hours.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ of 24 hours. [Learn more](/docs/guides/prompt-caching#prompt-cache-retention).
prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to
`reasoning` the system prompt for reasoning models will be used.
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- reasoning effort can result in faster responses and fewer tokens used on
- reasoning in a response. - `gpt-5.1` defaults to `none`, which does not perform
- reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`,
- `medium`, and `high`. Tool calls are supported for all reasoning values in
- gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort, and
- do not support `none`. - The `gpt-5-pro` model defaults to (and only supports)
- `high` reasoning effort.
+ supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ Reducing reasoning effort can result in faster responses and fewer tokens used
+ on reasoning in a response. - `gpt-5.1` defaults to `none`, which does not
+ perform reasoning. The supported reasoning values for `gpt-5.1` are `none`,
+ `low`, `medium`, and `high`. Tool calls are supported for all reasoning values
+ in gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort,
+ and do not support `none`. - The `gpt-5-pro` model defaults to (and only
+ supports) `high` reasoning effort. - `xhigh` is supported for all models after
+ `gpt-5.1-codex-max`.
response_format: An object specifying the format that the model must output. Setting to
`{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
which ensures the model will match your supplied JSON schema. Learn more in the
- [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
+ [Structured Outputs guide](/docs/guides/structured-outputs). Setting to
+ `{ "type": "json_object" }` enables the older JSON mode, which ensures the
+ message the model generates is valid JSON. Using `json_schema` is preferred for
+ models that support it.
safe_prompt: Whether to inject a safety prompt before all conversations.
@@ -1797,7 +1904,7 @@ async def create(
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
safety_settings: Safety/content filtering settings (Google-specific)
@@ -1808,12 +1915,15 @@ async def create(
service_tier: Service tier for request processing
+ speed: The inference speed mode for this request. `"fast"` enables high
+ output-tokens-per-second inference.
+
stop: Sequences that stop generation
store: Whether or not to store the output of this chat completion request for use in
- our [model distillation](https://platform.openai.com/docs/guides/distillation)
- or [evals](https://platform.openai.com/docs/guides/evals) products. Supports
- text and image inputs. Note: image inputs over 8MB will be dropped.
+ our [model distillation](/docs/guides/distillation) or
+ [evals](/docs/guides/evals) products. Supports text and image inputs. Note:
+ image inputs over 8MB will be dropped.
stream: Enable streaming response
@@ -1849,15 +1959,14 @@ async def create(
`prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
verbosity: Constrains the verbosity of the model's response. Lower values will result in
more concise responses, while higher values will result in more verbose
responses. Currently supported values are `low`, `medium`, and `high`.
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
- about the
- [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+ about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).
extra_headers: Send extra headers
@@ -1881,14 +1990,18 @@ async def create(
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
automatic_tool_execution: bool | Omit = omit,
cached_content: Optional[str] | Omit = omit,
+ correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
+ deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
generation_config: Optional[JSONObjectInput] | Omit = omit,
guardrails: Optional[Iterable[Dict[str, object]]] | Omit = omit,
handoff_config: Optional[Dict[str, object]] | Omit = omit,
+ handoff_mode: Optional[bool] | Omit = omit,
+ inference_geo: Optional[str] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
@@ -1900,6 +2013,7 @@ async def create(
modalities: Optional[SequenceNotStr[str]] | Omit = omit,
model_attributes: Optional[Dict[str, Dict[str, float]]] | Omit = omit,
n: Optional[int] | Omit = omit,
+ output_config: Optional[JSONObjectInput] | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
prediction: Optional[PredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
@@ -1914,6 +2028,7 @@ async def create(
search_parameters: Optional[JSONObjectInput] | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[str] | Omit = omit,
+ speed: Optional[Literal["standard", "fast"]] | Omit = omit,
stop: Union[SequenceNotStr[str], str, None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream_options: Optional[JSONObjectInput] | Omit = omit,
@@ -1922,7 +2037,7 @@ async def create(
thinking: Optional[completion_create_params.Thinking] | Omit = omit,
tool_choice: Optional[completion_create_params.ToolChoice] | Omit = omit,
tool_config: Optional[JSONObjectInput] | Omit = omit,
- tools: Optional[Iterable[completion_create_params.Tool]] | Omit = omit,
+ tools: Optional[Iterable[ChatCompletionToolParam]] | Omit = omit,
top_k: Optional[int] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
@@ -1946,7 +2061,8 @@ async def create(
Headers:
- Authorization: bearer key for the calling account.
- - Optional BYOK or provider headers if applicable.
+ - X-Provider / X-Provider-Key: optional headers for using your own provider API
+ key.
Behavior:
@@ -1999,12 +2115,11 @@ async def create(
agent_attributes: Agent attributes. Values in [0.0, 1.0].
audio: Parameters for audio output. Required when audio output is requested with
- `modalities: ["audio"]`.
- [Learn more](https://platform.openai.com/docs/guides/audio).
+ `modalities: ["audio"]`. [Learn more](/docs/guides/audio).
Fields:
- - voice (required): VoiceIdsShared
+ - voice (required): VoiceIdsOrCustomVoice
- format (required): Literal["wav", "aac", "mp3", "flac", "opus", "pcm16"]
automatic_tool_execution: Execute tools server-side. If false, returns raw tool calls for manual handling.
@@ -2013,17 +2128,29 @@ async def create(
[cached](https://ai.google.dev/gemini-api/docs/caching) to use as context to
serve the prediction. Format: `cachedContents/{cachedContent}`
+ correlation_id: Stable session ID for resuming a previous handoff. Returned by the server on
+ handoff; echo it on the next request to resume.
+
credentials: Credentials for MCP server authentication. Each credential is matched to servers
by connection name.
deferred: If set to `true`, the request returns a `request_id`. You can then get the
deferred response by GET `/v1/chat/deferred-completion/{request_id}`.
+ deferred_calls: Tier 2 stateless resumption. Deferred tool specs from a previous handoff
+ response, sent back verbatim so the server can resume without Redis.
+
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- function_call: Wrapper for union variant: function call mode.
+ function_call: Deprecated in favor of `tool_choice`. Controls which (if any) function is called
+ by the model. `none` means the model will not call a function and instead
+ generates a message. `auto` means the model can pick between generating a
+ message or calling a function. Specifying a particular function via
+ `{"name": "my_function"}` forces the model to call that function. `none` is the
+ default when no functions are present. `auto` is the default if functions are
+ present.
functions: Deprecated in favor of `tools`. A list of functions the model may generate JSON
inputs for.
@@ -2034,6 +2161,12 @@ async def create(
handoff_config: Configuration for multi-model handoffs.
+ handoff_mode: Handoff control. `None` or omitted: auto-detect. `true`: structured handoff (SDK).
+ `false`: drop-in (LLM re-run for mixed turns).
+
+ inference_geo: Specifies the geographic region for inference processing. If not specified, the
+ workspace's `default_inference_geo` is used.
+
logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a
JSON object that maps tokens (specified by their token ID in the tokenizer) to
an associated bias value from -100 to 100. Mathematically, the bias is added to
@@ -2065,8 +2198,7 @@ async def create(
modalities: Output types that you would like the model to generate. Most models are capable
of generating text, which is the default: `["text"]` The `gpt-4o-audio-preview`
- model can also be used to
- [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+ model can also be used to [generate audio](/docs/guides/audio). To request that
this model generate both text and audio responses, you can use:
`["text", "audio"]`
@@ -2077,7 +2209,7 @@ async def create(
you will be charged based on the number of generated tokens across all of the
choices. Keep `n` as `1` to minimize costs.
- parallel_tool_calls: Whether to enable parallel tool calls (Anthropic uses inverted polarity)
+ parallel_tool_calls: Whether to enable parallel tool calls (Anthropic uses inverted polarity).
prediction: Static predicted output content, such as the content of a text file that is
being regenerated.
@@ -2094,35 +2226,34 @@ async def create(
talk about new topics.
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
- hit rates. Replaces the `user` field.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ hit rates. Replaces the `user` field. [Learn more](/docs/guides/prompt-caching).
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
prompt caching, which keeps cached prefixes active for longer, up to a maximum
- of 24 hours.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ of 24 hours. [Learn more](/docs/guides/prompt-caching#prompt-cache-retention).
prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to
`reasoning` the system prompt for reasoning models will be used.
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- reasoning effort can result in faster responses and fewer tokens used on
- reasoning in a response. - `gpt-5.1` defaults to `none`, which does not perform
- reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`,
- `medium`, and `high`. Tool calls are supported for all reasoning values in
- gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort, and
- do not support `none`. - The `gpt-5-pro` model defaults to (and only supports)
- `high` reasoning effort.
+ supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ Reducing reasoning effort can result in faster responses and fewer tokens used
+ on reasoning in a response. - `gpt-5.1` defaults to `none`, which does not
+ perform reasoning. The supported reasoning values for `gpt-5.1` are `none`,
+ `low`, `medium`, and `high`. Tool calls are supported for all reasoning values
+ in gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort,
+ and do not support `none`. - The `gpt-5-pro` model defaults to (and only
+ supports) `high` reasoning effort. - `xhigh` is supported for all models after
+ `gpt-5.1-codex-max`.
response_format: An object specifying the format that the model must output. Setting to
`{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
which ensures the model will match your supplied JSON schema. Learn more in the
- [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
+ [Structured Outputs guide](/docs/guides/structured-outputs). Setting to
+ `{ "type": "json_object" }` enables the older JSON mode, which ensures the
+ message the model generates is valid JSON. Using `json_schema` is preferred for
+ models that support it.
safe_prompt: Whether to inject a safety prompt before all conversations.
@@ -2130,7 +2261,7 @@ async def create(
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
safety_settings: Safety/content filtering settings (Google-specific)
@@ -2141,12 +2272,15 @@ async def create(
service_tier: Service tier for request processing
+ speed: The inference speed mode for this request. `"fast"` enables high
+ output-tokens-per-second inference.
+
stop: Sequences that stop generation
store: Whether or not to store the output of this chat completion request for use in
- our [model distillation](https://platform.openai.com/docs/guides/distillation)
- or [evals](https://platform.openai.com/docs/guides/evals) products. Supports
- text and image inputs. Note: image inputs over 8MB will be dropped.
+ our [model distillation](/docs/guides/distillation) or
+ [evals](/docs/guides/evals) products. Supports text and image inputs. Note:
+ image inputs over 8MB will be dropped.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -2180,15 +2314,14 @@ async def create(
`prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
verbosity: Constrains the verbosity of the model's response. Lower values will result in
more concise responses, while higher values will result in more verbose
responses. Currently supported values are `low`, `medium`, and `high`.
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
- about the
- [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+ about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).
extra_headers: Send extra headers
@@ -2212,14 +2345,18 @@ async def create(
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
automatic_tool_execution: bool | Omit = omit,
cached_content: Optional[str] | Omit = omit,
+ correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
+ deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
generation_config: Optional[JSONObjectInput] | Omit = omit,
guardrails: Optional[Iterable[Dict[str, object]]] | Omit = omit,
handoff_config: Optional[Dict[str, object]] | Omit = omit,
+ handoff_mode: Optional[bool] | Omit = omit,
+ inference_geo: Optional[str] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
@@ -2231,6 +2368,7 @@ async def create(
modalities: Optional[SequenceNotStr[str]] | Omit = omit,
model_attributes: Optional[Dict[str, Dict[str, float]]] | Omit = omit,
n: Optional[int] | Omit = omit,
+ output_config: Optional[JSONObjectInput] | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
prediction: Optional[PredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
@@ -2245,6 +2383,7 @@ async def create(
search_parameters: Optional[JSONObjectInput] | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[str] | Omit = omit,
+ speed: Optional[Literal["standard", "fast"]] | Omit = omit,
stop: Union[SequenceNotStr[str], str, None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream_options: Optional[JSONObjectInput] | Omit = omit,
@@ -2253,7 +2392,7 @@ async def create(
thinking: Optional[completion_create_params.Thinking] | Omit = omit,
tool_choice: Optional[completion_create_params.ToolChoice] | Omit = omit,
tool_config: Optional[JSONObjectInput] | Omit = omit,
- tools: Optional[Iterable[completion_create_params.Tool]] | Omit = omit,
+ tools: Optional[Iterable[ChatCompletionToolParam]] | Omit = omit,
top_k: Optional[int] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
@@ -2277,7 +2416,8 @@ async def create(
Headers:
- Authorization: bearer key for the calling account.
- - Optional BYOK or provider headers if applicable.
+ - X-Provider / X-Provider-Key: optional headers for using your own provider API
+ key.
Behavior:
@@ -2330,12 +2470,11 @@ async def create(
agent_attributes: Agent attributes. Values in [0.0, 1.0].
audio: Parameters for audio output. Required when audio output is requested with
- `modalities: ["audio"]`.
- [Learn more](https://platform.openai.com/docs/guides/audio).
+ `modalities: ["audio"]`. [Learn more](/docs/guides/audio).
Fields:
- - voice (required): VoiceIdsShared
+ - voice (required): VoiceIDsOrCustomVoice
- format (required): Literal["wav", "aac", "mp3", "flac", "opus", "pcm16"]
automatic_tool_execution: Execute tools server-side. If false, returns raw tool calls for manual handling.
@@ -2344,17 +2483,29 @@ async def create(
[cached](https://ai.google.dev/gemini-api/docs/caching) to use as context to
serve the prediction. Format: `cachedContents/{cachedContent}`
+ correlation_id: Stable session ID for resuming a previous handoff. Returned by the server on
+ handoff; echo it on the next request to resume.
+
credentials: Credentials for MCP server authentication. Each credential is matched to servers
by connection name.
deferred: If set to `true`, the request returns a `request_id`. You can then get the
deferred response by GET `/v1/chat/deferred-completion/{request_id}`.
+ deferred_calls: Tier 2 stateless resumption. Deferred tool specs from a previous handoff
+ response, sent back verbatim so the server can resume without Redis.
+
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- function_call: Wrapper for union variant: function call mode.
+ function_call: Deprecated in favor of `tool_choice`. Controls which (if any) function is called
+ by the model. `none` means the model will not call a function and instead
+ generates a message. `auto` means the model can pick between generating a
+ message or calling a function. Specifying a particular function via
+ `{"name": "my_function"}` forces the model to call that function. `none` is the
+ default when no functions are present. `auto` is the default if functions are
+ present.
functions: Deprecated in favor of `tools`. A list of functions the model may generate JSON
inputs for.
@@ -2365,6 +2516,12 @@ async def create(
handoff_config: Configuration for multi-model handoffs.
+ handoff_mode: Handoff control. Omit or pass None to auto-detect; pass true for
+ structured handoff (SDK-driven) or false for drop-in mode (the LLM is re-run for mixed turns).
+
+ inference_geo: Specifies the geographic region for inference processing. If not specified, the
+ workspace's `default_inference_geo` is used.
+
logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a
JSON object that maps tokens (specified by their token ID in the tokenizer) to
an associated bias value from -100 to 100. Mathematically, the bias is added to
@@ -2396,8 +2553,7 @@ async def create(
modalities: Output types that you would like the model to generate. Most models are capable
of generating text, which is the default: `["text"]` The `gpt-4o-audio-preview`
- model can also be used to
- [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+ model can also be used to [generate audio](/docs/guides/audio). To request that
this model generate both text and audio responses, you can use:
`["text", "audio"]`
@@ -2408,7 +2564,7 @@ async def create(
you will be charged based on the number of generated tokens across all of the
choices. Keep `n` as `1` to minimize costs.
- parallel_tool_calls: Whether to enable parallel tool calls (Anthropic uses inverted polarity)
+ parallel_tool_calls: Whether to enable parallel tool calls (Anthropic uses inverted polarity).
prediction: Static predicted output content, such as the content of a text file that is
being regenerated.
@@ -2425,35 +2581,34 @@ async def create(
talk about new topics.
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
- hit rates. Replaces the `user` field.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ hit rates. Replaces the `user` field. [Learn more](/docs/guides/prompt-caching).
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
prompt caching, which keeps cached prefixes active for longer, up to a maximum
- of 24 hours.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ of 24 hours. [Learn more](/docs/guides/prompt-caching#prompt-cache-retention).
prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to
`reasoning` the system prompt for reasoning models will be used.
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- reasoning effort can result in faster responses and fewer tokens used on
- reasoning in a response. - `gpt-5.1` defaults to `none`, which does not perform
- reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`,
- `medium`, and `high`. Tool calls are supported for all reasoning values in
- gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort, and
- do not support `none`. - The `gpt-5-pro` model defaults to (and only supports)
- `high` reasoning effort.
+ supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ Reducing reasoning effort can result in faster responses and fewer tokens used
+ on reasoning in a response. - `gpt-5.1` defaults to `none`, which does not
+ perform reasoning. The supported reasoning values for `gpt-5.1` are `none`,
+ `low`, `medium`, and `high`. Tool calls are supported for all reasoning values
+ in gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort,
+ and do not support `none`. - The `gpt-5-pro` model defaults to (and only
+ supports) `high` reasoning effort. - `xhigh` is supported for all models after
+ `gpt-5.1-codex-max`.
response_format: An object specifying the format that the model must output. Setting to
`{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
which ensures the model will match your supplied JSON schema. Learn more in the
- [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
+ [Structured Outputs guide](/docs/guides/structured-outputs). Setting to
+ `{ "type": "json_object" }` enables the older JSON mode, which ensures the
+ message the model generates is valid JSON. Using `json_schema` is preferred for
+ models that support it.
safe_prompt: Whether to inject a safety prompt before all conversations.
@@ -2461,7 +2616,7 @@ async def create(
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
safety_settings: Safety/content filtering settings (Google-specific)
@@ -2472,12 +2627,15 @@ async def create(
service_tier: Service tier for request processing
+ speed: The inference speed mode for this request. `"fast"` enables high
+ output-tokens-per-second inference.
+
stop: Sequences that stop generation
store: Whether or not to store the output of this chat completion request for use in
- our [model distillation](https://platform.openai.com/docs/guides/distillation)
- or [evals](https://platform.openai.com/docs/guides/evals) products. Supports
- text and image inputs. Note: image inputs over 8MB will be dropped.
+ our [model distillation](/docs/guides/distillation) or
+ [evals](/docs/guides/evals) products. Supports text and image inputs. Note:
+ image inputs over 8MB will be dropped.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -2511,15 +2669,14 @@ async def create(
`prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
verbosity: Constrains the verbosity of the model's response. Lower values will result in
more concise responses, while higher values will result in more verbose
responses. Currently supported values are `low`, `medium`, and `high`.
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
- about the
- [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+ about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).
extra_headers: Send extra headers
@@ -2542,14 +2699,18 @@ async def create(
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
automatic_tool_execution: bool | Omit = omit,
cached_content: Optional[str] | Omit = omit,
+ correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
+ deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
generation_config: Optional[JSONObjectInput] | Omit = omit,
guardrails: Optional[Iterable[Dict[str, object]]] | Omit = omit,
handoff_config: Optional[Dict[str, object]] | Omit = omit,
+ handoff_mode: Optional[bool] | Omit = omit,
+ inference_geo: Optional[str] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
@@ -2561,6 +2722,7 @@ async def create(
modalities: Optional[SequenceNotStr[str]] | Omit = omit,
model_attributes: Optional[Dict[str, Dict[str, float]]] | Omit = omit,
n: Optional[int] | Omit = omit,
+ output_config: Optional[JSONObjectInput] | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
prediction: Optional[PredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
@@ -2575,6 +2737,7 @@ async def create(
search_parameters: Optional[JSONObjectInput] | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[str] | Omit = omit,
+ speed: Optional[Literal["standard", "fast"]] | Omit = omit,
stop: Union[SequenceNotStr[str], str, None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
@@ -2584,7 +2747,7 @@ async def create(
thinking: Optional[completion_create_params.Thinking] | Omit = omit,
tool_choice: Optional[completion_create_params.ToolChoice] | Omit = omit,
tool_config: Optional[JSONObjectInput] | Omit = omit,
- tools: Optional[Iterable[completion_create_params.Tool]] | Omit = omit,
+ tools: Optional[Iterable[ChatCompletionToolParam]] | Omit = omit,
top_k: Optional[int] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
@@ -2623,14 +2786,18 @@ async def create(
"audio": audio,
"automatic_tool_execution": automatic_tool_execution,
"cached_content": cached_content,
+ "correlation_id": correlation_id,
"credentials": credentials,
"deferred": deferred,
+ "deferred_calls": deferred_calls,
"frequency_penalty": frequency_penalty,
"function_call": function_call,
"functions": functions,
"generation_config": generation_config,
"guardrails": guardrails,
"handoff_config": handoff_config,
+ "handoff_mode": handoff_mode,
+ "inference_geo": inference_geo,
"logit_bias": logit_bias,
"logprobs": logprobs,
"max_completion_tokens": max_completion_tokens,
@@ -2642,6 +2809,7 @@ async def create(
"modalities": modalities,
"model_attributes": model_attributes,
"n": n,
+ "output_config": output_config,
"parallel_tool_calls": parallel_tool_calls,
"prediction": prediction,
"presence_penalty": presence_penalty,
@@ -2656,6 +2824,7 @@ async def create(
"search_parameters": search_parameters,
"seed": seed,
"service_tier": service_tier,
+ "speed": speed,
"stop": stop,
"store": store,
"stream": stream,
diff --git a/src/dedalus_labs/resources/embeddings.py b/src/dedalus_labs/resources/embeddings.py
index a0e7ac2..8ce6dc3 100644
--- a/src/dedalus_labs/resources/embeddings.py
+++ b/src/dedalus_labs/resources/embeddings.py
@@ -75,10 +75,8 @@ def create(
request.
model: ID of the model to use. You can use the
- [List models](https://platform.openai.com/docs/api-reference/models/list) API to
- see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models) for descriptions of
- them.
+ [List models](/docs/api-reference/models/list) API to see all of your available
+ models, or see our [Model overview](/docs/models) for descriptions of them.
dimensions: The number of dimensions the resulting output embeddings should have. Only
supported in `text-embedding-3` and later models.
@@ -87,8 +85,7 @@ def create(
[`base64`](https://pypi.org/project/pybase64/).
user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -174,10 +171,8 @@ async def create(
request.
model: ID of the model to use. You can use the
- [List models](https://platform.openai.com/docs/api-reference/models/list) API to
- see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models) for descriptions of
- them.
+ [List models](/docs/api-reference/models/list) API to see all of your available
+ models, or see our [Model overview](/docs/models) for descriptions of them.
dimensions: The number of dimensions the resulting output embeddings should have. Only
supported in `text-embedding-3` and later models.
@@ -186,8 +181,7 @@ async def create(
[`base64`](https://pypi.org/project/pybase64/).
user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
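The embeddings docstrings above map to a call like the following — a sketch assuming the OpenAI-compatible response shape (`data[0].embedding`):

```python
from dedalus_labs import Dedalus

client = Dedalus()

resp = client.embeddings.create(
    model="text-embedding-3-small",
    input="The food was delicious and the waiter was friendly.",
    dimensions=256,  # only supported by text-embedding-3 and later models
)

# 256 floats; pass encoding_format="base64" instead for compact transfer.
print(len(resp.data[0].embedding))
```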
diff --git a/src/dedalus_labs/resources/responses.py b/src/dedalus_labs/resources/responses.py
new file mode 100644
index 0000000..fda3962
--- /dev/null
+++ b/src/dedalus_labs/resources/responses.py
@@ -0,0 +1,675 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable, Optional
+from typing_extensions import Literal
+
+import httpx
+
+from ..types import response_create_params
+from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.response import Response
+from ..types.shared_params.json_object_input import JSONObjectInput
+
+__all__ = ["ResponsesResource", "AsyncResponsesResource"]
+
+
+class ResponsesResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> ResponsesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/dedalus-labs/dedalus-sdk-python#accessing-raw-response-data-eg-headers
+ """
+ return ResponsesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ResponsesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/dedalus-labs/dedalus-sdk-python#with_streaming_response
+ """
+ return ResponsesResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ background: Optional[bool] | Omit = omit,
+ conversation: Optional[response_create_params.Conversation] | Omit = omit,
+ credentials: Optional[response_create_params.Credentials] | Omit = omit,
+ frequency_penalty: Optional[float] | Omit = omit,
+ include: Optional[SequenceNotStr[str]] | Omit = omit,
+ input: Union[str, Iterable[JSONObjectInput], None] | Omit = omit,
+ instructions: Union[str, Iterable[JSONObjectInput], None] | Omit = omit,
+ max_output_tokens: Optional[int] | Omit = omit,
+ max_tool_calls: Optional[int] | Omit = omit,
+ mcp_servers: Optional[response_create_params.MCPServers] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ model: Optional[response_create_params.Model] | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ presence_penalty: Optional[float] | Omit = omit,
+ previous_response_id: Optional[str] | Omit = omit,
+ prompt: Optional[response_create_params.Prompt] | Omit = omit,
+ prompt_cache_key: Optional[str] | Omit = omit,
+ reasoning: Optional[JSONObjectInput] | Omit = omit,
+ safety_identifier: Optional[str] | Omit = omit,
+ service_tier: Optional[Literal["auto", "default"]] | Omit = omit,
+ store: Optional[bool] | Omit = omit,
+ stream: bool | Omit = omit,
+ stream_options: Optional[JSONObjectInput] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ text: Optional[JSONObjectInput] | Omit = omit,
+ tool_choice: Union[str, JSONObjectInput, None] | Omit = omit,
+ tools: Optional[Iterable[JSONObjectInput]] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ idempotency_key: str | None = None,
+ ) -> Response:
+ """
+ Create a response using the OpenAI Responses API.
+
+ This endpoint routes directly to OpenAI's Responses API. Only OpenAI models are
+ supported.
+
+ Args:
+ background: Whether to run the model response in the background.
+ [Learn more](https://platform.openai.com/docs/guides/background).
+
+ conversation: Conversation that this response belongs to. Items from this conversation are
+ prepended to the input items, and output items from this response are
+ automatically added after completion.
+
+ credentials: Credentials for MCP server authentication. Each credential is matched to servers
+ by connection name.
+
+ frequency_penalty: Penalizes new tokens based on their frequency in the text so far.
+
+ include: Specify additional output data to include in the model response. Currently
+ supported values are:
+
+ - `web_search_call.action.sources`: Include the sources of the web search tool
+ call.
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
+ - `computer_call_output.output.image_url`: Include image urls from the computer
+ call output.
+ - `file_search_call.results`: Include the search results of the file search tool
+ call.
+ - `message.input_image.image_url`: Include image urls from the input message.
+ - `message.output_text.logprobs`: Include logprobs with assistant messages.
+ - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+ tokens in reasoning item outputs. This enables reasoning items to be used in
+ multi-turn conversations when using the Responses API statelessly (like when
+ the `store` parameter is set to `false`, or when an organization is enrolled
+ in the zero data retention program).
+
+ input: Text, image, or file inputs to the model, used to generate a response.
+
+ Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Image inputs](https://platform.openai.com/docs/guides/images)
+ - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+ - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+ - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+ instructions: A system (or developer) message inserted into the model's context.
+
+ When using along with `previous_response_id`, the instructions from a previous
+ response will not be carried over to the next response. This makes it simple to
+ swap out system (or developer) messages in new responses.
+
+ max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+ including visible output tokens and
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+ max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
+ response. This maximum number applies across all built-in tool calls, not per
+ individual tool. Any further attempts to call a tool by the model will be
+ ignored.
+
+ mcp_servers: MCP server identifiers. Accepts marketplace slugs, URLs, or MCPServerSpec
+ objects. MCP tools are executed server-side and billed separately.
+
+ metadata: Set of up to 16 key-value string pairs that can be attached to the response for
+ structured metadata and later querying via the API or dashboard.
+
+ model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+ wide range of models with different capabilities, performance characteristics,
+ and price points. Refer to the
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
+ available models.
+
+ parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+ presence_penalty: Penalizes new tokens based on whether they appear in the text so far.
+
+ previous_response_id: Unique ID of the previous response to continue from when creating multi-turn
+ conversations. Cannot be used together with `conversation`.
+
+ prompt: Stored prompt template reference (BYOK).
+
+ prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
+ hit rates. Replaces the `user` field.
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+
+ reasoning: **gpt-5 and o-series models only**
+
+ Configuration options for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+ safety_identifier: A stable identifier used to help detect users of your application that may be
+ violating OpenAI's usage policies. The IDs should be a string that uniquely
+ identifies each user. We recommend hashing their username or email address, in
+ order to avoid sending us any identifying information.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+
+ service_tier: Specifies the processing type used for serving the request.
+
+ - If set to 'auto', then the request will be processed with the service tier
+ configured in the Project settings. Unless otherwise configured, the Project
+ will use 'default'.
+ - If set to 'default', then the request will be processed with the standard
+ pricing and performance for the selected model.
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ '[priority](https://openai.com/api-priority-processing/)', then the request
+ will be processed with the corresponding service tier.
+ - When not set, the default behavior is 'auto'.
+
+ When the `service_tier` parameter is set, the response body will include the
+ `service_tier` value based on the processing mode actually used to serve the
+ request. This response value may be different from the value set in the
+ parameter.
+
+ store: Whether to store the generated response for later retrieval via the Responses
+ API.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ text: Configuration options for a text response from the model. Can be plain text or
+ structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+ tool_choice: How the model should select which tool (or tools) to use when generating a
+ response. See the `tools` parameter to see how to specify which tools the model
+ can call.
+
+ tools: An array of tools the model may call while generating a response. You can
+ specify which tool to use by setting the `tool_choice` parameter.
+
+ We support the following categories of tools:
+
+ - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ capabilities, like
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ Learn more about
+ [built-in tools](https://platform.openai.com/docs/guides/tools).
+ - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
+ predefined connectors such as Google Drive and SharePoint. Learn more about
+ [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ the model to call your own code with strongly typed arguments and outputs.
+ Learn more about
+ [function calling](https://platform.openai.com/docs/guides/function-calling).
+ You can also use custom tools to call your own code.
+
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ truncation: The truncation strategy to use for the model response.
+
+ - `auto`: If the input to this Response exceeds the model's context window size,
+ the model will truncate the response to fit the context window by dropping
+ items from the beginning of the conversation.
+ - `disabled` (default): If the input size will exceed the context window size
+ for a model, the request will fail with a 400 error.
+
+ user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+ `prompt_cache_key` instead to maintain caching optimizations. A stable
+ identifier for your end-users. Used to boost cache hit rates by better bucketing
+ similar requests and to help OpenAI detect and prevent abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ return self._post(
+ "/v1/responses",
+ body=maybe_transform(
+ {
+ "background": background,
+ "conversation": conversation,
+ "credentials": credentials,
+ "frequency_penalty": frequency_penalty,
+ "include": include,
+ "input": input,
+ "instructions": instructions,
+ "max_output_tokens": max_output_tokens,
+ "max_tool_calls": max_tool_calls,
+ "mcp_servers": mcp_servers,
+ "metadata": metadata,
+ "model": model,
+ "parallel_tool_calls": parallel_tool_calls,
+ "presence_penalty": presence_penalty,
+ "previous_response_id": previous_response_id,
+ "prompt": prompt,
+ "prompt_cache_key": prompt_cache_key,
+ "reasoning": reasoning,
+ "safety_identifier": safety_identifier,
+ "service_tier": service_tier,
+ "store": store,
+ "stream": stream,
+ "stream_options": stream_options,
+ "temperature": temperature,
+ "text": text,
+ "tool_choice": tool_choice,
+ "tools": tools,
+ "top_logprobs": top_logprobs,
+ "top_p": top_p,
+ "truncation": truncation,
+ "user": user,
+ },
+ response_create_params.ResponseCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=Response,
+ )
+
+
+class AsyncResponsesResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncResponsesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/dedalus-labs/dedalus-sdk-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncResponsesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncResponsesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/dedalus-labs/dedalus-sdk-python#with_streaming_response
+ """
+ return AsyncResponsesResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ background: Optional[bool] | Omit = omit,
+ conversation: Optional[response_create_params.Conversation] | Omit = omit,
+ credentials: Optional[response_create_params.Credentials] | Omit = omit,
+ frequency_penalty: Optional[float] | Omit = omit,
+ include: Optional[SequenceNotStr[str]] | Omit = omit,
+ input: Union[str, Iterable[JSONObjectInput], None] | Omit = omit,
+ instructions: Union[str, Iterable[JSONObjectInput], None] | Omit = omit,
+ max_output_tokens: Optional[int] | Omit = omit,
+ max_tool_calls: Optional[int] | Omit = omit,
+ mcp_servers: Optional[response_create_params.MCPServers] | Omit = omit,
+ metadata: Optional[Dict[str, str]] | Omit = omit,
+ model: Optional[response_create_params.Model] | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ presence_penalty: Optional[float] | Omit = omit,
+ previous_response_id: Optional[str] | Omit = omit,
+ prompt: Optional[response_create_params.Prompt] | Omit = omit,
+ prompt_cache_key: Optional[str] | Omit = omit,
+ reasoning: Optional[JSONObjectInput] | Omit = omit,
+ safety_identifier: Optional[str] | Omit = omit,
+ service_tier: Optional[Literal["auto", "default"]] | Omit = omit,
+ store: Optional[bool] | Omit = omit,
+ stream: bool | Omit = omit,
+ stream_options: Optional[JSONObjectInput] | Omit = omit,
+ temperature: Optional[float] | Omit = omit,
+ text: Optional[JSONObjectInput] | Omit = omit,
+ tool_choice: Union[str, JSONObjectInput, None] | Omit = omit,
+ tools: Optional[Iterable[JSONObjectInput]] | Omit = omit,
+ top_logprobs: Optional[int] | Omit = omit,
+ top_p: Optional[float] | Omit = omit,
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ idempotency_key: str | None = None,
+ ) -> Response:
+ """
+ Create a response using the OpenAI Responses API.
+
+ This endpoint routes directly to OpenAI's Responses API. Only OpenAI models are
+ supported.
+
+ Args:
+ background: Whether to run the model response in the background.
+ [Learn more](https://platform.openai.com/docs/guides/background).
+
+ conversation: Conversation that this response belongs to. Items from this conversation are
+ prepended to the input items, and output items from this response are
+ automatically added after completion.
+
+ credentials: Credentials for MCP server authentication. Each credential is matched to servers
+ by connection name.
+
+ frequency_penalty: Penalizes new tokens based on their frequency in the text so far.
+
+ include: Specify additional output data to include in the model response. Currently
+ supported values are:
+
+ - `web_search_call.action.sources`: Include the sources of the web search tool
+ call.
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
+ - `computer_call_output.output.image_url`: Include image urls from the computer
+ call output.
+ - `file_search_call.results`: Include the search results of the file search tool
+ call.
+ - `message.input_image.image_url`: Include image urls from the input message.
+ - `message.output_text.logprobs`: Include logprobs with assistant messages.
+ - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+ tokens in reasoning item outputs. This enables reasoning items to be used in
+ multi-turn conversations when using the Responses API statelessly (like when
+ the `store` parameter is set to `false`, or when an organization is enrolled
+ in the zero data retention program).
+
+ input: Text, image, or file inputs to the model, used to generate a response.
+
+ Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Image inputs](https://platform.openai.com/docs/guides/images)
+ - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+ - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+ - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+ instructions: A system (or developer) message inserted into the model's context.
+
+ When using along with `previous_response_id`, the instructions from a previous
+ response will not be carried over to the next response. This makes it simple to
+ swap out system (or developer) messages in new responses.
+
+ max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+ including visible output tokens and
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+ max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
+ response. This maximum number applies across all built-in tool calls, not per
+ individual tool. Any further attempts to call a tool by the model will be
+ ignored.
+
+ mcp_servers: MCP server identifiers. Accepts marketplace slugs, URLs, or MCPServerSpec
+ objects. MCP tools are executed server-side and billed separately.
+
+ metadata: Set of up to 16 key-value string pairs that can be attached to the response for
+ structured metadata and later querying via the API or dashboard.
+
+ model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+ wide range of models with different capabilities, performance characteristics,
+ and price points. Refer to the
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
+ available models.
+
+ parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+ presence_penalty: Penalizes new tokens based on whether they appear in the text so far.
+
+ previous_response_id: Unique ID of the previous response to continue from when creating multi-turn
+ conversations. Cannot be used together with `conversation`.
+
+ prompt: Stored prompt template reference (BYOK).
+
+ prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
+ hit rates. Replaces the `user` field.
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+
+ reasoning: **gpt-5 and o-series models only**
+
+ Configuration options for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+ safety_identifier: A stable identifier used to help detect users of your application that may be
+ violating OpenAI's usage policies. The IDs should be a string that uniquely
+ identifies each user. We recommend hashing their username or email address, in
+ order to avoid sending us any identifying information.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+
+ service_tier: Specifies the processing type used for serving the request.
+
+ - If set to 'auto', then the request will be processed with the service tier
+ configured in the Project settings. Unless otherwise configured, the Project
+ will use 'default'.
+ - If set to 'default', then the request will be processed with the standard
+ pricing and performance for the selected model.
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ '[priority](https://openai.com/api-priority-processing/)', then the request
+ will be processed with the corresponding service tier.
+ - When not set, the default behavior is 'auto'.
+
+ When the `service_tier` parameter is set, the response body will include the
+ `service_tier` value based on the processing mode actually used to serve the
+ request. This response value may be different from the value set in the
+ parameter.
+
+ store: Whether to store the generated response for later retrieval via the Responses
+ API.
+
+ stream: If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ make the output more random, while lower values like 0.2 will make it more
+ focused and deterministic. We generally recommend altering this or `top_p` but
+ not both.
+
+ text: Configuration options for a text response from the model. Can be plain text or
+ structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+ tool_choice: How the model should select which tool (or tools) to use when generating a
+ response. See the `tools` parameter to see how to specify which tools the model
+ can call.
+
+ tools: An array of tools the model may call while generating a response. You can
+ specify which tool to use by setting the `tool_choice` parameter.
+
+ We support the following categories of tools:
+
+ - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ capabilities, like
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ Learn more about
+ [built-in tools](https://platform.openai.com/docs/guides/tools).
+ - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
+ predefined connectors such as Google Drive and SharePoint. Learn more about
+ [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ the model to call your own code with strongly typed arguments and outputs.
+ Learn more about
+ [function calling](https://platform.openai.com/docs/guides/function-calling).
+ You can also use custom tools to call your own code.
+
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+
+ truncation: The truncation strategy to use for the model response.
+
+ - `auto`: If the input to this Response exceeds the model's context window size,
+ the model will truncate the response to fit the context window by dropping
+ items from the beginning of the conversation.
+ - `disabled` (default): If the input size will exceed the context window size
+ for a model, the request will fail with a 400 error.
+
+ user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+ `prompt_cache_key` instead to maintain caching optimizations. A stable
+ identifier for your end-users. Used to boost cache hit rates by better bucketing
+ similar requests and to help OpenAI detect and prevent abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ return await self._post(
+ "/v1/responses",
+ body=await async_maybe_transform(
+ {
+ "background": background,
+ "conversation": conversation,
+ "credentials": credentials,
+ "frequency_penalty": frequency_penalty,
+ "include": include,
+ "input": input,
+ "instructions": instructions,
+ "max_output_tokens": max_output_tokens,
+ "max_tool_calls": max_tool_calls,
+ "mcp_servers": mcp_servers,
+ "metadata": metadata,
+ "model": model,
+ "parallel_tool_calls": parallel_tool_calls,
+ "presence_penalty": presence_penalty,
+ "previous_response_id": previous_response_id,
+ "prompt": prompt,
+ "prompt_cache_key": prompt_cache_key,
+ "reasoning": reasoning,
+ "safety_identifier": safety_identifier,
+ "service_tier": service_tier,
+ "store": store,
+ "stream": stream,
+ "stream_options": stream_options,
+ "temperature": temperature,
+ "text": text,
+ "tool_choice": tool_choice,
+ "tools": tools,
+ "top_logprobs": top_logprobs,
+ "top_p": top_p,
+ "truncation": truncation,
+ "user": user,
+ },
+ response_create_params.ResponseCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=Response,
+ )
+
+
+class ResponsesResourceWithRawResponse:
+ def __init__(self, responses: ResponsesResource) -> None:
+ self._responses = responses
+
+ self.create = to_raw_response_wrapper(
+ responses.create,
+ )
+
+
+class AsyncResponsesResourceWithRawResponse:
+ def __init__(self, responses: AsyncResponsesResource) -> None:
+ self._responses = responses
+
+ self.create = async_to_raw_response_wrapper(
+ responses.create,
+ )
+
+
+class ResponsesResourceWithStreamingResponse:
+ def __init__(self, responses: ResponsesResource) -> None:
+ self._responses = responses
+
+ self.create = to_streamed_response_wrapper(
+ responses.create,
+ )
+
+
+class AsyncResponsesResourceWithStreamingResponse:
+ def __init__(self, responses: AsyncResponsesResource) -> None:
+ self._responses = responses
+
+ self.create = async_to_streamed_response_wrapper(
+ responses.create,
+ )
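Usage of the new resource follows the same pattern as the existing ones — a minimal sketch, assuming the package's `Dedalus` client class; `.parse()` is the standard raw-response accessor referenced in the wrapper docstrings above:

```python
from dedalus_labs import Dedalus

client = Dedalus()

# Plain call: returns a parsed `Response` model.
response = client.responses.create(
    model="gpt-4o",
    input="Write a haiku about type checkers.",
    instructions="You are a concise assistant.",
)

# Raw variant: exposes the underlying httpx response (status, headers)
# and defers body parsing until requested.
raw = client.responses.with_raw_response.create(
    model="gpt-4o",
    input="Write a haiku about type checkers.",
)
response = raw.parse()  # -> Response
```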
diff --git a/src/dedalus_labs/types/__init__.py b/src/dedalus_labs/types/__init__.py
index b22264b..252c826 100644
--- a/src/dedalus_labs/types/__init__.py
+++ b/src/dedalus_labs/types/__init__.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from . import chat, shared
+from . import chat, shared, response
from .. import _compat
from .image import Image as Image
from .model import Model as Model
@@ -21,10 +21,12 @@
DedalusModelChoice as DedalusModelChoice,
FunctionDefinition as FunctionDefinition,
ResponseFormatText as ResponseFormatText,
+ VoiceIDsOrCustomVoice as VoiceIDsOrCustomVoice,
ResponseFormatJSONObject as ResponseFormatJSONObject,
ResponseFormatJSONSchema as ResponseFormatJSONSchema,
)
from .ocr_page import OCRPage as OCRPage
+from .response import Response as Response
from .ocr_response import OCRResponse as OCRResponse
from .images_response import ImagesResponse as ImagesResponse
from .image_edit_params import ImageEditParams as ImageEditParams
@@ -32,6 +34,7 @@
from .ocr_process_params import OCRProcessParams as OCRProcessParams
from .list_models_response import ListModelsResponse as ListModelsResponse
from .image_generate_params import ImageGenerateParams as ImageGenerateParams
+from .response_create_params import ResponseCreateParams as ResponseCreateParams
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse
from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
@@ -41,11 +44,13 @@
# Pydantic can resolve the necessary references.
# See: https://github.com/pydantic/pydantic/issues/11250 for more context.
if _compat.PYDANTIC_V1:
+ response.Response.update_forward_refs() # type: ignore
chat.chat_completion.ChatCompletion.update_forward_refs() # type: ignore
shared.dedalus_model.DedalusModel.update_forward_refs() # type: ignore
shared.mcp_tool_result.MCPToolResult.update_forward_refs() # type: ignore
shared.model_settings.ModelSettings.update_forward_refs() # type: ignore
else:
+ response.Response.model_rebuild(_parent_namespace_depth=0)
chat.chat_completion.ChatCompletion.model_rebuild(_parent_namespace_depth=0)
shared.dedalus_model.DedalusModel.model_rebuild(_parent_namespace_depth=0)
shared.mcp_tool_result.MCPToolResult.model_rebuild(_parent_namespace_depth=0)
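The version split above exists because several models annotate fields with names that are only imported at the bottom of their module. A self-contained sketch of the same pattern, using hypothetical models rather than the SDK's own:

```python
from typing import List, Optional

from pydantic import BaseModel


class Completion(BaseModel):
    # "ToolResult" is a forward reference; the class is only defined below.
    results: Optional[List["ToolResult"]] = None


class ToolResult(BaseModel):
    output: str


# Pydantic v2: resolve the deferred annotation explicitly
# (Pydantic v1 used Completion.update_forward_refs() instead).
Completion.model_rebuild()

print(Completion(results=[{"output": "ok"}]).results[0].output)  # -> ok
```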
diff --git a/src/dedalus_labs/types/audio/speech_create_params.py b/src/dedalus_labs/types/audio/speech_create_params.py
index 888f49e..785f598 100644
--- a/src/dedalus_labs/types/audio/speech_create_params.py
+++ b/src/dedalus_labs/types/audio/speech_create_params.py
@@ -3,30 +3,31 @@
from __future__ import annotations
from typing import Union
-from typing_extensions import Literal, Required, TypedDict
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
-__all__ = ["SpeechCreateParams"]
+from ..shared_params.voice_ids_or_custom_voice import VoiceIDsOrCustomVoice
+
+__all__ = ["SpeechCreateParams", "Voice"]
class SpeechCreateParams(TypedDict, total=False):
input: Required[str]
"""The text to generate audio for. The maximum length is 4096 characters."""
- model: Required[Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]]]
+ model: Required[Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts", "gpt-4o-mini-tts-2025-12-15"]]]
"""
- One of the available [TTS models](https://platform.openai.com/docs/models#tts):
- `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+ One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd`,
+ `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
"""
- voice: Required[
- Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]]
- ]
+ voice: Required[Voice]
"""The voice to use when generating the audio.
- Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`,
- `nova`, `sage`, `shimmer`, and `verse`. Previews of the voices are available in
- the
- [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
+ Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`,
+ `fable`, `onyx`, `nova`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. You
+ may also provide a custom voice object with an `id`, for example
+ `{ "id": "voice_1234" }`. Previews of the voices are available in the
+ [Text to speech guide](/docs/guides/text-to-speech#voice-options).
"""
instructions: str
@@ -53,3 +54,10 @@ class SpeechCreateParams(TypedDict, total=False):
Supported formats are `sse` and `audio`. `sse` is not supported for `tts-1` or
`tts-1-hd`.
"""
+
+
+Voice: TypeAlias = Union[
+ str,
+ Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"],
+ VoiceIDsOrCustomVoice,
+]
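The widened `voice` union accepts either a built-in voice name or a custom voice object — a sketch assuming the resource lives at `client.audio.speech` (inferred from the params module path):

```python
from dedalus_labs import Dedalus

client = Dedalus()

# Built-in voice by name.
client.audio.speech.create(
    model="gpt-4o-mini-tts",
    voice="marin",
    input="Hello from the speech endpoint.",
)

# Custom voice object with an `id`, as the new union allows.
client.audio.speech.create(
    model="gpt-4o-mini-tts-2025-12-15",
    voice={"id": "voice_1234"},
    input="Hello again.",
)
```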
diff --git a/src/dedalus_labs/types/audio/transcription_create_response.py b/src/dedalus_labs/types/audio/transcription_create_response.py
index 6f2f1d4..a0bce8b 100644
--- a/src/dedalus_labs/types/audio/transcription_create_response.py
+++ b/src/dedalus_labs/types/audio/transcription_create_response.py
@@ -22,8 +22,8 @@
class CreateTranscriptionResponseVerboseJSONSegment(BaseModel):
- """
- Fields:
+ """Fields: # noqa: D415.
+
- id (required): int
- seek (required): int
- start (required): float
@@ -88,8 +88,8 @@ class CreateTranscriptionResponseVerboseJSONUsage(BaseModel):
class CreateTranscriptionResponseVerboseJSONWord(BaseModel):
- """
- Fields:
+ """Fields: # noqa: D415.
+
- word (required): str
- start (required): float
- end (required): float
@@ -138,8 +138,8 @@ class CreateTranscriptionResponseVerboseJSON(BaseModel):
class CreateTranscriptionResponseJSONLogprob(BaseModel):
- """
- Fields:
+ """Fields: # noqa: D415.
+
- token (optional): str
- logprob (optional): float
- bytes (optional): list[float]
diff --git a/src/dedalus_labs/types/audio/translation_create_response.py b/src/dedalus_labs/types/audio/translation_create_response.py
index 055d8b1..1ebb38c 100644
--- a/src/dedalus_labs/types/audio/translation_create_response.py
+++ b/src/dedalus_labs/types/audio/translation_create_response.py
@@ -14,8 +14,8 @@
class CreateTranslationResponseVerboseJSONSegment(BaseModel):
- """
- Fields:
+ """Fields: # noqa: D415.
+
- id (required): int
- seek (required): int
- start (required): float
@@ -70,8 +70,8 @@ class CreateTranslationResponseVerboseJSONSegment(BaseModel):
class CreateTranslationResponseVerboseJSON(BaseModel):
- """
- Fields:
+ """Fields: # noqa: D415.
+
- language (required): str
- duration (required): float
- text (required): str
@@ -92,8 +92,8 @@ class CreateTranslationResponseVerboseJSON(BaseModel):
class CreateTranslationResponseJSON(BaseModel):
- """
- Fields:
+ """Fields: # noqa: D415.
+
- text (required): str
"""
diff --git a/src/dedalus_labs/types/chat/audio_param.py b/src/dedalus_labs/types/chat/audio_param.py
index a786fb1..25408d7 100644
--- a/src/dedalus_labs/types/chat/audio_param.py
+++ b/src/dedalus_labs/types/chat/audio_param.py
@@ -10,7 +10,7 @@
class AudioParam(TypedDict, total=False):
"""
Data about a previous audio response from the model.
- [Learn more](https://platform.openai.com/docs/guides/audio).
+ [Learn more](/docs/guides/audio).
Fields:
- id (required): str
diff --git a/src/dedalus_labs/types/chat/chat_completion.py b/src/dedalus_labs/types/chat/chat_completion.py
index 5c6f850..967a5f9 100644
--- a/src/dedalus_labs/types/chat/chat_completion.py
+++ b/src/dedalus_labs/types/chat/chat_completion.py
@@ -2,7 +2,6 @@
from __future__ import annotations
-import builtins
from typing import Dict, List, Optional
from typing_extensions import Literal
@@ -10,7 +9,61 @@
from ..._models import BaseModel
from .completion_usage import CompletionUsage
-__all__ = ["ChatCompletion"]
+__all__ = ["ChatCompletion", "Deferred", "MCPServerErrors", "PendingTool"]
+
+
+class Deferred(BaseModel):
+ """Server-side call blocked until pending client calls complete.
+
+ Carries full spec for stateless resumption on subsequent turns.
+ """
+
+ id: str
+ """Unique identifier for this deferred call."""
+
+ name: str
+ """Name of the tool."""
+
+ arguments: Optional["JSONObjectInput"] = None
+ """Input arguments for the tool call."""
+
+ blocked_by: Optional[List[str]] = None
+ """IDs of pending client calls blocking this call."""
+
+ dependencies: Optional[List[str]] = None
+ """IDs of calls this depends on."""
+
+ venue: Optional[str] = None
+ """Execution venue (server or client)."""
+
+
+class MCPServerErrors(BaseModel):
+ """Error details for a single MCP server failure."""
+
+ message: str
+ """Human-readable error message."""
+
+ code: Optional[str] = None
+ """Machine-readable error code."""
+
+ recommendation: Optional[str] = None
+ """Suggested action for the user."""
+
+
+class PendingTool(BaseModel):
+ """Client-side tool call the SDK must execute."""
+
+ id: str
+ """Unique identifier for this tool call."""
+
+ arguments: "JSONObjectInput"
+ """Input arguments for the tool call."""
+
+ name: str
+ """Name of the tool to execute."""
+
+ dependencies: Optional[List[str]] = None
+ """IDs of other pending calls that must complete first."""
class ChatCompletion(BaseModel):
@@ -39,13 +92,18 @@ class ChatCompletion(BaseModel):
object: Literal["chat.completion"]
"""The object type, which is always `chat.completion`."""
- mcp_server_errors: Optional[Dict[str, builtins.object]] = None
- """Information about MCP server failures, if any occurred during the request.
+ correlation_id: Optional[str] = None
+ """Stable session ID for cross-turn handoff state.
- Contains details about which servers failed and why, along with recommendations
- for the user. Only present when MCP server failures occurred.
+ Echo this on the next request to resume server-side execution.
"""
+ deferred: Optional[List[Deferred]] = None
+ """Server tools blocked on client results."""
+
+ mcp_server_errors: Optional[Dict[str, MCPServerErrors]] = None
+ """MCP server failures keyed by server name."""
+
mcp_tool_results: Optional[List["MCPToolResult"]] = None
"""Detailed results of MCP tool executions including inputs, outputs, and timing.
@@ -53,6 +111,12 @@ class ChatCompletion(BaseModel):
purposes.
"""
+ pending_tools: Optional[List[PendingTool]] = None
+ """Client tools to execute, with dependency ordering."""
+
+ server_results: Optional[Dict[str, Optional["JSONValueInput"]]] = None
+ """Completed server tool outputs keyed by call ID."""
+
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None
"""Specifies the processing type used for serving the request.
@@ -61,7 +125,7 @@ class ChatCompletion(BaseModel):
will use 'default'.
- If set to 'default', then the request will be processed with the standard
pricing and performance for the selected model.
- - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ - If set to '[flex](/docs/guides/flex-processing)' or
'[priority](https://openai.com/api-priority-processing/)', then the request
will be processed with the corresponding service tier.
- When not set, the default behavior is 'auto'.
@@ -86,8 +150,16 @@ class ChatCompletion(BaseModel):
client-side execution.
"""
+ turns_consumed: Optional[int] = None
+ """Number of internal LLM calls made during this request.
+
+ SDKs can sum this across their outer loop to track total LLM calls.
+ """
+
usage: Optional[CompletionUsage] = None
"""Usage statistics for the completion request."""
from ..shared.mcp_tool_result import MCPToolResult
+from ..shared.json_value_input import JSONValueInput
+from ..shared.json_object_input import JSONObjectInput
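
Taken together, `pending_tools`, `deferred`, `server_results`, and `correlation_id` describe a client/server handoff: the client executes pending tools in dependency order, then echoes the correlation ID and the deferred specs so the server can resume statelessly. A minimal sketch of that outer loop, assuming handlers keyed by tool name and a `client.chat.completions.create` entry point; the exact resume contract (how tool results travel back) is not shown in this patch, so the follow-up call below is an assumption:

    from collections import deque

    def run_pending_tools(completion, handlers):
        # Execute client-side tools respecting `dependencies` (sketch; no cycle detection).
        pending = {t.id: t for t in (completion.pending_tools or [])}
        results = {}
        queue = deque(pending.values())
        while queue:
            tool = queue.popleft()
            deps = tool.dependencies or []
            if any(d in pending and d not in results for d in deps):
                queue.append(tool)  # requeue until its prerequisites have run
                continue
            results[tool.id] = handlers[tool.name](tool.arguments or {})
        return results

    tool_results = run_pending_tools(completion, {"lookup_order": lookup_order})  # hypothetical handler
    followup = client.chat.completions.create(
        model="gpt-4o",
        messages=messages,  # plus tool-role messages carrying `tool_results`
        correlation_id=completion.correlation_id,
        deferred_calls=[d.model_dump() for d in (completion.deferred or [])],  # .dict() under pydantic v1
    )
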
diff --git a/src/dedalus_labs/types/chat/chat_completion_assistant_message_param.py b/src/dedalus_labs/types/chat/chat_completion_assistant_message_param.py
index 70faf6b..111c5b4 100644
--- a/src/dedalus_labs/types/chat/chat_completion_assistant_message_param.py
+++ b/src/dedalus_labs/types/chat/chat_completion_assistant_message_param.py
@@ -67,7 +67,7 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False):
audio: Optional[AudioParam]
"""
Data about a previous audio response from the model.
- [Learn more](https://platform.openai.com/docs/guides/audio).
+ [Learn more](/docs/guides/audio).
Fields:
diff --git a/src/dedalus_labs/types/chat/chat_completion_audio_param.py b/src/dedalus_labs/types/chat/chat_completion_audio_param.py
index 0ea75cd..8592a68 100644
--- a/src/dedalus_labs/types/chat/chat_completion_audio_param.py
+++ b/src/dedalus_labs/types/chat/chat_completion_audio_param.py
@@ -3,19 +3,27 @@
from __future__ import annotations
from typing import Union
-from typing_extensions import Literal, Required, TypedDict
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
-__all__ = ["ChatCompletionAudioParam"]
+from ..shared_params.voice_ids_or_custom_voice import VoiceIDsOrCustomVoice
+
+__all__ = ["ChatCompletionAudioParam", "Voice"]
+
+Voice: TypeAlias = Union[
+ str,
+ Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"],
+ VoiceIDsOrCustomVoice,
+]
class ChatCompletionAudioParam(TypedDict, total=False):
"""Parameters for audio output.
Required when audio output is requested with
- `modalities: ["audio"]`. [Learn more](https://platform.openai.com/docs/guides/audio).
+ `modalities: ["audio"]`. [Learn more](/docs/guides/audio).
Fields:
- - voice (required): VoiceIdsShared
+ - voice (required): VoiceIdsOrCustomVoice
- format (required): Literal["wav", "aac", "mp3", "flac", "opus", "pcm16"]
"""
@@ -25,11 +33,11 @@ class ChatCompletionAudioParam(TypedDict, total=False):
Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
"""
- voice: Required[
- Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]]
- ]
+ voice: Required[Voice]
"""The voice the model uses to respond.
- Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`,
- `onyx`, `sage`, and `shimmer`.
+ Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`,
+ `fable`, `nova`, `onyx`, `sage`, `shimmer`, `marin`, and `cedar`. You may also
+ provide a custom voice object with an `id`, for example
+ `{ "id": "voice_1234" }`.
"""
diff --git a/src/dedalus_labs/types/chat/chat_completion_chunk.py b/src/dedalus_labs/types/chat/chat_completion_chunk.py
index 4248a61..0859ab8 100644
--- a/src/dedalus_labs/types/chat/chat_completion_chunk.py
+++ b/src/dedalus_labs/types/chat/chat_completion_chunk.py
@@ -14,7 +14,7 @@ class ChatCompletionChunk(BaseModel):
"""
Represents a streamed chunk of a chat completion response returned
by the model, based on the provided input.
- [Learn more](https://platform.openai.com/docs/guides/streaming-responses).
+ [Learn more](/docs/guides/streaming-responses).
Fields:
- id (required): str
@@ -57,7 +57,7 @@ class ChatCompletionChunk(BaseModel):
will use 'default'.
- If set to 'default', then the request will be processed with the standard
pricing and performance for the selected model.
- - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ - If set to '[flex](/docs/guides/flex-processing)' or
'[priority](https://openai.com/api-priority-processing/)', then the request
will be processed with the corresponding service tier.
- When not set, the default behavior is 'auto'.
diff --git a/src/dedalus_labs/types/chat/chat_completion_content_part_file_param.py b/src/dedalus_labs/types/chat/chat_completion_content_part_file_param.py
index df4cf6e..8ea20a7 100644
--- a/src/dedalus_labs/types/chat/chat_completion_content_part_file_param.py
+++ b/src/dedalus_labs/types/chat/chat_completion_content_part_file_param.py
@@ -30,8 +30,7 @@ class File(TypedDict, total=False):
class ChatCompletionContentPartFileParam(TypedDict, total=False):
- """
- Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text generation.
+ """Learn about [file inputs](/docs/guides/text) for text generation.
Fields:
- type (required): Literal["file"]
diff --git a/src/dedalus_labs/types/chat/chat_completion_content_part_image_param.py b/src/dedalus_labs/types/chat/chat_completion_content_part_image_param.py
index 5b7c529..877d588 100644
--- a/src/dedalus_labs/types/chat/chat_completion_content_part_image_param.py
+++ b/src/dedalus_labs/types/chat/chat_completion_content_part_image_param.py
@@ -22,12 +22,12 @@ class ImageURL(TypedDict, total=False):
"""Specifies the detail level of the image.
Learn more in the
- [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
+ [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).
"""
class ChatCompletionContentPartImageParam(TypedDict, total=False):
- """Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
+ """Learn about [image inputs](/docs/guides/vision).
Fields:
- type (required): Literal["image_url"]
diff --git a/src/dedalus_labs/types/chat/chat_completion_content_part_input_audio_param.py b/src/dedalus_labs/types/chat/chat_completion_content_part_input_audio_param.py
index 169e8ce..e269a78 100644
--- a/src/dedalus_labs/types/chat/chat_completion_content_part_input_audio_param.py
+++ b/src/dedalus_labs/types/chat/chat_completion_content_part_input_audio_param.py
@@ -23,7 +23,7 @@ class InputAudio(TypedDict, total=False):
class ChatCompletionContentPartInputAudioParam(TypedDict, total=False):
- """Learn about [audio inputs](https://platform.openai.com/docs/guides/audio).
+ """Learn about [audio inputs](/docs/guides/audio).
Fields:
- type (required): Literal["input_audio"]
diff --git a/src/dedalus_labs/types/chat/chat_completion_content_part_text_param.py b/src/dedalus_labs/types/chat/chat_completion_content_part_text_param.py
index 13e844e..c291597 100644
--- a/src/dedalus_labs/types/chat/chat_completion_content_part_text_param.py
+++ b/src/dedalus_labs/types/chat/chat_completion_content_part_text_param.py
@@ -8,8 +8,7 @@
class ChatCompletionContentPartTextParam(TypedDict, total=False):
- """
- Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).
+ """Learn about [text inputs](/docs/guides/text-generation).
Fields:
- type (required): Literal["text"]
diff --git a/src/dedalus_labs/types/chat/chat_completion_functions_param.py b/src/dedalus_labs/types/chat/chat_completion_functions_param.py
index 2d101c7..dbb8c61 100644
--- a/src/dedalus_labs/types/chat/chat_completion_functions_param.py
+++ b/src/dedalus_labs/types/chat/chat_completion_functions_param.py
@@ -33,8 +33,7 @@ class ChatCompletionFunctionsParam(TypedDict, total=False):
parameters: Dict[str, object]
"""The parameters the functions accepts, described as a JSON Schema object.
- See the [guide](https://platform.openai.com/docs/guides/function-calling) for
- examples, and the
+ See the [guide](/docs/guides/function-calling) for examples, and the
[JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
documentation about the format.
diff --git a/src/dedalus_labs/types/chat/chat_completion_message.py b/src/dedalus_labs/types/chat/chat_completion_message.py
index c240952..a85222d 100644
--- a/src/dedalus_labs/types/chat/chat_completion_message.py
+++ b/src/dedalus_labs/types/chat/chat_completion_message.py
@@ -60,7 +60,7 @@ class Annotation(BaseModel):
class Audio(BaseModel):
"""
If the audio output modality is requested, this object contains data
- about the audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio).
+ about the audio response from the model. [Learn more](/docs/guides/audio).
Fields:
- id (required): str
@@ -140,14 +140,13 @@ class ChatCompletionMessage(BaseModel):
annotations: Optional[List[Annotation]] = None
"""
Annotations for the message, when applicable, as when using the
- [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+ [web search tool](/docs/guides/tools-web-search?api-mode=chat).
"""
audio: Optional[Audio] = None
"""
If the audio output modality is requested, this object contains data about the
- audio response from the model.
- [Learn more](https://platform.openai.com/docs/guides/audio).
+ audio response from the model. [Learn more](/docs/guides/audio).
Fields:
diff --git a/src/dedalus_labs/types/chat/chat_completion_tool_param.py b/src/dedalus_labs/types/chat/chat_completion_tool_param.py
index 3dc63cd..09d4004 100644
--- a/src/dedalus_labs/types/chat/chat_completion_tool_param.py
+++ b/src/dedalus_labs/types/chat/chat_completion_tool_param.py
@@ -10,23 +10,19 @@
class ChatCompletionToolParam(TypedDict, total=False):
- """A function tool that can be used to generate a response.
+ """Schema for Tool.
Fields:
- - type (required): Literal["function"]
- - function (required): FunctionObject
+ - type (optional): ToolTypes
+ - function (required): Function
"""
function: Required[FunctionDefinition]
- """Schema for FunctionObject.
+ """Schema for Function.
Fields:
- - description (optional): str
- name (required): str
- - parameters (optional): FunctionParameters
- - strict (optional): bool | None
"""
- type: Required[Literal["function"]]
- """The type of the tool. Currently, only `function` is supported."""
+ type: Literal["function"]
diff --git a/src/dedalus_labs/types/chat/completion_create_params.py b/src/dedalus_labs/types/chat/completion_create_params.py
index 7958c39..f231d32 100644
--- a/src/dedalus_labs/types/chat/completion_create_params.py
+++ b/src/dedalus_labs/types/chat/completion_create_params.py
@@ -39,14 +39,8 @@
"ResponseFormat",
"SafetySetting",
"Thinking",
+ "ThinkingThinkingConfigAdaptive",
"ToolChoice",
- "Tool",
- "ToolCustomToolChatCompletions",
- "ToolCustomToolChatCompletionsCustom",
- "ToolCustomToolChatCompletionsCustomFormat",
- "ToolCustomToolChatCompletionsCustomFormatTextFormat",
- "ToolCustomToolChatCompletionsCustomFormatGrammarFormat",
- "ToolCustomToolChatCompletionsCustomFormatGrammarFormatGrammar",
"CompletionCreateParamsNonStreaming",
"CompletionCreateParamsStreaming",
]
@@ -67,11 +61,11 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"""Parameters for audio output.
Required when audio output is requested with `modalities: ["audio"]`.
- [Learn more](https://platform.openai.com/docs/guides/audio).
+ [Learn more](/docs/guides/audio).
Fields:
- - voice (required): VoiceIdsShared
+ - voice (required): VoiceIdsOrCustomVoice
- format (required): Literal["wav", "aac", "mp3", "flac", "opus", "pcm16"]
"""
@@ -89,6 +83,12 @@ class CompletionCreateParamsBase(TypedDict, total=False):
`cachedContents/{cachedContent}`
"""
+ correlation_id: Optional[str]
+ """Stable session ID for resuming a previous handoff.
+
+ Returned by the server on handoff; echo it on the next request to resume.
+ """
+
credentials: Optional[Credentials]
"""Credentials for MCP server authentication.
@@ -102,6 +102,13 @@ class CompletionCreateParamsBase(TypedDict, total=False):
`/v1/chat/deferred-completion/{request_id}`.
"""
+ deferred_calls: Optional[Iterable[Dict[str, object]]]
+ """Tier 2 stateless resumption.
+
+ Deferred tool specs from a previous handoff response, sent back verbatim so the
+ server can resume without Redis.
+ """
+
frequency_penalty: Optional[float]
"""Number between -2.0 and 2.0.
@@ -110,7 +117,15 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"""
function_call: Optional[str]
- """Wrapper for union variant: function call mode."""
+ """Deprecated in favor of `tool_choice`.
+
+ Controls which (if any) function is called by the model. `none` means the model
+ will not call a function and instead generates a message. `auto` means the model
+ can pick between generating a message or calling a function. Specifying a
+ particular function via `{"name": "my_function"}` forces the model to call that
+ function. `none` is the default when no functions are present. `auto` is the
+ default if functions are present.
+ """
functions: Optional[Iterable[ChatCompletionFunctionsParam]]
"""Deprecated in favor of `tools`.
@@ -127,6 +142,19 @@ class CompletionCreateParamsBase(TypedDict, total=False):
handoff_config: Optional[Dict[str, object]]
"""Configuration for multi-model handoffs."""
+ handoff_mode: Optional[bool]
+ """Handoff control.
+
+ `None` or omitted: auto-detect. `true`: structured handoff (SDK-driven).
+ `false`: drop-in (the LLM re-runs mixed turns).
+ """
+
+ inference_geo: Optional[str]
+ """Specifies the geographic region for inference processing.
+
+ If not specified, the workspace's `default_inference_geo` is used.
+ """
+
logit_bias: Optional[Dict[str, int]]
"""Modify the likelihood of specified tokens appearing in the completion.
@@ -178,9 +206,8 @@ class CompletionCreateParamsBase(TypedDict, total=False):
Most models are capable of generating text, which is the default: `["text"]` The
`gpt-4o-audio-preview` model can also be used to
- [generate audio](https://platform.openai.com/docs/guides/audio). To request that
- this model generate both text and audio responses, you can use:
- `["text", "audio"]`
+ [generate audio](/docs/guides/audio). To request that this model generate both
+ text and audio responses, you can use: `["text", "audio"]`
"""
model_attributes: Optional[Dict[str, Dict[str, float]]]
@@ -196,8 +223,10 @@ class CompletionCreateParamsBase(TypedDict, total=False):
of the choices. Keep `n` as `1` to minimize costs.
"""
+ output_config: Optional["JSONObjectInput"]
+
parallel_tool_calls: Optional[bool]
- """Whether to enable parallel tool calls (Anthropic uses inverted polarity)"""
+ """Whether to enable parallel tool calls (Anthropic uses inverted polarity)."""
prediction: Optional[PredictionContentParam]
"""
@@ -222,8 +251,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
prompt_cache_key: Optional[str]
"""
Used by OpenAI to cache responses for similar requests to optimize your cache
- hit rates. Replaces the `user` field.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ hit rates. Replaces the `user` field. [Learn more](/docs/guides/prompt-caching).
"""
prompt_cache_retention: Optional[str]
@@ -231,7 +259,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
Set to `24h` to enable extended prompt caching, which keeps cached prefixes
active for longer, up to a maximum of 24 hours.
- [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ [Learn more](/docs/guides/prompt-caching#prompt-cache-retention).
"""
prompt_mode: Optional[Literal["reasoning"]]
@@ -244,14 +272,15 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"""
Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- reasoning effort can result in faster responses and fewer tokens used on
- reasoning in a response. - `gpt-5.1` defaults to `none`, which does not perform
- reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`,
- `medium`, and `high`. Tool calls are supported for all reasoning values in
- gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort, and
- do not support `none`. - The `gpt-5-pro` model defaults to (and only supports)
- `high` reasoning effort.
+ supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ Reducing reasoning effort can result in faster responses and fewer tokens used
+ on reasoning in a response. - `gpt-5.1` defaults to `none`, which does not
+ perform reasoning. The supported reasoning values for `gpt-5.1` are `none`,
+ `low`, `medium`, and `high`. Tool calls are supported for all reasoning values
+ in gpt-5.1. - All models before `gpt-5.1` default to `medium` reasoning effort,
+ and do not support `none`. - The `gpt-5-pro` model defaults to (and only
+ supports) `high` reasoning effort. - `xhigh` is supported for all models after
+ `gpt-5.1-codex-max`.
"""
response_format: Optional[ResponseFormat]
@@ -259,11 +288,10 @@ class CompletionCreateParamsBase(TypedDict, total=False):
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
Outputs which ensures the model will match your supplied JSON schema. Learn more
- in the
- [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
+ in the [Structured Outputs guide](/docs/guides/structured-outputs). Setting to
+ `{ "type": "json_object" }` enables the older JSON mode, which ensures the
+ message the model generates is valid JSON. Using `json_schema` is preferred for
+ models that support it.
"""
safe_prompt: Optional[bool]
@@ -275,7 +303,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
"""
safety_settings: Optional[Iterable[SafetySetting]]
@@ -293,15 +321,21 @@ class CompletionCreateParamsBase(TypedDict, total=False):
service_tier: Optional[str]
"""Service tier for request processing"""
+ speed: Optional[Literal["standard", "fast"]]
+ """The inference speed mode for this request.
+
+ `"fast"` enables high output-tokens-per-second inference.
+ """
+
stop: Union[SequenceNotStr[str], str, None]
"""Sequences that stop generation"""
store: Optional[bool]
"""
Whether or not to store the output of this chat completion request for use in
- our [model distillation](https://platform.openai.com/docs/guides/distillation)
- or [evals](https://platform.openai.com/docs/guides/evals) products. Supports
- text and image inputs. Note: image inputs over 8MB will be dropped.
+ our [model distillation](/docs/guides/distillation) or
+ [evals](/docs/guides/evals) products. Supports text and image inputs. Note:
+ image inputs over 8MB will be dropped.
"""
stream_options: Optional["JSONObjectInput"]
@@ -330,7 +364,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
tool_config: Optional["JSONObjectInput"]
"""Tool calling configuration (Google-specific)"""
- tools: Optional[Iterable[Tool]]
+ tools: Optional[Iterable[ChatCompletionToolParam]]
"""Available tools/functions for the model"""
top_k: Optional[int]
@@ -352,7 +386,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
Use `prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ [Learn more](/docs/guides/safety-best-practices#safety-identifiers).
"""
verbosity: Optional[str]
@@ -367,7 +401,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"""This tool searches the web for relevant results to use in a response.
Learn more about the
- [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+ [web search tool](/docs/guides/tools-web-search?api-mode=chat).
"""
@@ -396,8 +430,8 @@ class SafetySetting(TypedDict, total=False):
content is blocked.
Fields:
- - category (required): HarmCategory
- threshold (required): Literal["HARM_BLOCK_THRESHOLD_UNSPECIFIED", "BLOCK_LOW_AND_ABOVE", "BLOCK_MEDIUM_AND_ABOVE", "BLOCK_ONLY_HIGH", "BLOCK_NONE", "OFF"]
+ - category (required): HarmCategory
"""
category: Required[
@@ -431,92 +465,19 @@ class SafetySetting(TypedDict, total=False):
"""Required. Controls the probability threshold at which harm is blocked."""
-Thinking: TypeAlias = Union[ThinkingConfigEnabledParam, ThinkingConfigDisabledParam]
-
-ToolChoice: TypeAlias = Union[ToolChoiceAutoParam, ToolChoiceAnyParam, ToolChoiceToolParam, ToolChoiceNoneParam]
-
-
-class ToolCustomToolChatCompletionsCustomFormatTextFormat(TypedDict, total=False):
- """Unconstrained free-form text.
+class ThinkingThinkingConfigAdaptive(TypedDict, total=False):
+ """Schema for ThinkingConfigAdaptive.
Fields:
- - type (required): Literal["text"]
+ - type (required): Literal["adaptive"]
"""
- type: Required[Literal["text"]]
- """Unconstrained text format. Always `text`."""
-
-
-class ToolCustomToolChatCompletionsCustomFormatGrammarFormatGrammar(TypedDict, total=False):
- """Your chosen grammar.
-
- Fields:
- - definition (required): str
- - syntax (required): Literal["lark", "regex"]
- """
+ type: Required[Literal["adaptive"]]
- definition: Required[str]
- """The grammar definition."""
- syntax: Required[Literal["lark", "regex"]]
- """The syntax of the grammar definition. One of `lark` or `regex`."""
+Thinking: TypeAlias = Union[ThinkingConfigEnabledParam, ThinkingConfigDisabledParam, ThinkingThinkingConfigAdaptive]
-
-class ToolCustomToolChatCompletionsCustomFormatGrammarFormat(TypedDict, total=False):
- """A grammar defined by the user.
-
- Fields:
- - type (required): Literal["grammar"]
- - grammar (required): GrammarFormatGrammarFormat
- """
-
- grammar: Required[ToolCustomToolChatCompletionsCustomFormatGrammarFormatGrammar]
- """Your chosen grammar.
-
- Fields:
-
- - definition (required): str
- - syntax (required): Literal["lark", "regex"]
- """
-
- type: Required[Literal["grammar"]]
- """Grammar format. Always `grammar`."""
-
-
-ToolCustomToolChatCompletionsCustomFormat: TypeAlias = Union[
- ToolCustomToolChatCompletionsCustomFormatTextFormat, ToolCustomToolChatCompletionsCustomFormatGrammarFormat
-]
-
-
-class ToolCustomToolChatCompletionsCustom(TypedDict, total=False):
- """Properties of the custom tool."""
-
- name: Required[str]
- """The name of the custom tool, used to identify it in tool calls."""
-
- description: str
- """Optional description of the custom tool, used to provide more context."""
-
- format: ToolCustomToolChatCompletionsCustomFormat
- """The input format for the custom tool. Default is unconstrained text."""
-
-
-class ToolCustomToolChatCompletions(TypedDict, total=False):
- """A custom tool that processes input using a specified format.
-
- Fields:
- - type (required): Literal["custom"]
- - custom (required): CustomToolProperties
- """
-
- custom: Required[ToolCustomToolChatCompletionsCustom]
- """Properties of the custom tool."""
-
- type: Required[Literal["custom"]]
- """The type of the custom tool. Always `custom`."""
-
-
-Tool: TypeAlias = Union[ChatCompletionToolParam, ToolCustomToolChatCompletions]
+ToolChoice: TypeAlias = Union[ToolChoiceAutoParam, ToolChoiceAnyParam, ToolChoiceToolParam, ToolChoiceNoneParam]
class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
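
A request sketch exercising several of the new parameters. This assumes the usual required `model`/`messages` keys (not shown in this hunk) and that the `Thinking` union above backs a `thinking` field; the region and correlation values are placeholders:

    from dedalus_labs.types.chat.completion_create_params import CompletionCreateParamsNonStreaming

    params: CompletionCreateParamsNonStreaming = {
        "model": "gpt-5.1",
        "messages": [{"role": "user", "content": "Resume where we left off."}],
        "speed": "fast",                    # high output-tokens-per-second mode
        "handoff_mode": True,               # force structured (SDK) handoff
        "inference_geo": "us",              # hypothetical region value
        "thinking": {"type": "adaptive"},   # new ThinkingConfigAdaptive variant
        "correlation_id": "corr_abc123",    # echoed from a prior handoff response
    }
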
diff --git a/src/dedalus_labs/types/create_embedding_response.py b/src/dedalus_labs/types/create_embedding_response.py
index cb26fdb..72378f1 100644
--- a/src/dedalus_labs/types/create_embedding_response.py
+++ b/src/dedalus_labs/types/create_embedding_response.py
@@ -21,7 +21,7 @@ class Data(BaseModel):
"""The embedding vector, which is a list of floats.
The length of the vector depends on the model as listed in the
- [embedding guide](https://platform.openai.com/docs/guides/embeddings).
+ [embedding guide](/docs/guides/embeddings).
"""
index: int
diff --git a/src/dedalus_labs/types/embedding_create_params.py b/src/dedalus_labs/types/embedding_create_params.py
index 51fdb52..ac3eed8 100644
--- a/src/dedalus_labs/types/embedding_create_params.py
+++ b/src/dedalus_labs/types/embedding_create_params.py
@@ -27,11 +27,9 @@ class EmbeddingCreateParams(TypedDict, total=False):
model: Required[Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]]]
"""ID of the model to use.
- You can use the
- [List models](https://platform.openai.com/docs/api-reference/models/list) API to
- see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models) for descriptions of
- them.
+ You can use the [List models](/docs/api-reference/models/list) API to see all of
+ your available models, or see our [Model overview](/docs/models) for
+ descriptions of them.
"""
dimensions: int
@@ -49,6 +47,5 @@ class EmbeddingCreateParams(TypedDict, total=False):
user: str
"""
A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
"""
diff --git a/src/dedalus_labs/types/model.py b/src/dedalus_labs/types/model.py
index 0ac28fc..f7ba379 100644
--- a/src/dedalus_labs/types/model.py
+++ b/src/dedalus_labs/types/model.py
@@ -72,7 +72,9 @@ class Model(BaseModel):
created_at: datetime
"""When the model was released (RFC 3339)"""
- provider: Literal["openai", "anthropic", "google", "xai", "mistral", "groq", "fireworks", "deepseek"]
+ provider: Literal[
+ "openai", "anthropic", "google", "xai", "mistral", "groq", "fireworks", "deepseek", "moonshot", "cerebras"
+ ]
"""Provider that hosts this model"""
capabilities: Optional[Capabilities] = None
diff --git a/src/dedalus_labs/types/response.py b/src/dedalus_labs/types/response.py
new file mode 100644
index 0000000..9bf677e
--- /dev/null
+++ b/src/dedalus_labs/types/response.py
@@ -0,0 +1,145 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["Response", "MCPServerErrors"]
+
+
+class MCPServerErrors(BaseModel):
+ """Error details for a single MCP server failure."""
+
+ message: str
+ """Human-readable error message."""
+
+ code: Optional[str] = None
+ """Machine-readable error code."""
+
+ recommendation: Optional[str] = None
+ """Suggested action for the user."""
+
+
+class Response(BaseModel):
+ """Responses API response with Dedalus extensions."""
+
+ id: str
+ """Unique identifier for this Response"""
+
+ created_at: float
+ """Unix timestamp (in seconds) when this Response was created"""
+
+ model: str
+ """The model used for this response"""
+
+ output: List["JSONObjectInput"]
+ """Array of content items generated by the model"""
+
+ status: Literal["completed", "failed", "in_progress", "cancelled", "queued", "incomplete"]
+ """The status of the response generation"""
+
+ background: Optional[bool] = None
+ """Whether the request was run in the background"""
+
+ completed_at: Optional[float] = None
+ """Unix timestamp (in seconds) when the response was completed"""
+
+ conversation: Optional["JSONObjectInput"] = None
+ """Conversation this response belongs to (requires BYOK)"""
+
+ error: Optional["JSONObjectInput"] = None
+ """Error details if status is 'failed'"""
+
+ frequency_penalty: Optional[float] = None
+ """Frequency penalty used for this response"""
+
+ incomplete_details: Optional[Dict[str, str]] = None
+ """
+ Details about why the response is incomplete (e.g., max_output_tokens,
+ content_filter)
+ """
+
+ instructions: Union[str, List["JSONObjectInput"], None] = None
+ """System/developer instructions used"""
+
+ max_output_tokens: Optional[int] = None
+ """Maximum output tokens allowed for this response"""
+
+ max_tool_calls: Optional[int] = None
+ """Maximum tool calls allowed for this response"""
+
+ mcp_server_errors: Optional[Dict[str, MCPServerErrors]] = None
+ """MCP server failures keyed by server name."""
+
+ mcp_tool_results: Optional[List["MCPToolResult"]] = None
+ """Results of MCP tool executions including inputs, outputs, and timing."""
+
+ metadata: Optional[Dict[str, str]] = None
+ """Key-value pairs attached to this response (requires BYOK)"""
+
+ object: Optional[Literal["response"]] = None
+ """The object type, always 'response'"""
+
+ output_text: Optional[str] = None
+ """
+ Aggregated text output from all output_text items (SDK-only convenience
+ property)
+ """
+
+ parallel_tool_calls: Optional[bool] = None
+ """Whether parallel tool calls were enabled"""
+
+ presence_penalty: Optional[float] = None
+ """Presence penalty used for this response"""
+
+ previous_response_id: Optional[str] = None
+ """Previous response ID referenced, if any"""
+
+ prompt_cache_key: Optional[str] = None
+ """Prompt cache key used for this response"""
+
+ reasoning: Optional["JSONObjectInput"] = None
+ """Reasoning configuration and outputs"""
+
+ safety_identifier: Optional[str] = None
+ """Safety identifier used for this response"""
+
+ service_tier: Optional[str] = None
+ """Service tier used for this response"""
+
+ store: Optional[bool] = None
+ """Whether the response was stored for later retrieval"""
+
+ temperature: Optional[float] = None
+ """Temperature parameter used"""
+
+ text: Optional["JSONObjectInput"] = None
+ """Text configuration used for this response"""
+
+ tool_choice: Union[str, "JSONObjectInput", None] = None
+ """Tool choice configuration used"""
+
+ tools: Optional[List["JSONObjectInput"]] = None
+ """Tools that were available to the model"""
+
+ tools_executed: Optional[List[str]] = None
+ """List of tool names that were executed server-side (e.g., MCP tools)."""
+
+ top_logprobs: Optional[int] = None
+ """Number of logprob tokens returned per position"""
+
+ top_p: Optional[float] = None
+ """Top-p parameter used"""
+
+ truncation: Optional[str] = None
+ """Truncation strategy applied when context exceeded limits"""
+
+ usage: Optional["JSONObjectInput"] = None
+ """Token usage statistics for the response"""
+
+
+from .shared.mcp_tool_result import MCPToolResult
+from .shared.json_object_input import JSONObjectInput
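
A small sketch of reading the Dedalus extensions off a `Response`, assuming `resp` came back from a Responses API call (field names per the model above):

    from dedalus_labs.types.response import Response

    def summarize(resp: Response) -> str:
        lines = [f"{resp.id}: status={resp.status} model={resp.model}"]
        for server, err in (resp.mcp_server_errors or {}).items():
            lines.append(f"MCP server '{server}' failed: {err.message} ({err.code or 'no code'})")
        if resp.tools_executed:
            lines.append("server-side tools: " + ", ".join(resp.tools_executed))
        return "\n".join(lines)
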
diff --git a/src/dedalus_labs/types/response_create_params.py b/src/dedalus_labs/types/response_create_params.py
new file mode 100644
index 0000000..27b6c73
--- /dev/null
+++ b/src/dedalus_labs/types/response_create_params.py
@@ -0,0 +1,316 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .._types import SequenceNotStr
+from .shared_params import mcp_servers
+from .shared_params.credential import Credential
+from .shared_params.mcp_credentials import MCPCredentials
+from .shared_params.mcp_server_spec import MCPServerSpec
+
+__all__ = [
+ "ResponseCreateParams",
+ "Conversation",
+ "ConversationResponseConversationParam",
+ "Credentials",
+ "MCPServers",
+ "Model",
+ "Prompt",
+]
+
+
+class ResponseCreateParams(TypedDict, total=False):
+ background: Optional[bool]
+ """
+ Whether to run the model response in the background.
+ [Learn more](https://platform.openai.com/docs/guides/background).
+ """
+
+ conversation: Optional[Conversation]
+ """Conversation that this response belongs to.
+
+ Items from this conversation are prepended to the input items, and output items
+ from this response are automatically added after completion.
+ """
+
+ credentials: Optional[Credentials]
+ """Credentials for MCP server authentication.
+
+ Each credential is matched to servers by connection name.
+ """
+
+ frequency_penalty: Optional[float]
+ """Penalizes new tokens based on their frequency in the text so far."""
+
+ include: Optional[SequenceNotStr[str]]
+ """Specify additional output data to include in the model response.
+
+ Currently supported values are:
+
+ - `web_search_call.action.sources`: Include the sources of the web search tool
+ call.
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
+ - `computer_call_output.output.image_url`: Include image urls from the computer
+ call output.
+ - `file_search_call.results`: Include the search results of the file search tool
+ call.
+ - `message.input_image.image_url`: Include image urls from the input message.
+ - `message.output_text.logprobs`: Include logprobs with assistant messages.
+ - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+ tokens in reasoning item outputs. This enables reasoning items to be used in
+ multi-turn conversations when using the Responses API statelessly (like when
+ the `store` parameter is set to `false`, or when an organization is enrolled
+ in the zero data retention program).
+ """
+
+ input: Union[str, Iterable["JSONObjectInput"], None]
+ """Text, image, or file inputs to the model, used to generate a response.
+
+ Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Image inputs](https://platform.openai.com/docs/guides/images)
+ - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+ - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+ - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+ """
+
+ instructions: Union[str, Iterable["JSONObjectInput"], None]
+ """A system (or developer) message inserted into the model's context.
+
+ When using along with `previous_response_id`, the instructions from a previous
+ response will not be carried over to the next response. This makes it simple to
+ swap out system (or developer) messages in new responses.
+ """
+
+ max_output_tokens: Optional[int]
+ """
+ An upper bound for the number of tokens that can be generated for a response,
+ including visible output tokens and
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+ """
+
+ max_tool_calls: Optional[int]
+ """
+ The maximum number of total calls to built-in tools that can be processed in a
+ response. This maximum number applies across all built-in tool calls, not per
+ individual tool. Any further attempts to call a tool by the model will be
+ ignored.
+ """
+
+ mcp_servers: Optional[MCPServers]
+ """MCP server identifiers.
+
+ Accepts marketplace slugs, URLs, or MCPServerSpec objects. MCP tools are
+ executed server-side and billed separately.
+ """
+
+ metadata: Optional[Dict[str, str]]
+ """
+ Set of up to 16 key-value string pairs that can be attached to the response for
+ structured metadata and later querying via the API or dashboard.
+ """
+
+ model: Optional[Model]
+ """Model ID used to generate the response, like `gpt-4o` or `o3`.
+
+ OpenAI offers a wide range of models with different capabilities, performance
+ characteristics, and price points. Refer to the
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
+ available models.
+ """
+
+ parallel_tool_calls: Optional[bool]
+ """Whether to allow the model to run tool calls in parallel."""
+
+ presence_penalty: Optional[float]
+ """Penalizes new tokens based on whether they appear in the text so far."""
+
+ previous_response_id: Optional[str]
+ """
+ Unique ID of the previous response to continue from when creating multi-turn
+ conversations. Cannot be used together with `conversation`.
+ """
+
+ prompt: Optional[Prompt]
+ """Stored prompt template reference (BYOK)."""
+
+ prompt_cache_key: Optional[str]
+ """
+ Used by OpenAI to cache responses for similar requests to optimize your cache
+ hit rates. Replaces the `user` field.
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ """
+
+ reasoning: Optional["JSONObjectInput"]
+ """**gpt-5 and o-series models only**
+
+ Configuration options for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+ """
+
+ safety_identifier: Optional[str]
+ """
+ A stable identifier used to help detect users of your application that may be
+ violating OpenAI's usage policies. The IDs should be a string that uniquely
+ identifies each user. We recommend hashing their username or email address, in
+ order to avoid sending us any identifying information.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ """
+
+ service_tier: Optional[Literal["auto", "default"]]
+ """Specifies the processing type used for serving the request.
+
+ - If set to 'auto', then the request will be processed with the service tier
+ configured in the Project settings. Unless otherwise configured, the Project
+ will use 'default'.
+ - If set to 'default', then the request will be processed with the standard
+ pricing and performance for the selected model.
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ '[priority](https://openai.com/api-priority-processing/)', then the request
+ will be processed with the corresponding service tier.
+ - When not set, the default behavior is 'auto'.
+
+ When the `service_tier` parameter is set, the response body will include the
+ `service_tier` value based on the processing mode actually used to serve the
+ request. This response value may be different from the value set in the
+ parameter.
+ """
+
+ store: Optional[bool]
+ """
+ Whether to store the generated response for later retrieval via the Responses
+ API.
+ """
+
+ stream: bool
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ See the
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+ for more information.
+ """
+
+ stream_options: Optional["JSONObjectInput"]
+ """Options for streaming response. Only set this when you set `stream: true`."""
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic. We generally recommend altering
+ this or `top_p` but not both.
+ """
+
+ text: Optional["JSONObjectInput"]
+ """Configuration options for a text response from the model.
+
+ Can be plain text or structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ """
+
+ tool_choice: Union[str, "JSONObjectInput", None]
+ """
+ How the model should select which tool (or tools) to use when generating a
+ response. See the `tools` parameter to see how to specify which tools the model
+ can call.
+ """
+
+ tools: Optional[Iterable["JSONObjectInput"]]
+ """An array of tools the model may call while generating a response.
+
+ You can specify which tool to use by setting the `tool_choice` parameter.
+
+ We support the following categories of tools:
+
+ - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ capabilities, like
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ Learn more about
+ [built-in tools](https://platform.openai.com/docs/guides/tools).
+ - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
+ predefined connectors such as Google Drive and SharePoint. Learn more about
+ [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ the model to call your own code with strongly typed arguments and outputs.
+ Learn more about
+ [function calling](https://platform.openai.com/docs/guides/function-calling).
+ You can also use custom tools to call your own code.
+ """
+
+ top_logprobs: Optional[int]
+ """
+ An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ """
+
+ truncation: Optional[Literal["auto", "disabled"]]
+ """The truncation strategy to use for the model response.
+
+ - `auto`: If the input to this Response exceeds the model's context window size,
+ the model will truncate the response to fit the context window by dropping
+ items from the beginning of the conversation.
+ - `disabled` (default): If the input size will exceed the context window size
+ for a model, the request will fail with a 400 error.
+ """
+
+ user: Optional[str]
+ """This field is being replaced by `safety_identifier` and `prompt_cache_key`.
+
+ Use `prompt_cache_key` instead to maintain caching optimizations. A stable
+ identifier for your end-users. Used to boost cache hit rates by better bucketing
+ similar requests and to help OpenAI detect and prevent abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ """
+
+
+class ConversationResponseConversationParam(TypedDict, total=False):
+ """Conversation reference for continuing a Responses session."""
+
+ id: Required[str]
+ """Identifier of the existing conversation."""
+
+
+Conversation: TypeAlias = Union[str, ConversationResponseConversationParam]
+
+Credentials: TypeAlias = Union[Credential, MCPCredentials]
+
+MCPServers: TypeAlias = Union[str, MCPServerSpec, mcp_servers.MCPServers]
+
+Model: TypeAlias = Union[str, "DedalusModel", SequenceNotStr["DedalusModelChoice"]]
+
+
+class Prompt(TypedDict, total=False):
+ """Stored prompt template reference (BYOK)."""
+
+ id: Required[str]
+ """Identifier of the stored prompt."""
+
+ variables: Optional["JSONObjectInput"]
+ """Variables to substitute into the stored prompt template."""
+
+ version: Optional[str]
+ """Optional version identifier of the stored prompt."""
+
+
+from .shared_params.dedalus_model import DedalusModel
+from .shared_params.json_object_input import JSONObjectInput
+from .shared_params.dedalus_model_choice import DedalusModelChoice
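
A minimal request sketch for the new params type. The marketplace slug and metadata are placeholders; `mcp_servers` also accepts URLs or `MCPServerSpec` objects per the docstring:

    from dedalus_labs.types.response_create_params import ResponseCreateParams

    params: ResponseCreateParams = {
        "model": "gpt-4o",
        "input": "Summarize the latest deployment logs.",
        "mcp_servers": "github",            # hypothetical marketplace slug
        "metadata": {"ticket": "OPS-42"},   # up to 16 key-value string pairs
        "truncation": "auto",
    }
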
diff --git a/src/dedalus_labs/types/shared/__init__.py b/src/dedalus_labs/types/shared/__init__.py
index 343f3ed..b59edd1 100644
--- a/src/dedalus_labs/types/shared/__init__.py
+++ b/src/dedalus_labs/types/shared/__init__.py
@@ -14,5 +14,6 @@
from .function_definition import FunctionDefinition as FunctionDefinition
from .dedalus_model_choice import DedalusModelChoice as DedalusModelChoice
from .response_format_text import ResponseFormatText as ResponseFormatText
+from .voice_ids_or_custom_voice import VoiceIDsOrCustomVoice as VoiceIDsOrCustomVoice
from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject
from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema
diff --git a/src/dedalus_labs/types/shared/function_definition.py b/src/dedalus_labs/types/shared/function_definition.py
index 97b3308..92ca62b 100644
--- a/src/dedalus_labs/types/shared/function_definition.py
+++ b/src/dedalus_labs/types/shared/function_definition.py
@@ -1,51 +1,16 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, Optional
-
from ..._models import BaseModel
__all__ = ["FunctionDefinition"]
class FunctionDefinition(BaseModel):
- """Schema for FunctionObject.
+ """Schema for Function.
Fields:
- - description (optional): str
- name (required): str
- - parameters (optional): FunctionParameters
- - strict (optional): bool | None
"""
name: str
- """The name of the function to be called.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- description: Optional[str] = None
- """
- A description of what the function does, used by the model to choose when and
- how to call the function.
- """
-
- parameters: Optional[Dict[str, object]] = None
- """The parameters the functions accepts, described as a JSON Schema object.
-
- See the [guide](https://platform.openai.com/docs/guides/function-calling) for
- examples, and the
- [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
- documentation about the format.
-
- Omitting `parameters` defines a function with an empty parameter list.
- """
-
- strict: Optional[bool] = None
- """Whether to enable strict schema adherence when generating the function call.
-
- If set to true, the model will follow the exact schema defined in the
- `parameters` field. Only a subset of JSON Schema is supported when `strict` is
- `true`. Learn more about Structured Outputs in the
- [function calling guide](https://platform.openai.com/docs/guides/function-calling).
- """
+ """The name of the function to call."""
diff --git a/src/dedalus_labs/types/shared/mcp_tool_result.py b/src/dedalus_labs/types/shared/mcp_tool_result.py
index 3b6ebdf..77f98b0 100644
--- a/src/dedalus_labs/types/shared/mcp_tool_result.py
+++ b/src/dedalus_labs/types/shared/mcp_tool_result.py
@@ -17,22 +17,22 @@ class MCPToolResult(BaseModel):
"""
arguments: "JSONObjectInput"
- """Input arguments passed to the tool"""
+ """Input arguments passed to the tool."""
is_error: bool
- """Whether the tool execution resulted in an error"""
+ """Whether the tool execution resulted in an error."""
server_name: str
- """Name of the MCP server that handled the tool"""
+ """Name of the MCP server that handled the tool."""
tool_name: str
- """Name of the MCP tool that was executed"""
+ """Name of the MCP tool that was executed."""
duration_ms: Optional[int] = None
- """Execution time in milliseconds"""
+ """Execution time in milliseconds."""
result: Optional["JSONValueInput"] = None
- """Structured result from the tool (parsed from structuredContent or content)"""
+ """Structured result from the tool (parsed from structuredContent or content)."""
from .json_value_input import JSONValueInput
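
For completeness, a sketch of walking `mcp_tool_results` on a completion, using the field names documented above (`completion` is assumed to be a `ChatCompletion` or `Response` from a prior call):

    def log_mcp_results(completion) -> None:
        # Print one line per MCP tool execution, flagging errors and timing.
        for r in completion.mcp_tool_results or []:
            status = "error" if r.is_error else "ok"
            took = f"{r.duration_ms} ms" if r.duration_ms is not None else "n/a"
            print(f"[{status}] {r.server_name}/{r.tool_name} in {took}: {r.result!r}")
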
diff --git a/src/dedalus_labs/types/shared/response_format_json_schema.py b/src/dedalus_labs/types/shared/response_format_json_schema.py
index 34a2276..b25e79e 100644
--- a/src/dedalus_labs/types/shared/response_format_json_schema.py
+++ b/src/dedalus_labs/types/shared/response_format_json_schema.py
@@ -38,7 +38,7 @@ class JSONSchema(BaseModel):
true, the model will always follow the exact schema defined in the `schema`
field. Only a subset of JSON Schema is supported when `strict` is `true`. To
learn more, read the
- [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ [Structured Outputs guide](/docs/guides/structured-outputs).
"""
@@ -46,7 +46,7 @@ class ResponseFormatJSONSchema(BaseModel):
"""JSON Schema response format.
Used to generate structured JSON responses.
- Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
+ Learn more about [Structured Outputs](/docs/guides/structured-outputs).
Fields:
- type (required): Literal["json_schema"]
diff --git a/src/dedalus_labs/types/shared/voice_ids_or_custom_voice.py b/src/dedalus_labs/types/shared/voice_ids_or_custom_voice.py
new file mode 100644
index 0000000..72e3a4b
--- /dev/null
+++ b/src/dedalus_labs/types/shared/voice_ids_or_custom_voice.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+
+__all__ = ["VoiceIDsOrCustomVoice"]
+
+
+class VoiceIDsOrCustomVoice(BaseModel):
+ """Custom voice reference.
+
+ Fields:
+ - id (required): str
+ """
+
+ id: str
+ """The custom voice ID, e.g. `voice_1234`."""
diff --git a/src/dedalus_labs/types/shared_params/__init__.py b/src/dedalus_labs/types/shared_params/__init__.py
index ceda706..93171d6 100644
--- a/src/dedalus_labs/types/shared_params/__init__.py
+++ b/src/dedalus_labs/types/shared_params/__init__.py
@@ -13,5 +13,6 @@
from .function_definition import FunctionDefinition as FunctionDefinition
from .dedalus_model_choice import DedalusModelChoice as DedalusModelChoice
from .response_format_text import ResponseFormatText as ResponseFormatText
+from .voice_ids_or_custom_voice import VoiceIDsOrCustomVoice as VoiceIDsOrCustomVoice
from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject
from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema
diff --git a/src/dedalus_labs/types/shared_params/function_definition.py b/src/dedalus_labs/types/shared_params/function_definition.py
index 0c1b618..427ade8 100644
--- a/src/dedalus_labs/types/shared_params/function_definition.py
+++ b/src/dedalus_labs/types/shared_params/function_definition.py
@@ -2,51 +2,17 @@
from __future__ import annotations
-from typing import Dict, Optional
from typing_extensions import Required, TypedDict
__all__ = ["FunctionDefinition"]
class FunctionDefinition(TypedDict, total=False):
- """Schema for FunctionObject.
+ """Schema for Function.
Fields:
- - description (optional): str
- name (required): str
- - parameters (optional): FunctionParameters
- - strict (optional): bool | None
"""
name: Required[str]
- """The name of the function to be called.
-
- Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
- of 64.
- """
-
- description: str
- """
- A description of what the function does, used by the model to choose when and
- how to call the function.
- """
-
- parameters: Dict[str, object]
- """The parameters the functions accepts, described as a JSON Schema object.
-
- See the [guide](https://platform.openai.com/docs/guides/function-calling) for
- examples, and the
- [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
- documentation about the format.
-
- Omitting `parameters` defines a function with an empty parameter list.
- """
-
- strict: Optional[bool]
- """Whether to enable strict schema adherence when generating the function call.
-
- If set to true, the model will follow the exact schema defined in the
- `parameters` field. Only a subset of JSON Schema is supported when `strict` is
- `true`. Learn more about Structured Outputs in the
- [function calling guide](https://platform.openai.com/docs/guides/function-calling).
- """
+ """The name of the function to call."""
diff --git a/src/dedalus_labs/types/shared_params/response_format_json_schema.py b/src/dedalus_labs/types/shared_params/response_format_json_schema.py
index c38aac7..252d78b 100644
--- a/src/dedalus_labs/types/shared_params/response_format_json_schema.py
+++ b/src/dedalus_labs/types/shared_params/response_format_json_schema.py
@@ -36,7 +36,7 @@ class JSONSchema(TypedDict, total=False):
true, the model will always follow the exact schema defined in the `schema`
field. Only a subset of JSON Schema is supported when `strict` is `true`. To
learn more, read the
- [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ [Structured Outputs guide](/docs/guides/structured-outputs).
"""
@@ -44,7 +44,7 @@ class ResponseFormatJSONSchema(TypedDict, total=False):
"""JSON Schema response format.
Used to generate structured JSON responses.
- Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
+ Learn more about [Structured Outputs](/docs/guides/structured-outputs).
Fields:
- type (required): Literal["json_schema"]
diff --git a/src/dedalus_labs/types/shared_params/voice_ids_or_custom_voice.py b/src/dedalus_labs/types/shared_params/voice_ids_or_custom_voice.py
new file mode 100644
index 0000000..3535052
--- /dev/null
+++ b/src/dedalus_labs/types/shared_params/voice_ids_or_custom_voice.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["VoiceIDsOrCustomVoice"]
+
+
+class VoiceIDsOrCustomVoice(TypedDict, total=False):
+ """Custom voice reference.
+
+ Fields:
+ - id (required): str
+ """
+
+ id: Required[str]
+ """The custom voice ID, e.g. `voice_1234`."""
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index c9ec34a..44420a4 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -42,11 +42,13 @@ def test_method_create_with_all_params_overload_1(self, client: Dedalus) -> None
},
automatic_tool_execution=True,
cached_content="cached_content",
+ correlation_id="correlation_id",
credentials={
"connection_name": "external-service",
"values": {"api_key": "sk-..."},
},
deferred=True,
+ deferred_calls=[{"foo": "bar"}],
frequency_penalty=-2,
function_call="function_call",
functions=[
@@ -59,6 +61,8 @@ def test_method_create_with_all_params_overload_1(self, client: Dedalus) -> None
generation_config={"foo": "string"},
guardrails=[{"foo": "bar"}],
handoff_config={"foo": "bar"},
+ handoff_mode=True,
+ inference_geo="inference_geo",
logit_bias={"foo": 0},
logprobs=True,
max_completion_tokens=0,
@@ -81,6 +85,7 @@ def test_method_create_with_all_params_overload_1(self, client: Dedalus) -> None
}
},
n=1,
+ output_config={"foo": "string"},
parallel_tool_calls=True,
prediction={
"content": "string",
@@ -103,6 +108,7 @@ def test_method_create_with_all_params_overload_1(self, client: Dedalus) -> None
search_parameters={"foo": "string"},
seed=0,
service_tier="service_tier",
+ speed="standard",
stop=["string"],
store=True,
stream=False,
@@ -120,12 +126,7 @@ def test_method_create_with_all_params_overload_1(self, client: Dedalus) -> None
tool_config={"foo": "string"},
tools=[
{
- "function": {
- "name": "name",
- "description": "description",
- "parameters": {"foo": "bar"},
- "strict": True,
- },
+ "function": {"name": "name"},
"type": "function",
}
],
@@ -189,11 +190,13 @@ def test_method_create_with_all_params_overload_2(self, client: Dedalus) -> None
},
automatic_tool_execution=True,
cached_content="cached_content",
+ correlation_id="correlation_id",
credentials={
"connection_name": "external-service",
"values": {"api_key": "sk-..."},
},
deferred=True,
+ deferred_calls=[{"foo": "bar"}],
frequency_penalty=-2,
function_call="function_call",
functions=[
@@ -206,6 +209,8 @@ def test_method_create_with_all_params_overload_2(self, client: Dedalus) -> None
generation_config={"foo": "string"},
guardrails=[{"foo": "bar"}],
handoff_config={"foo": "bar"},
+ handoff_mode=True,
+ inference_geo="inference_geo",
logit_bias={"foo": 0},
logprobs=True,
max_completion_tokens=0,
@@ -228,6 +233,7 @@ def test_method_create_with_all_params_overload_2(self, client: Dedalus) -> None
}
},
n=1,
+ output_config={"foo": "string"},
parallel_tool_calls=True,
prediction={
"content": "string",
@@ -250,6 +256,7 @@ def test_method_create_with_all_params_overload_2(self, client: Dedalus) -> None
search_parameters={"foo": "string"},
seed=0,
service_tier="service_tier",
+ speed="standard",
stop=["string"],
store=True,
stream_options={"foo": "string"},
@@ -266,12 +273,7 @@ def test_method_create_with_all_params_overload_2(self, client: Dedalus) -> None
tool_config={"foo": "string"},
tools=[
{
- "function": {
- "name": "name",
- "description": "description",
- "parameters": {"foo": "bar"},
- "strict": True,
- },
+ "function": {"name": "name"},
"type": "function",
}
],
@@ -340,11 +342,13 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
},
automatic_tool_execution=True,
cached_content="cached_content",
+ correlation_id="correlation_id",
credentials={
"connection_name": "external-service",
"values": {"api_key": "sk-..."},
},
deferred=True,
+ deferred_calls=[{"foo": "bar"}],
frequency_penalty=-2,
function_call="function_call",
functions=[
@@ -357,6 +361,8 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
generation_config={"foo": "string"},
guardrails=[{"foo": "bar"}],
handoff_config={"foo": "bar"},
+ handoff_mode=True,
+ inference_geo="inference_geo",
logit_bias={"foo": 0},
logprobs=True,
max_completion_tokens=0,
@@ -379,6 +385,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
}
},
n=1,
+ output_config={"foo": "string"},
parallel_tool_calls=True,
prediction={
"content": "string",
@@ -401,6 +408,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
search_parameters={"foo": "string"},
seed=0,
service_tier="service_tier",
+ speed="standard",
stop=["string"],
store=True,
stream=False,
@@ -418,12 +426,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
tool_config={"foo": "string"},
tools=[
{
- "function": {
- "name": "name",
- "description": "description",
- "parameters": {"foo": "bar"},
- "strict": True,
- },
+ "function": {"name": "name"},
"type": "function",
}
],
@@ -487,11 +490,13 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
},
automatic_tool_execution=True,
cached_content="cached_content",
+ correlation_id="correlation_id",
credentials={
"connection_name": "external-service",
"values": {"api_key": "sk-..."},
},
deferred=True,
+ deferred_calls=[{"foo": "bar"}],
frequency_penalty=-2,
function_call="function_call",
functions=[
@@ -504,6 +509,8 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
generation_config={"foo": "string"},
guardrails=[{"foo": "bar"}],
handoff_config={"foo": "bar"},
+ handoff_mode=True,
+ inference_geo="inference_geo",
logit_bias={"foo": 0},
logprobs=True,
max_completion_tokens=0,
@@ -526,6 +533,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
}
},
n=1,
+ output_config={"foo": "string"},
parallel_tool_calls=True,
prediction={
"content": "string",
@@ -548,6 +556,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
search_parameters={"foo": "string"},
seed=0,
service_tier="service_tier",
+ speed="standard",
stop=["string"],
store=True,
stream_options={"foo": "string"},
@@ -564,12 +573,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
tool_config={"foo": "string"},
tools=[
{
- "function": {
- "name": "name",
- "description": "description",
- "parameters": {"foo": "bar"},
- "strict": True,
- },
+ "function": {"name": "name"},
"type": "function",
}
],
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
new file mode 100644
index 0000000..09da41e
--- /dev/null
+++ b/tests/api_resources/test_responses.py
@@ -0,0 +1,188 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from dedalus_labs import Dedalus, AsyncDedalus
+from dedalus_labs.types import Response
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestResponses:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Dedalus) -> None:
+ response = client.responses.create()
+ assert_matches_type(Response, response, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Dedalus) -> None:
+ response = client.responses.create(
+ background=True,
+ conversation="string",
+ credentials={
+ "connection_name": "external-service",
+ "values": {"api_key": "sk-..."},
+ },
+ frequency_penalty=0,
+ include=["message.output_text.logprobs"],
+ input="What is the capital of France?",
+ instructions="You are a helpful assistant.",
+ max_output_tokens=1000,
+ max_tool_calls=10,
+ mcp_servers="dedalus-labs/example-server",
+ metadata={"foo": "string"},
+ model="openai/gpt-4o",
+ parallel_tool_calls=True,
+ presence_penalty=0,
+ previous_response_id="previous_response_id",
+ prompt={
+ "id": "id",
+ "variables": {"foo": "string"},
+ "version": "version",
+ },
+ prompt_cache_key="prompt_cache_key",
+ reasoning={"foo": "string"},
+ safety_identifier="safety_identifier",
+ service_tier="auto",
+ store=True,
+ stream=True,
+ stream_options={"foo": "string"},
+ temperature=0,
+ text={"foo": "string"},
+ tool_choice="auto",
+ tools=[
+ {
+ "function": {
+ "description": None,
+ "name": None,
+ "parameters": None,
+ },
+ "type": "function",
+ }
+ ],
+ top_logprobs=5,
+ top_p=0.1,
+ truncation="auto",
+ user="user",
+ )
+ assert_matches_type(Response, response, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Dedalus) -> None:
+ http_response = client.responses.with_raw_response.create()
+
+ assert http_response.is_closed is True
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+ response = http_response.parse()
+ assert_matches_type(Response, response, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Dedalus) -> None:
+ with client.responses.with_streaming_response.create() as http_response:
+ assert not http_response.is_closed
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ response = http_response.parse()
+ assert_matches_type(Response, response, path=["response"])
+
+ assert cast(Any, http_response.is_closed) is True
+
+
+class TestAsyncResponses:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncDedalus) -> None:
+ response = await async_client.responses.create()
+ assert_matches_type(Response, response, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncDedalus) -> None:
+ response = await async_client.responses.create(
+ background=True,
+ conversation="string",
+ credentials={
+ "connection_name": "external-service",
+ "values": {"api_key": "sk-..."},
+ },
+ frequency_penalty=0,
+ include=["message.output_text.logprobs"],
+ input="What is the capital of France?",
+ instructions="You are a helpful assistant.",
+ max_output_tokens=1000,
+ max_tool_calls=10,
+ mcp_servers="dedalus-labs/example-server",
+ metadata={"foo": "string"},
+ model="openai/gpt-4o",
+ parallel_tool_calls=True,
+ presence_penalty=0,
+ previous_response_id="previous_response_id",
+ prompt={
+ "id": "id",
+ "variables": {"foo": "string"},
+ "version": "version",
+ },
+ prompt_cache_key="prompt_cache_key",
+ reasoning={"foo": "string"},
+ safety_identifier="safety_identifier",
+ service_tier="auto",
+ store=True,
+ stream=True,
+ stream_options={"foo": "string"},
+ temperature=0,
+ text={"foo": "string"},
+ tool_choice="auto",
+ tools=[
+ {
+ "function": {
+ "description": None,
+ "name": None,
+ "parameters": None,
+ },
+ "type": "function",
+ }
+ ],
+ top_logprobs=5,
+ top_p=0.1,
+ truncation="auto",
+ user="user",
+ )
+ assert_matches_type(Response, response, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncDedalus) -> None:
+ http_response = await async_client.responses.with_raw_response.create()
+
+ assert http_response.is_closed is True
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+ response = await http_response.parse()
+ assert_matches_type(Response, response, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncDedalus) -> None:
+ async with async_client.responses.with_streaming_response.create() as http_response:
+ assert not http_response.is_closed
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ response = await http_response.parse()
+ assert_matches_type(Response, response, path=["response"])
+
+ assert cast(Any, http_response.is_closed) is True
From 2268aff5c14821d23341baf4b65d7d7e5a26b7b7 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 10 Feb 2026 07:52:32 +0000
Subject: [PATCH 17/23] chore(api): small type fixes
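deferred_calls and ChatCompletion.deferred now share a concrete type
(DeferredCallResponse / DeferredCallResponseParam) instead of plain dicts,
and tool_choice additionally accepts a bare string. A minimal sketch of
echoing deferred calls back on the next turn (field values and the
`messages` payload are illustrative, not from the spec):

    from dedalus_labs import Dedalus
    from dedalus_labs.types.chat import DeferredCallResponseParam

    call: DeferredCallResponseParam = {
        "id": "call_123",         # required
        "name": "search",         # required
        "arguments": {"q": "x"},  # optional JSON object input
    }
    client = Dedalus()
    client.chat.completions.create(
        model="openai/gpt-4o",
        messages=[{"role": "user", "content": "resume"}],
        deferred_calls=[call],  # sent back verbatim for stateless resumption
    )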
---
.stats.yml | 6 +-
api.md | 1 +
.../resources/chat/completions.py | 17 ++---
src/dedalus_labs/types/__init__.py | 2 +
src/dedalus_labs/types/chat/__init__.py | 2 +
.../types/chat/chat_completion.py | 30 +--------
.../types/chat/completion_create_params.py | 5 +-
.../types/chat/deferred_call_response.py | 37 +++++++++++
.../chat/deferred_call_response_param.py | 37 +++++++++++
tests/api_resources/chat/test_completions.py | 64 +++++++++++++------
10 files changed, 141 insertions(+), 60 deletions(-)
create mode 100644 src/dedalus_labs/types/chat/deferred_call_response.py
create mode 100644 src/dedalus_labs/types/chat/deferred_call_response_param.py
diff --git a/.stats.yml b/.stats.yml
index 9fe1fff..bea80b1 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 12
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/dedalus-labs%2Fdedalus-sdk-3330faa66a188880e45bcc35fa1b899365424c079dfd79b45c4da710eeccab10.yml
-openapi_spec_hash: 542937ce78a5c4ee4dbe54ab837151d2
-config_hash: 2b25f3d3742dd0d7790fd4339f500a29
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/dedalus-labs%2Fdedalus-sdk-78928123e88f6699e87022537ef23c34c0a802c5b78ac0c5259b30b59371492e.yml
+openapi_spec_hash: 50b6107b70d1a6367726f78a04af5493
+config_hash: b57dddec045c6790044b72c73f0d1891
diff --git a/api.md b/api.md
index b610253..8d920d5 100644
--- a/api.md
+++ b/api.md
@@ -153,6 +153,7 @@ from dedalus_labs.types.chat import (
ChoiceLogprobs,
CompletionTokensDetails,
CompletionUsage,
+ DeferredCallResponse,
InputTokenDetails,
PredictionContent,
PromptTokensDetails,
diff --git a/src/dedalus_labs/resources/chat/completions.py b/src/dedalus_labs/resources/chat/completions.py
index e168d64..6d8ba47 100644
--- a/src/dedalus_labs/resources/chat/completions.py
+++ b/src/dedalus_labs/resources/chat/completions.py
@@ -37,6 +37,7 @@
from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam
from ...types.shared_params.json_object_input import JSONObjectInput
from ...types.chat.chat_completion_audio_param import ChatCompletionAudioParam
+from ...types.chat.deferred_call_response_param import DeferredCallResponseParam
from ...types.chat.chat_completion_functions_param import ChatCompletionFunctionsParam
__all__ = ["CompletionsResource", "AsyncCompletionsResource"]
@@ -74,7 +75,7 @@ def create(
correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
- deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
+ deferred_calls: Optional[Iterable[DeferredCallResponseParam]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
@@ -430,7 +431,7 @@ def create(
correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
- deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
+ deferred_calls: Optional[Iterable[DeferredCallResponseParam]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
@@ -785,7 +786,7 @@ def create(
correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
- deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
+ deferred_calls: Optional[Iterable[DeferredCallResponseParam]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
@@ -1139,7 +1140,7 @@ def create(
correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
- deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
+ deferred_calls: Optional[Iterable[DeferredCallResponseParam]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
@@ -1637,7 +1638,7 @@ async def create(
correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
- deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
+ deferred_calls: Optional[Iterable[DeferredCallResponseParam]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
@@ -1993,7 +1994,7 @@ async def create(
correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
- deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
+ deferred_calls: Optional[Iterable[DeferredCallResponseParam]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
@@ -2348,7 +2349,7 @@ async def create(
correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
- deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
+ deferred_calls: Optional[Iterable[DeferredCallResponseParam]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
@@ -2702,7 +2703,7 @@ async def create(
correlation_id: Optional[str] | Omit = omit,
credentials: Optional[completion_create_params.Credentials] | Omit = omit,
deferred: Optional[bool] | Omit = omit,
- deferred_calls: Optional[Iterable[Dict[str, object]]] | Omit = omit,
+ deferred_calls: Optional[Iterable[DeferredCallResponseParam]] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: Optional[str] | Omit = omit,
functions: Optional[Iterable[ChatCompletionFunctionsParam]] | Omit = omit,
diff --git a/src/dedalus_labs/types/__init__.py b/src/dedalus_labs/types/__init__.py
index 252c826..afe3bc6 100644
--- a/src/dedalus_labs/types/__init__.py
+++ b/src/dedalus_labs/types/__init__.py
@@ -46,12 +46,14 @@
if _compat.PYDANTIC_V1:
response.Response.update_forward_refs() # type: ignore
chat.chat_completion.ChatCompletion.update_forward_refs() # type: ignore
+ chat.deferred_call_response.DeferredCallResponse.update_forward_refs() # type: ignore
shared.dedalus_model.DedalusModel.update_forward_refs() # type: ignore
shared.mcp_tool_result.MCPToolResult.update_forward_refs() # type: ignore
shared.model_settings.ModelSettings.update_forward_refs() # type: ignore
else:
response.Response.model_rebuild(_parent_namespace_depth=0)
chat.chat_completion.ChatCompletion.model_rebuild(_parent_namespace_depth=0)
+ chat.deferred_call_response.DeferredCallResponse.model_rebuild(_parent_namespace_depth=0)
shared.dedalus_model.DedalusModel.model_rebuild(_parent_namespace_depth=0)
shared.mcp_tool_result.MCPToolResult.model_rebuild(_parent_namespace_depth=0)
shared.model_settings.ModelSettings.model_rebuild(_parent_namespace_depth=0)
diff --git a/src/dedalus_labs/types/chat/__init__.py b/src/dedalus_labs/types/chat/__init__.py
index 875ffcb..568d243 100644
--- a/src/dedalus_labs/types/chat/__init__.py
+++ b/src/dedalus_labs/types/chat/__init__.py
@@ -14,6 +14,7 @@
from .prompt_tokens_details import PromptTokensDetails as PromptTokensDetails
from .tool_choice_any_param import ToolChoiceAnyParam as ToolChoiceAnyParam
from .choice_delta_tool_call import ChoiceDeltaToolCall as ChoiceDeltaToolCall
+from .deferred_call_response import DeferredCallResponse as DeferredCallResponse
from .stream_choice_logprobs import StreamChoiceLogprobs as StreamChoiceLogprobs
from .tool_choice_auto_param import ToolChoiceAutoParam as ToolChoiceAutoParam
from .tool_choice_none_param import ToolChoiceNoneParam as ToolChoiceNoneParam
@@ -24,6 +25,7 @@
from .completion_tokens_details import CompletionTokensDetails as CompletionTokensDetails
from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam
from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam
+from .deferred_call_response_param import DeferredCallResponseParam as DeferredCallResponseParam
from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
from .thinking_config_enabled_param import ThinkingConfigEnabledParam as ThinkingConfigEnabledParam
from .thinking_config_disabled_param import ThinkingConfigDisabledParam as ThinkingConfigDisabledParam
diff --git a/src/dedalus_labs/types/chat/chat_completion.py b/src/dedalus_labs/types/chat/chat_completion.py
index 967a5f9..4c8533c 100644
--- a/src/dedalus_labs/types/chat/chat_completion.py
+++ b/src/dedalus_labs/types/chat/chat_completion.py
@@ -9,32 +9,7 @@
from ..._models import BaseModel
from .completion_usage import CompletionUsage
-__all__ = ["ChatCompletion", "Deferred", "MCPServerErrors", "PendingTool"]
-
-
-class Deferred(BaseModel):
- """Server-side call blocked until pending client calls complete.
-
- Carries full spec for stateless resumption on subsequent turns.
- """
-
- id: str
- """Unique identifier for this deferred call."""
-
- name: str
- """Name of the tool."""
-
- arguments: Optional["JSONObjectInput"] = None
- """Input arguments for the tool call."""
-
- blocked_by: Optional[List[str]] = None
- """IDs of pending client calls blocking this call."""
-
- dependencies: Optional[List[str]] = None
- """IDs of calls this depends on."""
-
- venue: Optional[str] = None
- """Execution venue (server or client)."""
+__all__ = ["ChatCompletion", "MCPServerErrors", "PendingTool"]
class MCPServerErrors(BaseModel):
@@ -98,7 +73,7 @@ class ChatCompletion(BaseModel):
Echo this on the next request to resume server-side execution.
"""
- deferred: Optional[List[Deferred]] = None
+ deferred: Optional[List["DeferredCallResponse"]] = None
"""Server tools blocked on client results."""
mcp_server_errors: Optional[Dict[str, MCPServerErrors]] = None
@@ -160,6 +135,7 @@ class ChatCompletion(BaseModel):
"""Usage statistics for the completion request."""
+from .deferred_call_response import DeferredCallResponse
from ..shared.mcp_tool_result import MCPToolResult
from ..shared.json_value_input import JSONValueInput
from ..shared.json_object_input import JSONObjectInput
diff --git a/src/dedalus_labs/types/chat/completion_create_params.py b/src/dedalus_labs/types/chat/completion_create_params.py
index f231d32..931cd23 100644
--- a/src/dedalus_labs/types/chat/completion_create_params.py
+++ b/src/dedalus_labs/types/chat/completion_create_params.py
@@ -102,7 +102,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
`/v1/chat/deferred-completion/{request_id}`.
"""
- deferred_calls: Optional[Iterable[Dict[str, object]]]
+ deferred_calls: Optional[Iterable["DeferredCallResponseParam"]]
"""Tier 2 stateless resumption.
Deferred tool specs from a previous handoff response, sent back verbatim so the
@@ -477,7 +477,7 @@ class ThinkingThinkingConfigAdaptive(TypedDict, total=False):
Thinking: TypeAlias = Union[ThinkingConfigEnabledParam, ThinkingConfigDisabledParam, ThinkingThinkingConfigAdaptive]
-ToolChoice: TypeAlias = Union[ToolChoiceAutoParam, ToolChoiceAnyParam, ToolChoiceToolParam, ToolChoiceNoneParam]
+ToolChoice: TypeAlias = Union[str, ToolChoiceAutoParam, ToolChoiceAnyParam, ToolChoiceToolParam, ToolChoiceNoneParam]
class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
@@ -493,5 +493,6 @@ class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
from ..shared_params.dedalus_model import DedalusModel
+from .deferred_call_response_param import DeferredCallResponseParam
from ..shared_params.json_object_input import JSONObjectInput
from ..shared_params.dedalus_model_choice import DedalusModelChoice
diff --git a/src/dedalus_labs/types/chat/deferred_call_response.py b/src/dedalus_labs/types/chat/deferred_call_response.py
new file mode 100644
index 0000000..9ddfb01
--- /dev/null
+++ b/src/dedalus_labs/types/chat/deferred_call_response.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["DeferredCallResponse"]
+
+
+class DeferredCallResponse(BaseModel):
+ """Server-side call blocked until pending client calls complete.
+
+ Carries full spec for stateless resumption on subsequent turns.
+ """
+
+ id: str
+ """Unique identifier for this deferred call."""
+
+ name: str
+ """Name of the tool."""
+
+ arguments: Optional["JSONObjectInput"] = None
+ """Input arguments for the tool call."""
+
+ blocked_by: Optional[List[str]] = None
+ """IDs of pending client calls blocking this call."""
+
+ dependencies: Optional[List[str]] = None
+ """IDs of calls this depends on."""
+
+ venue: Optional[str] = None
+ """Execution venue (server or client)."""
+
+
+from ..shared.json_object_input import JSONObjectInput
diff --git a/src/dedalus_labs/types/chat/deferred_call_response_param.py b/src/dedalus_labs/types/chat/deferred_call_response_param.py
new file mode 100644
index 0000000..ad763e7
--- /dev/null
+++ b/src/dedalus_labs/types/chat/deferred_call_response_param.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = ["DeferredCallResponseParam"]
+
+
+class DeferredCallResponseParam(TypedDict, total=False):
+ """Server-side call blocked until pending client calls complete.
+
+ Carries full spec for stateless resumption on subsequent turns.
+ """
+
+ id: Required[str]
+ """Unique identifier for this deferred call."""
+
+ name: Required[str]
+ """Name of the tool."""
+
+ arguments: "JSONObjectInput"
+ """Input arguments for the tool call."""
+
+ blocked_by: SequenceNotStr[str]
+ """IDs of pending client calls blocking this call."""
+
+ dependencies: SequenceNotStr[str]
+ """IDs of calls this depends on."""
+
+ venue: str
+ """Execution venue (server or client)."""
+
+
+from ..shared_params.json_object_input import JSONObjectInput
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index 44420a4..91de76c 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -48,7 +48,16 @@ def test_method_create_with_all_params_overload_1(self, client: Dedalus) -> None
"values": {"api_key": "sk-..."},
},
deferred=True,
- deferred_calls=[{"foo": "bar"}],
+ deferred_calls=[
+ {
+ "id": "id",
+ "name": "name",
+ "arguments": {"foo": "string"},
+ "blocked_by": ["string"],
+ "dependencies": ["string"],
+ "venue": "venue",
+ }
+ ],
frequency_penalty=-2,
function_call="function_call",
functions=[
@@ -119,10 +128,7 @@ def test_method_create_with_all_params_overload_1(self, client: Dedalus) -> None
"budget_tokens": 1024,
"type": "enabled",
},
- tool_choice={
- "type": "auto",
- "disable_parallel_tool_use": True,
- },
+ tool_choice="string",
tool_config={"foo": "string"},
tools=[
{
@@ -196,7 +202,16 @@ def test_method_create_with_all_params_overload_2(self, client: Dedalus) -> None
"values": {"api_key": "sk-..."},
},
deferred=True,
- deferred_calls=[{"foo": "bar"}],
+ deferred_calls=[
+ {
+ "id": "id",
+ "name": "name",
+ "arguments": {"foo": "string"},
+ "blocked_by": ["string"],
+ "dependencies": ["string"],
+ "venue": "venue",
+ }
+ ],
frequency_penalty=-2,
function_call="function_call",
functions=[
@@ -266,10 +281,7 @@ def test_method_create_with_all_params_overload_2(self, client: Dedalus) -> None
"budget_tokens": 1024,
"type": "enabled",
},
- tool_choice={
- "type": "auto",
- "disable_parallel_tool_use": True,
- },
+ tool_choice="string",
tool_config={"foo": "string"},
tools=[
{
@@ -348,7 +360,16 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
"values": {"api_key": "sk-..."},
},
deferred=True,
- deferred_calls=[{"foo": "bar"}],
+ deferred_calls=[
+ {
+ "id": "id",
+ "name": "name",
+ "arguments": {"foo": "string"},
+ "blocked_by": ["string"],
+ "dependencies": ["string"],
+ "venue": "venue",
+ }
+ ],
frequency_penalty=-2,
function_call="function_call",
functions=[
@@ -419,10 +440,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
"budget_tokens": 1024,
"type": "enabled",
},
- tool_choice={
- "type": "auto",
- "disable_parallel_tool_use": True,
- },
+ tool_choice="string",
tool_config={"foo": "string"},
tools=[
{
@@ -496,7 +514,16 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
"values": {"api_key": "sk-..."},
},
deferred=True,
- deferred_calls=[{"foo": "bar"}],
+ deferred_calls=[
+ {
+ "id": "id",
+ "name": "name",
+ "arguments": {"foo": "string"},
+ "blocked_by": ["string"],
+ "dependencies": ["string"],
+ "venue": "venue",
+ }
+ ],
frequency_penalty=-2,
function_call="function_call",
functions=[
@@ -566,10 +593,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
"budget_tokens": 1024,
"type": "enabled",
},
- tool_choice={
- "type": "auto",
- "disable_parallel_tool_use": True,
- },
+ tool_choice="string",
tool_config={"foo": "string"},
tools=[
{
From a7deb3904b73f21775309796a2b1f04a0a240e20 Mon Sep 17 00:00:00 2001
From: Windsor
Date: Tue, 10 Feb 2026 15:07:35 -0800
Subject: [PATCH 18/23] fix(_compat): remove duplicate by_alias keyword arg
Stainless regen (2026-02-07) added `by_alias=by_alias`, but a manually
added `by_alias=True` from Nov 2025 was still present in the same call.
A repeated keyword argument is a compile-time SyntaxError on every
supported Python (hit on Python 3.9), so the module failed to import.
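Minimal repro of the failure mode (illustrative, not from the SDK):

    def f(**kw): ...

    f(by_alias=True, by_alias=False)
    # SyntaxError: keyword argument repeated

CPython rejects the duplicated keyword while compiling the call
expression, so the error surfaces as soon as the file is imported.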
---
src/dedalus_labs/_compat.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/src/dedalus_labs/_compat.py b/src/dedalus_labs/_compat.py
index bf1f1b9..76d017b 100644
--- a/src/dedalus_labs/_compat.py
+++ b/src/dedalus_labs/_compat.py
@@ -144,7 +144,6 @@ def model_dump(
if (not PYDANTIC_V1) or hasattr(model, "model_dump"):
return model.model_dump(
mode=mode,
- by_alias=True,
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
From e3a55da85cc4deed8c5122193084a8a4675818c5 Mon Sep 17 00:00:00 2001
From: Windsor
Date: Tue, 10 Feb 2026 15:07:42 -0800
Subject: [PATCH 19/23] feat(stream): return StreamResult from stream_async /
stream_sync
stream_async and stream_sync now accumulate content and tool-call deltas
into a StreamResult dataclass and return it. The accumulation logic is
extracted into a shared accumulate_tool_call() function that
DedalusRunner (lib/runner/core.py) also delegates to, so the reassembly
logic lives in one place.
Re-exports StreamResult from dedalus_labs.utils.stream;
accumulate_tool_call stays importable from dedalus_labs.lib.utils._stream.
Adds tests covering content, tool-call reassembly, thought_signature,
parallel calls, empty streams, and result independence.
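Minimal usage sketch (model string and prompt are illustrative):

    from dedalus_labs import Dedalus
    from dedalus_labs.utils.stream import stream_sync

    client = Dedalus()
    stream = client.chat.completions.create(
        model="openai/gpt-4o",
        messages=[{"role": "user", "content": "Hi"}],
        stream=True,
    )
    result = stream_sync(stream)  # prints tokens, then returns StreamResult
    print(result.content)
    for call in result.tool_calls:
        print(call["id"], call["function"]["name"], call["function"]["arguments"])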
---
src/dedalus_labs/lib/runner/core.py | 25 +---
src/dedalus_labs/lib/utils/_stream.py | 190 ++++++++++++++++---------
src/dedalus_labs/utils/stream.py | 4 +-
tests/test_stream_helpers.py | 194 ++++++++++++++++++++++++++
4 files changed, 321 insertions(+), 92 deletions(-)
create mode 100644 tests/test_stream_helpers.py
diff --git a/src/dedalus_labs/lib/runner/core.py b/src/dedalus_labs/lib/runner/core.py
index 0e44600..a295f09 100644
--- a/src/dedalus_labs/lib/runner/core.py
+++ b/src/dedalus_labs/lib/runner/core.py
@@ -30,6 +30,7 @@
from .types import Message, ToolCall, JsonValue, ToolResult, PolicyInput, PolicyContext
from ..._client import Dedalus, AsyncDedalus
from ...types.shared import MCPToolResult
+from ..utils._stream import accumulate_tool_call
# Type alias for mcp_servers parameter - accepts strings, server objects, or mixed lists
MCPServersInput = Union[
@@ -1240,29 +1241,7 @@ def _execute_tool_calls_sync(
def _accumulate_tool_calls(self, deltas, acc: list[ToolCall]) -> None:
"""Accumulate streaming tool call deltas."""
for delta in deltas:
- index = getattr(delta, "index", 0)
-
- # Ensure we have enough entries in acc
- while len(acc) <= index:
- acc.append(
- {
- "id": "",
- "type": "function",
- "function": {"name": "", "arguments": ""},
- }
- )
-
- if hasattr(delta, "id") and delta.id:
- acc[index]["id"] = delta.id
- if hasattr(delta, "function"):
- fn = delta.function
- if hasattr(fn, "name") and fn.name:
- acc[index]["function"]["name"] = fn.name
- if hasattr(fn, "arguments") and fn.arguments:
- acc[index]["function"]["arguments"] += fn.arguments
- thought_sig = getattr(delta, "thought_signature", None)
- if thought_sig:
- acc[index]["thought_signature"] = thought_sig
+ accumulate_tool_call(acc, delta)
@staticmethod
def _mk_kwargs(mc: _ModelConfig) -> Dict[str, Any]:
diff --git a/src/dedalus_labs/lib/utils/_stream.py b/src/dedalus_labs/lib/utils/_stream.py
index ac4c305..302b04b 100644
--- a/src/dedalus_labs/lib/utils/_stream.py
+++ b/src/dedalus_labs/lib/utils/_stream.py
@@ -7,23 +7,117 @@
from __future__ import annotations
import os
-
+from dataclasses import dataclass, field
from collections.abc import AsyncIterator, Iterator
-from typing import TYPE_CHECKING
+from typing import Any, Dict, List, TYPE_CHECKING
if TYPE_CHECKING:
from ...types.chat.stream_chunk import StreamChunk
__all__ = [
+ "StreamResult",
+ "accumulate_tool_call",
"stream_async",
"stream_sync",
]
-async def stream_async(stream: AsyncIterator[StreamChunk] | object) -> None:
+@dataclass
+class StreamResult:
+ """Collected data from a consumed stream.
+
+ Returned by :func:`stream_async` and :func:`stream_sync` so callers
+ can inspect what happened after the stream is exhausted.
+ """
+
+ content: str = ""
+ """Concatenated text content from all ``delta.content`` fragments."""
+
+ tool_calls: List[Dict[str, Any]] = field(default_factory=list)
+ """Reassembled tool calls accumulated from streaming deltas.
+
+ Each entry has the shape ``{"id": str, "type": "function", "function": {"name": str, "arguments": str}}``; a ``"thought_signature"`` key is added when present on the delta.
+ """
+
+
+def accumulate_tool_call(
+ tool_calls: List[Dict[str, Any]],
+ tc_delta: object,
+) -> None:
+ """Accumulate a single streaming tool-call delta into *tool_calls*.
+
+ Reassembles the incremental fragments (id, function name, argument
+ chunks, thought_signature) that arrive across multiple SSE chunks
+ into complete tool-call dicts keyed by ``tc_delta.index``.
+
+ This is the canonical implementation shared by both the stream
+ helpers and :class:`~dedalus_labs.lib.runner.core.DedalusRunner`.
+ """
+ idx: int = getattr(tc_delta, "index", 0)
+ while len(tool_calls) <= idx:
+ tool_calls.append({"id": "", "type": "function", "function": {"name": "", "arguments": ""}})
+ entry = tool_calls[idx]
+
+ tc_id = getattr(tc_delta, "id", None)
+ if tc_id:
+ entry["id"] = tc_id
+
+ fn = getattr(tc_delta, "function", None)
+ if fn is not None:
+ if getattr(fn, "name", None):
+ entry["function"]["name"] = fn.name
+ if getattr(fn, "arguments", None):
+ entry["function"]["arguments"] += fn.arguments
+
+ thought_sig = getattr(tc_delta, "thought_signature", None)
+ if thought_sig:
+ entry["thought_signature"] = thought_sig
+
+
+def _process_chunk(
+ chunk: object,
+ result: StreamResult,
+ verbose: bool,
+) -> None:
+ """Extract content and tool-call deltas from a single StreamChunk."""
+ if verbose:
+ extra = getattr(chunk, "__pydantic_extra__", None)
+ if isinstance(extra, dict):
+ meta = extra.get("dedalus_event")
+ if isinstance(meta, dict):
+ print(f"\n[EVENT] {meta}")
+
+ choices = getattr(chunk, "choices", None)
+ if not choices:
+ return
+
+ choice = choices[0]
+ delta = choice.delta
+
+ # Tool-call deltas
+ for tc in getattr(delta, "tool_calls", None) or []:
+ if verbose:
+ fn = getattr(tc, "function", None)
+ print(f"\n[TOOL_CALL] name={getattr(fn, 'name', None)} id={getattr(tc, 'id', None)}")
+ accumulate_tool_call(result.tool_calls, tc)
+
+ # Content
+ if delta.content:
+ print(delta.content, end="", flush=True)
+ result.content += delta.content
+
+ # Finish reason (verbose-only)
+ if verbose and getattr(choice, "finish_reason", None):
+ print(f"\n[FINISH] reason={choice.finish_reason}")
+
+
+async def stream_async(stream: AsyncIterator[StreamChunk] | object) -> StreamResult:
"""Stream text content from an async streaming response.
+ Prints content tokens to stdout as they arrive **and** returns a
+ :class:`StreamResult` with the accumulated content and tool calls.
+
Supports both:
- Raw StreamChunk iterator from .create(stream=True) or DedalusRunner.run(stream=True)
- ChatCompletionStreamManager from .stream() (Pydantic models with event API)
@@ -31,16 +125,16 @@ async def stream_async(stream: AsyncIterator[StreamChunk] | object) -> None:
Args:
stream: An async iterator of StreamChunk or a ChatCompletionStreamManager
- Example:
- >>> # With .create(stream=True)
- >>> stream = await client.chat.completions.create(stream=True, ...)
- >>> await stream_async(stream)
+ Returns:
+ A :class:`StreamResult` containing the full content and any tool calls.
- >>> # With .stream() (Pydantic models)
- >>> stream = client.chat.completions.stream(response_format=Model, ...)
- >>> await stream_async(stream)
+ Example:
+ >>> stream = runner.run(input="...", model="...", stream=True)
+ >>> result = await stream_async(stream)
+ >>> print(result.tool_calls)
"""
verbose = os.environ.get("DEDALUS_SDK_VERBOSE", "").lower() in ("1", "true", "yes", "on", "debug")
+ result = StreamResult()
# Stream manager (event API) vs raw AsyncStream: discriminate via __aenter__ without __aiter__
if hasattr(stream, "__aenter__") and not hasattr(stream, "__aiter__"):
@@ -48,43 +142,25 @@ async def stream_async(stream: AsyncIterator[StreamChunk] | object) -> None:
async for event in event_stream:
if event.type == "content.delta":
print(event.delta, end="", flush=True)
+ result.content += event.delta
elif verbose and event.type == "content.done" and hasattr(event, "parsed") and event.parsed:
print(f"\n[PARSED] {type(event.parsed).__name__}")
print() # Final newline
- return
+ return result
# Simple StreamChunk iterator case
async for chunk in stream:
- # Print server-side metadata events if present (verbose-only)
- if verbose:
- extra = getattr(chunk, "__pydantic_extra__", None)
- if isinstance(extra, dict):
- meta = extra.get("dedalus_event")
- if isinstance(meta, dict):
- print(f"\n[EVENT] {meta}")
-
- if chunk.choices:
- choice = chunk.choices[0]
- delta = choice.delta
- # Print tool-call deltas as debug (verbose-only)
- if verbose and getattr(delta, "tool_calls", None):
- for tc in delta.tool_calls:
- fn = getattr(tc, "function", None)
- name = getattr(fn, "name", None)
- tcid = getattr(tc, "id", None)
- print(f"\n[TOOL_CALL] name={name} id={tcid}")
- # Always print content
- if delta.content:
- print(delta.content, end="", flush=True)
- # Print finish reason (verbose-only)
- if verbose and getattr(choice, "finish_reason", None):
- print(f"\n[FINISH] reason={choice.finish_reason}")
+ _process_chunk(chunk, result, verbose)
print() # Final newline
+ return result
-def stream_sync(stream: Iterator[StreamChunk] | object) -> None:
+def stream_sync(stream: Iterator[StreamChunk] | object) -> StreamResult:
"""Stream text content from a streaming response.
+ Prints content tokens to stdout as they arrive **and** returns a
+ :class:`StreamResult` with the accumulated content and tool calls.
+
Supports both:
- Raw StreamChunk iterator from .create(stream=True) or DedalusRunner.run(stream=True)
- ChatCompletionStreamManager from .stream() (Pydantic models with event API)
@@ -92,16 +168,16 @@ def stream_sync(stream: Iterator[StreamChunk] | object) -> None:
Args:
stream: An iterator of StreamChunk or a ChatCompletionStreamManager
- Example:
- >>> # With .create(stream=True)
- >>> stream = client.chat.completions.create(stream=True, ...)
- >>> stream_sync(stream)
+ Returns:
+ A :class:`StreamResult` containing the full content and any tool calls.
- >>> # With .stream() (Pydantic models)
- >>> stream = client.chat.completions.stream(response_format=Model, ...)
- >>> stream_sync(stream)
+ Example:
+ >>> stream = runner.run(input="...", model="...", stream=True)
+ >>> result = stream_sync(stream)
+ >>> print(result.tool_calls)
"""
verbose = os.environ.get("DEDALUS_SDK_VERBOSE", "").lower() in ("1", "true", "yes", "on", "debug")
+ result = StreamResult()
# Stream manager (event API) vs raw Stream: discriminate via __enter__ without __iter__
if hasattr(stream, "__enter__") and not hasattr(stream, "__iter__"):
@@ -109,34 +185,14 @@ def stream_sync(stream: Iterator[StreamChunk] | object) -> None:
for event in event_stream:
if event.type == "content.delta":
print(event.delta, end="", flush=True)
+ result.content += event.delta
elif verbose and event.type == "content.done" and hasattr(event, "parsed") and event.parsed:
print(f"\n[PARSED] {type(event.parsed).__name__}")
print() # Final newline
- return
+ return result
# Simple StreamChunk iterator case
for chunk in stream:
- # Print server-side metadata events if present (verbose-only)
- if verbose:
- extra = getattr(chunk, "__pydantic_extra__", None)
- if isinstance(extra, dict):
- meta = extra.get("dedalus_event")
- if isinstance(meta, dict):
- print(f"\n[EVENT] {meta}")
-
- if chunk.choices:
- choice = chunk.choices[0]
- delta = choice.delta
- # Print tool-call deltas as debug (verbose-only)
- if verbose and getattr(delta, "tool_calls", None):
- for tc in delta.tool_calls:
- fn = getattr(tc, "function", None)
- name = getattr(fn, "name", None)
- tcid = getattr(tc, "id", None)
- print(f"\n[TOOL_CALL] name={name} id={tcid}")
- # Always print content
- if delta.content:
- print(delta.content, end="", flush=True)
- if verbose and getattr(choice, "finish_reason", None):
- print(f"\n[FINISH] reason={choice.finish_reason}")
+ _process_chunk(chunk, result, verbose)
print() # Final newline
+ return result
diff --git a/src/dedalus_labs/utils/stream.py b/src/dedalus_labs/utils/stream.py
index a82ecd0..ec9f9b8 100644
--- a/src/dedalus_labs/utils/stream.py
+++ b/src/dedalus_labs/utils/stream.py
@@ -6,6 +6,6 @@
"""Stream utilities for printing model responses in real-time."""
-from ..lib.utils._stream import stream_async, stream_sync
+from ..lib.utils._stream import StreamResult, stream_async, stream_sync
-__all__ = ["stream_async", "stream_sync"]
+__all__ = ["StreamResult", "stream_async", "stream_sync"]
diff --git a/tests/test_stream_helpers.py b/tests/test_stream_helpers.py
new file mode 100644
index 0000000..2c2d89a
--- /dev/null
+++ b/tests/test_stream_helpers.py
@@ -0,0 +1,194 @@
+# ==============================================================================
+# © 2025 Dedalus Labs, Inc. and affiliates
+# Licensed under MIT
+# github.com/dedalus-labs/dedalus-sdk-python/LICENSE
+# ==============================================================================
+
+"""Tests for stream helpers: accumulate_tool_call, stream_async, stream_sync."""
+
+from __future__ import annotations
+
+from types import SimpleNamespace
+from typing import Any, AsyncIterator, List
+
+import pytest
+
+from dedalus_labs.lib.utils._stream import StreamResult, accumulate_tool_call, stream_async, stream_sync
+
+
+# --- Test helpers ---
+
+
+def _chunk(
+ *,
+ content: str | None = None,
+ tool_calls: List[Any] | None = None,
+ finish_reason: str | None = None,
+) -> SimpleNamespace:
+ delta = SimpleNamespace(content=content, tool_calls=tool_calls)
+ choice = SimpleNamespace(delta=delta, finish_reason=finish_reason)
+ return SimpleNamespace(choices=[choice])
+
+
+def _tc(
+ index: int,
+ *,
+ tc_id: str | None = None,
+ name: str | None = None,
+ arguments: str | None = None,
+ thought_signature: str | None = None,
+) -> SimpleNamespace:
+ fn = SimpleNamespace(name=name, arguments=arguments)
+ ns = SimpleNamespace(index=index, id=tc_id, function=fn)
+ if thought_signature is not None:
+ ns.thought_signature = thought_signature
+ return ns
+
+
+async def _aiter(items: list) -> AsyncIterator:
+ for item in items:
+ yield item
+
+
+# --- accumulate_tool_call ---
+
+
+def test_accumulate_creates_entry():
+ acc: list[dict] = []
+ accumulate_tool_call(acc, _tc(0, tc_id="call_1", name="search"))
+ assert acc == [{"id": "call_1", "type": "function", "function": {"name": "search", "arguments": ""}}]
+
+
+def test_accumulate_appends_arguments():
+ acc: list[dict] = []
+ accumulate_tool_call(acc, _tc(0, tc_id="c", name="fn"))
+ accumulate_tool_call(acc, _tc(0, arguments='{"a":'))
+ accumulate_tool_call(acc, _tc(0, arguments=" 1}"))
+ assert acc[0]["function"]["arguments"] == '{"a": 1}'
+
+
+def test_accumulate_parallel_indices():
+ acc: list[dict] = []
+ accumulate_tool_call(acc, _tc(0, tc_id="c0", name="alpha"))
+ accumulate_tool_call(acc, _tc(1, tc_id="c1", name="beta"))
+ assert len(acc) == 2
+ assert acc[0]["function"]["name"] == "alpha"
+ assert acc[1]["function"]["name"] == "beta"
+
+
+def test_accumulate_sparse_index_pads():
+ acc: list[dict] = []
+ accumulate_tool_call(acc, _tc(2, tc_id="c2", name="gamma"))
+ assert len(acc) == 3
+ assert acc[2]["function"]["name"] == "gamma"
+ assert acc[0]["function"]["name"] == ""
+
+
+def test_accumulate_thought_signature():
+ acc: list[dict] = []
+ accumulate_tool_call(acc, _tc(0, tc_id="c", name="fn", thought_signature="sig_abc"))
+ assert acc[0]["thought_signature"] == "sig_abc"
+
+
+def test_accumulate_no_thought_signature():
+ acc: list[dict] = []
+ accumulate_tool_call(acc, _tc(0, tc_id="c", name="fn"))
+ assert "thought_signature" not in acc[0]
+
+
+# --- stream_async ---
+
+
+@pytest.mark.asyncio
+async def test_async_content_only(capsys: pytest.CaptureFixture[str]):
+ result = await stream_async(_aiter([_chunk(content="Hello"), _chunk(content=" world")]))
+ assert isinstance(result, StreamResult)
+ assert result.content == "Hello world"
+ assert result.tool_calls == []
+ assert "Hello world" in capsys.readouterr().out
+
+
+@pytest.mark.asyncio
+async def test_async_empty():
+ result = await stream_async(_aiter([]))
+ assert result.content == ""
+ assert result.tool_calls == []
+
+
+@pytest.mark.asyncio
+async def test_async_reassembles_tool_call():
+ chunks = [
+ _chunk(tool_calls=[_tc(0, tc_id="call_abc", name="gmail_send")]),
+ _chunk(tool_calls=[_tc(0, arguments='{"to":')]),
+ _chunk(tool_calls=[_tc(0, arguments=' "a@b.com"}')]),
+ _chunk(content="Sent.", finish_reason="stop"),
+ ]
+ result = await stream_async(_aiter(chunks))
+ assert result.content == "Sent."
+ assert len(result.tool_calls) == 1
+ assert result.tool_calls[0]["id"] == "call_abc"
+ assert result.tool_calls[0]["type"] == "function"
+ assert result.tool_calls[0]["function"] == {"name": "gmail_send", "arguments": '{"to": "a@b.com"}'}
+
+
+@pytest.mark.asyncio
+async def test_async_parallel_tool_calls():
+ chunks = [
+ _chunk(tool_calls=[_tc(0, tc_id="c1", name="search"), _tc(1, tc_id="c2", name="fetch")]),
+ _chunk(tool_calls=[_tc(0, arguments='{"q":"x"}')]),
+ _chunk(tool_calls=[_tc(1, arguments='{"url":"y"}')]),
+ ]
+ result = await stream_async(_aiter(chunks))
+ assert len(result.tool_calls) == 2
+ assert result.tool_calls[0]["function"]["name"] == "search"
+ assert result.tool_calls[1]["function"]["name"] == "fetch"
+
+
+@pytest.mark.asyncio
+async def test_async_skips_empty_choices():
+ chunks = [SimpleNamespace(choices=[]), _chunk(content="ok"), SimpleNamespace(choices=None)]
+ result = await stream_async(_aiter(chunks))
+ assert result.content == "ok"
+
+
+@pytest.mark.asyncio
+async def test_async_independent_results():
+ """Each call returns a fresh StreamResult."""
+ a = await stream_async(_aiter([_chunk(tool_calls=[_tc(0, tc_id="a", name="fa")])]))
+ b = await stream_async(_aiter([_chunk(tool_calls=[_tc(0, tc_id="b", name="fb")])]))
+ assert a.tool_calls[0]["id"] == "a"
+ assert b.tool_calls[0]["id"] == "b"
+ assert a.tool_calls is not b.tool_calls
+
+
+# --- stream_sync ---
+
+
+def test_sync_content_only(capsys: pytest.CaptureFixture[str]):
+ result = stream_sync(iter([_chunk(content="Hello"), _chunk(content=" world")]))
+ assert result.content == "Hello world"
+ assert result.tool_calls == []
+ assert "Hello world" in capsys.readouterr().out
+
+
+def test_sync_empty():
+ result = stream_sync(iter([]))
+ assert result.content == ""
+ assert result.tool_calls == []
+
+
+def test_sync_reassembles_tool_call():
+ chunks = [
+ _chunk(tool_calls=[_tc(0, tc_id="call_abc", name="gmail_send")]),
+ _chunk(tool_calls=[_tc(0, arguments='{"to": "a@b.com"}')]),
+ _chunk(content="Sent.", finish_reason="stop"),
+ ]
+ result = stream_sync(iter(chunks))
+ assert result.tool_calls[0]["function"] == {"name": "gmail_send", "arguments": '{"to": "a@b.com"}'}
+ assert result.content == "Sent."
+
+
+def test_sync_thought_signature():
+ chunks = [_chunk(tool_calls=[_tc(0, tc_id="c", name="fn", thought_signature="sig")])]
+ result = stream_sync(iter(chunks))
+ assert result.tool_calls[0]["thought_signature"] == "sig"
From 57dc1818c864459b2e1da71487eb3adf94dbf668 Mon Sep 17 00:00:00 2001
From: Windsor
Date: Tue, 10 Feb 2026 15:07:47 -0800
Subject: [PATCH 20/23] style(tests): flatten test classes to module-level
functions
Standardise hand-written test files (test_encryption, test_mcp_wire,
test_mcp_wire_connections, test__bug_report) to match the module-level
function style used in test_local_scheduler. Replaces class wrappers and
multi-line banners with single-line section comments.
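Before/after shape, using names from test__bug_report below (bodies elided):

    # before
    class TestGenerateBugReportUrl:
        def test_minimal_parameters(self):
            ...

    # after
    # --- generate_bug_report_url ---
    def test_minimal_parameters():
        ...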
---
tests/test__bug_report.py | 230 ++++++++------------
tests/test_encryption.py | 267 ++++++++++-------------
tests/test_mcp_wire.py | 327 +++++++++++++----------------
tests/test_mcp_wire_connections.py | 314 ++++++++++-----------------
4 files changed, 453 insertions(+), 685 deletions(-)
diff --git a/tests/test__bug_report.py b/tests/test__bug_report.py
index a3b8e3e..6e570dd 100644
--- a/tests/test__bug_report.py
+++ b/tests/test__bug_report.py
@@ -11,186 +11,128 @@
from urllib.parse import parse_qs, urlparse
import httpx
-import pytest
-from dedalus_labs._exceptions import APIError, APIStatusError, BadRequestError
+from dedalus_labs._exceptions import APIError, BadRequestError
from dedalus_labs.lib._bug_report import generate_bug_report_url, get_bug_report_url_from_error
-class TestGenerateBugReportUrl:
- """Tests for generate_bug_report_url function."""
+# --- generate_bug_report_url ---
- def test_minimal_parameters(self):
- """URL generation with no params includes auto-populated system info."""
- url = generate_bug_report_url()
- parsed = urlparse(url)
- params = parse_qs(parsed.query)
+def test_minimal_parameters():
+ url = generate_bug_report_url()
+ parsed = urlparse(url)
+ params = parse_qs(parsed.query)
- assert parsed.netloc == "github.com"
- assert parsed.path == "/dedalus-labs/dedalus-sdk-python/issues/new"
- assert params["template"] == ["bug-report.yml"]
- assert params["component"] == ["Python SDK"]
- assert "python_version" in params
- assert "platform" in params
+ assert parsed.netloc == "github.com"
+ assert parsed.path == "/dedalus-labs/dedalus-sdk-python/issues/new"
+ assert params["template"] == ["bug-report.yml"]
+ assert params["component"] == ["Python SDK"]
+ assert "python_version" in params
+ assert "platform" in params
- def test_all_parameters(self):
- """URL generation with all parameters populates fields correctly."""
- url = generate_bug_report_url(
- version="0.0.1",
- error_type="APIError",
- error_message="Connection timeout",
- environment="dev",
- request_id="req-123",
- endpoint="/v1/chat/completions",
- method="POST",
- )
- parsed = urlparse(url)
- params = parse_qs(parsed.query)
+def test_all_parameters():
+ url = generate_bug_report_url(
+ version="0.0.1",
+ error_type="APIError",
+ error_message="Connection timeout",
+ environment="dev",
+ request_id="req-123",
+ endpoint="/v1/chat/completions",
+ method="POST",
+ )
+ params = parse_qs(urlparse(url).query)
- assert params["version"] == ["0.0.1"]
- assert params["error_type"] == ["APIError"]
- assert params["actual"] == ["Connection timeout"]
- assert params["environment"] == ["dev"]
- assert params["notes"][0] == "Request ID: req-123\nEndpoint: POST /v1/chat/completions"
+ assert params["version"] == ["0.0.1"]
+ assert params["error_type"] == ["APIError"]
+ assert params["actual"] == ["Connection timeout"]
+ assert params["environment"] == ["dev"]
+ assert params["notes"][0] == "Request ID: req-123\nEndpoint: POST /v1/chat/completions"
- def test_request_id_in_notes(self):
- """Request ID is included in notes field."""
- url = generate_bug_report_url(request_id="req-abc-123")
- parsed = urlparse(url)
- params = parse_qs(parsed.query)
+def test_request_id_in_notes():
+ params = parse_qs(urlparse(generate_bug_report_url(request_id="req-abc-123")).query)
+ assert "Request ID: req-abc-123" in params["notes"][0]
- assert "notes" in params
- assert "Request ID: req-abc-123" in params["notes"][0]
- def test_custom_template(self):
- """Custom template name is respected."""
- url = generate_bug_report_url(template="custom.yml")
+def test_custom_template():
+ params = parse_qs(urlparse(generate_bug_report_url(template="custom.yml")).query)
+ assert params["template"] == ["custom.yml"]
- parsed = urlparse(url)
- params = parse_qs(parsed.query)
- assert params["template"] == ["custom.yml"]
+# --- get_bug_report_url_from_error ---
-class TestGetBugReportUrlFromError:
- """Tests for get_bug_report_url_from_error function."""
+def test_basic_api_error():
+ request = httpx.Request("POST", "https://api.dedalus.ai/v1/chat/completions")
+ error = APIError("Request failed", request, body=None)
+ params = parse_qs(urlparse(get_bug_report_url_from_error(error)).query)
- def test_basic_api_error(self):
- """Generates URL from basic APIError instance."""
- request = httpx.Request("POST", "https://api.dedalus.ai/v1/chat/completions")
- error = APIError("Request failed", request, body=None)
+ assert params["error_type"] == ["APIError"]
+ assert params["actual"] == ["Request failed"]
+ assert "version" in params
- url = get_bug_report_url_from_error(error)
- parsed = urlparse(url)
- params = parse_qs(parsed.query)
+def test_status_error_with_code():
+ request = httpx.Request("POST", "https://api.dedalus.ai/v1/chat/completions")
+ response = httpx.Response(400, request=request)
+ error = BadRequestError("Invalid request", response=response, body=None)
+ params = parse_qs(urlparse(get_bug_report_url_from_error(error)).query)
- assert params["error_type"] == ["APIError"]
- assert params["actual"] == ["Request failed"]
- assert "version" in params
+ assert params["error_type"] == ["BadRequestError"]
+ assert "[400]" in params["actual"][0]
+ assert "Invalid request" in params["actual"][0]
- def test_api_status_error_with_code(self):
- """Status code is included in error message for APIStatusError."""
- request = httpx.Request("POST", "https://api.dedalus.ai/v1/chat/completions")
- response = httpx.Response(400, request=request)
- error = BadRequestError("Invalid request", response=response, body=None)
- url = get_bug_report_url_from_error(error)
+def test_error_with_request_id():
+ request = httpx.Request("POST", "https://api.dedalus.ai/v1/chat/completions")
+ error = APIError("Test error", request, body=None)
+ params = parse_qs(urlparse(get_bug_report_url_from_error(error, request_id="req-456")).query)
+ assert "Request ID: req-456" in params["notes"][0]
- parsed = urlparse(url)
- params = parse_qs(parsed.query)
- assert params["error_type"] == ["BadRequestError"]
- assert "[400]" in params["actual"][0]
- assert "Invalid request" in params["actual"][0]
+def test_includes_sdk_version():
+ request = httpx.Request("POST", "https://api.dedalus.ai/v1/chat/completions")
+ error = APIError("Test error", request, body=None)
+ params = parse_qs(urlparse(get_bug_report_url_from_error(error)).query)
+ assert len(params["version"][0]) > 0
- def test_with_request_id(self):
- """Request ID parameter is included when provided."""
- request = httpx.Request("POST", "https://api.dedalus.ai/v1/chat/completions")
- error = APIError("Test error", request, body=None)
- url = get_bug_report_url_from_error(error, request_id="req-456")
+# --- Platform info ---
- parsed = urlparse(url)
- params = parse_qs(parsed.query)
- assert "notes" in params
- assert "Request ID: req-456" in params["notes"][0]
+def test_platform_info_format():
+ params = parse_qs(urlparse(generate_bug_report_url()).query)
+ parts = params["platform"][0].split()
+ assert len(parts) >= 2
- def test_includes_sdk_version(self):
- """SDK version is automatically included from __version__."""
- request = httpx.Request("POST", "https://api.dedalus.ai/v1/chat/completions")
- error = APIError("Test error", request, body=None)
- url = get_bug_report_url_from_error(error)
+def test_python_version_format():
+ params = parse_qs(urlparse(generate_bug_report_url()).query)
+ python_version = params["python_version"][0]
+ assert python_version.startswith("Python ")
+ assert python_version.replace("Python ", "")[0].isdigit()
- parsed = urlparse(url)
- params = parse_qs(parsed.query)
- # Should have version parameter populated
- assert "version" in params
- # Should be non-empty
- assert len(params["version"][0]) > 0
+# --- URL encoding ---
-class TestPlatformInfo:
- """Tests for platform info collection."""
+def test_special_chars_encoded():
+ url = generate_bug_report_url(
+ error_message="Error @ 127.0.0.1:8080 #fail",
+ request_id="req/test#123",
+ )
+ query_string = url.split("?")[1]
+ assert "@" not in query_string
+ assert "#" not in query_string
- def test_platform_info_format(self):
- """Platform info has expected format."""
- url = generate_bug_report_url()
+ params = parse_qs(urlparse(url).query)
+ assert "@" in params["actual"][0]
+ assert "#" in params["notes"][0]
- parsed = urlparse(url)
- params = parse_qs(parsed.query)
- platform_info = params["platform"][0]
- # Format: "System Release Machine"
- parts = platform_info.split()
- assert len(parts) >= 2
-
- def test_python_version_format(self):
- """Python version has expected format."""
- url = generate_bug_report_url()
-
- parsed = urlparse(url)
- params = parse_qs(parsed.query)
- python_version = params["python_version"][0]
-
- assert python_version.startswith("Python ")
- version_part = python_version.replace("Python ", "")
- assert len(version_part) > 0
- assert version_part[0].isdigit()
-
-
-class TestUrlEncoding:
- """Tests for URL encoding edge cases."""
-
- def test_special_chars_encoded(self):
- """Special characters are properly URL-encoded."""
- url = generate_bug_report_url(
- error_message="Error @ 127.0.0.1:8080 #fail",
- request_id="req/test#123",
- )
-
- # URL query string should not contain raw special chars
- query_string = url.split("?")[1]
- assert "@" not in query_string
- assert "#" not in query_string
-
- # But decoded params should contain them
- parsed = urlparse(url)
- params = parse_qs(parsed.query)
- assert "@" in params["actual"][0]
- assert "#" in params["notes"][0]
-
- def test_unicode_handling(self):
- """Unicode characters are properly encoded."""
- url = generate_bug_report_url(error_message="Error: 数据库连接失败")
-
- # Should not raise and should produce valid URL
- parsed = urlparse(url)
- params = parse_qs(parsed.query)
- assert "数据库连接失败" in params["actual"][0]
+def test_unicode_handling():
+ url = generate_bug_report_url(error_message="Error: 数据库连接失败")
+ params = parse_qs(urlparse(url).query)
+ assert "数据库连接失败" in params["actual"][0]
diff --git a/tests/test_encryption.py b/tests/test_encryption.py
index c1e09ea..ac5af38 100644
--- a/tests/test_encryption.py
+++ b/tests/test_encryption.py
@@ -28,56 +28,66 @@
)
-# Envelope v1 constants (must match encryption.py)
+# --- Constants (must match encryption.py) ---
+
_ENVELOPE_VERSION = 0x01
_NONCE_LEN = 12
_TAG_LEN = 16
+# --- Test helpers ---
+
+
def _b64url_encode(data: bytes) -> str:
- """Base64url encode without padding (test helper)."""
return base64.urlsafe_b64encode(data).rstrip(b"=").decode("ascii")
def _b64url_decode(s: str) -> bytes:
- """Base64url decode with padding restoration (test helper)."""
pad = 4 - len(s) % 4
if pad != 4:
s += "=" * pad
return base64.urlsafe_b64decode(s)
+def _decrypt_envelope_v1(private_key: Any, envelope: bytes) -> bytes:
+ """Decrypt envelope v1 format."""
+ key_size = private_key.key_size // 8
+ assert envelope[0] == _ENVELOPE_VERSION
+ wrapped_key = envelope[1 : 1 + key_size]
+ nonce = envelope[1 + key_size : 1 + key_size + _NONCE_LEN]
+ ciphertext_with_tag = envelope[1 + key_size + _NONCE_LEN :]
+ aes_key = private_key.decrypt(
+ wrapped_key,
+ padding.OAEP(
+ mgf=padding.MGF1(algorithm=hashes.SHA256()),
+ algorithm=hashes.SHA256(),
+ label=None,
+ ),
+ )
+ return AESGCM(aes_key).decrypt(nonce, ciphertext_with_tag, None)
+
+
+# --- Fixtures ---
+
+
@pytest.fixture
def rsa_keypair() -> tuple[Any, Any]:
- """Generate RSA keypair for testing."""
- private_key = rsa.generate_private_key(
- public_exponent=65537,
- key_size=2048,
- backend=default_backend(),
- )
+ private_key = rsa.generate_private_key(65537, 2048, default_backend())
return private_key, private_key.public_key()
@pytest.fixture
def rsa_keypair_3072() -> tuple[Any, Any]:
- """Generate 3072-bit RSA keypair (production size)."""
- private_key = rsa.generate_private_key(
- public_exponent=65537,
- key_size=3072,
- backend=default_backend(),
- )
+ private_key = rsa.generate_private_key(65537, 3072, default_backend())
return private_key, private_key.public_key()
@pytest.fixture
def rsa_jwk(rsa_keypair: tuple[Any, Any]) -> dict[str, Any]:
- """Create JWK from keypair."""
_, public_key = rsa_keypair
numbers = public_key.public_numbers()
-
n_bytes = numbers.n.to_bytes((numbers.n.bit_length() + 7) // 8, "big")
e_bytes = numbers.e.to_bytes((numbers.e.bit_length() + 7) // 8, "big")
-
return {
"kty": "RSA",
"use": "enc",
@@ -87,170 +97,113 @@ def rsa_jwk(rsa_keypair: tuple[Any, Any]) -> dict[str, Any]:
}
-def decrypt_envelope_v1(private_key: Any, envelope: bytes) -> bytes:
- """Decrypt envelope v1 format (test helper)."""
- key_size = private_key.key_size // 8
-
- version = envelope[0]
- assert version == _ENVELOPE_VERSION, f"Expected version 0x01, got 0x{version:02x}"
-
- wrapped_key = envelope[1 : 1 + key_size]
- nonce = envelope[1 + key_size : 1 + key_size + _NONCE_LEN]
- ciphertext_with_tag = envelope[1 + key_size + _NONCE_LEN :]
-
- aes_key = private_key.decrypt(
- wrapped_key,
- padding.OAEP(
- mgf=padding.MGF1(algorithm=hashes.SHA256()),
- algorithm=hashes.SHA256(),
- label=None,
- ),
- )
-
- return AESGCM(aes_key).decrypt(nonce, ciphertext_with_tag, None)
-
-
-class TestJwkToPublicKey:
- """Test JWK to public key conversion."""
-
- def test_valid_jwk(self, rsa_jwk: dict[str, Any], rsa_keypair: tuple[Any, Any]) -> None:
- """Convert valid JWK to public key."""
- _, expected_public = rsa_keypair
- public_key = jwk_to_public_key(rsa_jwk)
-
- assert public_key.public_numbers().n == expected_public.public_numbers().n
- assert public_key.public_numbers().e == expected_public.public_numbers().e
-
- def test_wrong_kty_raises(self) -> None:
- """Raise on non-RSA key type."""
- with pytest.raises(ValueError, match="expected RSA key type"):
- jwk_to_public_key({"kty": "EC", "n": "xxx", "e": "xxx"})
-
- def test_missing_n_raises(self, rsa_jwk: dict[str, Any]) -> None:
- """Raise on missing n parameter."""
- del rsa_jwk["n"]
- with pytest.raises(ValueError, match="missing required JWK field"):
- jwk_to_public_key(rsa_jwk)
+# --- jwk_to_public_key ---
- def test_small_key_rejected(self) -> None:
- """Reject keys smaller than minimum size."""
- small_key = rsa.generate_private_key(65537, 1024, default_backend())
- numbers = small_key.public_key().public_numbers()
- n_bytes = numbers.n.to_bytes((numbers.n.bit_length() + 7) // 8, "big")
- e_bytes = numbers.e.to_bytes((numbers.e.bit_length() + 7) // 8, "big")
- jwk = {"kty": "RSA", "n": _b64url_encode(n_bytes), "e": _b64url_encode(e_bytes)}
+def test_jwk_valid(rsa_jwk: dict[str, Any], rsa_keypair: tuple[Any, Any]):
+ _, expected_public = rsa_keypair
+ public_key = jwk_to_public_key(rsa_jwk)
+ assert public_key.public_numbers().n == expected_public.public_numbers().n
+ assert public_key.public_numbers().e == expected_public.public_numbers().e
- with pytest.raises(ValueError, match="below minimum"):
- jwk_to_public_key(jwk, min_key_size=2048)
+def test_jwk_wrong_kty_raises():
+ with pytest.raises(ValueError, match="expected RSA key type"):
+ jwk_to_public_key({"kty": "EC", "n": "xxx", "e": "xxx"})
-class TestEncryptCredentials:
- """Test credential encryption (envelope v1)."""
- def test_envelope_format(self, rsa_keypair: tuple[Any, Any]) -> None:
- """Encrypt produces valid envelope v1 format."""
- private_key, public_key = rsa_keypair
- credentials = {"token": "ghp_xxx123"}
+def test_jwk_missing_n_raises(rsa_jwk: dict[str, Any]):
+ del rsa_jwk["n"]
+ with pytest.raises(ValueError, match="missing required JWK field"):
+ jwk_to_public_key(rsa_jwk)
- ciphertext_b64 = encrypt_credentials(public_key, credentials)
- envelope = _b64url_decode(ciphertext_b64)
- key_size = private_key.key_size // 8
- min_len = 1 + key_size + _NONCE_LEN + _TAG_LEN
- assert len(envelope) >= min_len
- assert envelope[0] == _ENVELOPE_VERSION
-
- def test_roundtrip(self, rsa_keypair: tuple[Any, Any]) -> None:
- """Encrypted credentials can be decrypted with private key."""
- private_key, public_key = rsa_keypair
- credentials = {"api_key": "sk_test_123", "org_id": "org_456"}
-
- ciphertext_b64 = encrypt_credentials(public_key, credentials)
- envelope = _b64url_decode(ciphertext_b64)
- plaintext = decrypt_envelope_v1(private_key, envelope)
-
- assert json.loads(plaintext) == credentials
-
- def test_large_payload(self, rsa_keypair: tuple[Any, Any]) -> None:
- """Envelope v1 handles payloads larger than RSA limit."""
- private_key, public_key = rsa_keypair
- credentials = {"large_token": "x" * 1000, "another": "y" * 500}
-
- ciphertext_b64 = encrypt_credentials(public_key, credentials)
- envelope = _b64url_decode(ciphertext_b64)
- plaintext = decrypt_envelope_v1(private_key, envelope)
-
- assert json.loads(plaintext) == credentials
-
- def test_randomized(self, rsa_keypair: tuple[Any, Any]) -> None:
- """Same plaintext produces different ciphertext each time."""
- _, public_key = rsa_keypair
- credentials = {"token": "same_value"}
-
- ct1 = encrypt_credentials(public_key, credentials)
- ct2 = encrypt_credentials(public_key, credentials)
+def test_jwk_small_key_rejected():
+ small_key = rsa.generate_private_key(65537, 1024, default_backend())
+ numbers = small_key.public_key().public_numbers()
+ n_bytes = numbers.n.to_bytes((numbers.n.bit_length() + 7) // 8, "big")
+ e_bytes = numbers.e.to_bytes((numbers.e.bit_length() + 7) // 8, "big")
+ jwk = {"kty": "RSA", "n": _b64url_encode(n_bytes), "e": _b64url_encode(e_bytes)}
+ with pytest.raises(ValueError, match="below minimum"):
+ jwk_to_public_key(jwk, min_key_size=2048)
- assert ct1 != ct2
- def test_with_3072_key(self, rsa_keypair_3072: tuple[Any, Any]) -> None:
- """Works with production-size 3072-bit keys."""
- private_key, public_key = rsa_keypair_3072
- credentials = {"token": "production_token"}
+# --- encrypt_credentials (envelope v1) ---
- ciphertext_b64 = encrypt_credentials(public_key, credentials)
- envelope = _b64url_decode(ciphertext_b64)
- plaintext = decrypt_envelope_v1(private_key, envelope)
- assert json.loads(plaintext) == credentials
+def test_encrypt_envelope_format(rsa_keypair: tuple[Any, Any]):
+ private_key, public_key = rsa_keypair
+ ciphertext_b64 = encrypt_credentials(public_key, {"token": "ghp_xxx123"})
+ envelope = _b64url_decode(ciphertext_b64)
+ key_size = private_key.key_size // 8
+ assert len(envelope) >= 1 + key_size + _NONCE_LEN + _TAG_LEN
+ assert envelope[0] == _ENVELOPE_VERSION
-class TestSecurityInvariants:
- """Verify security properties."""
+def test_encrypt_roundtrip(rsa_keypair: tuple[Any, Any]):
+ private_key, public_key = rsa_keypair
+ credentials = {"api_key": "sk_test_123", "org_id": "org_456"}
+ ciphertext_b64 = encrypt_credentials(public_key, credentials)
+ plaintext = _decrypt_envelope_v1(private_key, _b64url_decode(ciphertext_b64))
+ assert json.loads(plaintext) == credentials
- def test_plaintext_not_in_ciphertext(self, rsa_keypair: tuple[Any, Any]) -> None:
- """Plaintext must not appear in ciphertext."""
- _, public_key = rsa_keypair
- secret = "ghp_super_secret_token_12345"
- ciphertext = encrypt_credentials(public_key, {"token": secret})
+def test_encrypt_large_payload(rsa_keypair: tuple[Any, Any]):
+ private_key, public_key = rsa_keypair
+ credentials = {"large_token": "x" * 1000, "another": "y" * 500}
+ ciphertext_b64 = encrypt_credentials(public_key, credentials)
+ plaintext = _decrypt_envelope_v1(private_key, _b64url_decode(ciphertext_b64))
+ assert json.loads(plaintext) == credentials
- assert secret not in ciphertext
- assert "ghp_" not in ciphertext
- def test_wrong_key_fails(self, rsa_keypair: tuple[Any, Any]) -> None:
- """Decryption fails with wrong private key."""
- _, public_key = rsa_keypair
- attacker_key = rsa.generate_private_key(65537, 2048, default_backend())
+def test_encrypt_randomized(rsa_keypair: tuple[Any, Any]):
+ """Same plaintext produces different ciphertext each time."""
+ _, public_key = rsa_keypair
+ credentials = {"token": "same_value"}
+ assert encrypt_credentials(public_key, credentials) != encrypt_credentials(public_key, credentials)
- ciphertext_b64 = encrypt_credentials(public_key, {"token": "secret"})
- envelope = _b64url_decode(ciphertext_b64)
- with pytest.raises(Exception):
- decrypt_envelope_v1(attacker_key, envelope)
+def test_encrypt_with_3072_key(rsa_keypair_3072: tuple[Any, Any]):
+ private_key, public_key = rsa_keypair_3072
+ credentials = {"token": "production_token"}
+ ciphertext_b64 = encrypt_credentials(public_key, credentials)
+ plaintext = _decrypt_envelope_v1(private_key, _b64url_decode(ciphertext_b64))
+ assert json.loads(plaintext) == credentials
- def test_tampered_ciphertext_fails(self, rsa_keypair: tuple[Any, Any]) -> None:
- """GCM authentication rejects tampered ciphertext."""
- private_key, public_key = rsa_keypair
- ciphertext_b64 = encrypt_credentials(public_key, {"token": "test"})
- envelope = bytearray(_b64url_decode(ciphertext_b64))
+# --- Security invariants ---
- # Tamper with ciphertext portion
- key_size = private_key.key_size // 8
- envelope[1 + key_size + _NONCE_LEN + 5] ^= 0xFF
- with pytest.raises(Exception):
- decrypt_envelope_v1(private_key, bytes(envelope))
+def test_plaintext_not_in_ciphertext(rsa_keypair: tuple[Any, Any]):
+ _, public_key = rsa_keypair
+ secret = "ghp_super_secret_token_12345"
+ ciphertext = encrypt_credentials(public_key, {"token": secret})
+ assert secret not in ciphertext
+ assert "ghp_" not in ciphertext
- def test_tampered_wrapped_key_fails(self, rsa_keypair: tuple[Any, Any]) -> None:
- """Tampered wrapped key is rejected."""
- private_key, public_key = rsa_keypair
- ciphertext_b64 = encrypt_credentials(public_key, {"token": "test"})
- envelope = bytearray(_b64url_decode(ciphertext_b64))
+def test_wrong_key_fails(rsa_keypair: tuple[Any, Any]):
+ _, public_key = rsa_keypair
+ attacker_key = rsa.generate_private_key(65537, 2048, default_backend())
+ ciphertext_b64 = encrypt_credentials(public_key, {"token": "secret"})
+ with pytest.raises(Exception):
+ _decrypt_envelope_v1(attacker_key, _b64url_decode(ciphertext_b64))
- envelope[10] ^= 0xFF
- with pytest.raises(Exception):
- decrypt_envelope_v1(private_key, bytes(envelope))
+def test_tampered_ciphertext_fails(rsa_keypair: tuple[Any, Any]):
+ private_key, public_key = rsa_keypair
+ ciphertext_b64 = encrypt_credentials(public_key, {"token": "test"})
+ envelope = bytearray(_b64url_decode(ciphertext_b64))
+ key_size = private_key.key_size // 8
+ envelope[1 + key_size + _NONCE_LEN + 5] ^= 0xFF
+ with pytest.raises(Exception):
+ _decrypt_envelope_v1(private_key, bytes(envelope))
+
+
+def test_tampered_wrapped_key_fails(rsa_keypair: tuple[Any, Any]):
+ private_key, public_key = rsa_keypair
+ ciphertext_b64 = encrypt_credentials(public_key, {"token": "test"})
+ envelope = bytearray(_b64url_decode(ciphertext_b64))
+ envelope[10] ^= 0xFF
+ with pytest.raises(Exception):
+ _decrypt_envelope_v1(private_key, bytes(envelope))
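
_decrypt_envelope_v1 fixes the envelope layout these tests rely on: one version byte (0x01), the RSA-OAEP(SHA-256)-wrapped AES key, a 12-byte GCM nonce, then ciphertext with the 16-byte tag appended. For reference, a sketch of the matching encryption side under that layout — the SDK's real encrypt_credentials is not shown in this diff, so treat this as an assumption-laden illustration:

    import base64
    import json
    import os

    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import padding
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM

    def encrypt_envelope_v1_sketch(public_key, credentials: dict) -> str:
        aes_key = AESGCM.generate_key(bit_length=256)
        nonce = os.urandom(12)  # _NONCE_LEN
        # AESGCM.encrypt appends the 16-byte tag to the ciphertext.
        ct = AESGCM(aes_key).encrypt(nonce, json.dumps(credentials).encode(), None)
        wrapped = public_key.encrypt(
            aes_key,
            padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()), algorithm=hashes.SHA256(), label=None),
        )
        envelope = bytes([0x01]) + wrapped + nonce + ct  # version byte first
        return base64.urlsafe_b64encode(envelope).rstrip(b"=").decode("ascii")
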
diff --git a/tests/test_mcp_wire.py b/tests/test_mcp_wire.py
index f7eb810..f40999d 100644
--- a/tests/test_mcp_wire.py
+++ b/tests/test_mcp_wire.py
@@ -22,7 +22,7 @@
)
-# --- Fixtures ----------------------------------------------------------------
+# --- Test helpers ---
class FakeMCPServer:
@@ -45,247 +45,204 @@ def serve(self, *args: Any, **kwargs: Any) -> None:
class IncompleteServer:
- """Missing required protocol attributes (no url, no serve)."""
+ """Missing required protocol attributes."""
def __init__(self) -> None:
self.name = "incomplete"
-# --- MCPServerWireSpec Construction ------------------------------------------
+# --- MCPServerWireSpec construction ---
-class TestMCPServerWireSpecConstruction:
- """Factory methods for creating wire specs."""
+def test_from_slug_simple():
+ spec = MCPServerWireSpec.from_slug("dedalus-labs/example-server")
+ assert spec.slug == "dedalus-labs/example-server"
+ assert spec.version is None
- def test_from_slug_simple(self) -> None:
- """Simple marketplace slug."""
- spec = MCPServerWireSpec.from_slug("dedalus-labs/example-server")
- assert spec.slug == "dedalus-labs/example-server"
- assert spec.version is None
- def test_from_slug_with_version(self) -> None:
- """Slug with explicit version parameter."""
- spec = MCPServerWireSpec.from_slug("dedalus-labs/example-server", version="v1.2.0")
- assert spec.version == "v1.2.0"
+def test_from_slug_with_version():
+ spec = MCPServerWireSpec.from_slug("dedalus-labs/example-server", version="v1.2.0")
+ assert spec.version == "v1.2.0"
- def test_from_slug_with_embedded_version(self) -> None:
- """Slug@version syntax parsed correctly."""
- spec = MCPServerWireSpec.from_slug("dedalus-labs/example-server@v2")
- assert spec.slug == "dedalus-labs/example-server"
- assert spec.version == "v2"
- def test_from_url(self) -> None:
- """Direct URL."""
- spec = MCPServerWireSpec.from_url(url="http://127.0.0.1:8000/mcp")
- assert spec.url == "http://127.0.0.1:8000/mcp"
+def test_from_slug_with_embedded_version():
+ spec = MCPServerWireSpec.from_slug("dedalus-labs/example-server@v2")
+ assert spec.slug == "dedalus-labs/example-server"
+ assert spec.version == "v2"
-# --- MCPServerWireSpec Validation --------------------------------------------
+def test_from_url():
+ spec = MCPServerWireSpec.from_url(url="http://127.0.0.1:8000/mcp")
+ assert spec.url == "http://127.0.0.1:8000/mcp"
-class TestMCPServerWireSpecValidation:
- """Pydantic validation rules for wire specs."""
+# --- MCPServerWireSpec validation ---
- def test_requires_slug_or_url(self) -> None:
- """Must provide either slug or url."""
- with pytest.raises(ValidationError) as exc_info:
- MCPServerWireSpec()
- assert "requires either 'slug' or 'url'" in str(exc_info.value)
- def test_rejects_both_slug_and_url(self) -> None:
- """Cannot provide both slug and url."""
- with pytest.raises(ValidationError) as exc_info:
- MCPServerWireSpec(
- slug="dedalus-labs/example-server",
- url="http://localhost:8000/mcp",
- )
- assert "cannot have both" in str(exc_info.value)
+def test_requires_slug_or_url():
+ with pytest.raises(ValidationError, match="requires either"):
+ MCPServerWireSpec()
- def test_url_must_start_with_http(self) -> None:
- """URL must have http:// or https:// scheme."""
- with pytest.raises(ValidationError) as exc_info:
- MCPServerWireSpec(url="localhost:8000/mcp")
- assert "must start with http://" in str(exc_info.value)
- def test_https_url_accepted(self) -> None:
- """HTTPS URLs are valid."""
- spec = MCPServerWireSpec(url="https://mcp.dedaluslabs.ai/acme/my-server/mcp")
- assert spec.url == "https://mcp.dedaluslabs.ai/acme/my-server/mcp"
+def test_rejects_both_slug_and_url():
+ with pytest.raises(ValidationError, match="cannot have both"):
+ MCPServerWireSpec(slug="dedalus-labs/example-server", url="http://localhost:8000/mcp")
- def test_localhost_url_accepted(self) -> None:
- """Localhost URLs are valid for dev."""
- spec = MCPServerWireSpec(url="http://127.0.0.1:8000/mcp")
- assert spec.url == "http://127.0.0.1:8000/mcp"
- def test_slug_format_validation(self) -> None:
- """Slug must match org/project pattern."""
- MCPServerWireSpec(slug="dedalus-labs/example-server")
- MCPServerWireSpec(slug="org_123/project_456")
- MCPServerWireSpec(slug="a/b")
+def test_url_must_start_with_http():
+ with pytest.raises(ValidationError, match="must start with http://"):
+ MCPServerWireSpec(url="localhost:8000/mcp")
- with pytest.raises(ValidationError):
- MCPServerWireSpec(slug="invalid-no-slash")
- with pytest.raises(ValidationError):
- MCPServerWireSpec(slug="too/many/slashes")
+def test_https_url_accepted():
+ spec = MCPServerWireSpec(url="https://mcp.dedaluslabs.ai/acme/my-server/mcp")
+ assert spec.url == "https://mcp.dedaluslabs.ai/acme/my-server/mcp"
- def test_slug_with_at_sign_rejected_by_pattern(self) -> None:
- """Slug pattern doesn't allow @ - use from_slug() for version parsing."""
- with pytest.raises(ValidationError) as exc_info:
- MCPServerWireSpec(slug="org/project@v1", version="v2")
- assert "string_pattern_mismatch" in str(exc_info.value).lower()
- # Correct way: use from_slug() which parses the version
- spec = MCPServerWireSpec.from_slug("org/project@v1")
- assert spec.slug == "org/project"
- assert spec.version == "v1"
+def test_localhost_url_accepted():
+ spec = MCPServerWireSpec(url="http://127.0.0.1:8000/mcp")
+ assert spec.url == "http://127.0.0.1:8000/mcp"
- def test_extra_fields_forbidden(self) -> None:
- """Extra fields rejected (ConfigDict extra='forbid')."""
- with pytest.raises(ValidationError) as exc_info:
- MCPServerWireSpec(slug="org/test", unknown_field="value") # type: ignore[call-arg]
- assert "extra" in str(exc_info.value).lower()
+def test_slug_format_validation():
+ MCPServerWireSpec(slug="dedalus-labs/example-server")
+ MCPServerWireSpec(slug="org_123/project_456")
+ MCPServerWireSpec(slug="a/b")
-# --- MCPServerWireSpec Serialization -----------------------------------------
+ with pytest.raises(ValidationError):
+ MCPServerWireSpec(slug="invalid-no-slash")
+ with pytest.raises(ValidationError):
+ MCPServerWireSpec(slug="too/many/slashes")
-class TestMCPServerWireSpecSerialization:
- """to_wire() output for different spec types."""
- def test_simple_slug_serializes_to_string(self) -> None:
- """Simple slug-only specs serialize to plain string (efficient)."""
- spec = MCPServerWireSpec.from_slug("dedalus-labs/example-server")
- wire = spec.to_wire()
- assert wire == "dedalus-labs/example-server"
- assert isinstance(wire, str)
+def test_slug_with_at_sign_rejected():
+ """Slug pattern doesn't allow @. Use from_slug() for version parsing."""
+ with pytest.raises(ValidationError):
+ MCPServerWireSpec(slug="org/project@v1", version="v2")
- def test_versioned_slug_serializes_to_dict(self) -> None:
- """Slug with version serializes to dict."""
- spec = MCPServerWireSpec.from_slug("dedalus-labs/example-server", version="v1.0.0")
- wire = spec.to_wire()
- assert wire == {"slug": "dedalus-labs/example-server", "version": "v1.0.0"}
+ spec = MCPServerWireSpec.from_slug("org/project@v1")
+ assert spec.slug == "org/project"
+ assert spec.version == "v1"
- def test_url_spec_serializes_to_dict(self) -> None:
- """URL-based specs serialize to dict with just url."""
- spec = MCPServerWireSpec.from_url(url="http://127.0.0.1:8000/mcp")
- wire = spec.to_wire()
- assert wire == {"url": "http://127.0.0.1:8000/mcp"}
- def test_serialization_is_json_compatible(self) -> None:
- """Wire format round-trips through JSON."""
- spec = MCPServerWireSpec.from_url(url="http://127.0.0.1:8000/mcp")
- json_str = json.dumps(spec.to_wire())
- assert '"url": "http://127.0.0.1:8000/mcp"' in json_str
+def test_extra_fields_forbidden():
+ with pytest.raises(ValidationError):
+ MCPServerWireSpec(slug="org/test", unknown_field="value") # type: ignore[call-arg]
-# --- MCPServerProtocol -------------------------------------------------------
+# --- MCPServerWireSpec serialization ---
-class TestMCPServerProtocol:
- """Structural typing for MCP server objects."""
-
- def test_fake_server_satisfies_protocol(self) -> None:
- """FakeMCPServer satisfies MCPServerProtocol."""
- server = FakeMCPServer(name="test", url="http://localhost:8000/mcp")
- assert is_mcp_server(server)
- assert isinstance(server, MCPServerProtocol)
-
- def test_string_does_not_satisfy_protocol(self) -> None:
- """Plain strings are not MCPServerProtocol."""
- assert not is_mcp_server("dedalus-labs/example-server")
-
- def test_dict_does_not_satisfy_protocol(self) -> None:
- """Dicts are not MCPServerProtocol."""
- assert not is_mcp_server({"name": "test", "url": "http://localhost/mcp"})
+def test_simple_slug_serializes_to_string():
+ wire = MCPServerWireSpec.from_slug("dedalus-labs/example-server").to_wire()
+ assert wire == "dedalus-labs/example-server"
+ assert isinstance(wire, str)
- def test_incomplete_server_does_not_satisfy(self) -> None:
- """Missing attributes means protocol not satisfied."""
- assert not is_mcp_server(IncompleteServer())
+def test_versioned_slug_serializes_to_dict():
+ wire = MCPServerWireSpec.from_slug("dedalus-labs/example-server", version="v1.0.0").to_wire()
+ assert wire == {"slug": "dedalus-labs/example-server", "version": "v1.0.0"}
-# --- serialize_mcp_servers ---------------------------------------------------
+def test_url_spec_serializes_to_dict():
+ wire = MCPServerWireSpec.from_url(url="http://127.0.0.1:8000/mcp").to_wire()
+ assert wire == {"url": "http://127.0.0.1:8000/mcp"}
-class TestSerializeMCPServers:
- """End-to-end serialization of mixed mcp_servers input."""
- def test_none_returns_empty_list(self) -> None:
- """None input returns empty list."""
- assert serialize_mcp_servers(None) == []
+def test_serialization_json_roundtrip():
+ wire = MCPServerWireSpec.from_url(url="http://127.0.0.1:8000/mcp").to_wire()
+ assert '"url": "http://127.0.0.1:8000/mcp"' in json.dumps(wire)
- def test_single_string_slug(self) -> None:
- """Single slug string passes through."""
- assert serialize_mcp_servers("dedalus-labs/example-server") == ["dedalus-labs/example-server"]
- def test_single_string_url(self) -> None:
- """Single URL string passes through."""
- assert serialize_mcp_servers("http://localhost:8000/mcp") == ["http://localhost:8000/mcp"]
+# --- MCPServerProtocol ---
- def test_single_mcp_server_object(self) -> None:
- """MCPServerProtocol object serializes to URL dict."""
- server = FakeMCPServer(name="calculator", url="http://127.0.0.1:8000/mcp")
- result = serialize_mcp_servers(server)
- assert result == [{"url": "http://127.0.0.1:8000/mcp"}]
- def test_list_of_slugs(self) -> None:
- """List of slug strings."""
- result = serialize_mcp_servers(["dedalus-labs/example-server", "dedalus-labs/weather"])
- assert result == ["dedalus-labs/example-server", "dedalus-labs/weather"]
+def test_fake_server_satisfies_protocol():
+ server = FakeMCPServer(name="test", url="http://localhost:8000/mcp")
+ assert is_mcp_server(server)
+ assert isinstance(server, MCPServerProtocol)
- def test_versioned_slug_in_list(self) -> None:
- """Slug@version syntax expands to dict."""
- result = serialize_mcp_servers(["dedalus-labs/example-server@v2"])
- assert result == [{"slug": "dedalus-labs/example-server", "version": "v2"}]
- def test_mixed_list(self) -> None:
- """Mixed list of slugs, URLs, and server objects."""
- server = FakeMCPServer(name="local", url="http://127.0.0.1:8000/mcp")
- result = serialize_mcp_servers([server, "dedalus-labs/example-server", "dedalus-labs/weather@v2"])
+def test_string_not_protocol():
+ assert not is_mcp_server("dedalus-labs/example-server")
- assert len(result) == 3
- assert result[0] == {"url": "http://127.0.0.1:8000/mcp"}
- assert result[1] == "dedalus-labs/example-server"
- assert result[2] == {"slug": "dedalus-labs/weather", "version": "v2"}
- def test_server_without_url_uses_name_as_slug(self) -> None:
- """Server object without URL returns name as slug."""
- server = FakeMCPServer(name="org/my-server", url=None)
- result = serialize_mcp_servers(server)
- assert result == ["org/my-server"]
-
- def test_dict_input_validated(self) -> None:
- """Dict inputs pass through MCPServerWireSpec validation."""
- result = serialize_mcp_servers([{"slug": "dedalus-labs/test"}])
- assert result == ["dedalus-labs/test"]
+def test_dict_not_protocol():
+ assert not is_mcp_server({"name": "test", "url": "http://localhost/mcp"})
-# --- JSON Compatibility ------------------------------------------------------
+def test_incomplete_server_not_protocol():
+ assert not is_mcp_server(IncompleteServer())
-class TestJSONCompatibility:
- """Wire format is JSON-serializable and API-compatible."""
+# --- serialize_mcp_servers ---
- def test_full_payload_structure(self) -> None:
- """Complete API payload round-trips through JSON."""
- server = FakeMCPServer(name="calculator", url="http://127.0.0.1:8000/mcp")
- wire_data = serialize_mcp_servers([server, "dedalus-labs/example-server", "dedalus-labs/weather@v2"])
- payload = {
- "model": "openai/gpt-5-nano",
- "messages": [{"role": "user", "content": "What is 2 + 2?"}],
- "mcp_servers": wire_data,
- }
+def test_serialize_none():
+ assert serialize_mcp_servers(None) == []
- parsed = json.loads(json.dumps(payload))
- assert parsed["mcp_servers"][0] == {"url": "http://127.0.0.1:8000/mcp"}
- assert parsed["mcp_servers"][1] == "dedalus-labs/example-server"
- assert parsed["mcp_servers"][2]["slug"] == "dedalus-labs/weather"
- def test_unicode_in_url(self) -> None:
- """Unicode in URL paths are handled."""
- # Dedalus-hosted URL with unicode in path token
- spec = MCPServerWireSpec(url="http://mcp.dedaluslabs.ai/acme/計算機/mcp")
- result = spec.to_wire()
- json_str = json.dumps(result, ensure_ascii=False)
- assert "計算機" in json_str
+def test_serialize_single_slug():
+ assert serialize_mcp_servers("dedalus-labs/example-server") == ["dedalus-labs/example-server"]
+
+
+def test_serialize_single_url():
+ assert serialize_mcp_servers("http://localhost:8000/mcp") == ["http://localhost:8000/mcp"]
+
+
+def test_serialize_server_object():
+ server = FakeMCPServer(name="calculator", url="http://127.0.0.1:8000/mcp")
+ assert serialize_mcp_servers(server) == [{"url": "http://127.0.0.1:8000/mcp"}]
+
+
+def test_serialize_slug_list():
+ result = serialize_mcp_servers(["dedalus-labs/example-server", "dedalus-labs/weather"])
+ assert result == ["dedalus-labs/example-server", "dedalus-labs/weather"]
+
+
+def test_serialize_versioned_slug():
+ result = serialize_mcp_servers(["dedalus-labs/example-server@v2"])
+ assert result == [{"slug": "dedalus-labs/example-server", "version": "v2"}]
+
+
+def test_serialize_mixed_list():
+ server = FakeMCPServer(name="local", url="http://127.0.0.1:8000/mcp")
+ result = serialize_mcp_servers([server, "dedalus-labs/example-server", "dedalus-labs/weather@v2"])
+
+ assert len(result) == 3
+ assert result[0] == {"url": "http://127.0.0.1:8000/mcp"}
+ assert result[1] == "dedalus-labs/example-server"
+ assert result[2] == {"slug": "dedalus-labs/weather", "version": "v2"}
+
+
+def test_serialize_server_without_url():
+ server = FakeMCPServer(name="org/my-server", url=None)
+ assert serialize_mcp_servers(server) == ["org/my-server"]
+
+
+def test_serialize_dict_input():
+ result = serialize_mcp_servers([{"slug": "dedalus-labs/test"}])
+ assert result == ["dedalus-labs/test"]
+
+
+# --- JSON compatibility ---
+
+
+def test_full_payload_json_roundtrip():
+ server = FakeMCPServer(name="calculator", url="http://127.0.0.1:8000/mcp")
+ wire_data = serialize_mcp_servers([server, "dedalus-labs/example-server", "dedalus-labs/weather@v2"])
+ payload = {
+ "model": "openai/gpt-5-nano",
+ "messages": [{"role": "user", "content": "What is 2 + 2?"}],
+ "mcp_servers": wire_data,
+ }
+ parsed = json.loads(json.dumps(payload))
+ assert parsed["mcp_servers"][0] == {"url": "http://127.0.0.1:8000/mcp"}
+ assert parsed["mcp_servers"][1] == "dedalus-labs/example-server"
+ assert parsed["mcp_servers"][2]["slug"] == "dedalus-labs/weather"
+
+
+def test_unicode_in_url():
+ spec = MCPServerWireSpec(url="http://mcp.dedaluslabs.ai/acme/計算機/mcp")
+ assert "計算機" in json.dumps(spec.to_wire(), ensure_ascii=False)
diff --git a/tests/test_mcp_wire_connections.py b/tests/test_mcp_wire_connections.py
index f71d8de..b0bdae2 100644
--- a/tests/test_mcp_wire_connections.py
+++ b/tests/test_mcp_wire_connections.py
@@ -20,18 +20,11 @@
)
-# --- Mock objects for testing ---
+# --- Test helpers ---
class MockConnection:
- """Mock Connection object implementing the protocol."""
-
- def __init__(
- self,
- name: str,
- base_url: str | None = None,
- timeout_ms: int = 30000,
- ) -> None:
+ def __init__(self, name: str, base_url: str | None = None, timeout_ms: int = 30000) -> None:
self._name = name
self._base_url = base_url
self._timeout_ms = timeout_ms
@@ -58,8 +51,6 @@ def to_dict(self) -> dict[str, Any]:
class MockCredential:
- """Mock Secret object implementing the protocol."""
-
def __init__(self, connection: MockConnection, **values: Any) -> None:
self._connection = connection
self._values = values
@@ -73,236 +64,161 @@ def values(self) -> dict[str, Any]:
return dict(self._values)
def to_dict(self) -> dict[str, Any]:
- return {
- "connection_name": self._connection.name,
- "values": dict(self._values),
- }
+ return {"connection_name": self._connection.name, "values": dict(self._values)}
def values_for_encryption(self) -> dict[str, Any]:
return dict(self._values)
-class TestSerializeConnection:
- """Test serialize_connection helper."""
-
- def test_with_connection_object(self) -> None:
- """Serialize Connection object with to_dict()."""
- conn = MockConnection("github", "https://api.github.com", 60000)
-
- result = serialize_connection(conn)
-
- assert result["name"] == "github"
- assert result["base_url"] == "https://api.github.com"
- assert result["timeout_ms"] == 60000
-
- def test_with_dict(self) -> None:
- """Pass-through for dict input."""
- data = {"name": "dedalus", "base_url": "https://api.dedaluslabs.ai/v1"}
-
- result = serialize_connection(data)
-
- assert result == data
-
- def test_duck_type_extraction(self) -> None:
- """Extract fields from object without to_dict()."""
-
- class BareConnection:
- name = "bare"
- base_url = "https://bare.api.com"
- timeout_ms = 15000
-
- result = serialize_connection(BareConnection())
-
- assert result["name"] == "bare"
- assert result["base_url"] == "https://bare.api.com"
- assert result["timeout_ms"] == 15000
-
-
-class TestMatchSecretsToConnections:
- """Test match_credentials_to_connections helper."""
-
- def test_basic_matching(self) -> None:
- """Match secrets to connections by name."""
- github = MockConnection("github")
- dedalus = MockConnection("dedalus")
-
- github_secret = MockCredential(github, token="ghp_xxx")
- dedalus_secret = MockCredential(dedalus, api_key="sk_xxx")
-
- pairs = match_credentials_to_connections(
- [github, dedalus],
- [dedalus_secret, github_secret], # Different order
- )
-
- assert len(pairs) == 2
- # Pairs should be in connection order
- assert pairs[0][0].name == "github"
- assert pairs[0][1].values == {"token": "ghp_xxx"}
- assert pairs[1][0].name == "dedalus"
- assert pairs[1][1].values == {"api_key": "sk_xxx"}
-
- def test_missing_secret_raises(self) -> None:
- """Raise ValueError if connection has no secret."""
- github = MockConnection("github")
- dedalus = MockConnection("dedalus")
-
- github_secret = MockCredential(github, token="ghp_xxx")
-
- with pytest.raises(
- ValueError, match="Missing credentials for connections.*dedalus"
- ):
- match_credentials_to_connections([github, dedalus], [github_secret])
-
- def test_with_dict_inputs(self) -> None:
- """Works with dict inputs too."""
- connections = [{"name": "api"}]
- secrets = [{"connection_name": "api", "values": {"key": "xxx"}}]
-
- pairs = match_credentials_to_connections(connections, secrets)
-
- assert len(pairs) == 1
- assert pairs[0][0]["name"] == "api"
- assert pairs[0][1]["values"] == {"key": "xxx"}
-
- def test_missing_multiple_secrets(self) -> None:
- """Error message lists all missing secrets."""
- github = MockConnection("github")
- dedalus = MockConnection("dedalus")
- slack = MockConnection("slack")
-
- github_secret = MockCredential(github, token="ghp_xxx")
-
- with pytest.raises(ValueError) as exc:
- match_credentials_to_connections([github, dedalus, slack], [github_secret])
-
- assert "dedalus" in str(exc.value)
- assert "slack" in str(exc.value)
-
-
-# --- Mock server for multi-server tests ---
-
-
class MockServer:
- """Mock MCPServer for testing."""
-
def __init__(self, name: str, connections: list[Any] | None = None) -> None:
self.name = name
self.connections = connections or []
-class TestCollectUniqueConnections:
- """Test collect_unique_connections helper."""
+# --- serialize_connection ---
- def test_single_server(self) -> None:
- """Collect connections from single server."""
- github = MockConnection("github")
- dedalus = MockConnection("dedalus")
- server = MockServer("bot", connections=[github, dedalus])
- result = collect_unique_connections([server])
+def test_serialize_connection_object():
+ conn = MockConnection("github", "https://api.github.com", 60000)
+ result = serialize_connection(conn)
+ assert result["name"] == "github"
+ assert result["base_url"] == "https://api.github.com"
+ assert result["timeout_ms"] == 60000
- assert len(result) == 2
- assert result[0].name == "github"
- assert result[1].name == "dedalus"
- def test_shared_connection_deduplicated(self) -> None:
- """Shared Connection appears only once."""
- github = MockConnection("github")
+def test_serialize_connection_dict():
+ data = {"name": "dedalus", "base_url": "https://api.dedaluslabs.ai/v1"}
+ assert serialize_connection(data) == data
- server_a = MockServer("issues", connections=[github])
- server_b = MockServer("prs", connections=[github])
- result = collect_unique_connections([server_a, server_b])
+def test_serialize_connection_duck_type():
+ class BareConnection:
+ name = "bare"
+ base_url = "https://bare.api.com"
+ timeout_ms = 15000
- assert len(result) == 1
- assert result[0].name == "github"
+ result = serialize_connection(BareConnection())
+ assert result["name"] == "bare"
+ assert result["base_url"] == "https://bare.api.com"
+ assert result["timeout_ms"] == 15000
- def test_same_name_different_objects(self) -> None:
- """Connections with same name are deduplicated."""
- # Even if different objects, same name means same logical connection
- github_a = MockConnection("github", base_url="https://api.github.com")
- github_b = MockConnection("github", base_url="https://api.github.com")
- server_a = MockServer("a", connections=[github_a])
- server_b = MockServer("b", connections=[github_b])
+# --- match_credentials_to_connections ---
- result = collect_unique_connections([server_a, server_b])
- # Should only include first occurrence
- assert len(result) == 1
- assert result[0] is github_a
+def test_match_basic():
+ github = MockConnection("github")
+ dedalus = MockConnection("dedalus")
+ pairs = match_credentials_to_connections(
+ [github, dedalus],
+ [MockCredential(dedalus, api_key="sk_xxx"), MockCredential(github, token="ghp_xxx")],
+ )
+ assert len(pairs) == 2
+ assert pairs[0][0].name == "github"
+ assert pairs[0][1].values == {"token": "ghp_xxx"}
+ assert pairs[1][0].name == "dedalus"
+ assert pairs[1][1].values == {"api_key": "sk_xxx"}
- def test_multiple_servers_multiple_connections(self) -> None:
- """Collect and deduplicate across multiple servers."""
- github = MockConnection("github")
- dedalus = MockConnection("dedalus")
- slack = MockConnection("slack")
- server_a = MockServer("bot1", connections=[github, dedalus])
- server_b = MockServer("bot2", connections=[github, slack])
+def test_match_missing_raises():
+ github = MockConnection("github")
+ dedalus = MockConnection("dedalus")
+ with pytest.raises(ValueError, match="Missing credentials for connections.*dedalus"):
+ match_credentials_to_connections([github, dedalus], [MockCredential(github, token="ghp_xxx")])
- result = collect_unique_connections([server_a, server_b])
- assert len(result) == 3
- names = [c.name for c in result]
- assert names == ["github", "dedalus", "slack"]
+def test_match_with_dicts():
+ connections = [{"name": "api"}]
+ secrets = [{"connection_name": "api", "values": {"key": "xxx"}}]
+ pairs = match_credentials_to_connections(connections, secrets)
+ assert len(pairs) == 1
+ assert pairs[0][0]["name"] == "api"
+ assert pairs[0][1]["values"] == {"key": "xxx"}
- def test_server_without_connections(self) -> None:
- """Handle servers with no connections."""
- server_a = MockServer("empty")
- server_b = MockServer("has", connections=[MockConnection("api")])
- result = collect_unique_connections([server_a, server_b])
+def test_match_missing_multiple():
+ github = MockConnection("github")
+ dedalus = MockConnection("dedalus")
+ slack = MockConnection("slack")
+ with pytest.raises(ValueError) as exc:
+ match_credentials_to_connections([github, dedalus, slack], [MockCredential(github, token="ghp_xxx")])
+ assert "dedalus" in str(exc.value)
+ assert "slack" in str(exc.value)
- assert len(result) == 1
+# --- collect_unique_connections ---
-class TestValidateSecretsForServers:
- """Test validate_credentials_for_servers (main SDK init validation)."""
- def test_all_connections_have_secrets(self) -> None:
- """Success when all connections have matching secrets."""
- github = MockConnection("github")
- dedalus = MockConnection("dedalus")
+def test_collect_single_server():
+ github = MockConnection("github")
+ dedalus = MockConnection("dedalus")
+ result = collect_unique_connections([MockServer("bot", connections=[github, dedalus])])
+ assert len(result) == 2
+ assert result[0].name == "github"
+ assert result[1].name == "dedalus"
- server = MockServer("bot", connections=[github, dedalus])
- github_secret = MockCredential(github, token="ghp_xxx")
- dedalus_secret = MockCredential(dedalus, api_key="sk_xxx")
+def test_collect_deduplicates_shared():
+ github = MockConnection("github")
+ result = collect_unique_connections([MockServer("a", [github]), MockServer("b", [github])])
+ assert len(result) == 1
+ assert result[0].name == "github"
- pairs = validate_credentials_for_servers(
- [server], [github_secret, dedalus_secret]
- )
- assert len(pairs) == 2
+def test_collect_deduplicates_by_name():
+ github_a = MockConnection("github", base_url="https://api.github.com")
+ github_b = MockConnection("github", base_url="https://api.github.com")
+ result = collect_unique_connections([MockServer("a", [github_a]), MockServer("b", [github_b])])
+ assert len(result) == 1
+ assert result[0] is github_a
- def test_shared_connection_one_secret(self) -> None:
- """One Secret covers shared Connection across servers."""
- github = MockConnection("github")
- server_a = MockServer("issues", connections=[github])
- server_b = MockServer("prs", connections=[github])
+def test_collect_multiple_servers():
+ github = MockConnection("github")
+ dedalus = MockConnection("dedalus")
+ slack = MockConnection("slack")
+ result = collect_unique_connections([
+ MockServer("bot1", [github, dedalus]),
+ MockServer("bot2", [github, slack]),
+ ])
+ assert [c.name for c in result] == ["github", "dedalus", "slack"]
- github_secret = MockCredential(github, token="ghp_xxx")
- pairs = validate_credentials_for_servers(
- [server_a, server_b],
- [github_secret], # Only one secret needed
- )
+def test_collect_server_without_connections():
+ result = collect_unique_connections([MockServer("empty"), MockServer("has", [MockConnection("api")])])
+ assert len(result) == 1
- assert len(pairs) == 1
- assert pairs[0][0].name == "github"
- def test_missing_secret_fails_fast(self) -> None:
- """Raise immediately if any connection lacks a secret."""
- github = MockConnection("github")
- dedalus = MockConnection("dedalus")
+# --- validate_credentials_for_servers ---
- server = MockServer("bot", connections=[github, dedalus])
- github_secret = MockCredential(github, token="ghp_xxx")
- with pytest.raises(ValueError) as exc:
- validate_credentials_for_servers([server], [github_secret])
+def test_validate_all_present():
+ github = MockConnection("github")
+ dedalus = MockConnection("dedalus")
+ server = MockServer("bot", connections=[github, dedalus])
+ pairs = validate_credentials_for_servers(
+ [server],
+ [MockCredential(github, token="ghp_xxx"), MockCredential(dedalus, api_key="sk_xxx")],
+ )
+ assert len(pairs) == 2
+
- assert "dedalus" in str(exc.value)
- assert "Missing credentials" in str(exc.value)
+def test_validate_shared_connection():
+ github = MockConnection("github")
+ pairs = validate_credentials_for_servers(
+ [MockServer("a", [github]), MockServer("b", [github])],
+ [MockCredential(github, token="ghp_xxx")],
+ )
+ assert len(pairs) == 1
+ assert pairs[0][0].name == "github"
+
+
+def test_validate_missing_fails_fast():
+ github = MockConnection("github")
+ dedalus = MockConnection("dedalus")
+ server = MockServer("bot", connections=[github, dedalus])
+ with pytest.raises(ValueError) as exc:
+ validate_credentials_for_servers([server], [MockCredential(github, token="ghp_xxx")])
+ assert "dedalus" in str(exc.value)
+ assert "Missing credentials" in str(exc.value)
From eace98189285aad352842177fcc95ac8298d077a Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 11 Feb 2026 14:24:34 +0000
Subject: [PATCH 21/23] chore(internal): fix lint error on Python 3.14
---
src/dedalus_labs/_utils/_compat.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/dedalus_labs/_utils/_compat.py b/src/dedalus_labs/_utils/_compat.py
index dd70323..2c70b29 100644
--- a/src/dedalus_labs/_utils/_compat.py
+++ b/src/dedalus_labs/_utils/_compat.py
@@ -26,7 +26,7 @@ def is_union(tp: Optional[Type[Any]]) -> bool:
else:
import types
- return tp is Union or tp is types.UnionType
+ return tp is Union or tp is types.UnionType # type: ignore[comparison-overlap]
def is_typeddict(tp: Type[Any]) -> bool:
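
The suppression is likely needed because Python 3.14 merged the two spellings — typing.Union and types.UnionType now name the same object — so the checker flags the second identity test as a non-overlapping comparison. A quick probe, assuming a 3.14 interpreter:

    import types
    import typing

    # True on 3.14, where the two were unified; False on 3.10-3.13.
    print(typing.Union is types.UnionType)
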
From ead37d4131d3e8758a5ace93c1eaf9e06a2849b5 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 12 Feb 2026 16:56:05 +0000
Subject: [PATCH 22/23] chore: format all `api.md` files
---
scripts/format | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/format b/scripts/format
index 1d2f9c6..c8e1f69 100755
--- a/scripts/format
+++ b/scripts/format
@@ -11,4 +11,4 @@ uv run ruff check --fix .
uv run ruff format
echo "==> Formatting docs"
-uv run python scripts/utils/ruffen-docs.py README.md api.md
+uv run python scripts/utils/ruffen-docs.py README.md $(find . -type f -name api.md)
From afba00e1eb0612b016d86c556fcd89a39dcbf828 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 13 Feb 2026 00:06:33 +0000
Subject: [PATCH 23/23] release: 0.3.0
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 44 +++++++++++++++++++++++++++++++++++
pyproject.toml | 2 +-
src/dedalus_labs/_version.py | 2 +-
4 files changed, 47 insertions(+), 3 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 10f3091..6b7b74c 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.2.0"
+ ".": "0.3.0"
}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e940d47..f097936 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,49 @@
# Changelog
+## 0.3.0 (2026-02-13)
+
+Full Changelog: [v0.2.0...v0.3.0](https://github.com/dedalus-labs/dedalus-sdk-python/compare/v0.2.0...v0.3.0)
+
+### Features
+
+* **client:** add custom JSON encoder for extended type support ([30a7195](https://github.com/dedalus-labs/dedalus-sdk-python/commit/30a719572bb8087c9c87e980f4c9f65b95f8c1d0))
+* **client:** add support for binary request streaming ([48f4cca](https://github.com/dedalus-labs/dedalus-sdk-python/commit/48f4cca7563d7e658824e40893068ae155ef75d4))
+* **runner:** dependency-aware parallel tool execution ([7e6716f](https://github.com/dedalus-labs/dedalus-sdk-python/commit/7e6716f61769038e7beb91eabed6d240c3443a9e))
+* **runner:** dependency-aware parallel tool execution ([#44](https://github.com/dedalus-labs/dedalus-sdk-python/issues/44)) ([a72f70f](https://github.com/dedalus-labs/dedalus-sdk-python/commit/a72f70f930c7f016e06ab3c0a3e82ffdb689f83e))
+* **stream:** return StreamResult from stream helpers; fix _compat SyntaxError ([#46](https://github.com/dedalus-labs/dedalus-sdk-python/issues/46)) ([5590e57](https://github.com/dedalus-labs/dedalus-sdk-python/commit/5590e57a4fc13b4f29c8c0109bd3eb4c6da585fa))
+* **stream:** return StreamResult from stream_async / stream_sync ([e3a55da](https://github.com/dedalus-labs/dedalus-sdk-python/commit/e3a55da85cc4deed8c5122193084a8a4675818c5))
+
+
+### Bug Fixes
+
+* **_compat:** remove duplicate by_alias keyword arg ([a7deb39](https://github.com/dedalus-labs/dedalus-sdk-python/commit/a7deb3904b73f21775309796a2b1f04a0a240e20))
+* **api:** add byok provider model ([bf52572](https://github.com/dedalus-labs/dedalus-sdk-python/commit/bf525727fbbb537225239ebcdf88c85c4e58d05d))
+* **api:** default auth server ([38c637a](https://github.com/dedalus-labs/dedalus-sdk-python/commit/38c637af8275faec50a08cd9c7cd7ebd5f47e78d))
+* **api:** narrow types ([3e16d98](https://github.com/dedalus-labs/dedalus-sdk-python/commit/3e16d9887c409d153e70df9c1190e33eb5b585e6))
+* **docs:** fix mcp installation instructions for remote servers ([e4e3619](https://github.com/dedalus-labs/dedalus-sdk-python/commit/e4e3619990099b43df78e88415e3627daf8c7425))
+* **runner:** allow local tool execution in mixed MCP+local scenarios ([5d0ce6d](https://github.com/dedalus-labs/dedalus-sdk-python/commit/5d0ce6d829684e54dcbccbd6c006373fd0fd855b))
+* **runner:** inject server tool results into conversation for mixed tool calls ([288b70e](https://github.com/dedalus-labs/dedalus-sdk-python/commit/288b70e22ee6e9af0593dc45ddac11ae6de78eb8))
+* **runner:** preserve thought_signature in tool call accumulation and extraction ([77e5958](https://github.com/dedalus-labs/dedalus-sdk-python/commit/77e5958beb3699e3fd08f8f2fd0b6ecb2932d010))
+* **runner:** server tool results, mixed-tool execution, thought_signature passthrough ([#45](https://github.com/dedalus-labs/dedalus-sdk-python/issues/45)) ([637d9b8](https://github.com/dedalus-labs/dedalus-sdk-python/commit/637d9b846fe197110e6685e61dfaa514857fd46f))
+* **runner:** skip early break when local tools need execution alongside MCP ([ad7379b](https://github.com/dedalus-labs/dedalus-sdk-python/commit/ad7379b033a1eb8216147f823d30c71fdbf815c4))
+
+
+### Chores
+
+* **api:** small type fixes ([2268aff](https://github.com/dedalus-labs/dedalus-sdk-python/commit/2268aff5c14821d23341baf4b65d7d7e5a26b7b7))
+* **ci:** add missing environment ([0ec49ed](https://github.com/dedalus-labs/dedalus-sdk-python/commit/0ec49edae803773f99466df54aa6f67ce0453e32))
+* **ci:** upgrade `actions/github-script` ([cf53a9e](https://github.com/dedalus-labs/dedalus-sdk-python/commit/cf53a9e097577c01785d09ee45e6df4a3745cdec))
+* format all `api.md` files ([ead37d4](https://github.com/dedalus-labs/dedalus-sdk-python/commit/ead37d4131d3e8758a5ace93c1eaf9e06a2849b5))
+* **internal:** bump dependencies ([696aacf](https://github.com/dedalus-labs/dedalus-sdk-python/commit/696aacfd4fae842fff7f564b2a58c65b902ebcc4))
+* **internal:** fix lint error on Python 3.14 ([eace981](https://github.com/dedalus-labs/dedalus-sdk-python/commit/eace98189285aad352842177fcc95ac8298d077a))
+* **internal:** update `actions/checkout` version ([c72dfca](https://github.com/dedalus-labs/dedalus-sdk-python/commit/c72dfca95456904ce446548768b1262387686467))
+* **runner:** strip commented-out production version and banner comments from core.py ([59350e3](https://github.com/dedalus-labs/dedalus-sdk-python/commit/59350e37cdd0b825addb7c40c2be7887ed83586f))
+
+
+### Styles
+
+* **tests:** flatten test classes to module-level functions ([57dc181](https://github.com/dedalus-labs/dedalus-sdk-python/commit/57dc1818c864459b2e1da71487eb3adf94dbf668))
+
## 0.2.0 (2026-01-08)
Full Changelog: [v0.2.0...v0.2.0](https://github.com/dedalus-labs/dedalus-sdk-python/compare/v0.2.0...v0.2.0)
diff --git a/pyproject.toml b/pyproject.toml
index 8c5724e..c409c14 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "dedalus_labs"
-version = "0.2.0"
+version = "0.3.0"
description = "The official Python library for the Dedalus API"
dynamic = ["readme"]
license = "MIT"
diff --git a/src/dedalus_labs/_version.py b/src/dedalus_labs/_version.py
index 6971f9b..9329938 100644
--- a/src/dedalus_labs/_version.py
+++ b/src/dedalus_labs/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "dedalus_labs"
-__version__ = "0.2.0" # x-release-please-version
+__version__ = "0.3.0" # x-release-please-version