From f0793c171465dd57d0fbf82a3bb2281d046f500e Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 17 Mar 2026 08:07:51 +0000
Subject: [PATCH 01/32] fix(pydantic): do not pass `by_alias` unless set
---
src/parallel/_compat.py | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/src/parallel/_compat.py b/src/parallel/_compat.py
index 020ffeb..340c91a 100644
--- a/src/parallel/_compat.py
+++ b/src/parallel/_compat.py
@@ -2,7 +2,7 @@
from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload
from datetime import date, datetime
-from typing_extensions import Self, Literal
+from typing_extensions import Self, Literal, TypedDict
import pydantic
from pydantic.fields import FieldInfo
@@ -131,6 +131,10 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
return model.model_dump_json(indent=indent)
+class _ModelDumpKwargs(TypedDict, total=False):
+ by_alias: bool
+
+
def model_dump(
model: pydantic.BaseModel,
*,
@@ -142,6 +146,9 @@ def model_dump(
by_alias: bool | None = None,
) -> dict[str, Any]:
if (not PYDANTIC_V1) or hasattr(model, "model_dump"):
+ kwargs: _ModelDumpKwargs = {}
+ if by_alias is not None:
+ kwargs["by_alias"] = by_alias
return model.model_dump(
mode=mode,
exclude=exclude,
@@ -149,7 +156,7 @@ def model_dump(
exclude_defaults=exclude_defaults,
# warnings are not supported in Pydantic v1
warnings=True if PYDANTIC_V1 else warnings,
- by_alias=by_alias,
+ **kwargs,
)
return cast(
"dict[str, Any]",
From 964a46ddfc9ead64e4105e42192a780bc91716b0 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 17 Mar 2026 08:13:07 +0000
Subject: [PATCH 02/32] fix(deps): bump minimum typing-extensions version
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 12646c4..b50e064 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,7 +11,7 @@ authors = [
dependencies = [
"httpx>=0.23.0, <1",
"pydantic>=1.9.0, <3",
- "typing-extensions>=4.10, <5",
+ "typing-extensions>=4.14, <5",
"anyio>=3.5.0, <5",
"distro>=1.7.0, <2",
"sniffio",
From 014c80287318df0db6207df1579be00c4717f24d Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 17 Mar 2026 08:17:05 +0000
Subject: [PATCH 03/32] chore(internal): tweak CI branches
---
.github/workflows/ci.yml | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3c4a087..35d407b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,12 +1,14 @@
name: CI
on:
push:
- branches-ignore:
- - 'generated'
- - 'codegen/**'
- - 'integrated/**'
- - 'stl-preview-head/**'
- - 'stl-preview-base/**'
+ branches:
+ - '**'
+ - '!integrated/**'
+ - '!stl-preview-head/**'
+ - '!stl-preview-base/**'
+ - '!generated'
+ - '!codegen/**'
+ - 'codegen/stl/**'
pull_request:
branches-ignore:
- 'stl-preview-head/**'
From 59315972d27246485be5cb52671aecaa3aa46253 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 20 Mar 2026 03:03:18 +0000
Subject: [PATCH 04/32] fix: sanitize endpoint path params
---
src/parallel/_utils/__init__.py | 1 +
src/parallel/_utils/_path.py | 127 ++++++++++++++++++++++
src/parallel/resources/beta/findall.py | 30 ++---
src/parallel/resources/beta/task_group.py | 18 +--
src/parallel/resources/beta/task_run.py | 10 +-
src/parallel/resources/task_run.py | 10 +-
tests/test_utils/test_path.py | 89 +++++++++++++++
7 files changed, 251 insertions(+), 34 deletions(-)
create mode 100644 src/parallel/_utils/_path.py
create mode 100644 tests/test_utils/test_path.py
diff --git a/src/parallel/_utils/__init__.py b/src/parallel/_utils/__init__.py
index b70d7b2..f1aef8a 100644
--- a/src/parallel/_utils/__init__.py
+++ b/src/parallel/_utils/__init__.py
@@ -1,3 +1,4 @@
+from ._path import path_template as path_template
from ._sync import asyncify as asyncify
from ._proxy import LazyProxy as LazyProxy
from ._utils import (
diff --git a/src/parallel/_utils/_path.py b/src/parallel/_utils/_path.py
new file mode 100644
index 0000000..4d6e1e4
--- /dev/null
+++ b/src/parallel/_utils/_path.py
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+import re
+from typing import (
+ Any,
+ Mapping,
+ Callable,
+)
+from urllib.parse import quote
+
+# Matches '.' or '..' where each dot is either literal or percent-encoded (%2e / %2E).
+_DOT_SEGMENT_RE = re.compile(r"^(?:\.|%2[eE]){1,2}$")
+
+_PLACEHOLDER_RE = re.compile(r"\{(\w+)\}")
+
+
+def _quote_path_segment_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI path segment.
+
+ Considers characters not in `pchar` set from RFC 3986 §3.3 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.3
+ """
+ # quote() already treats unreserved characters (letters, digits, and -._~)
+ # as safe, so we only need to add sub-delims, ':', and '@'.
+ # Notably, unlike the default `safe` for quote(), / is unsafe and must be quoted.
+ return quote(value, safe="!$&'()*+,;=:@")
+
+
+def _quote_query_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI query string.
+
+ Considers &, = and characters not in `query` set from RFC 3986 §3.4 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.4
+ """
+ return quote(value, safe="!$'()*+,;:@/?")
+
+
+def _quote_fragment_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI fragment.
+
+ Considers characters not in `fragment` set from RFC 3986 §3.5 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.5
+ """
+ return quote(value, safe="!$&'()*+,;=:@/?")
+
+
+def _interpolate(
+ template: str,
+ values: Mapping[str, Any],
+ quoter: Callable[[str], str],
+) -> str:
+ """Replace {name} placeholders in `template`, quoting each value with `quoter`.
+
+ Placeholder names are looked up in `values`.
+
+ Raises:
+ KeyError: If a placeholder is not found in `values`.
+ """
+ # re.split with a capturing group returns alternating
+ # [text, name, text, name, ..., text] elements.
+ parts = _PLACEHOLDER_RE.split(template)
+
+ for i in range(1, len(parts), 2):
+ name = parts[i]
+ if name not in values:
+ raise KeyError(f"a value for placeholder {{{name}}} was not provided")
+ val = values[name]
+ if val is None:
+ parts[i] = "null"
+ elif isinstance(val, bool):
+ parts[i] = "true" if val else "false"
+ else:
+ parts[i] = quoter(str(values[name]))
+
+ return "".join(parts)
+
+
+def path_template(template: str, /, **kwargs: Any) -> str:
+ """Interpolate {name} placeholders in `template` from keyword arguments.
+
+ Args:
+ template: The template string containing {name} placeholders.
+ **kwargs: Keyword arguments to interpolate into the template.
+
+ Returns:
+ The template with placeholders interpolated and percent-encoded.
+
+ Safe characters for percent-encoding are dependent on the URI component.
+ Placeholders in path and fragment portions are percent-encoded where the `segment`
+ and `fragment` sets from RFC 3986 respectively are considered safe.
+ Placeholders in the query portion are percent-encoded where the `query` set from
+    RFC 3986 §3.4 is considered safe except for = and & characters.
+
+ Raises:
+ KeyError: If a placeholder is not found in `kwargs`.
+ ValueError: If resulting path contains /./ or /../ segments (including percent-encoded dot-segments).
+ """
+ # Split the template into path, query, and fragment portions.
+ fragment_template: str | None = None
+ query_template: str | None = None
+
+ rest = template
+ if "#" in rest:
+ rest, fragment_template = rest.split("#", 1)
+ if "?" in rest:
+ rest, query_template = rest.split("?", 1)
+ path_template = rest
+
+ # Interpolate each portion with the appropriate quoting rules.
+ path_result = _interpolate(path_template, kwargs, _quote_path_segment_part)
+
+ # Reject dot-segments (. and ..) in the final assembled path. The check
+ # runs after interpolation so that adjacent placeholders or a mix of static
+ # text and placeholders that together form a dot-segment are caught.
+ # Also reject percent-encoded dot-segments to protect against incorrectly
+ # implemented normalization in servers/proxies.
+ for segment in path_result.split("/"):
+ if _DOT_SEGMENT_RE.match(segment):
+ raise ValueError(f"Constructed path {path_result!r} contains dot-segment {segment!r} which is not allowed")
+
+ result = path_result
+ if query_template is not None:
+ result += "?" + _interpolate(query_template, kwargs, _quote_query_part)
+ if fragment_template is not None:
+ result += "#" + _interpolate(fragment_template, kwargs, _quote_fragment_part)
+
+ return result
diff --git a/src/parallel/resources/beta/findall.py b/src/parallel/resources/beta/findall.py
index ef5ab71..acc3d98 100644
--- a/src/parallel/resources/beta/findall.py
+++ b/src/parallel/resources/beta/findall.py
@@ -9,7 +9,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import is_given, maybe_transform, strip_not_given, async_maybe_transform
+from ..._utils import is_given, path_template, maybe_transform, strip_not_given, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -209,7 +209,7 @@ def retrieve(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._get(
- f"/v1beta/findall/runs/{findall_id}",
+ path_template("/v1beta/findall/runs/{findall_id}", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -256,7 +256,7 @@ def cancel(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._post(
- f"/v1beta/findall/runs/{findall_id}/cancel",
+ path_template("/v1beta/findall/runs/{findall_id}/cancel", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -312,7 +312,7 @@ def enrich(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._post(
- f"/v1beta/findall/runs/{findall_id}/enrich",
+ path_template("/v1beta/findall/runs/{findall_id}/enrich", findall_id=findall_id),
body=maybe_transform(
{
"output_schema": output_schema,
@@ -375,7 +375,7 @@ def events(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._get(
- f"/v1beta/findall/runs/{findall_id}/events",
+ path_template("/v1beta/findall/runs/{findall_id}/events", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -439,7 +439,7 @@ def extend(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._post(
- f"/v1beta/findall/runs/{findall_id}/extend",
+ path_template("/v1beta/findall/runs/{findall_id}/extend", findall_id=findall_id),
body=maybe_transform(
{"additional_match_limit": additional_match_limit}, findall_extend_params.FindAllExtendParams
),
@@ -542,7 +542,7 @@ def result(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._get(
- f"/v1beta/findall/runs/{findall_id}/result",
+ path_template("/v1beta/findall/runs/{findall_id}/result", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -589,7 +589,7 @@ def schema(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._get(
- f"/v1beta/findall/runs/{findall_id}/schema",
+ path_template("/v1beta/findall/runs/{findall_id}/schema", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -754,7 +754,7 @@ async def retrieve(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._get(
- f"/v1beta/findall/runs/{findall_id}",
+ path_template("/v1beta/findall/runs/{findall_id}", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -801,7 +801,7 @@ async def cancel(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._post(
- f"/v1beta/findall/runs/{findall_id}/cancel",
+ path_template("/v1beta/findall/runs/{findall_id}/cancel", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -857,7 +857,7 @@ async def enrich(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._post(
- f"/v1beta/findall/runs/{findall_id}/enrich",
+ path_template("/v1beta/findall/runs/{findall_id}/enrich", findall_id=findall_id),
body=await async_maybe_transform(
{
"output_schema": output_schema,
@@ -920,7 +920,7 @@ async def events(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._get(
- f"/v1beta/findall/runs/{findall_id}/events",
+ path_template("/v1beta/findall/runs/{findall_id}/events", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -984,7 +984,7 @@ async def extend(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._post(
- f"/v1beta/findall/runs/{findall_id}/extend",
+ path_template("/v1beta/findall/runs/{findall_id}/extend", findall_id=findall_id),
body=await async_maybe_transform(
{"additional_match_limit": additional_match_limit}, findall_extend_params.FindAllExtendParams
),
@@ -1087,7 +1087,7 @@ async def result(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._get(
- f"/v1beta/findall/runs/{findall_id}/result",
+ path_template("/v1beta/findall/runs/{findall_id}/result", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -1134,7 +1134,7 @@ async def schema(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._get(
- f"/v1beta/findall/runs/{findall_id}/schema",
+ path_template("/v1beta/findall/runs/{findall_id}/schema", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/parallel/resources/beta/task_group.py b/src/parallel/resources/beta/task_group.py
index b15eab7..54f8cb1 100644
--- a/src/parallel/resources/beta/task_group.py
+++ b/src/parallel/resources/beta/task_group.py
@@ -9,7 +9,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import is_given, maybe_transform, strip_not_given, async_maybe_transform
+from ..._utils import is_given, path_template, maybe_transform, strip_not_given, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -129,7 +129,7 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `task_group_id` but received {task_group_id!r}")
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._get(
- f"/v1beta/tasks/groups/{task_group_id}",
+ path_template("/v1beta/tasks/groups/{task_group_id}", task_group_id=task_group_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -188,7 +188,7 @@ def add_runs(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._post(
- f"/v1beta/tasks/groups/{task_group_id}/runs",
+ path_template("/v1beta/tasks/groups/{task_group_id}/runs", task_group_id=task_group_id),
body=maybe_transform(
{
"inputs": inputs,
@@ -235,7 +235,7 @@ def events(
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._get(
- f"/v1beta/tasks/groups/{task_group_id}/events",
+ path_template("/v1beta/tasks/groups/{task_group_id}/events", task_group_id=task_group_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -300,7 +300,7 @@ def get_runs(
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._get(
- f"/v1beta/tasks/groups/{task_group_id}/runs",
+ path_template("/v1beta/tasks/groups/{task_group_id}/runs", task_group_id=task_group_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -416,7 +416,7 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `task_group_id` but received {task_group_id!r}")
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._get(
- f"/v1beta/tasks/groups/{task_group_id}",
+ path_template("/v1beta/tasks/groups/{task_group_id}", task_group_id=task_group_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -475,7 +475,7 @@ async def add_runs(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._post(
- f"/v1beta/tasks/groups/{task_group_id}/runs",
+ path_template("/v1beta/tasks/groups/{task_group_id}/runs", task_group_id=task_group_id),
body=await async_maybe_transform(
{
"inputs": inputs,
@@ -522,7 +522,7 @@ async def events(
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._get(
- f"/v1beta/tasks/groups/{task_group_id}/events",
+ path_template("/v1beta/tasks/groups/{task_group_id}/events", task_group_id=task_group_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -587,7 +587,7 @@ async def get_runs(
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._get(
- f"/v1beta/tasks/groups/{task_group_id}/runs",
+ path_template("/v1beta/tasks/groups/{task_group_id}/runs", task_group_id=task_group_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
diff --git a/src/parallel/resources/beta/task_run.py b/src/parallel/resources/beta/task_run.py
index 3fb567c..b19286a 100644
--- a/src/parallel/resources/beta/task_run.py
+++ b/src/parallel/resources/beta/task_run.py
@@ -8,7 +8,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import is_given, maybe_transform, strip_not_given, async_maybe_transform
+from ..._utils import is_given, path_template, maybe_transform, strip_not_given, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -198,7 +198,7 @@ def events(
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._get(
- f"/v1beta/tasks/runs/{run_id}/events",
+ path_template("/v1beta/tasks/runs/{run_id}/events", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -248,7 +248,7 @@ def result(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._get(
- f"/v1/tasks/runs/{run_id}/result?beta=true",
+ path_template("/v1/tasks/runs/{run_id}/result?beta=true", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -426,7 +426,7 @@ async def events(
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._get(
- f"/v1beta/tasks/runs/{run_id}/events",
+ path_template("/v1beta/tasks/runs/{run_id}/events", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -476,7 +476,7 @@ async def result(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._get(
- f"/v1/tasks/runs/{run_id}/result?beta=true",
+ path_template("/v1/tasks/runs/{run_id}/result?beta=true", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
diff --git a/src/parallel/resources/task_run.py b/src/parallel/resources/task_run.py
index eb0df13..af330cd 100644
--- a/src/parallel/resources/task_run.py
+++ b/src/parallel/resources/task_run.py
@@ -11,7 +11,7 @@
from ..types import task_run_create_params, task_run_result_params
from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
+from .._utils import path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -161,7 +161,7 @@ def retrieve(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._get(
- f"/v1/tasks/runs/{run_id}",
+ path_template("/v1/tasks/runs/{run_id}", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -195,7 +195,7 @@ def result(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._get(
- f"/v1/tasks/runs/{run_id}/result",
+ path_template("/v1/tasks/runs/{run_id}/result", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -460,7 +460,7 @@ async def retrieve(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return await self._get(
- f"/v1/tasks/runs/{run_id}",
+ path_template("/v1/tasks/runs/{run_id}", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -494,7 +494,7 @@ async def result(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return await self._get(
- f"/v1/tasks/runs/{run_id}/result",
+ path_template("/v1/tasks/runs/{run_id}/result", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
diff --git a/tests/test_utils/test_path.py b/tests/test_utils/test_path.py
new file mode 100644
index 0000000..c0364ff
--- /dev/null
+++ b/tests/test_utils/test_path.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+from typing import Any
+
+import pytest
+
+from parallel._utils._path import path_template
+
+
+@pytest.mark.parametrize(
+ "template, kwargs, expected",
+ [
+ ("/v1/{id}", dict(id="abc"), "/v1/abc"),
+ ("/v1/{a}/{b}", dict(a="x", b="y"), "/v1/x/y"),
+ ("/v1/{a}{b}/path/{c}?val={d}#{e}", dict(a="x", b="y", c="z", d="u", e="v"), "/v1/xy/path/z?val=u#v"),
+ ("/{w}/{w}", dict(w="echo"), "/echo/echo"),
+ ("/v1/static", {}, "/v1/static"),
+ ("", {}, ""),
+ ("/v1/?q={n}&count=10", dict(n=42), "/v1/?q=42&count=10"),
+ ("/v1/{v}", dict(v=None), "/v1/null"),
+ ("/v1/{v}", dict(v=True), "/v1/true"),
+ ("/v1/{v}", dict(v=False), "/v1/false"),
+ ("/v1/{v}", dict(v=".hidden"), "/v1/.hidden"), # dot prefix ok
+ ("/v1/{v}", dict(v="file.txt"), "/v1/file.txt"), # dot in middle ok
+ ("/v1/{v}", dict(v="..."), "/v1/..."), # triple dot ok
+ ("/v1/{a}{b}", dict(a=".", b="txt"), "/v1/.txt"), # dot var combining with adjacent to be ok
+ ("/items?q={v}#{f}", dict(v=".", f=".."), "/items?q=.#.."), # dots in query/fragment are fine
+ (
+ "/v1/{a}?query={b}",
+ dict(a="../../other/endpoint", b="a&bad=true"),
+ "/v1/..%2F..%2Fother%2Fendpoint?query=a%26bad%3Dtrue",
+ ),
+ ("/v1/{val}", dict(val="a/b/c"), "/v1/a%2Fb%2Fc"),
+ ("/v1/{val}", dict(val="a/b/c?query=value"), "/v1/a%2Fb%2Fc%3Fquery=value"),
+ ("/v1/{val}", dict(val="a/b/c?query=value&bad=true"), "/v1/a%2Fb%2Fc%3Fquery=value&bad=true"),
+ ("/v1/{val}", dict(val="%20"), "/v1/%2520"), # escapes escape sequences in input
+ # Query: slash and ? are safe, # is not
+ ("/items?q={v}", dict(v="a/b"), "/items?q=a/b"),
+ ("/items?q={v}", dict(v="a?b"), "/items?q=a?b"),
+ ("/items?q={v}", dict(v="a#b"), "/items?q=a%23b"),
+ ("/items?q={v}", dict(v="a b"), "/items?q=a%20b"),
+ # Fragment: slash and ? are safe
+ ("/docs#{v}", dict(v="a/b"), "/docs#a/b"),
+ ("/docs#{v}", dict(v="a?b"), "/docs#a?b"),
+ # Path: slash, ? and # are all encoded
+ ("/v1/{v}", dict(v="a/b"), "/v1/a%2Fb"),
+ ("/v1/{v}", dict(v="a?b"), "/v1/a%3Fb"),
+ ("/v1/{v}", dict(v="a#b"), "/v1/a%23b"),
+ # same var encoded differently by component
+ (
+ "/v1/{v}?q={v}#{v}",
+ dict(v="a/b?c#d"),
+ "/v1/a%2Fb%3Fc%23d?q=a/b?c%23d#a/b?c%23d",
+ ),
+ ("/v1/{val}", dict(val="x?admin=true"), "/v1/x%3Fadmin=true"), # query injection
+ ("/v1/{val}", dict(val="x#admin"), "/v1/x%23admin"), # fragment injection
+ ],
+)
+def test_interpolation(template: str, kwargs: dict[str, Any], expected: str) -> None:
+ assert path_template(template, **kwargs) == expected
+
+
+def test_missing_kwarg_raises_key_error() -> None:
+ with pytest.raises(KeyError, match="org_id"):
+ path_template("/v1/{org_id}")
+
+
+@pytest.mark.parametrize(
+ "template, kwargs",
+ [
+ ("{a}/path", dict(a=".")),
+ ("{a}/path", dict(a="..")),
+ ("/v1/{a}", dict(a=".")),
+ ("/v1/{a}", dict(a="..")),
+ ("/v1/{a}/path", dict(a=".")),
+ ("/v1/{a}/path", dict(a="..")),
+ ("/v1/{a}{b}", dict(a=".", b=".")), # adjacent vars → ".."
+ ("/v1/{a}.", dict(a=".")), # var + static → ".."
+ ("/v1/{a}{b}", dict(a="", b=".")), # empty + dot → "."
+ ("/v1/%2e/{x}", dict(x="ok")), # encoded dot in static text
+ ("/v1/%2e./{x}", dict(x="ok")), # mixed encoded ".." in static
+ ("/v1/.%2E/{x}", dict(x="ok")), # mixed encoded ".." in static
+ ("/v1/{v}?q=1", dict(v="..")),
+ ("/v1/{v}#frag", dict(v="..")),
+ ],
+)
+def test_dot_segment_rejected(template: str, kwargs: dict[str, Any]) -> None:
+ with pytest.raises(ValueError, match="dot-segment"):
+ path_template(template, **kwargs)
From 032745ea1a03b3d2516b789a28a3c8b8034660d8 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 20 Mar 2026 03:04:29 +0000
Subject: [PATCH 05/32] refactor(tests): switch from prism to steady
---
CONTRIBUTING.md | 2 +-
scripts/mock | 26 ++++++++++-----------
scripts/test | 16 ++++++-------
tests/api_resources/beta/test_findall.py | 10 --------
tests/api_resources/beta/test_task_group.py | 20 ----------------
tests/api_resources/beta/test_task_run.py | 8 -------
6 files changed, 22 insertions(+), 60 deletions(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3276e79..1ecb266 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -85,7 +85,7 @@ $ pip install ./path-to-wheel-file.whl
## Running tests
-Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.
+Most tests require you to [set up a mock server](https://github.com/dgellow/steady) against the OpenAPI spec to run the tests.
```sh
$ ./scripts/mock
diff --git a/scripts/mock b/scripts/mock
index bcf3b39..38201de 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -19,34 +19,34 @@ fi
echo "==> Starting mock server with URL ${URL}"
-# Run prism mock on the given spec
+# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
- npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism --version
+ npm exec --package=@stdy/cli@0.19.3 -- steady --version
- npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log &
+ npm exec --package=@stdy/cli@0.19.3 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-query-object-format=brackets "$URL" &> .stdy.log &
- # Wait for server to come online (max 30s)
+ # Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
attempts=0
- while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do
+ while ! curl --silent --fail "http://127.0.0.1:4010/_x-steady/health" >/dev/null 2>&1; do
+ if ! kill -0 $! 2>/dev/null; then
+ echo
+ cat .stdy.log
+ exit 1
+ fi
attempts=$((attempts + 1))
if [ "$attempts" -ge 300 ]; then
echo
- echo "Timed out waiting for Prism server to start"
- cat .prism.log
+ echo "Timed out waiting for Steady server to start"
+ cat .stdy.log
exit 1
fi
echo -n "."
sleep 0.1
done
- if grep -q "✖ fatal" ".prism.log"; then
- cat .prism.log
- exit 1
- fi
-
echo
else
- npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL"
+ npm exec --package=@stdy/cli@0.19.3 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-query-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index dbeda2d..2dfdc40 100755
--- a/scripts/test
+++ b/scripts/test
@@ -9,8 +9,8 @@ GREEN='\033[0;32m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color
-function prism_is_running() {
- curl --silent "http://localhost:4010" >/dev/null 2>&1
+function steady_is_running() {
+ curl --silent "http://127.0.0.1:4010/_x-steady/health" >/dev/null 2>&1
}
kill_server_on_port() {
@@ -25,7 +25,7 @@ function is_overriding_api_base_url() {
[ -n "$TEST_API_BASE_URL" ]
}
-if ! is_overriding_api_base_url && ! prism_is_running ; then
+if ! is_overriding_api_base_url && ! steady_is_running ; then
# When we exit this script, make sure to kill the background mock server process
trap 'kill_server_on_port 4010' EXIT
@@ -36,19 +36,19 @@ fi
if is_overriding_api_base_url ; then
echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}"
echo
-elif ! prism_is_running ; then
- echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
+elif ! steady_is_running ; then
+ echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Steady server"
echo -e "running against your OpenAPI spec."
echo
echo -e "To run the server, pass in the path or url of your OpenAPI"
- echo -e "spec to the prism command:"
+ echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.3 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-query-object-format=brackets${NC}"
echo
exit 1
else
- echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
+ echo -e "${GREEN}✔ Mock steady server is running with your OpenAPI spec${NC}"
echo
fi
diff --git a/tests/api_resources/beta/test_findall.py b/tests/api_resources/beta/test_findall.py
index 6ee829d..18996a9 100644
--- a/tests/api_resources/beta/test_findall.py
+++ b/tests/api_resources/beta/test_findall.py
@@ -296,7 +296,6 @@ def test_path_params_enrich(self, client: Parallel) -> None:
},
)
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events(self, client: Parallel) -> None:
findall_stream = client.beta.findall.events(
@@ -304,7 +303,6 @@ def test_method_events(self, client: Parallel) -> None:
)
findall_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events_with_all_params(self, client: Parallel) -> None:
findall_stream = client.beta.findall.events(
@@ -315,7 +313,6 @@ def test_method_events_with_all_params(self, client: Parallel) -> None:
)
findall_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_raw_response_events(self, client: Parallel) -> None:
response = client.beta.findall.with_raw_response.events(
@@ -326,7 +323,6 @@ def test_raw_response_events(self, client: Parallel) -> None:
stream = response.parse()
stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_streaming_response_events(self, client: Parallel) -> None:
with client.beta.findall.with_streaming_response.events(
@@ -340,7 +336,6 @@ def test_streaming_response_events(self, client: Parallel) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_path_params_events(self, client: Parallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `findall_id` but received ''"):
@@ -811,7 +806,6 @@ async def test_path_params_enrich(self, async_client: AsyncParallel) -> None:
},
)
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events(self, async_client: AsyncParallel) -> None:
findall_stream = await async_client.beta.findall.events(
@@ -819,7 +813,6 @@ async def test_method_events(self, async_client: AsyncParallel) -> None:
)
await findall_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events_with_all_params(self, async_client: AsyncParallel) -> None:
findall_stream = await async_client.beta.findall.events(
@@ -830,7 +823,6 @@ async def test_method_events_with_all_params(self, async_client: AsyncParallel)
)
await findall_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
response = await async_client.beta.findall.with_raw_response.events(
@@ -841,7 +833,6 @@ async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
stream = await response.parse()
await stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_streaming_response_events(self, async_client: AsyncParallel) -> None:
async with async_client.beta.findall.with_streaming_response.events(
@@ -855,7 +846,6 @@ async def test_streaming_response_events(self, async_client: AsyncParallel) -> N
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_path_params_events(self, async_client: AsyncParallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `findall_id` but received ''"):
diff --git a/tests/api_resources/beta/test_task_group.py b/tests/api_resources/beta/test_task_group.py
index cc200ce..f186596 100644
--- a/tests/api_resources/beta/test_task_group.py
+++ b/tests/api_resources/beta/test_task_group.py
@@ -212,7 +212,6 @@ def test_path_params_add_runs(self, client: Parallel) -> None:
],
)
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events(self, client: Parallel) -> None:
task_group_stream = client.beta.task_group.events(
@@ -220,7 +219,6 @@ def test_method_events(self, client: Parallel) -> None:
)
task_group_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events_with_all_params(self, client: Parallel) -> None:
task_group_stream = client.beta.task_group.events(
@@ -230,7 +228,6 @@ def test_method_events_with_all_params(self, client: Parallel) -> None:
)
task_group_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_raw_response_events(self, client: Parallel) -> None:
response = client.beta.task_group.with_raw_response.events(
@@ -241,7 +238,6 @@ def test_raw_response_events(self, client: Parallel) -> None:
stream = response.parse()
stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_streaming_response_events(self, client: Parallel) -> None:
with client.beta.task_group.with_streaming_response.events(
@@ -255,7 +251,6 @@ def test_streaming_response_events(self, client: Parallel) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_path_params_events(self, client: Parallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"):
@@ -263,7 +258,6 @@ def test_path_params_events(self, client: Parallel) -> None:
task_group_id="",
)
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_get_runs(self, client: Parallel) -> None:
task_group_stream = client.beta.task_group.get_runs(
@@ -271,7 +265,6 @@ def test_method_get_runs(self, client: Parallel) -> None:
)
task_group_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_get_runs_with_all_params(self, client: Parallel) -> None:
task_group_stream = client.beta.task_group.get_runs(
@@ -283,7 +276,6 @@ def test_method_get_runs_with_all_params(self, client: Parallel) -> None:
)
task_group_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_raw_response_get_runs(self, client: Parallel) -> None:
response = client.beta.task_group.with_raw_response.get_runs(
@@ -294,7 +286,6 @@ def test_raw_response_get_runs(self, client: Parallel) -> None:
stream = response.parse()
stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_streaming_response_get_runs(self, client: Parallel) -> None:
with client.beta.task_group.with_streaming_response.get_runs(
@@ -308,7 +299,6 @@ def test_streaming_response_get_runs(self, client: Parallel) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_path_params_get_runs(self, client: Parallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"):
@@ -513,7 +503,6 @@ async def test_path_params_add_runs(self, async_client: AsyncParallel) -> None:
],
)
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events(self, async_client: AsyncParallel) -> None:
task_group_stream = await async_client.beta.task_group.events(
@@ -521,7 +510,6 @@ async def test_method_events(self, async_client: AsyncParallel) -> None:
)
await task_group_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events_with_all_params(self, async_client: AsyncParallel) -> None:
task_group_stream = await async_client.beta.task_group.events(
@@ -531,7 +519,6 @@ async def test_method_events_with_all_params(self, async_client: AsyncParallel)
)
await task_group_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
response = await async_client.beta.task_group.with_raw_response.events(
@@ -542,7 +529,6 @@ async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
stream = await response.parse()
await stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_streaming_response_events(self, async_client: AsyncParallel) -> None:
async with async_client.beta.task_group.with_streaming_response.events(
@@ -556,7 +542,6 @@ async def test_streaming_response_events(self, async_client: AsyncParallel) -> N
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_path_params_events(self, async_client: AsyncParallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"):
@@ -564,7 +549,6 @@ async def test_path_params_events(self, async_client: AsyncParallel) -> None:
task_group_id="",
)
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_get_runs(self, async_client: AsyncParallel) -> None:
task_group_stream = await async_client.beta.task_group.get_runs(
@@ -572,7 +556,6 @@ async def test_method_get_runs(self, async_client: AsyncParallel) -> None:
)
await task_group_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_get_runs_with_all_params(self, async_client: AsyncParallel) -> None:
task_group_stream = await async_client.beta.task_group.get_runs(
@@ -584,7 +567,6 @@ async def test_method_get_runs_with_all_params(self, async_client: AsyncParallel
)
await task_group_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_raw_response_get_runs(self, async_client: AsyncParallel) -> None:
response = await async_client.beta.task_group.with_raw_response.get_runs(
@@ -595,7 +577,6 @@ async def test_raw_response_get_runs(self, async_client: AsyncParallel) -> None:
stream = await response.parse()
await stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_streaming_response_get_runs(self, async_client: AsyncParallel) -> None:
async with async_client.beta.task_group.with_streaming_response.get_runs(
@@ -609,7 +590,6 @@ async def test_streaming_response_get_runs(self, async_client: AsyncParallel) ->
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_path_params_get_runs(self, async_client: AsyncParallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"):
diff --git a/tests/api_resources/beta/test_task_run.py b/tests/api_resources/beta/test_task_run.py
index 794846d..c28344a 100644
--- a/tests/api_resources/beta/test_task_run.py
+++ b/tests/api_resources/beta/test_task_run.py
@@ -95,7 +95,6 @@ def test_streaming_response_create(self, client: Parallel) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events(self, client: Parallel) -> None:
task_run_stream = client.beta.task_run.events(
@@ -103,7 +102,6 @@ def test_method_events(self, client: Parallel) -> None:
)
task_run_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_raw_response_events(self, client: Parallel) -> None:
response = client.beta.task_run.with_raw_response.events(
@@ -114,7 +112,6 @@ def test_raw_response_events(self, client: Parallel) -> None:
stream = response.parse()
stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_streaming_response_events(self, client: Parallel) -> None:
with client.beta.task_run.with_streaming_response.events(
@@ -128,7 +125,6 @@ def test_streaming_response_events(self, client: Parallel) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_path_params_events(self, client: Parallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
@@ -265,7 +261,6 @@ async def test_streaming_response_create(self, async_client: AsyncParallel) -> N
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events(self, async_client: AsyncParallel) -> None:
task_run_stream = await async_client.beta.task_run.events(
@@ -273,7 +268,6 @@ async def test_method_events(self, async_client: AsyncParallel) -> None:
)
await task_run_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
response = await async_client.beta.task_run.with_raw_response.events(
@@ -284,7 +278,6 @@ async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
stream = await response.parse()
await stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_streaming_response_events(self, async_client: AsyncParallel) -> None:
async with async_client.beta.task_run.with_streaming_response.events(
@@ -298,7 +291,6 @@ async def test_streaming_response_events(self, async_client: AsyncParallel) -> N
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_path_params_events(self, async_client: AsyncParallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
From ebee2e761e2a8587cc6aa4c2decfd6310092b039 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 21 Mar 2026 03:42:12 +0000
Subject: [PATCH 06/32] chore(tests): bump steady to v0.19.4
---
scripts/mock | 6 +++---
scripts/test | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/scripts/mock b/scripts/mock
index 38201de..e1c19e8 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -22,9 +22,9 @@ echo "==> Starting mock server with URL ${URL}"
# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
- npm exec --package=@stdy/cli@0.19.3 -- steady --version
+ npm exec --package=@stdy/cli@0.19.4 -- steady --version
- npm exec --package=@stdy/cli@0.19.3 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-query-object-format=brackets "$URL" &> .stdy.log &
+ npm exec --package=@stdy/cli@0.19.4 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL" &> .stdy.log &
# Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
@@ -48,5 +48,5 @@ if [ "$1" == "--daemon" ]; then
echo
else
- npm exec --package=@stdy/cli@0.19.3 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-query-object-format=brackets "$URL"
+ npm exec --package=@stdy/cli@0.19.4 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index 2dfdc40..36fab0a 100755
--- a/scripts/test
+++ b/scripts/test
@@ -43,7 +43,7 @@ elif ! steady_is_running ; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.3 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-query-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.4 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
echo
exit 1
From 2774099f753bc0826e9c6b6e9fbb40d4e72e3405 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 21 Mar 2026 03:46:27 +0000
Subject: [PATCH 07/32] chore(tests): bump steady to v0.19.5
---
scripts/mock | 6 +++---
scripts/test | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/scripts/mock b/scripts/mock
index e1c19e8..ab814d3 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -22,9 +22,9 @@ echo "==> Starting mock server with URL ${URL}"
# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
- npm exec --package=@stdy/cli@0.19.4 -- steady --version
+ npm exec --package=@stdy/cli@0.19.5 -- steady --version
- npm exec --package=@stdy/cli@0.19.4 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL" &> .stdy.log &
+ npm exec --package=@stdy/cli@0.19.5 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL" &> .stdy.log &
# Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
@@ -48,5 +48,5 @@ if [ "$1" == "--daemon" ]; then
echo
else
- npm exec --package=@stdy/cli@0.19.4 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL"
+ npm exec --package=@stdy/cli@0.19.5 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index 36fab0a..d1c8e1a 100755
--- a/scripts/test
+++ b/scripts/test
@@ -43,7 +43,7 @@ elif ! steady_is_running ; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.4 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.5 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
echo
exit 1
From 1f4f6b0e5d2a46e1a5457879e937ea5aa551073c Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 24 Mar 2026 03:44:19 +0000
Subject: [PATCH 08/32] chore(internal): update gitignore
---
.gitignore | 1 +
1 file changed, 1 insertion(+)
diff --git a/.gitignore b/.gitignore
index 95ceb18..3824f4c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
.prism.log
+.stdy.log
_dev
__pycache__
From 8e3ee3d04dc149b2bcedb0e4acd92474fafd8d05 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 24 Mar 2026 03:48:49 +0000
Subject: [PATCH 09/32] chore(tests): bump steady to v0.19.6
---
scripts/mock | 6 +++---
scripts/test | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/scripts/mock b/scripts/mock
index ab814d3..b319bdf 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -22,9 +22,9 @@ echo "==> Starting mock server with URL ${URL}"
# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
- npm exec --package=@stdy/cli@0.19.5 -- steady --version
+ npm exec --package=@stdy/cli@0.19.6 -- steady --version
- npm exec --package=@stdy/cli@0.19.5 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL" &> .stdy.log &
+ npm exec --package=@stdy/cli@0.19.6 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL" &> .stdy.log &
# Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
@@ -48,5 +48,5 @@ if [ "$1" == "--daemon" ]; then
echo
else
- npm exec --package=@stdy/cli@0.19.5 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL"
+ npm exec --package=@stdy/cli@0.19.6 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index d1c8e1a..ab01948 100755
--- a/scripts/test
+++ b/scripts/test
@@ -43,7 +43,7 @@ elif ! steady_is_running ; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.5 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.6 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
echo
exit 1
From 403448c7760e70fe0f4b3998a20f048910e91cd6 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Mar 2026 02:48:28 +0000
Subject: [PATCH 10/32] chore(ci): skip lint on metadata-only changes
Note that we still want to run tests, as these depend on the metadata.
---
.github/workflows/ci.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 35d407b..99ec9fb 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -19,7 +19,7 @@ jobs:
timeout-minutes: 10
name: lint
runs-on: ${{ github.repository == 'stainless-sdks/parallel-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
- if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+ if: (github.event_name == 'push' || github.event.pull_request.head.repo.fork) && (github.event_name != 'push' || github.event.head_commit.message != 'codegen metadata')
steps:
- uses: actions/checkout@v6
@@ -38,7 +38,7 @@ jobs:
run: ./scripts/lint
build:
- if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+ if: (github.event_name == 'push' || github.event.pull_request.head.repo.fork) && (github.event_name != 'push' || github.event.head_commit.message != 'codegen metadata')
timeout-minutes: 10
name: build
permissions:
From 4bcf12e670b7997e23ade3d991711fe1ef741e35 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Mar 2026 02:48:53 +0000
Subject: [PATCH 11/32] chore(tests): bump steady to v0.19.7
---
scripts/mock | 6 +++---
scripts/test | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/scripts/mock b/scripts/mock
index b319bdf..09eb49f 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -22,9 +22,9 @@ echo "==> Starting mock server with URL ${URL}"
# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
- npm exec --package=@stdy/cli@0.19.6 -- steady --version
+ npm exec --package=@stdy/cli@0.19.7 -- steady --version
- npm exec --package=@stdy/cli@0.19.6 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL" &> .stdy.log &
+ npm exec --package=@stdy/cli@0.19.7 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL" &> .stdy.log &
# Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
@@ -48,5 +48,5 @@ if [ "$1" == "--daemon" ]; then
echo
else
- npm exec --package=@stdy/cli@0.19.6 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL"
+ npm exec --package=@stdy/cli@0.19.7 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index ab01948..e46b9b5 100755
--- a/scripts/test
+++ b/scripts/test
@@ -43,7 +43,7 @@ elif ! steady_is_running ; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.6 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.7 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
echo
exit 1
From 3df5972e34c9aa1709eabc4eb5b8cbbc0adccae2 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 27 Mar 2026 04:45:43 +0000
Subject: [PATCH 12/32] feat(internal): implement indices array format for
query and form serialization
---
scripts/mock | 4 ++--
scripts/test | 2 +-
src/parallel/_qs.py | 5 ++++-
3 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/scripts/mock b/scripts/mock
index 09eb49f..290e21b 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -24,7 +24,7 @@ if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
npm exec --package=@stdy/cli@0.19.7 -- steady --version
- npm exec --package=@stdy/cli@0.19.7 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL" &> .stdy.log &
+ npm exec --package=@stdy/cli@0.19.7 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
# Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
@@ -48,5 +48,5 @@ if [ "$1" == "--daemon" ]; then
echo
else
- npm exec --package=@stdy/cli@0.19.7 -- steady --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets "$URL"
+ npm exec --package=@stdy/cli@0.19.7 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index e46b9b5..661f9bf 100755
--- a/scripts/test
+++ b/scripts/test
@@ -43,7 +43,7 @@ elif ! steady_is_running ; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.7 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.7 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
echo
exit 1
diff --git a/src/parallel/_qs.py b/src/parallel/_qs.py
index ada6fd3..de8c99b 100644
--- a/src/parallel/_qs.py
+++ b/src/parallel/_qs.py
@@ -101,7 +101,10 @@ def _stringify_item(
items.extend(self._stringify_item(key, item, opts))
return items
elif array_format == "indices":
- raise NotImplementedError("The array indices format is not supported yet")
+ items = []
+ for i, item in enumerate(value):
+ items.extend(self._stringify_item(f"{key}[{i}]", item, opts))
+ return items
elif array_format == "brackets":
items = []
key = key + "[]"
From d82ce601c5687553b0e96990e418289fd8a14e00 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 1 Apr 2026 06:11:40 +0000
Subject: [PATCH 13/32] chore(tests): bump steady to v0.20.1
---
scripts/mock | 6 +++---
scripts/test | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/scripts/mock b/scripts/mock
index 290e21b..15c2994 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -22,9 +22,9 @@ echo "==> Starting mock server with URL ${URL}"
# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
- npm exec --package=@stdy/cli@0.19.7 -- steady --version
+ npm exec --package=@stdy/cli@0.20.1 -- steady --version
- npm exec --package=@stdy/cli@0.19.7 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
+ npm exec --package=@stdy/cli@0.20.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
# Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
@@ -48,5 +48,5 @@ if [ "$1" == "--daemon" ]; then
echo
else
- npm exec --package=@stdy/cli@0.19.7 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
+ npm exec --package=@stdy/cli@0.20.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index 661f9bf..c8e2e9d 100755
--- a/scripts/test
+++ b/scripts/test
@@ -43,7 +43,7 @@ elif ! steady_is_running ; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.7 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.20.1 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
echo
exit 1
From 746ca39c749f899dc9137a2f9be5de9aa39210c6 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 1 Apr 2026 06:15:30 +0000
Subject: [PATCH 14/32] chore(tests): bump steady to v0.20.2
---
scripts/mock | 6 +++---
scripts/test | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/scripts/mock b/scripts/mock
index 15c2994..5cd7c15 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -22,9 +22,9 @@ echo "==> Starting mock server with URL ${URL}"
# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
- npm exec --package=@stdy/cli@0.20.1 -- steady --version
+ npm exec --package=@stdy/cli@0.20.2 -- steady --version
- npm exec --package=@stdy/cli@0.20.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
+ npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
# Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
@@ -48,5 +48,5 @@ if [ "$1" == "--daemon" ]; then
echo
else
- npm exec --package=@stdy/cli@0.20.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
+ npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index c8e2e9d..b8143aa 100755
--- a/scripts/test
+++ b/scripts/test
@@ -43,7 +43,7 @@ elif ! steady_is_running ; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.20.1 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.20.2 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
echo
exit 1
From fae95f4f2c7cb60ebc0babc4fe540617e3334b2d Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 8 Apr 2026 00:16:09 +0000
Subject: [PATCH 15/32] feat(api): Update OpenAPI spec
---
.stats.yml | 4 ++--
src/parallel/resources/beta/task_group.py | 18 ++++++++++++++++--
.../types/beta/task_group_add_runs_params.py | 2 ++
tests/api_resources/beta/test_task_group.py | 2 ++
4 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index d30019d..ef170c0 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 22
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-970b780e86490322cc3c7e2b57f140ca6766a3d9f6e0d3402837ebaf7c2183fc.yml
-openapi_spec_hash: 34f784ce2dec796048e6780924bae08f
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-8870e80e4ae564fa22febb630c65e14d914b4f48fcf76a9953c99e860704764a.yml
+openapi_spec_hash: fddc70c809ae1d3101d4b805265abb5a
config_hash: a398d153133d8884bed4e5256a0ae818
diff --git a/src/parallel/resources/beta/task_group.py b/src/parallel/resources/beta/task_group.py
index 54f8cb1..c54e3da 100644
--- a/src/parallel/resources/beta/task_group.py
+++ b/src/parallel/resources/beta/task_group.py
@@ -141,6 +141,7 @@ def add_runs(
task_group_id: str,
*,
inputs: Iterable[BetaRunInputParam],
+ refresh_status: bool | Omit = omit,
default_task_spec: Optional[TaskSpecParam] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -197,7 +198,13 @@ def add_runs(
task_group_add_runs_params.TaskGroupAddRunsParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {"refresh_status": refresh_status}, task_group_add_runs_params.TaskGroupAddRunsParams
+ ),
),
cast_to=TaskGroupRunResponse,
)
@@ -428,6 +435,7 @@ async def add_runs(
task_group_id: str,
*,
inputs: Iterable[BetaRunInputParam],
+ refresh_status: bool | Omit = omit,
default_task_spec: Optional[TaskSpecParam] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -484,7 +492,13 @@ async def add_runs(
task_group_add_runs_params.TaskGroupAddRunsParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {"refresh_status": refresh_status}, task_group_add_runs_params.TaskGroupAddRunsParams
+ ),
),
cast_to=TaskGroupRunResponse,
)
diff --git a/src/parallel/types/beta/task_group_add_runs_params.py b/src/parallel/types/beta/task_group_add_runs_params.py
index 5732934..cee578d 100644
--- a/src/parallel/types/beta/task_group_add_runs_params.py
+++ b/src/parallel/types/beta/task_group_add_runs_params.py
@@ -21,6 +21,8 @@ class TaskGroupAddRunsParams(TypedDict, total=False):
split them across multiple TaskGroup POST requests.
"""
+ refresh_status: bool
+
default_task_spec: Optional[TaskSpecParam]
"""Specification for a task.
diff --git a/tests/api_resources/beta/test_task_group.py b/tests/api_resources/beta/test_task_group.py
index f186596..a9c4e42 100644
--- a/tests/api_resources/beta/test_task_group.py
+++ b/tests/api_resources/beta/test_task_group.py
@@ -147,6 +147,7 @@ def test_method_add_runs_with_all_params(self, client: Parallel) -> None:
},
}
],
+ refresh_status=True,
default_task_spec={
"output_schema": {
"json_schema": {
@@ -438,6 +439,7 @@ async def test_method_add_runs_with_all_params(self, async_client: AsyncParallel
},
}
],
+ refresh_status=True,
default_task_spec={
"output_schema": {
"json_schema": {
From 7a4d651c3e9f35334175f82daaf6392e9f76dee5 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 8 Apr 2026 00:25:34 +0000
Subject: [PATCH 16/32] feat(api): Remove full_content from OpenAPI Spec
---
.stats.yml | 4 +--
src/parallel/resources/beta/beta.py | 14 ---------
.../types/beta/beta_extract_params.py | 29 ++-----------------
src/parallel/types/beta/extract_result.py | 3 --
tests/api_resources/test_beta.py | 2 --
5 files changed, 4 insertions(+), 48 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index ef170c0..b682aae 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 22
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-8870e80e4ae564fa22febb630c65e14d914b4f48fcf76a9953c99e860704764a.yml
-openapi_spec_hash: fddc70c809ae1d3101d4b805265abb5a
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-728ba63c23bc2eb4fe37b429fb084ed7600ae50c8c652aeb0c787216c3ece07a.yml
+openapi_spec_hash: 3568175d488fc927f1710b3ebca87cfc
config_hash: a398d153133d8884bed4e5256a0ae818
diff --git a/src/parallel/resources/beta/beta.py b/src/parallel/resources/beta/beta.py
index c2f4368..008233f 100644
--- a/src/parallel/resources/beta/beta.py
+++ b/src/parallel/resources/beta/beta.py
@@ -109,7 +109,6 @@ def extract(
urls: SequenceNotStr[str],
excerpts: beta_extract_params.Excerpts | Omit = omit,
fetch_policy: Optional[FetchPolicyParam] | Omit = omit,
- full_content: beta_extract_params.FullContent | Omit = omit,
objective: Optional[str] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
@@ -128,14 +127,9 @@ def extract(
Args:
excerpts: Include excerpts from each URL relevant to the search objective and queries.
- Note that if neither objective nor search_queries is provided, excerpts are
- redundant with full content.
fetch_policy: Policy for live fetching web results.
- full_content: Include full content from each URL. Note that if neither objective nor
- search_queries is provided, excerpts are redundant with full content.
-
objective: If provided, focuses extracted content on the specified search objective.
search_queries: If provided, focuses extracted content on the specified keyword search queries.
@@ -168,7 +162,6 @@ def extract(
"urls": urls,
"excerpts": excerpts,
"fetch_policy": fetch_policy,
- "full_content": full_content,
"objective": objective,
"search_queries": search_queries,
},
@@ -334,7 +327,6 @@ async def extract(
urls: SequenceNotStr[str],
excerpts: beta_extract_params.Excerpts | Omit = omit,
fetch_policy: Optional[FetchPolicyParam] | Omit = omit,
- full_content: beta_extract_params.FullContent | Omit = omit,
objective: Optional[str] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
@@ -353,14 +345,9 @@ async def extract(
Args:
excerpts: Include excerpts from each URL relevant to the search objective and queries.
- Note that if neither objective nor search_queries is provided, excerpts are
- redundant with full content.
fetch_policy: Policy for live fetching web results.
- full_content: Include full content from each URL. Note that if neither objective nor
- search_queries is provided, excerpts are redundant with full content.
-
objective: If provided, focuses extracted content on the specified search objective.
search_queries: If provided, focuses extracted content on the specified keyword search queries.
@@ -393,7 +380,6 @@ async def extract(
"urls": urls,
"excerpts": excerpts,
"fetch_policy": fetch_policy,
- "full_content": full_content,
"objective": objective,
"search_queries": search_queries,
},
diff --git a/src/parallel/types/beta/beta_extract_params.py b/src/parallel/types/beta/beta_extract_params.py
index 7a87574..79c0262 100644
--- a/src/parallel/types/beta/beta_extract_params.py
+++ b/src/parallel/types/beta/beta_extract_params.py
@@ -11,29 +11,18 @@
from .parallel_beta_param import ParallelBetaParam
from .excerpt_settings_param import ExcerptSettingsParam
-__all__ = ["BetaExtractParams", "Excerpts", "FullContent", "FullContentFullContentSettings"]
+__all__ = ["BetaExtractParams", "Excerpts"]
class BetaExtractParams(TypedDict, total=False):
urls: Required[SequenceNotStr[str]]
excerpts: Excerpts
- """Include excerpts from each URL relevant to the search objective and queries.
-
- Note that if neither objective nor search_queries is provided, excerpts are
- redundant with full content.
- """
+ """Include excerpts from each URL relevant to the search objective and queries."""
fetch_policy: Optional[FetchPolicyParam]
"""Policy for live fetching web results."""
- full_content: FullContent
- """Include full content from each URL.
-
- Note that if neither objective nor search_queries is provided, excerpts are
- redundant with full content.
- """
-
objective: Optional[str]
"""If provided, focuses extracted content on the specified search objective."""
@@ -45,17 +34,3 @@ class BetaExtractParams(TypedDict, total=False):
Excerpts: TypeAlias = Union[bool, ExcerptSettingsParam]
-
-
-class FullContentFullContentSettings(TypedDict, total=False):
- """Optional settings for returning full content."""
-
- max_chars_per_result: Optional[int]
- """
- Optional limit on the number of characters to include in the full content for
- each url. Full content always starts at the beginning of the page and is
- truncated at the limit if necessary.
- """
-
-
-FullContent: TypeAlias = Union[bool, FullContentFullContentSettings]
diff --git a/src/parallel/types/beta/extract_result.py b/src/parallel/types/beta/extract_result.py
index 8d74038..9b5e594 100644
--- a/src/parallel/types/beta/extract_result.py
+++ b/src/parallel/types/beta/extract_result.py
@@ -16,9 +16,6 @@ class ExtractResult(BaseModel):
excerpts: Optional[List[str]] = None
"""Relevant excerpted content from the URL, formatted as markdown."""
- full_content: Optional[str] = None
- """Full content from the URL formatted as markdown, if requested."""
-
publish_date: Optional[str] = None
"""Publish date of the webpage in YYYY-MM-DD format, if available."""
diff --git a/tests/api_resources/test_beta.py b/tests/api_resources/test_beta.py
index 1c98ae1..a074455 100644
--- a/tests/api_resources/test_beta.py
+++ b/tests/api_resources/test_beta.py
@@ -38,7 +38,6 @@ def test_method_extract_with_all_params(self, client: Parallel) -> None:
"max_age_seconds": 86400,
"timeout_seconds": 60,
},
- full_content=True,
objective="objective",
search_queries=["string"],
betas=["mcp-server-2025-07-17"],
@@ -144,7 +143,6 @@ async def test_method_extract_with_all_params(self, async_client: AsyncParallel)
"max_age_seconds": 86400,
"timeout_seconds": 60,
},
- full_content=True,
objective="objective",
search_queries=["string"],
betas=["mcp-server-2025-07-17"],
From 08080bc22c415881cc9f9b05bc22f09ab83c7e8d Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 8 Apr 2026 04:01:28 +0000
Subject: [PATCH 17/32] fix(client): preserve hardcoded query params when
merging with user params
---
src/parallel/_base_client.py | 4 +++
tests/test_client.py | 48 ++++++++++++++++++++++++++++++++++++
2 files changed, 52 insertions(+)
diff --git a/src/parallel/_base_client.py b/src/parallel/_base_client.py
index 5128667..b283b92 100644
--- a/src/parallel/_base_client.py
+++ b/src/parallel/_base_client.py
@@ -540,6 +540,10 @@ def _build_request(
files = cast(HttpxRequestFiles, ForceMultipartDict())
prepared_url = self._prepare_url(options.url)
+ # preserve hard-coded query params from the url
+ if params and prepared_url.query:
+ params = {**dict(prepared_url.params.items()), **params}
+ prepared_url = prepared_url.copy_with(raw_path=prepared_url.raw_path.split(b"?", 1)[0])
if "_" in prepared_url.host:
# work around https://github.com/encode/httpx/discussions/2880
kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")}
diff --git a/tests/test_client.py b/tests/test_client.py
index c2b772f..5e022bd 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -429,6 +429,30 @@ def test_default_query_option(self) -> None:
client.close()
+ def test_hardcoded_query_params_in_url(self, client: Parallel) -> None:
+ request = client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true"))
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true"}
+
+ request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/foo?beta=true",
+ params={"limit": "10", "page": "abc"},
+ )
+ )
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"}
+
+ request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/files/a%2Fb?beta=true",
+ params={"limit": "10"},
+ )
+ )
+ assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10"
+
def test_request_extra_json(self, client: Parallel) -> None:
request = client._build_request(
FinalRequestOptions(
@@ -1334,6 +1358,30 @@ async def test_default_query_option(self) -> None:
await client.close()
+ async def test_hardcoded_query_params_in_url(self, async_client: AsyncParallel) -> None:
+ request = async_client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true"))
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true"}
+
+ request = async_client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/foo?beta=true",
+ params={"limit": "10", "page": "abc"},
+ )
+ )
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"}
+
+ request = async_client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/files/a%2Fb?beta=true",
+ params={"limit": "10"},
+ )
+ )
+ assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10"
+
def test_request_extra_json(self, client: Parallel) -> None:
request = client._build_request(
FinalRequestOptions(
From ea487f32b73aee955f662d1fa225841421ce1ba3 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 10 Apr 2026 23:31:08 +0000
Subject: [PATCH 18/32] feat(api): Add Search and Extract v1 and associated
types
---
.stats.yml | 8 +-
api.md | 22 ++
src/parallel/_client.py | 362 +++++++++++++++++-
src/parallel/resources/beta/api.md | 6 +-
src/parallel/resources/beta/beta.py | 120 +++++-
src/parallel/resources/beta/task_group.py | 18 +-
src/parallel/resources/beta/task_run.py | 14 +
src/parallel/resources/task_run.py | 14 +
src/parallel/types/__init__.py | 10 +
.../types/beta/beta_extract_params.py | 31 +-
src/parallel/types/beta/beta_search_params.py | 5 +-
src/parallel/types/beta/extract_error.py | 19 +-
src/parallel/types/beta/extract_response.py | 4 +-
src/parallel/types/beta/extract_result.py | 3 +
src/parallel/types/beta/fetch_policy_param.py | 24 +-
src/parallel/types/beta/search_result.py | 2 +-
src/parallel/types/beta/usage_item.py | 12 +-
src/parallel/types/client_extract_params.py | 77 ++++
src/parallel/types/client_search_params.py | 72 ++++
src/parallel/types/excerpt_settings_param.py | 19 +
src/parallel/types/extract_error.py | 22 ++
src/parallel/types/extract_response.py | 30 ++
src/parallel/types/extract_result.py | 26 ++
src/parallel/types/fetch_policy_param.py | 27 ++
src/parallel/types/search_result.py | 26 ++
src/parallel/types/usage_item.py | 15 +
src/parallel/types/web_search_result.py | 23 ++
tests/api_resources/test_beta.py | 9 +-
tests/api_resources/test_client.py | 239 ++++++++++++
29 files changed, 1164 insertions(+), 95 deletions(-)
create mode 100644 src/parallel/types/client_extract_params.py
create mode 100644 src/parallel/types/client_search_params.py
create mode 100644 src/parallel/types/excerpt_settings_param.py
create mode 100644 src/parallel/types/extract_error.py
create mode 100644 src/parallel/types/extract_response.py
create mode 100644 src/parallel/types/extract_result.py
create mode 100644 src/parallel/types/fetch_policy_param.py
create mode 100644 src/parallel/types/search_result.py
create mode 100644 src/parallel/types/usage_item.py
create mode 100644 src/parallel/types/web_search_result.py
create mode 100644 tests/api_resources/test_client.py
diff --git a/.stats.yml b/.stats.yml
index b682aae..c8b869e 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 22
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-728ba63c23bc2eb4fe37b429fb084ed7600ae50c8c652aeb0c787216c3ece07a.yml
-openapi_spec_hash: 3568175d488fc927f1710b3ebca87cfc
-config_hash: a398d153133d8884bed4e5256a0ae818
+configured_endpoints: 23
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-44048676c9b07d49ed9dbee5fad53d145eddaea5ba682b6557681c5a7e04f8ed.yml
+openapi_spec_hash: e239787937742b1bc15e7f211fe3c518
+config_hash: 42d4e6039ef223ba2a824414a94da176
diff --git a/api.md b/api.md
index 66e3651..62ef485 100644
--- a/api.md
+++ b/api.md
@@ -4,6 +4,28 @@
from parallel.types import ErrorObject, ErrorResponse, SourcePolicy, Warning
```
+# Parallel
+
+Types:
+
+```python
+from parallel.types import (
+ ExcerptSettings,
+ ExtractError,
+ ExtractResponse,
+ ExtractResult,
+ FetchPolicy,
+ SearchResult,
+ UsageItem,
+ WebSearchResult,
+)
+```
+
+Methods:
+
+- client.extract(\*\*params) -> ExtractResponse
+- client.search(\*\*params) -> SearchResult
+
# TaskRun
Types:
diff --git a/src/parallel/_client.py b/src/parallel/_client.py
index cf3f898..21a5507 100644
--- a/src/parallel/_client.py
+++ b/src/parallel/_client.py
@@ -3,32 +3,52 @@
from __future__ import annotations
import os
-from typing import TYPE_CHECKING, Any, Mapping
-from typing_extensions import Self, override
+from typing import TYPE_CHECKING, Any, Mapping, Optional
+from typing_extensions import Self, Literal, override
import httpx
from . import _exceptions
from ._qs import Querystring
+from .types import client_search_params, client_extract_params
from ._types import (
+ Body,
Omit,
+ Query,
+ Headers,
Timeout,
NotGiven,
Transport,
ProxiesTypes,
RequestOptions,
+ SequenceNotStr,
+ omit,
not_given,
)
-from ._utils import is_given, get_async_library
+from ._utils import (
+ is_given,
+ maybe_transform,
+ get_async_library,
+ async_maybe_transform,
+)
from ._compat import cached_property
from ._version import __version__
+from ._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
from ._exceptions import ParallelError, APIStatusError
from ._base_client import (
DEFAULT_MAX_RETRIES,
SyncAPIClient,
AsyncAPIClient,
+ make_request_options,
)
+from .types.search_result import SearchResult
+from .types.extract_response import ExtractResponse
if TYPE_CHECKING:
from .resources import beta, task_run
@@ -108,6 +128,13 @@ def task_run(self) -> TaskRunResource:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
from .resources.task_run import TaskRunResource
@@ -198,6 +225,139 @@ def copy(
# client.with_options(timeout=10).foo.create(...)
with_options = copy
+ def extract(
+ self,
+ *,
+ urls: SequenceNotStr[str],
+ advanced: Optional[client_extract_params.Advanced] | Omit = omit,
+ client_model: Optional[str] | Omit = omit,
+ max_chars_total: Optional[int] | Omit = omit,
+ objective: Optional[str] | Omit = omit,
+ search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ExtractResponse:
+ """
+ Extracts relevant content from specific web URLs.
+
+ Args:
+ urls: URLs to extract content from. Up to 20 URLs.
+
+ advanced: Advanced extract configuration.
+
+ client_model: The model generating this request and consuming the results. Enables
+ optimizations and tailors default settings for the model's capabilities.
+
+ max_chars_total: Upper bound on total characters across excerpts from all extracted results. Does
+ not affect full_content if requested. Default is dynamic based on urls,
+ objective, and client_model.
+
+ objective: As in SearchRequest, a natural-language description of the underlying question
+ or goal driving the request. Used together with search_queries to focus excerpts
+ on the most relevant content.
+
+ search_queries: Optional keyword search queries, as in SearchRequest. Used together with
+ objective to focus excerpts on the most relevant content.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self.post(
+ "/v1/extract",
+ body=maybe_transform(
+ {
+ "urls": urls,
+ "advanced": advanced,
+ "client_model": client_model,
+ "max_chars_total": max_chars_total,
+ "objective": objective,
+ "search_queries": search_queries,
+ },
+ client_extract_params.ClientExtractParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ExtractResponse,
+ )
+
+ def search(
+ self,
+ *,
+ search_queries: SequenceNotStr[str],
+ advanced: Optional[client_search_params.Advanced] | Omit = omit,
+ client_model: Optional[str] | Omit = omit,
+ max_chars_total: Optional[int] | Omit = omit,
+ mode: Optional[Literal["basic", "standard"]] | Omit = omit,
+ objective: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SearchResult:
+ """
+ Searches the web.
+
+ Args:
+ search_queries: Concise keyword search queries, 3-6 words each. At least one query is required,
+ provide 2-3 for best results. Used together with objective to focus results on
+ the most relevant content.
+
+ advanced: Advanced search configuration.
+
+ client_model: The model generating this request and consuming the results. Enables
+ optimizations and tailors default settings for the model's capabilities.
+
+ max_chars_total: Upper bound on total characters across excerpts from all results. Default is
+ dynamic based on search_queries, objective, and client_model.
+
+ mode: Search mode preset: supported values are basic and standard. Basic mode offers
+ the lowest latency and works best with 2-3 high-quality search_queries. Standard
+ mode provides higher quality with more advanced retrieval and compression.
+
+ objective: Natural-language description of the underlying question or goal driving the
+ search. Used together with search_queries to focus results on the most relevant
+ content. Should be self-contained with enough context to understand the intent
+ of the search.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self.post(
+ "/v1/search",
+ body=maybe_transform(
+ {
+ "search_queries": search_queries,
+ "advanced": advanced,
+ "client_model": client_model,
+ "max_chars_total": max_chars_total,
+ "mode": mode,
+ "objective": objective,
+ },
+ client_search_params.ClientSearchParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SearchResult,
+ )
+
@override
def _make_status_error(
self,
@@ -293,6 +453,13 @@ def task_run(self) -> AsyncTaskRunResource:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
from .resources.task_run import AsyncTaskRunResource
@@ -383,6 +550,139 @@ def copy(
# client.with_options(timeout=10).foo.create(...)
with_options = copy
+ async def extract(
+ self,
+ *,
+ urls: SequenceNotStr[str],
+ advanced: Optional[client_extract_params.Advanced] | Omit = omit,
+ client_model: Optional[str] | Omit = omit,
+ max_chars_total: Optional[int] | Omit = omit,
+ objective: Optional[str] | Omit = omit,
+ search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ExtractResponse:
+ """
+ Extracts relevant content from specific web URLs.
+
+ Args:
+ urls: URLs to extract content from. Up to 20 URLs.
+
+ advanced: Advanced extract configuration.
+
+ client_model: The model generating this request and consuming the results. Enables
+ optimizations and tailors default settings for the model's capabilities.
+
+ max_chars_total: Upper bound on total characters across excerpts from all extracted results. Does
+ not affect full_content if requested. Default is dynamic based on urls,
+ objective, and client_model.
+
+ objective: As in SearchRequest, a natural-language description of the underlying question
+ or goal driving the request. Used together with search_queries to focus excerpts
+ on the most relevant content.
+
+ search_queries: Optional keyword search queries, as in SearchRequest. Used together with
+ objective to focus excerpts on the most relevant content.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self.post(
+ "/v1/extract",
+ body=await async_maybe_transform(
+ {
+ "urls": urls,
+ "advanced": advanced,
+ "client_model": client_model,
+ "max_chars_total": max_chars_total,
+ "objective": objective,
+ "search_queries": search_queries,
+ },
+ client_extract_params.ClientExtractParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ExtractResponse,
+ )
+
+ async def search(
+ self,
+ *,
+ search_queries: SequenceNotStr[str],
+ advanced: Optional[client_search_params.Advanced] | Omit = omit,
+ client_model: Optional[str] | Omit = omit,
+ max_chars_total: Optional[int] | Omit = omit,
+ mode: Optional[Literal["basic", "standard"]] | Omit = omit,
+ objective: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SearchResult:
+ """
+ Searches the web.
+
+ Args:
+ search_queries: Concise keyword search queries, 3-6 words each. At least one query is required,
+ provide 2-3 for best results. Used together with objective to focus results on
+ the most relevant content.
+
+ advanced: Advanced search configuration.
+
+ client_model: The model generating this request and consuming the results. Enables
+ optimizations and tailors default settings for the model's capabilities.
+
+ max_chars_total: Upper bound on total characters across excerpts from all results. Default is
+ dynamic based on search_queries, objective, and client_model.
+
+ mode: Search mode preset: supported values are basic and standard. Basic mode offers
+ the lowest latency and works best with 2-3 high-quality search_queries. Standard
+ mode provides higher quality with more advanced retrieval and compression.
+
+ objective: Natural-language description of the underlying question or goal driving the
+ search. Used together with search_queries to focus results on the most relevant
+ content. Should be self-contained with enough context to understand the intent
+ of the search.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self.post(
+ "/v1/search",
+ body=await async_maybe_transform(
+ {
+ "search_queries": search_queries,
+ "advanced": advanced,
+ "client_model": client_model,
+ "max_chars_total": max_chars_total,
+ "mode": mode,
+ "objective": objective,
+ },
+ client_search_params.ClientSearchParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SearchResult,
+ )
+
@override
def _make_status_error(
self,
@@ -423,12 +723,26 @@ class ParallelWithRawResponse:
def __init__(self, client: Parallel) -> None:
self._client = client
+ self.extract = to_raw_response_wrapper(
+ client.extract,
+ )
+ self.search = to_raw_response_wrapper(
+ client.search,
+ )
+
@cached_property
def task_run(self) -> task_run.TaskRunResourceWithRawResponse:
"""The Task API executes web research and extraction tasks.
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
from .resources.task_run import TaskRunResourceWithRawResponse
@@ -447,12 +761,26 @@ class AsyncParallelWithRawResponse:
def __init__(self, client: AsyncParallel) -> None:
self._client = client
+ self.extract = async_to_raw_response_wrapper(
+ client.extract,
+ )
+ self.search = async_to_raw_response_wrapper(
+ client.search,
+ )
+
@cached_property
def task_run(self) -> task_run.AsyncTaskRunResourceWithRawResponse:
"""The Task API executes web research and extraction tasks.
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
from .resources.task_run import AsyncTaskRunResourceWithRawResponse
@@ -471,12 +799,26 @@ class ParallelWithStreamedResponse:
def __init__(self, client: Parallel) -> None:
self._client = client
+ self.extract = to_streamed_response_wrapper(
+ client.extract,
+ )
+ self.search = to_streamed_response_wrapper(
+ client.search,
+ )
+
@cached_property
def task_run(self) -> task_run.TaskRunResourceWithStreamingResponse:
"""The Task API executes web research and extraction tasks.
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
from .resources.task_run import TaskRunResourceWithStreamingResponse
@@ -495,12 +837,26 @@ class AsyncParallelWithStreamedResponse:
def __init__(self, client: AsyncParallel) -> None:
self._client = client
+ self.extract = async_to_streamed_response_wrapper(
+ client.extract,
+ )
+ self.search = async_to_streamed_response_wrapper(
+ client.search,
+ )
+
@cached_property
def task_run(self) -> task_run.AsyncTaskRunResourceWithStreamingResponse:
"""The Task API executes web research and extraction tasks.
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
from .resources.task_run import AsyncTaskRunResourceWithStreamingResponse
diff --git a/src/parallel/resources/beta/api.md b/src/parallel/resources/beta/api.md
index 0b56fe7..8652cf3 100644
--- a/src/parallel/resources/beta/api.md
+++ b/src/parallel/resources/beta/api.md
@@ -5,13 +5,13 @@ Types:
```python
from parallel.types.beta import (
ExcerptSettings,
- ExtractError,
ExtractResponse,
ExtractResult,
- FetchPolicy,
SearchResult,
- UsageItem,
WebSearchResult,
+ ExtractError,
+ FetchPolicy,
+ UsageItem,
)
```
diff --git a/src/parallel/resources/beta/beta.py b/src/parallel/resources/beta/beta.py
index 008233f..d32d2a6 100644
--- a/src/parallel/resources/beta/beta.py
+++ b/src/parallel/resources/beta/beta.py
@@ -45,8 +45,8 @@
from ...types.beta import beta_search_params, beta_extract_params
from ..._base_client import make_request_options
from ...types.beta.search_result import SearchResult
+from ...types.fetch_policy_param import FetchPolicyParam
from ...types.beta.extract_response import ExtractResponse
-from ...types.beta.fetch_policy_param import FetchPolicyParam
from ...types.beta.parallel_beta_param import ParallelBetaParam
from ...types.beta.excerpt_settings_param import ExcerptSettingsParam
from ...types.shared_params.source_policy import SourcePolicy
@@ -61,19 +61,29 @@ def task_run(self) -> TaskRunResource:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
return TaskRunResource(self._client)
@cached_property
def task_group(self) -> TaskGroupResource:
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
return TaskGroupResource(self._client)
@@ -109,6 +119,7 @@ def extract(
urls: SequenceNotStr[str],
excerpts: beta_extract_params.Excerpts | Omit = omit,
fetch_policy: Optional[FetchPolicyParam] | Omit = omit,
+ full_content: beta_extract_params.FullContent | Omit = omit,
objective: Optional[str] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
@@ -127,9 +138,14 @@ def extract(
Args:
excerpts: Include excerpts from each URL relevant to the search objective and queries.
+ Note that if neither objective nor search_queries is provided, excerpts are
+ redundant with full content.
fetch_policy: Policy for live fetching web results.
+ full_content: Include full content from each URL. Note that if neither objective nor
+ search_queries is provided, excerpts are redundant with full content.
+
objective: If provided, focuses extracted content on the specified search objective.
search_queries: If provided, focuses extracted content on the specified keyword search queries.
@@ -162,6 +178,7 @@ def extract(
"urls": urls,
"excerpts": excerpts,
"fetch_policy": fetch_policy,
+ "full_content": full_content,
"objective": objective,
"search_queries": search_queries,
},
@@ -178,6 +195,7 @@ def search(
*,
excerpts: ExcerptSettingsParam | Omit = omit,
fetch_policy: Optional[FetchPolicyParam] | Omit = omit,
+ location: Optional[str] | Omit = omit,
max_chars_per_result: Optional[int] | Omit = omit,
max_results: Optional[int] | Omit = omit,
mode: Optional[Literal["one-shot", "agentic", "fast"]] | Omit = omit,
@@ -201,6 +219,8 @@ def search(
fetch_policy: Policy for live fetching web results.
+ location: ISO 3166-1 alpha-2 country code for geo-targeted search results.
+
max_chars_per_result: DEPRECATED: Use `excerpts.max_chars_per_result` instead.
max_results: Upper bound on the number of results to return. Defaults to 10 if not provided.
@@ -255,6 +275,7 @@ def search(
{
"excerpts": excerpts,
"fetch_policy": fetch_policy,
+ "location": location,
"max_chars_per_result": max_chars_per_result,
"max_results": max_results,
"mode": mode,
@@ -279,19 +300,29 @@ def task_run(self) -> AsyncTaskRunResource:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
return AsyncTaskRunResource(self._client)
@cached_property
def task_group(self) -> AsyncTaskGroupResource:
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
return AsyncTaskGroupResource(self._client)
@@ -327,6 +358,7 @@ async def extract(
urls: SequenceNotStr[str],
excerpts: beta_extract_params.Excerpts | Omit = omit,
fetch_policy: Optional[FetchPolicyParam] | Omit = omit,
+ full_content: beta_extract_params.FullContent | Omit = omit,
objective: Optional[str] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
@@ -345,9 +377,14 @@ async def extract(
Args:
excerpts: Include excerpts from each URL relevant to the search objective and queries.
+ Note that if neither objective nor search_queries is provided, excerpts are
+ redundant with full content.
fetch_policy: Policy for live fetching web results.
+ full_content: Include full content from each URL. Note that if neither objective nor
+ search_queries is provided, excerpts are redundant with full content.
+
objective: If provided, focuses extracted content on the specified search objective.
search_queries: If provided, focuses extracted content on the specified keyword search queries.
@@ -380,6 +417,7 @@ async def extract(
"urls": urls,
"excerpts": excerpts,
"fetch_policy": fetch_policy,
+ "full_content": full_content,
"objective": objective,
"search_queries": search_queries,
},
@@ -396,6 +434,7 @@ async def search(
*,
excerpts: ExcerptSettingsParam | Omit = omit,
fetch_policy: Optional[FetchPolicyParam] | Omit = omit,
+ location: Optional[str] | Omit = omit,
max_chars_per_result: Optional[int] | Omit = omit,
max_results: Optional[int] | Omit = omit,
mode: Optional[Literal["one-shot", "agentic", "fast"]] | Omit = omit,
@@ -419,6 +458,8 @@ async def search(
fetch_policy: Policy for live fetching web results.
+ location: ISO 3166-1 alpha-2 country code for geo-targeted search results.
+
max_chars_per_result: DEPRECATED: Use `excerpts.max_chars_per_result` instead.
max_results: Upper bound on the number of results to return. Defaults to 10 if not provided.
@@ -473,6 +514,7 @@ async def search(
{
"excerpts": excerpts,
"fetch_policy": fetch_policy,
+ "location": location,
"max_chars_per_result": max_chars_per_result,
"max_results": max_results,
"mode": mode,
@@ -507,19 +549,29 @@ def task_run(self) -> TaskRunResourceWithRawResponse:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
return TaskRunResourceWithRawResponse(self._beta.task_run)
@cached_property
def task_group(self) -> TaskGroupResourceWithRawResponse:
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
return TaskGroupResourceWithRawResponse(self._beta.task_group)
@@ -548,19 +600,29 @@ def task_run(self) -> AsyncTaskRunResourceWithRawResponse:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
return AsyncTaskRunResourceWithRawResponse(self._beta.task_run)
@cached_property
def task_group(self) -> AsyncTaskGroupResourceWithRawResponse:
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
return AsyncTaskGroupResourceWithRawResponse(self._beta.task_group)
@@ -589,19 +651,29 @@ def task_run(self) -> TaskRunResourceWithStreamingResponse:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
return TaskRunResourceWithStreamingResponse(self._beta.task_run)
@cached_property
def task_group(self) -> TaskGroupResourceWithStreamingResponse:
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
return TaskGroupResourceWithStreamingResponse(self._beta.task_group)
@@ -630,19 +702,29 @@ def task_run(self) -> AsyncTaskRunResourceWithStreamingResponse:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
return AsyncTaskRunResourceWithStreamingResponse(self._beta.task_run)
@cached_property
def task_group(self) -> AsyncTaskGroupResourceWithStreamingResponse:
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
return AsyncTaskGroupResourceWithStreamingResponse(self._beta.task_group)
diff --git a/src/parallel/resources/beta/task_group.py b/src/parallel/resources/beta/task_group.py
index c54e3da..5ccb046 100644
--- a/src/parallel/resources/beta/task_group.py
+++ b/src/parallel/resources/beta/task_group.py
@@ -38,14 +38,17 @@
class TaskGroupResource(SyncAPIResource):
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
@cached_property
@@ -332,14 +335,17 @@ def get_runs(
class AsyncTaskGroupResource(AsyncAPIResource):
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
@cached_property
diff --git a/src/parallel/resources/beta/task_run.py b/src/parallel/resources/beta/task_run.py
index b19286a..9fd6752 100644
--- a/src/parallel/resources/beta/task_run.py
+++ b/src/parallel/resources/beta/task_run.py
@@ -37,6 +37,13 @@ class TaskRunResource(SyncAPIResource):
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
@cached_property
@@ -265,6 +272,13 @@ class AsyncTaskRunResource(AsyncAPIResource):
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
@cached_property
diff --git a/src/parallel/resources/task_run.py b/src/parallel/resources/task_run.py
index af330cd..4efc8d9 100644
--- a/src/parallel/resources/task_run.py
+++ b/src/parallel/resources/task_run.py
@@ -41,6 +41,13 @@ class TaskRunResource(SyncAPIResource):
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
@cached_property
@@ -340,6 +347,13 @@ class AsyncTaskRunResource(AsyncAPIResource):
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
@cached_property
diff --git a/src/parallel/types/__init__.py b/src/parallel/types/__init__.py
index 15d056e..29fd1e0 100644
--- a/src/parallel/types/__init__.py
+++ b/src/parallel/types/__init__.py
@@ -20,8 +20,18 @@
from .auto_schema_param import AutoSchemaParam as AutoSchemaParam
from .json_schema_param import JsonSchemaParam as JsonSchemaParam
from .text_schema_param import TextSchemaParam as TextSchemaParam
+from .web_search_result import WebSearchResult as WebSearchResult
+from .fetch_policy_param import FetchPolicyParam as FetchPolicyParam
+from .client_search_params import ClientSearchParams as ClientSearchParams
from .task_run_json_output import TaskRunJsonOutput as TaskRunJsonOutput
from .task_run_text_output import TaskRunTextOutput as TaskRunTextOutput
from .parsed_task_run_result import ParsedTaskRunResult as ParsedTaskRunResult
+from .usage_item import UsageItem as UsageItem
+from .extract_error import ExtractError as ExtractError
+from .search_result import SearchResult as SearchResult
+from .extract_result import ExtractResult as ExtractResult
+from .extract_response import ExtractResponse as ExtractResponse
+from .client_extract_params import ClientExtractParams as ClientExtractParams
+from .excerpt_settings_param import ExcerptSettingsParam as ExcerptSettingsParam
from .task_run_create_params import TaskRunCreateParams as TaskRunCreateParams
from .task_run_result_params import TaskRunResultParams as TaskRunResultParams
diff --git a/src/parallel/types/beta/beta_extract_params.py b/src/parallel/types/beta/beta_extract_params.py
index 79c0262..e4838ee 100644
--- a/src/parallel/types/beta/beta_extract_params.py
+++ b/src/parallel/types/beta/beta_extract_params.py
@@ -7,22 +7,33 @@
from ..._types import SequenceNotStr
from ..._utils import PropertyInfo
-from .fetch_policy_param import FetchPolicyParam
+from ..fetch_policy_param import FetchPolicyParam
from .parallel_beta_param import ParallelBetaParam
from .excerpt_settings_param import ExcerptSettingsParam
-__all__ = ["BetaExtractParams", "Excerpts"]
+__all__ = ["BetaExtractParams", "Excerpts", "FullContent", "FullContentFullContentSettings"]
class BetaExtractParams(TypedDict, total=False):
urls: Required[SequenceNotStr[str]]
excerpts: Excerpts
- """Include excerpts from each URL relevant to the search objective and queries."""
+ """Include excerpts from each URL relevant to the search objective and queries.
+
+ Note that if neither objective nor search_queries is provided, excerpts are
+ redundant with full content.
+ """
fetch_policy: Optional[FetchPolicyParam]
"""Policy for live fetching web results."""
+ full_content: FullContent
+ """Include full content from each URL.
+
+ Note that if neither objective nor search_queries is provided, excerpts are
+ redundant with full content.
+ """
+
objective: Optional[str]
"""If provided, focuses extracted content on the specified search objective."""
@@ -34,3 +45,17 @@ class BetaExtractParams(TypedDict, total=False):
Excerpts: TypeAlias = Union[bool, ExcerptSettingsParam]
+
+
+class FullContentFullContentSettings(TypedDict, total=False):
+ """Optional settings for returning full content."""
+
+ max_chars_per_result: Optional[int]
+ """
+ Optional limit on the number of characters to include in the full content for
+ each url. Full content always starts at the beginning of the page and is
+ truncated at the limit if necessary.
+ """
+
+
+FullContent: TypeAlias = Union[bool, FullContentFullContentSettings]
diff --git a/src/parallel/types/beta/beta_search_params.py b/src/parallel/types/beta/beta_search_params.py
index 4a0776f..7e47fd7 100644
--- a/src/parallel/types/beta/beta_search_params.py
+++ b/src/parallel/types/beta/beta_search_params.py
@@ -7,7 +7,7 @@
from ..._types import SequenceNotStr
from ..._utils import PropertyInfo
-from .fetch_policy_param import FetchPolicyParam
+from ..fetch_policy_param import FetchPolicyParam
from .parallel_beta_param import ParallelBetaParam
from .excerpt_settings_param import ExcerptSettingsParam
from ..shared_params.source_policy import SourcePolicy
@@ -22,6 +22,9 @@ class BetaSearchParams(TypedDict, total=False):
fetch_policy: Optional[FetchPolicyParam]
"""Policy for live fetching web results."""
+ location: Optional[str]
+ """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
+
max_chars_per_result: Optional[int]
"""DEPRECATED: Use `excerpts.max_chars_per_result` instead."""
diff --git a/src/parallel/types/beta/extract_error.py b/src/parallel/types/beta/extract_error.py
index 0c8a19f..499920f 100644
--- a/src/parallel/types/beta/extract_error.py
+++ b/src/parallel/types/beta/extract_error.py
@@ -1,22 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
-
-from ..._models import BaseModel
+from .. import extract_error
__all__ = ["ExtractError"]
-
-class ExtractError(BaseModel):
- """Extract error details."""
-
- content: Optional[str] = None
- """Content returned for http client or server errors, if any."""
-
- error_type: str
- """Error type."""
-
- http_status_code: Optional[int] = None
- """HTTP status code, if available."""
-
- url: str
+ExtractError = extract_error.ExtractError
diff --git a/src/parallel/types/beta/extract_response.py b/src/parallel/types/beta/extract_response.py
index 45717bc..5fa3d4d 100644
--- a/src/parallel/types/beta/extract_response.py
+++ b/src/parallel/types/beta/extract_response.py
@@ -3,8 +3,8 @@
from typing import List, Optional
from ..._models import BaseModel
-from .usage_item import UsageItem
-from .extract_error import ExtractError
+from ..usage_item import UsageItem
+from ..extract_error import ExtractError
from .extract_result import ExtractResult
from ..shared.warning import Warning
diff --git a/src/parallel/types/beta/extract_result.py b/src/parallel/types/beta/extract_result.py
index 9b5e594..8d74038 100644
--- a/src/parallel/types/beta/extract_result.py
+++ b/src/parallel/types/beta/extract_result.py
@@ -16,6 +16,9 @@ class ExtractResult(BaseModel):
excerpts: Optional[List[str]] = None
"""Relevant excerpted content from the URL, formatted as markdown."""
+ full_content: Optional[str] = None
+ """Full content from the URL formatted as markdown, if requested."""
+
publish_date: Optional[str] = None
"""Publish date of the webpage in YYYY-MM-DD format, if available."""
diff --git a/src/parallel/types/beta/fetch_policy_param.py b/src/parallel/types/beta/fetch_policy_param.py
index 5bc4447..0949e76 100644
--- a/src/parallel/types/beta/fetch_policy_param.py
+++ b/src/parallel/types/beta/fetch_policy_param.py
@@ -2,26 +2,6 @@
from __future__ import annotations
-from typing import Optional
-from typing_extensions import TypedDict
+from .. import fetch_policy_param
-__all__ = ["FetchPolicyParam"]
-
-
-class FetchPolicyParam(TypedDict, total=False):
- """Policy for live fetching web results."""
-
- disable_cache_fallback: bool
- """
- If false, fallback to cached content older than max-age if live fetch fails or
- times out. If true, returns an error instead.
- """
-
- max_age_seconds: Optional[int]
- """Maximum age of cached content in seconds to trigger a live fetch.
-
- Minimum value 600 seconds (10 minutes).
- """
-
- timeout_seconds: Optional[float]
- """Timeout in seconds for fetching live content if unavailable in cache."""
+FetchPolicyParam = fetch_policy_param.FetchPolicyParam
diff --git a/src/parallel/types/beta/search_result.py b/src/parallel/types/beta/search_result.py
index c7dd935..4c20ccb 100644
--- a/src/parallel/types/beta/search_result.py
+++ b/src/parallel/types/beta/search_result.py
@@ -3,7 +3,7 @@
from typing import List, Optional
from ..._models import BaseModel
-from .usage_item import UsageItem
+from ..usage_item import UsageItem
from ..shared.warning import Warning
from .web_search_result import WebSearchResult
diff --git a/src/parallel/types/beta/usage_item.py b/src/parallel/types/beta/usage_item.py
index b3584bd..587bb3c 100644
--- a/src/parallel/types/beta/usage_item.py
+++ b/src/parallel/types/beta/usage_item.py
@@ -1,15 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from ..._models import BaseModel
+from .. import usage_item
__all__ = ["UsageItem"]
-
-class UsageItem(BaseModel):
- """Usage item for a single operation."""
-
- count: int
- """Count of the SKU."""
-
- name: str
- """Name of the SKU."""
+UsageItem = usage_item.UsageItem
diff --git a/src/parallel/types/client_extract_params.py b/src/parallel/types/client_extract_params.py
new file mode 100644
index 0000000..d22f3cf
--- /dev/null
+++ b/src/parallel/types/client_extract_params.py
@@ -0,0 +1,77 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Required, TypeAlias, TypedDict
+
+from .._types import SequenceNotStr
+from .fetch_policy_param import FetchPolicyParam
+from .excerpt_settings_param import ExcerptSettingsParam
+
+__all__ = ["ClientExtractParams", "Advanced", "AdvancedFullContent", "AdvancedFullContentFullContentSettings"]
+
+
+class ClientExtractParams(TypedDict, total=False):
+ urls: Required[SequenceNotStr[str]]
+ """URLs to extract content from. Up to 20 URLs."""
+
+ advanced: Optional[Advanced]
+ """Advanced extract configuration."""
+
+ client_model: Optional[str]
+ """The model generating this request and consuming the results.
+
+ Enables optimizations and tailors default settings for the model's capabilities.
+ """
+
+ max_chars_total: Optional[int]
+ """Upper bound on total characters across excerpts from all extracted results.
+
+ Does not affect full_content if requested. Default is dynamic based on urls,
+ objective, and client_model.
+ """
+
+ objective: Optional[str]
+ """
+ As in SearchRequest, a natural-language description of the underlying question
+ or goal driving the request. Used together with search_queries to focus excerpts
+ on the most relevant content.
+ """
+
+ search_queries: Optional[SequenceNotStr[str]]
+ """Optional keyword search queries, as in SearchRequest.
+
+ Used together with objective to focus excerpts on the most relevant content.
+ """
+
+
+class AdvancedFullContentFullContentSettings(TypedDict, total=False):
+ """Optional settings for returning full content."""
+
+ max_chars_per_result: Optional[int]
+ """
+ Optional limit on the number of characters to include in the full content for
+ each url. Full content always starts at the beginning of the page and is
+ truncated at the limit if necessary.
+ """
+
+
+AdvancedFullContent: TypeAlias = Union[AdvancedFullContentFullContentSettings, bool]
+
+
+class Advanced(TypedDict, total=False):
+ """Advanced extract configuration."""
+
+ excerpt_settings: Optional[ExcerptSettingsParam]
+ """Optional settings for returning relevant excerpts."""
+
+ fetch_policy: Optional[FetchPolicyParam]
+ """Policy for live fetching web results."""
+
+ full_content: AdvancedFullContent
+ """Controls full content extraction.
+
+ Set to true to enable with defaults, false to disable, or provide
+ FullContentSettings for fine-grained control.
+ """
diff --git a/src/parallel/types/client_search_params.py b/src/parallel/types/client_search_params.py
new file mode 100644
index 0000000..c37e2d5
--- /dev/null
+++ b/src/parallel/types/client_search_params.py
@@ -0,0 +1,72 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .._types import SequenceNotStr
+from .fetch_policy_param import FetchPolicyParam
+from .excerpt_settings_param import ExcerptSettingsParam
+from .shared_params.source_policy import SourcePolicy
+
+__all__ = ["ClientSearchParams", "Advanced"]
+
+
+class ClientSearchParams(TypedDict, total=False):
+ search_queries: Required[SequenceNotStr[str]]
+ """Concise keyword search queries, 3-6 words each.
+
+ At least one query is required, provide 2-3 for best results. Used together with
+ objective to focus results on the most relevant content.
+ """
+
+ advanced: Optional[Advanced]
+ """Advanced search configuration."""
+
+ client_model: Optional[str]
+ """The model generating this request and consuming the results.
+
+ Enables optimizations and tailors default settings for the model's capabilities.
+ """
+
+ max_chars_total: Optional[int]
+ """Upper bound on total characters across excerpts from all results.
+
+ Default is dynamic based on search_queries, objective, and client_model.
+ """
+
+ mode: Optional[Literal["basic", "standard"]]
+ """Search mode preset: supported values are basic and standard.
+
+ Basic mode offers the lowest latency and works best with 2-3 high-quality
+ search_queries. Standard mode provides higher quality with more advanced
+ retrieval and compression.
+ """
+
+ objective: Optional[str]
+ """
+ Natural-language description of the underlying question or goal driving the
+ search. Used together with search_queries to focus results on the most relevant
+ content. Should be self-contained with enough context to understand the intent
+ of the search.
+ """
+
+
+class Advanced(TypedDict, total=False):
+ """Advanced search configuration."""
+
+ excerpt_settings: Optional[ExcerptSettingsParam]
+ """Optional settings for returning relevant excerpts."""
+
+ fetch_policy: Optional[FetchPolicyParam]
+ """Policy for live fetching web results."""
+
+ location: Optional[str]
+ """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
+
+ source_policy: Optional[SourcePolicy]
+ """Source policy for web search results.
+
+ This policy governs which sources are allowed/disallowed in results.
+ """
diff --git a/src/parallel/types/excerpt_settings_param.py b/src/parallel/types/excerpt_settings_param.py
new file mode 100644
index 0000000..17bd00e
--- /dev/null
+++ b/src/parallel/types/excerpt_settings_param.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import TypedDict
+
+__all__ = ["ExcerptSettingsParam"]
+
+
+class ExcerptSettingsParam(TypedDict, total=False):
+ """Optional settings for returning relevant excerpts."""
+
+ max_chars_per_result: Optional[int]
+ """Optional upper bound on the total number of characters to include per url.
+
+ Excerpts may contain fewer characters than this limit to maximize relevance and
+ token efficiency. Values below 1000 will be automatically set to 1000.
+ """
diff --git a/src/parallel/types/extract_error.py b/src/parallel/types/extract_error.py
new file mode 100644
index 0000000..3379cb6
--- /dev/null
+++ b/src/parallel/types/extract_error.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["ExtractError"]
+
+
+class ExtractError(BaseModel):
+ """Extract error details."""
+
+ content: Optional[str] = None
+ """Content returned for http client or server errors, if any."""
+
+ error_type: str
+ """Error type."""
+
+ http_status_code: Optional[int] = None
+ """HTTP status code, if available."""
+
+ url: str
diff --git a/src/parallel/types/extract_response.py b/src/parallel/types/extract_response.py
new file mode 100644
index 0000000..8d2830b
--- /dev/null
+++ b/src/parallel/types/extract_response.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .usage_item import UsageItem
+from .extract_error import ExtractError
+from .extract_result import ExtractResult
+from .shared.warning import Warning
+
+__all__ = ["ExtractResponse"]
+
+
+class ExtractResponse(BaseModel):
+ """Extract response (GA)."""
+
+ errors: List[ExtractError]
+ """Extract errors: requested URLs not in the results."""
+
+ extract_id: str
+ """Extract request ID, e.g. `extract_cad0a6d2dec046bd95ae900527d880e7`"""
+
+ results: List[ExtractResult]
+ """Successful extract results."""
+
+ usage: Optional[List[UsageItem]] = None
+ """Usage metrics for the extract request."""
+
+ warnings: Optional[List[Warning]] = None
+ """Warnings for the extract request, if any."""
diff --git a/src/parallel/types/extract_result.py b/src/parallel/types/extract_result.py
new file mode 100644
index 0000000..e02243b
--- /dev/null
+++ b/src/parallel/types/extract_result.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+
+__all__ = ["ExtractResult"]
+
+
+class ExtractResult(BaseModel):
+ """Extract result for a single URL."""
+
+ excerpts: List[str]
+ """Relevant excerpted content from the URL, formatted as markdown."""
+
+ url: str
+ """URL associated with the search result."""
+
+ full_content: Optional[str] = None
+ """Full content from the URL formatted as markdown, if requested."""
+
+ publish_date: Optional[str] = None
+ """Publish date of the webpage in YYYY-MM-DD format, if available."""
+
+ title: Optional[str] = None
+ """Title of the webpage, if available."""
diff --git a/src/parallel/types/fetch_policy_param.py b/src/parallel/types/fetch_policy_param.py
new file mode 100644
index 0000000..5bc4447
--- /dev/null
+++ b/src/parallel/types/fetch_policy_param.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import TypedDict
+
+__all__ = ["FetchPolicyParam"]
+
+
+class FetchPolicyParam(TypedDict, total=False):
+ """Policy for live fetching web results."""
+
+ disable_cache_fallback: bool
+ """
+ If false, fallback to cached content older than max-age if live fetch fails or
+ times out. If true, returns an error instead.
+ """
+
+ max_age_seconds: Optional[int]
+ """Maximum age of cached content in seconds to trigger a live fetch.
+
+ Minimum value 600 seconds (10 minutes).
+ """
+
+ timeout_seconds: Optional[float]
+ """Timeout in seconds for fetching live content if unavailable in cache."""
diff --git a/src/parallel/types/search_result.py b/src/parallel/types/search_result.py
new file mode 100644
index 0000000..c7e3a4f
--- /dev/null
+++ b/src/parallel/types/search_result.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .usage_item import UsageItem
+from .shared.warning import Warning
+from .web_search_result import WebSearchResult
+
+__all__ = ["SearchResult"]
+
+
+class SearchResult(BaseModel):
+ """Search response (GA)."""
+
+ results: List[WebSearchResult]
+ """A list of search results, ordered by decreasing relevance."""
+
+ search_id: str
+ """Search ID. Example: `search_cad0a6d2dec046bd95ae900527d880e7`"""
+
+ usage: Optional[List[UsageItem]] = None
+ """Usage metrics for the search request."""
+
+ warnings: Optional[List[Warning]] = None
+ """Warnings for the search request, if any."""
diff --git a/src/parallel/types/usage_item.py b/src/parallel/types/usage_item.py
new file mode 100644
index 0000000..471d112
--- /dev/null
+++ b/src/parallel/types/usage_item.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["UsageItem"]
+
+
+class UsageItem(BaseModel):
+ """Usage item for a single operation."""
+
+ count: int
+ """Count of the SKU."""
+
+ name: str
+ """Name of the SKU."""
diff --git a/src/parallel/types/web_search_result.py b/src/parallel/types/web_search_result.py
new file mode 100644
index 0000000..6178b6e
--- /dev/null
+++ b/src/parallel/types/web_search_result.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+
+__all__ = ["WebSearchResult"]
+
+
+class WebSearchResult(BaseModel):
+ """A single search result from the web search API."""
+
+ excerpts: List[str]
+ """Relevant excerpted content from the URL, formatted as markdown."""
+
+ url: str
+ """URL associated with the search result."""
+
+ publish_date: Optional[str] = None
+ """Publish date of the webpage in YYYY-MM-DD format, if available."""
+
+ title: Optional[str] = None
+ """Title of the webpage, if available."""
diff --git a/tests/api_resources/test_beta.py b/tests/api_resources/test_beta.py
index a074455..e3198c5 100644
--- a/tests/api_resources/test_beta.py
+++ b/tests/api_resources/test_beta.py
@@ -10,10 +10,7 @@
from parallel import Parallel, AsyncParallel
from tests.utils import assert_matches_type
from parallel._utils import parse_date
-from parallel.types.beta import (
- SearchResult,
- ExtractResponse,
-)
+from parallel.types.beta import SearchResult, ExtractResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -38,6 +35,7 @@ def test_method_extract_with_all_params(self, client: Parallel) -> None:
"max_age_seconds": 86400,
"timeout_seconds": 60,
},
+ full_content=True,
objective="objective",
search_queries=["string"],
betas=["mcp-server-2025-07-17"],
@@ -85,6 +83,7 @@ def test_method_search_with_all_params(self, client: Parallel) -> None:
"max_age_seconds": 86400,
"timeout_seconds": 60,
},
+ location="us",
max_chars_per_result=0,
max_results=0,
mode="one-shot",
@@ -143,6 +142,7 @@ async def test_method_extract_with_all_params(self, async_client: AsyncParallel)
"max_age_seconds": 86400,
"timeout_seconds": 60,
},
+ full_content=True,
objective="objective",
search_queries=["string"],
betas=["mcp-server-2025-07-17"],
@@ -190,6 +190,7 @@ async def test_method_search_with_all_params(self, async_client: AsyncParallel)
"max_age_seconds": 86400,
"timeout_seconds": 60,
},
+ location="us",
max_chars_per_result=0,
max_results=0,
mode="one-shot",
diff --git a/tests/api_resources/test_client.py b/tests/api_resources/test_client.py
new file mode 100644
index 0000000..3b9ea3a
--- /dev/null
+++ b/tests/api_resources/test_client.py
@@ -0,0 +1,239 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from parallel import Parallel, AsyncParallel
+from tests.utils import assert_matches_type
+from parallel.types import SearchResult, ExtractResponse
+from parallel._utils import parse_date
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestClient:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_extract(self, client: Parallel) -> None:
+ client_ = client.extract(
+ urls=["string"],
+ )
+ assert_matches_type(ExtractResponse, client_, path=["response"])
+
+ @parametrize
+ def test_method_extract_with_all_params(self, client: Parallel) -> None:
+ client_ = client.extract(
+ urls=["string"],
+ advanced={
+ "excerpt_settings": {"max_chars_per_result": 0},
+ "fetch_policy": {
+ "disable_cache_fallback": True,
+ "max_age_seconds": 86400,
+ "timeout_seconds": 60,
+ },
+ "full_content": {"max_chars_per_result": 0},
+ },
+ client_model="claude-sonnet-4-6-20260401",
+ max_chars_total=0,
+ objective="objective",
+ search_queries=["string"],
+ )
+ assert_matches_type(ExtractResponse, client_, path=["response"])
+
+ @parametrize
+ def test_raw_response_extract(self, client: Parallel) -> None:
+ response = client.with_raw_response.extract(
+ urls=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ client_ = response.parse()
+ assert_matches_type(ExtractResponse, client_, path=["response"])
+
+ @parametrize
+ def test_streaming_response_extract(self, client: Parallel) -> None:
+ with client.with_streaming_response.extract(
+ urls=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ client_ = response.parse()
+ assert_matches_type(ExtractResponse, client_, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_search(self, client: Parallel) -> None:
+ client_ = client.search(
+ search_queries=["string"],
+ )
+ assert_matches_type(SearchResult, client_, path=["response"])
+
+ @parametrize
+ def test_method_search_with_all_params(self, client: Parallel) -> None:
+ client_ = client.search(
+ search_queries=["string"],
+ advanced={
+ "excerpt_settings": {"max_chars_per_result": 0},
+ "fetch_policy": {
+ "disable_cache_fallback": True,
+ "max_age_seconds": 86400,
+ "timeout_seconds": 60,
+ },
+ "location": "us",
+ "source_policy": {
+ "after_date": parse_date("2024-01-01"),
+ "exclude_domains": ["reddit.com", "x.com", ".ai"],
+ "include_domains": ["wikipedia.org", "usa.gov", ".edu"],
+ },
+ },
+ client_model="claude-sonnet-4-6-20260401",
+ max_chars_total=0,
+ mode="basic",
+ objective="objective",
+ )
+ assert_matches_type(SearchResult, client_, path=["response"])
+
+ @parametrize
+ def test_raw_response_search(self, client: Parallel) -> None:
+ response = client.with_raw_response.search(
+ search_queries=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ client_ = response.parse()
+ assert_matches_type(SearchResult, client_, path=["response"])
+
+ @parametrize
+ def test_streaming_response_search(self, client: Parallel) -> None:
+ with client.with_streaming_response.search(
+ search_queries=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ client_ = response.parse()
+ assert_matches_type(SearchResult, client_, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncClient:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_extract(self, async_client: AsyncParallel) -> None:
+ client = await async_client.extract(
+ urls=["string"],
+ )
+ assert_matches_type(ExtractResponse, client, path=["response"])
+
+ @parametrize
+ async def test_method_extract_with_all_params(self, async_client: AsyncParallel) -> None:
+ client = await async_client.extract(
+ urls=["string"],
+ advanced={
+ "excerpt_settings": {"max_chars_per_result": 0},
+ "fetch_policy": {
+ "disable_cache_fallback": True,
+ "max_age_seconds": 86400,
+ "timeout_seconds": 60,
+ },
+ "full_content": {"max_chars_per_result": 0},
+ },
+ client_model="claude-sonnet-4-6-20260401",
+ max_chars_total=0,
+ objective="objective",
+ search_queries=["string"],
+ )
+ assert_matches_type(ExtractResponse, client, path=["response"])
+
+ @parametrize
+ async def test_raw_response_extract(self, async_client: AsyncParallel) -> None:
+ response = await async_client.with_raw_response.extract(
+ urls=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ client = await response.parse()
+ assert_matches_type(ExtractResponse, client, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_extract(self, async_client: AsyncParallel) -> None:
+ async with async_client.with_streaming_response.extract(
+ urls=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ client = await response.parse()
+ assert_matches_type(ExtractResponse, client, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_search(self, async_client: AsyncParallel) -> None:
+ client = await async_client.search(
+ search_queries=["string"],
+ )
+ assert_matches_type(SearchResult, client, path=["response"])
+
+ @parametrize
+ async def test_method_search_with_all_params(self, async_client: AsyncParallel) -> None:
+ client = await async_client.search(
+ search_queries=["string"],
+ advanced={
+ "excerpt_settings": {"max_chars_per_result": 0},
+ "fetch_policy": {
+ "disable_cache_fallback": True,
+ "max_age_seconds": 86400,
+ "timeout_seconds": 60,
+ },
+ "location": "us",
+ "source_policy": {
+ "after_date": parse_date("2024-01-01"),
+ "exclude_domains": ["reddit.com", "x.com", ".ai"],
+ "include_domains": ["wikipedia.org", "usa.gov", ".edu"],
+ },
+ },
+ client_model="claude-sonnet-4-6-20260401",
+ max_chars_total=0,
+ mode="basic",
+ objective="objective",
+ )
+ assert_matches_type(SearchResult, client, path=["response"])
+
+ @parametrize
+ async def test_raw_response_search(self, async_client: AsyncParallel) -> None:
+ response = await async_client.with_raw_response.search(
+ search_queries=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ client = await response.parse()
+ assert_matches_type(SearchResult, client, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_search(self, async_client: AsyncParallel) -> None:
+ async with async_client.with_streaming_response.search(
+ search_queries=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ client = await response.parse()
+ assert_matches_type(SearchResult, client, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
From 01cfb314b3f0ee731ca5bedd9f9a575073419ce0 Mon Sep 17 00:00:00 2001
From: Edward He
Date: Fri, 10 Apr 2026 17:28:07 -0700
Subject: [PATCH 19/32] lint init typing
---
src/parallel/types/__init__.py | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/src/parallel/types/__init__.py b/src/parallel/types/__init__.py
index 29fd1e0..936b7b3 100644
--- a/src/parallel/types/__init__.py
+++ b/src/parallel/types/__init__.py
@@ -11,12 +11,17 @@
from .citation import Citation as Citation
from .task_run import TaskRun as TaskRun
from .task_spec import TaskSpec as TaskSpec
+from .usage_item import UsageItem as UsageItem
from .auto_schema import AutoSchema as AutoSchema
from .field_basis import FieldBasis as FieldBasis
from .json_schema import JsonSchema as JsonSchema
from .text_schema import TextSchema as TextSchema
+from .extract_error import ExtractError as ExtractError
+from .search_result import SearchResult as SearchResult
+from .extract_result import ExtractResult as ExtractResult
from .task_run_result import TaskRunResult as TaskRunResult
from .task_spec_param import TaskSpecParam as TaskSpecParam
+from .extract_response import ExtractResponse as ExtractResponse
from .auto_schema_param import AutoSchemaParam as AutoSchemaParam
from .json_schema_param import JsonSchemaParam as JsonSchemaParam
from .text_schema_param import TextSchemaParam as TextSchemaParam
@@ -25,13 +30,8 @@
from .client_search_params import ClientSearchParams as ClientSearchParams
from .task_run_json_output import TaskRunJsonOutput as TaskRunJsonOutput
from .task_run_text_output import TaskRunTextOutput as TaskRunTextOutput
-from .parsed_task_run_result import ParsedTaskRunResult as ParsedTaskRunResult
-from .usage_item import UsageItem as UsageItem
-from .extract_error import ExtractError as ExtractError
-from .search_result import SearchResult as SearchResult
-from .extract_result import ExtractResult as ExtractResult
-from .extract_response import ExtractResponse as ExtractResponse
from .client_extract_params import ClientExtractParams as ClientExtractParams
from .excerpt_settings_param import ExcerptSettingsParam as ExcerptSettingsParam
+from .parsed_task_run_result import ParsedTaskRunResult as ParsedTaskRunResult
from .task_run_create_params import TaskRunCreateParams as TaskRunCreateParams
from .task_run_result_params import TaskRunResultParams as TaskRunResultParams
From 1c15cc00b1ae223db8e51893ce23b9a2193e3ae7 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 11 Apr 2026 06:29:56 +0000
Subject: [PATCH 20/32] fix: ensure file data are only sent as 1 parameter
---
src/parallel/_utils/_utils.py | 5 +++--
tests/test_extract_files.py | 9 +++++++++
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/src/parallel/_utils/_utils.py b/src/parallel/_utils/_utils.py
index ece4d87..569c62b 100644
--- a/src/parallel/_utils/_utils.py
+++ b/src/parallel/_utils/_utils.py
@@ -86,8 +86,9 @@ def _extract_items(
index += 1
if is_dict(obj):
try:
- # We are at the last entry in the path so we must remove the field
- if (len(path)) == index:
+ # Remove the field if there are no more dict keys in the path,
+ # only "" traversal markers or end.
+ if all(p == "" for p in path[index:]):
item = obj.pop(key)
else:
item = obj[key]
diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py
index ad0eca3..d9bb4c0 100644
--- a/tests/test_extract_files.py
+++ b/tests/test_extract_files.py
@@ -35,6 +35,15 @@ def test_multiple_files() -> None:
assert query == {"documents": [{}, {}]}
+def test_top_level_file_array() -> None:
+ query = {"files": [b"file one", b"file two"], "title": "hello"}
+ assert extract_files(query, paths=[["files", ""]]) == [
+ ("files[]", b"file one"),
+ ("files[]", b"file two"),
+ ]
+ assert query == {"title": "hello"}
+
+
@pytest.mark.parametrize(
"query,paths,expected",
[
From 5836a6fbe8db3a3509556442067f087918edb2bb Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sun, 12 Apr 2026 22:30:33 +0000
Subject: [PATCH 21/32] feat(api): manual - add AdvancedSearchSettings and
AdvancedExtractSettings models
---
.stats.yml | 2 +-
api.md | 2 +
src/parallel/_client.py | 10 +++--
src/parallel/types/__init__.py | 2 +
.../types/advanced_extract_settings_param.py | 42 +++++++++++++++++++
.../types/advanced_search_settings_param.py | 31 ++++++++++++++
src/parallel/types/client_extract_params.py | 42 +++----------------
src/parallel/types/client_search_params.py | 27 ++----------
tests/api_resources/test_client.py | 5 ++-
9 files changed, 96 insertions(+), 67 deletions(-)
create mode 100644 src/parallel/types/advanced_extract_settings_param.py
create mode 100644 src/parallel/types/advanced_search_settings_param.py
diff --git a/.stats.yml b/.stats.yml
index c8b869e..6cfd973 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 23
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-44048676c9b07d49ed9dbee5fad53d145eddaea5ba682b6557681c5a7e04f8ed.yml
openapi_spec_hash: e239787937742b1bc15e7f211fe3c518
-config_hash: 42d4e6039ef223ba2a824414a94da176
+config_hash: fe820a5a10ee48e143c9e49a153b23b4
diff --git a/api.md b/api.md
index 62ef485..6462851 100644
--- a/api.md
+++ b/api.md
@@ -10,6 +10,8 @@ Types:
```python
from parallel.types import (
+ AdvancedExtractSettings,
+ AdvancedSearchSettings,
ExcerptSettings,
ExtractError,
ExtractResponse,
diff --git a/src/parallel/_client.py b/src/parallel/_client.py
index 21a5507..7a007e9 100644
--- a/src/parallel/_client.py
+++ b/src/parallel/_client.py
@@ -49,6 +49,8 @@
)
from .types.search_result import SearchResult
from .types.extract_response import ExtractResponse
+from .types.advanced_search_settings_param import AdvancedSearchSettingsParam
+from .types.advanced_extract_settings_param import AdvancedExtractSettingsParam
if TYPE_CHECKING:
from .resources import beta, task_run
@@ -229,7 +231,7 @@ def extract(
self,
*,
urls: SequenceNotStr[str],
- advanced: Optional[client_extract_params.Advanced] | Omit = omit,
+ advanced: Optional[AdvancedExtractSettingsParam] | Omit = omit,
client_model: Optional[str] | Omit = omit,
max_chars_total: Optional[int] | Omit = omit,
objective: Optional[str] | Omit = omit,
@@ -294,7 +296,7 @@ def search(
self,
*,
search_queries: SequenceNotStr[str],
- advanced: Optional[client_search_params.Advanced] | Omit = omit,
+ advanced: Optional[AdvancedSearchSettingsParam] | Omit = omit,
client_model: Optional[str] | Omit = omit,
max_chars_total: Optional[int] | Omit = omit,
mode: Optional[Literal["basic", "standard"]] | Omit = omit,
@@ -554,7 +556,7 @@ async def extract(
self,
*,
urls: SequenceNotStr[str],
- advanced: Optional[client_extract_params.Advanced] | Omit = omit,
+ advanced: Optional[AdvancedExtractSettingsParam] | Omit = omit,
client_model: Optional[str] | Omit = omit,
max_chars_total: Optional[int] | Omit = omit,
objective: Optional[str] | Omit = omit,
@@ -619,7 +621,7 @@ async def search(
self,
*,
search_queries: SequenceNotStr[str],
- advanced: Optional[client_search_params.Advanced] | Omit = omit,
+ advanced: Optional[AdvancedSearchSettingsParam] | Omit = omit,
client_model: Optional[str] | Omit = omit,
max_chars_total: Optional[int] | Omit = omit,
mode: Optional[Literal["basic", "standard"]] | Omit = omit,
diff --git a/src/parallel/types/__init__.py b/src/parallel/types/__init__.py
index 936b7b3..526d252 100644
--- a/src/parallel/types/__init__.py
+++ b/src/parallel/types/__init__.py
@@ -35,3 +35,5 @@
from .parsed_task_run_result import ParsedTaskRunResult as ParsedTaskRunResult
from .task_run_create_params import TaskRunCreateParams as TaskRunCreateParams
from .task_run_result_params import TaskRunResultParams as TaskRunResultParams
+from .advanced_search_settings_param import AdvancedSearchSettingsParam as AdvancedSearchSettingsParam
+from .advanced_extract_settings_param import AdvancedExtractSettingsParam as AdvancedExtractSettingsParam
diff --git a/src/parallel/types/advanced_extract_settings_param.py b/src/parallel/types/advanced_extract_settings_param.py
new file mode 100644
index 0000000..00480c5
--- /dev/null
+++ b/src/parallel/types/advanced_extract_settings_param.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import TypeAlias, TypedDict
+
+from .fetch_policy_param import FetchPolicyParam
+from .excerpt_settings_param import ExcerptSettingsParam
+
+__all__ = ["AdvancedExtractSettingsParam", "FullContent", "FullContentFullContentSettings"]
+
+
+class FullContentFullContentSettings(TypedDict, total=False):
+ """Optional settings for returning full content."""
+
+ max_chars_per_result: Optional[int]
+ """
+ Optional limit on the number of characters to include in the full content for
+ each url. Full content always starts at the beginning of the page and is
+ truncated at the limit if necessary.
+ """
+
+
+FullContent: TypeAlias = Union[FullContentFullContentSettings, bool]
+
+
+class AdvancedExtractSettingsParam(TypedDict, total=False):
+ """Advanced extract configuration."""
+
+ excerpt_settings: Optional[ExcerptSettingsParam]
+ """Optional settings for returning relevant excerpts."""
+
+ fetch_policy: Optional[FetchPolicyParam]
+ """Policy for live fetching web results."""
+
+ full_content: FullContent
+ """Controls full content extraction.
+
+ Set to true to enable with defaults, false to disable, or provide
+ FullContentSettings for fine-grained control.
+ """
diff --git a/src/parallel/types/advanced_search_settings_param.py b/src/parallel/types/advanced_search_settings_param.py
new file mode 100644
index 0000000..fc4afd2
--- /dev/null
+++ b/src/parallel/types/advanced_search_settings_param.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import TypedDict
+
+from .fetch_policy_param import FetchPolicyParam
+from .excerpt_settings_param import ExcerptSettingsParam
+from .shared_params.source_policy import SourcePolicy
+
+__all__ = ["AdvancedSearchSettingsParam"]
+
+
+class AdvancedSearchSettingsParam(TypedDict, total=False):
+ """Advanced search configuration."""
+
+ excerpt_settings: Optional[ExcerptSettingsParam]
+ """Optional settings for returning relevant excerpts."""
+
+ fetch_policy: Optional[FetchPolicyParam]
+ """Policy for live fetching web results."""
+
+ location: Optional[str]
+ """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
+
+ source_policy: Optional[SourcePolicy]
+ """Source policy for web search results.
+
+ This policy governs which sources are allowed/disallowed in results.
+ """
diff --git a/src/parallel/types/client_extract_params.py b/src/parallel/types/client_extract_params.py
index d22f3cf..a53a4c6 100644
--- a/src/parallel/types/client_extract_params.py
+++ b/src/parallel/types/client_extract_params.py
@@ -2,21 +2,20 @@
from __future__ import annotations
-from typing import Union, Optional
-from typing_extensions import Required, TypeAlias, TypedDict
+from typing import Optional
+from typing_extensions import Required, TypedDict
from .._types import SequenceNotStr
-from .fetch_policy_param import FetchPolicyParam
-from .excerpt_settings_param import ExcerptSettingsParam
+from .advanced_extract_settings_param import AdvancedExtractSettingsParam
-__all__ = ["ClientExtractParams", "Advanced", "AdvancedFullContent", "AdvancedFullContentFullContentSettings"]
+__all__ = ["ClientExtractParams"]
class ClientExtractParams(TypedDict, total=False):
urls: Required[SequenceNotStr[str]]
"""URLs to extract content from. Up to 20 URLs."""
- advanced: Optional[Advanced]
+ advanced: Optional[AdvancedExtractSettingsParam]
"""Advanced extract configuration."""
client_model: Optional[str]
@@ -44,34 +43,3 @@ class ClientExtractParams(TypedDict, total=False):
Used together with objective to focus excerpts on the most relevant content.
"""
-
-
-class AdvancedFullContentFullContentSettings(TypedDict, total=False):
- """Optional settings for returning full content."""
-
- max_chars_per_result: Optional[int]
- """
- Optional limit on the number of characters to include in the full content for
- each url. Full content always starts at the beginning of the page and is
- truncated at the limit if necessary.
- """
-
-
-AdvancedFullContent: TypeAlias = Union[AdvancedFullContentFullContentSettings, bool]
-
-
-class Advanced(TypedDict, total=False):
- """Advanced extract configuration."""
-
- excerpt_settings: Optional[ExcerptSettingsParam]
- """Optional settings for returning relevant excerpts."""
-
- fetch_policy: Optional[FetchPolicyParam]
- """Policy for live fetching web results."""
-
- full_content: AdvancedFullContent
- """Controls full content extraction.
-
- Set to true to enable with defaults, false to disable, or provide
- FullContentSettings for fine-grained control.
- """
diff --git a/src/parallel/types/client_search_params.py b/src/parallel/types/client_search_params.py
index c37e2d5..82f8d0a 100644
--- a/src/parallel/types/client_search_params.py
+++ b/src/parallel/types/client_search_params.py
@@ -6,11 +6,9 @@
from typing_extensions import Literal, Required, TypedDict
from .._types import SequenceNotStr
-from .fetch_policy_param import FetchPolicyParam
-from .excerpt_settings_param import ExcerptSettingsParam
-from .shared_params.source_policy import SourcePolicy
+from .advanced_search_settings_param import AdvancedSearchSettingsParam
-__all__ = ["ClientSearchParams", "Advanced"]
+__all__ = ["ClientSearchParams"]
class ClientSearchParams(TypedDict, total=False):
@@ -21,7 +19,7 @@ class ClientSearchParams(TypedDict, total=False):
objective to focus results on the most relevant content.
"""
- advanced: Optional[Advanced]
+ advanced: Optional[AdvancedSearchSettingsParam]
"""Advanced search configuration."""
client_model: Optional[str]
@@ -51,22 +49,3 @@ class ClientSearchParams(TypedDict, total=False):
content. Should be self-contained with enough context to understand the intent
of the search.
"""
-
-
-class Advanced(TypedDict, total=False):
- """Advanced search configuration."""
-
- excerpt_settings: Optional[ExcerptSettingsParam]
- """Optional settings for returning relevant excerpts."""
-
- fetch_policy: Optional[FetchPolicyParam]
- """Policy for live fetching web results."""
-
- location: Optional[str]
- """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
-
- source_policy: Optional[SourcePolicy]
- """Source policy for web search results.
-
- This policy governs which sources are allowed/disallowed in results.
- """
diff --git a/tests/api_resources/test_client.py b/tests/api_resources/test_client.py
index 3b9ea3a..f0bc6a0 100644
--- a/tests/api_resources/test_client.py
+++ b/tests/api_resources/test_client.py
@@ -9,7 +9,10 @@
from parallel import Parallel, AsyncParallel
from tests.utils import assert_matches_type
-from parallel.types import SearchResult, ExtractResponse
+from parallel.types import (
+ SearchResult,
+ ExtractResponse,
+)
from parallel._utils import parse_date
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
From 4ded29c2382594f1735101753a0b09a2f7c6972e Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 14 Apr 2026 23:53:17 +0000
Subject: [PATCH 22/32] feat(api): Search/Extract v1 with advanced_settings and
max_results
---
.stats.yml | 4 +--
src/parallel/_client.py | 26 ++++++++++---------
src/parallel/resources/beta/findall.py | 4 +--
.../types/advanced_search_settings_param.py | 3 +++
.../types/beta/findall_create_params.py | 2 +-
src/parallel/types/client_extract_params.py | 2 +-
src/parallel/types/client_search_params.py | 4 +--
tests/api_resources/test_client.py | 10 ++++---
8 files changed, 31 insertions(+), 24 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 6cfd973..b7d708f 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 23
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-44048676c9b07d49ed9dbee5fad53d145eddaea5ba682b6557681c5a7e04f8ed.yml
-openapi_spec_hash: e239787937742b1bc15e7f211fe3c518
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-7099c3aac2f0edacf413e426c176150404e1603f709cdd1f0af5c6689d33bd4f.yml
+openapi_spec_hash: eb61e027506aa339543ce33d7f052046
config_hash: fe820a5a10ee48e143c9e49a153b23b4
diff --git a/src/parallel/_client.py b/src/parallel/_client.py
index 7a007e9..23979fd 100644
--- a/src/parallel/_client.py
+++ b/src/parallel/_client.py
@@ -231,7 +231,7 @@ def extract(
self,
*,
urls: SequenceNotStr[str],
- advanced: Optional[AdvancedExtractSettingsParam] | Omit = omit,
+ advanced_settings: Optional[AdvancedExtractSettingsParam] | Omit = omit,
client_model: Optional[str] | Omit = omit,
max_chars_total: Optional[int] | Omit = omit,
objective: Optional[str] | Omit = omit,
@@ -249,7 +249,7 @@ def extract(
Args:
urls: URLs to extract content from. Up to 20 URLs.
- advanced: Advanced extract configuration.
+ advanced_settings: Advanced extract configuration.
client_model: The model generating this request and consuming the results. Enables
optimizations and tailors default settings for the model's capabilities.
@@ -278,7 +278,7 @@ def extract(
body=maybe_transform(
{
"urls": urls,
- "advanced": advanced,
+ "advanced_settings": advanced_settings,
"client_model": client_model,
"max_chars_total": max_chars_total,
"objective": objective,
@@ -296,7 +296,7 @@ def search(
self,
*,
search_queries: SequenceNotStr[str],
- advanced: Optional[AdvancedSearchSettingsParam] | Omit = omit,
+ advanced_settings: Optional[AdvancedSearchSettingsParam] | Omit = omit,
client_model: Optional[str] | Omit = omit,
max_chars_total: Optional[int] | Omit = omit,
mode: Optional[Literal["basic", "standard"]] | Omit = omit,
@@ -316,7 +316,7 @@ def search(
provide 2-3 for best results. Used together with objective to focus results on
the most relevant content.
- advanced: Advanced search configuration.
+ advanced_settings: Advanced search configuration.
client_model: The model generating this request and consuming the results. Enables
optimizations and tailors default settings for the model's capabilities.
@@ -327,6 +327,7 @@ def search(
mode: Search mode preset: supported values are basic and standard. Basic mode offers
the lowest latency and works best with 2-3 high-quality search_queries. Standard
mode provides higher quality with more advanced retrieval and compression.
+ Defaults to standard when omitted.
objective: Natural-language description of the underlying question or goal driving the
search. Used together with search_queries to focus results on the most relevant
@@ -346,7 +347,7 @@ def search(
body=maybe_transform(
{
"search_queries": search_queries,
- "advanced": advanced,
+ "advanced_settings": advanced_settings,
"client_model": client_model,
"max_chars_total": max_chars_total,
"mode": mode,
@@ -556,7 +557,7 @@ async def extract(
self,
*,
urls: SequenceNotStr[str],
- advanced: Optional[AdvancedExtractSettingsParam] | Omit = omit,
+ advanced_settings: Optional[AdvancedExtractSettingsParam] | Omit = omit,
client_model: Optional[str] | Omit = omit,
max_chars_total: Optional[int] | Omit = omit,
objective: Optional[str] | Omit = omit,
@@ -574,7 +575,7 @@ async def extract(
Args:
urls: URLs to extract content from. Up to 20 URLs.
- advanced: Advanced extract configuration.
+ advanced_settings: Advanced extract configuration.
client_model: The model generating this request and consuming the results. Enables
optimizations and tailors default settings for the model's capabilities.
@@ -603,7 +604,7 @@ async def extract(
body=await async_maybe_transform(
{
"urls": urls,
- "advanced": advanced,
+ "advanced_settings": advanced_settings,
"client_model": client_model,
"max_chars_total": max_chars_total,
"objective": objective,
@@ -621,7 +622,7 @@ async def search(
self,
*,
search_queries: SequenceNotStr[str],
- advanced: Optional[AdvancedSearchSettingsParam] | Omit = omit,
+ advanced_settings: Optional[AdvancedSearchSettingsParam] | Omit = omit,
client_model: Optional[str] | Omit = omit,
max_chars_total: Optional[int] | Omit = omit,
mode: Optional[Literal["basic", "standard"]] | Omit = omit,
@@ -641,7 +642,7 @@ async def search(
provide 2-3 for best results. Used together with objective to focus results on
the most relevant content.
- advanced: Advanced search configuration.
+ advanced_settings: Advanced search configuration.
client_model: The model generating this request and consuming the results. Enables
optimizations and tailors default settings for the model's capabilities.
@@ -652,6 +653,7 @@ async def search(
mode: Search mode preset: supported values are basic and standard. Basic mode offers
the lowest latency and works best with 2-3 high-quality search_queries. Standard
mode provides higher quality with more advanced retrieval and compression.
+ Defaults to standard when omitted.
objective: Natural-language description of the underlying question or goal driving the
search. Used together with search_queries to focus results on the most relevant
@@ -671,7 +673,7 @@ async def search(
body=await async_maybe_transform(
{
"search_queries": search_queries,
- "advanced": advanced,
+ "advanced_settings": advanced_settings,
"client_model": client_model,
"max_chars_total": max_chars_total,
"mode": mode,
diff --git a/src/parallel/resources/beta/findall.py b/src/parallel/resources/beta/findall.py
index acc3d98..878be14 100644
--- a/src/parallel/resources/beta/findall.py
+++ b/src/parallel/resources/beta/findall.py
@@ -117,7 +117,7 @@ def create(
match_conditions: List of match conditions for the FindAll run.
match_limit: Maximum number of matches to find for this FindAll run. Must be between 5 and
- 1000 (inclusive).
+ 1000 (inclusive). May return fewer results.
objective: Natural language objective of the FindAll run.
@@ -662,7 +662,7 @@ async def create(
match_conditions: List of match conditions for the FindAll run.
match_limit: Maximum number of matches to find for this FindAll run. Must be between 5 and
- 1000 (inclusive).
+ 1000 (inclusive). May return fewer results.
objective: Natural language objective of the FindAll run.
diff --git a/src/parallel/types/advanced_search_settings_param.py b/src/parallel/types/advanced_search_settings_param.py
index fc4afd2..b2db575 100644
--- a/src/parallel/types/advanced_search_settings_param.py
+++ b/src/parallel/types/advanced_search_settings_param.py
@@ -24,6 +24,9 @@ class AdvancedSearchSettingsParam(TypedDict, total=False):
location: Optional[str]
"""ISO 3166-1 alpha-2 country code for geo-targeted search results."""
+ max_results: Optional[int]
+ """Upper bound on the number of results to return. Defaults to 10 if not provided."""
+
source_policy: Optional[SourcePolicy]
"""Source policy for web search results.
diff --git a/src/parallel/types/beta/findall_create_params.py b/src/parallel/types/beta/findall_create_params.py
index d4dbed2..2486de9 100644
--- a/src/parallel/types/beta/findall_create_params.py
+++ b/src/parallel/types/beta/findall_create_params.py
@@ -25,7 +25,7 @@ class FindAllCreateParams(TypedDict, total=False):
match_limit: Required[int]
"""Maximum number of matches to find for this FindAll run.
- Must be between 5 and 1000 (inclusive).
+ Must be between 5 and 1000 (inclusive). May return fewer results.
"""
objective: Required[str]
diff --git a/src/parallel/types/client_extract_params.py b/src/parallel/types/client_extract_params.py
index a53a4c6..0673b3f 100644
--- a/src/parallel/types/client_extract_params.py
+++ b/src/parallel/types/client_extract_params.py
@@ -15,7 +15,7 @@ class ClientExtractParams(TypedDict, total=False):
urls: Required[SequenceNotStr[str]]
"""URLs to extract content from. Up to 20 URLs."""
- advanced: Optional[AdvancedExtractSettingsParam]
+ advanced_settings: Optional[AdvancedExtractSettingsParam]
"""Advanced extract configuration."""
client_model: Optional[str]
diff --git a/src/parallel/types/client_search_params.py b/src/parallel/types/client_search_params.py
index 82f8d0a..fa39fea 100644
--- a/src/parallel/types/client_search_params.py
+++ b/src/parallel/types/client_search_params.py
@@ -19,7 +19,7 @@ class ClientSearchParams(TypedDict, total=False):
objective to focus results on the most relevant content.
"""
- advanced: Optional[AdvancedSearchSettingsParam]
+ advanced_settings: Optional[AdvancedSearchSettingsParam]
"""Advanced search configuration."""
client_model: Optional[str]
@@ -39,7 +39,7 @@ class ClientSearchParams(TypedDict, total=False):
Basic mode offers the lowest latency and works best with 2-3 high-quality
search_queries. Standard mode provides higher quality with more advanced
- retrieval and compression.
+ retrieval and compression. Defaults to standard when omitted.
"""
objective: Optional[str]
diff --git a/tests/api_resources/test_client.py b/tests/api_resources/test_client.py
index f0bc6a0..00cdd6a 100644
--- a/tests/api_resources/test_client.py
+++ b/tests/api_resources/test_client.py
@@ -32,7 +32,7 @@ def test_method_extract(self, client: Parallel) -> None:
def test_method_extract_with_all_params(self, client: Parallel) -> None:
client_ = client.extract(
urls=["string"],
- advanced={
+ advanced_settings={
"excerpt_settings": {"max_chars_per_result": 0},
"fetch_policy": {
"disable_cache_fallback": True,
@@ -83,7 +83,7 @@ def test_method_search(self, client: Parallel) -> None:
def test_method_search_with_all_params(self, client: Parallel) -> None:
client_ = client.search(
search_queries=["string"],
- advanced={
+ advanced_settings={
"excerpt_settings": {"max_chars_per_result": 0},
"fetch_policy": {
"disable_cache_fallback": True,
@@ -91,6 +91,7 @@ def test_method_search_with_all_params(self, client: Parallel) -> None:
"timeout_seconds": 60,
},
"location": "us",
+ "max_results": 0,
"source_policy": {
"after_date": parse_date("2024-01-01"),
"exclude_domains": ["reddit.com", "x.com", ".ai"],
@@ -145,7 +146,7 @@ async def test_method_extract(self, async_client: AsyncParallel) -> None:
async def test_method_extract_with_all_params(self, async_client: AsyncParallel) -> None:
client = await async_client.extract(
urls=["string"],
- advanced={
+ advanced_settings={
"excerpt_settings": {"max_chars_per_result": 0},
"fetch_policy": {
"disable_cache_fallback": True,
@@ -196,7 +197,7 @@ async def test_method_search(self, async_client: AsyncParallel) -> None:
async def test_method_search_with_all_params(self, async_client: AsyncParallel) -> None:
client = await async_client.search(
search_queries=["string"],
- advanced={
+ advanced_settings={
"excerpt_settings": {"max_chars_per_result": 0},
"fetch_policy": {
"disable_cache_fallback": True,
@@ -204,6 +205,7 @@ async def test_method_search_with_all_params(self, async_client: AsyncParallel)
"timeout_seconds": 60,
},
"location": "us",
+ "max_results": 0,
"source_policy": {
"after_date": parse_date("2024-01-01"),
"exclude_domains": ["reddit.com", "x.com", ".ai"],
From 00e856464a2828693483a704de83ee5d6c4fe19e Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 18 Apr 2026 06:10:23 +0000
Subject: [PATCH 23/32] perf(client): optimize file structure copying in
multipart requests
---
src/parallel/_files.py | 56 ++++++++++++++++++-
src/parallel/_utils/__init__.py | 1 -
src/parallel/_utils/_utils.py | 15 -----
tests/test_deepcopy.py | 58 -------------------
tests/test_files.py | 99 ++++++++++++++++++++++++++++++++-
5 files changed, 151 insertions(+), 78 deletions(-)
delete mode 100644 tests/test_deepcopy.py
diff --git a/src/parallel/_files.py b/src/parallel/_files.py
index cc14c14..0fdce17 100644
--- a/src/parallel/_files.py
+++ b/src/parallel/_files.py
@@ -3,8 +3,8 @@
import io
import os
import pathlib
-from typing import overload
-from typing_extensions import TypeGuard
+from typing import Sequence, cast, overload
+from typing_extensions import TypeVar, TypeGuard
import anyio
@@ -17,7 +17,9 @@
HttpxFileContent,
HttpxRequestFiles,
)
-from ._utils import is_tuple_t, is_mapping_t, is_sequence_t
+from ._utils import is_list, is_mapping, is_tuple_t, is_mapping_t, is_sequence_t
+
+_T = TypeVar("_T")
def is_base64_file_input(obj: object) -> TypeGuard[Base64FileInput]:
@@ -121,3 +123,51 @@ async def async_read_file_content(file: FileContent) -> HttpxFileContent:
return await anyio.Path(file).read_bytes()
return file
+
+
+def deepcopy_with_paths(item: _T, paths: Sequence[Sequence[str]]) -> _T:
+ """Copy only the containers along the given paths.
+
+ Used to guard against mutation by extract_files without copying the entire structure.
+ Only dicts and lists that lie on a path are copied; everything else
+ is returned by reference.
+
+ For example, given paths=[["foo", "files", "file"]] and the structure:
+ {
+ "foo": {
+ "bar": {"baz": {}},
+        "files": {"file": <file object>}
+ }
+ }
+ The root dict, "foo", and "files" are copied (they lie on the path).
+ "bar" and "baz" are returned by reference (off the path).
+ """
+ return _deepcopy_with_paths(item, paths, 0)
+
+
+def _deepcopy_with_paths(item: _T, paths: Sequence[Sequence[str]], index: int) -> _T:
+ if not paths:
+ return item
+ if is_mapping(item):
+ key_to_paths: dict[str, list[Sequence[str]]] = {}
+ for path in paths:
+ if index < len(path):
+ key_to_paths.setdefault(path[index], []).append(path)
+
+ # if no path continues through this mapping, it won't be mutated and copying it is redundant
+ if not key_to_paths:
+ return item
+
+ result = dict(item)
+ for key, subpaths in key_to_paths.items():
+ if key in result:
+ result[key] = _deepcopy_with_paths(result[key], subpaths, index + 1)
+ return cast(_T, result)
+ if is_list(item):
+ array_paths = [path for path in paths if index < len(path) and path[index] == ""]
+
+ # if no path expects a list here, nothing will be mutated inside it - return by reference
+ if not array_paths:
+ return cast(_T, item)
+ return cast(_T, [_deepcopy_with_paths(entry, array_paths, index + 1) for entry in item])
+ return item
diff --git a/src/parallel/_utils/__init__.py b/src/parallel/_utils/__init__.py
index f1aef8a..3d1284f 100644
--- a/src/parallel/_utils/__init__.py
+++ b/src/parallel/_utils/__init__.py
@@ -25,7 +25,6 @@
coerce_integer as coerce_integer,
file_from_path as file_from_path,
strip_not_given as strip_not_given,
- deepcopy_minimal as deepcopy_minimal,
get_async_library as get_async_library,
maybe_coerce_float as maybe_coerce_float,
get_required_header as get_required_header,
diff --git a/src/parallel/_utils/_utils.py b/src/parallel/_utils/_utils.py
index 569c62b..463155d 100644
--- a/src/parallel/_utils/_utils.py
+++ b/src/parallel/_utils/_utils.py
@@ -181,21 +181,6 @@ def is_iterable(obj: object) -> TypeGuard[Iterable[object]]:
return isinstance(obj, Iterable)
-def deepcopy_minimal(item: _T) -> _T:
- """Minimal reimplementation of copy.deepcopy() that will only copy certain object types:
-
- - mappings, e.g. `dict`
- - list
-
- This is done for performance reasons.
- """
- if is_mapping(item):
- return cast(_T, {k: deepcopy_minimal(v) for k, v in item.items()})
- if is_list(item):
- return cast(_T, [deepcopy_minimal(entry) for entry in item])
- return item
-
-
# copied from https://github.com/Rapptz/RoboDanny
def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str:
size = len(seq)
diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py
deleted file mode 100644
index f6e9be1..0000000
--- a/tests/test_deepcopy.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from parallel._utils import deepcopy_minimal
-
-
-def assert_different_identities(obj1: object, obj2: object) -> None:
- assert obj1 == obj2
- assert id(obj1) != id(obj2)
-
-
-def test_simple_dict() -> None:
- obj1 = {"foo": "bar"}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
-
-
-def test_nested_dict() -> None:
- obj1 = {"foo": {"bar": True}}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1["foo"], obj2["foo"])
-
-
-def test_complex_nested_dict() -> None:
- obj1 = {"foo": {"bar": [{"hello": "world"}]}}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1["foo"], obj2["foo"])
- assert_different_identities(obj1["foo"]["bar"], obj2["foo"]["bar"])
- assert_different_identities(obj1["foo"]["bar"][0], obj2["foo"]["bar"][0])
-
-
-def test_simple_list() -> None:
- obj1 = ["a", "b", "c"]
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
-
-
-def test_nested_list() -> None:
- obj1 = ["a", [1, 2, 3]]
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1[1], obj2[1])
-
-
-class MyObject: ...
-
-
-def test_ignores_other_types() -> None:
- # custom classes
- my_obj = MyObject()
- obj1 = {"foo": my_obj}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert obj1["foo"] is my_obj
-
- # tuples
- obj3 = ("a", "b")
- obj4 = deepcopy_minimal(obj3)
- assert obj3 is obj4
diff --git a/tests/test_files.py b/tests/test_files.py
index 9cd16d8..f488947 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -4,7 +4,8 @@
import pytest
from dirty_equals import IsDict, IsList, IsBytes, IsTuple
-from parallel._files import to_httpx_files, async_to_httpx_files
+from parallel._files import to_httpx_files, deepcopy_with_paths, async_to_httpx_files
+from parallel._utils import extract_files
readme_path = Path(__file__).parent.parent.joinpath("README.md")
@@ -49,3 +50,99 @@ def test_string_not_allowed() -> None:
"file": "foo", # type: ignore
}
)
+
+
+def assert_different_identities(obj1: object, obj2: object) -> None:
+ assert obj1 == obj2
+ assert obj1 is not obj2
+
+
+class TestDeepcopyWithPaths:
+ def test_copies_top_level_dict(self) -> None:
+ original = {"file": b"data", "other": "value"}
+ result = deepcopy_with_paths(original, [["file"]])
+ assert_different_identities(result, original)
+
+ def test_file_value_is_same_reference(self) -> None:
+ file_bytes = b"contents"
+ original = {"file": file_bytes}
+ result = deepcopy_with_paths(original, [["file"]])
+ assert_different_identities(result, original)
+ assert result["file"] is file_bytes
+
+ def test_list_popped_wholesale(self) -> None:
+ files = [b"f1", b"f2"]
+ original = {"files": files, "title": "t"}
+ result = deepcopy_with_paths(original, [["files", ""]])
+ assert_different_identities(result, original)
+ result_files = result["files"]
+ assert isinstance(result_files, list)
+ assert_different_identities(result_files, files)
+
+ def test_nested_array_path_copies_list_and_elements(self) -> None:
+ elem1 = {"file": b"f1", "extra": 1}
+ elem2 = {"file": b"f2", "extra": 2}
+ original = {"items": [elem1, elem2]}
+ result = deepcopy_with_paths(original, [["items", "", "file"]])
+ assert_different_identities(result, original)
+ result_items = result["items"]
+ assert isinstance(result_items, list)
+ assert_different_identities(result_items, original["items"])
+ assert_different_identities(result_items[0], elem1)
+ assert_different_identities(result_items[1], elem2)
+
+ def test_empty_paths_returns_same_object(self) -> None:
+ original = {"foo": "bar"}
+ result = deepcopy_with_paths(original, [])
+ assert result is original
+
+ def test_multiple_paths(self) -> None:
+ f1 = b"file1"
+ f2 = b"file2"
+ original = {"a": f1, "b": f2, "c": "unchanged"}
+ result = deepcopy_with_paths(original, [["a"], ["b"]])
+ assert_different_identities(result, original)
+ assert result["a"] is f1
+ assert result["b"] is f2
+ assert result["c"] is original["c"]
+
+ def test_extract_files_does_not_mutate_original_top_level(self) -> None:
+ file_bytes = b"contents"
+ original = {"file": file_bytes, "other": "value"}
+
+ copied = deepcopy_with_paths(original, [["file"]])
+ extracted = extract_files(copied, paths=[["file"]])
+
+ assert extracted == [("file", file_bytes)]
+ assert original == {"file": file_bytes, "other": "value"}
+ assert copied == {"other": "value"}
+
+ def test_extract_files_does_not_mutate_original_nested_array_path(self) -> None:
+ file1 = b"f1"
+ file2 = b"f2"
+ original = {
+ "items": [
+ {"file": file1, "extra": 1},
+ {"file": file2, "extra": 2},
+ ],
+ "title": "example",
+ }
+
+ copied = deepcopy_with_paths(original, [["items", "", "file"]])
+ extracted = extract_files(copied, paths=[["items", "", "file"]])
+
+ assert extracted == [("items[][file]", file1), ("items[][file]", file2)]
+ assert original == {
+ "items": [
+ {"file": file1, "extra": 1},
+ {"file": file2, "extra": 2},
+ ],
+ "title": "example",
+ }
+ assert copied == {
+ "items": [
+ {"extra": 1},
+ {"extra": 2},
+ ],
+ "title": "example",
+ }
From dec81afb89f46850635daa89e64debe15717d053 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 18 Apr 2026 06:12:10 +0000
Subject: [PATCH 24/32] chore(tests): bump steady to v0.22.1
---
scripts/mock | 6 +++---
scripts/test | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/scripts/mock b/scripts/mock
index 5cd7c15..feebe5e 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -22,9 +22,9 @@ echo "==> Starting mock server with URL ${URL}"
# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
- npm exec --package=@stdy/cli@0.20.2 -- steady --version
+ npm exec --package=@stdy/cli@0.22.1 -- steady --version
- npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
+ npm exec --package=@stdy/cli@0.22.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
# Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
@@ -48,5 +48,5 @@ if [ "$1" == "--daemon" ]; then
echo
else
- npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
+ npm exec --package=@stdy/cli@0.22.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index b8143aa..19acc91 100755
--- a/scripts/test
+++ b/scripts/test
@@ -43,7 +43,7 @@ elif ! steady_is_running ; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.20.2 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.22.1 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
echo
exit 1
From 02db6c07ec5eb254b18732fcb6f7dc43e31de471 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 21 Apr 2026 03:10:25 +0000
Subject: [PATCH 25/32] feat(api): manual updates - update openapi spec
added session_id
---
.stats.yml | 4 +-
src/parallel/_client.py | 36 ++
src/parallel/resources/beta/beta.py | 24 +
src/parallel/resources/beta/task_run.py | 125 ++++--
src/parallel/resources/task_run.py | 175 +++++++-
src/parallel/types/__init__.py | 10 +
.../types/advanced_extract_settings_param.py | 6 +-
.../types/advanced_search_settings_param.py | 6 +-
src/parallel/types/beta/__init__.py | 53 +--
.../types/beta/beta_extract_params.py | 7 +
src/parallel/types/beta/beta_run_input.py | 66 +--
.../types/beta/beta_run_input_param.py | 67 +--
src/parallel/types/beta/beta_search_params.py | 7 +
.../types/beta/beta_task_run_result.py | 99 +----
src/parallel/types/beta/error_event.py | 16 +-
.../findall_candidate_match_status_event.py | 6 +-
.../types/beta/findall_create_params.py | 8 +-
.../types/beta/findall_enrich_input.py | 8 +-
.../types/beta/findall_enrich_params.py | 8 +-
.../types/beta/findall_events_params.py | 6 +-
.../types/beta/findall_events_response.py | 8 +-
.../types/beta/findall_extend_params.py | 6 +-
.../types/beta/findall_ingest_params.py | 6 +-
src/parallel/types/beta/findall_run.py | 6 +-
src/parallel/types/beta/findall_run_result.py | 6 +-
.../types/beta/findall_run_status_event.py | 6 +-
src/parallel/types/beta/findall_schema.py | 6 +-
.../beta/findall_schema_updated_event.py | 6 +-
src/parallel/types/beta/mcp_server.py | 25 +-
src/parallel/types/beta/mcp_server_param.py | 26 +-
src/parallel/types/beta/mcp_tool_call.py | 27 +-
.../types/beta/task_group_add_runs_params.py | 4 +-
.../types/beta/task_group_events_response.py | 4 +-
.../beta/task_group_get_runs_response.py | 4 +-
.../types/beta/task_run_create_params.py | 26 +-
src/parallel/types/beta/task_run_event.py | 37 +-
.../types/beta/task_run_events_response.py | 4 +-
src/parallel/types/beta/webhook.py | 16 +-
src/parallel/types/beta/webhook_param.py | 15 +-
src/parallel/types/client_extract_params.py | 13 +-
src/parallel/types/client_search_params.py | 13 +-
src/parallel/types/error_event.py | 18 +
src/parallel/types/extract_response.py | 8 +
src/parallel/types/mcp_server.py | 27 ++
src/parallel/types/mcp_server_param.py | 29 ++
src/parallel/types/mcp_tool_call.py | 29 ++
src/parallel/types/run_input.py | 72 +++
src/parallel/types/run_input_param.py | 74 ++++
src/parallel/types/search_result.py | 7 +
src/parallel/types/task_run.py | 5 +-
src/parallel/types/task_run_create_params.py | 39 +-
src/parallel/types/task_run_event.py | 37 ++
.../types/task_run_events_response.py | 70 +++
src/parallel/types/task_run_json_output.py | 19 +-
src/parallel/types/task_run_result.py | 32 +-
src/parallel/types/task_run_result_params.py | 5 +
src/parallel/types/task_run_text_output.py | 19 +-
src/parallel/types/webhook.py | 18 +
src/parallel/types/webhook_param.py | 18 +
tests/api_resources/beta/test_task_group.py | 2 +
tests/api_resources/beta/test_task_run.py | 419 ++++++++++--------
tests/api_resources/test_beta.py | 4 +
tests/api_resources/test_client.py | 4 +
tests/api_resources/test_task_run.py | 113 ++++-
64 files changed, 1287 insertions(+), 782 deletions(-)
create mode 100644 src/parallel/types/error_event.py
create mode 100644 src/parallel/types/mcp_server.py
create mode 100644 src/parallel/types/mcp_server_param.py
create mode 100644 src/parallel/types/mcp_tool_call.py
create mode 100644 src/parallel/types/run_input.py
create mode 100644 src/parallel/types/run_input_param.py
create mode 100644 src/parallel/types/task_run_event.py
create mode 100644 src/parallel/types/task_run_events_response.py
create mode 100644 src/parallel/types/webhook.py
create mode 100644 src/parallel/types/webhook_param.py
diff --git a/.stats.yml b/.stats.yml
index b7d708f..26be205 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 23
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-7099c3aac2f0edacf413e426c176150404e1603f709cdd1f0af5c6689d33bd4f.yml
-openapi_spec_hash: eb61e027506aa339543ce33d7f052046
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-c4cc922783460c6e65811d13c9abe35807a551c77c126d452806f93caaaf48fb.yml
+openapi_spec_hash: 99c9e48d4dafaca71f058107008d174b
config_hash: fe820a5a10ee48e143c9e49a153b23b4
diff --git a/src/parallel/_client.py b/src/parallel/_client.py
index 23979fd..44b86ee 100644
--- a/src/parallel/_client.py
+++ b/src/parallel/_client.py
@@ -236,6 +236,7 @@ def extract(
max_chars_total: Optional[int] | Omit = omit,
objective: Optional[str] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -251,6 +252,9 @@ def extract(
advanced_settings: Advanced extract configuration.
+ These settings may impact result quality and latency unless used carefully. See
+ https://docs.parallel.ai/search/advanced-extract-settings for more info.
+
client_model: The model generating this request and consuming the results. Enables
optimizations and tailors default settings for the model's capabilities.
@@ -265,6 +269,10 @@ def extract(
search_queries: Optional keyword search queries, as in SearchRequest. Used together with
objective to focus excerpts on the most relevant content.
+ session_id: Session identifier for calls to search and extract made by an agent as part of a
+ larger task. May be a user-generated random string, e.g. a uuid, or a session_id
+ returned by a previous request.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -283,6 +291,7 @@ def extract(
"max_chars_total": max_chars_total,
"objective": objective,
"search_queries": search_queries,
+ "session_id": session_id,
},
client_extract_params.ClientExtractParams,
),
@@ -301,6 +310,7 @@ def search(
max_chars_total: Optional[int] | Omit = omit,
mode: Optional[Literal["basic", "standard"]] | Omit = omit,
objective: Optional[str] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -318,6 +328,9 @@ def search(
advanced_settings: Advanced search configuration.
+ These settings may impact result quality and latency unless used carefully. See
+ https://docs.parallel.ai/search/advanced-search-settings for more info.
+
client_model: The model generating this request and consuming the results. Enables
optimizations and tailors default settings for the model's capabilities.
@@ -334,6 +347,10 @@ def search(
content. Should be self-contained with enough context to understand the intent
of the search.
+ session_id: Session identifier for calls to search and extract made by an agent as part of a
+ larger task. May be a user-generated random string, e.g. a uuid, or a session_id
+ returned by a previous request.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -352,6 +369,7 @@ def search(
"max_chars_total": max_chars_total,
"mode": mode,
"objective": objective,
+ "session_id": session_id,
},
client_search_params.ClientSearchParams,
),
@@ -562,6 +580,7 @@ async def extract(
max_chars_total: Optional[int] | Omit = omit,
objective: Optional[str] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -577,6 +596,9 @@ async def extract(
advanced_settings: Advanced extract configuration.
+ These settings may impact result quality and latency unless used carefully. See
+ https://docs.parallel.ai/search/advanced-extract-settings for more info.
+
client_model: The model generating this request and consuming the results. Enables
optimizations and tailors default settings for the model's capabilities.
@@ -591,6 +613,10 @@ async def extract(
search_queries: Optional keyword search queries, as in SearchRequest. Used together with
objective to focus excerpts on the most relevant content.
+ session_id: Session identifier for calls to search and extract made by an agent as part of a
+ larger task. May be a user-generated random string, e.g. a uuid, or a session_id
+ returned by a previous request.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -609,6 +635,7 @@ async def extract(
"max_chars_total": max_chars_total,
"objective": objective,
"search_queries": search_queries,
+ "session_id": session_id,
},
client_extract_params.ClientExtractParams,
),
@@ -627,6 +654,7 @@ async def search(
max_chars_total: Optional[int] | Omit = omit,
mode: Optional[Literal["basic", "standard"]] | Omit = omit,
objective: Optional[str] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -644,6 +672,9 @@ async def search(
advanced_settings: Advanced search configuration.
+ These settings may impact result quality and latency unless used carefully. See
+ https://docs.parallel.ai/search/advanced-search-settings for more info.
+
client_model: The model generating this request and consuming the results. Enables
optimizations and tailors default settings for the model's capabilities.
@@ -660,6 +691,10 @@ async def search(
content. Should be self-contained with enough context to understand the intent
of the search.
+ session_id: Session identifier for calls to search and extract made by an agent as part of a
+ larger task. May be a user-generated random string, e.g. a uuid, or a session_id
+ returned by a previous request.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -678,6 +713,7 @@ async def search(
"max_chars_total": max_chars_total,
"mode": mode,
"objective": objective,
+ "session_id": session_id,
},
client_search_params.ClientSearchParams,
),
diff --git a/src/parallel/resources/beta/beta.py b/src/parallel/resources/beta/beta.py
index d32d2a6..c5a43bb 100644
--- a/src/parallel/resources/beta/beta.py
+++ b/src/parallel/resources/beta/beta.py
@@ -122,6 +122,7 @@ def extract(
full_content: beta_extract_params.FullContent | Omit = omit,
objective: Optional[str] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -150,6 +151,10 @@ def extract(
search_queries: If provided, focuses extracted content on the specified keyword search queries.
+ session_id: Session identifier for calls to search and extract made by an agent as part of a
+ larger task. May be a user-generated random string (e.g. a uuid) or a session_id
+ from a previous request.
+
betas: Optional header to specify the beta version(s) to enable.
extra_headers: Send extra headers
@@ -181,6 +186,7 @@ def extract(
"full_content": full_content,
"objective": objective,
"search_queries": search_queries,
+ "session_id": session_id,
},
beta_extract_params.BetaExtractParams,
),
@@ -202,6 +208,7 @@ def search(
objective: Optional[str] | Omit = omit,
processor: Optional[Literal["base", "pro"]] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
source_policy: Optional[SourcePolicy] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -244,6 +251,10 @@ def search(
contain search operators. At least one of objective or search_queries must be
provided.
+ session_id: Session identifier for calls to search and extract made by an agent as part of a
+ larger task. May be a user-generated random string (e.g. a uuid) or a session_id
+ from a previous request.
+
source_policy: Source policy for web search results.
This policy governs which sources are allowed/disallowed in results.
@@ -282,6 +293,7 @@ def search(
"objective": objective,
"processor": processor,
"search_queries": search_queries,
+ "session_id": session_id,
"source_policy": source_policy,
},
beta_search_params.BetaSearchParams,
@@ -361,6 +373,7 @@ async def extract(
full_content: beta_extract_params.FullContent | Omit = omit,
objective: Optional[str] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -389,6 +402,10 @@ async def extract(
search_queries: If provided, focuses extracted content on the specified keyword search queries.
+ session_id: Session identifier for calls to search and extract made by an agent as part of a
+ larger task. May be a user-generated random string (e.g. a uuid) or a session_id
+ from a previous request.
+
betas: Optional header to specify the beta version(s) to enable.
extra_headers: Send extra headers
@@ -420,6 +437,7 @@ async def extract(
"full_content": full_content,
"objective": objective,
"search_queries": search_queries,
+ "session_id": session_id,
},
beta_extract_params.BetaExtractParams,
),
@@ -441,6 +459,7 @@ async def search(
objective: Optional[str] | Omit = omit,
processor: Optional[Literal["base", "pro"]] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
source_policy: Optional[SourcePolicy] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -483,6 +502,10 @@ async def search(
contain search operators. At least one of objective or search_queries must be
provided.
+ session_id: Session identifier for calls to search and extract made by an agent as part of a
+ larger task. May be a user-generated random string (e.g. a uuid) or a session_id
+ from a previous request.
+
source_policy: Source policy for web search results.
This policy governs which sources are allowed/disallowed in results.
@@ -521,6 +544,7 @@ async def search(
"objective": objective,
"processor": processor,
"search_queries": search_queries,
+ "session_id": session_id,
"source_policy": source_policy,
},
beta_search_params.BetaSearchParams,
diff --git a/src/parallel/resources/beta/task_run.py b/src/parallel/resources/beta/task_run.py
index 9fd6752..93a3832 100644
--- a/src/parallel/resources/beta/task_run.py
+++ b/src/parallel/resources/beta/task_run.py
@@ -2,6 +2,7 @@
from __future__ import annotations
+import typing_extensions
from typing import Any, Dict, List, Union, Iterable, Optional, cast
from itertools import chain
@@ -21,11 +22,11 @@
from ...types.beta import task_run_create_params, task_run_result_params
from ..._base_client import make_request_options
from ...types.task_run import TaskRun
+from ...types.webhook_param import WebhookParam
+from ...types.task_run_result import TaskRunResult
from ...types.task_spec_param import TaskSpecParam
-from ...types.beta.webhook_param import WebhookParam
-from ...types.beta.mcp_server_param import McpServerParam
+from ...types.mcp_server_param import McpServerParam
from ...types.beta.parallel_beta_param import ParallelBetaParam
-from ...types.beta.beta_task_run_result import BetaTaskRunResult
from ...types.shared_params.source_policy import SourcePolicy
from ...types.beta.task_run_events_response import TaskRunEventsResponse
@@ -65,11 +66,13 @@ def with_streaming_response(self) -> TaskRunResourceWithStreamingResponse:
"""
return TaskRunResourceWithStreamingResponse(self)
+ @typing_extensions.deprecated("Use GA Task Run instead")
def create(
self,
*,
input: Union[str, Dict[str, object]],
processor: str,
+ advanced_settings: Optional[task_run_create_params.AdvancedSettings] | Omit = omit,
enable_events: Optional[bool] | Omit = omit,
mcp_servers: Optional[Iterable[McpServerParam]] | Omit = omit,
metadata: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
@@ -97,18 +100,16 @@ def create(
processor: Processor to use for the task.
+ advanced_settings: Advanced search configuration for a task run.
+
enable_events: Controls tracking of task run execution progress. When set to true, progress
events are recorded and can be accessed via the
[Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
false, no progress events are tracked. Note that progress tracking cannot be
enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above). To enable this feature in your requests,
- specify `events-sse-2025-07-24` as one of the values in `parallel-beta` header
- (for API calls) or `betas` param (for the SDKs).
+ premium processors (pro and above).
- mcp_servers: Optional list of MCP servers to use for the run. To enable this feature in your
- requests, specify `mcp-server-2025-07-17` as one of the values in
- `parallel-beta` header (for API calls) or `betas` param (for the SDKs).
+ mcp_servers: Optional list of MCP servers to use for the run.
metadata: User-provided metadata stored with the run. Keys and values must be strings with
a maximum length of 16 and 512 characters respectively.
@@ -150,11 +151,12 @@ def create(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._post(
- "/v1/tasks/runs?beta=true",
+ "/v1/tasks/runs",
body=maybe_transform(
{
"input": input,
"processor": processor,
+ "advanced_settings": advanced_settings,
"enable_events": enable_events,
"mcp_servers": mcp_servers,
"metadata": metadata,
@@ -171,6 +173,7 @@ def create(
cast_to=TaskRun,
)
+ @typing_extensions.deprecated("Use GA Task Run instead")
def events(
self,
run_id: str,
@@ -214,6 +217,7 @@ def events(
stream_cls=Stream[TaskRunEventsResponse],
)
+ @typing_extensions.deprecated("Use GA Task Run instead")
def result(
self,
run_id: str,
@@ -226,7 +230,7 @@ def result(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BetaTaskRunResult:
+ ) -> TaskRunResult:
"""
Retrieves a run result by run_id, blocking until the run is completed.
@@ -255,7 +259,7 @@ def result(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._get(
- path_template("/v1/tasks/runs/{run_id}/result?beta=true", run_id=run_id),
+ path_template("/v1/tasks/runs/{run_id}/result", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -263,7 +267,7 @@ def result(
timeout=timeout,
query=maybe_transform({"api_timeout": api_timeout}, task_run_result_params.TaskRunResultParams),
),
- cast_to=BetaTaskRunResult,
+ cast_to=TaskRunResult,
)
@@ -300,11 +304,13 @@ def with_streaming_response(self) -> AsyncTaskRunResourceWithStreamingResponse:
"""
return AsyncTaskRunResourceWithStreamingResponse(self)
+ @typing_extensions.deprecated("Use GA Task Run instead")
async def create(
self,
*,
input: Union[str, Dict[str, object]],
processor: str,
+ advanced_settings: Optional[task_run_create_params.AdvancedSettings] | Omit = omit,
enable_events: Optional[bool] | Omit = omit,
mcp_servers: Optional[Iterable[McpServerParam]] | Omit = omit,
metadata: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
@@ -332,18 +338,16 @@ async def create(
processor: Processor to use for the task.
+ advanced_settings: Advanced search configuration for a task run.
+
enable_events: Controls tracking of task run execution progress. When set to true, progress
events are recorded and can be accessed via the
[Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
false, no progress events are tracked. Note that progress tracking cannot be
enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above). To enable this feature in your requests,
- specify `events-sse-2025-07-24` as one of the values in `parallel-beta` header
- (for API calls) or `betas` param (for the SDKs).
+ premium processors (pro and above).
- mcp_servers: Optional list of MCP servers to use for the run. To enable this feature in your
- requests, specify `mcp-server-2025-07-17` as one of the values in
- `parallel-beta` header (for API calls) or `betas` param (for the SDKs).
+ mcp_servers: Optional list of MCP servers to use for the run.
metadata: User-provided metadata stored with the run. Keys and values must be strings with
a maximum length of 16 and 512 characters respectively.
@@ -385,11 +389,12 @@ async def create(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._post(
- "/v1/tasks/runs?beta=true",
+ "/v1/tasks/runs",
body=await async_maybe_transform(
{
"input": input,
"processor": processor,
+ "advanced_settings": advanced_settings,
"enable_events": enable_events,
"mcp_servers": mcp_servers,
"metadata": metadata,
@@ -406,6 +411,7 @@ async def create(
cast_to=TaskRun,
)
+ @typing_extensions.deprecated("Use GA Task Run instead")
async def events(
self,
run_id: str,
@@ -449,6 +455,7 @@ async def events(
stream_cls=AsyncStream[TaskRunEventsResponse],
)
+ @typing_extensions.deprecated("Use GA Task Run instead")
async def result(
self,
run_id: str,
@@ -461,7 +468,7 @@ async def result(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BetaTaskRunResult:
+ ) -> TaskRunResult:
"""
Retrieves a run result by run_id, blocking until the run is completed.
@@ -490,7 +497,7 @@ async def result(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._get(
- path_template("/v1/tasks/runs/{run_id}/result?beta=true", run_id=run_id),
+ path_template("/v1/tasks/runs/{run_id}/result", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -500,7 +507,7 @@ async def result(
{"api_timeout": api_timeout}, task_run_result_params.TaskRunResultParams
),
),
- cast_to=BetaTaskRunResult,
+ cast_to=TaskRunResult,
)
@@ -508,14 +515,20 @@ class TaskRunResourceWithRawResponse:
def __init__(self, task_run: TaskRunResource) -> None:
self._task_run = task_run
- self.create = to_raw_response_wrapper(
- task_run.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ to_raw_response_wrapper(
+ task_run.create, # pyright: ignore[reportDeprecated],
+ )
)
- self.events = to_raw_response_wrapper(
- task_run.events,
+ self.events = ( # pyright: ignore[reportDeprecated]
+ to_raw_response_wrapper(
+ task_run.events, # pyright: ignore[reportDeprecated],
+ )
)
- self.result = to_raw_response_wrapper(
- task_run.result,
+ self.result = ( # pyright: ignore[reportDeprecated]
+ to_raw_response_wrapper(
+ task_run.result, # pyright: ignore[reportDeprecated],
+ )
)
@@ -523,14 +536,20 @@ class AsyncTaskRunResourceWithRawResponse:
def __init__(self, task_run: AsyncTaskRunResource) -> None:
self._task_run = task_run
- self.create = async_to_raw_response_wrapper(
- task_run.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ async_to_raw_response_wrapper(
+ task_run.create, # pyright: ignore[reportDeprecated]
+ )
)
- self.events = async_to_raw_response_wrapper(
- task_run.events,
+ self.events = ( # pyright: ignore[reportDeprecated]
+ async_to_raw_response_wrapper(
+ task_run.events, # pyright: ignore[reportDeprecated]
+ )
)
- self.result = async_to_raw_response_wrapper(
- task_run.result,
+ self.result = ( # pyright: ignore[reportDeprecated]
+ async_to_raw_response_wrapper(
+ task_run.result, # pyright: ignore[reportDeprecated]
+ )
)
@@ -538,14 +557,20 @@ class TaskRunResourceWithStreamingResponse:
def __init__(self, task_run: TaskRunResource) -> None:
self._task_run = task_run
- self.create = to_streamed_response_wrapper(
- task_run.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ task_run.create, # pyright: ignore[reportDeprecated]
+ )
)
- self.events = to_streamed_response_wrapper(
- task_run.events,
+ self.events = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ task_run.events, # pyright: ignore[reportDeprecated]
+ )
)
- self.result = to_streamed_response_wrapper(
- task_run.result,
+ self.result = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ task_run.result, # pyright: ignore[reportDeprecated]
+ )
)
@@ -553,12 +578,18 @@ class AsyncTaskRunResourceWithStreamingResponse:
def __init__(self, task_run: AsyncTaskRunResource) -> None:
self._task_run = task_run
- self.create = async_to_streamed_response_wrapper(
- task_run.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ task_run.create, # pyright: ignore[reportDeprecated]
+ )
)
- self.events = async_to_streamed_response_wrapper(
- task_run.events,
+ self.events = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ task_run.events, # pyright: ignore[reportDeprecated]
+ )
)
- self.result = async_to_streamed_response_wrapper(
- task_run.result,
+ self.result = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ task_run.result, # pyright: ignore[reportDeprecated]
+ )
)
diff --git a/src/parallel/resources/task_run.py b/src/parallel/resources/task_run.py
index 4efc8d9..58cdb56 100644
--- a/src/parallel/resources/task_run.py
+++ b/src/parallel/resources/task_run.py
@@ -3,7 +3,7 @@
from __future__ import annotations
import time
-from typing import Dict, Type, Union, Optional, overload
+from typing import Any, Dict, List, Type, Union, Iterable, Optional, cast, overload
import httpx
@@ -11,7 +11,7 @@
from ..types import task_run_create_params, task_run_result_params
from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from .._utils import path_template, maybe_transform, async_maybe_transform
+from .._utils import is_given, path_template, maybe_transform, strip_not_given, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -20,8 +20,10 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
+from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options
from ..types.task_run import TaskRun
+from ..types.webhook_param import WebhookParam
from ..types.task_run_result import TaskRunResult
from ..types.task_spec_param import OutputT, OutputSchema, TaskSpecParam
from ..lib._parsing._task_spec import build_task_spec_param
@@ -31,6 +33,9 @@
wait_for_result_async as _wait_for_result_async,
task_run_result_parser,
)
+from ..types.mcp_server_param import McpServerParam
+from ..types.beta.parallel_beta_param import ParallelBetaParam
+from ..types.task_run_events_response import TaskRunEventsResponse
from ..types.shared_params.source_policy import SourcePolicy
__all__ = ["TaskRunResource", "AsyncTaskRunResource"]
@@ -74,10 +79,15 @@ def create(
*,
input: Union[str, Dict[str, object]],
processor: str,
+ advanced_settings: Optional[task_run_create_params.AdvancedSettings] | Omit = omit,
+ enable_events: Optional[bool] | Omit = omit,
+ mcp_servers: Optional[Iterable[McpServerParam]] | Omit = omit,
metadata: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
previous_interaction_id: Optional[str] | Omit = omit,
source_policy: Optional[SourcePolicy] | Omit = omit,
task_spec: Optional[TaskSpecParam] | Omit = omit,
+ webhook: Optional[WebhookParam] | Omit = omit,
+ betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -97,6 +107,17 @@ def create(
processor: Processor to use for the task.
+ advanced_settings: Advanced search configuration for a task run.
+
+ enable_events: Controls tracking of task run execution progress. When set to true, progress
+ events are recorded and can be accessed via the
+ [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
+ false, no progress events are tracked. Note that progress tracking cannot be
+ enabled after a run has been created. The flag is set to true by default for
+ premium processors (pro and above).
+
+ mcp_servers: Optional list of MCP servers to use for the run.
+
metadata: User-provided metadata stored with the run. Keys and values must be strings with
a maximum length of 16 and 512 characters respectively.
@@ -113,6 +134,10 @@ def create(
For convenience bare strings are also accepted as input or output schemas.
+ webhook: Webhooks for Task Runs.
+
+ betas: Optional header to specify the beta version(s) to enable.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -121,16 +146,24 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
+ extra_headers = {
+ **strip_not_given({"parallel-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
+ **(extra_headers or {}),
+ }
return self._post(
"/v1/tasks/runs",
body=maybe_transform(
{
"input": input,
"processor": processor,
+ "advanced_settings": advanced_settings,
+ "enable_events": enable_events,
+ "mcp_servers": mcp_servers,
"metadata": metadata,
"previous_interaction_id": previous_interaction_id,
"source_policy": source_policy,
"task_spec": task_spec,
+ "webhook": webhook,
},
task_run_create_params.TaskRunCreateParams,
),
@@ -175,11 +208,54 @@ def retrieve(
cast_to=TaskRun,
)
+ def events(
+ self,
+ run_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Stream[TaskRunEventsResponse]:
+ """
+ Streams events for a task run.
+
+ Returns a stream of events showing progress updates and state changes for the
+ task run.
+
+ For task runs that did not have enable_events set to true during creation, the
+ frequency of events will be reduced.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not run_id:
+ raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+ extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
+ return self._get(
+ path_template("/v1/tasks/runs/{run_id}/events", run_id=run_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=cast(Any, TaskRunEventsResponse), # Union types cannot be passed in as arguments in the type system
+ stream=True,
+ stream_cls=Stream[TaskRunEventsResponse],
+ )
+
def result(
self,
run_id: str,
*,
api_timeout: int | Omit = omit,
+ betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -191,6 +267,8 @@ def result(
Retrieves a run result by run_id, blocking until the run is completed.
Args:
+ betas: Optional header to specify the beta version(s) to enable.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -201,6 +279,10 @@ def result(
"""
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+ extra_headers = {
+ **strip_not_given({"parallel-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
+ **(extra_headers or {}),
+ }
return self._get(
path_template("/v1/tasks/runs/{run_id}/result", run_id=run_id),
options=make_request_options(
@@ -380,10 +462,15 @@ async def create(
*,
input: Union[str, Dict[str, object]],
processor: str,
+ advanced_settings: Optional[task_run_create_params.AdvancedSettings] | Omit = omit,
+ enable_events: Optional[bool] | Omit = omit,
+ mcp_servers: Optional[Iterable[McpServerParam]] | Omit = omit,
metadata: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
previous_interaction_id: Optional[str] | Omit = omit,
source_policy: Optional[SourcePolicy] | Omit = omit,
task_spec: Optional[TaskSpecParam] | Omit = omit,
+ webhook: Optional[WebhookParam] | Omit = omit,
+ betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -403,6 +490,17 @@ async def create(
processor: Processor to use for the task.
+ advanced_settings: Advanced search configuration for a task run.
+
+ enable_events: Controls tracking of task run execution progress. When set to true, progress
+ events are recorded and can be accessed via the
+ [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
+ false, no progress events are tracked. Note that progress tracking cannot be
+ enabled after a run has been created. The flag is set to true by default for
+ premium processors (pro and above).
+
+ mcp_servers: Optional list of MCP servers to use for the run.
+
metadata: User-provided metadata stored with the run. Keys and values must be strings with
a maximum length of 16 and 512 characters respectively.
@@ -419,6 +517,10 @@ async def create(
For convenience bare strings are also accepted as input or output schemas.
+ webhook: Webhooks for Task Runs.
+
+ betas: Optional header to specify the beta version(s) to enable.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -427,16 +529,24 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
+ extra_headers = {
+ **strip_not_given({"parallel-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
+ **(extra_headers or {}),
+ }
return await self._post(
"/v1/tasks/runs",
body=await async_maybe_transform(
{
"input": input,
"processor": processor,
+ "advanced_settings": advanced_settings,
+ "enable_events": enable_events,
+ "mcp_servers": mcp_servers,
"metadata": metadata,
"previous_interaction_id": previous_interaction_id,
"source_policy": source_policy,
"task_spec": task_spec,
+ "webhook": webhook,
},
task_run_create_params.TaskRunCreateParams,
),
@@ -481,11 +591,54 @@ async def retrieve(
cast_to=TaskRun,
)
+ async def events(
+ self,
+ run_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncStream[TaskRunEventsResponse]:
+ """
+ Streams events for a task run.
+
+ Returns a stream of events showing progress updates and state changes for the
+ task run.
+
+ For task runs that did not have enable_events set to true during creation, the
+ frequency of events will be reduced.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not run_id:
+ raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+ extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
+ return await self._get(
+ path_template("/v1/tasks/runs/{run_id}/events", run_id=run_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=cast(Any, TaskRunEventsResponse), # Union types cannot be passed in as arguments in the type system
+ stream=True,
+ stream_cls=AsyncStream[TaskRunEventsResponse],
+ )
+
async def result(
self,
run_id: str,
*,
api_timeout: int | Omit = omit,
+ betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -497,6 +650,8 @@ async def result(
Retrieves a run result by run_id, blocking until the run is completed.
Args:
+ betas: Optional header to specify the beta version(s) to enable.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -507,6 +662,10 @@ async def result(
"""
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+ extra_headers = {
+ **strip_not_given({"parallel-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
+ **(extra_headers or {}),
+ }
return await self._get(
path_template("/v1/tasks/runs/{run_id}/result", run_id=run_id),
options=make_request_options(
@@ -656,6 +815,9 @@ def __init__(self, task_run: TaskRunResource) -> None:
self.retrieve = to_raw_response_wrapper(
task_run.retrieve,
)
+ self.events = to_raw_response_wrapper(
+ task_run.events,
+ )
self.result = to_raw_response_wrapper(
task_run.result,
)
@@ -671,6 +833,9 @@ def __init__(self, task_run: AsyncTaskRunResource) -> None:
self.retrieve = async_to_raw_response_wrapper(
task_run.retrieve,
)
+ self.events = async_to_raw_response_wrapper(
+ task_run.events,
+ )
self.result = async_to_raw_response_wrapper(
task_run.result,
)
@@ -686,6 +851,9 @@ def __init__(self, task_run: TaskRunResource) -> None:
self.retrieve = to_streamed_response_wrapper(
task_run.retrieve,
)
+ self.events = to_streamed_response_wrapper(
+ task_run.events,
+ )
self.result = to_streamed_response_wrapper(
task_run.result,
)
@@ -701,6 +869,9 @@ def __init__(self, task_run: AsyncTaskRunResource) -> None:
self.retrieve = async_to_streamed_response_wrapper(
task_run.retrieve,
)
+ self.events = async_to_streamed_response_wrapper(
+ task_run.events,
+ )
self.result = async_to_streamed_response_wrapper(
task_run.result,
)
diff --git a/src/parallel/types/__init__.py b/src/parallel/types/__init__.py
index 526d252..570116a 100644
--- a/src/parallel/types/__init__.py
+++ b/src/parallel/types/__init__.py
@@ -8,20 +8,29 @@
SourcePolicy as SourcePolicy,
ErrorResponse as ErrorResponse,
)
+from .webhook import Webhook as Webhook
from .citation import Citation as Citation
from .task_run import TaskRun as TaskRun
+from .run_input import RunInput as RunInput
from .task_spec import TaskSpec as TaskSpec
+from .mcp_server import McpServer as McpServer
from .usage_item import UsageItem as UsageItem
from .auto_schema import AutoSchema as AutoSchema
+from .error_event import ErrorEvent as ErrorEvent
from .field_basis import FieldBasis as FieldBasis
from .json_schema import JsonSchema as JsonSchema
from .text_schema import TextSchema as TextSchema
from .extract_error import ExtractError as ExtractError
+from .mcp_tool_call import McpToolCall as McpToolCall
from .search_result import SearchResult as SearchResult
+from .webhook_param import WebhookParam as WebhookParam
from .extract_result import ExtractResult as ExtractResult
+from .task_run_event import TaskRunEvent as TaskRunEvent
+from .run_input_param import RunInputParam as RunInputParam
from .task_run_result import TaskRunResult as TaskRunResult
from .task_spec_param import TaskSpecParam as TaskSpecParam
from .extract_response import ExtractResponse as ExtractResponse
+from .mcp_server_param import McpServerParam as McpServerParam
from .auto_schema_param import AutoSchemaParam as AutoSchemaParam
from .json_schema_param import JsonSchemaParam as JsonSchemaParam
from .text_schema_param import TextSchemaParam as TextSchemaParam
@@ -35,5 +44,6 @@
from .parsed_task_run_result import ParsedTaskRunResult as ParsedTaskRunResult
from .task_run_create_params import TaskRunCreateParams as TaskRunCreateParams
from .task_run_result_params import TaskRunResultParams as TaskRunResultParams
+from .task_run_events_response import TaskRunEventsResponse as TaskRunEventsResponse
from .advanced_search_settings_param import AdvancedSearchSettingsParam as AdvancedSearchSettingsParam
from .advanced_extract_settings_param import AdvancedExtractSettingsParam as AdvancedExtractSettingsParam
diff --git a/src/parallel/types/advanced_extract_settings_param.py b/src/parallel/types/advanced_extract_settings_param.py
index 00480c5..1328c26 100644
--- a/src/parallel/types/advanced_extract_settings_param.py
+++ b/src/parallel/types/advanced_extract_settings_param.py
@@ -26,7 +26,11 @@ class FullContentFullContentSettings(TypedDict, total=False):
class AdvancedExtractSettingsParam(TypedDict, total=False):
- """Advanced extract configuration."""
+ """Advanced extract configuration.
+
+ These settings may impact result quality and latency unless used carefully.
+ See https://docs.parallel.ai/search/advanced-extract-settings for more info.
+ """
excerpt_settings: Optional[ExcerptSettingsParam]
"""Optional settings for returning relevant excerpts."""
diff --git a/src/parallel/types/advanced_search_settings_param.py b/src/parallel/types/advanced_search_settings_param.py
index b2db575..ea11da4 100644
--- a/src/parallel/types/advanced_search_settings_param.py
+++ b/src/parallel/types/advanced_search_settings_param.py
@@ -13,7 +13,11 @@
class AdvancedSearchSettingsParam(TypedDict, total=False):
- """Advanced search configuration."""
+ """Advanced search configuration.
+
+ These settings may impact result quality and latency unless used carefully.
+ See https://docs.parallel.ai/search/advanced-search-settings for more info.
+ """
excerpt_settings: Optional[ExcerptSettingsParam]
"""Optional settings for returning relevant excerpts."""
diff --git a/src/parallel/types/beta/__init__.py b/src/parallel/types/beta/__init__.py
index ef052aa..dcd2b4c 100644
--- a/src/parallel/types/beta/__init__.py
+++ b/src/parallel/types/beta/__init__.py
@@ -7,14 +7,14 @@
from .task_group import TaskGroup as TaskGroup
from .usage_item import UsageItem as UsageItem
from .error_event import ErrorEvent as ErrorEvent
-from .findall_run import FindAllRun as FindAllRun, FindallRun as FindallRun
+from .findall_run import FindAllRun as FindAllRun
from .extract_error import ExtractError as ExtractError
from .mcp_tool_call import McpToolCall as McpToolCall
from .search_result import SearchResult as SearchResult
from .webhook_param import WebhookParam as WebhookParam
from .beta_run_input import BetaRunInput as BetaRunInput
from .extract_result import ExtractResult as ExtractResult
-from .findall_schema import FindAllSchema as FindAllSchema, FindallSchema as FindallSchema
+from .findall_schema import FindAllSchema as FindAllSchema
from .task_run_event import TaskRunEvent as TaskRunEvent
from .extract_response import ExtractResponse as ExtractResponse
from .mcp_server_param import McpServerParam as McpServerParam
@@ -22,56 +22,29 @@
from .web_search_result import WebSearchResult as WebSearchResult
from .beta_search_params import BetaSearchParams as BetaSearchParams
from .fetch_policy_param import FetchPolicyParam as FetchPolicyParam
-from .findall_run_result import FindAllRunResult as FindAllRunResult, FindallRunResult as FindallRunResult
+from .findall_run_result import FindAllRunResult as FindAllRunResult
from .beta_extract_params import BetaExtractParams as BetaExtractParams
from .parallel_beta_param import ParallelBetaParam as ParallelBetaParam
from .beta_run_input_param import BetaRunInputParam as BetaRunInputParam
from .beta_task_run_result import BetaTaskRunResult as BetaTaskRunResult
-from .findall_enrich_input import FindAllEnrichInput as FindAllEnrichInput, FindallEnrichInput as FindallEnrichInput
-from .findall_create_params import (
- FindAllCreateParams as FindAllCreateParams,
- FindallCreateParams as FindallCreateParams,
-)
-from .findall_enrich_params import (
- FindAllEnrichParams as FindAllEnrichParams,
- FindallEnrichParams as FindallEnrichParams,
-)
-from .findall_events_params import (
- FindAllEventsParams as FindAllEventsParams,
- FindallEventsParams as FindallEventsParams,
-)
-from .findall_extend_params import (
- FindAllExtendParams as FindAllExtendParams,
- FindallExtendParams as FindallExtendParams,
-)
-from .findall_ingest_params import (
- FindAllIngestParams as FindAllIngestParams,
- FindallIngestParams as FindallIngestParams,
-)
+from .findall_enrich_input import FindAllEnrichInput as FindAllEnrichInput
+from .findall_create_params import FindAllCreateParams as FindAllCreateParams
+from .findall_enrich_params import FindAllEnrichParams as FindAllEnrichParams
+from .findall_events_params import FindAllEventsParams as FindAllEventsParams
+from .findall_extend_params import FindAllExtendParams as FindAllExtendParams
+from .findall_ingest_params import FindAllIngestParams as FindAllIngestParams
from .excerpt_settings_param import ExcerptSettingsParam as ExcerptSettingsParam
from .task_run_create_params import TaskRunCreateParams as TaskRunCreateParams
from .task_run_result_params import TaskRunResultParams as TaskRunResultParams
-from .findall_events_response import (
- FindAllEventsResponse as FindAllEventsResponse,
- FindallEventsResponse as FindallEventsResponse,
-)
+from .findall_events_response import FindAllEventsResponse as FindAllEventsResponse
from .task_group_run_response import TaskGroupRunResponse as TaskGroupRunResponse
-from .findall_run_status_event import (
- FindAllRunStatusEvent as FindAllRunStatusEvent,
- FindallRunStatusEvent as FindallRunStatusEvent,
-)
+from .findall_run_status_event import FindAllRunStatusEvent as FindAllRunStatusEvent
from .task_group_create_params import TaskGroupCreateParams as TaskGroupCreateParams
from .task_group_events_params import TaskGroupEventsParams as TaskGroupEventsParams
from .task_run_events_response import TaskRunEventsResponse as TaskRunEventsResponse
from .task_group_add_runs_params import TaskGroupAddRunsParams as TaskGroupAddRunsParams
from .task_group_events_response import TaskGroupEventsResponse as TaskGroupEventsResponse
from .task_group_get_runs_params import TaskGroupGetRunsParams as TaskGroupGetRunsParams
-from .findall_schema_updated_event import (
- FindAllSchemaUpdatedEvent as FindAllSchemaUpdatedEvent,
- FindallSchemaUpdatedEvent as FindallSchemaUpdatedEvent,
-)
+from .findall_schema_updated_event import FindAllSchemaUpdatedEvent as FindAllSchemaUpdatedEvent
from .task_group_get_runs_response import TaskGroupGetRunsResponse as TaskGroupGetRunsResponse
-from .findall_candidate_match_status_event import (
- FindAllCandidateMatchStatusEvent as FindAllCandidateMatchStatusEvent,
- FindallCandidateMatchStatusEvent as FindallCandidateMatchStatusEvent,
-)
+from .findall_candidate_match_status_event import FindAllCandidateMatchStatusEvent as FindAllCandidateMatchStatusEvent
diff --git a/src/parallel/types/beta/beta_extract_params.py b/src/parallel/types/beta/beta_extract_params.py
index e4838ee..60e55a8 100644
--- a/src/parallel/types/beta/beta_extract_params.py
+++ b/src/parallel/types/beta/beta_extract_params.py
@@ -40,6 +40,13 @@ class BetaExtractParams(TypedDict, total=False):
search_queries: Optional[SequenceNotStr[str]]
"""If provided, focuses extracted content on the specified keyword search queries."""
+ session_id: Optional[str]
+ """
+ Session identifier for calls to search and extract made by an agent as part of a
+ larger task. May be a user-generated random string (e.g. a uuid) or a session_id
+ from a previous request.
+ """
+
betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
"""Optional header to specify the beta version(s) to enable."""
diff --git a/src/parallel/types/beta/beta_run_input.py b/src/parallel/types/beta/beta_run_input.py
index 66f63e9..df1860f 100644
--- a/src/parallel/types/beta/beta_run_input.py
+++ b/src/parallel/types/beta/beta_run_input.py
@@ -1,68 +1,8 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, List, Union, Optional
-
-from .webhook import Webhook
-from ..._models import BaseModel
-from ..task_spec import TaskSpec
-from .mcp_server import McpServer
-from ..shared.source_policy import SourcePolicy
+from ..run_input import RunInput
__all__ = ["BetaRunInput"]
-
-class BetaRunInput(BaseModel):
- """Task run input with additional beta fields."""
-
- input: Union[str, Dict[str, object]]
- """Input to the task, either text or a JSON object."""
-
- processor: str
- """Processor to use for the task."""
-
- enable_events: Optional[bool] = None
- """Controls tracking of task run execution progress.
-
- When set to true, progress events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above). To enable this feature in your requests,
- specify `events-sse-2025-07-24` as one of the values in `parallel-beta` header
- (for API calls) or `betas` param (for the SDKs).
- """
-
- mcp_servers: Optional[List[McpServer]] = None
- """
- Optional list of MCP servers to use for the run. To enable this feature in your
- requests, specify `mcp-server-2025-07-17` as one of the values in
- `parallel-beta` header (for API calls) or `betas` param (for the SDKs).
- """
-
- metadata: Optional[Dict[str, Union[str, float, bool]]] = None
- """User-provided metadata stored with the run.
-
- Keys and values must be strings with a maximum length of 16 and 512 characters
- respectively.
- """
-
- previous_interaction_id: Optional[str] = None
- """Interaction ID to use as context for this request."""
-
- source_policy: Optional[SourcePolicy] = None
- """Source policy for web search results.
-
- This policy governs which sources are allowed/disallowed in results.
- """
-
- task_spec: Optional[TaskSpec] = None
- """Specification for a task.
-
- Auto output schemas can be specified by setting `output_schema={"type":"auto"}`.
- Not specifying a TaskSpec is the same as setting an auto output schema.
-
- For convenience bare strings are also accepted as input or output schemas.
- """
-
- webhook: Optional[Webhook] = None
- """Webhooks for Task Runs."""
+BetaRunInput = RunInput
+"""Deprecated alias; use parallel.types.RunInput instead"""
diff --git a/src/parallel/types/beta/beta_run_input_param.py b/src/parallel/types/beta/beta_run_input_param.py
index 0112bc1..fa83fa5 100644
--- a/src/parallel/types/beta/beta_run_input_param.py
+++ b/src/parallel/types/beta/beta_run_input_param.py
@@ -2,69 +2,6 @@
from __future__ import annotations
-from typing import Dict, Union, Iterable, Optional
-from typing_extensions import Required, TypedDict
+from ..run_input_param import RunInputParam
-from .webhook_param import WebhookParam
-from ..task_spec_param import TaskSpecParam
-from .mcp_server_param import McpServerParam
-from ..shared_params.source_policy import SourcePolicy
-
-__all__ = ["BetaRunInputParam"]
-
-
-class BetaRunInputParam(TypedDict, total=False):
- """Task run input with additional beta fields."""
-
- input: Required[Union[str, Dict[str, object]]]
- """Input to the task, either text or a JSON object."""
-
- processor: Required[str]
- """Processor to use for the task."""
-
- enable_events: Optional[bool]
- """Controls tracking of task run execution progress.
-
- When set to true, progress events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above). To enable this feature in your requests,
- specify `events-sse-2025-07-24` as one of the values in `parallel-beta` header
- (for API calls) or `betas` param (for the SDKs).
- """
-
- mcp_servers: Optional[Iterable[McpServerParam]]
- """
- Optional list of MCP servers to use for the run. To enable this feature in your
- requests, specify `mcp-server-2025-07-17` as one of the values in
- `parallel-beta` header (for API calls) or `betas` param (for the SDKs).
- """
-
- metadata: Optional[Dict[str, Union[str, float, bool]]]
- """User-provided metadata stored with the run.
-
- Keys and values must be strings with a maximum length of 16 and 512 characters
- respectively.
- """
-
- previous_interaction_id: Optional[str]
- """Interaction ID to use as context for this request."""
-
- source_policy: Optional[SourcePolicy]
- """Source policy for web search results.
-
- This policy governs which sources are allowed/disallowed in results.
- """
-
- task_spec: Optional[TaskSpecParam]
- """Specification for a task.
-
- Auto output schemas can be specified by setting `output_schema={"type":"auto"}`.
- Not specifying a TaskSpec is the same as setting an auto output schema.
-
- For convenience bare strings are also accepted as input or output schemas.
- """
-
- webhook: Optional[WebhookParam]
- """Webhooks for Task Runs."""
+BetaRunInputParam = RunInputParam
diff --git a/src/parallel/types/beta/beta_search_params.py b/src/parallel/types/beta/beta_search_params.py
index 7e47fd7..35802a7 100644
--- a/src/parallel/types/beta/beta_search_params.py
+++ b/src/parallel/types/beta/beta_search_params.py
@@ -59,6 +59,13 @@ class BetaSearchParams(TypedDict, total=False):
be provided.
"""
+ session_id: Optional[str]
+ """
+ Session identifier for calls to search and extract made by an agent as part of a
+ larger task. May be a user-generated random string (e.g. a uuid) or a session_id
+ from a previous request.
+ """
+
source_policy: Optional[SourcePolicy]
"""Source policy for web search results.
diff --git a/src/parallel/types/beta/beta_task_run_result.py b/src/parallel/types/beta/beta_task_run_result.py
index 5bc7cf7..b14b49a 100644
--- a/src/parallel/types/beta/beta_task_run_result.py
+++ b/src/parallel/types/beta/beta_task_run_result.py
@@ -1,88 +1,31 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
-from ..._models import BaseModel
-from ..task_run import TaskRun
-from ..field_basis import FieldBasis
-from .mcp_tool_call import McpToolCall
-
-__all__ = ["BetaTaskRunResult", "Output", "OutputBetaTaskRunTextOutput", "OutputBetaTaskRunJsonOutput"]
-
-
-class OutputBetaTaskRunTextOutput(BaseModel):
- """Output from a task that returns text."""
-
- basis: List[FieldBasis]
- """Basis for the output.
-
- To include per-list-element basis entries, send the `parallel-beta` header with
- the value `field-basis-2025-11-25` when creating the run.
- """
-
- content: str
- """Text output from the task."""
-
- type: Literal["text"]
- """
- The type of output being returned, as determined by the output schema of the
- task spec.
- """
-
- beta_fields: Optional[Dict[str, object]] = None
- """Always None."""
-
- mcp_tool_calls: Optional[List[McpToolCall]] = None
- """MCP tool calls made by the task."""
-
-
-class OutputBetaTaskRunJsonOutput(BaseModel):
- """Output from a task that returns JSON."""
-
- basis: List[FieldBasis]
- """Basis for the output.
-
- To include per-list-element basis entries, send the `parallel-beta` header with
- the value `field-basis-2025-11-25` when creating the run.
- """
-
- content: Dict[str, object]
- """
- Output from the task as a native JSON object, as determined by the output schema
- of the task spec.
- """
-
- type: Literal["json"]
- """
- The type of output being returned, as determined by the output schema of the
- task spec.
- """
-
- beta_fields: Optional[Dict[str, object]] = None
- """Always None."""
-
- mcp_tool_calls: Optional[List[McpToolCall]] = None
- """MCP tool calls made by the task."""
+from ..task_run_result import TaskRunResult
+from ..task_run_json_output import TaskRunJsonOutput
+from ..task_run_text_output import TaskRunTextOutput
+
+__all__ = [
+ "BetaTaskRunResult",
+ "Output",
+ "OutputBetaTaskRunJsonOutput",
+ "OutputBetaTaskRunTextOutput",
+]
- output_schema: Optional[Dict[str, object]] = None
- """Output schema for the Task Run.
+BetaTaskRunResult = TaskRunResult
+"""This is deprecated, `TaskRunResult` should be used instead"""
- Populated only if the task was executed with an auto schema.
- """
+OutputBetaTaskRunJsonOutput = TaskRunJsonOutput
+"""This is deprecated, `TaskRunJsonOutput` should be used instead"""
+OutputBetaTaskRunTextOutput = TaskRunTextOutput
+"""This is deprecated, `TaskRunTextOutput` should be used instead"""
Output: TypeAlias = Annotated[
- Union[OutputBetaTaskRunTextOutput, OutputBetaTaskRunJsonOutput], PropertyInfo(discriminator="type")
+ Union[OutputBetaTaskRunTextOutput, OutputBetaTaskRunJsonOutput],
+ PropertyInfo(discriminator="type"),
]
-
-
-class BetaTaskRunResult(BaseModel):
- """Result of a beta task run. Available only if beta headers are specified."""
-
- output: Output
- """Output from the task conforming to the output schema."""
-
- run: TaskRun
- """Beta task run object with status 'completed'."""
+"""This is deprecated, use `Union[TaskRunTextOutput, TaskRunJsonOutput]` instead"""
diff --git a/src/parallel/types/beta/error_event.py b/src/parallel/types/beta/error_event.py
index 7ac7abc..becb915 100644
--- a/src/parallel/types/beta/error_event.py
+++ b/src/parallel/types/beta/error_event.py
@@ -1,18 +1,8 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from ..shared.error_object import ErrorObject
+from .. import error_event
__all__ = ["ErrorEvent"]
-
-class ErrorEvent(BaseModel):
- """Event indicating an error."""
-
- error: ErrorObject
- """Error."""
-
- type: Literal["error"]
- """Event type; always 'error'."""
+ErrorEvent = error_event.ErrorEvent
+"""Use parallel.types.task_run.ErrorEvent instead"""
diff --git a/src/parallel/types/beta/findall_candidate_match_status_event.py b/src/parallel/types/beta/findall_candidate_match_status_event.py
index 01ebf20..5f7e3bb 100644
--- a/src/parallel/types/beta/findall_candidate_match_status_event.py
+++ b/src/parallel/types/beta/findall_candidate_match_status_event.py
@@ -7,7 +7,7 @@
from ..._models import BaseModel
from ..field_basis import FieldBasis
-__all__ = ["FindAllCandidateMatchStatusEvent", "FindallCandidateMatchStatusEvent", "Data"]
+__all__ = ["FindAllCandidateMatchStatusEvent", "Data"]
class Data(BaseModel):
@@ -66,7 +66,3 @@ class FindAllCandidateMatchStatusEvent(BaseModel):
findall.candidate.unmatched, findall.candidate.discarded,
findall.candidate.enriched.
"""
-
-
-FindallCandidateMatchStatusEvent = FindAllCandidateMatchStatusEvent # for backwards compatibility with v0.3.4
-"""This is deprecated, `FindAllCandidateMatchStatusEvent` should be used instead"""
diff --git a/src/parallel/types/beta/findall_create_params.py b/src/parallel/types/beta/findall_create_params.py
index 2486de9..5fe2690 100644
--- a/src/parallel/types/beta/findall_create_params.py
+++ b/src/parallel/types/beta/findall_create_params.py
@@ -6,10 +6,10 @@
from typing_extensions import Literal, Required, Annotated, TypedDict
from ..._utils import PropertyInfo
-from .webhook_param import WebhookParam
+from ..webhook_param import WebhookParam
from .parallel_beta_param import ParallelBetaParam
-__all__ = ["FindAllCreateParams", "FindallCreateParams", "MatchCondition", "ExcludeList"]
+__all__ = ["FindAllCreateParams", "MatchCondition", "ExcludeList"]
class FindAllCreateParams(TypedDict, total=False):
@@ -66,7 +66,3 @@ class ExcludeList(TypedDict, total=False):
url: Required[str]
"""URL of the entity to exclude from results."""
-
-
-FindallCreateParams = FindAllCreateParams # for backwards compatibility with v0.3.4
-"""This is deprecated, `FindAllCreateParams` should be used instead"""
diff --git a/src/parallel/types/beta/findall_enrich_input.py b/src/parallel/types/beta/findall_enrich_input.py
index 8b16a9e..f015709 100644
--- a/src/parallel/types/beta/findall_enrich_input.py
+++ b/src/parallel/types/beta/findall_enrich_input.py
@@ -3,10 +3,10 @@
from typing import List, Optional
from ..._models import BaseModel
-from .mcp_server import McpServer
+from ..mcp_server import McpServer
from ..json_schema import JsonSchema
-__all__ = ["FindAllEnrichInput", "FindallEnrichInput"]
+__all__ = ["FindAllEnrichInput"]
class FindAllEnrichInput(BaseModel):
@@ -20,7 +20,3 @@ class FindAllEnrichInput(BaseModel):
processor: Optional[str] = None
"""Processor to use for the task."""
-
-
-FindallEnrichInput = FindAllEnrichInput # for backwards compatibility with v0.3.4
-"""This is deprecated, `FindAllEnrichInput` should be used instead"""
diff --git a/src/parallel/types/beta/findall_enrich_params.py b/src/parallel/types/beta/findall_enrich_params.py
index f2e3490..31ce57e 100644
--- a/src/parallel/types/beta/findall_enrich_params.py
+++ b/src/parallel/types/beta/findall_enrich_params.py
@@ -6,11 +6,11 @@
from typing_extensions import Required, Annotated, TypedDict
from ..._utils import PropertyInfo
-from .mcp_server_param import McpServerParam
+from ..mcp_server_param import McpServerParam
from ..json_schema_param import JsonSchemaParam
from .parallel_beta_param import ParallelBetaParam
-__all__ = ["FindAllEnrichParams", "FindallEnrichParams"]
+__all__ = ["FindAllEnrichParams"]
class FindAllEnrichParams(TypedDict, total=False):
@@ -25,7 +25,3 @@ class FindAllEnrichParams(TypedDict, total=False):
betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
"""Optional header to specify the beta version(s) to enable."""
-
-
-FindallEnrichParams = FindAllEnrichParams # for backwards compatibility with v0.3.4
-"""This is deprecated, `FindAllEnrichParams` should be used instead"""
diff --git a/src/parallel/types/beta/findall_events_params.py b/src/parallel/types/beta/findall_events_params.py
index f818b1b..1747020 100644
--- a/src/parallel/types/beta/findall_events_params.py
+++ b/src/parallel/types/beta/findall_events_params.py
@@ -8,7 +8,7 @@
from ..._utils import PropertyInfo
from .parallel_beta_param import ParallelBetaParam
-__all__ = ["FindAllEventsParams", "FindallEventsParams"]
+__all__ = ["FindAllEventsParams"]
class FindAllEventsParams(TypedDict, total=False):
@@ -18,7 +18,3 @@ class FindAllEventsParams(TypedDict, total=False):
betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
"""Optional header to specify the beta version(s) to enable."""
-
-
-FindallEventsParams = FindAllEventsParams # for backwards compatibility with v0.3.4
-"""This is deprecated, `FindAllEventsParams` should be used instead"""
diff --git a/src/parallel/types/beta/findall_events_response.py b/src/parallel/types/beta/findall_events_response.py
index f3cd93e..0334372 100644
--- a/src/parallel/types/beta/findall_events_response.py
+++ b/src/parallel/types/beta/findall_events_response.py
@@ -4,18 +4,14 @@
from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
-from .error_event import ErrorEvent
+from ..error_event import ErrorEvent
from .findall_run_status_event import FindAllRunStatusEvent
from .findall_schema_updated_event import FindAllSchemaUpdatedEvent
from .findall_candidate_match_status_event import FindAllCandidateMatchStatusEvent
-__all__ = ["FindAllEventsResponse", "FindallEventsResponse"]
+__all__ = ["FindAllEventsResponse"]
FindAllEventsResponse: TypeAlias = Annotated[
Union[FindAllSchemaUpdatedEvent, FindAllRunStatusEvent, FindAllCandidateMatchStatusEvent, ErrorEvent],
PropertyInfo(discriminator="type"),
]
-
-
-FindallEventsResponse = FindAllEventsResponse # for backwards compatibility with v0.3.4
-"""This is deprecated, `FindAllEventsResponse` should be used instead"""
diff --git a/src/parallel/types/beta/findall_extend_params.py b/src/parallel/types/beta/findall_extend_params.py
index 41b2d88..d90226e 100644
--- a/src/parallel/types/beta/findall_extend_params.py
+++ b/src/parallel/types/beta/findall_extend_params.py
@@ -8,7 +8,7 @@
from ..._utils import PropertyInfo
from .parallel_beta_param import ParallelBetaParam
-__all__ = ["FindAllExtendParams", "FindallExtendParams"]
+__all__ = ["FindAllExtendParams"]
class FindAllExtendParams(TypedDict, total=False):
@@ -21,7 +21,3 @@ class FindAllExtendParams(TypedDict, total=False):
betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
"""Optional header to specify the beta version(s) to enable."""
-
-
-FindallExtendParams = FindAllExtendParams # for backwards compatibility with v0.3.4
-"""This is deprecated, `FindAllExtendParams` should be used instead"""
diff --git a/src/parallel/types/beta/findall_ingest_params.py b/src/parallel/types/beta/findall_ingest_params.py
index fec1a52..fbdb3f3 100644
--- a/src/parallel/types/beta/findall_ingest_params.py
+++ b/src/parallel/types/beta/findall_ingest_params.py
@@ -8,7 +8,7 @@
from ..._utils import PropertyInfo
from .parallel_beta_param import ParallelBetaParam
-__all__ = ["FindAllIngestParams", "FindallIngestParams"]
+__all__ = ["FindAllIngestParams"]
class FindAllIngestParams(TypedDict, total=False):
@@ -17,7 +17,3 @@ class FindAllIngestParams(TypedDict, total=False):
betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
"""Optional header to specify the beta version(s) to enable."""
-
-
-FindallIngestParams = FindAllIngestParams # for backwards compatibility with v0.3.4
-"""This is deprecated, `FindAllIngestParams` should be used instead"""
diff --git a/src/parallel/types/beta/findall_run.py b/src/parallel/types/beta/findall_run.py
index ad55025..4db3135 100644
--- a/src/parallel/types/beta/findall_run.py
+++ b/src/parallel/types/beta/findall_run.py
@@ -5,7 +5,7 @@
from ..._models import BaseModel
-__all__ = ["FindAllRun", "FindallRun", "Status", "StatusMetrics"]
+__all__ = ["FindAllRun", "Status", "StatusMetrics"]
class StatusMetrics(BaseModel):
@@ -67,7 +67,3 @@ class FindAllRun(BaseModel):
Timestamp of the latest modification to the FindAll run result, in RFC 3339
format.
"""
-
-
-FindallRun = FindAllRun # for backwards compatibility with v0.3.4
-"""This is deprecated, `FindAllRun` should be used instead"""
diff --git a/src/parallel/types/beta/findall_run_result.py b/src/parallel/types/beta/findall_run_result.py
index d1851a9..2b413f0 100644
--- a/src/parallel/types/beta/findall_run_result.py
+++ b/src/parallel/types/beta/findall_run_result.py
@@ -7,7 +7,7 @@
from .findall_run import FindAllRun
from ..field_basis import FieldBasis
-__all__ = ["FindAllRunResult", "FindallRunResult", "Candidate"]
+__all__ = ["FindAllRunResult", "Candidate"]
class Candidate(BaseModel):
@@ -65,7 +65,3 @@ class FindAllRunResult(BaseModel):
This can be used to resume streaming from the last event.
"""
-
-
-FindallRunResult = FindAllRunResult # for backwards compatibility with v0.3.4
-"""This is deprecated, `FindAllRunResult` should be used instead"""
diff --git a/src/parallel/types/beta/findall_run_status_event.py b/src/parallel/types/beta/findall_run_status_event.py
index fe3ce34..48371ca 100644
--- a/src/parallel/types/beta/findall_run_status_event.py
+++ b/src/parallel/types/beta/findall_run_status_event.py
@@ -6,7 +6,7 @@
from ..._models import BaseModel
from .findall_run import FindAllRun
-__all__ = ["FindAllRunStatusEvent", "FindallRunStatusEvent"]
+__all__ = ["FindAllRunStatusEvent"]
class FindAllRunStatusEvent(BaseModel):
@@ -23,7 +23,3 @@ class FindAllRunStatusEvent(BaseModel):
type: Literal["findall.status"]
"""Event type; always 'findall.status'."""
-
-
-FindallRunStatusEvent = FindAllRunStatusEvent # for backwards compatibility with v0.3.4
-"""This is deprecated, `FindAllRunStatusEvent` should be used instead"""
diff --git a/src/parallel/types/beta/findall_schema.py b/src/parallel/types/beta/findall_schema.py
index d214db7..7b9f4df 100644
--- a/src/parallel/types/beta/findall_schema.py
+++ b/src/parallel/types/beta/findall_schema.py
@@ -6,7 +6,7 @@
from ..._models import BaseModel
from .findall_enrich_input import FindAllEnrichInput
-__all__ = ["FindAllSchema", "FindallSchema", "MatchCondition"]
+__all__ = ["FindAllSchema", "MatchCondition"]
class MatchCondition(BaseModel):
@@ -43,7 +43,3 @@ class FindAllSchema(BaseModel):
match_limit: Optional[int] = None
"""Max number of candidates to evaluate"""
-
-
-FindallSchema = FindAllSchema # for backwards compatibility with v0.3.4
-"""This is deprecated, `FindAllSchema` should be used instead"""
diff --git a/src/parallel/types/beta/findall_schema_updated_event.py b/src/parallel/types/beta/findall_schema_updated_event.py
index 7eab28c..50054ad 100644
--- a/src/parallel/types/beta/findall_schema_updated_event.py
+++ b/src/parallel/types/beta/findall_schema_updated_event.py
@@ -6,7 +6,7 @@
from ..._models import BaseModel
from .findall_schema import FindAllSchema
-__all__ = ["FindAllSchemaUpdatedEvent", "FindallSchemaUpdatedEvent"]
+__all__ = ["FindAllSchemaUpdatedEvent"]
class FindAllSchemaUpdatedEvent(BaseModel):
@@ -23,7 +23,3 @@ class FindAllSchemaUpdatedEvent(BaseModel):
type: Literal["findall.schema.updated"]
"""Event type; always 'findall.schema.updated'."""
-
-
-FindallSchemaUpdatedEvent = FindAllSchemaUpdatedEvent # for backwards compatibility with v0.3.4
-"""This is deprecated, `FindAllSchemaUpdatedEvent` should be used instead"""
diff --git a/src/parallel/types/beta/mcp_server.py b/src/parallel/types/beta/mcp_server.py
index f0d8d12..5dc3c28 100644
--- a/src/parallel/types/beta/mcp_server.py
+++ b/src/parallel/types/beta/mcp_server.py
@@ -1,27 +1,8 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
+from .. import mcp_server
__all__ = ["McpServer"]
-
-class McpServer(BaseModel):
- """MCP server configuration."""
-
- name: str
- """Name of the MCP server."""
-
- url: str
- """URL of the MCP server."""
-
- allowed_tools: Optional[List[str]] = None
- """List of allowed tools for the MCP server."""
-
- headers: Optional[Dict[str, str]] = None
- """Headers for the MCP server."""
-
- type: Optional[Literal["url"]] = None
- """Type of MCP server being configured. Always `url`."""
+McpServer = mcp_server.McpServer
+"""Use parallel.types.task_run.McpServer instead"""
diff --git a/src/parallel/types/beta/mcp_server_param.py b/src/parallel/types/beta/mcp_server_param.py
index 02052a2..b406f2d 100644
--- a/src/parallel/types/beta/mcp_server_param.py
+++ b/src/parallel/types/beta/mcp_server_param.py
@@ -2,28 +2,6 @@
from __future__ import annotations
-from typing import Dict, Optional
-from typing_extensions import Literal, Required, TypedDict
+from .. import mcp_server_param
-from ..._types import SequenceNotStr
-
-__all__ = ["McpServerParam"]
-
-
-class McpServerParam(TypedDict, total=False):
- """MCP server configuration."""
-
- name: Required[str]
- """Name of the MCP server."""
-
- url: Required[str]
- """URL of the MCP server."""
-
- allowed_tools: Optional[SequenceNotStr[str]]
- """List of allowed tools for the MCP server."""
-
- headers: Optional[Dict[str, str]]
- """Headers for the MCP server."""
-
- type: Literal["url"]
- """Type of MCP server being configured. Always `url`."""
+McpServerParam = mcp_server_param.McpServerParam
diff --git a/src/parallel/types/beta/mcp_tool_call.py b/src/parallel/types/beta/mcp_tool_call.py
index d04b217..785a3d5 100644
--- a/src/parallel/types/beta/mcp_tool_call.py
+++ b/src/parallel/types/beta/mcp_tool_call.py
@@ -1,29 +1,8 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
-
-from ..._models import BaseModel
+from .. import mcp_tool_call
__all__ = ["McpToolCall"]
-
-class McpToolCall(BaseModel):
- """Result of an MCP tool call."""
-
- arguments: str
- """Arguments used to call the MCP tool."""
-
- server_name: str
- """Name of the MCP server."""
-
- tool_call_id: str
- """Identifier for the tool call."""
-
- tool_name: str
- """Name of the tool being called."""
-
- content: Optional[str] = None
- """Output received from the tool call, if successful."""
-
- error: Optional[str] = None
- """Error message if the tool call failed."""
+McpToolCall = mcp_tool_call.McpToolCall
+"""Use parallel.types.task_run.McpToolCall instead"""
diff --git a/src/parallel/types/beta/task_group_add_runs_params.py b/src/parallel/types/beta/task_group_add_runs_params.py
index cee578d..2de405d 100644
--- a/src/parallel/types/beta/task_group_add_runs_params.py
+++ b/src/parallel/types/beta/task_group_add_runs_params.py
@@ -6,15 +6,15 @@
from typing_extensions import Required, Annotated, TypedDict
from ..._utils import PropertyInfo
+from ..run_input_param import RunInputParam
from ..task_spec_param import TaskSpecParam
from .parallel_beta_param import ParallelBetaParam
-from .beta_run_input_param import BetaRunInputParam
__all__ = ["TaskGroupAddRunsParams"]
class TaskGroupAddRunsParams(TypedDict, total=False):
- inputs: Required[Iterable[BetaRunInputParam]]
+ inputs: Required[Iterable[RunInputParam]]
"""List of task runs to execute.
Up to 1,000 runs can be specified per request. If you'd like to add more runs,
diff --git a/src/parallel/types/beta/task_group_events_response.py b/src/parallel/types/beta/task_group_events_response.py
index c1db25b..9728390 100644
--- a/src/parallel/types/beta/task_group_events_response.py
+++ b/src/parallel/types/beta/task_group_events_response.py
@@ -5,8 +5,8 @@
from ..._utils import PropertyInfo
from ..._models import BaseModel
-from .error_event import ErrorEvent
-from .task_run_event import TaskRunEvent
+from ..error_event import ErrorEvent
+from ..task_run_event import TaskRunEvent
from .task_group_status import TaskGroupStatus
__all__ = ["TaskGroupEventsResponse", "TaskGroupStatusEvent"]
diff --git a/src/parallel/types/beta/task_group_get_runs_response.py b/src/parallel/types/beta/task_group_get_runs_response.py
index b287dcb..95eab2c 100644
--- a/src/parallel/types/beta/task_group_get_runs_response.py
+++ b/src/parallel/types/beta/task_group_get_runs_response.py
@@ -4,8 +4,8 @@
from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
-from .error_event import ErrorEvent
-from .task_run_event import TaskRunEvent
+from ..error_event import ErrorEvent
+from ..task_run_event import TaskRunEvent
__all__ = ["TaskGroupGetRunsResponse"]
diff --git a/src/parallel/types/beta/task_run_create_params.py b/src/parallel/types/beta/task_run_create_params.py
index f7290c9..c0e1c76 100644
--- a/src/parallel/types/beta/task_run_create_params.py
+++ b/src/parallel/types/beta/task_run_create_params.py
@@ -6,13 +6,13 @@
from typing_extensions import Required, Annotated, TypedDict
from ..._utils import PropertyInfo
-from .webhook_param import WebhookParam
+from ..webhook_param import WebhookParam
from ..task_spec_param import TaskSpecParam
-from .mcp_server_param import McpServerParam
+from ..mcp_server_param import McpServerParam
from .parallel_beta_param import ParallelBetaParam
from ..shared_params.source_policy import SourcePolicy
-__all__ = ["TaskRunCreateParams"]
+__all__ = ["TaskRunCreateParams", "AdvancedSettings"]
class TaskRunCreateParams(TypedDict, total=False):
@@ -22,6 +22,9 @@ class TaskRunCreateParams(TypedDict, total=False):
processor: Required[str]
"""Processor to use for the task."""
+ advanced_settings: Optional[AdvancedSettings]
+ """Advanced search configuration for a task run."""
+
enable_events: Optional[bool]
"""Controls tracking of task run execution progress.
@@ -29,17 +32,11 @@ class TaskRunCreateParams(TypedDict, total=False):
[Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
false, no progress events are tracked. Note that progress tracking cannot be
enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above). To enable this feature in your requests,
- specify `events-sse-2025-07-24` as one of the values in `parallel-beta` header
- (for API calls) or `betas` param (for the SDKs).
+ premium processors (pro and above).
"""
mcp_servers: Optional[Iterable[McpServerParam]]
- """
- Optional list of MCP servers to use for the run. To enable this feature in your
- requests, specify `mcp-server-2025-07-17` as one of the values in
- `parallel-beta` header (for API calls) or `betas` param (for the SDKs).
- """
+ """Optional list of MCP servers to use for the run."""
metadata: Optional[Dict[str, Union[str, float, bool]]]
"""User-provided metadata stored with the run.
@@ -71,3 +68,10 @@ class TaskRunCreateParams(TypedDict, total=False):
betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
"""Optional header to specify the beta version(s) to enable."""
+
+
+class AdvancedSettings(TypedDict, total=False):
+ """Advanced search configuration for a task run."""
+
+ location: Optional[str]
+ """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
diff --git a/src/parallel/types/beta/task_run_event.py b/src/parallel/types/beta/task_run_event.py
index c4c6c2f..e518907 100644
--- a/src/parallel/types/beta/task_run_event.py
+++ b/src/parallel/types/beta/task_run_event.py
@@ -1,37 +1,8 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
+from .. import task_run_event
-from ..._utils import PropertyInfo
-from ..._models import BaseModel
-from ..task_run import TaskRun
-from .beta_run_input import BetaRunInput
-from ..task_run_json_output import TaskRunJsonOutput
-from ..task_run_text_output import TaskRunTextOutput
+__all__ = ["TaskRunEvent"]
-__all__ = ["TaskRunEvent", "Output"]
-
-Output: TypeAlias = Annotated[Union[TaskRunTextOutput, TaskRunJsonOutput, None], PropertyInfo(discriminator="type")]
-
-
-class TaskRunEvent(BaseModel):
- """Event when a task run transitions to a non-active status.
-
- May indicate completion, cancellation, or failure.
- """
-
- event_id: Optional[str] = None
- """Cursor to resume the event stream. Always empty for non Task Group runs."""
-
- run: TaskRun
- """Task run object."""
-
- type: Literal["task_run.state"]
- """Event type; always 'task_run.state'."""
-
- input: Optional[BetaRunInput] = None
- """Task run input with additional beta fields."""
-
- output: Optional[Output] = None
- """Output from the run; included only if requested and if status == `completed`."""
+TaskRunEvent = task_run_event.TaskRunEvent
+"""Use parallel.types.task_run.TaskRunEvent instead"""
diff --git a/src/parallel/types/beta/task_run_events_response.py b/src/parallel/types/beta/task_run_events_response.py
index 79088f5..1516f91 100644
--- a/src/parallel/types/beta/task_run_events_response.py
+++ b/src/parallel/types/beta/task_run_events_response.py
@@ -5,8 +5,8 @@
from ..._utils import PropertyInfo
from ..._models import BaseModel
-from .error_event import ErrorEvent
-from .task_run_event import TaskRunEvent
+from ..error_event import ErrorEvent
+from ..task_run_event import TaskRunEvent
__all__ = [
"TaskRunEventsResponse",
diff --git a/src/parallel/types/beta/webhook.py b/src/parallel/types/beta/webhook.py
index 6741b89..814b154 100644
--- a/src/parallel/types/beta/webhook.py
+++ b/src/parallel/types/beta/webhook.py
@@ -1,18 +1,8 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
+from .. import webhook
__all__ = ["Webhook"]
-
-class Webhook(BaseModel):
- """Webhooks for Task Runs."""
-
- url: str
- """URL for the webhook."""
-
- event_types: Optional[List[Literal["task_run.status"]]] = None
- """Event types to send the webhook notifications for."""
+Webhook = webhook.Webhook
+"""Use parallel.types.task_run.Webhook instead"""
diff --git a/src/parallel/types/beta/webhook_param.py b/src/parallel/types/beta/webhook_param.py
index 90a667d..a32d4c7 100644
--- a/src/parallel/types/beta/webhook_param.py
+++ b/src/parallel/types/beta/webhook_param.py
@@ -2,17 +2,6 @@
from __future__ import annotations
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
+from .. import webhook_param
-__all__ = ["WebhookParam"]
-
-
-class WebhookParam(TypedDict, total=False):
- """Webhooks for Task Runs."""
-
- url: Required[str]
- """URL for the webhook."""
-
- event_types: List[Literal["task_run.status"]]
- """Event types to send the webhook notifications for."""
+WebhookParam = webhook_param.WebhookParam
diff --git a/src/parallel/types/client_extract_params.py b/src/parallel/types/client_extract_params.py
index 0673b3f..c72534b 100644
--- a/src/parallel/types/client_extract_params.py
+++ b/src/parallel/types/client_extract_params.py
@@ -16,7 +16,11 @@ class ClientExtractParams(TypedDict, total=False):
"""URLs to extract content from. Up to 20 URLs."""
advanced_settings: Optional[AdvancedExtractSettingsParam]
- """Advanced extract configuration."""
+ """Advanced extract configuration.
+
+ These settings may impact result quality and latency unless used carefully. See
+ https://docs.parallel.ai/search/advanced-extract-settings for more info.
+ """
client_model: Optional[str]
"""The model generating this request and consuming the results.
@@ -43,3 +47,10 @@ class ClientExtractParams(TypedDict, total=False):
Used together with objective to focus excerpts on the most relevant content.
"""
+
+ session_id: Optional[str]
+ """
+ Session identifier for calls to search and extract made by an agent as part of a
+ larger task. May be a user-generated random string, e.g. a uuid, or a session_id
+ returned by a previous request.
+ """
diff --git a/src/parallel/types/client_search_params.py b/src/parallel/types/client_search_params.py
index fa39fea..166c4a5 100644
--- a/src/parallel/types/client_search_params.py
+++ b/src/parallel/types/client_search_params.py
@@ -20,7 +20,11 @@ class ClientSearchParams(TypedDict, total=False):
"""
advanced_settings: Optional[AdvancedSearchSettingsParam]
- """Advanced search configuration."""
+ """Advanced search configuration.
+
+ These settings may impact result quality and latency unless used carefully. See
+ https://docs.parallel.ai/search/advanced-search-settings for more info.
+ """
client_model: Optional[str]
"""The model generating this request and consuming the results.
@@ -49,3 +53,10 @@ class ClientSearchParams(TypedDict, total=False):
content. Should be self-contained with enough context to understand the intent
of the search.
"""
+
+ session_id: Optional[str]
+ """
+ Session identifier for calls to search and extract made by an agent as part of a
+ larger task. May be a user-generated random string, e.g. a uuid, or a session_id
+ returned by a previous request.
+ """
diff --git a/src/parallel/types/error_event.py b/src/parallel/types/error_event.py
new file mode 100644
index 0000000..3ededc9
--- /dev/null
+++ b/src/parallel/types/error_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .shared.error_object import ErrorObject
+
+__all__ = ["ErrorEvent"]
+
+
+class ErrorEvent(BaseModel):
+ """Event indicating an error."""
+
+ error: ErrorObject
+ """Error."""
+
+ type: Literal["error"]
+ """Event type; always 'error'."""
diff --git a/src/parallel/types/extract_response.py b/src/parallel/types/extract_response.py
index 8d2830b..9f01b8e 100644
--- a/src/parallel/types/extract_response.py
+++ b/src/parallel/types/extract_response.py
@@ -23,6 +23,14 @@ class ExtractResponse(BaseModel):
results: List[ExtractResult]
"""Successful extract results."""
+ session_id: str
+ """Session identifier.
+
+ Echoed back from the request if provided, otherwise generated by the server.
+ Should be passed to future search and extract calls made by the agent as part of
+ the same larger task.
+ """
+
usage: Optional[List[UsageItem]] = None
"""Usage metrics for the extract request."""
diff --git a/src/parallel/types/mcp_server.py b/src/parallel/types/mcp_server.py
new file mode 100644
index 0000000..7c4ba25
--- /dev/null
+++ b/src/parallel/types/mcp_server.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["McpServer"]
+
+
+class McpServer(BaseModel):
+ """MCP server configuration."""
+
+ name: str
+ """Name of the MCP server."""
+
+ url: str
+ """URL of the MCP server."""
+
+ allowed_tools: Optional[List[str]] = None
+ """List of allowed tools for the MCP server."""
+
+ headers: Optional[Dict[str, str]] = None
+ """Headers for the MCP server."""
+
+ type: Optional[Literal["url"]] = None
+ """Type of MCP server being configured. Always `url`."""
diff --git a/src/parallel/types/mcp_server_param.py b/src/parallel/types/mcp_server_param.py
new file mode 100644
index 0000000..f3f207a
--- /dev/null
+++ b/src/parallel/types/mcp_server_param.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = ["McpServerParam"]
+
+
+class McpServerParam(TypedDict, total=False):
+ """MCP server configuration."""
+
+ name: Required[str]
+ """Name of the MCP server."""
+
+ url: Required[str]
+ """URL of the MCP server."""
+
+ allowed_tools: Optional[SequenceNotStr[str]]
+ """List of allowed tools for the MCP server."""
+
+ headers: Optional[Dict[str, str]]
+ """Headers for the MCP server."""
+
+ type: Literal["url"]
+ """Type of MCP server being configured. Always `url`."""
diff --git a/src/parallel/types/mcp_tool_call.py b/src/parallel/types/mcp_tool_call.py
new file mode 100644
index 0000000..6cdccc2
--- /dev/null
+++ b/src/parallel/types/mcp_tool_call.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["McpToolCall"]
+
+
+class McpToolCall(BaseModel):
+ """Result of an MCP tool call."""
+
+ arguments: str
+ """Arguments used to call the MCP tool."""
+
+ server_name: str
+ """Name of the MCP server."""
+
+ tool_call_id: str
+ """Identifier for the tool call."""
+
+ tool_name: str
+ """Name of the tool being called."""
+
+ content: Optional[str] = None
+ """Output received from the tool call, if successful."""
+
+ error: Optional[str] = None
+ """Error message if the tool call failed."""
diff --git a/src/parallel/types/run_input.py b/src/parallel/types/run_input.py
new file mode 100644
index 0000000..13337ed
--- /dev/null
+++ b/src/parallel/types/run_input.py
@@ -0,0 +1,72 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+
+from .webhook import Webhook
+from .._models import BaseModel
+from .task_spec import TaskSpec
+from .mcp_server import McpServer
+from .shared.source_policy import SourcePolicy
+
+__all__ = ["RunInput", "AdvancedSettings"]
+
+
+class AdvancedSettings(BaseModel):
+ """Advanced search configuration for a task run."""
+
+ location: Optional[str] = None
+ """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
+
+
+class RunInput(BaseModel):
+ """Request to run a task."""
+
+ input: Union[str, Dict[str, object]]
+ """Input to the task, either text or a JSON object."""
+
+ processor: str
+ """Processor to use for the task."""
+
+ advanced_settings: Optional[AdvancedSettings] = None
+ """Advanced search configuration for a task run."""
+
+ enable_events: Optional[bool] = None
+ """Controls tracking of task run execution progress.
+
+ When set to true, progress events are recorded and can be accessed via the
+ [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
+ false, no progress events are tracked. Note that progress tracking cannot be
+ enabled after a run has been created. The flag is set to true by default for
+ premium processors (pro and above).
+ """
+
+ mcp_servers: Optional[List[McpServer]] = None
+ """Optional list of MCP servers to use for the run."""
+
+ metadata: Optional[Dict[str, Union[str, float, bool]]] = None
+ """User-provided metadata stored with the run.
+
+ Keys and values must be strings with a maximum length of 16 and 512 characters
+ respectively.
+ """
+
+ previous_interaction_id: Optional[str] = None
+ """Interaction ID to use as context for this request."""
+
+ source_policy: Optional[SourcePolicy] = None
+ """Source policy for web search results.
+
+ This policy governs which sources are allowed/disallowed in results.
+ """
+
+ task_spec: Optional[TaskSpec] = None
+ """Specification for a task.
+
+ Auto output schemas can be specified by setting `output_schema={"type":"auto"}`.
+ Not specifying a TaskSpec is the same as setting an auto output schema.
+
+ For convenience bare strings are also accepted as input or output schemas.
+ """
+
+ webhook: Optional[Webhook] = None
+ """Webhooks for Task Runs."""
diff --git a/src/parallel/types/run_input_param.py b/src/parallel/types/run_input_param.py
new file mode 100644
index 0000000..88afb18
--- /dev/null
+++ b/src/parallel/types/run_input_param.py
@@ -0,0 +1,74 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable, Optional
+from typing_extensions import Required, TypedDict
+
+from .webhook_param import WebhookParam
+from .task_spec_param import TaskSpecParam
+from .mcp_server_param import McpServerParam
+from .shared_params.source_policy import SourcePolicy
+
+__all__ = ["RunInputParam", "AdvancedSettings"]
+
+
+class AdvancedSettings(TypedDict, total=False):
+ """Advanced search configuration for a task run."""
+
+ location: Optional[str]
+ """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
+
+
+class RunInputParam(TypedDict, total=False):
+ """Request to run a task."""
+
+ input: Required[Union[str, Dict[str, object]]]
+ """Input to the task, either text or a JSON object."""
+
+ processor: Required[str]
+ """Processor to use for the task."""
+
+ advanced_settings: Optional[AdvancedSettings]
+ """Advanced search configuration for a task run."""
+
+ enable_events: Optional[bool]
+ """Controls tracking of task run execution progress.
+
+ When set to true, progress events are recorded and can be accessed via the
+ [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
+ false, no progress events are tracked. Note that progress tracking cannot be
+ enabled after a run has been created. The flag is set to true by default for
+ premium processors (pro and above).
+ """
+
+ mcp_servers: Optional[Iterable[McpServerParam]]
+ """Optional list of MCP servers to use for the run."""
+
+ metadata: Optional[Dict[str, Union[str, float, bool]]]
+ """User-provided metadata stored with the run.
+
+ Keys and values must be strings with a maximum length of 16 and 512 characters
+ respectively.
+ """
+
+ previous_interaction_id: Optional[str]
+ """Interaction ID to use as context for this request."""
+
+ source_policy: Optional[SourcePolicy]
+ """Source policy for web search results.
+
+ This policy governs which sources are allowed/disallowed in results.
+ """
+
+ task_spec: Optional[TaskSpecParam]
+ """Specification for a task.
+
+ Auto output schemas can be specified by setting `output_schema={"type":"auto"}`.
+ Not specifying a TaskSpec is the same as setting an auto output schema.
+
+ For convenience bare strings are also accepted as input or output schemas.
+ """
+
+ webhook: Optional[WebhookParam]
+ """Webhooks for Task Runs."""
diff --git a/src/parallel/types/search_result.py b/src/parallel/types/search_result.py
index c7e3a4f..a2f5926 100644
--- a/src/parallel/types/search_result.py
+++ b/src/parallel/types/search_result.py
@@ -19,6 +19,13 @@ class SearchResult(BaseModel):
search_id: str
"""Search ID. Example: `search_cad0a6d2dec046bd95ae900527d880e7`"""
+ session_id: str
+ """
+ Session identifier, echoed back from the request if provided, otherwise
+ generated by the server. Should be passed to future search and extract calls
+ made by the agent as part of the same larger task.
+ """
+
usage: Optional[List[UsageItem]] = None
"""Usage metrics for the search request."""
diff --git a/src/parallel/types/task_run.py b/src/parallel/types/task_run.py
index 0bdb0b1..a4f52af 100644
--- a/src/parallel/types/task_run.py
+++ b/src/parallel/types/task_run.py
@@ -9,10 +9,7 @@
from .shared.warning import Warning
from .shared.error_object import ErrorObject
-__all__ = [
- "TaskRun",
- "Warning", # for backwards compatibility with v0.1.3
-]
+__all__ = ["TaskRun"]
class TaskRun(BaseModel):
diff --git a/src/parallel/types/task_run_create_params.py b/src/parallel/types/task_run_create_params.py
index 5f1c572..c3a8a7f 100644
--- a/src/parallel/types/task_run_create_params.py
+++ b/src/parallel/types/task_run_create_params.py
@@ -2,13 +2,17 @@
from __future__ import annotations
-from typing import Dict, Union, Optional
-from typing_extensions import Required, TypedDict
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Required, Annotated, TypedDict
+from .._utils import PropertyInfo
+from .webhook_param import WebhookParam
from .task_spec_param import TaskSpecParam
+from .mcp_server_param import McpServerParam
+from .beta.parallel_beta_param import ParallelBetaParam
from .shared_params.source_policy import SourcePolicy
-__all__ = ["TaskRunCreateParams"]
+__all__ = ["TaskRunCreateParams", "AdvancedSettings"]
class TaskRunCreateParams(TypedDict, total=False):
@@ -18,6 +22,22 @@ class TaskRunCreateParams(TypedDict, total=False):
processor: Required[str]
"""Processor to use for the task."""
+ advanced_settings: Optional[AdvancedSettings]
+ """Advanced search configuration for a task run."""
+
+ enable_events: Optional[bool]
+ """Controls tracking of task run execution progress.
+
+ When set to true, progress events are recorded and can be accessed via the
+ [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
+ false, no progress events are tracked. Note that progress tracking cannot be
+ enabled after a run has been created. The flag is set to true by default for
+ premium processors (pro and above).
+ """
+
+ mcp_servers: Optional[Iterable[McpServerParam]]
+ """Optional list of MCP servers to use for the run."""
+
metadata: Optional[Dict[str, Union[str, float, bool]]]
"""User-provided metadata stored with the run.
@@ -42,3 +62,16 @@ class TaskRunCreateParams(TypedDict, total=False):
For convenience bare strings are also accepted as input or output schemas.
"""
+
+ webhook: Optional[WebhookParam]
+ """Webhooks for Task Runs."""
+
+ betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
+ """Optional header to specify the beta version(s) to enable."""
+
+
+class AdvancedSettings(TypedDict, total=False):
+ """Advanced search configuration for a task run."""
+
+ location: Optional[str]
+ """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
diff --git a/src/parallel/types/task_run_event.py b/src/parallel/types/task_run_event.py
new file mode 100644
index 0000000..4ed9071
--- /dev/null
+++ b/src/parallel/types/task_run_event.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .._models import BaseModel
+from .task_run import TaskRun
+from .run_input import RunInput
+from .task_run_json_output import TaskRunJsonOutput
+from .task_run_text_output import TaskRunTextOutput
+
+__all__ = ["TaskRunEvent", "Output"]
+
+Output: TypeAlias = Annotated[Union[TaskRunTextOutput, TaskRunJsonOutput, None], PropertyInfo(discriminator="type")]
+
+
+class TaskRunEvent(BaseModel):
+ """Event when a task run transitions to a non-active status.
+
+ May indicate completion, cancellation, or failure.
+ """
+
+ event_id: Optional[str] = None
+    """Cursor to resume the event stream. Always empty for non-Task-Group runs."""
+
+ run: TaskRun
+ """Task run object."""
+
+ type: Literal["task_run.state"]
+ """Event type; always 'task_run.state'."""
+
+ input: Optional[RunInput] = None
+ """Request to run a task."""
+
+ output: Optional[Output] = None
+ """Output from the run; included only if requested and if status == `completed`."""
diff --git a/src/parallel/types/task_run_events_response.py b/src/parallel/types/task_run_events_response.py
new file mode 100644
index 0000000..20ded6e
--- /dev/null
+++ b/src/parallel/types/task_run_events_response.py
@@ -0,0 +1,70 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .._models import BaseModel
+from .error_event import ErrorEvent
+from .task_run_event import TaskRunEvent
+
+__all__ = [
+ "TaskRunEventsResponse",
+ "TaskRunProgressStatsEvent",
+ "TaskRunProgressStatsEventSourceStats",
+ "TaskRunProgressMessageEvent",
+]
+
+
+class TaskRunProgressStatsEventSourceStats(BaseModel):
+ """Source stats describing progress so far."""
+
+ num_sources_considered: Optional[int] = None
+ """Number of sources considered in processing the task."""
+
+ num_sources_read: Optional[int] = None
+ """Number of sources read in processing the task."""
+
+ sources_read_sample: Optional[List[str]] = None
+ """A sample of URLs of sources read in processing the task."""
+
+
+class TaskRunProgressStatsEvent(BaseModel):
+ """A progress update for a task run."""
+
+ progress_meter: float
+ """Completion percentage of the task run.
+
+ Ranges from 0 to 100 where 0 indicates no progress and 100 indicates completion.
+ """
+
+ source_stats: TaskRunProgressStatsEventSourceStats
+ """Source stats describing progress so far."""
+
+ type: Literal["task_run.progress_stats"]
+ """Event type; always 'task_run.progress_stats'."""
+
+
+class TaskRunProgressMessageEvent(BaseModel):
+ """A message for a task run progress update."""
+
+ message: str
+ """Progress update message."""
+
+ timestamp: Optional[str] = None
+ """Timestamp of the message."""
+
+ type: Literal[
+ "task_run.progress_msg.plan",
+ "task_run.progress_msg.search",
+ "task_run.progress_msg.result",
+ "task_run.progress_msg.tool_call",
+ "task_run.progress_msg.exec_status",
+ ]
+ """Event type; always starts with 'task_run.progress_msg'."""
+
+
+TaskRunEventsResponse: TypeAlias = Annotated[
+ Union[TaskRunProgressStatsEvent, TaskRunProgressMessageEvent, TaskRunEvent, ErrorEvent],
+ PropertyInfo(discriminator="type"),
+]
diff --git a/src/parallel/types/task_run_json_output.py b/src/parallel/types/task_run_json_output.py
index 8541b61..6a43d58 100644
--- a/src/parallel/types/task_run_json_output.py
+++ b/src/parallel/types/task_run_json_output.py
@@ -5,6 +5,7 @@
from .._models import BaseModel
from .field_basis import FieldBasis
+from .mcp_tool_call import McpToolCall
__all__ = ["TaskRunJsonOutput"]
@@ -32,19 +33,15 @@ class TaskRunJsonOutput(BaseModel):
"""
beta_fields: Optional[Dict[str, object]] = None
- """Additional fields from beta features used in this task run.
-
- When beta features are specified during both task run creation and result
- retrieval, this field will be empty and instead the relevant beta attributes
- will be directly included in the `BetaTaskRunJsonOutput` or corresponding output
- type. However, if beta features were specified during task run creation but not
- during result retrieval, this field will contain the dump of fields from those
- beta features. Each key represents the beta feature version (one amongst
- parallel-beta headers) and the values correspond to the beta feature attributes,
- if any. For now, only MCP server beta features have attributes. For example,
- `{mcp-server-2025-07-17: [{'server_name':'mcp_server', 'tool_call_id': 'tc_123', ...}]}}`
+ """Deprecated.
+
+ mcp-server-2025-07-17 is now included directly in the output (e.g.
+ mcp_tool_calls).
"""
+ mcp_tool_calls: Optional[List[McpToolCall]] = None
+ """MCP tool calls made by the task."""
+
output_schema: Optional[Dict[str, object]] = None
"""Output schema for the Task Run.
diff --git a/src/parallel/types/task_run_result.py b/src/parallel/types/task_run_result.py
index fb9d39e..75c3692 100644
--- a/src/parallel/types/task_run_result.py
+++ b/src/parallel/types/task_run_result.py
@@ -5,41 +5,11 @@
from .._utils import PropertyInfo
from .._models import BaseModel
-from .citation import Citation
from .task_run import TaskRun
-from .field_basis import FieldBasis
from .task_run_json_output import TaskRunJsonOutput
from .task_run_text_output import TaskRunTextOutput
-__all__ = [
- "TaskRunResult",
- "Output",
- "OutputTaskRunJsonOutput",
- "OutputTaskRunJsonOutputBasis",
- "OutputTaskRunJsonOutputBasisCitation",
- "OutputTaskRunTextOutput",
- "OutputTaskRunTextOutputBasis",
- "OutputTaskRunTextOutputBasisCitation",
-]
-
-OutputTaskRunJsonOutput = TaskRunJsonOutput # for backwards compatibility with v0.1.3
-"""This is deprecated, `TaskRunJsonOutput` should be used instead"""
-
-OutputTaskRunJsonOutputBasis = FieldBasis # for backwards compatibility with v0.1.3
-"""This is deprecated, `FieldBasis` should be used instead"""
-
-OutputTaskRunJsonOutputBasisCitation = Citation # for backwards compatibility with v0.1.3
-"""This is deprecated, `Citation` should be used instead"""
-
-OutputTaskRunTextOutput = TaskRunTextOutput # for backwards compatibility with v0.1.3
-"""This is deprecated, `TaskRunTextOutput` should be used instead"""
-
-OutputTaskRunTextOutputBasis = FieldBasis # for backwards compatibility with v0.1.3
-"""This is deprecated, `FieldBasis` should be used instead"""
-
-OutputTaskRunTextOutputBasisCitation = Citation # for backwards compatibility with v0.1.3
-"""This is deprecated, `Citation` should be used instead"""
-
+__all__ = ["TaskRunResult", "Output"]
Output: TypeAlias = Annotated[Union[TaskRunTextOutput, TaskRunJsonOutput], PropertyInfo(discriminator="type")]
diff --git a/src/parallel/types/task_run_result_params.py b/src/parallel/types/task_run_result_params.py
index 676bbda..45aaafb 100644
--- a/src/parallel/types/task_run_result_params.py
+++ b/src/parallel/types/task_run_result_params.py
@@ -2,12 +2,17 @@
from __future__ import annotations
+from typing import List
from typing_extensions import Annotated, TypedDict
from .._utils import PropertyInfo
+from .beta.parallel_beta_param import ParallelBetaParam
__all__ = ["TaskRunResultParams"]
class TaskRunResultParams(TypedDict, total=False):
api_timeout: Annotated[int, PropertyInfo(alias="timeout")]
+
+ betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
+ """Optional header to specify the beta version(s) to enable."""
diff --git a/src/parallel/types/task_run_text_output.py b/src/parallel/types/task_run_text_output.py
index 5d7e4c6..46c23ff 100644
--- a/src/parallel/types/task_run_text_output.py
+++ b/src/parallel/types/task_run_text_output.py
@@ -5,6 +5,7 @@
from .._models import BaseModel
from .field_basis import FieldBasis
+from .mcp_tool_call import McpToolCall
__all__ = ["TaskRunTextOutput"]
@@ -25,15 +26,11 @@ class TaskRunTextOutput(BaseModel):
"""
beta_fields: Optional[Dict[str, object]] = None
- """Additional fields from beta features used in this task run.
-
- When beta features are specified during both task run creation and result
- retrieval, this field will be empty and instead the relevant beta attributes
- will be directly included in the `BetaTaskRunJsonOutput` or corresponding output
- type. However, if beta features were specified during task run creation but not
- during result retrieval, this field will contain the dump of fields from those
- beta features. Each key represents the beta feature version (one amongst
- parallel-beta headers) and the values correspond to the beta feature attributes,
- if any. For now, only MCP server beta features have attributes. For example,
- `{mcp-server-2025-07-17: [{'server_name':'mcp_server', 'tool_call_id': 'tc_123', ...}]}}`
+ """Deprecated.
+
+ mcp-server-2025-07-17 is now included directly in the output (e.g.
+ mcp_tool_calls).
"""
+
+ mcp_tool_calls: Optional[List[McpToolCall]] = None
+ """MCP tool calls made by the task."""
diff --git a/src/parallel/types/webhook.py b/src/parallel/types/webhook.py
new file mode 100644
index 0000000..67964b3
--- /dev/null
+++ b/src/parallel/types/webhook.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["Webhook"]
+
+
+class Webhook(BaseModel):
+ """Webhooks for Task Runs."""
+
+ url: str
+ """URL for the webhook."""
+
+ event_types: Optional[List[Literal["task_run.status"]]] = None
+ """Event types to send the webhook notifications for."""
diff --git a/src/parallel/types/webhook_param.py b/src/parallel/types/webhook_param.py
new file mode 100644
index 0000000..90a667d
--- /dev/null
+++ b/src/parallel/types/webhook_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["WebhookParam"]
+
+
+class WebhookParam(TypedDict, total=False):
+ """Webhooks for Task Runs."""
+
+ url: Required[str]
+ """URL for the webhook."""
+
+ event_types: List[Literal["task_run.status"]]
+ """Event types to send the webhook notifications for."""
diff --git a/tests/api_resources/beta/test_task_group.py b/tests/api_resources/beta/test_task_group.py
index a9c4e42..9f30f48 100644
--- a/tests/api_resources/beta/test_task_group.py
+++ b/tests/api_resources/beta/test_task_group.py
@@ -112,6 +112,7 @@ def test_method_add_runs_with_all_params(self, client: Parallel) -> None:
{
"input": "What was the GDP of France in 2023?",
"processor": "base",
+ "advanced_settings": {"location": "us"},
"enable_events": True,
"mcp_servers": [
{
@@ -404,6 +405,7 @@ async def test_method_add_runs_with_all_params(self, async_client: AsyncParallel
{
"input": "What was the GDP of France in 2023?",
"processor": "base",
+ "advanced_settings": {"location": "us"},
"enable_events": True,
"mcp_servers": [
{
diff --git a/tests/api_resources/beta/test_task_run.py b/tests/api_resources/beta/test_task_run.py
index c28344a..88474c5 100644
--- a/tests/api_resources/beta/test_task_run.py
+++ b/tests/api_resources/beta/test_task_run.py
@@ -9,9 +9,10 @@
from parallel import Parallel, AsyncParallel
from tests.utils import assert_matches_type
-from parallel.types import TaskRun
+from parallel.types import TaskRun, TaskRunResult
from parallel._utils import parse_date
-from parallel.types.beta import BetaTaskRunResult
+
+# pyright: reportDeprecated=false
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -21,60 +22,66 @@ class TestTaskRun:
@parametrize
def test_method_create(self, client: Parallel) -> None:
- task_run = client.beta.task_run.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- )
+ with pytest.warns(DeprecationWarning):
+ task_run = client.beta.task_run.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ )
+
assert_matches_type(TaskRun, task_run, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: Parallel) -> None:
- task_run = client.beta.task_run.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- enable_events=True,
- mcp_servers=[
- {
- "name": "name",
- "url": "url",
- "allowed_tools": ["string"],
- "headers": {"foo": "string"},
- "type": "url",
- }
- ],
- metadata={"foo": "string"},
- previous_interaction_id="previous_interaction_id",
- source_policy={
- "after_date": parse_date("2024-01-01"),
- "exclude_domains": ["reddit.com", "x.com", ".ai"],
- "include_domains": ["wikipedia.org", "usa.gov", ".edu"],
- },
- task_spec={
- "output_schema": {
- "json_schema": {
- "additionalProperties": "bar",
- "properties": "bar",
- "required": "bar",
- "type": "bar",
+ with pytest.warns(DeprecationWarning):
+ task_run = client.beta.task_run.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ advanced_settings={"location": "us"},
+ enable_events=True,
+ mcp_servers=[
+ {
+ "name": "name",
+ "url": "url",
+ "allowed_tools": ["string"],
+ "headers": {"foo": "string"},
+ "type": "url",
+ }
+ ],
+ metadata={"foo": "string"},
+ previous_interaction_id="previous_interaction_id",
+ source_policy={
+ "after_date": parse_date("2024-01-01"),
+ "exclude_domains": ["reddit.com", "x.com", ".ai"],
+ "include_domains": ["wikipedia.org", "usa.gov", ".edu"],
+ },
+ task_spec={
+ "output_schema": {
+ "json_schema": {
+ "additionalProperties": "bar",
+ "properties": "bar",
+ "required": "bar",
+ "type": "bar",
+ },
+ "type": "json",
},
- "type": "json",
+ "input_schema": "string",
},
- "input_schema": "string",
- },
- webhook={
- "url": "url",
- "event_types": ["task_run.status"],
- },
- betas=["mcp-server-2025-07-17"],
- )
+ webhook={
+ "url": "url",
+ "event_types": ["task_run.status"],
+ },
+ betas=["mcp-server-2025-07-17"],
+ )
+
assert_matches_type(TaskRun, task_run, path=["response"])
@parametrize
def test_raw_response_create(self, client: Parallel) -> None:
- response = client.beta.task_run.with_raw_response.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.task_run.with_raw_response.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -83,30 +90,34 @@ def test_raw_response_create(self, client: Parallel) -> None:
@parametrize
def test_streaming_response_create(self, client: Parallel) -> None:
- with client.beta.task_run.with_streaming_response.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.task_run.with_streaming_response.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- task_run = response.parse()
- assert_matches_type(TaskRun, task_run, path=["response"])
+ task_run = response.parse()
+ assert_matches_type(TaskRun, task_run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_events(self, client: Parallel) -> None:
- task_run_stream = client.beta.task_run.events(
- "run_id",
- )
+ with pytest.warns(DeprecationWarning):
+ task_run_stream = client.beta.task_run.events(
+ "run_id",
+ )
+
task_run_stream.response.close()
@parametrize
def test_raw_response_events(self, client: Parallel) -> None:
- response = client.beta.task_run.with_raw_response.events(
- "run_id",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.task_run.with_raw_response.events(
+ "run_id",
+ )
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
@@ -114,70 +125,79 @@ def test_raw_response_events(self, client: Parallel) -> None:
@parametrize
def test_streaming_response_events(self, client: Parallel) -> None:
- with client.beta.task_run.with_streaming_response.events(
- "run_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.task_run.with_streaming_response.events(
+ "run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- stream = response.parse()
- stream.close()
+ stream = response.parse()
+ stream.close()
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_events(self, client: Parallel) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.beta.task_run.with_raw_response.events(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.beta.task_run.with_raw_response.events(
+ "",
+ )
@parametrize
def test_method_result(self, client: Parallel) -> None:
- task_run = client.beta.task_run.result(
- run_id="run_id",
- )
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ with pytest.warns(DeprecationWarning):
+ task_run = client.beta.task_run.result(
+ run_id="run_id",
+ )
+
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
@parametrize
def test_method_result_with_all_params(self, client: Parallel) -> None:
- task_run = client.beta.task_run.result(
- run_id="run_id",
- api_timeout=0,
- betas=["mcp-server-2025-07-17"],
- )
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ with pytest.warns(DeprecationWarning):
+ task_run = client.beta.task_run.result(
+ run_id="run_id",
+ api_timeout=0,
+ betas=["mcp-server-2025-07-17"],
+ )
+
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
@parametrize
def test_raw_response_result(self, client: Parallel) -> None:
- response = client.beta.task_run.with_raw_response.result(
- run_id="run_id",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.task_run.with_raw_response.result(
+ run_id="run_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
task_run = response.parse()
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
@parametrize
def test_streaming_response_result(self, client: Parallel) -> None:
- with client.beta.task_run.with_streaming_response.result(
- run_id="run_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.task_run.with_streaming_response.result(
+ run_id="run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- task_run = response.parse()
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ task_run = response.parse()
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_result(self, client: Parallel) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.beta.task_run.with_raw_response.result(
- run_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.beta.task_run.with_raw_response.result(
+ run_id="",
+ )
class TestAsyncTaskRun:
@@ -187,60 +207,66 @@ class TestAsyncTaskRun:
@parametrize
async def test_method_create(self, async_client: AsyncParallel) -> None:
- task_run = await async_client.beta.task_run.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- )
+ with pytest.warns(DeprecationWarning):
+ task_run = await async_client.beta.task_run.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ )
+
assert_matches_type(TaskRun, task_run, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncParallel) -> None:
- task_run = await async_client.beta.task_run.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- enable_events=True,
- mcp_servers=[
- {
- "name": "name",
- "url": "url",
- "allowed_tools": ["string"],
- "headers": {"foo": "string"},
- "type": "url",
- }
- ],
- metadata={"foo": "string"},
- previous_interaction_id="previous_interaction_id",
- source_policy={
- "after_date": parse_date("2024-01-01"),
- "exclude_domains": ["reddit.com", "x.com", ".ai"],
- "include_domains": ["wikipedia.org", "usa.gov", ".edu"],
- },
- task_spec={
- "output_schema": {
- "json_schema": {
- "additionalProperties": "bar",
- "properties": "bar",
- "required": "bar",
- "type": "bar",
+ with pytest.warns(DeprecationWarning):
+ task_run = await async_client.beta.task_run.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ advanced_settings={"location": "us"},
+ enable_events=True,
+ mcp_servers=[
+ {
+ "name": "name",
+ "url": "url",
+ "allowed_tools": ["string"],
+ "headers": {"foo": "string"},
+ "type": "url",
+ }
+ ],
+ metadata={"foo": "string"},
+ previous_interaction_id="previous_interaction_id",
+ source_policy={
+ "after_date": parse_date("2024-01-01"),
+ "exclude_domains": ["reddit.com", "x.com", ".ai"],
+ "include_domains": ["wikipedia.org", "usa.gov", ".edu"],
+ },
+ task_spec={
+ "output_schema": {
+ "json_schema": {
+ "additionalProperties": "bar",
+ "properties": "bar",
+ "required": "bar",
+ "type": "bar",
+ },
+ "type": "json",
},
- "type": "json",
+ "input_schema": "string",
+ },
+ webhook={
+ "url": "url",
+ "event_types": ["task_run.status"],
},
- "input_schema": "string",
- },
- webhook={
- "url": "url",
- "event_types": ["task_run.status"],
- },
- betas=["mcp-server-2025-07-17"],
- )
+ betas=["mcp-server-2025-07-17"],
+ )
+
assert_matches_type(TaskRun, task_run, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncParallel) -> None:
- response = await async_client.beta.task_run.with_raw_response.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.task_run.with_raw_response.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -249,30 +275,34 @@ async def test_raw_response_create(self, async_client: AsyncParallel) -> None:
@parametrize
async def test_streaming_response_create(self, async_client: AsyncParallel) -> None:
- async with async_client.beta.task_run.with_streaming_response.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.task_run.with_streaming_response.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- task_run = await response.parse()
- assert_matches_type(TaskRun, task_run, path=["response"])
+ task_run = await response.parse()
+ assert_matches_type(TaskRun, task_run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_events(self, async_client: AsyncParallel) -> None:
- task_run_stream = await async_client.beta.task_run.events(
- "run_id",
- )
+ with pytest.warns(DeprecationWarning):
+ task_run_stream = await async_client.beta.task_run.events(
+ "run_id",
+ )
+
await task_run_stream.response.aclose()
@parametrize
async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
- response = await async_client.beta.task_run.with_raw_response.events(
- "run_id",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.task_run.with_raw_response.events(
+ "run_id",
+ )
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = await response.parse()
@@ -280,67 +310,76 @@ async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
@parametrize
async def test_streaming_response_events(self, async_client: AsyncParallel) -> None:
- async with async_client.beta.task_run.with_streaming_response.events(
- "run_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.task_run.with_streaming_response.events(
+ "run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- stream = await response.parse()
- await stream.close()
+ stream = await response.parse()
+ await stream.close()
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_events(self, async_client: AsyncParallel) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.beta.task_run.with_raw_response.events(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.beta.task_run.with_raw_response.events(
+ "",
+ )
@parametrize
async def test_method_result(self, async_client: AsyncParallel) -> None:
- task_run = await async_client.beta.task_run.result(
- run_id="run_id",
- )
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ with pytest.warns(DeprecationWarning):
+ task_run = await async_client.beta.task_run.result(
+ run_id="run_id",
+ )
+
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
@parametrize
async def test_method_result_with_all_params(self, async_client: AsyncParallel) -> None:
- task_run = await async_client.beta.task_run.result(
- run_id="run_id",
- api_timeout=0,
- betas=["mcp-server-2025-07-17"],
- )
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ with pytest.warns(DeprecationWarning):
+ task_run = await async_client.beta.task_run.result(
+ run_id="run_id",
+ api_timeout=0,
+ betas=["mcp-server-2025-07-17"],
+ )
+
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
@parametrize
async def test_raw_response_result(self, async_client: AsyncParallel) -> None:
- response = await async_client.beta.task_run.with_raw_response.result(
- run_id="run_id",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.task_run.with_raw_response.result(
+ run_id="run_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
task_run = await response.parse()
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
@parametrize
async def test_streaming_response_result(self, async_client: AsyncParallel) -> None:
- async with async_client.beta.task_run.with_streaming_response.result(
- run_id="run_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.task_run.with_streaming_response.result(
+ run_id="run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- task_run = await response.parse()
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ task_run = await response.parse()
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_result(self, async_client: AsyncParallel) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.beta.task_run.with_raw_response.result(
- run_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.beta.task_run.with_raw_response.result(
+ run_id="",
+ )
diff --git a/tests/api_resources/test_beta.py b/tests/api_resources/test_beta.py
index e3198c5..417825f 100644
--- a/tests/api_resources/test_beta.py
+++ b/tests/api_resources/test_beta.py
@@ -38,6 +38,7 @@ def test_method_extract_with_all_params(self, client: Parallel) -> None:
full_content=True,
objective="objective",
search_queries=["string"],
+ session_id="session_id",
betas=["mcp-server-2025-07-17"],
)
assert_matches_type(ExtractResponse, beta, path=["response"])
@@ -90,6 +91,7 @@ def test_method_search_with_all_params(self, client: Parallel) -> None:
objective="objective",
processor="base",
search_queries=["string"],
+ session_id="session_id",
source_policy={
"after_date": parse_date("2024-01-01"),
"exclude_domains": ["reddit.com", "x.com", ".ai"],
@@ -145,6 +147,7 @@ async def test_method_extract_with_all_params(self, async_client: AsyncParallel)
full_content=True,
objective="objective",
search_queries=["string"],
+ session_id="session_id",
betas=["mcp-server-2025-07-17"],
)
assert_matches_type(ExtractResponse, beta, path=["response"])
@@ -197,6 +200,7 @@ async def test_method_search_with_all_params(self, async_client: AsyncParallel)
objective="objective",
processor="base",
search_queries=["string"],
+ session_id="session_id",
source_policy={
"after_date": parse_date("2024-01-01"),
"exclude_domains": ["reddit.com", "x.com", ".ai"],
diff --git a/tests/api_resources/test_client.py b/tests/api_resources/test_client.py
index 00cdd6a..e2baaba 100644
--- a/tests/api_resources/test_client.py
+++ b/tests/api_resources/test_client.py
@@ -45,6 +45,7 @@ def test_method_extract_with_all_params(self, client: Parallel) -> None:
max_chars_total=0,
objective="objective",
search_queries=["string"],
+ session_id="session_id",
)
assert_matches_type(ExtractResponse, client_, path=["response"])
@@ -102,6 +103,7 @@ def test_method_search_with_all_params(self, client: Parallel) -> None:
max_chars_total=0,
mode="basic",
objective="objective",
+ session_id="session_id",
)
assert_matches_type(SearchResult, client_, path=["response"])
@@ -159,6 +161,7 @@ async def test_method_extract_with_all_params(self, async_client: AsyncParallel)
max_chars_total=0,
objective="objective",
search_queries=["string"],
+ session_id="session_id",
)
assert_matches_type(ExtractResponse, client, path=["response"])
@@ -216,6 +219,7 @@ async def test_method_search_with_all_params(self, async_client: AsyncParallel)
max_chars_total=0,
mode="basic",
objective="objective",
+ session_id="session_id",
)
assert_matches_type(SearchResult, client, path=["response"])
diff --git a/tests/api_resources/test_task_run.py b/tests/api_resources/test_task_run.py
index 68e7db1..4bb9bd4 100644
--- a/tests/api_resources/test_task_run.py
+++ b/tests/api_resources/test_task_run.py
@@ -9,7 +9,10 @@
from parallel import Parallel, AsyncParallel
from tests.utils import assert_matches_type
-from parallel.types import TaskRun, TaskRunResult
+from parallel.types import (
+ TaskRun,
+ TaskRunResult,
+)
from parallel._utils import parse_date
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -31,6 +34,17 @@ def test_method_create_with_all_params(self, client: Parallel) -> None:
task_run = client.task_run.create(
input="What was the GDP of France in 2023?",
processor="base",
+ advanced_settings={"location": "us"},
+ enable_events=True,
+ mcp_servers=[
+ {
+ "name": "name",
+ "url": "url",
+ "allowed_tools": ["string"],
+ "headers": {"foo": "string"},
+ "type": "url",
+ }
+ ],
metadata={"foo": "string"},
previous_interaction_id="previous_interaction_id",
source_policy={
@@ -50,6 +64,11 @@ def test_method_create_with_all_params(self, client: Parallel) -> None:
},
"input_schema": "string",
},
+ webhook={
+ "url": "url",
+ "event_types": ["task_run.status"],
+ },
+ betas=["mcp-server-2025-07-17"],
)
assert_matches_type(TaskRun, task_run, path=["response"])
@@ -117,6 +136,43 @@ def test_path_params_retrieve(self, client: Parallel) -> None:
"",
)
+ @parametrize
+ def test_method_events(self, client: Parallel) -> None:
+ task_run_stream = client.task_run.events(
+ "run_id",
+ )
+ task_run_stream.response.close()
+
+ @parametrize
+ def test_raw_response_events(self, client: Parallel) -> None:
+ response = client.task_run.with_raw_response.events(
+ "run_id",
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = response.parse()
+ stream.close()
+
+ @parametrize
+ def test_streaming_response_events(self, client: Parallel) -> None:
+ with client.task_run.with_streaming_response.events(
+ "run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = response.parse()
+ stream.close()
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_events(self, client: Parallel) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.task_run.with_raw_response.events(
+ "",
+ )
+
@parametrize
def test_method_result(self, client: Parallel) -> None:
task_run = client.task_run.result(
@@ -129,6 +185,7 @@ def test_method_result_with_all_params(self, client: Parallel) -> None:
task_run = client.task_run.result(
run_id="run_id",
api_timeout=0,
+ betas=["mcp-server-2025-07-17"],
)
assert_matches_type(TaskRunResult, task_run, path=["response"])
@@ -182,6 +239,17 @@ async def test_method_create_with_all_params(self, async_client: AsyncParallel)
task_run = await async_client.task_run.create(
input="What was the GDP of France in 2023?",
processor="base",
+ advanced_settings={"location": "us"},
+ enable_events=True,
+ mcp_servers=[
+ {
+ "name": "name",
+ "url": "url",
+ "allowed_tools": ["string"],
+ "headers": {"foo": "string"},
+ "type": "url",
+ }
+ ],
metadata={"foo": "string"},
previous_interaction_id="previous_interaction_id",
source_policy={
@@ -201,6 +269,11 @@ async def test_method_create_with_all_params(self, async_client: AsyncParallel)
},
"input_schema": "string",
},
+ webhook={
+ "url": "url",
+ "event_types": ["task_run.status"],
+ },
+ betas=["mcp-server-2025-07-17"],
)
assert_matches_type(TaskRun, task_run, path=["response"])
@@ -268,6 +341,43 @@ async def test_path_params_retrieve(self, async_client: AsyncParallel) -> None:
"",
)
+ @parametrize
+ async def test_method_events(self, async_client: AsyncParallel) -> None:
+ task_run_stream = await async_client.task_run.events(
+ "run_id",
+ )
+ await task_run_stream.response.aclose()
+
+ @parametrize
+ async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
+ response = await async_client.task_run.with_raw_response.events(
+ "run_id",
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = await response.parse()
+ await stream.close()
+
+ @parametrize
+ async def test_streaming_response_events(self, async_client: AsyncParallel) -> None:
+ async with async_client.task_run.with_streaming_response.events(
+ "run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = await response.parse()
+ await stream.close()
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_events(self, async_client: AsyncParallel) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.task_run.with_raw_response.events(
+ "",
+ )
+
@parametrize
async def test_method_result(self, async_client: AsyncParallel) -> None:
task_run = await async_client.task_run.result(
@@ -280,6 +390,7 @@ async def test_method_result_with_all_params(self, async_client: AsyncParallel)
task_run = await async_client.task_run.result(
run_id="run_id",
api_timeout=0,
+ betas=["mcp-server-2025-07-17"],
)
assert_matches_type(TaskRunResult, task_run, path=["response"])
From 0866b12352cd6d8b41ec21465fd2f0ab4bf8cce9 Mon Sep 17 00:00:00 2001
From: Edward He
Date: Mon, 20 Apr 2026 20:23:08 -0700
Subject: [PATCH 26/32] lint
---
src/parallel/resources/task_run.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/parallel/resources/task_run.py b/src/parallel/resources/task_run.py
index 58cdb56..f98dafb 100644
--- a/src/parallel/resources/task_run.py
+++ b/src/parallel/resources/task_run.py
@@ -26,6 +26,7 @@
from ..types.webhook_param import WebhookParam
from ..types.task_run_result import TaskRunResult
from ..types.task_spec_param import OutputT, OutputSchema, TaskSpecParam
+from ..types.mcp_server_param import McpServerParam
from ..lib._parsing._task_spec import build_task_spec_param
from ..types.parsed_task_run_result import ParsedTaskRunResult
from ..lib._parsing._task_run_result import (
@@ -33,7 +34,6 @@
wait_for_result_async as _wait_for_result_async,
task_run_result_parser,
)
-from ..types.mcp_server_param import McpServerParam
from ..types.beta.parallel_beta_param import ParallelBetaParam
from ..types.task_run_events_response import TaskRunEventsResponse
from ..types.shared_params.source_policy import SourcePolicy
From 5907e00741e65ad2585e40f94fb36100557d6d35 Mon Sep 17 00:00:00 2001
From: Edward He
Date: Mon, 20 Apr 2026 20:42:34 -0700
Subject: [PATCH 27/32] add back findall aliasing
---
src/parallel/types/beta/__init__.py | 26 +++++++++----------
.../findall_candidate_match_status_event.py | 6 ++++-
.../types/beta/findall_create_params.py | 6 ++++-
.../types/beta/findall_enrich_input.py | 6 ++++-
.../types/beta/findall_enrich_params.py | 6 ++++-
.../types/beta/findall_events_params.py | 6 ++++-
.../types/beta/findall_events_response.py | 5 +++-
.../types/beta/findall_extend_params.py | 6 ++++-
.../types/beta/findall_ingest_params.py | 6 ++++-
src/parallel/types/beta/findall_run.py | 6 ++++-
src/parallel/types/beta/findall_run_result.py | 6 ++++-
.../types/beta/findall_run_status_event.py | 6 ++++-
src/parallel/types/beta/findall_schema.py | 6 ++++-
.../beta/findall_schema_updated_event.py | 6 ++++-
14 files changed, 77 insertions(+), 26 deletions(-)
diff --git a/src/parallel/types/beta/__init__.py b/src/parallel/types/beta/__init__.py
index dcd2b4c..4bd6c57 100644
--- a/src/parallel/types/beta/__init__.py
+++ b/src/parallel/types/beta/__init__.py
@@ -7,14 +7,14 @@
from .task_group import TaskGroup as TaskGroup
from .usage_item import UsageItem as UsageItem
from .error_event import ErrorEvent as ErrorEvent
-from .findall_run import FindAllRun as FindAllRun
+from .findall_run import FindAllRun as FindAllRun, FindallRun as FindallRun
from .extract_error import ExtractError as ExtractError
from .mcp_tool_call import McpToolCall as McpToolCall
from .search_result import SearchResult as SearchResult
from .webhook_param import WebhookParam as WebhookParam
from .beta_run_input import BetaRunInput as BetaRunInput
from .extract_result import ExtractResult as ExtractResult
-from .findall_schema import FindAllSchema as FindAllSchema
+from .findall_schema import FindAllSchema as FindAllSchema, FindallSchema as FindallSchema
from .task_run_event import TaskRunEvent as TaskRunEvent
from .extract_response import ExtractResponse as ExtractResponse
from .mcp_server_param import McpServerParam as McpServerParam
@@ -22,29 +22,29 @@
from .web_search_result import WebSearchResult as WebSearchResult
from .beta_search_params import BetaSearchParams as BetaSearchParams
from .fetch_policy_param import FetchPolicyParam as FetchPolicyParam
-from .findall_run_result import FindAllRunResult as FindAllRunResult
+from .findall_run_result import FindAllRunResult as FindAllRunResult, FindallRunResult as FindallRunResult
from .beta_extract_params import BetaExtractParams as BetaExtractParams
from .parallel_beta_param import ParallelBetaParam as ParallelBetaParam
from .beta_run_input_param import BetaRunInputParam as BetaRunInputParam
from .beta_task_run_result import BetaTaskRunResult as BetaTaskRunResult
-from .findall_enrich_input import FindAllEnrichInput as FindAllEnrichInput
-from .findall_create_params import FindAllCreateParams as FindAllCreateParams
-from .findall_enrich_params import FindAllEnrichParams as FindAllEnrichParams
-from .findall_events_params import FindAllEventsParams as FindAllEventsParams
-from .findall_extend_params import FindAllExtendParams as FindAllExtendParams
-from .findall_ingest_params import FindAllIngestParams as FindAllIngestParams
+from .findall_enrich_input import FindAllEnrichInput as FindAllEnrichInput, FindallEnrichInput as FindallEnrichInput
+from .findall_create_params import FindAllCreateParams as FindAllCreateParams, FindallCreateParams as FindallCreateParams
+from .findall_enrich_params import FindAllEnrichParams as FindAllEnrichParams, FindallEnrichParams as FindallEnrichParams
+from .findall_events_params import FindAllEventsParams as FindAllEventsParams, FindallEventsParams as FindallEventsParams
+from .findall_extend_params import FindAllExtendParams as FindAllExtendParams, FindallExtendParams as FindallExtendParams
+from .findall_ingest_params import FindAllIngestParams as FindAllIngestParams, FindallIngestParams as FindallIngestParams
from .excerpt_settings_param import ExcerptSettingsParam as ExcerptSettingsParam
from .task_run_create_params import TaskRunCreateParams as TaskRunCreateParams
from .task_run_result_params import TaskRunResultParams as TaskRunResultParams
-from .findall_events_response import FindAllEventsResponse as FindAllEventsResponse
+from .findall_events_response import FindAllEventsResponse as FindAllEventsResponse, FindallEventsResponse as FindallEventsResponse
from .task_group_run_response import TaskGroupRunResponse as TaskGroupRunResponse
-from .findall_run_status_event import FindAllRunStatusEvent as FindAllRunStatusEvent
+from .findall_run_status_event import FindAllRunStatusEvent as FindAllRunStatusEvent, FindallRunStatusEvent as FindallRunStatusEvent
from .task_group_create_params import TaskGroupCreateParams as TaskGroupCreateParams
from .task_group_events_params import TaskGroupEventsParams as TaskGroupEventsParams
from .task_run_events_response import TaskRunEventsResponse as TaskRunEventsResponse
from .task_group_add_runs_params import TaskGroupAddRunsParams as TaskGroupAddRunsParams
from .task_group_events_response import TaskGroupEventsResponse as TaskGroupEventsResponse
from .task_group_get_runs_params import TaskGroupGetRunsParams as TaskGroupGetRunsParams
-from .findall_schema_updated_event import FindAllSchemaUpdatedEvent as FindAllSchemaUpdatedEvent
+from .findall_schema_updated_event import FindAllSchemaUpdatedEvent as FindAllSchemaUpdatedEvent, FindallSchemaUpdatedEvent as FindallSchemaUpdatedEvent
from .task_group_get_runs_response import TaskGroupGetRunsResponse as TaskGroupGetRunsResponse
-from .findall_candidate_match_status_event import FindAllCandidateMatchStatusEvent as FindAllCandidateMatchStatusEvent
+from .findall_candidate_match_status_event import FindAllCandidateMatchStatusEvent as FindAllCandidateMatchStatusEvent, FindallCandidateMatchStatusEvent as FindallCandidateMatchStatusEvent
diff --git a/src/parallel/types/beta/findall_candidate_match_status_event.py b/src/parallel/types/beta/findall_candidate_match_status_event.py
index 5f7e3bb..01ebf20 100644
--- a/src/parallel/types/beta/findall_candidate_match_status_event.py
+++ b/src/parallel/types/beta/findall_candidate_match_status_event.py
@@ -7,7 +7,7 @@
from ..._models import BaseModel
from ..field_basis import FieldBasis
-__all__ = ["FindAllCandidateMatchStatusEvent", "Data"]
+__all__ = ["FindAllCandidateMatchStatusEvent", "FindallCandidateMatchStatusEvent", "Data"]
class Data(BaseModel):
@@ -66,3 +66,7 @@ class FindAllCandidateMatchStatusEvent(BaseModel):
findall.candidate.unmatched, findall.candidate.discarded,
findall.candidate.enriched.
"""
+
+
+FindallCandidateMatchStatusEvent = FindAllCandidateMatchStatusEvent # for backwards compatibility with v0.3.4
+"""This is deprecated, `FindAllCandidateMatchStatusEvent` should be used instead"""
diff --git a/src/parallel/types/beta/findall_create_params.py b/src/parallel/types/beta/findall_create_params.py
index 5fe2690..89a8715 100644
--- a/src/parallel/types/beta/findall_create_params.py
+++ b/src/parallel/types/beta/findall_create_params.py
@@ -9,7 +9,7 @@
from ..webhook_param import WebhookParam
from .parallel_beta_param import ParallelBetaParam
-__all__ = ["FindAllCreateParams", "MatchCondition", "ExcludeList"]
+__all__ = ["FindAllCreateParams", "FindallCreateParams", "MatchCondition", "ExcludeList"]
class FindAllCreateParams(TypedDict, total=False):
@@ -66,3 +66,7 @@ class ExcludeList(TypedDict, total=False):
url: Required[str]
"""URL of the entity to exclude from results."""
+
+
+FindallCreateParams = FindAllCreateParams # for backwards compatibility with v0.3.4
+"""This is deprecated, `FindAllCreateParams` should be used instead"""
diff --git a/src/parallel/types/beta/findall_enrich_input.py b/src/parallel/types/beta/findall_enrich_input.py
index f015709..8484621 100644
--- a/src/parallel/types/beta/findall_enrich_input.py
+++ b/src/parallel/types/beta/findall_enrich_input.py
@@ -6,7 +6,7 @@
from ..mcp_server import McpServer
from ..json_schema import JsonSchema
-__all__ = ["FindAllEnrichInput"]
+__all__ = ["FindAllEnrichInput", "FindallEnrichInput"]
class FindAllEnrichInput(BaseModel):
@@ -20,3 +20,7 @@ class FindAllEnrichInput(BaseModel):
processor: Optional[str] = None
"""Processor to use for the task."""
+
+
+FindallEnrichInput = FindAllEnrichInput # for backwards compatibility with v0.3.4
+"""This is deprecated, `FindAllEnrichInput` should be used instead"""
diff --git a/src/parallel/types/beta/findall_enrich_params.py b/src/parallel/types/beta/findall_enrich_params.py
index 31ce57e..a9be6a6 100644
--- a/src/parallel/types/beta/findall_enrich_params.py
+++ b/src/parallel/types/beta/findall_enrich_params.py
@@ -10,7 +10,7 @@
from ..json_schema_param import JsonSchemaParam
from .parallel_beta_param import ParallelBetaParam
-__all__ = ["FindAllEnrichParams"]
+__all__ = ["FindAllEnrichParams", "FindallEnrichParams"]
class FindAllEnrichParams(TypedDict, total=False):
@@ -25,3 +25,7 @@ class FindAllEnrichParams(TypedDict, total=False):
betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
"""Optional header to specify the beta version(s) to enable."""
+
+
+FindallEnrichParams = FindAllEnrichParams # for backwards compatibility with v0.3.4
+"""This is deprecated, `FindAllEnrichParams` should be used instead"""
diff --git a/src/parallel/types/beta/findall_events_params.py b/src/parallel/types/beta/findall_events_params.py
index 1747020..f818b1b 100644
--- a/src/parallel/types/beta/findall_events_params.py
+++ b/src/parallel/types/beta/findall_events_params.py
@@ -8,7 +8,7 @@
from ..._utils import PropertyInfo
from .parallel_beta_param import ParallelBetaParam
-__all__ = ["FindAllEventsParams"]
+__all__ = ["FindAllEventsParams", "FindallEventsParams"]
class FindAllEventsParams(TypedDict, total=False):
@@ -18,3 +18,7 @@ class FindAllEventsParams(TypedDict, total=False):
betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
"""Optional header to specify the beta version(s) to enable."""
+
+
+FindallEventsParams = FindAllEventsParams # for backwards compatibility with v0.3.4
+"""This is deprecated, `FindAllEventsParams` should be used instead"""
diff --git a/src/parallel/types/beta/findall_events_response.py b/src/parallel/types/beta/findall_events_response.py
index 0334372..995cf03 100644
--- a/src/parallel/types/beta/findall_events_response.py
+++ b/src/parallel/types/beta/findall_events_response.py
@@ -9,9 +9,12 @@
from .findall_schema_updated_event import FindAllSchemaUpdatedEvent
from .findall_candidate_match_status_event import FindAllCandidateMatchStatusEvent
-__all__ = ["FindAllEventsResponse"]
+__all__ = ["FindAllEventsResponse", "FindallEventsResponse"]
FindAllEventsResponse: TypeAlias = Annotated[
Union[FindAllSchemaUpdatedEvent, FindAllRunStatusEvent, FindAllCandidateMatchStatusEvent, ErrorEvent],
PropertyInfo(discriminator="type"),
]
+
+FindallEventsResponse = FindAllEventsResponse # for backwards compatibility with v0.3.4
+"""This is deprecated, `FindAllEventsResponse` should be used instead"""
diff --git a/src/parallel/types/beta/findall_extend_params.py b/src/parallel/types/beta/findall_extend_params.py
index d90226e..41b2d88 100644
--- a/src/parallel/types/beta/findall_extend_params.py
+++ b/src/parallel/types/beta/findall_extend_params.py
@@ -8,7 +8,7 @@
from ..._utils import PropertyInfo
from .parallel_beta_param import ParallelBetaParam
-__all__ = ["FindAllExtendParams"]
+__all__ = ["FindAllExtendParams", "FindallExtendParams"]
class FindAllExtendParams(TypedDict, total=False):
@@ -21,3 +21,7 @@ class FindAllExtendParams(TypedDict, total=False):
betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
"""Optional header to specify the beta version(s) to enable."""
+
+
+FindallExtendParams = FindAllExtendParams # for backwards compatibility with v0.3.4
+"""This is deprecated, `FindAllExtendParams` should be used instead"""
diff --git a/src/parallel/types/beta/findall_ingest_params.py b/src/parallel/types/beta/findall_ingest_params.py
index fbdb3f3..fec1a52 100644
--- a/src/parallel/types/beta/findall_ingest_params.py
+++ b/src/parallel/types/beta/findall_ingest_params.py
@@ -8,7 +8,7 @@
from ..._utils import PropertyInfo
from .parallel_beta_param import ParallelBetaParam
-__all__ = ["FindAllIngestParams"]
+__all__ = ["FindAllIngestParams", "FindallIngestParams"]
class FindAllIngestParams(TypedDict, total=False):
@@ -17,3 +17,7 @@ class FindAllIngestParams(TypedDict, total=False):
betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
"""Optional header to specify the beta version(s) to enable."""
+
+
+FindallIngestParams = FindAllIngestParams # for backwards compatibility with v0.3.4
+"""This is deprecated, `FindAllIngestParams` should be used instead"""
diff --git a/src/parallel/types/beta/findall_run.py b/src/parallel/types/beta/findall_run.py
index 4db3135..ad55025 100644
--- a/src/parallel/types/beta/findall_run.py
+++ b/src/parallel/types/beta/findall_run.py
@@ -5,7 +5,7 @@
from ..._models import BaseModel
-__all__ = ["FindAllRun", "Status", "StatusMetrics"]
+__all__ = ["FindAllRun", "FindallRun", "Status", "StatusMetrics"]
class StatusMetrics(BaseModel):
@@ -67,3 +67,7 @@ class FindAllRun(BaseModel):
Timestamp of the latest modification to the FindAll run result, in RFC 3339
format.
"""
+
+
+FindallRun = FindAllRun # for backwards compatibility with v0.3.4
+"""This is deprecated, `FindAllRun` should be used instead"""
diff --git a/src/parallel/types/beta/findall_run_result.py b/src/parallel/types/beta/findall_run_result.py
index 2b413f0..d1851a9 100644
--- a/src/parallel/types/beta/findall_run_result.py
+++ b/src/parallel/types/beta/findall_run_result.py
@@ -7,7 +7,7 @@
from .findall_run import FindAllRun
from ..field_basis import FieldBasis
-__all__ = ["FindAllRunResult", "Candidate"]
+__all__ = ["FindAllRunResult", "FindallRunResult", "Candidate"]
class Candidate(BaseModel):
@@ -65,3 +65,7 @@ class FindAllRunResult(BaseModel):
This can be used to resume streaming from the last event.
"""
+
+
+FindallRunResult = FindAllRunResult # for backwards compatibility with v0.3.4
+"""This is deprecated, `FindAllRunResult` should be used instead"""
diff --git a/src/parallel/types/beta/findall_run_status_event.py b/src/parallel/types/beta/findall_run_status_event.py
index 48371ca..fe3ce34 100644
--- a/src/parallel/types/beta/findall_run_status_event.py
+++ b/src/parallel/types/beta/findall_run_status_event.py
@@ -6,7 +6,7 @@
from ..._models import BaseModel
from .findall_run import FindAllRun
-__all__ = ["FindAllRunStatusEvent"]
+__all__ = ["FindAllRunStatusEvent", "FindallRunStatusEvent"]
class FindAllRunStatusEvent(BaseModel):
@@ -23,3 +23,7 @@ class FindAllRunStatusEvent(BaseModel):
type: Literal["findall.status"]
"""Event type; always 'findall.status'."""
+
+
+FindallRunStatusEvent = FindAllRunStatusEvent # for backwards compatibility with v0.3.4
+"""This is deprecated, `FindAllRunStatusEvent` should be used instead"""
diff --git a/src/parallel/types/beta/findall_schema.py b/src/parallel/types/beta/findall_schema.py
index 7b9f4df..d214db7 100644
--- a/src/parallel/types/beta/findall_schema.py
+++ b/src/parallel/types/beta/findall_schema.py
@@ -6,7 +6,7 @@
from ..._models import BaseModel
from .findall_enrich_input import FindAllEnrichInput
-__all__ = ["FindAllSchema", "MatchCondition"]
+__all__ = ["FindAllSchema", "FindallSchema", "MatchCondition"]
class MatchCondition(BaseModel):
@@ -43,3 +43,7 @@ class FindAllSchema(BaseModel):
match_limit: Optional[int] = None
"""Max number of candidates to evaluate"""
+
+
+FindallSchema = FindAllSchema # for backwards compatibility with v0.3.4
+"""This is deprecated, `FindAllSchema` should be used instead"""
diff --git a/src/parallel/types/beta/findall_schema_updated_event.py b/src/parallel/types/beta/findall_schema_updated_event.py
index 50054ad..7eab28c 100644
--- a/src/parallel/types/beta/findall_schema_updated_event.py
+++ b/src/parallel/types/beta/findall_schema_updated_event.py
@@ -6,7 +6,7 @@
from ..._models import BaseModel
from .findall_schema import FindAllSchema
-__all__ = ["FindAllSchemaUpdatedEvent"]
+__all__ = ["FindAllSchemaUpdatedEvent", "FindallSchemaUpdatedEvent"]
class FindAllSchemaUpdatedEvent(BaseModel):
@@ -23,3 +23,7 @@ class FindAllSchemaUpdatedEvent(BaseModel):
type: Literal["findall.schema.updated"]
"""Event type; always 'findall.schema.updated'."""
+
+
+FindallSchemaUpdatedEvent = FindAllSchemaUpdatedEvent # for backwards compatibility with v0.3.4
+"""This is deprecated, `FindAllSchemaUpdatedEvent` should be used instead"""
From 0b84d48ff9980518d71af88917f99746e74b4a33 Mon Sep 17 00:00:00 2001
From: Edward He
Date: Mon, 20 Apr 2026 20:43:08 -0700
Subject: [PATCH 28/32] lint fix
---
src/parallel/types/beta/__init__.py | 45 +++++++++++++++++++++++------
1 file changed, 36 insertions(+), 9 deletions(-)
diff --git a/src/parallel/types/beta/__init__.py b/src/parallel/types/beta/__init__.py
index 4bd6c57..ef052aa 100644
--- a/src/parallel/types/beta/__init__.py
+++ b/src/parallel/types/beta/__init__.py
@@ -28,23 +28,50 @@
from .beta_run_input_param import BetaRunInputParam as BetaRunInputParam
from .beta_task_run_result import BetaTaskRunResult as BetaTaskRunResult
from .findall_enrich_input import FindAllEnrichInput as FindAllEnrichInput, FindallEnrichInput as FindallEnrichInput
-from .findall_create_params import FindAllCreateParams as FindAllCreateParams, FindallCreateParams as FindallCreateParams
-from .findall_enrich_params import FindAllEnrichParams as FindAllEnrichParams, FindallEnrichParams as FindallEnrichParams
-from .findall_events_params import FindAllEventsParams as FindAllEventsParams, FindallEventsParams as FindallEventsParams
-from .findall_extend_params import FindAllExtendParams as FindAllExtendParams, FindallExtendParams as FindallExtendParams
-from .findall_ingest_params import FindAllIngestParams as FindAllIngestParams, FindallIngestParams as FindallIngestParams
+from .findall_create_params import (
+ FindAllCreateParams as FindAllCreateParams,
+ FindallCreateParams as FindallCreateParams,
+)
+from .findall_enrich_params import (
+ FindAllEnrichParams as FindAllEnrichParams,
+ FindallEnrichParams as FindallEnrichParams,
+)
+from .findall_events_params import (
+ FindAllEventsParams as FindAllEventsParams,
+ FindallEventsParams as FindallEventsParams,
+)
+from .findall_extend_params import (
+ FindAllExtendParams as FindAllExtendParams,
+ FindallExtendParams as FindallExtendParams,
+)
+from .findall_ingest_params import (
+ FindAllIngestParams as FindAllIngestParams,
+ FindallIngestParams as FindallIngestParams,
+)
from .excerpt_settings_param import ExcerptSettingsParam as ExcerptSettingsParam
from .task_run_create_params import TaskRunCreateParams as TaskRunCreateParams
from .task_run_result_params import TaskRunResultParams as TaskRunResultParams
-from .findall_events_response import FindAllEventsResponse as FindAllEventsResponse, FindallEventsResponse as FindallEventsResponse
+from .findall_events_response import (
+ FindAllEventsResponse as FindAllEventsResponse,
+ FindallEventsResponse as FindallEventsResponse,
+)
from .task_group_run_response import TaskGroupRunResponse as TaskGroupRunResponse
-from .findall_run_status_event import FindAllRunStatusEvent as FindAllRunStatusEvent, FindallRunStatusEvent as FindallRunStatusEvent
+from .findall_run_status_event import (
+ FindAllRunStatusEvent as FindAllRunStatusEvent,
+ FindallRunStatusEvent as FindallRunStatusEvent,
+)
from .task_group_create_params import TaskGroupCreateParams as TaskGroupCreateParams
from .task_group_events_params import TaskGroupEventsParams as TaskGroupEventsParams
from .task_run_events_response import TaskRunEventsResponse as TaskRunEventsResponse
from .task_group_add_runs_params import TaskGroupAddRunsParams as TaskGroupAddRunsParams
from .task_group_events_response import TaskGroupEventsResponse as TaskGroupEventsResponse
from .task_group_get_runs_params import TaskGroupGetRunsParams as TaskGroupGetRunsParams
-from .findall_schema_updated_event import FindAllSchemaUpdatedEvent as FindAllSchemaUpdatedEvent, FindallSchemaUpdatedEvent as FindallSchemaUpdatedEvent
+from .findall_schema_updated_event import (
+ FindAllSchemaUpdatedEvent as FindAllSchemaUpdatedEvent,
+ FindallSchemaUpdatedEvent as FindallSchemaUpdatedEvent,
+)
from .task_group_get_runs_response import TaskGroupGetRunsResponse as TaskGroupGetRunsResponse
-from .findall_candidate_match_status_event import FindAllCandidateMatchStatusEvent as FindAllCandidateMatchStatusEvent, FindallCandidateMatchStatusEvent as FindallCandidateMatchStatusEvent
+from .findall_candidate_match_status_event import (
+ FindAllCandidateMatchStatusEvent as FindAllCandidateMatchStatusEvent,
+ FindallCandidateMatchStatusEvent as FindallCandidateMatchStatusEvent,
+)
From ec3a71799dbff6e74d7526698da84b4b199aa887 Mon Sep 17 00:00:00 2001
From: Edward He
Date: Mon, 20 Apr 2026 20:57:28 -0700
Subject: [PATCH 29/32] Task run events
---
src/parallel/types/beta/task_run_event.py | 5 +++-
src/parallel/types/task_run_result.py | 31 ++++++++++++++++++++++-
2 files changed, 34 insertions(+), 2 deletions(-)
diff --git a/src/parallel/types/beta/task_run_event.py b/src/parallel/types/beta/task_run_event.py
index e518907..d2cada5 100644
--- a/src/parallel/types/beta/task_run_event.py
+++ b/src/parallel/types/beta/task_run_event.py
@@ -2,7 +2,10 @@
from .. import task_run_event
-__all__ = ["TaskRunEvent"]
+__all__ = ["TaskRunEvent", "Output"]
TaskRunEvent = task_run_event.TaskRunEvent
"""Use parallel.types.task_run.TaskRunEvent instead"""
+
+Output = task_run_event.Output
+"""This is deprecated, use `parallel.types.task_run_event.Output` instead"""
diff --git a/src/parallel/types/task_run_result.py b/src/parallel/types/task_run_result.py
index 75c3692..27e0f9b 100644
--- a/src/parallel/types/task_run_result.py
+++ b/src/parallel/types/task_run_result.py
@@ -5,11 +5,40 @@
from .._utils import PropertyInfo
from .._models import BaseModel
+from .citation import Citation
from .task_run import TaskRun
+from .field_basis import FieldBasis
from .task_run_json_output import TaskRunJsonOutput
from .task_run_text_output import TaskRunTextOutput
-__all__ = ["TaskRunResult", "Output"]
+__all__ = [
+ "TaskRunResult",
+ "Output",
+ "OutputTaskRunJsonOutput",
+ "OutputTaskRunJsonOutputBasis",
+ "OutputTaskRunJsonOutputBasisCitation",
+ "OutputTaskRunTextOutput",
+ "OutputTaskRunTextOutputBasis",
+ "OutputTaskRunTextOutputBasisCitation",
+]
+
+OutputTaskRunJsonOutput = TaskRunJsonOutput # for backwards compatibility with v0.1.3
+"""This is deprecated, `TaskRunJsonOutput` should be used instead"""
+
+OutputTaskRunJsonOutputBasis = FieldBasis # for backwards compatibility with v0.1.3
+"""This is deprecated, `FieldBasis` should be used instead"""
+
+OutputTaskRunJsonOutputBasisCitation = Citation # for backwards compatibility with v0.1.3
+"""This is deprecated, `Citation` should be used instead"""
+
+OutputTaskRunTextOutput = TaskRunTextOutput # for backwards compatibility with v0.1.3
+"""This is deprecated, `TaskRunTextOutput` should be used instead"""
+
+OutputTaskRunTextOutputBasis = FieldBasis # for backwards compatibility with v0.1.3
+"""This is deprecated, `FieldBasis` should be used instead"""
+
+OutputTaskRunTextOutputBasisCitation = Citation # for backwards compatibility with v0.1.3
+"""This is deprecated, `Citation` should be used instead"""
Output: TypeAlias = Annotated[Union[TaskRunTextOutput, TaskRunJsonOutput], PropertyInfo(discriminator="type")]
From 57c4ae25d77a8627c6be3673312bfd7f373e17da Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 21 Apr 2026 04:25:38 +0000
Subject: [PATCH 30/32] feat(api): Add Findall Candidates
client.beta.findall.candidates
---
.stats.yml | 4 +-
src/parallel/resources/beta/api.md | 3 +
src/parallel/resources/beta/findall.py | 120 ++++++++++++++++++
src/parallel/types/beta/__init__.py | 2 +
.../types/beta/findall_candidates_params.py | 22 ++++
.../types/beta/findall_candidates_response.py | 29 +++++
tests/api_resources/beta/test_findall.py | 87 +++++++++++++
7 files changed, 265 insertions(+), 2 deletions(-)
create mode 100644 src/parallel/types/beta/findall_candidates_params.py
create mode 100644 src/parallel/types/beta/findall_candidates_response.py
diff --git a/.stats.yml b/.stats.yml
index 26be205..b047b54 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 23
+configured_endpoints: 24
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-c4cc922783460c6e65811d13c9abe35807a551c77c126d452806f93caaaf48fb.yml
openapi_spec_hash: 99c9e48d4dafaca71f058107008d174b
-config_hash: fe820a5a10ee48e143c9e49a153b23b4
+config_hash: 80e7ee7ad8e3424616aca7189ffd5ae7
diff --git a/src/parallel/resources/beta/api.md b/src/parallel/resources/beta/api.md
index 8652cf3..3ad7b56 100644
--- a/src/parallel/resources/beta/api.md
+++ b/src/parallel/resources/beta/api.md
@@ -73,6 +73,8 @@ Types:
```python
from parallel.types.beta import (
FindAllCandidateMatchStatusEvent,
+ FindAllCandidatesRequest,
+ FindAllCandidatesResponse,
FindAllEnrichInput,
FindAllExtendInput,
FindAllRun,
@@ -91,6 +93,7 @@ Methods:
- client.beta.findall.create(\*\*params) -> FindAllRun
- client.beta.findall.retrieve(findall_id) -> FindAllRun
- client.beta.findall.cancel(findall_id) -> object
+- client.beta.findall.candidates(\*\*params) -> FindAllCandidatesResponse
- client.beta.findall.enrich(findall_id, \*\*params) -> FindAllSchema
- client.beta.findall.events(findall_id, \*\*params) -> FindAllEventsResponse
- client.beta.findall.extend(findall_id, \*\*params) -> FindAllSchema
diff --git a/src/parallel/resources/beta/findall.py b/src/parallel/resources/beta/findall.py
index 878be14..b5d2ab2 100644
--- a/src/parallel/resources/beta/findall.py
+++ b/src/parallel/resources/beta/findall.py
@@ -25,6 +25,7 @@
findall_events_params,
findall_extend_params,
findall_ingest_params,
+ findall_candidates_params,
)
from ..._base_client import make_request_options
from ...types.beta.findall_run import FindAllRun
@@ -35,6 +36,7 @@
from ...types.beta.findall_run_result import FindAllRunResult
from ...types.beta.parallel_beta_param import ParallelBetaParam
from ...types.beta.findall_events_response import FindAllEventsResponse
+from ...types.beta.findall_candidates_response import FindAllCandidatesResponse
__all__ = [
"FindAllResource",
@@ -263,6 +265,59 @@ def cancel(
cast_to=object,
)
+ def candidates(
+ self,
+ *,
+ entity_type: Literal["company", "people"],
+ objective: str,
+ match_limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FindAllCandidatesResponse:
+ """
+ Return ranked entity candidates matching a natural language objective.
+
+        This endpoint performs a best-effort search optimized for low latency. For
+ comprehensive match evaluation and enrichment, use the
+ [FindAll API](https://docs.parallel.ai/findall-api/findall-quickstart).
+
+ Args:
+ entity_type: Type of entity to search for.
+
+ objective: Natural language description of target entities.
+
+ match_limit: Maximum number of candidates to return. Must be between 5 and 1000 (inclusive).
+ May return fewer results. Defaults to 100.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
+ return self._post(
+ "/v1beta/findall/candidates",
+ body=maybe_transform(
+ {
+ "entity_type": entity_type,
+ "objective": objective,
+ "match_limit": match_limit,
+ },
+ findall_candidates_params.FindAllCandidatesParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FindAllCandidatesResponse,
+ )
+
def enrich(
self,
findall_id: str,
@@ -808,6 +863,59 @@ async def cancel(
cast_to=object,
)
+ async def candidates(
+ self,
+ *,
+ entity_type: Literal["company", "people"],
+ objective: str,
+ match_limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FindAllCandidatesResponse:
+ """
+ Return ranked entity candidates matching a natural language objective.
+
+        This endpoint performs a best-effort search optimized for low latency. For
+ comprehensive match evaluation and enrichment, use the
+ [FindAll API](https://docs.parallel.ai/findall-api/findall-quickstart).
+
+ Args:
+ entity_type: Type of entity to search for.
+
+ objective: Natural language description of target entities.
+
+ match_limit: Maximum number of candidates to return. Must be between 5 and 1000 (inclusive).
+ May return fewer results. Defaults to 100.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
+ return await self._post(
+ "/v1beta/findall/candidates",
+ body=await async_maybe_transform(
+ {
+ "entity_type": entity_type,
+ "objective": objective,
+ "match_limit": match_limit,
+ },
+ findall_candidates_params.FindAllCandidatesParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FindAllCandidatesResponse,
+ )
+
async def enrich(
self,
findall_id: str,
@@ -1155,6 +1263,9 @@ def __init__(self, findall: FindAllResource) -> None:
self.cancel = to_raw_response_wrapper(
findall.cancel,
)
+ self.candidates = to_raw_response_wrapper(
+ findall.candidates,
+ )
self.enrich = to_raw_response_wrapper(
findall.enrich,
)
@@ -1188,6 +1299,9 @@ def __init__(self, findall: AsyncFindAllResource) -> None:
self.cancel = async_to_raw_response_wrapper(
findall.cancel,
)
+ self.candidates = async_to_raw_response_wrapper(
+ findall.candidates,
+ )
self.enrich = async_to_raw_response_wrapper(
findall.enrich,
)
@@ -1221,6 +1335,9 @@ def __init__(self, findall: FindAllResource) -> None:
self.cancel = to_streamed_response_wrapper(
findall.cancel,
)
+ self.candidates = to_streamed_response_wrapper(
+ findall.candidates,
+ )
self.enrich = to_streamed_response_wrapper(
findall.enrich,
)
@@ -1254,6 +1371,9 @@ def __init__(self, findall: AsyncFindAllResource) -> None:
self.cancel = async_to_streamed_response_wrapper(
findall.cancel,
)
+ self.candidates = async_to_streamed_response_wrapper(
+ findall.candidates,
+ )
self.enrich = async_to_streamed_response_wrapper(
findall.enrich,
)
diff --git a/src/parallel/types/beta/__init__.py b/src/parallel/types/beta/__init__.py
index ef052aa..91b7229 100644
--- a/src/parallel/types/beta/__init__.py
+++ b/src/parallel/types/beta/__init__.py
@@ -63,9 +63,11 @@
from .task_group_create_params import TaskGroupCreateParams as TaskGroupCreateParams
from .task_group_events_params import TaskGroupEventsParams as TaskGroupEventsParams
from .task_run_events_response import TaskRunEventsResponse as TaskRunEventsResponse
+from .findall_candidates_params import FindAllCandidatesParams as FindAllCandidatesParams
from .task_group_add_runs_params import TaskGroupAddRunsParams as TaskGroupAddRunsParams
from .task_group_events_response import TaskGroupEventsResponse as TaskGroupEventsResponse
from .task_group_get_runs_params import TaskGroupGetRunsParams as TaskGroupGetRunsParams
+from .findall_candidates_response import FindAllCandidatesResponse as FindAllCandidatesResponse
from .findall_schema_updated_event import (
FindAllSchemaUpdatedEvent as FindAllSchemaUpdatedEvent,
FindallSchemaUpdatedEvent as FindallSchemaUpdatedEvent,
diff --git a/src/parallel/types/beta/findall_candidates_params.py b/src/parallel/types/beta/findall_candidates_params.py
new file mode 100644
index 0000000..0c50b27
--- /dev/null
+++ b/src/parallel/types/beta/findall_candidates_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["FindAllCandidatesParams"]
+
+
+class FindAllCandidatesParams(TypedDict, total=False):
+ entity_type: Required[Literal["company", "people"]]
+ """Type of entity to search for."""
+
+ objective: Required[str]
+ """Natural language description of target entities."""
+
+ match_limit: int
+ """Maximum number of candidates to return.
+
+ Must be between 5 and 1000 (inclusive). May return fewer results. Defaults
+ to 100.
+ """
diff --git a/src/parallel/types/beta/findall_candidates_response.py b/src/parallel/types/beta/findall_candidates_response.py
new file mode 100644
index 0000000..38ed985
--- /dev/null
+++ b/src/parallel/types/beta/findall_candidates_response.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ..._models import BaseModel
+
+__all__ = ["FindAllCandidatesResponse", "Candidate"]
+
+
+class Candidate(BaseModel):
+ description: str
+ """Descriptive text about the entity."""
+
+ name: str
+ """Entity name."""
+
+ url: str
+ """Canonical URL for the entity."""
+
+
+class FindAllCandidatesResponse(BaseModel):
+ candidate_set_id: str
+ """Candidate set request ID.
+
+ Example: `candidate_set_cad0a6d2dec046bd95ae900527d880e7`
+ """
+
+ candidates: List[Candidate]
+ """Ranked list of entity candidates."""
diff --git a/tests/api_resources/beta/test_findall.py b/tests/api_resources/beta/test_findall.py
index 18996a9..3d73d48 100644
--- a/tests/api_resources/beta/test_findall.py
+++ b/tests/api_resources/beta/test_findall.py
@@ -13,6 +13,7 @@
FindAllRun,
FindAllSchema,
FindAllRunResult,
+ FindAllCandidatesResponse,
)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -199,6 +200,49 @@ def test_path_params_cancel(self, client: Parallel) -> None:
findall_id="",
)
+ @parametrize
+ def test_method_candidates(self, client: Parallel) -> None:
+ findall = client.beta.findall.candidates(
+ entity_type="company",
+ objective="objective",
+ )
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ @parametrize
+ def test_method_candidates_with_all_params(self, client: Parallel) -> None:
+ findall = client.beta.findall.candidates(
+ entity_type="company",
+ objective="objective",
+ match_limit=5,
+ )
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ @parametrize
+ def test_raw_response_candidates(self, client: Parallel) -> None:
+ response = client.beta.findall.with_raw_response.candidates(
+ entity_type="company",
+ objective="objective",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ findall = response.parse()
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ @parametrize
+ def test_streaming_response_candidates(self, client: Parallel) -> None:
+ with client.beta.findall.with_streaming_response.candidates(
+ entity_type="company",
+ objective="objective",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ findall = response.parse()
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
@parametrize
def test_method_enrich(self, client: Parallel) -> None:
findall = client.beta.findall.enrich(
@@ -709,6 +753,49 @@ async def test_path_params_cancel(self, async_client: AsyncParallel) -> None:
findall_id="",
)
+ @parametrize
+ async def test_method_candidates(self, async_client: AsyncParallel) -> None:
+ findall = await async_client.beta.findall.candidates(
+ entity_type="company",
+ objective="objective",
+ )
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ @parametrize
+ async def test_method_candidates_with_all_params(self, async_client: AsyncParallel) -> None:
+ findall = await async_client.beta.findall.candidates(
+ entity_type="company",
+ objective="objective",
+ match_limit=5,
+ )
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ @parametrize
+ async def test_raw_response_candidates(self, async_client: AsyncParallel) -> None:
+ response = await async_client.beta.findall.with_raw_response.candidates(
+ entity_type="company",
+ objective="objective",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ findall = await response.parse()
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_candidates(self, async_client: AsyncParallel) -> None:
+ async with async_client.beta.findall.with_streaming_response.candidates(
+ entity_type="company",
+ objective="objective",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ findall = await response.parse()
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
@parametrize
async def test_method_enrich(self, async_client: AsyncParallel) -> None:
findall = await async_client.beta.findall.enrich(
From 58f19f38174fb71dc906049c0aef6610ac67971e Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 21 Apr 2026 05:54:03 +0000
Subject: [PATCH 31/32] feat(api): Update OpenAPI spec
---
.stats.yml | 4 +-
src/parallel/_client.py | 70 ++++++++++---------
src/parallel/resources/beta/beta.py | 24 +++----
src/parallel/resources/beta/task_run.py | 16 ++---
src/parallel/resources/task_run.py | 16 ++---
.../types/beta/beta_extract_params.py | 6 +-
src/parallel/types/beta/beta_search_params.py | 6 +-
.../types/beta/task_run_create_params.py | 8 +--
src/parallel/types/client_extract_params.py | 12 ++--
src/parallel/types/client_search_params.py | 19 +++--
src/parallel/types/extract_response.py | 2 +-
src/parallel/types/run_input.py | 8 +--
src/parallel/types/run_input_param.py | 8 +--
src/parallel/types/search_result.py | 2 +-
src/parallel/types/shared/source_policy.py | 6 +-
.../types/shared_params/source_policy.py | 6 +-
src/parallel/types/task_run_create_params.py | 8 +--
17 files changed, 112 insertions(+), 109 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index b047b54..ea4794f 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 24
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-c4cc922783460c6e65811d13c9abe35807a551c77c126d452806f93caaaf48fb.yml
-openapi_spec_hash: 99c9e48d4dafaca71f058107008d174b
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-57e1c56be0942c131ab5f24d8620de166d0721ef7f3423532abc7027e5a989e7.yml
+openapi_spec_hash: e61f831e30d19590eb3138a1b1709d1d
config_hash: 80e7ee7ad8e3424616aca7189ffd5ae7
diff --git a/src/parallel/_client.py b/src/parallel/_client.py
index 44b86ee..74f06c7 100644
--- a/src/parallel/_client.py
+++ b/src/parallel/_client.py
@@ -247,6 +247,9 @@ def extract(
"""
Extracts relevant content from specific web URLs.
+ The legacy Extract API reference is available
+ [here](https://docs.parallel.ai/api-reference/legacy/extract-beta/extract).
+
Args:
urls: URLs to extract content from. Up to 20 URLs.
@@ -258,9 +261,7 @@ def extract(
client_model: The model generating this request and consuming the results. Enables
optimizations and tailors default settings for the model's capabilities.
- max_chars_total: Upper bound on total characters across excerpts from all extracted results. Does
- not affect full_content if requested. Default is dynamic based on urls,
- objective, and client_model.
+ max_chars_total: Upper bound on total characters across excerpts from all extracted results.
objective: As in SearchRequest, a natural-language description of the underlying question
or goal driving the request. Used together with search_queries to focus excerpts
@@ -269,9 +270,9 @@ def extract(
search_queries: Optional keyword search queries, as in SearchRequest. Used together with
objective to focus excerpts on the most relevant content.
- session_id: Session identifier for calls to search and extract made by an agent as part of a
- larger task. May be a user-generated random string, e.g. a uuid, or a session_id
- returned by a previous request.
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
extra_headers: Send extra headers
@@ -308,7 +309,7 @@ def search(
advanced_settings: Optional[AdvancedSearchSettingsParam] | Omit = omit,
client_model: Optional[str] | Omit = omit,
max_chars_total: Optional[int] | Omit = omit,
- mode: Optional[Literal["basic", "standard"]] | Omit = omit,
+ mode: Optional[Literal["basic", "advanced"]] | Omit = omit,
objective: Optional[str] | Omit = omit,
session_id: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -321,6 +322,9 @@ def search(
"""
Searches the web.
+ The legacy Search API reference is available
+ [here](https://docs.parallel.ai/api-reference/legacy/search-beta/search).
+
Args:
search_queries: Concise keyword search queries, 3-6 words each. At least one query is required,
provide 2-3 for best results. Used together with objective to focus results on
@@ -334,22 +338,21 @@ def search(
client_model: The model generating this request and consuming the results. Enables
optimizations and tailors default settings for the model's capabilities.
- max_chars_total: Upper bound on total characters across excerpts from all results. Default is
- dynamic based on search_queries, objective, and client_model.
+ max_chars_total: Upper bound on total characters across excerpts from all results.
- mode: Search mode preset: supported values are basic and standard. Basic mode offers
- the lowest latency and works best with 2-3 high-quality search_queries. Standard
- mode provides higher quality with more advanced retrieval and compression.
- Defaults to standard when omitted.
+ mode: Search mode preset: supported values are `basic` and `advanced`. Basic mode
+ offers the lowest latency and works best with 2-3 high-quality search_queries.
+ Advanced mode provides higher quality with more advanced retrieval and
+ compression. Defaults to `advanced` when omitted.
objective: Natural-language description of the underlying question or goal driving the
search. Used together with search_queries to focus results on the most relevant
content. Should be self-contained with enough context to understand the intent
of the search.
- session_id: Session identifier for calls to search and extract made by an agent as part of a
- larger task. May be a user-generated random string, e.g. a uuid, or a session_id
- returned by a previous request.
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
extra_headers: Send extra headers
@@ -591,6 +594,9 @@ async def extract(
"""
Extracts relevant content from specific web URLs.
+ The legacy Extract API reference is available
+ [here](https://docs.parallel.ai/api-reference/legacy/extract-beta/extract).
+
Args:
urls: URLs to extract content from. Up to 20 URLs.
@@ -602,9 +608,7 @@ async def extract(
client_model: The model generating this request and consuming the results. Enables
optimizations and tailors default settings for the model's capabilities.
- max_chars_total: Upper bound on total characters across excerpts from all extracted results. Does
- not affect full_content if requested. Default is dynamic based on urls,
- objective, and client_model.
+ max_chars_total: Upper bound on total characters across excerpts from all extracted results.
objective: As in SearchRequest, a natural-language description of the underlying question
or goal driving the request. Used together with search_queries to focus excerpts
@@ -613,9 +617,9 @@ async def extract(
search_queries: Optional keyword search queries, as in SearchRequest. Used together with
objective to focus excerpts on the most relevant content.
- session_id: Session identifier for calls to search and extract made by an agent as part of a
- larger task. May be a user-generated random string, e.g. a uuid, or a session_id
- returned by a previous request.
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
extra_headers: Send extra headers
@@ -652,7 +656,7 @@ async def search(
advanced_settings: Optional[AdvancedSearchSettingsParam] | Omit = omit,
client_model: Optional[str] | Omit = omit,
max_chars_total: Optional[int] | Omit = omit,
- mode: Optional[Literal["basic", "standard"]] | Omit = omit,
+ mode: Optional[Literal["basic", "advanced"]] | Omit = omit,
objective: Optional[str] | Omit = omit,
session_id: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -665,6 +669,9 @@ async def search(
"""
Searches the web.
+ The legacy Search API reference is available
+ [here](https://docs.parallel.ai/api-reference/legacy/search-beta/search).
+
Args:
search_queries: Concise keyword search queries, 3-6 words each. At least one query is required,
provide 2-3 for best results. Used together with objective to focus results on
@@ -678,22 +685,21 @@ async def search(
client_model: The model generating this request and consuming the results. Enables
optimizations and tailors default settings for the model's capabilities.
- max_chars_total: Upper bound on total characters across excerpts from all results. Default is
- dynamic based on search_queries, objective, and client_model.
+ max_chars_total: Upper bound on total characters across excerpts from all results.
- mode: Search mode preset: supported values are basic and standard. Basic mode offers
- the lowest latency and works best with 2-3 high-quality search_queries. Standard
- mode provides higher quality with more advanced retrieval and compression.
- Defaults to standard when omitted.
+ mode: Search mode preset: supported values are `basic` and `advanced`. Basic mode
+ offers the lowest latency and works best with 2-3 high-quality search_queries.
+ Advanced mode provides higher quality with more advanced retrieval and
+ compression. Defaults to `advanced` when omitted.
objective: Natural-language description of the underlying question or goal driving the
search. Used together with search_queries to focus results on the most relevant
content. Should be self-contained with enough context to understand the intent
of the search.
- session_id: Session identifier for calls to search and extract made by an agent as part of a
- larger task. May be a user-generated random string, e.g. a uuid, or a session_id
- returned by a previous request.
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
extra_headers: Send extra headers
diff --git a/src/parallel/resources/beta/beta.py b/src/parallel/resources/beta/beta.py
index c5a43bb..fa59196 100644
--- a/src/parallel/resources/beta/beta.py
+++ b/src/parallel/resources/beta/beta.py
@@ -151,9 +151,9 @@ def extract(
search_queries: If provided, focuses extracted content on the specified keyword search queries.
- session_id: Session identifier for calls to search and extract made by an agent as part of a
- larger task. May be a user-generated random string (e.g. a uuid) or a session_id
- from a previous request.
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
betas: Optional header to specify the beta version(s) to enable.
@@ -251,9 +251,9 @@ def search(
contain search operators. At least one of objective or search_queries must be
provided.
- session_id: Session identifier for calls to search and extract made by an agent as part of a
- larger task. May be a user-generated random string (e.g. a uuid) or a session_id
- from a previous request.
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
source_policy: Source policy for web search results.
@@ -402,9 +402,9 @@ async def extract(
search_queries: If provided, focuses extracted content on the specified keyword search queries.
- session_id: Session identifier for calls to search and extract made by an agent as part of a
- larger task. May be a user-generated random string (e.g. a uuid) or a session_id
- from a previous request.
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
betas: Optional header to specify the beta version(s) to enable.
@@ -502,9 +502,9 @@ async def search(
contain search operators. At least one of objective or search_queries must be
provided.
- session_id: Session identifier for calls to search and extract made by an agent as part of a
- larger task. May be a user-generated random string (e.g. a uuid) or a session_id
- from a previous request.
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
source_policy: Source policy for web search results.
diff --git a/src/parallel/resources/beta/task_run.py b/src/parallel/resources/beta/task_run.py
index 93a3832..9b3a65d 100644
--- a/src/parallel/resources/beta/task_run.py
+++ b/src/parallel/resources/beta/task_run.py
@@ -104,10 +104,10 @@ def create(
enable_events: Controls tracking of task run execution progress. When set to true, progress
events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above).
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
mcp_servers: Optional list of MCP servers to use for the run.
@@ -342,10 +342,10 @@ async def create(
enable_events: Controls tracking of task run execution progress. When set to true, progress
events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above).
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
mcp_servers: Optional list of MCP servers to use for the run.
diff --git a/src/parallel/resources/task_run.py b/src/parallel/resources/task_run.py
index f98dafb..4f8e2f5 100644
--- a/src/parallel/resources/task_run.py
+++ b/src/parallel/resources/task_run.py
@@ -111,10 +111,10 @@ def create(
enable_events: Controls tracking of task run execution progress. When set to true, progress
events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above).
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
mcp_servers: Optional list of MCP servers to use for the run.
@@ -494,10 +494,10 @@ async def create(
enable_events: Controls tracking of task run execution progress. When set to true, progress
events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above).
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
mcp_servers: Optional list of MCP servers to use for the run.
diff --git a/src/parallel/types/beta/beta_extract_params.py b/src/parallel/types/beta/beta_extract_params.py
index 60e55a8..578ed7c 100644
--- a/src/parallel/types/beta/beta_extract_params.py
+++ b/src/parallel/types/beta/beta_extract_params.py
@@ -42,9 +42,9 @@ class BetaExtractParams(TypedDict, total=False):
session_id: Optional[str]
"""
- Session identifier for calls to search and extract made by an agent as part of a
- larger task. May be a user-generated random string (e.g. a uuid) or a session_id
- from a previous request.
+ Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
"""
betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
diff --git a/src/parallel/types/beta/beta_search_params.py b/src/parallel/types/beta/beta_search_params.py
index 35802a7..1b82406 100644
--- a/src/parallel/types/beta/beta_search_params.py
+++ b/src/parallel/types/beta/beta_search_params.py
@@ -61,9 +61,9 @@ class BetaSearchParams(TypedDict, total=False):
session_id: Optional[str]
"""
- Session identifier for calls to search and extract made by an agent as part of a
- larger task. May be a user-generated random string (e.g. a uuid) or a session_id
- from a previous request.
+ Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
"""
source_policy: Optional[SourcePolicy]
diff --git a/src/parallel/types/beta/task_run_create_params.py b/src/parallel/types/beta/task_run_create_params.py
index c0e1c76..5f6f4c9 100644
--- a/src/parallel/types/beta/task_run_create_params.py
+++ b/src/parallel/types/beta/task_run_create_params.py
@@ -29,10 +29,10 @@ class TaskRunCreateParams(TypedDict, total=False):
"""Controls tracking of task run execution progress.
When set to true, progress events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above).
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
"""
mcp_servers: Optional[Iterable[McpServerParam]]
diff --git a/src/parallel/types/client_extract_params.py b/src/parallel/types/client_extract_params.py
index c72534b..063ee4c 100644
--- a/src/parallel/types/client_extract_params.py
+++ b/src/parallel/types/client_extract_params.py
@@ -29,11 +29,7 @@ class ClientExtractParams(TypedDict, total=False):
"""
max_chars_total: Optional[int]
- """Upper bound on total characters across excerpts from all extracted results.
-
- Does not affect full_content if requested. Default is dynamic based on urls,
- objective, and client_model.
- """
+ """Upper bound on total characters across excerpts from all extracted results."""
objective: Optional[str]
"""
@@ -50,7 +46,7 @@ class ClientExtractParams(TypedDict, total=False):
session_id: Optional[str]
"""
- Session identifier for calls to search and extract made by an agent as part of a
- larger task. May be a user-generated random string, e.g. a uuid, or a session_id
- returned by a previous request.
+ Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
"""
diff --git a/src/parallel/types/client_search_params.py b/src/parallel/types/client_search_params.py
index 166c4a5..f81ba5b 100644
--- a/src/parallel/types/client_search_params.py
+++ b/src/parallel/types/client_search_params.py
@@ -33,17 +33,14 @@ class ClientSearchParams(TypedDict, total=False):
"""
max_chars_total: Optional[int]
- """Upper bound on total characters across excerpts from all results.
+ """Upper bound on total characters across excerpts from all results."""
- Default is dynamic based on search_queries, objective, and client_model.
- """
-
- mode: Optional[Literal["basic", "standard"]]
- """Search mode preset: supported values are basic and standard.
+ mode: Optional[Literal["basic", "advanced"]]
+ """Search mode preset: supported values are `basic` and `advanced`.
Basic mode offers the lowest latency and works best with 2-3 high-quality
- search_queries. Standard mode provides higher quality with more advanced
- retrieval and compression. Defaults to standard when omitted.
+ search_queries. Advanced mode provides higher quality with more advanced
+ retrieval and compression. Defaults to `advanced` when omitted.
"""
objective: Optional[str]
@@ -56,7 +53,7 @@ class ClientSearchParams(TypedDict, total=False):
session_id: Optional[str]
"""
- Session identifier for calls to search and extract made by an agent as part of a
- larger task. May be a user-generated random string, e.g. a uuid, or a session_id
- returned by a previous request.
+ Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
"""
diff --git a/src/parallel/types/extract_response.py b/src/parallel/types/extract_response.py
index 9f01b8e..8cb9568 100644
--- a/src/parallel/types/extract_response.py
+++ b/src/parallel/types/extract_response.py
@@ -12,7 +12,7 @@
class ExtractResponse(BaseModel):
- """Extract response (GA)."""
+ """Extract response."""
errors: List[ExtractError]
"""Extract errors: requested URLs not in the results."""
diff --git a/src/parallel/types/run_input.py b/src/parallel/types/run_input.py
index 13337ed..8c302d6 100644
--- a/src/parallel/types/run_input.py
+++ b/src/parallel/types/run_input.py
@@ -34,10 +34,10 @@ class RunInput(BaseModel):
"""Controls tracking of task run execution progress.
When set to true, progress events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above).
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
"""
mcp_servers: Optional[List[McpServer]] = None
diff --git a/src/parallel/types/run_input_param.py b/src/parallel/types/run_input_param.py
index 88afb18..9fc2605 100644
--- a/src/parallel/types/run_input_param.py
+++ b/src/parallel/types/run_input_param.py
@@ -36,10 +36,10 @@ class RunInputParam(TypedDict, total=False):
"""Controls tracking of task run execution progress.
When set to true, progress events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above).
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
"""
mcp_servers: Optional[Iterable[McpServerParam]]
diff --git a/src/parallel/types/search_result.py b/src/parallel/types/search_result.py
index a2f5926..0320bed 100644
--- a/src/parallel/types/search_result.py
+++ b/src/parallel/types/search_result.py
@@ -11,7 +11,7 @@
class SearchResult(BaseModel):
- """Search response (GA)."""
+ """Search response."""
results: List[WebSearchResult]
"""A list of search results, ordered by decreasing relevance."""
diff --git a/src/parallel/types/shared/source_policy.py b/src/parallel/types/shared/source_policy.py
index 7ea1deb..51b7aa1 100644
--- a/src/parallel/types/shared/source_policy.py
+++ b/src/parallel/types/shared/source_policy.py
@@ -26,7 +26,8 @@ class SourcePolicy(BaseModel):
If specified, sources from these domains will be excluded. Accepts plain domains
(e.g., example.com, subdomain.example.gov) or bare domain extension starting
- with a period (e.g., .gov, .edu, .co.uk).
+ with a period (e.g., .gov, .edu, .co.uk). The combined number of domains in
+ include_domains and exclude_domains cannot exceed 200.
"""
include_domains: Optional[List[str]] = None
@@ -34,5 +35,6 @@ class SourcePolicy(BaseModel):
If specified, only sources from these domains will be included. Accepts plain
domains (e.g., example.com, subdomain.example.gov) or bare domain extension
- starting with a period (e.g., .gov, .edu, .co.uk).
+ starting with a period (e.g., .gov, .edu, .co.uk). The combined number of
+ domains in include_domains and exclude_domains cannot exceed 200.
"""
diff --git a/src/parallel/types/shared_params/source_policy.py b/src/parallel/types/shared_params/source_policy.py
index c3da049..adc5dbb 100644
--- a/src/parallel/types/shared_params/source_policy.py
+++ b/src/parallel/types/shared_params/source_policy.py
@@ -30,7 +30,8 @@ class SourcePolicy(TypedDict, total=False):
If specified, sources from these domains will be excluded. Accepts plain domains
(e.g., example.com, subdomain.example.gov) or bare domain extension starting
- with a period (e.g., .gov, .edu, .co.uk).
+ with a period (e.g., .gov, .edu, .co.uk). The combined number of domains in
+ include_domains and exclude_domains cannot exceed 200.
"""
include_domains: SequenceNotStr[str]
@@ -38,5 +39,6 @@ class SourcePolicy(TypedDict, total=False):
If specified, only sources from these domains will be included. Accepts plain
domains (e.g., example.com, subdomain.example.gov) or bare domain extension
- starting with a period (e.g., .gov, .edu, .co.uk).
+ starting with a period (e.g., .gov, .edu, .co.uk). The combined number of
+ domains in include_domains and exclude_domains cannot exceed 200.
"""
diff --git a/src/parallel/types/task_run_create_params.py b/src/parallel/types/task_run_create_params.py
index c3a8a7f..be5d695 100644
--- a/src/parallel/types/task_run_create_params.py
+++ b/src/parallel/types/task_run_create_params.py
@@ -29,10 +29,10 @@ class TaskRunCreateParams(TypedDict, total=False):
"""Controls tracking of task run execution progress.
When set to true, progress events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above).
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
"""
mcp_servers: Optional[Iterable[McpServerParam]]
From c6a101d38430fb5fe058c4516025472f046f4ad8 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 21 Apr 2026 05:54:30 +0000
Subject: [PATCH 32/32] release: 0.5.0
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 49 +++++++++++++++++++++++++++++++++++
pyproject.toml | 2 +-
src/parallel/_version.py | 2 +-
4 files changed, 52 insertions(+), 3 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 980ea05..2aca35a 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.4.2"
+ ".": "0.5.0"
}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a52aa9d..4848d83 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,54 @@
# Changelog
+## 0.5.0 (2026-04-21)
+
+Full Changelog: [v0.4.2...v0.5.0](https://github.com/parallel-web/parallel-sdk-python/compare/v0.4.2...v0.5.0)
+
+### Features
+
+* **api:** Add Findall Candidates ([57c4ae2](https://github.com/parallel-web/parallel-sdk-python/commit/57c4ae25d77a8627c6be3673312bfd7f373e17da))
+* **api:** Add Search and Extract v1 and associated types ([ea487f3](https://github.com/parallel-web/parallel-sdk-python/commit/ea487f32b73aee955f662d1fa225841421ce1ba3))
+* **api:** manual - add AdvancedSearchSettings and AdvancedExtractSettings models ([5836a6f](https://github.com/parallel-web/parallel-sdk-python/commit/5836a6fbe8db3a3509556442067f087918edb2bb))
+* **api:** manual updates - update openapi spec ([02db6c0](https://github.com/parallel-web/parallel-sdk-python/commit/02db6c07ec5eb254b18732fcb6f7dc43e31de471))
+* **api:** Remove full_content from OpenAPI Spec ([7a4d651](https://github.com/parallel-web/parallel-sdk-python/commit/7a4d651c3e9f35334175f82daaf6392e9f76dee5))
+* **api:** Search/Extract v1 with advanced_settings and max_results ([4ded29c](https://github.com/parallel-web/parallel-sdk-python/commit/4ded29c2382594f1735101753a0b09a2f7c6972e))
+* **api:** Update OpenAPI spec ([58f19f3](https://github.com/parallel-web/parallel-sdk-python/commit/58f19f38174fb71dc906049c0aef6610ac67971e))
+* **api:** Update OpenAPI spec ([fae95f4](https://github.com/parallel-web/parallel-sdk-python/commit/fae95f4f2c7cb60ebc0babc4fe540617e3334b2d))
+* **internal:** implement indices array format for query and form serialization ([3df5972](https://github.com/parallel-web/parallel-sdk-python/commit/3df5972e34c9aa1709eabc4eb5b8cbbc0adccae2))
+
+
+### Bug Fixes
+
+* **client:** preserve hardcoded query params when merging with user params ([08080bc](https://github.com/parallel-web/parallel-sdk-python/commit/08080bc22c415881cc9f9b05bc22f09ab83c7e8d))
+* **deps:** bump minimum typing-extensions version ([964a46d](https://github.com/parallel-web/parallel-sdk-python/commit/964a46ddfc9ead64e4105e42192a780bc91716b0))
+* ensure file data are only sent as 1 parameter ([1c15cc0](https://github.com/parallel-web/parallel-sdk-python/commit/1c15cc00b1ae223db8e51893ce23b9a2193e3ae7))
+* **pydantic:** do not pass `by_alias` unless set ([f0793c1](https://github.com/parallel-web/parallel-sdk-python/commit/f0793c171465dd57d0fbf82a3bb2281d046f500e))
+* sanitize endpoint path params ([5931597](https://github.com/parallel-web/parallel-sdk-python/commit/59315972d27246485be5cb52671aecaa3aa46253))
+
+
+### Performance Improvements
+
+* **client:** optimize file structure copying in multipart requests ([00e8564](https://github.com/parallel-web/parallel-sdk-python/commit/00e856464a2828693483a704de83ee5d6c4fe19e))
+
+
+### Chores
+
+* **ci:** skip lint on metadata-only changes ([403448c](https://github.com/parallel-web/parallel-sdk-python/commit/403448c7760e70fe0f4b3998a20f048910e91cd6))
+* **internal:** tweak CI branches ([014c802](https://github.com/parallel-web/parallel-sdk-python/commit/014c80287318df0db6207df1579be00c4717f24d))
+* **internal:** update gitignore ([1f4f6b0](https://github.com/parallel-web/parallel-sdk-python/commit/1f4f6b0e5d2a46e1a5457879e937ea5aa551073c))
+* **tests:** bump steady to v0.19.4 ([ebee2e7](https://github.com/parallel-web/parallel-sdk-python/commit/ebee2e761e2a8587cc6aa4c2decfd6310092b039))
+* **tests:** bump steady to v0.19.5 ([2774099](https://github.com/parallel-web/parallel-sdk-python/commit/2774099f753bc0826e9c6b6e9fbb40d4e72e3405))
+* **tests:** bump steady to v0.19.6 ([8e3ee3d](https://github.com/parallel-web/parallel-sdk-python/commit/8e3ee3d04dc149b2bcedb0e4acd92474fafd8d05))
+* **tests:** bump steady to v0.19.7 ([4bcf12e](https://github.com/parallel-web/parallel-sdk-python/commit/4bcf12e670b7997e23ade3d991711fe1ef741e35))
+* **tests:** bump steady to v0.20.1 ([d82ce60](https://github.com/parallel-web/parallel-sdk-python/commit/d82ce601c5687553b0e96990e418289fd8a14e00))
+* **tests:** bump steady to v0.20.2 ([746ca39](https://github.com/parallel-web/parallel-sdk-python/commit/746ca39c749f899dc9137a2f9be5de9aa39210c6))
+* **tests:** bump steady to v0.22.1 ([dec81af](https://github.com/parallel-web/parallel-sdk-python/commit/dec81afb89f46850635daa89e64debe15717d053))
+
+
+### Refactors
+
+* **tests:** switch from prism to steady ([032745e](https://github.com/parallel-web/parallel-sdk-python/commit/032745ea1a03b3d2516b789a28a3c8b8034660d8))
+
## 0.4.2 (2026-03-09)
Full Changelog: [v0.4.1...v0.4.2](https://github.com/parallel-web/parallel-sdk-python/compare/v0.4.1...v0.4.2)
diff --git a/pyproject.toml b/pyproject.toml
index b50e064..b55996c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "parallel-web"
-version = "0.4.2"
+version = "0.5.0"
description = "The official Python library for the Parallel API"
dynamic = ["readme"]
license = "MIT"
diff --git a/src/parallel/_version.py b/src/parallel/_version.py
index 1b80dfe..e3da38f 100644
--- a/src/parallel/_version.py
+++ b/src/parallel/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "parallel"
-__version__ = "0.4.2" # x-release-please-version
+__version__ = "0.5.0" # x-release-please-version