diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3c4a087..99ec9fb 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,12 +1,14 @@
name: CI
on:
push:
- branches-ignore:
- - 'generated'
- - 'codegen/**'
- - 'integrated/**'
- - 'stl-preview-head/**'
- - 'stl-preview-base/**'
+ branches:
+ - '**'
+ - '!integrated/**'
+ - '!stl-preview-head/**'
+ - '!stl-preview-base/**'
+ - '!generated'
+ - '!codegen/**'
+ - 'codegen/stl/**'
pull_request:
branches-ignore:
- 'stl-preview-head/**'
@@ -17,7 +19,7 @@ jobs:
timeout-minutes: 10
name: lint
runs-on: ${{ github.repository == 'stainless-sdks/parallel-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
- if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+ if: (github.event_name == 'push' || github.event.pull_request.head.repo.fork) && (github.event_name != 'push' || github.event.head_commit.message != 'codegen metadata')
steps:
- uses: actions/checkout@v6
@@ -36,7 +38,7 @@ jobs:
run: ./scripts/lint
build:
- if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+ if: (github.event_name == 'push' || github.event.pull_request.head.repo.fork) && (github.event_name != 'push' || github.event.head_commit.message != 'codegen metadata')
timeout-minutes: 10
name: build
permissions:
diff --git a/.gitignore b/.gitignore
index 95ceb18..3824f4c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
.prism.log
+.stdy.log
_dev
__pycache__
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 980ea05..2aca35a 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.4.2"
+ ".": "0.5.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index d30019d..ea4794f 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 22
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-970b780e86490322cc3c7e2b57f140ca6766a3d9f6e0d3402837ebaf7c2183fc.yml
-openapi_spec_hash: 34f784ce2dec796048e6780924bae08f
-config_hash: a398d153133d8884bed4e5256a0ae818
+configured_endpoints: 24
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-57e1c56be0942c131ab5f24d8620de166d0721ef7f3423532abc7027e5a989e7.yml
+openapi_spec_hash: e61f831e30d19590eb3138a1b1709d1d
+config_hash: 80e7ee7ad8e3424616aca7189ffd5ae7
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a52aa9d..4848d83 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,54 @@
# Changelog
+## 0.5.0 (2026-04-21)
+
+Full Changelog: [v0.4.2...v0.5.0](https://github.com/parallel-web/parallel-sdk-python/compare/v0.4.2...v0.5.0)
+
+### Features
+
+* **api:** Add Findall Candidates ([57c4ae2](https://github.com/parallel-web/parallel-sdk-python/commit/57c4ae25d77a8627c6be3673312bfd7f373e17da))
+* **api:** Add Search and Extract v1 and associated types ([ea487f3](https://github.com/parallel-web/parallel-sdk-python/commit/ea487f32b73aee955f662d1fa225841421ce1ba3))
+* **api:** manual - add AdvancedSearchSettings and AdvancedExtractSettings models ([5836a6f](https://github.com/parallel-web/parallel-sdk-python/commit/5836a6fbe8db3a3509556442067f087918edb2bb))
+* **api:** manual updates - update openapi spec ([02db6c0](https://github.com/parallel-web/parallel-sdk-python/commit/02db6c07ec5eb254b18732fcb6f7dc43e31de471))
+* **api:** Remove full_content from OpenAPI Spec ([7a4d651](https://github.com/parallel-web/parallel-sdk-python/commit/7a4d651c3e9f35334175f82daaf6392e9f76dee5))
+* **api:** Search/Extract v1 with advanced_settings and max_results ([4ded29c](https://github.com/parallel-web/parallel-sdk-python/commit/4ded29c2382594f1735101753a0b09a2f7c6972e))
+* **api:** Update OpenAPI spec ([58f19f3](https://github.com/parallel-web/parallel-sdk-python/commit/58f19f38174fb71dc906049c0aef6610ac67971e))
+* **api:** Update OpenAPI spec ([fae95f4](https://github.com/parallel-web/parallel-sdk-python/commit/fae95f4f2c7cb60ebc0babc4fe540617e3334b2d))
+* **internal:** implement indices array format for query and form serialization ([3df5972](https://github.com/parallel-web/parallel-sdk-python/commit/3df5972e34c9aa1709eabc4eb5b8cbbc0adccae2))
+
+
+### Bug Fixes
+
+* **client:** preserve hardcoded query params when merging with user params ([08080bc](https://github.com/parallel-web/parallel-sdk-python/commit/08080bc22c415881cc9f9b05bc22f09ab83c7e8d))
+* **deps:** bump minimum typing-extensions version ([964a46d](https://github.com/parallel-web/parallel-sdk-python/commit/964a46ddfc9ead64e4105e42192a780bc91716b0))
+* ensure file data are only sent as 1 parameter ([1c15cc0](https://github.com/parallel-web/parallel-sdk-python/commit/1c15cc00b1ae223db8e51893ce23b9a2193e3ae7))
+* **pydantic:** do not pass `by_alias` unless set ([f0793c1](https://github.com/parallel-web/parallel-sdk-python/commit/f0793c171465dd57d0fbf82a3bb2281d046f500e))
+* sanitize endpoint path params ([5931597](https://github.com/parallel-web/parallel-sdk-python/commit/59315972d27246485be5cb52671aecaa3aa46253))
+
+
+### Performance Improvements
+
+* **client:** optimize file structure copying in multipart requests ([00e8564](https://github.com/parallel-web/parallel-sdk-python/commit/00e856464a2828693483a704de83ee5d6c4fe19e))
+
+
+### Chores
+
+* **ci:** skip lint on metadata-only changes ([403448c](https://github.com/parallel-web/parallel-sdk-python/commit/403448c7760e70fe0f4b3998a20f048910e91cd6))
+* **internal:** tweak CI branches ([014c802](https://github.com/parallel-web/parallel-sdk-python/commit/014c80287318df0db6207df1579be00c4717f24d))
+* **internal:** update gitignore ([1f4f6b0](https://github.com/parallel-web/parallel-sdk-python/commit/1f4f6b0e5d2a46e1a5457879e937ea5aa551073c))
+* **tests:** bump steady to v0.19.4 ([ebee2e7](https://github.com/parallel-web/parallel-sdk-python/commit/ebee2e761e2a8587cc6aa4c2decfd6310092b039))
+* **tests:** bump steady to v0.19.5 ([2774099](https://github.com/parallel-web/parallel-sdk-python/commit/2774099f753bc0826e9c6b6e9fbb40d4e72e3405))
+* **tests:** bump steady to v0.19.6 ([8e3ee3d](https://github.com/parallel-web/parallel-sdk-python/commit/8e3ee3d04dc149b2bcedb0e4acd92474fafd8d05))
+* **tests:** bump steady to v0.19.7 ([4bcf12e](https://github.com/parallel-web/parallel-sdk-python/commit/4bcf12e670b7997e23ade3d991711fe1ef741e35))
+* **tests:** bump steady to v0.20.1 ([d82ce60](https://github.com/parallel-web/parallel-sdk-python/commit/d82ce601c5687553b0e96990e418289fd8a14e00))
+* **tests:** bump steady to v0.20.2 ([746ca39](https://github.com/parallel-web/parallel-sdk-python/commit/746ca39c749f899dc9137a2f9be5de9aa39210c6))
+* **tests:** bump steady to v0.22.1 ([dec81af](https://github.com/parallel-web/parallel-sdk-python/commit/dec81afb89f46850635daa89e64debe15717d053))
+
+
+### Refactors
+
+* **tests:** switch from prism to steady ([032745e](https://github.com/parallel-web/parallel-sdk-python/commit/032745ea1a03b3d2516b789a28a3c8b8034660d8))
+
## 0.4.2 (2026-03-09)
Full Changelog: [v0.4.1...v0.4.2](https://github.com/parallel-web/parallel-sdk-python/compare/v0.4.1...v0.4.2)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3276e79..1ecb266 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -85,7 +85,7 @@ $ pip install ./path-to-wheel-file.whl
## Running tests
-Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.
+Most tests require you to [set up a mock server](https://github.com/dgellow/steady) against the OpenAPI spec to run the tests.
```sh
$ ./scripts/mock
diff --git a/api.md b/api.md
index 66e3651..6462851 100644
--- a/api.md
+++ b/api.md
@@ -4,6 +4,30 @@
from parallel.types import ErrorObject, ErrorResponse, SourcePolicy, Warning
```
+# Parallel
+
+Types:
+
+```python
+from parallel.types import (
+ AdvancedExtractSettings,
+ AdvancedSearchSettings,
+ ExcerptSettings,
+ ExtractError,
+ ExtractResponse,
+ ExtractResult,
+ FetchPolicy,
+ SearchResult,
+ UsageItem,
+ WebSearchResult,
+)
+```
+
+Methods:
+
+- client.extract(\*\*params) -> ExtractResponse
+- client.search(\*\*params) -> SearchResult
+
# TaskRun
Types:
diff --git a/pyproject.toml b/pyproject.toml
index 12646c4..b55996c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "parallel-web"
-version = "0.4.2"
+version = "0.5.0"
description = "The official Python library for the Parallel API"
dynamic = ["readme"]
license = "MIT"
@@ -11,7 +11,7 @@ authors = [
dependencies = [
"httpx>=0.23.0, <1",
"pydantic>=1.9.0, <3",
- "typing-extensions>=4.10, <5",
+ "typing-extensions>=4.14, <5",
"anyio>=3.5.0, <5",
"distro>=1.7.0, <2",
"sniffio",
diff --git a/scripts/mock b/scripts/mock
index bcf3b39..feebe5e 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -19,34 +19,34 @@ fi
echo "==> Starting mock server with URL ${URL}"
-# Run prism mock on the given spec
+# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
- npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism --version
+ npm exec --package=@stdy/cli@0.22.1 -- steady --version
- npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log &
+ npm exec --package=@stdy/cli@0.22.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
- # Wait for server to come online (max 30s)
+ # Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
attempts=0
- while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do
+ while ! curl --silent --fail "http://127.0.0.1:4010/_x-steady/health" >/dev/null 2>&1; do
+ if ! kill -0 $! 2>/dev/null; then
+ echo
+ cat .stdy.log
+ exit 1
+ fi
attempts=$((attempts + 1))
if [ "$attempts" -ge 300 ]; then
echo
- echo "Timed out waiting for Prism server to start"
- cat .prism.log
+ echo "Timed out waiting for Steady server to start"
+ cat .stdy.log
exit 1
fi
echo -n "."
sleep 0.1
done
- if grep -q "✖ fatal" ".prism.log"; then
- cat .prism.log
- exit 1
- fi
-
echo
else
- npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL"
+ npm exec --package=@stdy/cli@0.22.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index dbeda2d..19acc91 100755
--- a/scripts/test
+++ b/scripts/test
@@ -9,8 +9,8 @@ GREEN='\033[0;32m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color
-function prism_is_running() {
- curl --silent "http://localhost:4010" >/dev/null 2>&1
+function steady_is_running() {
+ curl --silent "http://127.0.0.1:4010/_x-steady/health" >/dev/null 2>&1
}
kill_server_on_port() {
@@ -25,7 +25,7 @@ function is_overriding_api_base_url() {
[ -n "$TEST_API_BASE_URL" ]
}
-if ! is_overriding_api_base_url && ! prism_is_running ; then
+if ! is_overriding_api_base_url && ! steady_is_running ; then
# When we exit this script, make sure to kill the background mock server process
trap 'kill_server_on_port 4010' EXIT
@@ -36,19 +36,19 @@ fi
if is_overriding_api_base_url ; then
echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}"
echo
-elif ! prism_is_running ; then
- echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
+elif ! steady_is_running ; then
+ echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Steady server"
echo -e "running against your OpenAPI spec."
echo
echo -e "To run the server, pass in the path or url of your OpenAPI"
- echo -e "spec to the prism command:"
+ echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.22.1 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
echo
exit 1
else
- echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
+ echo -e "${GREEN}✔ Mock steady server is running with your OpenAPI spec${NC}"
echo
fi
diff --git a/src/parallel/_base_client.py b/src/parallel/_base_client.py
index 5128667..b283b92 100644
--- a/src/parallel/_base_client.py
+++ b/src/parallel/_base_client.py
@@ -540,6 +540,10 @@ def _build_request(
files = cast(HttpxRequestFiles, ForceMultipartDict())
prepared_url = self._prepare_url(options.url)
+ # preserve hard-coded query params from the url
+ if params and prepared_url.query:
+ params = {**dict(prepared_url.params.items()), **params}
+ prepared_url = prepared_url.copy_with(raw_path=prepared_url.raw_path.split(b"?", 1)[0])
if "_" in prepared_url.host:
# work around https://github.com/encode/httpx/discussions/2880
kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")}
diff --git a/src/parallel/_client.py b/src/parallel/_client.py
index cf3f898..74f06c7 100644
--- a/src/parallel/_client.py
+++ b/src/parallel/_client.py
@@ -3,32 +3,54 @@
from __future__ import annotations
import os
-from typing import TYPE_CHECKING, Any, Mapping
-from typing_extensions import Self, override
+from typing import TYPE_CHECKING, Any, Mapping, Optional
+from typing_extensions import Self, Literal, override
import httpx
from . import _exceptions
from ._qs import Querystring
+from .types import client_search_params, client_extract_params
from ._types import (
+ Body,
Omit,
+ Query,
+ Headers,
Timeout,
NotGiven,
Transport,
ProxiesTypes,
RequestOptions,
+ SequenceNotStr,
+ omit,
not_given,
)
-from ._utils import is_given, get_async_library
+from ._utils import (
+ is_given,
+ maybe_transform,
+ get_async_library,
+ async_maybe_transform,
+)
from ._compat import cached_property
from ._version import __version__
+from ._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
from ._exceptions import ParallelError, APIStatusError
from ._base_client import (
DEFAULT_MAX_RETRIES,
SyncAPIClient,
AsyncAPIClient,
+ make_request_options,
)
+from .types.search_result import SearchResult
+from .types.extract_response import ExtractResponse
+from .types.advanced_search_settings_param import AdvancedSearchSettingsParam
+from .types.advanced_extract_settings_param import AdvancedExtractSettingsParam
if TYPE_CHECKING:
from .resources import beta, task_run
@@ -108,6 +130,13 @@ def task_run(self) -> TaskRunResource:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
from .resources.task_run import TaskRunResource
@@ -198,6 +227,161 @@ def copy(
# client.with_options(timeout=10).foo.create(...)
with_options = copy
+ def extract(
+ self,
+ *,
+ urls: SequenceNotStr[str],
+ advanced_settings: Optional[AdvancedExtractSettingsParam] | Omit = omit,
+ client_model: Optional[str] | Omit = omit,
+ max_chars_total: Optional[int] | Omit = omit,
+ objective: Optional[str] | Omit = omit,
+ search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ExtractResponse:
+ """
+ Extracts relevant content from specific web URLs.
+
+ The legacy Extract API reference is available
+ [here](https://docs.parallel.ai/api-reference/legacy/extract-beta/extract).
+
+ Args:
+ urls: URLs to extract content from. Up to 20 URLs.
+
+ advanced_settings: Advanced extract configuration.
+
+ These settings may impact result quality and latency unless used carefully. See
+ https://docs.parallel.ai/search/advanced-extract-settings for more info.
+
+ client_model: The model generating this request and consuming the results. Enables
+ optimizations and tailors default settings for the model's capabilities.
+
+ max_chars_total: Upper bound on total characters across excerpts from all extracted results.
+
+ objective: As in SearchRequest, a natural-language description of the underlying question
+ or goal driving the request. Used together with search_queries to focus excerpts
+ on the most relevant content.
+
+ search_queries: Optional keyword search queries, as in SearchRequest. Used together with
+ objective to focus excerpts on the most relevant content.
+
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self.post(
+ "/v1/extract",
+ body=maybe_transform(
+ {
+ "urls": urls,
+ "advanced_settings": advanced_settings,
+ "client_model": client_model,
+ "max_chars_total": max_chars_total,
+ "objective": objective,
+ "search_queries": search_queries,
+ "session_id": session_id,
+ },
+ client_extract_params.ClientExtractParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ExtractResponse,
+ )
+
+ def search(
+ self,
+ *,
+ search_queries: SequenceNotStr[str],
+ advanced_settings: Optional[AdvancedSearchSettingsParam] | Omit = omit,
+ client_model: Optional[str] | Omit = omit,
+ max_chars_total: Optional[int] | Omit = omit,
+ mode: Optional[Literal["basic", "advanced"]] | Omit = omit,
+ objective: Optional[str] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SearchResult:
+ """
+ Searches the web.
+
+ The legacy Search API reference is available
+ [here](https://docs.parallel.ai/api-reference/legacy/search-beta/search).
+
+ Args:
+ search_queries: Concise keyword search queries, 3-6 words each. At least one query is required,
+ provide 2-3 for best results. Used together with objective to focus results on
+ the most relevant content.
+
+ advanced_settings: Advanced search configuration.
+
+ These settings may impact result quality and latency unless used carefully. See
+ https://docs.parallel.ai/search/advanced-search-settings for more info.
+
+ client_model: The model generating this request and consuming the results. Enables
+ optimizations and tailors default settings for the model's capabilities.
+
+ max_chars_total: Upper bound on total characters across excerpts from all results.
+
+ mode: Search mode preset: supported values are `basic` and `advanced`. Basic mode
+ offers the lowest latency and works best with 2-3 high-quality search_queries.
+ Advanced mode provides higher quality with more advanced retrieval and
+ compression. Defaults to `advanced` when omitted.
+
+ objective: Natural-language description of the underlying question or goal driving the
+ search. Used together with search_queries to focus results on the most relevant
+ content. Should be self-contained with enough context to understand the intent
+ of the search.
+
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self.post(
+ "/v1/search",
+ body=maybe_transform(
+ {
+ "search_queries": search_queries,
+ "advanced_settings": advanced_settings,
+ "client_model": client_model,
+ "max_chars_total": max_chars_total,
+ "mode": mode,
+ "objective": objective,
+ "session_id": session_id,
+ },
+ client_search_params.ClientSearchParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SearchResult,
+ )
+
@override
def _make_status_error(
self,
@@ -293,6 +477,13 @@ def task_run(self) -> AsyncTaskRunResource:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
from .resources.task_run import AsyncTaskRunResource
@@ -383,6 +574,161 @@ def copy(
# client.with_options(timeout=10).foo.create(...)
with_options = copy
+ async def extract(
+ self,
+ *,
+ urls: SequenceNotStr[str],
+ advanced_settings: Optional[AdvancedExtractSettingsParam] | Omit = omit,
+ client_model: Optional[str] | Omit = omit,
+ max_chars_total: Optional[int] | Omit = omit,
+ objective: Optional[str] | Omit = omit,
+ search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ExtractResponse:
+ """
+ Extracts relevant content from specific web URLs.
+
+ The legacy Extract API reference is available
+ [here](https://docs.parallel.ai/api-reference/legacy/extract-beta/extract).
+
+ Args:
+ urls: URLs to extract content from. Up to 20 URLs.
+
+ advanced_settings: Advanced extract configuration.
+
+ These settings may impact result quality and latency unless used carefully. See
+ https://docs.parallel.ai/search/advanced-extract-settings for more info.
+
+ client_model: The model generating this request and consuming the results. Enables
+ optimizations and tailors default settings for the model's capabilities.
+
+ max_chars_total: Upper bound on total characters across excerpts from all extracted results.
+
+ objective: As in SearchRequest, a natural-language description of the underlying question
+ or goal driving the request. Used together with search_queries to focus excerpts
+ on the most relevant content.
+
+ search_queries: Optional keyword search queries, as in SearchRequest. Used together with
+ objective to focus excerpts on the most relevant content.
+
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self.post(
+ "/v1/extract",
+ body=await async_maybe_transform(
+ {
+ "urls": urls,
+ "advanced_settings": advanced_settings,
+ "client_model": client_model,
+ "max_chars_total": max_chars_total,
+ "objective": objective,
+ "search_queries": search_queries,
+ "session_id": session_id,
+ },
+ client_extract_params.ClientExtractParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ExtractResponse,
+ )
+
+ async def search(
+ self,
+ *,
+ search_queries: SequenceNotStr[str],
+ advanced_settings: Optional[AdvancedSearchSettingsParam] | Omit = omit,
+ client_model: Optional[str] | Omit = omit,
+ max_chars_total: Optional[int] | Omit = omit,
+ mode: Optional[Literal["basic", "advanced"]] | Omit = omit,
+ objective: Optional[str] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SearchResult:
+ """
+ Searches the web.
+
+ The legacy Search API reference is available
+ [here](https://docs.parallel.ai/api-reference/legacy/search-beta/search).
+
+ Args:
+ search_queries: Concise keyword search queries, 3-6 words each. At least one query is required,
+ provide 2-3 for best results. Used together with objective to focus results on
+ the most relevant content.
+
+ advanced_settings: Advanced search configuration.
+
+ These settings may impact result quality and latency unless used carefully. See
+ https://docs.parallel.ai/search/advanced-search-settings for more info.
+
+ client_model: The model generating this request and consuming the results. Enables
+ optimizations and tailors default settings for the model's capabilities.
+
+ max_chars_total: Upper bound on total characters across excerpts from all results.
+
+ mode: Search mode preset: supported values are `basic` and `advanced`. Basic mode
+ offers the lowest latency and works best with 2-3 high-quality search_queries.
+ Advanced mode provides higher quality with more advanced retrieval and
+ compression. Defaults to `advanced` when omitted.
+
+ objective: Natural-language description of the underlying question or goal driving the
+ search. Used together with search_queries to focus results on the most relevant
+ content. Should be self-contained with enough context to understand the intent
+ of the search.
+
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self.post(
+ "/v1/search",
+ body=await async_maybe_transform(
+ {
+ "search_queries": search_queries,
+ "advanced_settings": advanced_settings,
+ "client_model": client_model,
+ "max_chars_total": max_chars_total,
+ "mode": mode,
+ "objective": objective,
+ "session_id": session_id,
+ },
+ client_search_params.ClientSearchParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SearchResult,
+ )
+
@override
def _make_status_error(
self,
@@ -423,12 +769,26 @@ class ParallelWithRawResponse:
def __init__(self, client: Parallel) -> None:
self._client = client
+ self.extract = to_raw_response_wrapper(
+ client.extract,
+ )
+ self.search = to_raw_response_wrapper(
+ client.search,
+ )
+
@cached_property
def task_run(self) -> task_run.TaskRunResourceWithRawResponse:
"""The Task API executes web research and extraction tasks.
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
from .resources.task_run import TaskRunResourceWithRawResponse
@@ -447,12 +807,26 @@ class AsyncParallelWithRawResponse:
def __init__(self, client: AsyncParallel) -> None:
self._client = client
+ self.extract = async_to_raw_response_wrapper(
+ client.extract,
+ )
+ self.search = async_to_raw_response_wrapper(
+ client.search,
+ )
+
@cached_property
def task_run(self) -> task_run.AsyncTaskRunResourceWithRawResponse:
"""The Task API executes web research and extraction tasks.
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
from .resources.task_run import AsyncTaskRunResourceWithRawResponse
@@ -471,12 +845,26 @@ class ParallelWithStreamedResponse:
def __init__(self, client: Parallel) -> None:
self._client = client
+ self.extract = to_streamed_response_wrapper(
+ client.extract,
+ )
+ self.search = to_streamed_response_wrapper(
+ client.search,
+ )
+
@cached_property
def task_run(self) -> task_run.TaskRunResourceWithStreamingResponse:
"""The Task API executes web research and extraction tasks.
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
from .resources.task_run import TaskRunResourceWithStreamingResponse
@@ -495,12 +883,26 @@ class AsyncParallelWithStreamedResponse:
def __init__(self, client: AsyncParallel) -> None:
self._client = client
+ self.extract = async_to_streamed_response_wrapper(
+ client.extract,
+ )
+ self.search = async_to_streamed_response_wrapper(
+ client.search,
+ )
+
@cached_property
def task_run(self) -> task_run.AsyncTaskRunResourceWithStreamingResponse:
"""The Task API executes web research and extraction tasks.
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
from .resources.task_run import AsyncTaskRunResourceWithStreamingResponse
diff --git a/src/parallel/_compat.py b/src/parallel/_compat.py
index 020ffeb..340c91a 100644
--- a/src/parallel/_compat.py
+++ b/src/parallel/_compat.py
@@ -2,7 +2,7 @@
from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload
from datetime import date, datetime
-from typing_extensions import Self, Literal
+from typing_extensions import Self, Literal, TypedDict
import pydantic
from pydantic.fields import FieldInfo
@@ -131,6 +131,10 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
return model.model_dump_json(indent=indent)
+class _ModelDumpKwargs(TypedDict, total=False):
+ by_alias: bool
+
+
def model_dump(
model: pydantic.BaseModel,
*,
@@ -142,6 +146,9 @@ def model_dump(
by_alias: bool | None = None,
) -> dict[str, Any]:
if (not PYDANTIC_V1) or hasattr(model, "model_dump"):
+ kwargs: _ModelDumpKwargs = {}
+ if by_alias is not None:
+ kwargs["by_alias"] = by_alias
return model.model_dump(
mode=mode,
exclude=exclude,
@@ -149,7 +156,7 @@ def model_dump(
exclude_defaults=exclude_defaults,
# warnings are not supported in Pydantic v1
warnings=True if PYDANTIC_V1 else warnings,
- by_alias=by_alias,
+ **kwargs,
)
return cast(
"dict[str, Any]",
diff --git a/src/parallel/_files.py b/src/parallel/_files.py
index cc14c14..0fdce17 100644
--- a/src/parallel/_files.py
+++ b/src/parallel/_files.py
@@ -3,8 +3,8 @@
import io
import os
import pathlib
-from typing import overload
-from typing_extensions import TypeGuard
+from typing import Sequence, cast, overload
+from typing_extensions import TypeVar, TypeGuard
import anyio
@@ -17,7 +17,9 @@
HttpxFileContent,
HttpxRequestFiles,
)
-from ._utils import is_tuple_t, is_mapping_t, is_sequence_t
+from ._utils import is_list, is_mapping, is_tuple_t, is_mapping_t, is_sequence_t
+
+_T = TypeVar("_T")
def is_base64_file_input(obj: object) -> TypeGuard[Base64FileInput]:
@@ -121,3 +123,51 @@ async def async_read_file_content(file: FileContent) -> HttpxFileContent:
return await anyio.Path(file).read_bytes()
return file
+
+
+def deepcopy_with_paths(item: _T, paths: Sequence[Sequence[str]]) -> _T:
+ """Copy only the containers along the given paths.
+
+ Used to guard against mutation by extract_files without copying the entire structure.
+ Only dicts and lists that lie on a path are copied; everything else
+ is returned by reference.
+
+ For example, given paths=[["foo", "files", "file"]] and the structure:
+ {
+ "foo": {
+ "bar": {"baz": {}},
+ "files": {"file": }
+ }
+ }
+ The root dict, "foo", and "files" are copied (they lie on the path).
+ "bar" and "baz" are returned by reference (off the path).
+ """
+ return _deepcopy_with_paths(item, paths, 0)
+
+
+def _deepcopy_with_paths(item: _T, paths: Sequence[Sequence[str]], index: int) -> _T:
+ if not paths:
+ return item
+ if is_mapping(item):
+ key_to_paths: dict[str, list[Sequence[str]]] = {}
+ for path in paths:
+ if index < len(path):
+ key_to_paths.setdefault(path[index], []).append(path)
+
+ # if no path continues through this mapping, it won't be mutated and copying it is redundant
+ if not key_to_paths:
+ return item
+
+ result = dict(item)
+ for key, subpaths in key_to_paths.items():
+ if key in result:
+ result[key] = _deepcopy_with_paths(result[key], subpaths, index + 1)
+ return cast(_T, result)
+ if is_list(item):
+ array_paths = [path for path in paths if index < len(path) and path[index] == ""]
+
+ # if no path expects a list here, nothing will be mutated inside it - return by reference
+ if not array_paths:
+ return cast(_T, item)
+ return cast(_T, [_deepcopy_with_paths(entry, array_paths, index + 1) for entry in item])
+ return item
diff --git a/src/parallel/_qs.py b/src/parallel/_qs.py
index ada6fd3..de8c99b 100644
--- a/src/parallel/_qs.py
+++ b/src/parallel/_qs.py
@@ -101,7 +101,10 @@ def _stringify_item(
items.extend(self._stringify_item(key, item, opts))
return items
elif array_format == "indices":
- raise NotImplementedError("The array indices format is not supported yet")
+ items = []
+ for i, item in enumerate(value):
+ items.extend(self._stringify_item(f"{key}[{i}]", item, opts))
+ return items
elif array_format == "brackets":
items = []
key = key + "[]"
diff --git a/src/parallel/_utils/__init__.py b/src/parallel/_utils/__init__.py
index b70d7b2..3d1284f 100644
--- a/src/parallel/_utils/__init__.py
+++ b/src/parallel/_utils/__init__.py
@@ -1,3 +1,4 @@
+from ._path import path_template as path_template
from ._sync import asyncify as asyncify
from ._proxy import LazyProxy as LazyProxy
from ._utils import (
@@ -24,7 +25,6 @@
coerce_integer as coerce_integer,
file_from_path as file_from_path,
strip_not_given as strip_not_given,
- deepcopy_minimal as deepcopy_minimal,
get_async_library as get_async_library,
maybe_coerce_float as maybe_coerce_float,
get_required_header as get_required_header,
diff --git a/src/parallel/_utils/_path.py b/src/parallel/_utils/_path.py
new file mode 100644
index 0000000..4d6e1e4
--- /dev/null
+++ b/src/parallel/_utils/_path.py
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+import re
+from typing import (
+ Any,
+ Mapping,
+ Callable,
+)
+from urllib.parse import quote
+
+# Matches '.' or '..' where each dot is either literal or percent-encoded (%2e / %2E).
+_DOT_SEGMENT_RE = re.compile(r"^(?:\.|%2[eE]){1,2}$")
+
+_PLACEHOLDER_RE = re.compile(r"\{(\w+)\}")
+
+
+def _quote_path_segment_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI path segment.
+
+ Considers characters not in `pchar` set from RFC 3986 §3.3 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.3
+ """
+ # quote() already treats unreserved characters (letters, digits, and -._~)
+ # as safe, so we only need to add sub-delims, ':', and '@'.
+ # Notably, unlike the default `safe` for quote(), / is unsafe and must be quoted.
+ return quote(value, safe="!$&'()*+,;=:@")
+
+
+def _quote_query_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI query string.
+
+ Considers &, = and characters not in `query` set from RFC 3986 §3.4 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.4
+ """
+ return quote(value, safe="!$'()*+,;:@/?")
+
+
+def _quote_fragment_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI fragment.
+
+ Considers characters not in `fragment` set from RFC 3986 §3.5 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.5
+ """
+ return quote(value, safe="!$&'()*+,;=:@/?")
+
+
+def _interpolate(
+ template: str,
+ values: Mapping[str, Any],
+ quoter: Callable[[str], str],
+) -> str:
+ """Replace {name} placeholders in `template`, quoting each value with `quoter`.
+
+ Placeholder names are looked up in `values`.
+
+ Raises:
+ KeyError: If a placeholder is not found in `values`.
+ """
+ # re.split with a capturing group returns alternating
+ # [text, name, text, name, ..., text] elements.
+ parts = _PLACEHOLDER_RE.split(template)
+
+ for i in range(1, len(parts), 2):
+ name = parts[i]
+ if name not in values:
+ raise KeyError(f"a value for placeholder {{{name}}} was not provided")
+ val = values[name]
+ if val is None:
+ parts[i] = "null"
+ elif isinstance(val, bool):
+ parts[i] = "true" if val else "false"
+ else:
+ parts[i] = quoter(str(values[name]))
+
+ return "".join(parts)
+
+
+def path_template(template: str, /, **kwargs: Any) -> str:
+ """Interpolate {name} placeholders in `template` from keyword arguments.
+
+ Args:
+ template: The template string containing {name} placeholders.
+ **kwargs: Keyword arguments to interpolate into the template.
+
+ Returns:
+ The template with placeholders interpolated and percent-encoded.
+
+ Safe characters for percent-encoding are dependent on the URI component.
+ Placeholders in path and fragment portions are percent-encoded where the `segment`
+ and `fragment` sets from RFC 3986 respectively are considered safe.
+ Placeholders in the query portion are percent-encoded where the `query` set from
+ RFC 3986 §3.4 is considered safe except for = and & characters.
+
+ Raises:
+ KeyError: If a placeholder is not found in `kwargs`.
+ ValueError: If resulting path contains /./ or /../ segments (including percent-encoded dot-segments).
+ """
+ # Split the template into path, query, and fragment portions.
+ fragment_template: str | None = None
+ query_template: str | None = None
+
+ rest = template
+ if "#" in rest:
+ rest, fragment_template = rest.split("#", 1)
+ if "?" in rest:
+ rest, query_template = rest.split("?", 1)
+ path_template = rest
+
+ # Interpolate each portion with the appropriate quoting rules.
+ path_result = _interpolate(path_template, kwargs, _quote_path_segment_part)
+
+ # Reject dot-segments (. and ..) in the final assembled path. The check
+ # runs after interpolation so that adjacent placeholders or a mix of static
+ # text and placeholders that together form a dot-segment are caught.
+ # Also reject percent-encoded dot-segments to protect against incorrectly
+ # implemented normalization in servers/proxies.
+ for segment in path_result.split("/"):
+ if _DOT_SEGMENT_RE.match(segment):
+ raise ValueError(f"Constructed path {path_result!r} contains dot-segment {segment!r} which is not allowed")
+
+ result = path_result
+ if query_template is not None:
+ result += "?" + _interpolate(query_template, kwargs, _quote_query_part)
+ if fragment_template is not None:
+ result += "#" + _interpolate(fragment_template, kwargs, _quote_fragment_part)
+
+ return result
diff --git a/src/parallel/_utils/_utils.py b/src/parallel/_utils/_utils.py
index ece4d87..463155d 100644
--- a/src/parallel/_utils/_utils.py
+++ b/src/parallel/_utils/_utils.py
@@ -86,8 +86,9 @@ def _extract_items(
index += 1
if is_dict(obj):
try:
- # We are at the last entry in the path so we must remove the field
- if (len(path)) == index:
+ # Remove the field if there are no more dict keys in the path,
+ # only "" traversal markers or end.
+ if all(p == "" for p in path[index:]):
item = obj.pop(key)
else:
item = obj[key]
@@ -180,21 +181,6 @@ def is_iterable(obj: object) -> TypeGuard[Iterable[object]]:
return isinstance(obj, Iterable)
-def deepcopy_minimal(item: _T) -> _T:
- """Minimal reimplementation of copy.deepcopy() that will only copy certain object types:
-
- - mappings, e.g. `dict`
- - list
-
- This is done for performance reasons.
- """
- if is_mapping(item):
- return cast(_T, {k: deepcopy_minimal(v) for k, v in item.items()})
- if is_list(item):
- return cast(_T, [deepcopy_minimal(entry) for entry in item])
- return item
-
-
# copied from https://github.com/Rapptz/RoboDanny
def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str:
size = len(seq)
diff --git a/src/parallel/_version.py b/src/parallel/_version.py
index 1b80dfe..e3da38f 100644
--- a/src/parallel/_version.py
+++ b/src/parallel/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "parallel"
-__version__ = "0.4.2" # x-release-please-version
+__version__ = "0.5.0" # x-release-please-version
diff --git a/src/parallel/resources/beta/api.md b/src/parallel/resources/beta/api.md
index 0b56fe7..3ad7b56 100644
--- a/src/parallel/resources/beta/api.md
+++ b/src/parallel/resources/beta/api.md
@@ -5,13 +5,13 @@ Types:
```python
from parallel.types.beta import (
ExcerptSettings,
- ExtractError,
ExtractResponse,
ExtractResult,
- FetchPolicy,
SearchResult,
- UsageItem,
WebSearchResult,
+ ExtractError,
+ FetchPolicy,
+ UsageItem,
)
```
@@ -73,6 +73,8 @@ Types:
```python
from parallel.types.beta import (
FindAllCandidateMatchStatusEvent,
+ FindAllCandidatesRequest,
+ FindAllCandidatesResponse,
FindAllEnrichInput,
FindAllExtendInput,
FindAllRun,
@@ -91,6 +93,7 @@ Methods:
- client.beta.findall.create(\*\*params) -> FindAllRun
- client.beta.findall.retrieve(findall_id) -> FindAllRun
- client.beta.findall.cancel(findall_id) -> object
+- client.beta.findall.candidates(\*\*params) -> FindAllCandidatesResponse
- client.beta.findall.enrich(findall_id, \*\*params) -> FindAllSchema
- client.beta.findall.events(findall_id, \*\*params) -> FindAllEventsResponse
- client.beta.findall.extend(findall_id, \*\*params) -> FindAllSchema
diff --git a/src/parallel/resources/beta/beta.py b/src/parallel/resources/beta/beta.py
index c2f4368..fa59196 100644
--- a/src/parallel/resources/beta/beta.py
+++ b/src/parallel/resources/beta/beta.py
@@ -45,8 +45,8 @@
from ...types.beta import beta_search_params, beta_extract_params
from ..._base_client import make_request_options
from ...types.beta.search_result import SearchResult
+from ...types.fetch_policy_param import FetchPolicyParam
from ...types.beta.extract_response import ExtractResponse
-from ...types.beta.fetch_policy_param import FetchPolicyParam
from ...types.beta.parallel_beta_param import ParallelBetaParam
from ...types.beta.excerpt_settings_param import ExcerptSettingsParam
from ...types.shared_params.source_policy import SourcePolicy
@@ -61,19 +61,29 @@ def task_run(self) -> TaskRunResource:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
return TaskRunResource(self._client)
@cached_property
def task_group(self) -> TaskGroupResource:
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
return TaskGroupResource(self._client)
@@ -112,6 +122,7 @@ def extract(
full_content: beta_extract_params.FullContent | Omit = omit,
objective: Optional[str] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -140,6 +151,10 @@ def extract(
search_queries: If provided, focuses extracted content on the specified keyword search queries.
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
+
betas: Optional header to specify the beta version(s) to enable.
extra_headers: Send extra headers
@@ -171,6 +186,7 @@ def extract(
"full_content": full_content,
"objective": objective,
"search_queries": search_queries,
+ "session_id": session_id,
},
beta_extract_params.BetaExtractParams,
),
@@ -185,12 +201,14 @@ def search(
*,
excerpts: ExcerptSettingsParam | Omit = omit,
fetch_policy: Optional[FetchPolicyParam] | Omit = omit,
+ location: Optional[str] | Omit = omit,
max_chars_per_result: Optional[int] | Omit = omit,
max_results: Optional[int] | Omit = omit,
mode: Optional[Literal["one-shot", "agentic", "fast"]] | Omit = omit,
objective: Optional[str] | Omit = omit,
processor: Optional[Literal["base", "pro"]] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
source_policy: Optional[SourcePolicy] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -208,6 +226,8 @@ def search(
fetch_policy: Policy for live fetching web results.
+ location: ISO 3166-1 alpha-2 country code for geo-targeted search results.
+
max_chars_per_result: DEPRECATED: Use `excerpts.max_chars_per_result` instead.
max_results: Upper bound on the number of results to return. Defaults to 10 if not provided.
@@ -231,6 +251,10 @@ def search(
contain search operators. At least one of objective or search_queries must be
provided.
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
+
source_policy: Source policy for web search results.
This policy governs which sources are allowed/disallowed in results.
@@ -262,12 +286,14 @@ def search(
{
"excerpts": excerpts,
"fetch_policy": fetch_policy,
+ "location": location,
"max_chars_per_result": max_chars_per_result,
"max_results": max_results,
"mode": mode,
"objective": objective,
"processor": processor,
"search_queries": search_queries,
+ "session_id": session_id,
"source_policy": source_policy,
},
beta_search_params.BetaSearchParams,
@@ -286,19 +312,29 @@ def task_run(self) -> AsyncTaskRunResource:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
return AsyncTaskRunResource(self._client)
@cached_property
def task_group(self) -> AsyncTaskGroupResource:
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
return AsyncTaskGroupResource(self._client)
@@ -337,6 +373,7 @@ async def extract(
full_content: beta_extract_params.FullContent | Omit = omit,
objective: Optional[str] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -365,6 +402,10 @@ async def extract(
search_queries: If provided, focuses extracted content on the specified keyword search queries.
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
+
betas: Optional header to specify the beta version(s) to enable.
extra_headers: Send extra headers
@@ -396,6 +437,7 @@ async def extract(
"full_content": full_content,
"objective": objective,
"search_queries": search_queries,
+ "session_id": session_id,
},
beta_extract_params.BetaExtractParams,
),
@@ -410,12 +452,14 @@ async def search(
*,
excerpts: ExcerptSettingsParam | Omit = omit,
fetch_policy: Optional[FetchPolicyParam] | Omit = omit,
+ location: Optional[str] | Omit = omit,
max_chars_per_result: Optional[int] | Omit = omit,
max_results: Optional[int] | Omit = omit,
mode: Optional[Literal["one-shot", "agentic", "fast"]] | Omit = omit,
objective: Optional[str] | Omit = omit,
processor: Optional[Literal["base", "pro"]] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
+ session_id: Optional[str] | Omit = omit,
source_policy: Optional[SourcePolicy] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -433,6 +477,8 @@ async def search(
fetch_policy: Policy for live fetching web results.
+ location: ISO 3166-1 alpha-2 country code for geo-targeted search results.
+
max_chars_per_result: DEPRECATED: Use `excerpts.max_chars_per_result` instead.
max_results: Upper bound on the number of results to return. Defaults to 10 if not provided.
@@ -456,6 +502,10 @@ async def search(
contain search operators. At least one of objective or search_queries must be
provided.
+ session_id: Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
+
source_policy: Source policy for web search results.
This policy governs which sources are allowed/disallowed in results.
@@ -487,12 +537,14 @@ async def search(
{
"excerpts": excerpts,
"fetch_policy": fetch_policy,
+ "location": location,
"max_chars_per_result": max_chars_per_result,
"max_results": max_results,
"mode": mode,
"objective": objective,
"processor": processor,
"search_queries": search_queries,
+ "session_id": session_id,
"source_policy": source_policy,
},
beta_search_params.BetaSearchParams,
@@ -521,19 +573,29 @@ def task_run(self) -> TaskRunResourceWithRawResponse:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
return TaskRunResourceWithRawResponse(self._beta.task_run)
@cached_property
def task_group(self) -> TaskGroupResourceWithRawResponse:
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
return TaskGroupResourceWithRawResponse(self._beta.task_group)
@@ -562,19 +624,29 @@ def task_run(self) -> AsyncTaskRunResourceWithRawResponse:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
return AsyncTaskRunResourceWithRawResponse(self._beta.task_run)
@cached_property
def task_group(self) -> AsyncTaskGroupResourceWithRawResponse:
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
return AsyncTaskGroupResourceWithRawResponse(self._beta.task_group)
@@ -603,19 +675,29 @@ def task_run(self) -> TaskRunResourceWithStreamingResponse:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
return TaskRunResourceWithStreamingResponse(self._beta.task_run)
@cached_property
def task_group(self) -> TaskGroupResourceWithStreamingResponse:
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
return TaskGroupResourceWithStreamingResponse(self._beta.task_group)
@@ -644,19 +726,29 @@ def task_run(self) -> AsyncTaskRunResourceWithStreamingResponse:
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
return AsyncTaskRunResourceWithStreamingResponse(self._beta.task_run)
@cached_property
def task_group(self) -> AsyncTaskGroupResourceWithStreamingResponse:
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
return AsyncTaskGroupResourceWithStreamingResponse(self._beta.task_group)
diff --git a/src/parallel/resources/beta/findall.py b/src/parallel/resources/beta/findall.py
index ef5ab71..b5d2ab2 100644
--- a/src/parallel/resources/beta/findall.py
+++ b/src/parallel/resources/beta/findall.py
@@ -9,7 +9,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import is_given, maybe_transform, strip_not_given, async_maybe_transform
+from ..._utils import is_given, path_template, maybe_transform, strip_not_given, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -25,6 +25,7 @@
findall_events_params,
findall_extend_params,
findall_ingest_params,
+ findall_candidates_params,
)
from ..._base_client import make_request_options
from ...types.beta.findall_run import FindAllRun
@@ -35,6 +36,7 @@
from ...types.beta.findall_run_result import FindAllRunResult
from ...types.beta.parallel_beta_param import ParallelBetaParam
from ...types.beta.findall_events_response import FindAllEventsResponse
+from ...types.beta.findall_candidates_response import FindAllCandidatesResponse
__all__ = [
"FindAllResource",
@@ -117,7 +119,7 @@ def create(
match_conditions: List of match conditions for the FindAll run.
match_limit: Maximum number of matches to find for this FindAll run. Must be between 5 and
- 1000 (inclusive).
+ 1000 (inclusive). May return fewer results.
objective: Natural language objective of the FindAll run.
@@ -209,7 +211,7 @@ def retrieve(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._get(
- f"/v1beta/findall/runs/{findall_id}",
+ path_template("/v1beta/findall/runs/{findall_id}", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -256,13 +258,66 @@ def cancel(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._post(
- f"/v1beta/findall/runs/{findall_id}/cancel",
+ path_template("/v1beta/findall/runs/{findall_id}/cancel", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=object,
)
+ def candidates(
+ self,
+ *,
+ entity_type: Literal["company", "people"],
+ objective: str,
+ match_limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FindAllCandidatesResponse:
+ """
+ Return ranked entity candidates matching a natural language objective.
+
+ This endpoint performs a best-effort search optimized for low latency. For
+ comprehensive match evaluation and enrichment, use the
+ [FindAll API](https://docs.parallel.ai/findall-api/findall-quickstart).
+
+ Args:
+ entity_type: Type of entity to search for.
+
+ objective: Natural language description of target entities.
+
+ match_limit: Maximum number of candidates to return. Must be between 5 and 1000 (inclusive).
+ May return fewer results. Defaults to 100.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
+ return self._post(
+ "/v1beta/findall/candidates",
+ body=maybe_transform(
+ {
+ "entity_type": entity_type,
+ "objective": objective,
+ "match_limit": match_limit,
+ },
+ findall_candidates_params.FindAllCandidatesParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FindAllCandidatesResponse,
+ )
+
def enrich(
self,
findall_id: str,
@@ -312,7 +367,7 @@ def enrich(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._post(
- f"/v1beta/findall/runs/{findall_id}/enrich",
+ path_template("/v1beta/findall/runs/{findall_id}/enrich", findall_id=findall_id),
body=maybe_transform(
{
"output_schema": output_schema,
@@ -375,7 +430,7 @@ def events(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._get(
- f"/v1beta/findall/runs/{findall_id}/events",
+ path_template("/v1beta/findall/runs/{findall_id}/events", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -439,7 +494,7 @@ def extend(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._post(
- f"/v1beta/findall/runs/{findall_id}/extend",
+ path_template("/v1beta/findall/runs/{findall_id}/extend", findall_id=findall_id),
body=maybe_transform(
{"additional_match_limit": additional_match_limit}, findall_extend_params.FindAllExtendParams
),
@@ -542,7 +597,7 @@ def result(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._get(
- f"/v1beta/findall/runs/{findall_id}/result",
+ path_template("/v1beta/findall/runs/{findall_id}/result", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -589,7 +644,7 @@ def schema(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return self._get(
- f"/v1beta/findall/runs/{findall_id}/schema",
+ path_template("/v1beta/findall/runs/{findall_id}/schema", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -662,7 +717,7 @@ async def create(
match_conditions: List of match conditions for the FindAll run.
match_limit: Maximum number of matches to find for this FindAll run. Must be between 5 and
- 1000 (inclusive).
+ 1000 (inclusive). May return fewer results.
objective: Natural language objective of the FindAll run.
@@ -754,7 +809,7 @@ async def retrieve(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._get(
- f"/v1beta/findall/runs/{findall_id}",
+ path_template("/v1beta/findall/runs/{findall_id}", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -801,13 +856,66 @@ async def cancel(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._post(
- f"/v1beta/findall/runs/{findall_id}/cancel",
+ path_template("/v1beta/findall/runs/{findall_id}/cancel", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=object,
)
+ async def candidates(
+ self,
+ *,
+ entity_type: Literal["company", "people"],
+ objective: str,
+ match_limit: int | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FindAllCandidatesResponse:
+ """
+ Return ranked entity candidates matching a natural language objective.
+
+ This endpoint performs a best-effort search optimized for low latency. For
+ comprehensive match evaluation and enrichment, use the
+ [FindAll API](https://docs.parallel.ai/findall-api/findall-quickstart).
+
+ Args:
+ entity_type: Type of entity to search for.
+
+ objective: Natural language description of target entities.
+
+ match_limit: Maximum number of candidates to return. Must be between 5 and 1000 (inclusive).
+ May return fewer results. Defaults to 100.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
+ return await self._post(
+ "/v1beta/findall/candidates",
+ body=await async_maybe_transform(
+ {
+ "entity_type": entity_type,
+ "objective": objective,
+ "match_limit": match_limit,
+ },
+ findall_candidates_params.FindAllCandidatesParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FindAllCandidatesResponse,
+ )
+
async def enrich(
self,
findall_id: str,
@@ -857,7 +965,7 @@ async def enrich(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._post(
- f"/v1beta/findall/runs/{findall_id}/enrich",
+ path_template("/v1beta/findall/runs/{findall_id}/enrich", findall_id=findall_id),
body=await async_maybe_transform(
{
"output_schema": output_schema,
@@ -920,7 +1028,7 @@ async def events(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._get(
- f"/v1beta/findall/runs/{findall_id}/events",
+ path_template("/v1beta/findall/runs/{findall_id}/events", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -984,7 +1092,7 @@ async def extend(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._post(
- f"/v1beta/findall/runs/{findall_id}/extend",
+ path_template("/v1beta/findall/runs/{findall_id}/extend", findall_id=findall_id),
body=await async_maybe_transform(
{"additional_match_limit": additional_match_limit}, findall_extend_params.FindAllExtendParams
),
@@ -1087,7 +1195,7 @@ async def result(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._get(
- f"/v1beta/findall/runs/{findall_id}/result",
+ path_template("/v1beta/findall/runs/{findall_id}/result", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -1134,7 +1242,7 @@ async def schema(
}
extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})}
return await self._get(
- f"/v1beta/findall/runs/{findall_id}/schema",
+ path_template("/v1beta/findall/runs/{findall_id}/schema", findall_id=findall_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -1155,6 +1263,9 @@ def __init__(self, findall: FindAllResource) -> None:
self.cancel = to_raw_response_wrapper(
findall.cancel,
)
+ self.candidates = to_raw_response_wrapper(
+ findall.candidates,
+ )
self.enrich = to_raw_response_wrapper(
findall.enrich,
)
@@ -1188,6 +1299,9 @@ def __init__(self, findall: AsyncFindAllResource) -> None:
self.cancel = async_to_raw_response_wrapper(
findall.cancel,
)
+ self.candidates = async_to_raw_response_wrapper(
+ findall.candidates,
+ )
self.enrich = async_to_raw_response_wrapper(
findall.enrich,
)
@@ -1221,6 +1335,9 @@ def __init__(self, findall: FindAllResource) -> None:
self.cancel = to_streamed_response_wrapper(
findall.cancel,
)
+ self.candidates = to_streamed_response_wrapper(
+ findall.candidates,
+ )
self.enrich = to_streamed_response_wrapper(
findall.enrich,
)
@@ -1254,6 +1371,9 @@ def __init__(self, findall: AsyncFindAllResource) -> None:
self.cancel = async_to_streamed_response_wrapper(
findall.cancel,
)
+ self.candidates = async_to_streamed_response_wrapper(
+ findall.candidates,
+ )
self.enrich = async_to_streamed_response_wrapper(
findall.enrich,
)
diff --git a/src/parallel/resources/beta/task_group.py b/src/parallel/resources/beta/task_group.py
index b15eab7..5ccb046 100644
--- a/src/parallel/resources/beta/task_group.py
+++ b/src/parallel/resources/beta/task_group.py
@@ -9,7 +9,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import is_given, maybe_transform, strip_not_given, async_maybe_transform
+from ..._utils import is_given, path_template, maybe_transform, strip_not_given, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -38,14 +38,17 @@
class TaskGroupResource(SyncAPIResource):
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
@cached_property
@@ -129,7 +132,7 @@ def retrieve(
raise ValueError(f"Expected a non-empty value for `task_group_id` but received {task_group_id!r}")
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._get(
- f"/v1beta/tasks/groups/{task_group_id}",
+ path_template("/v1beta/tasks/groups/{task_group_id}", task_group_id=task_group_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -141,6 +144,7 @@ def add_runs(
task_group_id: str,
*,
inputs: Iterable[BetaRunInputParam],
+ refresh_status: bool | Omit = omit,
default_task_spec: Optional[TaskSpecParam] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -188,7 +192,7 @@ def add_runs(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._post(
- f"/v1beta/tasks/groups/{task_group_id}/runs",
+ path_template("/v1beta/tasks/groups/{task_group_id}/runs", task_group_id=task_group_id),
body=maybe_transform(
{
"inputs": inputs,
@@ -197,7 +201,13 @@ def add_runs(
task_group_add_runs_params.TaskGroupAddRunsParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {"refresh_status": refresh_status}, task_group_add_runs_params.TaskGroupAddRunsParams
+ ),
),
cast_to=TaskGroupRunResponse,
)
@@ -235,7 +245,7 @@ def events(
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._get(
- f"/v1beta/tasks/groups/{task_group_id}/events",
+ path_template("/v1beta/tasks/groups/{task_group_id}/events", task_group_id=task_group_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -300,7 +310,7 @@ def get_runs(
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._get(
- f"/v1beta/tasks/groups/{task_group_id}/runs",
+ path_template("/v1beta/tasks/groups/{task_group_id}/runs", task_group_id=task_group_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -325,14 +335,17 @@ def get_runs(
class AsyncTaskGroupResource(AsyncAPIResource):
- """
- The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
- Submit hundreds or thousands of Tasks as a single group
- Observe group progress and receive results as they complete
- Real-time updates via Server-Sent Events (SSE)
- Add tasks to an existing group while it is running
- Group-level retry and error aggregation
- Status: beta and subject to change.
"""
@cached_property
@@ -416,7 +429,7 @@ async def retrieve(
raise ValueError(f"Expected a non-empty value for `task_group_id` but received {task_group_id!r}")
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._get(
- f"/v1beta/tasks/groups/{task_group_id}",
+ path_template("/v1beta/tasks/groups/{task_group_id}", task_group_id=task_group_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -428,6 +441,7 @@ async def add_runs(
task_group_id: str,
*,
inputs: Iterable[BetaRunInputParam],
+ refresh_status: bool | Omit = omit,
default_task_spec: Optional[TaskSpecParam] | Omit = omit,
betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -475,7 +489,7 @@ async def add_runs(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._post(
- f"/v1beta/tasks/groups/{task_group_id}/runs",
+ path_template("/v1beta/tasks/groups/{task_group_id}/runs", task_group_id=task_group_id),
body=await async_maybe_transform(
{
"inputs": inputs,
@@ -484,7 +498,13 @@ async def add_runs(
task_group_add_runs_params.TaskGroupAddRunsParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {"refresh_status": refresh_status}, task_group_add_runs_params.TaskGroupAddRunsParams
+ ),
),
cast_to=TaskGroupRunResponse,
)
@@ -522,7 +542,7 @@ async def events(
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._get(
- f"/v1beta/tasks/groups/{task_group_id}/events",
+ path_template("/v1beta/tasks/groups/{task_group_id}/events", task_group_id=task_group_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -587,7 +607,7 @@ async def get_runs(
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._get(
- f"/v1beta/tasks/groups/{task_group_id}/runs",
+ path_template("/v1beta/tasks/groups/{task_group_id}/runs", task_group_id=task_group_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
diff --git a/src/parallel/resources/beta/task_run.py b/src/parallel/resources/beta/task_run.py
index 3fb567c..9b3a65d 100644
--- a/src/parallel/resources/beta/task_run.py
+++ b/src/parallel/resources/beta/task_run.py
@@ -2,13 +2,14 @@
from __future__ import annotations
+import typing_extensions
from typing import Any, Dict, List, Union, Iterable, Optional, cast
from itertools import chain
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import is_given, maybe_transform, strip_not_given, async_maybe_transform
+from ..._utils import is_given, path_template, maybe_transform, strip_not_given, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -21,11 +22,11 @@
from ...types.beta import task_run_create_params, task_run_result_params
from ..._base_client import make_request_options
from ...types.task_run import TaskRun
+from ...types.webhook_param import WebhookParam
+from ...types.task_run_result import TaskRunResult
from ...types.task_spec_param import TaskSpecParam
-from ...types.beta.webhook_param import WebhookParam
-from ...types.beta.mcp_server_param import McpServerParam
+from ...types.mcp_server_param import McpServerParam
from ...types.beta.parallel_beta_param import ParallelBetaParam
-from ...types.beta.beta_task_run_result import BetaTaskRunResult
from ...types.shared_params.source_policy import SourcePolicy
from ...types.beta.task_run_events_response import TaskRunEventsResponse
@@ -37,6 +38,13 @@ class TaskRunResource(SyncAPIResource):
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
@cached_property
@@ -58,11 +66,13 @@ def with_streaming_response(self) -> TaskRunResourceWithStreamingResponse:
"""
return TaskRunResourceWithStreamingResponse(self)
+ @typing_extensions.deprecated("Use GA Task Run instead")
def create(
self,
*,
input: Union[str, Dict[str, object]],
processor: str,
+ advanced_settings: Optional[task_run_create_params.AdvancedSettings] | Omit = omit,
enable_events: Optional[bool] | Omit = omit,
mcp_servers: Optional[Iterable[McpServerParam]] | Omit = omit,
metadata: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
@@ -90,18 +100,16 @@ def create(
processor: Processor to use for the task.
+ advanced_settings: Advanced search configuration for a task run.
+
enable_events: Controls tracking of task run execution progress. When set to true, progress
events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above). To enable this feature in your requests,
- specify `events-sse-2025-07-24` as one of the values in `parallel-beta` header
- (for API calls) or `betas` param (for the SDKs).
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
- mcp_servers: Optional list of MCP servers to use for the run. To enable this feature in your
- requests, specify `mcp-server-2025-07-17` as one of the values in
- `parallel-beta` header (for API calls) or `betas` param (for the SDKs).
+ mcp_servers: Optional list of MCP servers to use for the run.
metadata: User-provided metadata stored with the run. Keys and values must be strings with
a maximum length of 16 and 512 characters respectively.
@@ -143,11 +151,12 @@ def create(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._post(
- "/v1/tasks/runs?beta=true",
+ "/v1/tasks/runs",
body=maybe_transform(
{
"input": input,
"processor": processor,
+ "advanced_settings": advanced_settings,
"enable_events": enable_events,
"mcp_servers": mcp_servers,
"metadata": metadata,
@@ -164,6 +173,7 @@ def create(
cast_to=TaskRun,
)
+ @typing_extensions.deprecated("Use GA Task Run instead")
def events(
self,
run_id: str,
@@ -198,7 +208,7 @@ def events(
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._get(
- f"/v1beta/tasks/runs/{run_id}/events",
+ path_template("/v1beta/tasks/runs/{run_id}/events", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -207,6 +217,7 @@ def events(
stream_cls=Stream[TaskRunEventsResponse],
)
+ @typing_extensions.deprecated("Use GA Task Run instead")
def result(
self,
run_id: str,
@@ -219,7 +230,7 @@ def result(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BetaTaskRunResult:
+ ) -> TaskRunResult:
"""
Retrieves a run result by run_id, blocking until the run is completed.
@@ -248,7 +259,7 @@ def result(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return self._get(
- f"/v1/tasks/runs/{run_id}/result?beta=true",
+ path_template("/v1/tasks/runs/{run_id}/result", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -256,7 +267,7 @@ def result(
timeout=timeout,
query=maybe_transform({"api_timeout": api_timeout}, task_run_result_params.TaskRunResultParams),
),
- cast_to=BetaTaskRunResult,
+ cast_to=TaskRunResult,
)
@@ -265,6 +276,13 @@ class AsyncTaskRunResource(AsyncAPIResource):
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
@cached_property
@@ -286,11 +304,13 @@ def with_streaming_response(self) -> AsyncTaskRunResourceWithStreamingResponse:
"""
return AsyncTaskRunResourceWithStreamingResponse(self)
+ @typing_extensions.deprecated("Use GA Task Run instead")
async def create(
self,
*,
input: Union[str, Dict[str, object]],
processor: str,
+ advanced_settings: Optional[task_run_create_params.AdvancedSettings] | Omit = omit,
enable_events: Optional[bool] | Omit = omit,
mcp_servers: Optional[Iterable[McpServerParam]] | Omit = omit,
metadata: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
@@ -318,18 +338,16 @@ async def create(
processor: Processor to use for the task.
+ advanced_settings: Advanced search configuration for a task run.
+
enable_events: Controls tracking of task run execution progress. When set to true, progress
events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above). To enable this feature in your requests,
- specify `events-sse-2025-07-24` as one of the values in `parallel-beta` header
- (for API calls) or `betas` param (for the SDKs).
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
- mcp_servers: Optional list of MCP servers to use for the run. To enable this feature in your
- requests, specify `mcp-server-2025-07-17` as one of the values in
- `parallel-beta` header (for API calls) or `betas` param (for the SDKs).
+ mcp_servers: Optional list of MCP servers to use for the run.
metadata: User-provided metadata stored with the run. Keys and values must be strings with
a maximum length of 16 and 512 characters respectively.
@@ -371,11 +389,12 @@ async def create(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._post(
- "/v1/tasks/runs?beta=true",
+ "/v1/tasks/runs",
body=await async_maybe_transform(
{
"input": input,
"processor": processor,
+ "advanced_settings": advanced_settings,
"enable_events": enable_events,
"mcp_servers": mcp_servers,
"metadata": metadata,
@@ -392,6 +411,7 @@ async def create(
cast_to=TaskRun,
)
+ @typing_extensions.deprecated("Use GA Task Run instead")
async def events(
self,
run_id: str,
@@ -426,7 +446,7 @@ async def events(
extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._get(
- f"/v1beta/tasks/runs/{run_id}/events",
+ path_template("/v1beta/tasks/runs/{run_id}/events", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -435,6 +455,7 @@ async def events(
stream_cls=AsyncStream[TaskRunEventsResponse],
)
+ @typing_extensions.deprecated("Use GA Task Run instead")
async def result(
self,
run_id: str,
@@ -447,7 +468,7 @@ async def result(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BetaTaskRunResult:
+ ) -> TaskRunResult:
"""
Retrieves a run result by run_id, blocking until the run is completed.
@@ -476,7 +497,7 @@ async def result(
}
extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})}
return await self._get(
- f"/v1/tasks/runs/{run_id}/result?beta=true",
+ path_template("/v1/tasks/runs/{run_id}/result", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -486,7 +507,7 @@ async def result(
{"api_timeout": api_timeout}, task_run_result_params.TaskRunResultParams
),
),
- cast_to=BetaTaskRunResult,
+ cast_to=TaskRunResult,
)
@@ -494,14 +515,20 @@ class TaskRunResourceWithRawResponse:
def __init__(self, task_run: TaskRunResource) -> None:
self._task_run = task_run
- self.create = to_raw_response_wrapper(
- task_run.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ to_raw_response_wrapper(
+ task_run.create, # pyright: ignore[reportDeprecated],
+ )
)
- self.events = to_raw_response_wrapper(
- task_run.events,
+ self.events = ( # pyright: ignore[reportDeprecated]
+ to_raw_response_wrapper(
+ task_run.events, # pyright: ignore[reportDeprecated],
+ )
)
- self.result = to_raw_response_wrapper(
- task_run.result,
+ self.result = ( # pyright: ignore[reportDeprecated]
+ to_raw_response_wrapper(
+ task_run.result, # pyright: ignore[reportDeprecated],
+ )
)
@@ -509,14 +536,20 @@ class AsyncTaskRunResourceWithRawResponse:
def __init__(self, task_run: AsyncTaskRunResource) -> None:
self._task_run = task_run
- self.create = async_to_raw_response_wrapper(
- task_run.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ async_to_raw_response_wrapper(
+ task_run.create, # pyright: ignore[reportDeprecated],
+ )
)
- self.events = async_to_raw_response_wrapper(
- task_run.events,
+ self.events = ( # pyright: ignore[reportDeprecated]
+ async_to_raw_response_wrapper(
+ task_run.events, # pyright: ignore[reportDeprecated],
+ )
)
- self.result = async_to_raw_response_wrapper(
- task_run.result,
+ self.result = ( # pyright: ignore[reportDeprecated]
+ async_to_raw_response_wrapper(
+ task_run.result, # pyright: ignore[reportDeprecated],
+ )
)
@@ -524,14 +557,20 @@ class TaskRunResourceWithStreamingResponse:
def __init__(self, task_run: TaskRunResource) -> None:
self._task_run = task_run
- self.create = to_streamed_response_wrapper(
- task_run.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ task_run.create, # pyright: ignore[reportDeprecated],
+ )
)
- self.events = to_streamed_response_wrapper(
- task_run.events,
+ self.events = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ task_run.events, # pyright: ignore[reportDeprecated],
+ )
)
- self.result = to_streamed_response_wrapper(
- task_run.result,
+ self.result = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ task_run.result, # pyright: ignore[reportDeprecated],
+ )
)
@@ -539,12 +578,18 @@ class AsyncTaskRunResourceWithStreamingResponse:
def __init__(self, task_run: AsyncTaskRunResource) -> None:
self._task_run = task_run
- self.create = async_to_streamed_response_wrapper(
- task_run.create,
+ self.create = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ task_run.create, # pyright: ignore[reportDeprecated],
+ )
)
- self.events = async_to_streamed_response_wrapper(
- task_run.events,
+ self.events = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ task_run.events, # pyright: ignore[reportDeprecated],
+ )
)
- self.result = async_to_streamed_response_wrapper(
- task_run.result,
+ self.result = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ task_run.result, # pyright: ignore[reportDeprecated],
+ )
)
diff --git a/src/parallel/resources/task_run.py b/src/parallel/resources/task_run.py
index eb0df13..4f8e2f5 100644
--- a/src/parallel/resources/task_run.py
+++ b/src/parallel/resources/task_run.py
@@ -3,7 +3,7 @@
from __future__ import annotations
import time
-from typing import Dict, Type, Union, Optional, overload
+from typing import Any, Dict, List, Type, Union, Iterable, Optional, cast, overload
import httpx
@@ -11,7 +11,7 @@
from ..types import task_run_create_params, task_run_result_params
from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
+from .._utils import is_given, path_template, maybe_transform, strip_not_given, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -20,10 +20,13 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
+from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options
from ..types.task_run import TaskRun
+from ..types.webhook_param import WebhookParam
from ..types.task_run_result import TaskRunResult
from ..types.task_spec_param import OutputT, OutputSchema, TaskSpecParam
+from ..types.mcp_server_param import McpServerParam
from ..lib._parsing._task_spec import build_task_spec_param
from ..types.parsed_task_run_result import ParsedTaskRunResult
from ..lib._parsing._task_run_result import (
@@ -31,6 +34,8 @@
wait_for_result_async as _wait_for_result_async,
task_run_result_parser,
)
+from ..types.beta.parallel_beta_param import ParallelBetaParam
+from ..types.task_run_events_response import TaskRunEventsResponse
from ..types.shared_params.source_policy import SourcePolicy
__all__ = ["TaskRunResource", "AsyncTaskRunResource"]
@@ -41,6 +46,13 @@ class TaskRunResource(SyncAPIResource):
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
@cached_property
@@ -67,10 +79,15 @@ def create(
*,
input: Union[str, Dict[str, object]],
processor: str,
+ advanced_settings: Optional[task_run_create_params.AdvancedSettings] | Omit = omit,
+ enable_events: Optional[bool] | Omit = omit,
+ mcp_servers: Optional[Iterable[McpServerParam]] | Omit = omit,
metadata: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
previous_interaction_id: Optional[str] | Omit = omit,
source_policy: Optional[SourcePolicy] | Omit = omit,
task_spec: Optional[TaskSpecParam] | Omit = omit,
+ webhook: Optional[WebhookParam] | Omit = omit,
+ betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -90,6 +107,17 @@ def create(
processor: Processor to use for the task.
+ advanced_settings: Advanced search configuration for a task run.
+
+ enable_events: Controls tracking of task run execution progress. When set to true, progress
+ events are recorded and can be accessed via the
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
+
+ mcp_servers: Optional list of MCP servers to use for the run.
+
metadata: User-provided metadata stored with the run. Keys and values must be strings with
a maximum length of 16 and 512 characters respectively.
@@ -106,6 +134,10 @@ def create(
For convenience bare strings are also accepted as input or output schemas.
+ webhook: Webhooks for Task Runs.
+
+ betas: Optional header to specify the beta version(s) to enable.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -114,16 +146,24 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
+ extra_headers = {
+ **strip_not_given({"parallel-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
+ **(extra_headers or {}),
+ }
return self._post(
"/v1/tasks/runs",
body=maybe_transform(
{
"input": input,
"processor": processor,
+ "advanced_settings": advanced_settings,
+ "enable_events": enable_events,
+ "mcp_servers": mcp_servers,
"metadata": metadata,
"previous_interaction_id": previous_interaction_id,
"source_policy": source_policy,
"task_spec": task_spec,
+ "webhook": webhook,
},
task_run_create_params.TaskRunCreateParams,
),
@@ -161,18 +201,61 @@ def retrieve(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return self._get(
- f"/v1/tasks/runs/{run_id}",
+ path_template("/v1/tasks/runs/{run_id}", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=TaskRun,
)
+ def events(
+ self,
+ run_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Stream[TaskRunEventsResponse]:
+ """
+ Streams events for a task run.
+
+ Returns a stream of events showing progress updates and state changes for the
+ task run.
+
+ For task runs that did not have enable_events set to true during creation, the
+ frequency of events will be reduced.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not run_id:
+ raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+ extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
+ return self._get(
+ path_template("/v1/tasks/runs/{run_id}/events", run_id=run_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=cast(Any, TaskRunEventsResponse), # Union types cannot be passed in as arguments in the type system
+ stream=True,
+ stream_cls=Stream[TaskRunEventsResponse],
+ )
+
def result(
self,
run_id: str,
*,
api_timeout: int | Omit = omit,
+ betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -184,6 +267,8 @@ def result(
Retrieves a run result by run_id, blocking until the run is completed.
Args:
+ betas: Optional header to specify the beta version(s) to enable.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -194,8 +279,12 @@ def result(
"""
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+ extra_headers = {
+ **strip_not_given({"parallel-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
+ **(extra_headers or {}),
+ }
return self._get(
- f"/v1/tasks/runs/{run_id}/result",
+ path_template("/v1/tasks/runs/{run_id}/result", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -340,6 +429,13 @@ class AsyncTaskRunResource(AsyncAPIResource):
Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
- Output metadata: citations, excerpts, reasoning, and confidence per field
+
+ Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
"""
@cached_property
@@ -366,10 +462,15 @@ async def create(
*,
input: Union[str, Dict[str, object]],
processor: str,
+ advanced_settings: Optional[task_run_create_params.AdvancedSettings] | Omit = omit,
+ enable_events: Optional[bool] | Omit = omit,
+ mcp_servers: Optional[Iterable[McpServerParam]] | Omit = omit,
metadata: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
previous_interaction_id: Optional[str] | Omit = omit,
source_policy: Optional[SourcePolicy] | Omit = omit,
task_spec: Optional[TaskSpecParam] | Omit = omit,
+ webhook: Optional[WebhookParam] | Omit = omit,
+ betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -389,6 +490,17 @@ async def create(
processor: Processor to use for the task.
+ advanced_settings: Advanced search configuration for a task run.
+
+ enable_events: Controls tracking of task run execution progress. When set to true, progress
+ events are recorded and can be accessed via the
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
+
+ mcp_servers: Optional list of MCP servers to use for the run.
+
metadata: User-provided metadata stored with the run. Keys and values must be strings with
a maximum length of 16 and 512 characters respectively.
@@ -405,6 +517,10 @@ async def create(
For convenience bare strings are also accepted as input or output schemas.
+ webhook: Webhooks for Task Runs.
+
+ betas: Optional header to specify the beta version(s) to enable.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -413,16 +529,24 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
+ extra_headers = {
+ **strip_not_given({"parallel-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
+ **(extra_headers or {}),
+ }
return await self._post(
"/v1/tasks/runs",
body=await async_maybe_transform(
{
"input": input,
"processor": processor,
+ "advanced_settings": advanced_settings,
+ "enable_events": enable_events,
+ "mcp_servers": mcp_servers,
"metadata": metadata,
"previous_interaction_id": previous_interaction_id,
"source_policy": source_policy,
"task_spec": task_spec,
+ "webhook": webhook,
},
task_run_create_params.TaskRunCreateParams,
),
@@ -460,18 +584,61 @@ async def retrieve(
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return await self._get(
- f"/v1/tasks/runs/{run_id}",
+ path_template("/v1/tasks/runs/{run_id}", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=TaskRun,
)
+ async def events(
+ self,
+ run_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncStream[TaskRunEventsResponse]:
+ """
+ Streams events for a task run.
+
+ Returns a stream of events showing progress updates and state changes for the
+ task run.
+
+ For task runs that did not have enable_events set to true during creation, the
+ frequency of events will be reduced.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not run_id:
+ raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+ extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})}
+ return await self._get(
+ path_template("/v1/tasks/runs/{run_id}/events", run_id=run_id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=cast(Any, TaskRunEventsResponse), # Union types cannot be passed in as arguments in the type system
+ stream=True,
+ stream_cls=AsyncStream[TaskRunEventsResponse],
+ )
+
async def result(
self,
run_id: str,
*,
api_timeout: int | Omit = omit,
+ betas: List[ParallelBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -483,6 +650,8 @@ async def result(
Retrieves a run result by run_id, blocking until the run is completed.
Args:
+ betas: Optional header to specify the beta version(s) to enable.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -493,8 +662,12 @@ async def result(
"""
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+ extra_headers = {
+ **strip_not_given({"parallel-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
+ **(extra_headers or {}),
+ }
return await self._get(
- f"/v1/tasks/runs/{run_id}/result",
+ path_template("/v1/tasks/runs/{run_id}/result", run_id=run_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -642,6 +815,9 @@ def __init__(self, task_run: TaskRunResource) -> None:
self.retrieve = to_raw_response_wrapper(
task_run.retrieve,
)
+ self.events = to_raw_response_wrapper(
+ task_run.events,
+ )
self.result = to_raw_response_wrapper(
task_run.result,
)
@@ -657,6 +833,9 @@ def __init__(self, task_run: AsyncTaskRunResource) -> None:
self.retrieve = async_to_raw_response_wrapper(
task_run.retrieve,
)
+ self.events = async_to_raw_response_wrapper(
+ task_run.events,
+ )
self.result = async_to_raw_response_wrapper(
task_run.result,
)
@@ -672,6 +851,9 @@ def __init__(self, task_run: TaskRunResource) -> None:
self.retrieve = to_streamed_response_wrapper(
task_run.retrieve,
)
+ self.events = to_streamed_response_wrapper(
+ task_run.events,
+ )
self.result = to_streamed_response_wrapper(
task_run.result,
)
@@ -687,6 +869,9 @@ def __init__(self, task_run: AsyncTaskRunResource) -> None:
self.retrieve = async_to_streamed_response_wrapper(
task_run.retrieve,
)
+ self.events = async_to_streamed_response_wrapper(
+ task_run.events,
+ )
self.result = async_to_streamed_response_wrapper(
task_run.result,
)
diff --git a/src/parallel/types/__init__.py b/src/parallel/types/__init__.py
index 15d056e..570116a 100644
--- a/src/parallel/types/__init__.py
+++ b/src/parallel/types/__init__.py
@@ -8,20 +8,42 @@
SourcePolicy as SourcePolicy,
ErrorResponse as ErrorResponse,
)
+from .webhook import Webhook as Webhook
from .citation import Citation as Citation
from .task_run import TaskRun as TaskRun
+from .run_input import RunInput as RunInput
from .task_spec import TaskSpec as TaskSpec
+from .mcp_server import McpServer as McpServer
+from .usage_item import UsageItem as UsageItem
from .auto_schema import AutoSchema as AutoSchema
+from .error_event import ErrorEvent as ErrorEvent
from .field_basis import FieldBasis as FieldBasis
from .json_schema import JsonSchema as JsonSchema
from .text_schema import TextSchema as TextSchema
+from .extract_error import ExtractError as ExtractError
+from .mcp_tool_call import McpToolCall as McpToolCall
+from .search_result import SearchResult as SearchResult
+from .webhook_param import WebhookParam as WebhookParam
+from .extract_result import ExtractResult as ExtractResult
+from .task_run_event import TaskRunEvent as TaskRunEvent
+from .run_input_param import RunInputParam as RunInputParam
from .task_run_result import TaskRunResult as TaskRunResult
from .task_spec_param import TaskSpecParam as TaskSpecParam
+from .extract_response import ExtractResponse as ExtractResponse
+from .mcp_server_param import McpServerParam as McpServerParam
from .auto_schema_param import AutoSchemaParam as AutoSchemaParam
from .json_schema_param import JsonSchemaParam as JsonSchemaParam
from .text_schema_param import TextSchemaParam as TextSchemaParam
+from .web_search_result import WebSearchResult as WebSearchResult
+from .fetch_policy_param import FetchPolicyParam as FetchPolicyParam
+from .client_search_params import ClientSearchParams as ClientSearchParams
from .task_run_json_output import TaskRunJsonOutput as TaskRunJsonOutput
from .task_run_text_output import TaskRunTextOutput as TaskRunTextOutput
+from .client_extract_params import ClientExtractParams as ClientExtractParams
+from .excerpt_settings_param import ExcerptSettingsParam as ExcerptSettingsParam
from .parsed_task_run_result import ParsedTaskRunResult as ParsedTaskRunResult
from .task_run_create_params import TaskRunCreateParams as TaskRunCreateParams
from .task_run_result_params import TaskRunResultParams as TaskRunResultParams
+from .task_run_events_response import TaskRunEventsResponse as TaskRunEventsResponse
+from .advanced_search_settings_param import AdvancedSearchSettingsParam as AdvancedSearchSettingsParam
+from .advanced_extract_settings_param import AdvancedExtractSettingsParam as AdvancedExtractSettingsParam
diff --git a/src/parallel/types/advanced_extract_settings_param.py b/src/parallel/types/advanced_extract_settings_param.py
new file mode 100644
index 0000000..1328c26
--- /dev/null
+++ b/src/parallel/types/advanced_extract_settings_param.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import TypeAlias, TypedDict
+
+from .fetch_policy_param import FetchPolicyParam
+from .excerpt_settings_param import ExcerptSettingsParam
+
+__all__ = ["AdvancedExtractSettingsParam", "FullContent", "FullContentFullContentSettings"]
+
+
+class FullContentFullContentSettings(TypedDict, total=False):
+ """Optional settings for returning full content."""
+
+ max_chars_per_result: Optional[int]
+ """
+ Optional limit on the number of characters to include in the full content for
+ each url. Full content always starts at the beginning of the page and is
+ truncated at the limit if necessary.
+ """
+
+
+FullContent: TypeAlias = Union[FullContentFullContentSettings, bool]
+
+
+class AdvancedExtractSettingsParam(TypedDict, total=False):
+ """Advanced extract configuration.
+
+ These settings may impact result quality and latency unless used carefully.
+ See https://docs.parallel.ai/search/advanced-extract-settings for more info.
+ """
+
+ excerpt_settings: Optional[ExcerptSettingsParam]
+ """Optional settings for returning relevant excerpts."""
+
+ fetch_policy: Optional[FetchPolicyParam]
+ """Policy for live fetching web results."""
+
+ full_content: FullContent
+ """Controls full content extraction.
+
+ Set to true to enable with defaults, false to disable, or provide
+ FullContentSettings for fine-grained control.
+ """
diff --git a/src/parallel/types/advanced_search_settings_param.py b/src/parallel/types/advanced_search_settings_param.py
new file mode 100644
index 0000000..ea11da4
--- /dev/null
+++ b/src/parallel/types/advanced_search_settings_param.py
@@ -0,0 +1,38 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import TypedDict
+
+from .fetch_policy_param import FetchPolicyParam
+from .excerpt_settings_param import ExcerptSettingsParam
+from .shared_params.source_policy import SourcePolicy
+
+__all__ = ["AdvancedSearchSettingsParam"]
+
+
+class AdvancedSearchSettingsParam(TypedDict, total=False):
+ """Advanced search configuration.
+
+ These settings may impact result quality and latency unless used carefully.
+ See https://docs.parallel.ai/search/advanced-search-settings for more info.
+ """
+
+ excerpt_settings: Optional[ExcerptSettingsParam]
+ """Optional settings for returning relevant excerpts."""
+
+ fetch_policy: Optional[FetchPolicyParam]
+ """Policy for live fetching web results."""
+
+ location: Optional[str]
+ """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
+
+ max_results: Optional[int]
+ """Upper bound on the number of results to return. Defaults to 10 if not provided."""
+
+ source_policy: Optional[SourcePolicy]
+ """Source policy for web search results.
+
+ This policy governs which sources are allowed/disallowed in results.
+ """
diff --git a/src/parallel/types/beta/__init__.py b/src/parallel/types/beta/__init__.py
index ef052aa..91b7229 100644
--- a/src/parallel/types/beta/__init__.py
+++ b/src/parallel/types/beta/__init__.py
@@ -63,9 +63,11 @@
from .task_group_create_params import TaskGroupCreateParams as TaskGroupCreateParams
from .task_group_events_params import TaskGroupEventsParams as TaskGroupEventsParams
from .task_run_events_response import TaskRunEventsResponse as TaskRunEventsResponse
+from .findall_candidates_params import FindAllCandidatesParams as FindAllCandidatesParams
from .task_group_add_runs_params import TaskGroupAddRunsParams as TaskGroupAddRunsParams
from .task_group_events_response import TaskGroupEventsResponse as TaskGroupEventsResponse
from .task_group_get_runs_params import TaskGroupGetRunsParams as TaskGroupGetRunsParams
+from .findall_candidates_response import FindAllCandidatesResponse as FindAllCandidatesResponse
from .findall_schema_updated_event import (
FindAllSchemaUpdatedEvent as FindAllSchemaUpdatedEvent,
FindallSchemaUpdatedEvent as FindallSchemaUpdatedEvent,
diff --git a/src/parallel/types/beta/beta_extract_params.py b/src/parallel/types/beta/beta_extract_params.py
index 7a87574..578ed7c 100644
--- a/src/parallel/types/beta/beta_extract_params.py
+++ b/src/parallel/types/beta/beta_extract_params.py
@@ -7,7 +7,7 @@
from ..._types import SequenceNotStr
from ..._utils import PropertyInfo
-from .fetch_policy_param import FetchPolicyParam
+from ..fetch_policy_param import FetchPolicyParam
from .parallel_beta_param import ParallelBetaParam
from .excerpt_settings_param import ExcerptSettingsParam
@@ -40,6 +40,13 @@ class BetaExtractParams(TypedDict, total=False):
search_queries: Optional[SequenceNotStr[str]]
"""If provided, focuses extracted content on the specified keyword search queries."""
+ session_id: Optional[str]
+ """
+ Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
+ """
+
betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
"""Optional header to specify the beta version(s) to enable."""
diff --git a/src/parallel/types/beta/beta_run_input.py b/src/parallel/types/beta/beta_run_input.py
index 66f63e9..df1860f 100644
--- a/src/parallel/types/beta/beta_run_input.py
+++ b/src/parallel/types/beta/beta_run_input.py
@@ -1,68 +1,8 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, List, Union, Optional
-
-from .webhook import Webhook
-from ..._models import BaseModel
-from ..task_spec import TaskSpec
-from .mcp_server import McpServer
-from ..shared.source_policy import SourcePolicy
+from ..run_input import RunInput
__all__ = ["BetaRunInput"]
-
-class BetaRunInput(BaseModel):
- """Task run input with additional beta fields."""
-
- input: Union[str, Dict[str, object]]
- """Input to the task, either text or a JSON object."""
-
- processor: str
- """Processor to use for the task."""
-
- enable_events: Optional[bool] = None
- """Controls tracking of task run execution progress.
-
- When set to true, progress events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above). To enable this feature in your requests,
- specify `events-sse-2025-07-24` as one of the values in `parallel-beta` header
- (for API calls) or `betas` param (for the SDKs).
- """
-
- mcp_servers: Optional[List[McpServer]] = None
- """
- Optional list of MCP servers to use for the run. To enable this feature in your
- requests, specify `mcp-server-2025-07-17` as one of the values in
- `parallel-beta` header (for API calls) or `betas` param (for the SDKs).
- """
-
- metadata: Optional[Dict[str, Union[str, float, bool]]] = None
- """User-provided metadata stored with the run.
-
- Keys and values must be strings with a maximum length of 16 and 512 characters
- respectively.
- """
-
- previous_interaction_id: Optional[str] = None
- """Interaction ID to use as context for this request."""
-
- source_policy: Optional[SourcePolicy] = None
- """Source policy for web search results.
-
- This policy governs which sources are allowed/disallowed in results.
- """
-
- task_spec: Optional[TaskSpec] = None
- """Specification for a task.
-
- Auto output schemas can be specified by setting `output_schema={"type":"auto"}`.
- Not specifying a TaskSpec is the same as setting an auto output schema.
-
- For convenience bare strings are also accepted as input or output schemas.
- """
-
- webhook: Optional[Webhook] = None
- """Webhooks for Task Runs."""
+BetaRunInput = RunInput
+"""Use parallel.types.task_run.TaskRunInput instead"""
diff --git a/src/parallel/types/beta/beta_run_input_param.py b/src/parallel/types/beta/beta_run_input_param.py
index 0112bc1..fa83fa5 100644
--- a/src/parallel/types/beta/beta_run_input_param.py
+++ b/src/parallel/types/beta/beta_run_input_param.py
@@ -2,69 +2,6 @@
from __future__ import annotations
-from typing import Dict, Union, Iterable, Optional
-from typing_extensions import Required, TypedDict
+from ..run_input_param import RunInputParam
-from .webhook_param import WebhookParam
-from ..task_spec_param import TaskSpecParam
-from .mcp_server_param import McpServerParam
-from ..shared_params.source_policy import SourcePolicy
-
-__all__ = ["BetaRunInputParam"]
-
-
-class BetaRunInputParam(TypedDict, total=False):
- """Task run input with additional beta fields."""
-
- input: Required[Union[str, Dict[str, object]]]
- """Input to the task, either text or a JSON object."""
-
- processor: Required[str]
- """Processor to use for the task."""
-
- enable_events: Optional[bool]
- """Controls tracking of task run execution progress.
-
- When set to true, progress events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above). To enable this feature in your requests,
- specify `events-sse-2025-07-24` as one of the values in `parallel-beta` header
- (for API calls) or `betas` param (for the SDKs).
- """
-
- mcp_servers: Optional[Iterable[McpServerParam]]
- """
- Optional list of MCP servers to use for the run. To enable this feature in your
- requests, specify `mcp-server-2025-07-17` as one of the values in
- `parallel-beta` header (for API calls) or `betas` param (for the SDKs).
- """
-
- metadata: Optional[Dict[str, Union[str, float, bool]]]
- """User-provided metadata stored with the run.
-
- Keys and values must be strings with a maximum length of 16 and 512 characters
- respectively.
- """
-
- previous_interaction_id: Optional[str]
- """Interaction ID to use as context for this request."""
-
- source_policy: Optional[SourcePolicy]
- """Source policy for web search results.
-
- This policy governs which sources are allowed/disallowed in results.
- """
-
- task_spec: Optional[TaskSpecParam]
- """Specification for a task.
-
- Auto output schemas can be specified by setting `output_schema={"type":"auto"}`.
- Not specifying a TaskSpec is the same as setting an auto output schema.
-
- For convenience bare strings are also accepted as input or output schemas.
- """
-
- webhook: Optional[WebhookParam]
- """Webhooks for Task Runs."""
+BetaRunInputParam = RunInputParam
diff --git a/src/parallel/types/beta/beta_search_params.py b/src/parallel/types/beta/beta_search_params.py
index 4a0776f..1b82406 100644
--- a/src/parallel/types/beta/beta_search_params.py
+++ b/src/parallel/types/beta/beta_search_params.py
@@ -7,7 +7,7 @@
from ..._types import SequenceNotStr
from ..._utils import PropertyInfo
-from .fetch_policy_param import FetchPolicyParam
+from ..fetch_policy_param import FetchPolicyParam
from .parallel_beta_param import ParallelBetaParam
from .excerpt_settings_param import ExcerptSettingsParam
from ..shared_params.source_policy import SourcePolicy
@@ -22,6 +22,9 @@ class BetaSearchParams(TypedDict, total=False):
fetch_policy: Optional[FetchPolicyParam]
"""Policy for live fetching web results."""
+ location: Optional[str]
+ """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
+
max_chars_per_result: Optional[int]
"""DEPRECATED: Use `excerpts.max_chars_per_result` instead."""
@@ -56,6 +59,13 @@ class BetaSearchParams(TypedDict, total=False):
be provided.
"""
+ session_id: Optional[str]
+ """
+ Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
+ """
+
source_policy: Optional[SourcePolicy]
"""Source policy for web search results.
diff --git a/src/parallel/types/beta/beta_task_run_result.py b/src/parallel/types/beta/beta_task_run_result.py
index 5bc7cf7..b14b49a 100644
--- a/src/parallel/types/beta/beta_task_run_result.py
+++ b/src/parallel/types/beta/beta_task_run_result.py
@@ -1,88 +1,31 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
-from ..._models import BaseModel
-from ..task_run import TaskRun
-from ..field_basis import FieldBasis
-from .mcp_tool_call import McpToolCall
-
-__all__ = ["BetaTaskRunResult", "Output", "OutputBetaTaskRunTextOutput", "OutputBetaTaskRunJsonOutput"]
-
-
-class OutputBetaTaskRunTextOutput(BaseModel):
- """Output from a task that returns text."""
-
- basis: List[FieldBasis]
- """Basis for the output.
-
- To include per-list-element basis entries, send the `parallel-beta` header with
- the value `field-basis-2025-11-25` when creating the run.
- """
-
- content: str
- """Text output from the task."""
-
- type: Literal["text"]
- """
- The type of output being returned, as determined by the output schema of the
- task spec.
- """
-
- beta_fields: Optional[Dict[str, object]] = None
- """Always None."""
-
- mcp_tool_calls: Optional[List[McpToolCall]] = None
- """MCP tool calls made by the task."""
-
-
-class OutputBetaTaskRunJsonOutput(BaseModel):
- """Output from a task that returns JSON."""
-
- basis: List[FieldBasis]
- """Basis for the output.
-
- To include per-list-element basis entries, send the `parallel-beta` header with
- the value `field-basis-2025-11-25` when creating the run.
- """
-
- content: Dict[str, object]
- """
- Output from the task as a native JSON object, as determined by the output schema
- of the task spec.
- """
-
- type: Literal["json"]
- """
- The type of output being returned, as determined by the output schema of the
- task spec.
- """
-
- beta_fields: Optional[Dict[str, object]] = None
- """Always None."""
-
- mcp_tool_calls: Optional[List[McpToolCall]] = None
- """MCP tool calls made by the task."""
+from ..task_run_result import TaskRunResult
+from ..task_run_json_output import TaskRunJsonOutput
+from ..task_run_text_output import TaskRunTextOutput
+
+__all__ = [
+ "BetaTaskRunResult",
+ "Output",
+ "OutputBetaTaskRunJsonOutput",
+ "OutputBetaTaskRunTextOutput",
+]
- output_schema: Optional[Dict[str, object]] = None
- """Output schema for the Task Run.
+BetaTaskRunResult = TaskRunResult
+"""This is deprecated, `TaskRunResult` should be used instead"""
- Populated only if the task was executed with an auto schema.
- """
+OutputBetaTaskRunJsonOutput = TaskRunJsonOutput
+"""This is deprecated, `TaskRunJsonOutput` should be used instead"""
+OutputBetaTaskRunTextOutput = TaskRunTextOutput
+"""This is deprecated, `TaskRunTextOutput` should be used instead"""
Output: TypeAlias = Annotated[
- Union[OutputBetaTaskRunTextOutput, OutputBetaTaskRunJsonOutput], PropertyInfo(discriminator="type")
+ Union[OutputBetaTaskRunTextOutput, OutputBetaTaskRunJsonOutput],
+ PropertyInfo(discriminator="type"),
]
-
-
-class BetaTaskRunResult(BaseModel):
- """Result of a beta task run. Available only if beta headers are specified."""
-
- output: Output
- """Output from the task conforming to the output schema."""
-
- run: TaskRun
- """Beta task run object with status 'completed'."""
+"""This is deprecated, use `Union[TaskRunTextOutput, TaskRunJsonOutput]` instead"""
diff --git a/src/parallel/types/beta/error_event.py b/src/parallel/types/beta/error_event.py
index 7ac7abc..becb915 100644
--- a/src/parallel/types/beta/error_event.py
+++ b/src/parallel/types/beta/error_event.py
@@ -1,18 +1,8 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from ..shared.error_object import ErrorObject
+from .. import error_event
__all__ = ["ErrorEvent"]
-
-class ErrorEvent(BaseModel):
- """Event indicating an error."""
-
- error: ErrorObject
- """Error."""
-
- type: Literal["error"]
- """Event type; always 'error'."""
+ErrorEvent = error_event.ErrorEvent
+"""Use parallel.types.task_run.ErrorEvent instead"""
diff --git a/src/parallel/types/beta/extract_error.py b/src/parallel/types/beta/extract_error.py
index 0c8a19f..499920f 100644
--- a/src/parallel/types/beta/extract_error.py
+++ b/src/parallel/types/beta/extract_error.py
@@ -1,22 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
-
-from ..._models import BaseModel
+from .. import extract_error
__all__ = ["ExtractError"]
-
-class ExtractError(BaseModel):
- """Extract error details."""
-
- content: Optional[str] = None
- """Content returned for http client or server errors, if any."""
-
- error_type: str
- """Error type."""
-
- http_status_code: Optional[int] = None
- """HTTP status code, if available."""
-
- url: str
+ExtractError = extract_error.ExtractError
diff --git a/src/parallel/types/beta/extract_response.py b/src/parallel/types/beta/extract_response.py
index 45717bc..5fa3d4d 100644
--- a/src/parallel/types/beta/extract_response.py
+++ b/src/parallel/types/beta/extract_response.py
@@ -3,8 +3,8 @@
from typing import List, Optional
from ..._models import BaseModel
-from .usage_item import UsageItem
-from .extract_error import ExtractError
+from ..usage_item import UsageItem
+from ..extract_error import ExtractError
from .extract_result import ExtractResult
from ..shared.warning import Warning
diff --git a/src/parallel/types/beta/fetch_policy_param.py b/src/parallel/types/beta/fetch_policy_param.py
index 5bc4447..0949e76 100644
--- a/src/parallel/types/beta/fetch_policy_param.py
+++ b/src/parallel/types/beta/fetch_policy_param.py
@@ -2,26 +2,6 @@
from __future__ import annotations
-from typing import Optional
-from typing_extensions import TypedDict
+from .. import fetch_policy_param
-__all__ = ["FetchPolicyParam"]
-
-
-class FetchPolicyParam(TypedDict, total=False):
- """Policy for live fetching web results."""
-
- disable_cache_fallback: bool
- """
- If false, fallback to cached content older than max-age if live fetch fails or
- times out. If true, returns an error instead.
- """
-
- max_age_seconds: Optional[int]
- """Maximum age of cached content in seconds to trigger a live fetch.
-
- Minimum value 600 seconds (10 minutes).
- """
-
- timeout_seconds: Optional[float]
- """Timeout in seconds for fetching live content if unavailable in cache."""
+FetchPolicyParam = fetch_policy_param.FetchPolicyParam
diff --git a/src/parallel/types/beta/findall_candidates_params.py b/src/parallel/types/beta/findall_candidates_params.py
new file mode 100644
index 0000000..0c50b27
--- /dev/null
+++ b/src/parallel/types/beta/findall_candidates_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["FindAllCandidatesParams"]
+
+
+class FindAllCandidatesParams(TypedDict, total=False):
+ entity_type: Required[Literal["company", "people"]]
+ """Type of entity to search for."""
+
+ objective: Required[str]
+ """Natural language description of target entities."""
+
+ match_limit: int
+ """Maximum number of candidates to return.
+
+ Must be between 5 and 1000 (inclusive). May return fewer results. Defaults
+ to 100.
+ """
diff --git a/src/parallel/types/beta/findall_candidates_response.py b/src/parallel/types/beta/findall_candidates_response.py
new file mode 100644
index 0000000..38ed985
--- /dev/null
+++ b/src/parallel/types/beta/findall_candidates_response.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ..._models import BaseModel
+
+__all__ = ["FindAllCandidatesResponse", "Candidate"]
+
+
+class Candidate(BaseModel):
+ description: str
+ """Descriptive text about the entity."""
+
+ name: str
+ """Entity name."""
+
+ url: str
+ """Canonical URL for the entity."""
+
+
+class FindAllCandidatesResponse(BaseModel):
+ candidate_set_id: str
+ """Candidate set request ID.
+
+ Example: `candidate_set_cad0a6d2dec046bd95ae900527d880e7`
+ """
+
+ candidates: List[Candidate]
+ """Ranked list of entity candidates."""
diff --git a/src/parallel/types/beta/findall_create_params.py b/src/parallel/types/beta/findall_create_params.py
index d4dbed2..89a8715 100644
--- a/src/parallel/types/beta/findall_create_params.py
+++ b/src/parallel/types/beta/findall_create_params.py
@@ -6,7 +6,7 @@
from typing_extensions import Literal, Required, Annotated, TypedDict
from ..._utils import PropertyInfo
-from .webhook_param import WebhookParam
+from ..webhook_param import WebhookParam
from .parallel_beta_param import ParallelBetaParam
__all__ = ["FindAllCreateParams", "FindallCreateParams", "MatchCondition", "ExcludeList"]
@@ -25,7 +25,7 @@ class FindAllCreateParams(TypedDict, total=False):
match_limit: Required[int]
"""Maximum number of matches to find for this FindAll run.
- Must be between 5 and 1000 (inclusive).
+ Must be between 5 and 1000 (inclusive). May return fewer results.
"""
objective: Required[str]
diff --git a/src/parallel/types/beta/findall_enrich_input.py b/src/parallel/types/beta/findall_enrich_input.py
index 8b16a9e..8484621 100644
--- a/src/parallel/types/beta/findall_enrich_input.py
+++ b/src/parallel/types/beta/findall_enrich_input.py
@@ -3,7 +3,7 @@
from typing import List, Optional
from ..._models import BaseModel
-from .mcp_server import McpServer
+from ..mcp_server import McpServer
from ..json_schema import JsonSchema
__all__ = ["FindAllEnrichInput", "FindallEnrichInput"]
diff --git a/src/parallel/types/beta/findall_enrich_params.py b/src/parallel/types/beta/findall_enrich_params.py
index f2e3490..a9be6a6 100644
--- a/src/parallel/types/beta/findall_enrich_params.py
+++ b/src/parallel/types/beta/findall_enrich_params.py
@@ -6,7 +6,7 @@
from typing_extensions import Required, Annotated, TypedDict
from ..._utils import PropertyInfo
-from .mcp_server_param import McpServerParam
+from ..mcp_server_param import McpServerParam
from ..json_schema_param import JsonSchemaParam
from .parallel_beta_param import ParallelBetaParam
diff --git a/src/parallel/types/beta/findall_events_response.py b/src/parallel/types/beta/findall_events_response.py
index f3cd93e..995cf03 100644
--- a/src/parallel/types/beta/findall_events_response.py
+++ b/src/parallel/types/beta/findall_events_response.py
@@ -4,7 +4,7 @@
from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
-from .error_event import ErrorEvent
+from ..error_event import ErrorEvent
from .findall_run_status_event import FindAllRunStatusEvent
from .findall_schema_updated_event import FindAllSchemaUpdatedEvent
from .findall_candidate_match_status_event import FindAllCandidateMatchStatusEvent
@@ -16,6 +16,5 @@
PropertyInfo(discriminator="type"),
]
-
FindallEventsResponse = FindAllEventsResponse # for backwards compatibility with v0.3.4
"""This is deprecated, `FindAllEventsResponse` should be used instead"""
diff --git a/src/parallel/types/beta/mcp_server.py b/src/parallel/types/beta/mcp_server.py
index f0d8d12..5dc3c28 100644
--- a/src/parallel/types/beta/mcp_server.py
+++ b/src/parallel/types/beta/mcp_server.py
@@ -1,27 +1,8 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
+from .. import mcp_server
__all__ = ["McpServer"]
-
-class McpServer(BaseModel):
- """MCP server configuration."""
-
- name: str
- """Name of the MCP server."""
-
- url: str
- """URL of the MCP server."""
-
- allowed_tools: Optional[List[str]] = None
- """List of allowed tools for the MCP server."""
-
- headers: Optional[Dict[str, str]] = None
- """Headers for the MCP server."""
-
- type: Optional[Literal["url"]] = None
- """Type of MCP server being configured. Always `url`."""
+McpServer = mcp_server.McpServer
+"""Use parallel.types.task_run.McpServer instead"""
diff --git a/src/parallel/types/beta/mcp_server_param.py b/src/parallel/types/beta/mcp_server_param.py
index 02052a2..b406f2d 100644
--- a/src/parallel/types/beta/mcp_server_param.py
+++ b/src/parallel/types/beta/mcp_server_param.py
@@ -2,28 +2,6 @@
from __future__ import annotations
-from typing import Dict, Optional
-from typing_extensions import Literal, Required, TypedDict
+from .. import mcp_server_param
-from ..._types import SequenceNotStr
-
-__all__ = ["McpServerParam"]
-
-
-class McpServerParam(TypedDict, total=False):
- """MCP server configuration."""
-
- name: Required[str]
- """Name of the MCP server."""
-
- url: Required[str]
- """URL of the MCP server."""
-
- allowed_tools: Optional[SequenceNotStr[str]]
- """List of allowed tools for the MCP server."""
-
- headers: Optional[Dict[str, str]]
- """Headers for the MCP server."""
-
- type: Literal["url"]
- """Type of MCP server being configured. Always `url`."""
+McpServerParam = mcp_server_param.McpServerParam
diff --git a/src/parallel/types/beta/mcp_tool_call.py b/src/parallel/types/beta/mcp_tool_call.py
index d04b217..785a3d5 100644
--- a/src/parallel/types/beta/mcp_tool_call.py
+++ b/src/parallel/types/beta/mcp_tool_call.py
@@ -1,29 +1,8 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
-
-from ..._models import BaseModel
+from .. import mcp_tool_call
__all__ = ["McpToolCall"]
-
-class McpToolCall(BaseModel):
- """Result of an MCP tool call."""
-
- arguments: str
- """Arguments used to call the MCP tool."""
-
- server_name: str
- """Name of the MCP server."""
-
- tool_call_id: str
- """Identifier for the tool call."""
-
- tool_name: str
- """Name of the tool being called."""
-
- content: Optional[str] = None
- """Output received from the tool call, if successful."""
-
- error: Optional[str] = None
- """Error message if the tool call failed."""
+McpToolCall = mcp_tool_call.McpToolCall
+"""Use parallel.types.task_run.McpToolCall instead"""
diff --git a/src/parallel/types/beta/search_result.py b/src/parallel/types/beta/search_result.py
index c7dd935..4c20ccb 100644
--- a/src/parallel/types/beta/search_result.py
+++ b/src/parallel/types/beta/search_result.py
@@ -3,7 +3,7 @@
from typing import List, Optional
from ..._models import BaseModel
-from .usage_item import UsageItem
+from ..usage_item import UsageItem
from ..shared.warning import Warning
from .web_search_result import WebSearchResult
diff --git a/src/parallel/types/beta/task_group_add_runs_params.py b/src/parallel/types/beta/task_group_add_runs_params.py
index 5732934..2de405d 100644
--- a/src/parallel/types/beta/task_group_add_runs_params.py
+++ b/src/parallel/types/beta/task_group_add_runs_params.py
@@ -6,21 +6,23 @@
from typing_extensions import Required, Annotated, TypedDict
from ..._utils import PropertyInfo
+from ..run_input_param import RunInputParam
from ..task_spec_param import TaskSpecParam
from .parallel_beta_param import ParallelBetaParam
-from .beta_run_input_param import BetaRunInputParam
__all__ = ["TaskGroupAddRunsParams"]
class TaskGroupAddRunsParams(TypedDict, total=False):
- inputs: Required[Iterable[BetaRunInputParam]]
+ inputs: Required[Iterable[RunInputParam]]
"""List of task runs to execute.
Up to 1,000 runs can be specified per request. If you'd like to add more runs,
split them across multiple TaskGroup POST requests.
"""
+ refresh_status: bool
+
default_task_spec: Optional[TaskSpecParam]
"""Specification for a task.
diff --git a/src/parallel/types/beta/task_group_events_response.py b/src/parallel/types/beta/task_group_events_response.py
index c1db25b..9728390 100644
--- a/src/parallel/types/beta/task_group_events_response.py
+++ b/src/parallel/types/beta/task_group_events_response.py
@@ -5,8 +5,8 @@
from ..._utils import PropertyInfo
from ..._models import BaseModel
-from .error_event import ErrorEvent
-from .task_run_event import TaskRunEvent
+from ..error_event import ErrorEvent
+from ..task_run_event import TaskRunEvent
from .task_group_status import TaskGroupStatus
__all__ = ["TaskGroupEventsResponse", "TaskGroupStatusEvent"]
diff --git a/src/parallel/types/beta/task_group_get_runs_response.py b/src/parallel/types/beta/task_group_get_runs_response.py
index b287dcb..95eab2c 100644
--- a/src/parallel/types/beta/task_group_get_runs_response.py
+++ b/src/parallel/types/beta/task_group_get_runs_response.py
@@ -4,8 +4,8 @@
from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
-from .error_event import ErrorEvent
-from .task_run_event import TaskRunEvent
+from ..error_event import ErrorEvent
+from ..task_run_event import TaskRunEvent
__all__ = ["TaskGroupGetRunsResponse"]
diff --git a/src/parallel/types/beta/task_run_create_params.py b/src/parallel/types/beta/task_run_create_params.py
index f7290c9..5f6f4c9 100644
--- a/src/parallel/types/beta/task_run_create_params.py
+++ b/src/parallel/types/beta/task_run_create_params.py
@@ -6,13 +6,13 @@
from typing_extensions import Required, Annotated, TypedDict
from ..._utils import PropertyInfo
-from .webhook_param import WebhookParam
+from ..webhook_param import WebhookParam
from ..task_spec_param import TaskSpecParam
-from .mcp_server_param import McpServerParam
+from ..mcp_server_param import McpServerParam
from .parallel_beta_param import ParallelBetaParam
from ..shared_params.source_policy import SourcePolicy
-__all__ = ["TaskRunCreateParams"]
+__all__ = ["TaskRunCreateParams", "AdvancedSettings"]
class TaskRunCreateParams(TypedDict, total=False):
@@ -22,24 +22,21 @@ class TaskRunCreateParams(TypedDict, total=False):
processor: Required[str]
"""Processor to use for the task."""
+ advanced_settings: Optional[AdvancedSettings]
+ """Advanced search configuration for a task run."""
+
enable_events: Optional[bool]
"""Controls tracking of task run execution progress.
When set to true, progress events are recorded and can be accessed via the
- [Task Run events](https://platform.parallel.ai/api-reference) endpoint. When
- false, no progress events are tracked. Note that progress tracking cannot be
- enabled after a run has been created. The flag is set to true by default for
- premium processors (pro and above). To enable this feature in your requests,
- specify `events-sse-2025-07-24` as one of the values in `parallel-beta` header
- (for API calls) or `betas` param (for the SDKs).
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
"""
mcp_servers: Optional[Iterable[McpServerParam]]
- """
- Optional list of MCP servers to use for the run. To enable this feature in your
- requests, specify `mcp-server-2025-07-17` as one of the values in
- `parallel-beta` header (for API calls) or `betas` param (for the SDKs).
- """
+ """Optional list of MCP servers to use for the run."""
metadata: Optional[Dict[str, Union[str, float, bool]]]
"""User-provided metadata stored with the run.
@@ -71,3 +68,10 @@ class TaskRunCreateParams(TypedDict, total=False):
betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
"""Optional header to specify the beta version(s) to enable."""
+
+
+class AdvancedSettings(TypedDict, total=False):
+ """Advanced search configuration for a task run."""
+
+ location: Optional[str]
+ """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
diff --git a/src/parallel/types/beta/task_run_event.py b/src/parallel/types/beta/task_run_event.py
index c4c6c2f..d2cada5 100644
--- a/src/parallel/types/beta/task_run_event.py
+++ b/src/parallel/types/beta/task_run_event.py
@@ -1,37 +1,11 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from ..._utils import PropertyInfo
-from ..._models import BaseModel
-from ..task_run import TaskRun
-from .beta_run_input import BetaRunInput
-from ..task_run_json_output import TaskRunJsonOutput
-from ..task_run_text_output import TaskRunTextOutput
+from .. import task_run_event
__all__ = ["TaskRunEvent", "Output"]
-Output: TypeAlias = Annotated[Union[TaskRunTextOutput, TaskRunJsonOutput, None], PropertyInfo(discriminator="type")]
-
-
-class TaskRunEvent(BaseModel):
- """Event when a task run transitions to a non-active status.
-
- May indicate completion, cancellation, or failure.
- """
-
- event_id: Optional[str] = None
- """Cursor to resume the event stream. Always empty for non Task Group runs."""
-
- run: TaskRun
- """Task run object."""
-
- type: Literal["task_run.state"]
- """Event type; always 'task_run.state'."""
-
- input: Optional[BetaRunInput] = None
- """Task run input with additional beta fields."""
+TaskRunEvent = task_run_event.TaskRunEvent
+"""Use parallel.types.task_run.TaskRunEvent instead"""
- output: Optional[Output] = None
- """Output from the run; included only if requested and if status == `completed`."""
+Output = task_run_event.Output
+"""This is deprecated, use `parallel.types.task_run_event.Output` instead"""
diff --git a/src/parallel/types/beta/task_run_events_response.py b/src/parallel/types/beta/task_run_events_response.py
index 79088f5..1516f91 100644
--- a/src/parallel/types/beta/task_run_events_response.py
+++ b/src/parallel/types/beta/task_run_events_response.py
@@ -5,8 +5,8 @@
from ..._utils import PropertyInfo
from ..._models import BaseModel
-from .error_event import ErrorEvent
-from .task_run_event import TaskRunEvent
+from ..error_event import ErrorEvent
+from ..task_run_event import TaskRunEvent
__all__ = [
"TaskRunEventsResponse",
diff --git a/src/parallel/types/beta/usage_item.py b/src/parallel/types/beta/usage_item.py
index b3584bd..587bb3c 100644
--- a/src/parallel/types/beta/usage_item.py
+++ b/src/parallel/types/beta/usage_item.py
@@ -1,15 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from ..._models import BaseModel
+from .. import usage_item
__all__ = ["UsageItem"]
-
-class UsageItem(BaseModel):
- """Usage item for a single operation."""
-
- count: int
- """Count of the SKU."""
-
- name: str
- """Name of the SKU."""
+UsageItem = usage_item.UsageItem
diff --git a/src/parallel/types/beta/webhook.py b/src/parallel/types/beta/webhook.py
index 6741b89..814b154 100644
--- a/src/parallel/types/beta/webhook.py
+++ b/src/parallel/types/beta/webhook.py
@@ -1,18 +1,8 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
+from .. import webhook
__all__ = ["Webhook"]
-
-class Webhook(BaseModel):
- """Webhooks for Task Runs."""
-
- url: str
- """URL for the webhook."""
-
- event_types: Optional[List[Literal["task_run.status"]]] = None
- """Event types to send the webhook notifications for."""
+Webhook = webhook.Webhook
+"""Use parallel.types.task_run.Webhook instead"""
diff --git a/src/parallel/types/beta/webhook_param.py b/src/parallel/types/beta/webhook_param.py
index 90a667d..a32d4c7 100644
--- a/src/parallel/types/beta/webhook_param.py
+++ b/src/parallel/types/beta/webhook_param.py
@@ -2,17 +2,6 @@
from __future__ import annotations
-from typing import List
-from typing_extensions import Literal, Required, TypedDict
+from .. import webhook_param
-__all__ = ["WebhookParam"]
-
-
-class WebhookParam(TypedDict, total=False):
- """Webhooks for Task Runs."""
-
- url: Required[str]
- """URL for the webhook."""
-
- event_types: List[Literal["task_run.status"]]
- """Event types to send the webhook notifications for."""
+WebhookParam = webhook_param.WebhookParam
diff --git a/src/parallel/types/client_extract_params.py b/src/parallel/types/client_extract_params.py
new file mode 100644
index 0000000..063ee4c
--- /dev/null
+++ b/src/parallel/types/client_extract_params.py
@@ -0,0 +1,52 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from .._types import SequenceNotStr
+from .advanced_extract_settings_param import AdvancedExtractSettingsParam
+
+__all__ = ["ClientExtractParams"]
+
+
+class ClientExtractParams(TypedDict, total=False):
+ urls: Required[SequenceNotStr[str]]
+ """URLs to extract content from. Up to 20 URLs."""
+
+ advanced_settings: Optional[AdvancedExtractSettingsParam]
+ """Advanced extract configuration.
+
+ These settings may impact result quality and latency unless used carefully. See
+ https://docs.parallel.ai/search/advanced-extract-settings for more info.
+ """
+
+ client_model: Optional[str]
+ """The model generating this request and consuming the results.
+
+ Enables optimizations and tailors default settings for the model's capabilities.
+ """
+
+ max_chars_total: Optional[int]
+ """Upper bound on total characters across excerpts from all extracted results."""
+
+ objective: Optional[str]
+ """
+ As in SearchRequest, a natural-language description of the underlying question
+ or goal driving the request. Used together with search_queries to focus excerpts
+ on the most relevant content.
+ """
+
+ search_queries: Optional[SequenceNotStr[str]]
+ """Optional keyword search queries, as in SearchRequest.
+
+ Used together with objective to focus excerpts on the most relevant content.
+ """
+
+ session_id: Optional[str]
+ """
+ Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
+ """
diff --git a/src/parallel/types/client_search_params.py b/src/parallel/types/client_search_params.py
new file mode 100644
index 0000000..f81ba5b
--- /dev/null
+++ b/src/parallel/types/client_search_params.py
@@ -0,0 +1,59 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .._types import SequenceNotStr
+from .advanced_search_settings_param import AdvancedSearchSettingsParam
+
+__all__ = ["ClientSearchParams"]
+
+
+class ClientSearchParams(TypedDict, total=False):
+ search_queries: Required[SequenceNotStr[str]]
+ """Concise keyword search queries, 3-6 words each.
+
+    At least one query is required; provide 2-3 for best results. Used together with
+ objective to focus results on the most relevant content.
+ """
+
+ advanced_settings: Optional[AdvancedSearchSettingsParam]
+ """Advanced search configuration.
+
+    These settings may impact result quality and latency if not used carefully. See
+ https://docs.parallel.ai/search/advanced-search-settings for more info.
+ """
+
+ client_model: Optional[str]
+ """The model generating this request and consuming the results.
+
+ Enables optimizations and tailors default settings for the model's capabilities.
+ """
+
+ max_chars_total: Optional[int]
+ """Upper bound on total characters across excerpts from all results."""
+
+ mode: Optional[Literal["basic", "advanced"]]
+ """Search mode preset: supported values are `basic` and `advanced`.
+
+ Basic mode offers the lowest latency and works best with 2-3 high-quality
+ search_queries. Advanced mode provides higher quality with more advanced
+ retrieval and compression. Defaults to `advanced` when omitted.
+ """
+
+ objective: Optional[str]
+ """
+ Natural-language description of the underlying question or goal driving the
+ search. Used together with search_queries to focus results on the most relevant
+ content. Should be self-contained with enough context to understand the intent
+ of the search.
+ """
+
+ session_id: Optional[str]
+ """
+ Session identifier to track calls across separate search and extract calls, to
+ be used as part of a larger task. Specifying it may give better contextual
+ results for subsequent API calls.
+ """
diff --git a/src/parallel/types/error_event.py b/src/parallel/types/error_event.py
new file mode 100644
index 0000000..3ededc9
--- /dev/null
+++ b/src/parallel/types/error_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .shared.error_object import ErrorObject
+
+__all__ = ["ErrorEvent"]
+
+
+class ErrorEvent(BaseModel):
+ """Event indicating an error."""
+
+ error: ErrorObject
+ """Error."""
+
+ type: Literal["error"]
+ """Event type; always 'error'."""
diff --git a/src/parallel/types/excerpt_settings_param.py b/src/parallel/types/excerpt_settings_param.py
new file mode 100644
index 0000000..17bd00e
--- /dev/null
+++ b/src/parallel/types/excerpt_settings_param.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import TypedDict
+
+__all__ = ["ExcerptSettingsParam"]
+
+
+class ExcerptSettingsParam(TypedDict, total=False):
+ """Optional settings for returning relevant excerpts."""
+
+ max_chars_per_result: Optional[int]
+ """Optional upper bound on the total number of characters to include per url.
+
+ Excerpts may contain fewer characters than this limit to maximize relevance and
+ token efficiency. Values below 1000 will be automatically set to 1000.
+ """
diff --git a/src/parallel/types/extract_error.py b/src/parallel/types/extract_error.py
new file mode 100644
index 0000000..3379cb6
--- /dev/null
+++ b/src/parallel/types/extract_error.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["ExtractError"]
+
+
+class ExtractError(BaseModel):
+ """Extract error details."""
+
+ content: Optional[str] = None
+ """Content returned for http client or server errors, if any."""
+
+ error_type: str
+ """Error type."""
+
+ http_status_code: Optional[int] = None
+ """HTTP status code, if available."""
+
+ url: str
diff --git a/src/parallel/types/extract_response.py b/src/parallel/types/extract_response.py
new file mode 100644
index 0000000..8cb9568
--- /dev/null
+++ b/src/parallel/types/extract_response.py
@@ -0,0 +1,38 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .usage_item import UsageItem
+from .extract_error import ExtractError
+from .extract_result import ExtractResult
+from .shared.warning import Warning
+
+__all__ = ["ExtractResponse"]
+
+
+class ExtractResponse(BaseModel):
+ """Extract response."""
+
+ errors: List[ExtractError]
+ """Extract errors: requested URLs not in the results."""
+
+ extract_id: str
+ """Extract request ID, e.g. `extract_cad0a6d2dec046bd95ae900527d880e7`"""
+
+ results: List[ExtractResult]
+ """Successful extract results."""
+
+ session_id: str
+ """Session identifier.
+
+ Echoed back from the request if provided, otherwise generated by the server.
+ Should be passed to future search and extract calls made by the agent as part of
+ the same larger task.
+ """
+
+ usage: Optional[List[UsageItem]] = None
+ """Usage metrics for the extract request."""
+
+ warnings: Optional[List[Warning]] = None
+ """Warnings for the extract request, if any."""
diff --git a/src/parallel/types/extract_result.py b/src/parallel/types/extract_result.py
new file mode 100644
index 0000000..e02243b
--- /dev/null
+++ b/src/parallel/types/extract_result.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+
+__all__ = ["ExtractResult"]
+
+
+class ExtractResult(BaseModel):
+ """Extract result for a single URL."""
+
+ excerpts: List[str]
+ """Relevant excerpted content from the URL, formatted as markdown."""
+
+ url: str
+ """URL associated with the search result."""
+
+ full_content: Optional[str] = None
+ """Full content from the URL formatted as markdown, if requested."""
+
+ publish_date: Optional[str] = None
+ """Publish date of the webpage in YYYY-MM-DD format, if available."""
+
+ title: Optional[str] = None
+ """Title of the webpage, if available."""
diff --git a/src/parallel/types/fetch_policy_param.py b/src/parallel/types/fetch_policy_param.py
new file mode 100644
index 0000000..5bc4447
--- /dev/null
+++ b/src/parallel/types/fetch_policy_param.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import TypedDict
+
+__all__ = ["FetchPolicyParam"]
+
+
+class FetchPolicyParam(TypedDict, total=False):
+ """Policy for live fetching web results."""
+
+ disable_cache_fallback: bool
+ """
+ If false, fallback to cached content older than max-age if live fetch fails or
+ times out. If true, returns an error instead.
+ """
+
+ max_age_seconds: Optional[int]
+ """Maximum age of cached content in seconds to trigger a live fetch.
+
+ Minimum value 600 seconds (10 minutes).
+ """
+
+ timeout_seconds: Optional[float]
+ """Timeout in seconds for fetching live content if unavailable in cache."""
diff --git a/src/parallel/types/mcp_server.py b/src/parallel/types/mcp_server.py
new file mode 100644
index 0000000..7c4ba25
--- /dev/null
+++ b/src/parallel/types/mcp_server.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["McpServer"]
+
+
+class McpServer(BaseModel):
+ """MCP server configuration."""
+
+ name: str
+ """Name of the MCP server."""
+
+ url: str
+ """URL of the MCP server."""
+
+ allowed_tools: Optional[List[str]] = None
+ """List of allowed tools for the MCP server."""
+
+ headers: Optional[Dict[str, str]] = None
+ """Headers for the MCP server."""
+
+ type: Optional[Literal["url"]] = None
+ """Type of MCP server being configured. Always `url`."""
diff --git a/src/parallel/types/mcp_server_param.py b/src/parallel/types/mcp_server_param.py
new file mode 100644
index 0000000..f3f207a
--- /dev/null
+++ b/src/parallel/types/mcp_server_param.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = ["McpServerParam"]
+
+
+class McpServerParam(TypedDict, total=False):
+ """MCP server configuration."""
+
+ name: Required[str]
+ """Name of the MCP server."""
+
+ url: Required[str]
+ """URL of the MCP server."""
+
+ allowed_tools: Optional[SequenceNotStr[str]]
+ """List of allowed tools for the MCP server."""
+
+ headers: Optional[Dict[str, str]]
+ """Headers for the MCP server."""
+
+ type: Literal["url"]
+ """Type of MCP server being configured. Always `url`."""
diff --git a/src/parallel/types/mcp_tool_call.py b/src/parallel/types/mcp_tool_call.py
new file mode 100644
index 0000000..6cdccc2
--- /dev/null
+++ b/src/parallel/types/mcp_tool_call.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["McpToolCall"]
+
+
+class McpToolCall(BaseModel):
+ """Result of an MCP tool call."""
+
+ arguments: str
+ """Arguments used to call the MCP tool."""
+
+ server_name: str
+ """Name of the MCP server."""
+
+ tool_call_id: str
+ """Identifier for the tool call."""
+
+ tool_name: str
+ """Name of the tool being called."""
+
+ content: Optional[str] = None
+ """Output received from the tool call, if successful."""
+
+ error: Optional[str] = None
+ """Error message if the tool call failed."""
diff --git a/src/parallel/types/run_input.py b/src/parallel/types/run_input.py
new file mode 100644
index 0000000..8c302d6
--- /dev/null
+++ b/src/parallel/types/run_input.py
@@ -0,0 +1,72 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+
+from .webhook import Webhook
+from .._models import BaseModel
+from .task_spec import TaskSpec
+from .mcp_server import McpServer
+from .shared.source_policy import SourcePolicy
+
+__all__ = ["RunInput", "AdvancedSettings"]
+
+
+class AdvancedSettings(BaseModel):
+ """Advanced search configuration for a task run."""
+
+ location: Optional[str] = None
+ """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
+
+
+class RunInput(BaseModel):
+ """Request to run a task."""
+
+ input: Union[str, Dict[str, object]]
+ """Input to the task, either text or a JSON object."""
+
+ processor: str
+ """Processor to use for the task."""
+
+ advanced_settings: Optional[AdvancedSettings] = None
+ """Advanced search configuration for a task run."""
+
+ enable_events: Optional[bool] = None
+ """Controls tracking of task run execution progress.
+
+ When set to true, progress events are recorded and can be accessed via the
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
+ """
+
+ mcp_servers: Optional[List[McpServer]] = None
+ """Optional list of MCP servers to use for the run."""
+
+ metadata: Optional[Dict[str, Union[str, float, bool]]] = None
+ """User-provided metadata stored with the run.
+
+ Keys and values must be strings with a maximum length of 16 and 512 characters
+ respectively.
+ """
+
+ previous_interaction_id: Optional[str] = None
+ """Interaction ID to use as context for this request."""
+
+ source_policy: Optional[SourcePolicy] = None
+ """Source policy for web search results.
+
+ This policy governs which sources are allowed/disallowed in results.
+ """
+
+ task_spec: Optional[TaskSpec] = None
+ """Specification for a task.
+
+ Auto output schemas can be specified by setting `output_schema={"type":"auto"}`.
+ Not specifying a TaskSpec is the same as setting an auto output schema.
+
+ For convenience bare strings are also accepted as input or output schemas.
+ """
+
+ webhook: Optional[Webhook] = None
+ """Webhooks for Task Runs."""
diff --git a/src/parallel/types/run_input_param.py b/src/parallel/types/run_input_param.py
new file mode 100644
index 0000000..9fc2605
--- /dev/null
+++ b/src/parallel/types/run_input_param.py
@@ -0,0 +1,74 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable, Optional
+from typing_extensions import Required, TypedDict
+
+from .webhook_param import WebhookParam
+from .task_spec_param import TaskSpecParam
+from .mcp_server_param import McpServerParam
+from .shared_params.source_policy import SourcePolicy
+
+__all__ = ["RunInputParam", "AdvancedSettings"]
+
+
+class AdvancedSettings(TypedDict, total=False):
+ """Advanced search configuration for a task run."""
+
+ location: Optional[str]
+ """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
+
+
+class RunInputParam(TypedDict, total=False):
+ """Request to run a task."""
+
+ input: Required[Union[str, Dict[str, object]]]
+ """Input to the task, either text or a JSON object."""
+
+ processor: Required[str]
+ """Processor to use for the task."""
+
+ advanced_settings: Optional[AdvancedSettings]
+ """Advanced search configuration for a task run."""
+
+ enable_events: Optional[bool]
+ """Controls tracking of task run execution progress.
+
+ When set to true, progress events are recorded and can be accessed via the
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
+ """
+
+ mcp_servers: Optional[Iterable[McpServerParam]]
+ """Optional list of MCP servers to use for the run."""
+
+ metadata: Optional[Dict[str, Union[str, float, bool]]]
+ """User-provided metadata stored with the run.
+
+ Keys and values must be strings with a maximum length of 16 and 512 characters
+ respectively.
+ """
+
+ previous_interaction_id: Optional[str]
+ """Interaction ID to use as context for this request."""
+
+ source_policy: Optional[SourcePolicy]
+ """Source policy for web search results.
+
+ This policy governs which sources are allowed/disallowed in results.
+ """
+
+ task_spec: Optional[TaskSpecParam]
+ """Specification for a task.
+
+ Auto output schemas can be specified by setting `output_schema={"type":"auto"}`.
+ Not specifying a TaskSpec is the same as setting an auto output schema.
+
+ For convenience bare strings are also accepted as input or output schemas.
+ """
+
+ webhook: Optional[WebhookParam]
+ """Webhooks for Task Runs."""
diff --git a/src/parallel/types/search_result.py b/src/parallel/types/search_result.py
new file mode 100644
index 0000000..0320bed
--- /dev/null
+++ b/src/parallel/types/search_result.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .usage_item import UsageItem
+from .shared.warning import Warning
+from .web_search_result import WebSearchResult
+
+__all__ = ["SearchResult"]
+
+
+class SearchResult(BaseModel):
+ """Search response."""
+
+ results: List[WebSearchResult]
+ """A list of search results, ordered by decreasing relevance."""
+
+ search_id: str
+ """Search ID. Example: `search_cad0a6d2dec046bd95ae900527d880e7`"""
+
+ session_id: str
+ """
+ Session identifier, echoed back from the request if provided, otherwise
+ generated by the server. Should be passed to future search and extract calls
+ made by the agent as part of the same larger task.
+ """
+
+ usage: Optional[List[UsageItem]] = None
+ """Usage metrics for the search request."""
+
+ warnings: Optional[List[Warning]] = None
+ """Warnings for the search request, if any."""
diff --git a/src/parallel/types/shared/source_policy.py b/src/parallel/types/shared/source_policy.py
index 7ea1deb..51b7aa1 100644
--- a/src/parallel/types/shared/source_policy.py
+++ b/src/parallel/types/shared/source_policy.py
@@ -26,7 +26,8 @@ class SourcePolicy(BaseModel):
If specified, sources from these domains will be excluded. Accepts plain domains
(e.g., example.com, subdomain.example.gov) or bare domain extension starting
- with a period (e.g., .gov, .edu, .co.uk).
+ with a period (e.g., .gov, .edu, .co.uk). The combined number of domains in
+ include_domains and exclude_domains cannot exceed 200.
"""
include_domains: Optional[List[str]] = None
@@ -34,5 +35,6 @@ class SourcePolicy(BaseModel):
If specified, only sources from these domains will be included. Accepts plain
domains (e.g., example.com, subdomain.example.gov) or bare domain extension
- starting with a period (e.g., .gov, .edu, .co.uk).
+ starting with a period (e.g., .gov, .edu, .co.uk). The combined number of
+ domains in include_domains and exclude_domains cannot exceed 200.
"""
diff --git a/src/parallel/types/shared_params/source_policy.py b/src/parallel/types/shared_params/source_policy.py
index c3da049..adc5dbb 100644
--- a/src/parallel/types/shared_params/source_policy.py
+++ b/src/parallel/types/shared_params/source_policy.py
@@ -30,7 +30,8 @@ class SourcePolicy(TypedDict, total=False):
If specified, sources from these domains will be excluded. Accepts plain domains
(e.g., example.com, subdomain.example.gov) or bare domain extension starting
- with a period (e.g., .gov, .edu, .co.uk).
+ with a period (e.g., .gov, .edu, .co.uk). The combined number of domains in
+ include_domains and exclude_domains cannot exceed 200.
"""
include_domains: SequenceNotStr[str]
@@ -38,5 +39,6 @@ class SourcePolicy(TypedDict, total=False):
If specified, only sources from these domains will be included. Accepts plain
domains (e.g., example.com, subdomain.example.gov) or bare domain extension
- starting with a period (e.g., .gov, .edu, .co.uk).
+ starting with a period (e.g., .gov, .edu, .co.uk). The combined number of
+ domains in include_domains and exclude_domains cannot exceed 200.
"""
diff --git a/src/parallel/types/task_run.py b/src/parallel/types/task_run.py
index 0bdb0b1..a4f52af 100644
--- a/src/parallel/types/task_run.py
+++ b/src/parallel/types/task_run.py
@@ -9,10 +9,7 @@
from .shared.warning import Warning
from .shared.error_object import ErrorObject
-__all__ = [
- "TaskRun",
- "Warning", # for backwards compatibility with v0.1.3
-]
+__all__ = ["TaskRun"]
class TaskRun(BaseModel):
diff --git a/src/parallel/types/task_run_create_params.py b/src/parallel/types/task_run_create_params.py
index 5f1c572..be5d695 100644
--- a/src/parallel/types/task_run_create_params.py
+++ b/src/parallel/types/task_run_create_params.py
@@ -2,13 +2,17 @@
from __future__ import annotations
-from typing import Dict, Union, Optional
-from typing_extensions import Required, TypedDict
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Required, Annotated, TypedDict
+from .._utils import PropertyInfo
+from .webhook_param import WebhookParam
from .task_spec_param import TaskSpecParam
+from .mcp_server_param import McpServerParam
+from .beta.parallel_beta_param import ParallelBetaParam
from .shared_params.source_policy import SourcePolicy
-__all__ = ["TaskRunCreateParams"]
+__all__ = ["TaskRunCreateParams", "AdvancedSettings"]
class TaskRunCreateParams(TypedDict, total=False):
@@ -18,6 +22,22 @@ class TaskRunCreateParams(TypedDict, total=False):
processor: Required[str]
"""Processor to use for the task."""
+ advanced_settings: Optional[AdvancedSettings]
+ """Advanced search configuration for a task run."""
+
+ enable_events: Optional[bool]
+ """Controls tracking of task run execution progress.
+
+ When set to true, progress events are recorded and can be accessed via the
+ [Task Run events](https://docs.parallel.ai/api-reference) endpoint. When false,
+ no progress events are tracked. Note that progress tracking cannot be enabled
+ after a run has been created. The flag is set to true by default for premium
+ processors (pro and above).
+ """
+
+ mcp_servers: Optional[Iterable[McpServerParam]]
+ """Optional list of MCP servers to use for the run."""
+
metadata: Optional[Dict[str, Union[str, float, bool]]]
"""User-provided metadata stored with the run.
@@ -42,3 +62,16 @@ class TaskRunCreateParams(TypedDict, total=False):
For convenience bare strings are also accepted as input or output schemas.
"""
+
+ webhook: Optional[WebhookParam]
+ """Webhooks for Task Runs."""
+
+ betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
+ """Optional header to specify the beta version(s) to enable."""
+
+
+class AdvancedSettings(TypedDict, total=False):
+ """Advanced search configuration for a task run."""
+
+ location: Optional[str]
+ """ISO 3166-1 alpha-2 country code for geo-targeted search results."""
diff --git a/src/parallel/types/task_run_event.py b/src/parallel/types/task_run_event.py
new file mode 100644
index 0000000..4ed9071
--- /dev/null
+++ b/src/parallel/types/task_run_event.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .._models import BaseModel
+from .task_run import TaskRun
+from .run_input import RunInput
+from .task_run_json_output import TaskRunJsonOutput
+from .task_run_text_output import TaskRunTextOutput
+
+__all__ = ["TaskRunEvent", "Output"]
+
+Output: TypeAlias = Annotated[Union[TaskRunTextOutput, TaskRunJsonOutput, None], PropertyInfo(discriminator="type")]
+
+
+class TaskRunEvent(BaseModel):
+ """Event when a task run transitions to a non-active status.
+
+ May indicate completion, cancellation, or failure.
+ """
+
+ event_id: Optional[str] = None
+ """Cursor to resume the event stream. Always empty for non Task Group runs."""
+
+ run: TaskRun
+ """Task run object."""
+
+ type: Literal["task_run.state"]
+ """Event type; always 'task_run.state'."""
+
+ input: Optional[RunInput] = None
+ """Request to run a task."""
+
+ output: Optional[Output] = None
+ """Output from the run; included only if requested and if status == `completed`."""
diff --git a/src/parallel/types/task_run_events_response.py b/src/parallel/types/task_run_events_response.py
new file mode 100644
index 0000000..20ded6e
--- /dev/null
+++ b/src/parallel/types/task_run_events_response.py
@@ -0,0 +1,70 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .._models import BaseModel
+from .error_event import ErrorEvent
+from .task_run_event import TaskRunEvent
+
+__all__ = [
+ "TaskRunEventsResponse",
+ "TaskRunProgressStatsEvent",
+ "TaskRunProgressStatsEventSourceStats",
+ "TaskRunProgressMessageEvent",
+]
+
+
+class TaskRunProgressStatsEventSourceStats(BaseModel):
+ """Source stats describing progress so far."""
+
+ num_sources_considered: Optional[int] = None
+ """Number of sources considered in processing the task."""
+
+ num_sources_read: Optional[int] = None
+ """Number of sources read in processing the task."""
+
+ sources_read_sample: Optional[List[str]] = None
+ """A sample of URLs of sources read in processing the task."""
+
+
+class TaskRunProgressStatsEvent(BaseModel):
+ """A progress update for a task run."""
+
+ progress_meter: float
+ """Completion percentage of the task run.
+
+ Ranges from 0 to 100 where 0 indicates no progress and 100 indicates completion.
+ """
+
+ source_stats: TaskRunProgressStatsEventSourceStats
+ """Source stats describing progress so far."""
+
+ type: Literal["task_run.progress_stats"]
+ """Event type; always 'task_run.progress_stats'."""
+
+
+class TaskRunProgressMessageEvent(BaseModel):
+ """A message for a task run progress update."""
+
+ message: str
+ """Progress update message."""
+
+ timestamp: Optional[str] = None
+ """Timestamp of the message."""
+
+ type: Literal[
+ "task_run.progress_msg.plan",
+ "task_run.progress_msg.search",
+ "task_run.progress_msg.result",
+ "task_run.progress_msg.tool_call",
+ "task_run.progress_msg.exec_status",
+ ]
+ """Event type; always starts with 'task_run.progress_msg'."""
+
+
+TaskRunEventsResponse: TypeAlias = Annotated[
+ Union[TaskRunProgressStatsEvent, TaskRunProgressMessageEvent, TaskRunEvent, ErrorEvent],
+ PropertyInfo(discriminator="type"),
+]
diff --git a/src/parallel/types/task_run_json_output.py b/src/parallel/types/task_run_json_output.py
index 8541b61..6a43d58 100644
--- a/src/parallel/types/task_run_json_output.py
+++ b/src/parallel/types/task_run_json_output.py
@@ -5,6 +5,7 @@
from .._models import BaseModel
from .field_basis import FieldBasis
+from .mcp_tool_call import McpToolCall
__all__ = ["TaskRunJsonOutput"]
@@ -32,19 +33,15 @@ class TaskRunJsonOutput(BaseModel):
"""
beta_fields: Optional[Dict[str, object]] = None
- """Additional fields from beta features used in this task run.
-
- When beta features are specified during both task run creation and result
- retrieval, this field will be empty and instead the relevant beta attributes
- will be directly included in the `BetaTaskRunJsonOutput` or corresponding output
- type. However, if beta features were specified during task run creation but not
- during result retrieval, this field will contain the dump of fields from those
- beta features. Each key represents the beta feature version (one amongst
- parallel-beta headers) and the values correspond to the beta feature attributes,
- if any. For now, only MCP server beta features have attributes. For example,
- `{mcp-server-2025-07-17: [{'server_name':'mcp_server', 'tool_call_id': 'tc_123', ...}]}}`
+ """Deprecated.
+
+ mcp-server-2025-07-17 is now included directly in the output (e.g.
+ mcp_tool_calls).
"""
+ mcp_tool_calls: Optional[List[McpToolCall]] = None
+ """MCP tool calls made by the task."""
+
output_schema: Optional[Dict[str, object]] = None
"""Output schema for the Task Run.
diff --git a/src/parallel/types/task_run_result.py b/src/parallel/types/task_run_result.py
index fb9d39e..27e0f9b 100644
--- a/src/parallel/types/task_run_result.py
+++ b/src/parallel/types/task_run_result.py
@@ -40,7 +40,6 @@
OutputTaskRunTextOutputBasisCitation = Citation # for backwards compatibility with v0.1.3
"""This is deprecated, `Citation` should be used instead"""
-
Output: TypeAlias = Annotated[Union[TaskRunTextOutput, TaskRunJsonOutput], PropertyInfo(discriminator="type")]
diff --git a/src/parallel/types/task_run_result_params.py b/src/parallel/types/task_run_result_params.py
index 676bbda..45aaafb 100644
--- a/src/parallel/types/task_run_result_params.py
+++ b/src/parallel/types/task_run_result_params.py
@@ -2,12 +2,17 @@
from __future__ import annotations
+from typing import List
from typing_extensions import Annotated, TypedDict
from .._utils import PropertyInfo
+from .beta.parallel_beta_param import ParallelBetaParam
__all__ = ["TaskRunResultParams"]
class TaskRunResultParams(TypedDict, total=False):
api_timeout: Annotated[int, PropertyInfo(alias="timeout")]
+
+ betas: Annotated[List[ParallelBetaParam], PropertyInfo(alias="parallel-beta")]
+ """Optional header to specify the beta version(s) to enable."""
diff --git a/src/parallel/types/task_run_text_output.py b/src/parallel/types/task_run_text_output.py
index 5d7e4c6..46c23ff 100644
--- a/src/parallel/types/task_run_text_output.py
+++ b/src/parallel/types/task_run_text_output.py
@@ -5,6 +5,7 @@
from .._models import BaseModel
from .field_basis import FieldBasis
+from .mcp_tool_call import McpToolCall
__all__ = ["TaskRunTextOutput"]
@@ -25,15 +26,11 @@ class TaskRunTextOutput(BaseModel):
"""
beta_fields: Optional[Dict[str, object]] = None
- """Additional fields from beta features used in this task run.
-
- When beta features are specified during both task run creation and result
- retrieval, this field will be empty and instead the relevant beta attributes
- will be directly included in the `BetaTaskRunJsonOutput` or corresponding output
- type. However, if beta features were specified during task run creation but not
- during result retrieval, this field will contain the dump of fields from those
- beta features. Each key represents the beta feature version (one amongst
- parallel-beta headers) and the values correspond to the beta feature attributes,
- if any. For now, only MCP server beta features have attributes. For example,
- `{mcp-server-2025-07-17: [{'server_name':'mcp_server', 'tool_call_id': 'tc_123', ...}]}}`
+ """Deprecated.
+
+ mcp-server-2025-07-17 is now included directly in the output (e.g.
+ mcp_tool_calls).
"""
+
+ mcp_tool_calls: Optional[List[McpToolCall]] = None
+ """MCP tool calls made by the task."""
diff --git a/src/parallel/types/usage_item.py b/src/parallel/types/usage_item.py
new file mode 100644
index 0000000..471d112
--- /dev/null
+++ b/src/parallel/types/usage_item.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["UsageItem"]
+
+
+class UsageItem(BaseModel):
+ """Usage item for a single operation."""
+
+ count: int
+ """Count of the SKU."""
+
+ name: str
+ """Name of the SKU."""
diff --git a/src/parallel/types/web_search_result.py b/src/parallel/types/web_search_result.py
new file mode 100644
index 0000000..6178b6e
--- /dev/null
+++ b/src/parallel/types/web_search_result.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+
+__all__ = ["WebSearchResult"]
+
+
+class WebSearchResult(BaseModel):
+ """A single search result from the web search API."""
+
+ excerpts: List[str]
+ """Relevant excerpted content from the URL, formatted as markdown."""
+
+ url: str
+ """URL associated with the search result."""
+
+ publish_date: Optional[str] = None
+ """Publish date of the webpage in YYYY-MM-DD format, if available."""
+
+ title: Optional[str] = None
+ """Title of the webpage, if available."""
diff --git a/src/parallel/types/webhook.py b/src/parallel/types/webhook.py
new file mode 100644
index 0000000..67964b3
--- /dev/null
+++ b/src/parallel/types/webhook.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["Webhook"]
+
+
+class Webhook(BaseModel):
+ """Webhooks for Task Runs."""
+
+ url: str
+ """URL for the webhook."""
+
+ event_types: Optional[List[Literal["task_run.status"]]] = None
+ """Event types to send the webhook notifications for."""
diff --git a/src/parallel/types/webhook_param.py b/src/parallel/types/webhook_param.py
new file mode 100644
index 0000000..90a667d
--- /dev/null
+++ b/src/parallel/types/webhook_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["WebhookParam"]
+
+
+class WebhookParam(TypedDict, total=False):
+ """Webhooks for Task Runs."""
+
+ url: Required[str]
+ """URL for the webhook."""
+
+ event_types: List[Literal["task_run.status"]]
+ """Event types to send the webhook notifications for."""
diff --git a/tests/api_resources/beta/test_findall.py b/tests/api_resources/beta/test_findall.py
index 6ee829d..3d73d48 100644
--- a/tests/api_resources/beta/test_findall.py
+++ b/tests/api_resources/beta/test_findall.py
@@ -13,6 +13,7 @@
FindAllRun,
FindAllSchema,
FindAllRunResult,
+ FindAllCandidatesResponse,
)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -199,6 +200,49 @@ def test_path_params_cancel(self, client: Parallel) -> None:
findall_id="",
)
+ @parametrize
+ def test_method_candidates(self, client: Parallel) -> None:
+ findall = client.beta.findall.candidates(
+ entity_type="company",
+ objective="objective",
+ )
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ @parametrize
+ def test_method_candidates_with_all_params(self, client: Parallel) -> None:
+ findall = client.beta.findall.candidates(
+ entity_type="company",
+ objective="objective",
+ match_limit=5,
+ )
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ @parametrize
+ def test_raw_response_candidates(self, client: Parallel) -> None:
+ response = client.beta.findall.with_raw_response.candidates(
+ entity_type="company",
+ objective="objective",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ findall = response.parse()
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ @parametrize
+ def test_streaming_response_candidates(self, client: Parallel) -> None:
+ with client.beta.findall.with_streaming_response.candidates(
+ entity_type="company",
+ objective="objective",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ findall = response.parse()
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
@parametrize
def test_method_enrich(self, client: Parallel) -> None:
findall = client.beta.findall.enrich(
@@ -296,7 +340,6 @@ def test_path_params_enrich(self, client: Parallel) -> None:
},
)
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events(self, client: Parallel) -> None:
findall_stream = client.beta.findall.events(
@@ -304,7 +347,6 @@ def test_method_events(self, client: Parallel) -> None:
)
findall_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events_with_all_params(self, client: Parallel) -> None:
findall_stream = client.beta.findall.events(
@@ -315,7 +357,6 @@ def test_method_events_with_all_params(self, client: Parallel) -> None:
)
findall_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_raw_response_events(self, client: Parallel) -> None:
response = client.beta.findall.with_raw_response.events(
@@ -326,7 +367,6 @@ def test_raw_response_events(self, client: Parallel) -> None:
stream = response.parse()
stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_streaming_response_events(self, client: Parallel) -> None:
with client.beta.findall.with_streaming_response.events(
@@ -340,7 +380,6 @@ def test_streaming_response_events(self, client: Parallel) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_path_params_events(self, client: Parallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `findall_id` but received ''"):
@@ -714,6 +753,49 @@ async def test_path_params_cancel(self, async_client: AsyncParallel) -> None:
findall_id="",
)
+ @parametrize
+ async def test_method_candidates(self, async_client: AsyncParallel) -> None:
+ findall = await async_client.beta.findall.candidates(
+ entity_type="company",
+ objective="objective",
+ )
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ @parametrize
+ async def test_method_candidates_with_all_params(self, async_client: AsyncParallel) -> None:
+ findall = await async_client.beta.findall.candidates(
+ entity_type="company",
+ objective="objective",
+ match_limit=5,
+ )
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ @parametrize
+ async def test_raw_response_candidates(self, async_client: AsyncParallel) -> None:
+ response = await async_client.beta.findall.with_raw_response.candidates(
+ entity_type="company",
+ objective="objective",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ findall = await response.parse()
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_candidates(self, async_client: AsyncParallel) -> None:
+ async with async_client.beta.findall.with_streaming_response.candidates(
+ entity_type="company",
+ objective="objective",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ findall = await response.parse()
+ assert_matches_type(FindAllCandidatesResponse, findall, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
@parametrize
async def test_method_enrich(self, async_client: AsyncParallel) -> None:
findall = await async_client.beta.findall.enrich(
@@ -811,7 +893,6 @@ async def test_path_params_enrich(self, async_client: AsyncParallel) -> None:
},
)
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events(self, async_client: AsyncParallel) -> None:
findall_stream = await async_client.beta.findall.events(
@@ -819,7 +900,6 @@ async def test_method_events(self, async_client: AsyncParallel) -> None:
)
await findall_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events_with_all_params(self, async_client: AsyncParallel) -> None:
findall_stream = await async_client.beta.findall.events(
@@ -830,7 +910,6 @@ async def test_method_events_with_all_params(self, async_client: AsyncParallel)
)
await findall_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
response = await async_client.beta.findall.with_raw_response.events(
@@ -841,7 +920,6 @@ async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
stream = await response.parse()
await stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_streaming_response_events(self, async_client: AsyncParallel) -> None:
async with async_client.beta.findall.with_streaming_response.events(
@@ -855,7 +933,6 @@ async def test_streaming_response_events(self, async_client: AsyncParallel) -> N
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_path_params_events(self, async_client: AsyncParallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `findall_id` but received ''"):
diff --git a/tests/api_resources/beta/test_task_group.py b/tests/api_resources/beta/test_task_group.py
index cc200ce..9f30f48 100644
--- a/tests/api_resources/beta/test_task_group.py
+++ b/tests/api_resources/beta/test_task_group.py
@@ -112,6 +112,7 @@ def test_method_add_runs_with_all_params(self, client: Parallel) -> None:
{
"input": "What was the GDP of France in 2023?",
"processor": "base",
+ "advanced_settings": {"location": "us"},
"enable_events": True,
"mcp_servers": [
{
@@ -147,6 +148,7 @@ def test_method_add_runs_with_all_params(self, client: Parallel) -> None:
},
}
],
+ refresh_status=True,
default_task_spec={
"output_schema": {
"json_schema": {
@@ -212,7 +214,6 @@ def test_path_params_add_runs(self, client: Parallel) -> None:
],
)
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events(self, client: Parallel) -> None:
task_group_stream = client.beta.task_group.events(
@@ -220,7 +221,6 @@ def test_method_events(self, client: Parallel) -> None:
)
task_group_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events_with_all_params(self, client: Parallel) -> None:
task_group_stream = client.beta.task_group.events(
@@ -230,7 +230,6 @@ def test_method_events_with_all_params(self, client: Parallel) -> None:
)
task_group_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_raw_response_events(self, client: Parallel) -> None:
response = client.beta.task_group.with_raw_response.events(
@@ -241,7 +240,6 @@ def test_raw_response_events(self, client: Parallel) -> None:
stream = response.parse()
stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_streaming_response_events(self, client: Parallel) -> None:
with client.beta.task_group.with_streaming_response.events(
@@ -255,7 +253,6 @@ def test_streaming_response_events(self, client: Parallel) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_path_params_events(self, client: Parallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"):
@@ -263,7 +260,6 @@ def test_path_params_events(self, client: Parallel) -> None:
task_group_id="",
)
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_get_runs(self, client: Parallel) -> None:
task_group_stream = client.beta.task_group.get_runs(
@@ -271,7 +267,6 @@ def test_method_get_runs(self, client: Parallel) -> None:
)
task_group_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_get_runs_with_all_params(self, client: Parallel) -> None:
task_group_stream = client.beta.task_group.get_runs(
@@ -283,7 +278,6 @@ def test_method_get_runs_with_all_params(self, client: Parallel) -> None:
)
task_group_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_raw_response_get_runs(self, client: Parallel) -> None:
response = client.beta.task_group.with_raw_response.get_runs(
@@ -294,7 +288,6 @@ def test_raw_response_get_runs(self, client: Parallel) -> None:
stream = response.parse()
stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_streaming_response_get_runs(self, client: Parallel) -> None:
with client.beta.task_group.with_streaming_response.get_runs(
@@ -308,7 +301,6 @@ def test_streaming_response_get_runs(self, client: Parallel) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_path_params_get_runs(self, client: Parallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"):
@@ -413,6 +405,7 @@ async def test_method_add_runs_with_all_params(self, async_client: AsyncParallel
{
"input": "What was the GDP of France in 2023?",
"processor": "base",
+ "advanced_settings": {"location": "us"},
"enable_events": True,
"mcp_servers": [
{
@@ -448,6 +441,7 @@ async def test_method_add_runs_with_all_params(self, async_client: AsyncParallel
},
}
],
+ refresh_status=True,
default_task_spec={
"output_schema": {
"json_schema": {
@@ -513,7 +507,6 @@ async def test_path_params_add_runs(self, async_client: AsyncParallel) -> None:
],
)
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events(self, async_client: AsyncParallel) -> None:
task_group_stream = await async_client.beta.task_group.events(
@@ -521,7 +514,6 @@ async def test_method_events(self, async_client: AsyncParallel) -> None:
)
await task_group_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events_with_all_params(self, async_client: AsyncParallel) -> None:
task_group_stream = await async_client.beta.task_group.events(
@@ -531,7 +523,6 @@ async def test_method_events_with_all_params(self, async_client: AsyncParallel)
)
await task_group_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
response = await async_client.beta.task_group.with_raw_response.events(
@@ -542,7 +533,6 @@ async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
stream = await response.parse()
await stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_streaming_response_events(self, async_client: AsyncParallel) -> None:
async with async_client.beta.task_group.with_streaming_response.events(
@@ -556,7 +546,6 @@ async def test_streaming_response_events(self, async_client: AsyncParallel) -> N
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_path_params_events(self, async_client: AsyncParallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"):
@@ -564,7 +553,6 @@ async def test_path_params_events(self, async_client: AsyncParallel) -> None:
task_group_id="",
)
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_get_runs(self, async_client: AsyncParallel) -> None:
task_group_stream = await async_client.beta.task_group.get_runs(
@@ -572,7 +560,6 @@ async def test_method_get_runs(self, async_client: AsyncParallel) -> None:
)
await task_group_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_get_runs_with_all_params(self, async_client: AsyncParallel) -> None:
task_group_stream = await async_client.beta.task_group.get_runs(
@@ -584,7 +571,6 @@ async def test_method_get_runs_with_all_params(self, async_client: AsyncParallel
)
await task_group_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_raw_response_get_runs(self, async_client: AsyncParallel) -> None:
response = await async_client.beta.task_group.with_raw_response.get_runs(
@@ -595,7 +581,6 @@ async def test_raw_response_get_runs(self, async_client: AsyncParallel) -> None:
stream = await response.parse()
await stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_streaming_response_get_runs(self, async_client: AsyncParallel) -> None:
async with async_client.beta.task_group.with_streaming_response.get_runs(
@@ -609,7 +594,6 @@ async def test_streaming_response_get_runs(self, async_client: AsyncParallel) ->
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_path_params_get_runs(self, async_client: AsyncParallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"):
diff --git a/tests/api_resources/beta/test_task_run.py b/tests/api_resources/beta/test_task_run.py
index 794846d..88474c5 100644
--- a/tests/api_resources/beta/test_task_run.py
+++ b/tests/api_resources/beta/test_task_run.py
@@ -9,9 +9,10 @@
from parallel import Parallel, AsyncParallel
from tests.utils import assert_matches_type
-from parallel.types import TaskRun
+from parallel.types import TaskRun, TaskRunResult
from parallel._utils import parse_date
-from parallel.types.beta import BetaTaskRunResult
+
+# pyright: reportDeprecated=false
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -21,60 +22,66 @@ class TestTaskRun:
@parametrize
def test_method_create(self, client: Parallel) -> None:
- task_run = client.beta.task_run.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- )
+ with pytest.warns(DeprecationWarning):
+ task_run = client.beta.task_run.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ )
+
assert_matches_type(TaskRun, task_run, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: Parallel) -> None:
- task_run = client.beta.task_run.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- enable_events=True,
- mcp_servers=[
- {
- "name": "name",
- "url": "url",
- "allowed_tools": ["string"],
- "headers": {"foo": "string"},
- "type": "url",
- }
- ],
- metadata={"foo": "string"},
- previous_interaction_id="previous_interaction_id",
- source_policy={
- "after_date": parse_date("2024-01-01"),
- "exclude_domains": ["reddit.com", "x.com", ".ai"],
- "include_domains": ["wikipedia.org", "usa.gov", ".edu"],
- },
- task_spec={
- "output_schema": {
- "json_schema": {
- "additionalProperties": "bar",
- "properties": "bar",
- "required": "bar",
- "type": "bar",
+ with pytest.warns(DeprecationWarning):
+ task_run = client.beta.task_run.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ advanced_settings={"location": "us"},
+ enable_events=True,
+ mcp_servers=[
+ {
+ "name": "name",
+ "url": "url",
+ "allowed_tools": ["string"],
+ "headers": {"foo": "string"},
+ "type": "url",
+ }
+ ],
+ metadata={"foo": "string"},
+ previous_interaction_id="previous_interaction_id",
+ source_policy={
+ "after_date": parse_date("2024-01-01"),
+ "exclude_domains": ["reddit.com", "x.com", ".ai"],
+ "include_domains": ["wikipedia.org", "usa.gov", ".edu"],
+ },
+ task_spec={
+ "output_schema": {
+ "json_schema": {
+ "additionalProperties": "bar",
+ "properties": "bar",
+ "required": "bar",
+ "type": "bar",
+ },
+ "type": "json",
},
- "type": "json",
+ "input_schema": "string",
},
- "input_schema": "string",
- },
- webhook={
- "url": "url",
- "event_types": ["task_run.status"],
- },
- betas=["mcp-server-2025-07-17"],
- )
+ webhook={
+ "url": "url",
+ "event_types": ["task_run.status"],
+ },
+ betas=["mcp-server-2025-07-17"],
+ )
+
assert_matches_type(TaskRun, task_run, path=["response"])
@parametrize
def test_raw_response_create(self, client: Parallel) -> None:
- response = client.beta.task_run.with_raw_response.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.task_run.with_raw_response.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -83,105 +90,114 @@ def test_raw_response_create(self, client: Parallel) -> None:
@parametrize
def test_streaming_response_create(self, client: Parallel) -> None:
- with client.beta.task_run.with_streaming_response.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.task_run.with_streaming_response.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- task_run = response.parse()
- assert_matches_type(TaskRun, task_run, path=["response"])
+ task_run = response.parse()
+ assert_matches_type(TaskRun, task_run, path=["response"])
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events(self, client: Parallel) -> None:
- task_run_stream = client.beta.task_run.events(
- "run_id",
- )
+ with pytest.warns(DeprecationWarning):
+ task_run_stream = client.beta.task_run.events(
+ "run_id",
+ )
+
task_run_stream.response.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_raw_response_events(self, client: Parallel) -> None:
- response = client.beta.task_run.with_raw_response.events(
- "run_id",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.task_run.with_raw_response.events(
+ "run_id",
+ )
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_streaming_response_events(self, client: Parallel) -> None:
- with client.beta.task_run.with_streaming_response.events(
- "run_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.task_run.with_streaming_response.events(
+ "run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- stream = response.parse()
- stream.close()
+ stream = response.parse()
+ stream.close()
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_path_params_events(self, client: Parallel) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.beta.task_run.with_raw_response.events(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.beta.task_run.with_raw_response.events(
+ "",
+ )
@parametrize
def test_method_result(self, client: Parallel) -> None:
- task_run = client.beta.task_run.result(
- run_id="run_id",
- )
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ with pytest.warns(DeprecationWarning):
+ task_run = client.beta.task_run.result(
+ run_id="run_id",
+ )
+
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
@parametrize
def test_method_result_with_all_params(self, client: Parallel) -> None:
- task_run = client.beta.task_run.result(
- run_id="run_id",
- api_timeout=0,
- betas=["mcp-server-2025-07-17"],
- )
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ with pytest.warns(DeprecationWarning):
+ task_run = client.beta.task_run.result(
+ run_id="run_id",
+ api_timeout=0,
+ betas=["mcp-server-2025-07-17"],
+ )
+
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
@parametrize
def test_raw_response_result(self, client: Parallel) -> None:
- response = client.beta.task_run.with_raw_response.result(
- run_id="run_id",
- )
+ with pytest.warns(DeprecationWarning):
+ response = client.beta.task_run.with_raw_response.result(
+ run_id="run_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
task_run = response.parse()
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
@parametrize
def test_streaming_response_result(self, client: Parallel) -> None:
- with client.beta.task_run.with_streaming_response.result(
- run_id="run_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ with client.beta.task_run.with_streaming_response.result(
+ run_id="run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- task_run = response.parse()
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ task_run = response.parse()
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_result(self, client: Parallel) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- client.beta.task_run.with_raw_response.result(
- run_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.beta.task_run.with_raw_response.result(
+ run_id="",
+ )
class TestAsyncTaskRun:
@@ -191,60 +207,66 @@ class TestAsyncTaskRun:
@parametrize
async def test_method_create(self, async_client: AsyncParallel) -> None:
- task_run = await async_client.beta.task_run.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- )
+ with pytest.warns(DeprecationWarning):
+ task_run = await async_client.beta.task_run.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ )
+
assert_matches_type(TaskRun, task_run, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncParallel) -> None:
- task_run = await async_client.beta.task_run.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- enable_events=True,
- mcp_servers=[
- {
- "name": "name",
- "url": "url",
- "allowed_tools": ["string"],
- "headers": {"foo": "string"},
- "type": "url",
- }
- ],
- metadata={"foo": "string"},
- previous_interaction_id="previous_interaction_id",
- source_policy={
- "after_date": parse_date("2024-01-01"),
- "exclude_domains": ["reddit.com", "x.com", ".ai"],
- "include_domains": ["wikipedia.org", "usa.gov", ".edu"],
- },
- task_spec={
- "output_schema": {
- "json_schema": {
- "additionalProperties": "bar",
- "properties": "bar",
- "required": "bar",
- "type": "bar",
+ with pytest.warns(DeprecationWarning):
+ task_run = await async_client.beta.task_run.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ advanced_settings={"location": "us"},
+ enable_events=True,
+ mcp_servers=[
+ {
+ "name": "name",
+ "url": "url",
+ "allowed_tools": ["string"],
+ "headers": {"foo": "string"},
+ "type": "url",
+ }
+ ],
+ metadata={"foo": "string"},
+ previous_interaction_id="previous_interaction_id",
+ source_policy={
+ "after_date": parse_date("2024-01-01"),
+ "exclude_domains": ["reddit.com", "x.com", ".ai"],
+ "include_domains": ["wikipedia.org", "usa.gov", ".edu"],
+ },
+ task_spec={
+ "output_schema": {
+ "json_schema": {
+ "additionalProperties": "bar",
+ "properties": "bar",
+ "required": "bar",
+ "type": "bar",
+ },
+ "type": "json",
},
- "type": "json",
+ "input_schema": "string",
+ },
+ webhook={
+ "url": "url",
+ "event_types": ["task_run.status"],
},
- "input_schema": "string",
- },
- webhook={
- "url": "url",
- "event_types": ["task_run.status"],
- },
- betas=["mcp-server-2025-07-17"],
- )
+ betas=["mcp-server-2025-07-17"],
+ )
+
assert_matches_type(TaskRun, task_run, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncParallel) -> None:
- response = await async_client.beta.task_run.with_raw_response.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.task_run.with_raw_response.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -253,102 +275,111 @@ async def test_raw_response_create(self, async_client: AsyncParallel) -> None:
@parametrize
async def test_streaming_response_create(self, async_client: AsyncParallel) -> None:
- async with async_client.beta.task_run.with_streaming_response.create(
- input="What was the GDP of France in 2023?",
- processor="base",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.task_run.with_streaming_response.create(
+ input="What was the GDP of France in 2023?",
+ processor="base",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- task_run = await response.parse()
- assert_matches_type(TaskRun, task_run, path=["response"])
+ task_run = await response.parse()
+ assert_matches_type(TaskRun, task_run, path=["response"])
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events(self, async_client: AsyncParallel) -> None:
- task_run_stream = await async_client.beta.task_run.events(
- "run_id",
- )
+ with pytest.warns(DeprecationWarning):
+ task_run_stream = await async_client.beta.task_run.events(
+ "run_id",
+ )
+
await task_run_stream.response.aclose()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
- response = await async_client.beta.task_run.with_raw_response.events(
- "run_id",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.task_run.with_raw_response.events(
+ "run_id",
+ )
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = await response.parse()
await stream.close()
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_streaming_response_events(self, async_client: AsyncParallel) -> None:
- async with async_client.beta.task_run.with_streaming_response.events(
- "run_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.task_run.with_streaming_response.events(
+ "run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- stream = await response.parse()
- await stream.close()
+ stream = await response.parse()
+ await stream.close()
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_path_params_events(self, async_client: AsyncParallel) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.beta.task_run.with_raw_response.events(
- "",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.beta.task_run.with_raw_response.events(
+ "",
+ )
@parametrize
async def test_method_result(self, async_client: AsyncParallel) -> None:
- task_run = await async_client.beta.task_run.result(
- run_id="run_id",
- )
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ with pytest.warns(DeprecationWarning):
+ task_run = await async_client.beta.task_run.result(
+ run_id="run_id",
+ )
+
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
@parametrize
async def test_method_result_with_all_params(self, async_client: AsyncParallel) -> None:
- task_run = await async_client.beta.task_run.result(
- run_id="run_id",
- api_timeout=0,
- betas=["mcp-server-2025-07-17"],
- )
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ with pytest.warns(DeprecationWarning):
+ task_run = await async_client.beta.task_run.result(
+ run_id="run_id",
+ api_timeout=0,
+ betas=["mcp-server-2025-07-17"],
+ )
+
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
@parametrize
async def test_raw_response_result(self, async_client: AsyncParallel) -> None:
- response = await async_client.beta.task_run.with_raw_response.result(
- run_id="run_id",
- )
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.beta.task_run.with_raw_response.result(
+ run_id="run_id",
+ )
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
task_run = await response.parse()
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
@parametrize
async def test_streaming_response_result(self, async_client: AsyncParallel) -> None:
- async with async_client.beta.task_run.with_streaming_response.result(
- run_id="run_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ with pytest.warns(DeprecationWarning):
+ async with async_client.beta.task_run.with_streaming_response.result(
+ run_id="run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- task_run = await response.parse()
- assert_matches_type(BetaTaskRunResult, task_run, path=["response"])
+ task_run = await response.parse()
+ assert_matches_type(TaskRunResult, task_run, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_result(self, async_client: AsyncParallel) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
- await async_client.beta.task_run.with_raw_response.result(
- run_id="",
- )
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.beta.task_run.with_raw_response.result(
+ run_id="",
+ )
diff --git a/tests/api_resources/test_beta.py b/tests/api_resources/test_beta.py
index 1c98ae1..417825f 100644
--- a/tests/api_resources/test_beta.py
+++ b/tests/api_resources/test_beta.py
@@ -10,10 +10,7 @@
from parallel import Parallel, AsyncParallel
from tests.utils import assert_matches_type
from parallel._utils import parse_date
-from parallel.types.beta import (
- SearchResult,
- ExtractResponse,
-)
+from parallel.types.beta import SearchResult, ExtractResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -41,6 +38,7 @@ def test_method_extract_with_all_params(self, client: Parallel) -> None:
full_content=True,
objective="objective",
search_queries=["string"],
+ session_id="session_id",
betas=["mcp-server-2025-07-17"],
)
assert_matches_type(ExtractResponse, beta, path=["response"])
@@ -86,12 +84,14 @@ def test_method_search_with_all_params(self, client: Parallel) -> None:
"max_age_seconds": 86400,
"timeout_seconds": 60,
},
+ location="us",
max_chars_per_result=0,
max_results=0,
mode="one-shot",
objective="objective",
processor="base",
search_queries=["string"],
+ session_id="session_id",
source_policy={
"after_date": parse_date("2024-01-01"),
"exclude_domains": ["reddit.com", "x.com", ".ai"],
@@ -147,6 +147,7 @@ async def test_method_extract_with_all_params(self, async_client: AsyncParallel)
full_content=True,
objective="objective",
search_queries=["string"],
+ session_id="session_id",
betas=["mcp-server-2025-07-17"],
)
assert_matches_type(ExtractResponse, beta, path=["response"])
@@ -192,12 +193,14 @@ async def test_method_search_with_all_params(self, async_client: AsyncParallel)
"max_age_seconds": 86400,
"timeout_seconds": 60,
},
+ location="us",
max_chars_per_result=0,
max_results=0,
mode="one-shot",
objective="objective",
processor="base",
search_queries=["string"],
+ session_id="session_id",
source_policy={
"after_date": parse_date("2024-01-01"),
"exclude_domains": ["reddit.com", "x.com", ".ai"],
diff --git a/tests/api_resources/test_client.py b/tests/api_resources/test_client.py
new file mode 100644
index 0000000..e2baaba
--- /dev/null
+++ b/tests/api_resources/test_client.py
@@ -0,0 +1,248 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from parallel import Parallel, AsyncParallel
+from tests.utils import assert_matches_type
+from parallel.types import (
+ SearchResult,
+ ExtractResponse,
+)
+from parallel._utils import parse_date
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestClient:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_extract(self, client: Parallel) -> None:
+ client_ = client.extract(
+ urls=["string"],
+ )
+ assert_matches_type(ExtractResponse, client_, path=["response"])
+
+ @parametrize
+ def test_method_extract_with_all_params(self, client: Parallel) -> None:
+ client_ = client.extract(
+ urls=["string"],
+ advanced_settings={
+ "excerpt_settings": {"max_chars_per_result": 0},
+ "fetch_policy": {
+ "disable_cache_fallback": True,
+ "max_age_seconds": 86400,
+ "timeout_seconds": 60,
+ },
+ "full_content": {"max_chars_per_result": 0},
+ },
+ client_model="claude-sonnet-4-6-20260401",
+ max_chars_total=0,
+ objective="objective",
+ search_queries=["string"],
+ session_id="session_id",
+ )
+ assert_matches_type(ExtractResponse, client_, path=["response"])
+
+ @parametrize
+ def test_raw_response_extract(self, client: Parallel) -> None:
+ response = client.with_raw_response.extract(
+ urls=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ client_ = response.parse()
+ assert_matches_type(ExtractResponse, client_, path=["response"])
+
+ @parametrize
+ def test_streaming_response_extract(self, client: Parallel) -> None:
+ with client.with_streaming_response.extract(
+ urls=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ client_ = response.parse()
+ assert_matches_type(ExtractResponse, client_, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_search(self, client: Parallel) -> None:
+ client_ = client.search(
+ search_queries=["string"],
+ )
+ assert_matches_type(SearchResult, client_, path=["response"])
+
+ @parametrize
+ def test_method_search_with_all_params(self, client: Parallel) -> None:
+ client_ = client.search(
+ search_queries=["string"],
+ advanced_settings={
+ "excerpt_settings": {"max_chars_per_result": 0},
+ "fetch_policy": {
+ "disable_cache_fallback": True,
+ "max_age_seconds": 86400,
+ "timeout_seconds": 60,
+ },
+ "location": "us",
+ "max_results": 0,
+ "source_policy": {
+ "after_date": parse_date("2024-01-01"),
+ "exclude_domains": ["reddit.com", "x.com", ".ai"],
+ "include_domains": ["wikipedia.org", "usa.gov", ".edu"],
+ },
+ },
+ client_model="claude-sonnet-4-6-20260401",
+ max_chars_total=0,
+ mode="basic",
+ objective="objective",
+ session_id="session_id",
+ )
+ assert_matches_type(SearchResult, client_, path=["response"])
+
+ @parametrize
+ def test_raw_response_search(self, client: Parallel) -> None:
+ response = client.with_raw_response.search(
+ search_queries=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ client_ = response.parse()
+ assert_matches_type(SearchResult, client_, path=["response"])
+
+ @parametrize
+ def test_streaming_response_search(self, client: Parallel) -> None:
+ with client.with_streaming_response.search(
+ search_queries=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ client_ = response.parse()
+ assert_matches_type(SearchResult, client_, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncClient:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_extract(self, async_client: AsyncParallel) -> None:
+ client = await async_client.extract(
+ urls=["string"],
+ )
+ assert_matches_type(ExtractResponse, client, path=["response"])
+
+ @parametrize
+ async def test_method_extract_with_all_params(self, async_client: AsyncParallel) -> None:
+ client = await async_client.extract(
+ urls=["string"],
+ advanced_settings={
+ "excerpt_settings": {"max_chars_per_result": 0},
+ "fetch_policy": {
+ "disable_cache_fallback": True,
+ "max_age_seconds": 86400,
+ "timeout_seconds": 60,
+ },
+ "full_content": {"max_chars_per_result": 0},
+ },
+ client_model="claude-sonnet-4-6-20260401",
+ max_chars_total=0,
+ objective="objective",
+ search_queries=["string"],
+ session_id="session_id",
+ )
+ assert_matches_type(ExtractResponse, client, path=["response"])
+
+ @parametrize
+ async def test_raw_response_extract(self, async_client: AsyncParallel) -> None:
+ response = await async_client.with_raw_response.extract(
+ urls=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ client = await response.parse()
+ assert_matches_type(ExtractResponse, client, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_extract(self, async_client: AsyncParallel) -> None:
+ async with async_client.with_streaming_response.extract(
+ urls=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ client = await response.parse()
+ assert_matches_type(ExtractResponse, client, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_search(self, async_client: AsyncParallel) -> None:
+ client = await async_client.search(
+ search_queries=["string"],
+ )
+ assert_matches_type(SearchResult, client, path=["response"])
+
+ @parametrize
+ async def test_method_search_with_all_params(self, async_client: AsyncParallel) -> None:
+ client = await async_client.search(
+ search_queries=["string"],
+ advanced_settings={
+ "excerpt_settings": {"max_chars_per_result": 0},
+ "fetch_policy": {
+ "disable_cache_fallback": True,
+ "max_age_seconds": 86400,
+ "timeout_seconds": 60,
+ },
+ "location": "us",
+ "max_results": 0,
+ "source_policy": {
+ "after_date": parse_date("2024-01-01"),
+ "exclude_domains": ["reddit.com", "x.com", ".ai"],
+ "include_domains": ["wikipedia.org", "usa.gov", ".edu"],
+ },
+ },
+ client_model="claude-sonnet-4-6-20260401",
+ max_chars_total=0,
+ mode="basic",
+ objective="objective",
+ session_id="session_id",
+ )
+ assert_matches_type(SearchResult, client, path=["response"])
+
+ @parametrize
+ async def test_raw_response_search(self, async_client: AsyncParallel) -> None:
+ response = await async_client.with_raw_response.search(
+ search_queries=["string"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ client = await response.parse()
+ assert_matches_type(SearchResult, client, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_search(self, async_client: AsyncParallel) -> None:
+ async with async_client.with_streaming_response.search(
+ search_queries=["string"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ client = await response.parse()
+ assert_matches_type(SearchResult, client, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_task_run.py b/tests/api_resources/test_task_run.py
index 68e7db1..4bb9bd4 100644
--- a/tests/api_resources/test_task_run.py
+++ b/tests/api_resources/test_task_run.py
@@ -9,7 +9,10 @@
from parallel import Parallel, AsyncParallel
from tests.utils import assert_matches_type
-from parallel.types import TaskRun, TaskRunResult
+from parallel.types import (
+ TaskRun,
+ TaskRunResult,
+)
from parallel._utils import parse_date
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -31,6 +34,17 @@ def test_method_create_with_all_params(self, client: Parallel) -> None:
task_run = client.task_run.create(
input="What was the GDP of France in 2023?",
processor="base",
+ advanced_settings={"location": "us"},
+ enable_events=True,
+ mcp_servers=[
+ {
+ "name": "name",
+ "url": "url",
+ "allowed_tools": ["string"],
+ "headers": {"foo": "string"},
+ "type": "url",
+ }
+ ],
metadata={"foo": "string"},
previous_interaction_id="previous_interaction_id",
source_policy={
@@ -50,6 +64,11 @@ def test_method_create_with_all_params(self, client: Parallel) -> None:
},
"input_schema": "string",
},
+ webhook={
+ "url": "url",
+ "event_types": ["task_run.status"],
+ },
+ betas=["mcp-server-2025-07-17"],
)
assert_matches_type(TaskRun, task_run, path=["response"])
@@ -117,6 +136,43 @@ def test_path_params_retrieve(self, client: Parallel) -> None:
"",
)
+ @parametrize
+ def test_method_events(self, client: Parallel) -> None:
+ task_run_stream = client.task_run.events(
+ "run_id",
+ )
+ task_run_stream.response.close()
+
+ @parametrize
+ def test_raw_response_events(self, client: Parallel) -> None:
+ response = client.task_run.with_raw_response.events(
+ "run_id",
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = response.parse()
+ stream.close()
+
+ @parametrize
+ def test_streaming_response_events(self, client: Parallel) -> None:
+ with client.task_run.with_streaming_response.events(
+ "run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = response.parse()
+ stream.close()
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_events(self, client: Parallel) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.task_run.with_raw_response.events(
+ "",
+ )
+
@parametrize
def test_method_result(self, client: Parallel) -> None:
task_run = client.task_run.result(
@@ -129,6 +185,7 @@ def test_method_result_with_all_params(self, client: Parallel) -> None:
task_run = client.task_run.result(
run_id="run_id",
api_timeout=0,
+ betas=["mcp-server-2025-07-17"],
)
assert_matches_type(TaskRunResult, task_run, path=["response"])
@@ -182,6 +239,17 @@ async def test_method_create_with_all_params(self, async_client: AsyncParallel)
task_run = await async_client.task_run.create(
input="What was the GDP of France in 2023?",
processor="base",
+ advanced_settings={"location": "us"},
+ enable_events=True,
+ mcp_servers=[
+ {
+ "name": "name",
+ "url": "url",
+ "allowed_tools": ["string"],
+ "headers": {"foo": "string"},
+ "type": "url",
+ }
+ ],
metadata={"foo": "string"},
previous_interaction_id="previous_interaction_id",
source_policy={
@@ -201,6 +269,11 @@ async def test_method_create_with_all_params(self, async_client: AsyncParallel)
},
"input_schema": "string",
},
+ webhook={
+ "url": "url",
+ "event_types": ["task_run.status"],
+ },
+ betas=["mcp-server-2025-07-17"],
)
assert_matches_type(TaskRun, task_run, path=["response"])
@@ -268,6 +341,43 @@ async def test_path_params_retrieve(self, async_client: AsyncParallel) -> None:
"",
)
+ @parametrize
+ async def test_method_events(self, async_client: AsyncParallel) -> None:
+ task_run_stream = await async_client.task_run.events(
+ "run_id",
+ )
+ await task_run_stream.response.aclose()
+
+ @parametrize
+ async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
+ response = await async_client.task_run.with_raw_response.events(
+ "run_id",
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = await response.parse()
+ await stream.close()
+
+ @parametrize
+ async def test_streaming_response_events(self, async_client: AsyncParallel) -> None:
+ async with async_client.task_run.with_streaming_response.events(
+ "run_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = await response.parse()
+ await stream.close()
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_events(self, async_client: AsyncParallel) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.task_run.with_raw_response.events(
+ "",
+ )
+
@parametrize
async def test_method_result(self, async_client: AsyncParallel) -> None:
task_run = await async_client.task_run.result(
@@ -280,6 +390,7 @@ async def test_method_result_with_all_params(self, async_client: AsyncParallel)
task_run = await async_client.task_run.result(
run_id="run_id",
api_timeout=0,
+ betas=["mcp-server-2025-07-17"],
)
assert_matches_type(TaskRunResult, task_run, path=["response"])
diff --git a/tests/test_client.py b/tests/test_client.py
index c2b772f..5e022bd 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -429,6 +429,30 @@ def test_default_query_option(self) -> None:
client.close()
+ def test_hardcoded_query_params_in_url(self, client: Parallel) -> None:
+ request = client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true"))
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true"}
+
+ request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/foo?beta=true",
+ params={"limit": "10", "page": "abc"},
+ )
+ )
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"}
+
+ request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/files/a%2Fb?beta=true",
+ params={"limit": "10"},
+ )
+ )
+ assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10"
+
def test_request_extra_json(self, client: Parallel) -> None:
request = client._build_request(
FinalRequestOptions(
@@ -1334,6 +1358,30 @@ async def test_default_query_option(self) -> None:
await client.close()
+ async def test_hardcoded_query_params_in_url(self, async_client: AsyncParallel) -> None:
+ request = async_client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true"))
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true"}
+
+ request = async_client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/foo?beta=true",
+ params={"limit": "10", "page": "abc"},
+ )
+ )
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"}
+
+ request = async_client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/files/a%2Fb?beta=true",
+ params={"limit": "10"},
+ )
+ )
+ assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10"
+
def test_request_extra_json(self, client: Parallel) -> None:
request = client._build_request(
FinalRequestOptions(
diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py
deleted file mode 100644
index f6e9be1..0000000
--- a/tests/test_deepcopy.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from parallel._utils import deepcopy_minimal
-
-
-def assert_different_identities(obj1: object, obj2: object) -> None:
- assert obj1 == obj2
- assert id(obj1) != id(obj2)
-
-
-def test_simple_dict() -> None:
- obj1 = {"foo": "bar"}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
-
-
-def test_nested_dict() -> None:
- obj1 = {"foo": {"bar": True}}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1["foo"], obj2["foo"])
-
-
-def test_complex_nested_dict() -> None:
- obj1 = {"foo": {"bar": [{"hello": "world"}]}}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1["foo"], obj2["foo"])
- assert_different_identities(obj1["foo"]["bar"], obj2["foo"]["bar"])
- assert_different_identities(obj1["foo"]["bar"][0], obj2["foo"]["bar"][0])
-
-
-def test_simple_list() -> None:
- obj1 = ["a", "b", "c"]
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
-
-
-def test_nested_list() -> None:
- obj1 = ["a", [1, 2, 3]]
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1[1], obj2[1])
-
-
-class MyObject: ...
-
-
-def test_ignores_other_types() -> None:
- # custom classes
- my_obj = MyObject()
- obj1 = {"foo": my_obj}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert obj1["foo"] is my_obj
-
- # tuples
- obj3 = ("a", "b")
- obj4 = deepcopy_minimal(obj3)
- assert obj3 is obj4
diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py
index ad0eca3..d9bb4c0 100644
--- a/tests/test_extract_files.py
+++ b/tests/test_extract_files.py
@@ -35,6 +35,15 @@ def test_multiple_files() -> None:
assert query == {"documents": [{}, {}]}
+def test_top_level_file_array() -> None:
+ query = {"files": [b"file one", b"file two"], "title": "hello"}
+ assert extract_files(query, paths=[["files", ""]]) == [
+ ("files[]", b"file one"),
+ ("files[]", b"file two"),
+ ]
+ assert query == {"title": "hello"}
+
+
@pytest.mark.parametrize(
"query,paths,expected",
[
diff --git a/tests/test_files.py b/tests/test_files.py
index 9cd16d8..f488947 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -4,7 +4,8 @@
import pytest
from dirty_equals import IsDict, IsList, IsBytes, IsTuple
-from parallel._files import to_httpx_files, async_to_httpx_files
+from parallel._files import to_httpx_files, deepcopy_with_paths, async_to_httpx_files
+from parallel._utils import extract_files
readme_path = Path(__file__).parent.parent.joinpath("README.md")
@@ -49,3 +50,99 @@ def test_string_not_allowed() -> None:
"file": "foo", # type: ignore
}
)
+
+
+def assert_different_identities(obj1: object, obj2: object) -> None:
+ assert obj1 == obj2
+ assert obj1 is not obj2
+
+
+class TestDeepcopyWithPaths:
+ def test_copies_top_level_dict(self) -> None:
+ original = {"file": b"data", "other": "value"}
+ result = deepcopy_with_paths(original, [["file"]])
+ assert_different_identities(result, original)
+
+ def test_file_value_is_same_reference(self) -> None:
+ file_bytes = b"contents"
+ original = {"file": file_bytes}
+ result = deepcopy_with_paths(original, [["file"]])
+ assert_different_identities(result, original)
+ assert result["file"] is file_bytes
+
+ def test_list_popped_wholesale(self) -> None:
+ files = [b"f1", b"f2"]
+ original = {"files": files, "title": "t"}
+ result = deepcopy_with_paths(original, [["files", ""]])
+ assert_different_identities(result, original)
+ result_files = result["files"]
+ assert isinstance(result_files, list)
+ assert_different_identities(result_files, files)
+
+ def test_nested_array_path_copies_list_and_elements(self) -> None:
+ elem1 = {"file": b"f1", "extra": 1}
+ elem2 = {"file": b"f2", "extra": 2}
+ original = {"items": [elem1, elem2]}
+ result = deepcopy_with_paths(original, [["items", "", "file"]])
+ assert_different_identities(result, original)
+ result_items = result["items"]
+ assert isinstance(result_items, list)
+ assert_different_identities(result_items, original["items"])
+ assert_different_identities(result_items[0], elem1)
+ assert_different_identities(result_items[1], elem2)
+
+ def test_empty_paths_returns_same_object(self) -> None:
+ original = {"foo": "bar"}
+ result = deepcopy_with_paths(original, [])
+ assert result is original
+
+ def test_multiple_paths(self) -> None:
+ f1 = b"file1"
+ f2 = b"file2"
+ original = {"a": f1, "b": f2, "c": "unchanged"}
+ result = deepcopy_with_paths(original, [["a"], ["b"]])
+ assert_different_identities(result, original)
+ assert result["a"] is f1
+ assert result["b"] is f2
+ assert result["c"] is original["c"]
+
+ def test_extract_files_does_not_mutate_original_top_level(self) -> None:
+ file_bytes = b"contents"
+ original = {"file": file_bytes, "other": "value"}
+
+ copied = deepcopy_with_paths(original, [["file"]])
+ extracted = extract_files(copied, paths=[["file"]])
+
+ assert extracted == [("file", file_bytes)]
+ assert original == {"file": file_bytes, "other": "value"}
+ assert copied == {"other": "value"}
+
+ def test_extract_files_does_not_mutate_original_nested_array_path(self) -> None:
+ file1 = b"f1"
+ file2 = b"f2"
+ original = {
+ "items": [
+ {"file": file1, "extra": 1},
+ {"file": file2, "extra": 2},
+ ],
+ "title": "example",
+ }
+
+ copied = deepcopy_with_paths(original, [["items", "", "file"]])
+ extracted = extract_files(copied, paths=[["items", "", "file"]])
+
+ assert extracted == [("items[][file]", file1), ("items[][file]", file2)]
+ assert original == {
+ "items": [
+ {"file": file1, "extra": 1},
+ {"file": file2, "extra": 2},
+ ],
+ "title": "example",
+ }
+ assert copied == {
+ "items": [
+ {"extra": 1},
+ {"extra": 2},
+ ],
+ "title": "example",
+ }
diff --git a/tests/test_utils/test_path.py b/tests/test_utils/test_path.py
new file mode 100644
index 0000000..c0364ff
--- /dev/null
+++ b/tests/test_utils/test_path.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+from typing import Any
+
+import pytest
+
+from parallel._utils._path import path_template
+
+
+@pytest.mark.parametrize(
+ "template, kwargs, expected",
+ [
+ ("/v1/{id}", dict(id="abc"), "/v1/abc"),
+ ("/v1/{a}/{b}", dict(a="x", b="y"), "/v1/x/y"),
+ ("/v1/{a}{b}/path/{c}?val={d}#{e}", dict(a="x", b="y", c="z", d="u", e="v"), "/v1/xy/path/z?val=u#v"),
+ ("/{w}/{w}", dict(w="echo"), "/echo/echo"),
+ ("/v1/static", {}, "/v1/static"),
+ ("", {}, ""),
+ ("/v1/?q={n}&count=10", dict(n=42), "/v1/?q=42&count=10"),
+ ("/v1/{v}", dict(v=None), "/v1/null"),
+ ("/v1/{v}", dict(v=True), "/v1/true"),
+ ("/v1/{v}", dict(v=False), "/v1/false"),
+ ("/v1/{v}", dict(v=".hidden"), "/v1/.hidden"), # dot prefix ok
+ ("/v1/{v}", dict(v="file.txt"), "/v1/file.txt"), # dot in middle ok
+ ("/v1/{v}", dict(v="..."), "/v1/..."), # triple dot ok
+ ("/v1/{a}{b}", dict(a=".", b="txt"), "/v1/.txt"), # dot var combining with adjacent to be ok
+ ("/items?q={v}#{f}", dict(v=".", f=".."), "/items?q=.#.."), # dots in query/fragment are fine
+ (
+ "/v1/{a}?query={b}",
+ dict(a="../../other/endpoint", b="a&bad=true"),
+ "/v1/..%2F..%2Fother%2Fendpoint?query=a%26bad%3Dtrue",
+ ),
+ ("/v1/{val}", dict(val="a/b/c"), "/v1/a%2Fb%2Fc"),
+ ("/v1/{val}", dict(val="a/b/c?query=value"), "/v1/a%2Fb%2Fc%3Fquery=value"),
+ ("/v1/{val}", dict(val="a/b/c?query=value&bad=true"), "/v1/a%2Fb%2Fc%3Fquery=value&bad=true"),
+ ("/v1/{val}", dict(val="%20"), "/v1/%2520"), # escapes escape sequences in input
+ # Query: slash and ? are safe, # is not
+ ("/items?q={v}", dict(v="a/b"), "/items?q=a/b"),
+ ("/items?q={v}", dict(v="a?b"), "/items?q=a?b"),
+ ("/items?q={v}", dict(v="a#b"), "/items?q=a%23b"),
+ ("/items?q={v}", dict(v="a b"), "/items?q=a%20b"),
+ # Fragment: slash and ? are safe
+ ("/docs#{v}", dict(v="a/b"), "/docs#a/b"),
+ ("/docs#{v}", dict(v="a?b"), "/docs#a?b"),
+ # Path: slash, ? and # are all encoded
+ ("/v1/{v}", dict(v="a/b"), "/v1/a%2Fb"),
+ ("/v1/{v}", dict(v="a?b"), "/v1/a%3Fb"),
+ ("/v1/{v}", dict(v="a#b"), "/v1/a%23b"),
+ # same var encoded differently by component
+ (
+ "/v1/{v}?q={v}#{v}",
+ dict(v="a/b?c#d"),
+ "/v1/a%2Fb%3Fc%23d?q=a/b?c%23d#a/b?c%23d",
+ ),
+ ("/v1/{val}", dict(val="x?admin=true"), "/v1/x%3Fadmin=true"), # query injection
+ ("/v1/{val}", dict(val="x#admin"), "/v1/x%23admin"), # fragment injection
+ ],
+)
+def test_interpolation(template: str, kwargs: dict[str, Any], expected: str) -> None:
+ assert path_template(template, **kwargs) == expected
+
+
+def test_missing_kwarg_raises_key_error() -> None:
+ with pytest.raises(KeyError, match="org_id"):
+ path_template("/v1/{org_id}")
+
+
+@pytest.mark.parametrize(
+ "template, kwargs",
+ [
+ ("{a}/path", dict(a=".")),
+ ("{a}/path", dict(a="..")),
+ ("/v1/{a}", dict(a=".")),
+ ("/v1/{a}", dict(a="..")),
+ ("/v1/{a}/path", dict(a=".")),
+ ("/v1/{a}/path", dict(a="..")),
+ ("/v1/{a}{b}", dict(a=".", b=".")), # adjacent vars → ".."
+ ("/v1/{a}.", dict(a=".")), # var + static → ".."
+ ("/v1/{a}{b}", dict(a="", b=".")), # empty + dot → "."
+ ("/v1/%2e/{x}", dict(x="ok")), # encoded dot in static text
+ ("/v1/%2e./{x}", dict(x="ok")), # mixed encoded ".." in static
+ ("/v1/.%2E/{x}", dict(x="ok")), # mixed encoded ".." in static
+ ("/v1/{v}?q=1", dict(v="..")),
+ ("/v1/{v}#frag", dict(v="..")),
+ ],
+)
+def test_dot_segment_rejected(template: str, kwargs: dict[str, Any]) -> None:
+ with pytest.raises(ValueError, match="dot-segment"):
+ path_template(template, **kwargs)