From fa75affffb701259be14445da95c77a1cdde512b Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 15 Aug 2025 21:40:13 +0000
Subject: [PATCH 1/4] feat(api): manual updates
---
.stats.yml | 4 +-
README.md | 17 +++
api.md | 2 +
src/supermemory/_files.py | 2 +-
src/supermemory/resources/memories.py | 111 +++++++++++++++++-
src/supermemory/types/__init__.py | 2 +
.../types/memory_upload_file_params.py | 16 +++
.../types/memory_upload_file_response.py | 11 ++
tests/api_resources/test_memories.py | 87 ++++++++++++++
9 files changed, 245 insertions(+), 7 deletions(-)
create mode 100644 src/supermemory/types/memory_upload_file_params.py
create mode 100644 src/supermemory/types/memory_upload_file_response.py
diff --git a/.stats.yml b/.stats.yml
index 476f4440..475fc769 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 16
+configured_endpoints: 17
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/supermemory--inc%2Fsupermemory-new-d52acd1a525b4bfe9f4befcc3a645f5d1289d75e7bad999cf1330e539b2ed84e.yml
openapi_spec_hash: c34df5406cfa4d245812d30f99d28116
-config_hash: be10c837d5319a33f30809a3ec223caf
+config_hash: 38ef57207cda6bd69b47a02098e2fdb0
diff --git a/README.md b/README.md
index 4c0fefb1..5be481b3 100644
--- a/README.md
+++ b/README.md
@@ -111,6 +111,23 @@ Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typ
Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.
+## File uploads
+
+Request parameters that correspond to file uploads can be passed as `bytes`, or a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`.
+
+```python
+from pathlib import Path
+from supermemory import Supermemory
+
+client = Supermemory()
+
+client.memories.upload_file(
+ file=Path("/path/to/file"),
+)
+```
+
+The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically.
+
## Handling errors
When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `supermemory.APIConnectionError` is raised.
diff --git a/api.md b/api.md
index b20d9d9a..0748fa80 100644
--- a/api.md
+++ b/api.md
@@ -8,6 +8,7 @@ from supermemory.types import (
MemoryListResponse,
MemoryAddResponse,
MemoryGetResponse,
+ MemoryUploadFileResponse,
)
```
@@ -18,6 +19,7 @@ Methods:
- client.memories.delete(id) -> None
- client.memories.add(\*\*params) -> MemoryAddResponse
- client.memories.get(id) -> MemoryGetResponse
+- client.memories.upload_file(\*\*params) -> MemoryUploadFileResponse
# Search
diff --git a/src/supermemory/_files.py b/src/supermemory/_files.py
index cc14c14f..ae7c4650 100644
--- a/src/supermemory/_files.py
+++ b/src/supermemory/_files.py
@@ -34,7 +34,7 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None:
if not is_file_content(obj):
prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`"
raise RuntimeError(
- f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead."
+ f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/supermemoryai/python-sdk/tree/main#file-uploads"
) from None
diff --git a/src/supermemory/resources/memories.py b/src/supermemory/resources/memories.py
index 0b34b676..1cf1c08e 100644
--- a/src/supermemory/resources/memories.py
+++ b/src/supermemory/resources/memories.py
@@ -2,14 +2,14 @@
from __future__ import annotations
-from typing import Dict, List, Union
+from typing import Dict, List, Union, Mapping, cast
from typing_extensions import Literal
import httpx
-from ..types import memory_add_params, memory_list_params, memory_update_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
+from ..types import memory_add_params, memory_list_params, memory_update_params, memory_upload_file_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, FileTypes
+from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -23,6 +23,7 @@
from ..types.memory_get_response import MemoryGetResponse
from ..types.memory_list_response import MemoryListResponse
from ..types.memory_update_response import MemoryUpdateResponse
+from ..types.memory_upload_file_response import MemoryUploadFileResponse
__all__ = ["MemoriesResource", "AsyncMemoriesResource"]
@@ -305,6 +306,51 @@ def get(
cast_to=MemoryGetResponse,
)
+ def upload_file(
+ self,
+ *,
+ file: FileTypes,
+ container_tags: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> MemoryUploadFileResponse:
+ """
+ Upload a file to be processed
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal(
+ {
+ "file": file,
+ "container_tags": container_tags,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return self._post(
+ "/v3/memories/file",
+ body=maybe_transform(body, memory_upload_file_params.MemoryUploadFileParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=MemoryUploadFileResponse,
+ )
+
class AsyncMemoriesResource(AsyncAPIResource):
@cached_property
@@ -584,6 +630,51 @@ async def get(
cast_to=MemoryGetResponse,
)
+ async def upload_file(
+ self,
+ *,
+ file: FileTypes,
+ container_tags: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> MemoryUploadFileResponse:
+ """
+ Upload a file to be processed
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal(
+ {
+ "file": file,
+ "container_tags": container_tags,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return await self._post(
+ "/v3/memories/file",
+ body=await async_maybe_transform(body, memory_upload_file_params.MemoryUploadFileParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=MemoryUploadFileResponse,
+ )
+
class MemoriesResourceWithRawResponse:
def __init__(self, memories: MemoriesResource) -> None:
@@ -604,6 +695,9 @@ def __init__(self, memories: MemoriesResource) -> None:
self.get = to_raw_response_wrapper(
memories.get,
)
+ self.upload_file = to_raw_response_wrapper(
+ memories.upload_file,
+ )
class AsyncMemoriesResourceWithRawResponse:
@@ -625,6 +719,9 @@ def __init__(self, memories: AsyncMemoriesResource) -> None:
self.get = async_to_raw_response_wrapper(
memories.get,
)
+ self.upload_file = async_to_raw_response_wrapper(
+ memories.upload_file,
+ )
class MemoriesResourceWithStreamingResponse:
@@ -646,6 +743,9 @@ def __init__(self, memories: MemoriesResource) -> None:
self.get = to_streamed_response_wrapper(
memories.get,
)
+ self.upload_file = to_streamed_response_wrapper(
+ memories.upload_file,
+ )
class AsyncMemoriesResourceWithStreamingResponse:
@@ -667,3 +767,6 @@ def __init__(self, memories: AsyncMemoriesResource) -> None:
self.get = async_to_streamed_response_wrapper(
memories.get,
)
+ self.upload_file = async_to_streamed_response_wrapper(
+ memories.upload_file,
+ )
diff --git a/src/supermemory/types/__init__.py b/src/supermemory/types/__init__.py
index 03f53719..8387f8e9 100644
--- a/src/supermemory/types/__init__.py
+++ b/src/supermemory/types/__init__.py
@@ -18,7 +18,9 @@
from .connection_create_params import ConnectionCreateParams as ConnectionCreateParams
from .connection_import_params import ConnectionImportParams as ConnectionImportParams
from .connection_list_response import ConnectionListResponse as ConnectionListResponse
+from .memory_upload_file_params import MemoryUploadFileParams as MemoryUploadFileParams
from .connection_create_response import ConnectionCreateResponse as ConnectionCreateResponse
+from .memory_upload_file_response import MemoryUploadFileResponse as MemoryUploadFileResponse
from .connection_get_by_id_response import ConnectionGetByIDResponse as ConnectionGetByIDResponse
from .connection_get_by_tags_params import ConnectionGetByTagsParams as ConnectionGetByTagsParams
from .connection_get_by_tags_response import ConnectionGetByTagsResponse as ConnectionGetByTagsResponse
diff --git a/src/supermemory/types/memory_upload_file_params.py b/src/supermemory/types/memory_upload_file_params.py
new file mode 100644
index 00000000..ce4dfc40
--- /dev/null
+++ b/src/supermemory/types/memory_upload_file_params.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, Annotated, TypedDict
+
+from .._types import FileTypes
+from .._utils import PropertyInfo
+
+__all__ = ["MemoryUploadFileParams"]
+
+
+class MemoryUploadFileParams(TypedDict, total=False):
+ file: Required[FileTypes]
+
+ container_tags: Annotated[str, PropertyInfo(alias="containerTags")]
diff --git a/src/supermemory/types/memory_upload_file_response.py b/src/supermemory/types/memory_upload_file_response.py
new file mode 100644
index 00000000..f67b958f
--- /dev/null
+++ b/src/supermemory/types/memory_upload_file_response.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["MemoryUploadFileResponse"]
+
+
+class MemoryUploadFileResponse(BaseModel):
+ id: str
+
+ status: str
diff --git a/tests/api_resources/test_memories.py b/tests/api_resources/test_memories.py
index e4229379..f3d88303 100644
--- a/tests/api_resources/test_memories.py
+++ b/tests/api_resources/test_memories.py
@@ -14,6 +14,7 @@
MemoryGetResponse,
MemoryListResponse,
MemoryUpdateResponse,
+ MemoryUploadFileResponse,
)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -254,6 +255,49 @@ def test_path_params_get(self, client: Supermemory) -> None:
"",
)
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_upload_file(self, client: Supermemory) -> None:
+ memory = client.memories.upload_file(
+ file=b"raw file contents",
+ )
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_upload_file_with_all_params(self, client: Supermemory) -> None:
+ memory = client.memories.upload_file(
+ file=b"raw file contents",
+ container_tags="containerTags",
+ )
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_upload_file(self, client: Supermemory) -> None:
+ response = client.memories.with_raw_response.upload_file(
+ file=b"raw file contents",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ memory = response.parse()
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_upload_file(self, client: Supermemory) -> None:
+ with client.memories.with_streaming_response.upload_file(
+ file=b"raw file contents",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ memory = response.parse()
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
class TestAsyncMemories:
parametrize = pytest.mark.parametrize(
@@ -491,3 +535,46 @@ async def test_path_params_get(self, async_client: AsyncSupermemory) -> None:
await async_client.memories.with_raw_response.get(
"",
)
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_upload_file(self, async_client: AsyncSupermemory) -> None:
+ memory = await async_client.memories.upload_file(
+ file=b"raw file contents",
+ )
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_upload_file_with_all_params(self, async_client: AsyncSupermemory) -> None:
+ memory = await async_client.memories.upload_file(
+ file=b"raw file contents",
+ container_tags="containerTags",
+ )
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_upload_file(self, async_client: AsyncSupermemory) -> None:
+ response = await async_client.memories.with_raw_response.upload_file(
+ file=b"raw file contents",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ memory = await response.parse()
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_upload_file(self, async_client: AsyncSupermemory) -> None:
+ async with async_client.memories.with_streaming_response.upload_file(
+ file=b"raw file contents",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ memory = await response.parse()
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
From 9e373ef0b585eb15cb04b95a1bab46c8c102970c Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 15 Aug 2025 21:40:37 +0000
Subject: [PATCH 2/4] feat(api): manual updates
---
.stats.yml | 4 +-
README.md | 22 +-
api.md | 5 +-
src/supermemory/resources/search.py | 193 ++++++++++++++++--
src/supermemory/types/__init__.py | 6 +-
...e_params.py => search_documents_params.py} | 4 +-
...sponse.py => search_documents_response.py} | 4 +-
.../types/search_memories_params.py | 66 ++++++
.../types/search_memories_response.py | 121 +++++++++++
tests/api_resources/test_search.py | 191 ++++++++++++++---
10 files changed, 558 insertions(+), 58 deletions(-)
rename src/supermemory/types/{search_execute_params.py => search_documents_params.py} (96%)
rename src/supermemory/types/{search_execute_response.py => search_documents_response.py} (92%)
create mode 100644 src/supermemory/types/search_memories_params.py
create mode 100644 src/supermemory/types/search_memories_response.py
diff --git a/.stats.yml b/.stats.yml
index 475fc769..4bb80bca 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 17
+configured_endpoints: 18
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/supermemory--inc%2Fsupermemory-new-d52acd1a525b4bfe9f4befcc3a645f5d1289d75e7bad999cf1330e539b2ed84e.yml
openapi_spec_hash: c34df5406cfa4d245812d30f99d28116
-config_hash: 38ef57207cda6bd69b47a02098e2fdb0
+config_hash: f305e457fd9ddd1dec2614b14d7936a5
diff --git a/README.md b/README.md
index 5be481b3..b2851573 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@ client = Supermemory(
api_key=os.environ.get("SUPERMEMORY_API_KEY"), # This is the default and can be omitted
)
-response = client.search.execute(
+response = client.search.documents(
q="documents related to python",
)
print(response.results)
@@ -58,7 +58,7 @@ client = AsyncSupermemory(
async def main() -> None:
- response = await client.search.execute(
+ response = await client.search.documents(
q="documents related to python",
)
print(response.results)
@@ -93,7 +93,7 @@ async def main() -> None:
api_key="My API Key",
http_client=DefaultAioHttpClient(),
) as client:
- response = await client.search.execute(
+ response = await client.search.documents(
q="documents related to python",
)
print(response.results)
@@ -111,6 +111,22 @@ Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typ
Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.
+## Nested params
+
+Nested parameters are dictionaries, typed using `TypedDict`, for example:
+
+```python
+from supermemory import Supermemory
+
+client = Supermemory()
+
+response = client.search.memories(
+ q="machine learning concepts",
+ include={},
+)
+print(response.include)
+```
+
## File uploads
Request parameters that correspond to file uploads can be passed as `bytes`, or a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`.
diff --git a/api.md b/api.md
index 0748fa80..34719710 100644
--- a/api.md
+++ b/api.md
@@ -26,12 +26,13 @@ Methods:
Types:
```python
-from supermemory.types import SearchExecuteResponse
+from supermemory.types import SearchDocumentsResponse, SearchMemoriesResponse
```
Methods:
-- client.search.execute(\*\*params) -> SearchExecuteResponse
+- client.search.documents(\*\*params) -> SearchDocumentsResponse
+- client.search.memories(\*\*params) -> SearchMemoriesResponse
# Settings
diff --git a/src/supermemory/resources/search.py b/src/supermemory/resources/search.py
index 5bb291fb..be0bc298 100644
--- a/src/supermemory/resources/search.py
+++ b/src/supermemory/resources/search.py
@@ -7,7 +7,7 @@
import httpx
-from ..types import search_execute_params
+from ..types import search_memories_params, search_documents_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
@@ -19,7 +19,8 @@
async_to_streamed_response_wrapper,
)
from .._base_client import make_request_options
-from ..types.search_execute_response import SearchExecuteResponse
+from ..types.search_memories_response import SearchMemoriesResponse
+from ..types.search_documents_response import SearchDocumentsResponse
__all__ = ["SearchResource", "AsyncSearchResource"]
@@ -44,7 +45,7 @@ def with_streaming_response(self) -> SearchResourceWithStreamingResponse:
"""
return SearchResourceWithStreamingResponse(self)
- def execute(
+ def documents(
self,
*,
q: str,
@@ -53,7 +54,7 @@ def execute(
container_tags: List[str] | NotGiven = NOT_GIVEN,
doc_id: str | NotGiven = NOT_GIVEN,
document_threshold: float | NotGiven = NOT_GIVEN,
- filters: search_execute_params.Filters | NotGiven = NOT_GIVEN,
+ filters: search_documents_params.Filters | NotGiven = NOT_GIVEN,
include_full_docs: bool | NotGiven = NOT_GIVEN,
include_summary: bool | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
@@ -66,7 +67,7 @@ def execute(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> SearchExecuteResponse:
+ ) -> SearchDocumentsResponse:
"""
Search memories with advanced filtering
@@ -135,12 +136,82 @@ def execute(
"rerank": rerank,
"rewrite_query": rewrite_query,
},
- search_execute_params.SearchExecuteParams,
+ search_documents_params.SearchDocumentsParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=SearchExecuteResponse,
+ cast_to=SearchDocumentsResponse,
+ )
+
+ def memories(
+ self,
+ *,
+ q: str,
+ container_tag: str | NotGiven = NOT_GIVEN,
+ filters: search_memories_params.Filters | NotGiven = NOT_GIVEN,
+ include: search_memories_params.Include | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ rerank: bool | NotGiven = NOT_GIVEN,
+ rewrite_query: bool | NotGiven = NOT_GIVEN,
+ threshold: float | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SearchMemoriesResponse:
+ """
+ Search memory entries - Low latency for conversational
+
+ Args:
+ q: Search query string
+
+ container_tag: Optional tag this search should be containerized by. This can be an ID for your
+ user, a project ID, or any other identifier you wish to use to filter memories.
+
+ filters: Optional filters to apply to the search
+
+ limit: Maximum number of results to return
+
+ rerank: If true, rerank the results based on the query. This is helpful if you want to
+ ensure the most relevant results are returned.
+
+ rewrite_query: If true, rewrites the query to make it easier to find documents. This increases
+ the latency by about 400ms
+
+ threshold: Threshold / sensitivity for memories selection. 0 is least sensitive (returns
+ most memories, more results), 1 is most sensitive (returns lesser memories,
+ accurate results)
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v4/search",
+ body=maybe_transform(
+ {
+ "q": q,
+ "container_tag": container_tag,
+ "filters": filters,
+ "include": include,
+ "limit": limit,
+ "rerank": rerank,
+ "rewrite_query": rewrite_query,
+ "threshold": threshold,
+ },
+ search_memories_params.SearchMemoriesParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SearchMemoriesResponse,
)
@@ -164,7 +235,7 @@ def with_streaming_response(self) -> AsyncSearchResourceWithStreamingResponse:
"""
return AsyncSearchResourceWithStreamingResponse(self)
- async def execute(
+ async def documents(
self,
*,
q: str,
@@ -173,7 +244,7 @@ async def execute(
container_tags: List[str] | NotGiven = NOT_GIVEN,
doc_id: str | NotGiven = NOT_GIVEN,
document_threshold: float | NotGiven = NOT_GIVEN,
- filters: search_execute_params.Filters | NotGiven = NOT_GIVEN,
+ filters: search_documents_params.Filters | NotGiven = NOT_GIVEN,
include_full_docs: bool | NotGiven = NOT_GIVEN,
include_summary: bool | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
@@ -186,7 +257,7 @@ async def execute(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> SearchExecuteResponse:
+ ) -> SearchDocumentsResponse:
"""
Search memories with advanced filtering
@@ -255,12 +326,82 @@ async def execute(
"rerank": rerank,
"rewrite_query": rewrite_query,
},
- search_execute_params.SearchExecuteParams,
+ search_documents_params.SearchDocumentsParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SearchDocumentsResponse,
+ )
+
+ async def memories(
+ self,
+ *,
+ q: str,
+ container_tag: str | NotGiven = NOT_GIVEN,
+ filters: search_memories_params.Filters | NotGiven = NOT_GIVEN,
+ include: search_memories_params.Include | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ rerank: bool | NotGiven = NOT_GIVEN,
+ rewrite_query: bool | NotGiven = NOT_GIVEN,
+ threshold: float | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SearchMemoriesResponse:
+ """
+ Search memory entries - Low latency for conversational
+
+ Args:
+ q: Search query string
+
+ container_tag: Optional tag this search should be containerized by. This can be an ID for your
+ user, a project ID, or any other identifier you wish to use to filter memories.
+
+ filters: Optional filters to apply to the search
+
+ limit: Maximum number of results to return
+
+ rerank: If true, rerank the results based on the query. This is helpful if you want to
+ ensure the most relevant results are returned.
+
+ rewrite_query: If true, rewrites the query to make it easier to find documents. This increases
+ the latency by about 400ms
+
+ threshold: Threshold / sensitivity for memories selection. 0 is least sensitive (returns
+ most memories, more results), 1 is most sensitive (returns lesser memories,
+ accurate results)
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v4/search",
+ body=await async_maybe_transform(
+ {
+ "q": q,
+ "container_tag": container_tag,
+ "filters": filters,
+ "include": include,
+ "limit": limit,
+ "rerank": rerank,
+ "rewrite_query": rewrite_query,
+ "threshold": threshold,
+ },
+ search_memories_params.SearchMemoriesParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=SearchExecuteResponse,
+ cast_to=SearchMemoriesResponse,
)
@@ -268,8 +409,11 @@ class SearchResourceWithRawResponse:
def __init__(self, search: SearchResource) -> None:
self._search = search
- self.execute = to_raw_response_wrapper(
- search.execute,
+ self.documents = to_raw_response_wrapper(
+ search.documents,
+ )
+ self.memories = to_raw_response_wrapper(
+ search.memories,
)
@@ -277,8 +421,11 @@ class AsyncSearchResourceWithRawResponse:
def __init__(self, search: AsyncSearchResource) -> None:
self._search = search
- self.execute = async_to_raw_response_wrapper(
- search.execute,
+ self.documents = async_to_raw_response_wrapper(
+ search.documents,
+ )
+ self.memories = async_to_raw_response_wrapper(
+ search.memories,
)
@@ -286,8 +433,11 @@ class SearchResourceWithStreamingResponse:
def __init__(self, search: SearchResource) -> None:
self._search = search
- self.execute = to_streamed_response_wrapper(
- search.execute,
+ self.documents = to_streamed_response_wrapper(
+ search.documents,
+ )
+ self.memories = to_streamed_response_wrapper(
+ search.memories,
)
@@ -295,6 +445,9 @@ class AsyncSearchResourceWithStreamingResponse:
def __init__(self, search: AsyncSearchResource) -> None:
self._search = search
- self.execute = async_to_streamed_response_wrapper(
- search.execute,
+ self.documents = async_to_streamed_response_wrapper(
+ search.documents,
+ )
+ self.memories = async_to_streamed_response_wrapper(
+ search.memories,
)
diff --git a/src/supermemory/types/__init__.py b/src/supermemory/types/__init__.py
index 8387f8e9..ecb3052c 100644
--- a/src/supermemory/types/__init__.py
+++ b/src/supermemory/types/__init__.py
@@ -9,16 +9,18 @@
from .memory_list_response import MemoryListResponse as MemoryListResponse
from .memory_update_params import MemoryUpdateParams as MemoryUpdateParams
from .setting_get_response import SettingGetResponse as SettingGetResponse
-from .search_execute_params import SearchExecuteParams as SearchExecuteParams
from .setting_update_params import SettingUpdateParams as SettingUpdateParams
from .connection_list_params import ConnectionListParams as ConnectionListParams
from .memory_update_response import MemoryUpdateResponse as MemoryUpdateResponse
-from .search_execute_response import SearchExecuteResponse as SearchExecuteResponse
+from .search_memories_params import SearchMemoriesParams as SearchMemoriesParams
+from .search_documents_params import SearchDocumentsParams as SearchDocumentsParams
from .setting_update_response import SettingUpdateResponse as SettingUpdateResponse
from .connection_create_params import ConnectionCreateParams as ConnectionCreateParams
from .connection_import_params import ConnectionImportParams as ConnectionImportParams
from .connection_list_response import ConnectionListResponse as ConnectionListResponse
+from .search_memories_response import SearchMemoriesResponse as SearchMemoriesResponse
from .memory_upload_file_params import MemoryUploadFileParams as MemoryUploadFileParams
+from .search_documents_response import SearchDocumentsResponse as SearchDocumentsResponse
from .connection_create_response import ConnectionCreateResponse as ConnectionCreateResponse
from .memory_upload_file_response import MemoryUploadFileResponse as MemoryUploadFileResponse
from .connection_get_by_id_response import ConnectionGetByIDResponse as ConnectionGetByIDResponse
diff --git a/src/supermemory/types/search_execute_params.py b/src/supermemory/types/search_documents_params.py
similarity index 96%
rename from src/supermemory/types/search_execute_params.py
rename to src/supermemory/types/search_documents_params.py
index db48a814..b1b2d874 100644
--- a/src/supermemory/types/search_execute_params.py
+++ b/src/supermemory/types/search_documents_params.py
@@ -7,10 +7,10 @@
from .._utils import PropertyInfo
-__all__ = ["SearchExecuteParams", "Filters", "FiltersUnionMember0"]
+__all__ = ["SearchDocumentsParams", "Filters", "FiltersUnionMember0"]
-class SearchExecuteParams(TypedDict, total=False):
+class SearchDocumentsParams(TypedDict, total=False):
q: Required[str]
"""Search query string"""
diff --git a/src/supermemory/types/search_execute_response.py b/src/supermemory/types/search_documents_response.py
similarity index 92%
rename from src/supermemory/types/search_execute_response.py
rename to src/supermemory/types/search_documents_response.py
index c1844442..04ce581e 100644
--- a/src/supermemory/types/search_execute_response.py
+++ b/src/supermemory/types/search_documents_response.py
@@ -7,7 +7,7 @@
from .._models import BaseModel
-__all__ = ["SearchExecuteResponse", "Result", "ResultChunk"]
+__all__ = ["SearchDocumentsResponse", "Result", "ResultChunk"]
class ResultChunk(BaseModel):
@@ -53,7 +53,7 @@ class Result(BaseModel):
"""Document summary"""
-class SearchExecuteResponse(BaseModel):
+class SearchDocumentsResponse(BaseModel):
results: List[Result]
timing: float
diff --git a/src/supermemory/types/search_memories_params.py b/src/supermemory/types/search_memories_params.py
new file mode 100644
index 00000000..060184d1
--- /dev/null
+++ b/src/supermemory/types/search_memories_params.py
@@ -0,0 +1,66 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable
+from typing_extensions import Required, Annotated, TypeAlias, TypedDict
+
+from .._utils import PropertyInfo
+
+__all__ = ["SearchMemoriesParams", "Filters", "FiltersUnionMember0", "Include"]
+
+
+class SearchMemoriesParams(TypedDict, total=False):
+ q: Required[str]
+ """Search query string"""
+
+ container_tag: Annotated[str, PropertyInfo(alias="containerTag")]
+ """Optional tag this search should be containerized by.
+
+ This can be an ID for your user, a project ID, or any other identifier you wish
+ to use to filter memories.
+ """
+
+ filters: Filters
+ """Optional filters to apply to the search"""
+
+ include: Include
+
+ limit: int
+ """Maximum number of results to return"""
+
+ rerank: bool
+ """If true, rerank the results based on the query.
+
+ This is helpful if you want to ensure the most relevant results are returned.
+ """
+
+ rewrite_query: Annotated[bool, PropertyInfo(alias="rewriteQuery")]
+ """If true, rewrites the query to make it easier to find documents.
+
+ This increases the latency by about 400ms
+ """
+
+ threshold: float
+ """Threshold / sensitivity for memory selection.
+
+ 0 is least sensitive (returns most memories, more results), 1 is most sensitive
+ (returns fewer memories, accurate results)
+ """
+
+
+class FiltersUnionMember0(TypedDict, total=False):
+ and_: Annotated[Iterable[object], PropertyInfo(alias="AND")]
+
+ or_: Annotated[Iterable[object], PropertyInfo(alias="OR")]
+
+
+Filters: TypeAlias = Union[FiltersUnionMember0, Dict[str, object]]
+
+
+class Include(TypedDict, total=False):
+ documents: bool
+
+ related_memories: Annotated[bool, PropertyInfo(alias="relatedMemories")]
+
+ summaries: bool
diff --git a/src/supermemory/types/search_memories_response.py b/src/supermemory/types/search_memories_response.py
new file mode 100644
index 00000000..9e18a749
--- /dev/null
+++ b/src/supermemory/types/search_memories_response.py
@@ -0,0 +1,121 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = [
+ "SearchMemoriesResponse",
+ "Result",
+ "ResultContext",
+ "ResultContextChild",
+ "ResultContextParent",
+ "ResultDocument",
+]
+
+
+class ResultContextChild(BaseModel):
+ memory: str
+ """The contextual memory content"""
+
+ relation: Literal["updates", "extends", "derives"]
+ """Relation type between this memory and its parent/child"""
+
+ updated_at: datetime = FieldInfo(alias="updatedAt")
+ """Contextual memory last update date"""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Contextual memory metadata"""
+
+ version: Optional[float] = None
+ """
+ Relative version distance from the primary memory (+1 for direct child, +2 for
+ grand-child, etc.)
+ """
+
+
+class ResultContextParent(BaseModel):
+ memory: str
+ """The contextual memory content"""
+
+ relation: Literal["updates", "extends", "derives"]
+ """Relation type between this memory and its parent/child"""
+
+ updated_at: datetime = FieldInfo(alias="updatedAt")
+ """Contextual memory last update date"""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Contextual memory metadata"""
+
+ version: Optional[float] = None
+ """
+ Relative version distance from the primary memory (-1 for direct parent, -2 for
+ grand-parent, etc.)
+ """
+
+
+class ResultContext(BaseModel):
+ children: Optional[List[ResultContextChild]] = None
+
+ parents: Optional[List[ResultContextParent]] = None
+
+
+class ResultDocument(BaseModel):
+ id: str
+ """Document ID"""
+
+ created_at: datetime = FieldInfo(alias="createdAt")
+ """Document creation date"""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Document metadata"""
+
+ title: str
+ """Document title"""
+
+ type: str
+ """Document type"""
+
+ updated_at: datetime = FieldInfo(alias="updatedAt")
+ """Document last update date"""
+
+
+class Result(BaseModel):
+ id: str
+ """Memory entry ID"""
+
+ memory: str
+ """The memory content"""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Memory metadata"""
+
+ similarity: float
+ """Similarity score between the query and memory entry"""
+
+ updated_at: datetime = FieldInfo(alias="updatedAt")
+ """Memory last update date"""
+
+ context: Optional[ResultContext] = None
+ """Object containing arrays of parent and child contextual memories"""
+
+ documents: Optional[List[ResultDocument]] = None
+ """Associated documents for this memory entry"""
+
+ version: Optional[float] = None
+ """Version number of this memory entry"""
+
+
+class SearchMemoriesResponse(BaseModel):
+ results: List[Result]
+ """Array of matching memory entries with similarity scores"""
+
+ timing: float
+ """Search execution time in milliseconds"""
+
+ total: float
+ """Total number of results returned"""
diff --git a/tests/api_resources/test_search.py b/tests/api_resources/test_search.py
index 1caa6b76..00f6ad4a 100644
--- a/tests/api_resources/test_search.py
+++ b/tests/api_resources/test_search.py
@@ -9,7 +9,10 @@
from supermemory import Supermemory, AsyncSupermemory
from tests.utils import assert_matches_type
-from supermemory.types import SearchExecuteResponse
+from supermemory.types import (
+ SearchMemoriesResponse,
+ SearchDocumentsResponse,
+)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -19,16 +22,16 @@ class TestSearch:
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_method_execute(self, client: Supermemory) -> None:
- search = client.search.execute(
+ def test_method_documents(self, client: Supermemory) -> None:
+ search = client.search.documents(
q="machine learning concepts",
)
- assert_matches_type(SearchExecuteResponse, search, path=["response"])
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_method_execute_with_all_params(self, client: Supermemory) -> None:
- search = client.search.execute(
+ def test_method_documents_with_all_params(self, client: Supermemory) -> None:
+ search = client.search.documents(
q="machine learning concepts",
categories_filter=["technology", "science"],
chunk_threshold=0.5,
@@ -59,31 +62,100 @@ def test_method_execute_with_all_params(self, client: Supermemory) -> None:
rerank=False,
rewrite_query=False,
)
- assert_matches_type(SearchExecuteResponse, search, path=["response"])
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_raw_response_execute(self, client: Supermemory) -> None:
- response = client.search.with_raw_response.execute(
+ def test_raw_response_documents(self, client: Supermemory) -> None:
+ response = client.search.with_raw_response.documents(
q="machine learning concepts",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
search = response.parse()
- assert_matches_type(SearchExecuteResponse, search, path=["response"])
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_streaming_response_execute(self, client: Supermemory) -> None:
- with client.search.with_streaming_response.execute(
+ def test_streaming_response_documents(self, client: Supermemory) -> None:
+ with client.search.with_streaming_response.documents(
q="machine learning concepts",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
search = response.parse()
- assert_matches_type(SearchExecuteResponse, search, path=["response"])
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_memories(self, client: Supermemory) -> None:
+ search = client.search.memories(
+ q="machine learning concepts",
+ )
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_memories_with_all_params(self, client: Supermemory) -> None:
+ search = client.search.memories(
+ q="machine learning concepts",
+ container_tag="user_123",
+ filters={
+ "and_": [
+ {
+ "key": "group",
+ "negate": False,
+ "value": "jira_users",
+ },
+ {
+ "filterType": "numeric",
+ "key": "timestamp",
+ "negate": False,
+ "numericOperator": ">",
+ "value": "1742745777",
+ },
+ ],
+ "or_": [{}],
+ },
+ include={
+ "documents": True,
+ "related_memories": True,
+ "summaries": True,
+ },
+ limit=10,
+ rerank=False,
+ rewrite_query=False,
+ threshold=0.5,
+ )
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_memories(self, client: Supermemory) -> None:
+ response = client.search.with_raw_response.memories(
+ q="machine learning concepts",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ search = response.parse()
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_memories(self, client: Supermemory) -> None:
+ with client.search.with_streaming_response.memories(
+ q="machine learning concepts",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ search = response.parse()
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -95,16 +167,16 @@ class TestAsyncSearch:
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_method_execute(self, async_client: AsyncSupermemory) -> None:
- search = await async_client.search.execute(
+ async def test_method_documents(self, async_client: AsyncSupermemory) -> None:
+ search = await async_client.search.documents(
q="machine learning concepts",
)
- assert_matches_type(SearchExecuteResponse, search, path=["response"])
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_method_execute_with_all_params(self, async_client: AsyncSupermemory) -> None:
- search = await async_client.search.execute(
+ async def test_method_documents_with_all_params(self, async_client: AsyncSupermemory) -> None:
+ search = await async_client.search.documents(
q="machine learning concepts",
categories_filter=["technology", "science"],
chunk_threshold=0.5,
@@ -135,30 +207,99 @@ async def test_method_execute_with_all_params(self, async_client: AsyncSupermemo
rerank=False,
rewrite_query=False,
)
- assert_matches_type(SearchExecuteResponse, search, path=["response"])
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_documents(self, async_client: AsyncSupermemory) -> None:
+ response = await async_client.search.with_raw_response.documents(
+ q="machine learning concepts",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ search = await response.parse()
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_documents(self, async_client: AsyncSupermemory) -> None:
+ async with async_client.search.with_streaming_response.documents(
+ q="machine learning concepts",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ search = await response.parse()
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_memories(self, async_client: AsyncSupermemory) -> None:
+ search = await async_client.search.memories(
+ q="machine learning concepts",
+ )
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_memories_with_all_params(self, async_client: AsyncSupermemory) -> None:
+ search = await async_client.search.memories(
+ q="machine learning concepts",
+ container_tag="user_123",
+ filters={
+ "and_": [
+ {
+ "key": "group",
+ "negate": False,
+ "value": "jira_users",
+ },
+ {
+ "filterType": "numeric",
+ "key": "timestamp",
+ "negate": False,
+ "numericOperator": ">",
+ "value": "1742745777",
+ },
+ ],
+ "or_": [{}],
+ },
+ include={
+ "documents": True,
+ "related_memories": True,
+ "summaries": True,
+ },
+ limit=10,
+ rerank=False,
+ rewrite_query=False,
+ threshold=0.5,
+ )
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_raw_response_execute(self, async_client: AsyncSupermemory) -> None:
- response = await async_client.search.with_raw_response.execute(
+ async def test_raw_response_memories(self, async_client: AsyncSupermemory) -> None:
+ response = await async_client.search.with_raw_response.memories(
q="machine learning concepts",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
search = await response.parse()
- assert_matches_type(SearchExecuteResponse, search, path=["response"])
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_streaming_response_execute(self, async_client: AsyncSupermemory) -> None:
- async with async_client.search.with_streaming_response.execute(
+ async def test_streaming_response_memories(self, async_client: AsyncSupermemory) -> None:
+ async with async_client.search.with_streaming_response.memories(
q="machine learning concepts",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
search = await response.parse()
- assert_matches_type(SearchExecuteResponse, search, path=["response"])
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
assert cast(Any, response.is_closed) is True
From c2623b2b645eefd7e2cbb5027eb5a46cee7b62eb Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 15 Aug 2025 21:41:00 +0000
Subject: [PATCH 3/4] feat(api): manual updates
---
.stats.yml | 2 +-
api.md | 3 +-
src/supermemory/resources/search.py | 213 +++++++++++++++++-
src/supermemory/types/__init__.py | 2 +
.../types/search_execute_params.py | 93 ++++++++
.../types/search_execute_response.py | 61 +++++
tests/api_resources/test_search.py | 141 ++++++++++++
7 files changed, 512 insertions(+), 3 deletions(-)
create mode 100644 src/supermemory/types/search_execute_params.py
create mode 100644 src/supermemory/types/search_execute_response.py
diff --git a/.stats.yml b/.stats.yml
index 4bb80bca..bc5bbced 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 18
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/supermemory--inc%2Fsupermemory-new-d52acd1a525b4bfe9f4befcc3a645f5d1289d75e7bad999cf1330e539b2ed84e.yml
openapi_spec_hash: c34df5406cfa4d245812d30f99d28116
-config_hash: f305e457fd9ddd1dec2614b14d7936a5
+config_hash: 9b9291a6c872b063900a46386729ba3c
diff --git a/api.md b/api.md
index 34719710..eadef78d 100644
--- a/api.md
+++ b/api.md
@@ -26,12 +26,13 @@ Methods:
Types:
```python
-from supermemory.types import SearchDocumentsResponse, SearchMemoriesResponse
+from supermemory.types import SearchDocumentsResponse, SearchExecuteResponse, SearchMemoriesResponse
```
Methods:
- client.search.documents(\*\*params) -> SearchDocumentsResponse
+- client.search.execute(\*\*params) -> SearchExecuteResponse
- client.search.memories(\*\*params) -> SearchMemoriesResponse
# Settings
diff --git a/src/supermemory/resources/search.py b/src/supermemory/resources/search.py
index be0bc298..bb9e8ff9 100644
--- a/src/supermemory/resources/search.py
+++ b/src/supermemory/resources/search.py
@@ -7,7 +7,7 @@
import httpx
-from ..types import search_memories_params, search_documents_params
+from ..types import search_execute_params, search_memories_params, search_documents_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
@@ -19,6 +19,7 @@
async_to_streamed_response_wrapper,
)
from .._base_client import make_request_options
+from ..types.search_execute_response import SearchExecuteResponse
from ..types.search_memories_response import SearchMemoriesResponse
from ..types.search_documents_response import SearchDocumentsResponse
@@ -144,6 +145,105 @@ def documents(
cast_to=SearchDocumentsResponse,
)
+ def execute(
+ self,
+ *,
+ q: str,
+ categories_filter: List[Literal["technology", "science", "business", "health"]] | NotGiven = NOT_GIVEN,
+ chunk_threshold: float | NotGiven = NOT_GIVEN,
+ container_tags: List[str] | NotGiven = NOT_GIVEN,
+ doc_id: str | NotGiven = NOT_GIVEN,
+ document_threshold: float | NotGiven = NOT_GIVEN,
+ filters: search_execute_params.Filters | NotGiven = NOT_GIVEN,
+ include_full_docs: bool | NotGiven = NOT_GIVEN,
+ include_summary: bool | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ only_matching_chunks: bool | NotGiven = NOT_GIVEN,
+ rerank: bool | NotGiven = NOT_GIVEN,
+ rewrite_query: bool | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SearchExecuteResponse:
+ """
+ Search memories with advanced filtering
+
+ Args:
+ q: Search query string
+
+ categories_filter: Optional category filters
+
+ chunk_threshold: Threshold / sensitivity for chunk selection. 0 is least sensitive (returns most
+ chunks, more results), 1 is most sensitive (returns lesser chunks, accurate
+ results)
+
+ container_tags: Optional tags this search should be containerized by. This can be an ID for your
+ user, a project ID, or any other identifier you wish to use to filter memories.
+
+ doc_id: Optional document ID to search within. You can use this to find chunks in a very
+ large document.
+
+ document_threshold: Threshold / sensitivity for document selection. 0 is least sensitive (returns
+ most documents, more results), 1 is most sensitive (returns fewer documents,
+ accurate results)
+
+ filters: Optional filters to apply to the search
+
+ include_full_docs: If true, include full document in the response. This is helpful if you want a
+ chatbot to know the full context of the document.
+
+ include_summary: If true, include document summary in the response. This is helpful if you want a
+ chatbot to know the full context of the document.
+
+ limit: Maximum number of results to return
+
+ only_matching_chunks: If true, only return matching chunks without context. Normally, we send the
+ previous and next chunk to provide more context for LLMs. If you only want the
+ matching chunk, set this to true.
+
+ rerank: If true, rerank the results based on the query. This is helpful if you want to
+ ensure the most relevant results are returned.
+
+ rewrite_query: If true, rewrites the query to make it easier to find documents. This increases
+ the latency by about 400ms
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v3/search",
+ body=maybe_transform(
+ {
+ "q": q,
+ "categories_filter": categories_filter,
+ "chunk_threshold": chunk_threshold,
+ "container_tags": container_tags,
+ "doc_id": doc_id,
+ "document_threshold": document_threshold,
+ "filters": filters,
+ "include_full_docs": include_full_docs,
+ "include_summary": include_summary,
+ "limit": limit,
+ "only_matching_chunks": only_matching_chunks,
+ "rerank": rerank,
+ "rewrite_query": rewrite_query,
+ },
+ search_execute_params.SearchExecuteParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SearchExecuteResponse,
+ )
+
def memories(
self,
*,
@@ -334,6 +434,105 @@ async def documents(
cast_to=SearchDocumentsResponse,
)
+ async def execute(
+ self,
+ *,
+ q: str,
+ categories_filter: List[Literal["technology", "science", "business", "health"]] | NotGiven = NOT_GIVEN,
+ chunk_threshold: float | NotGiven = NOT_GIVEN,
+ container_tags: List[str] | NotGiven = NOT_GIVEN,
+ doc_id: str | NotGiven = NOT_GIVEN,
+ document_threshold: float | NotGiven = NOT_GIVEN,
+ filters: search_execute_params.Filters | NotGiven = NOT_GIVEN,
+ include_full_docs: bool | NotGiven = NOT_GIVEN,
+ include_summary: bool | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ only_matching_chunks: bool | NotGiven = NOT_GIVEN,
+ rerank: bool | NotGiven = NOT_GIVEN,
+ rewrite_query: bool | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SearchExecuteResponse:
+ """
+ Search memories with advanced filtering
+
+ Args:
+ q: Search query string
+
+ categories_filter: Optional category filters
+
+ chunk_threshold: Threshold / sensitivity for chunk selection. 0 is least sensitive (returns most
+ chunks, more results), 1 is most sensitive (returns fewer chunks, accurate
+ results)
+
+ container_tags: Optional tags this search should be containerized by. This can be an ID for your
+ user, a project ID, or any other identifier you wish to use to filter memories.
+
+ doc_id: Optional document ID to search within. You can use this to find chunks in a very
+ large document.
+
+ document_threshold: Threshold / sensitivity for document selection. 0 is least sensitive (returns
+ most documents, more results), 1 is most sensitive (returns fewer documents,
+ accurate results)
+
+ filters: Optional filters to apply to the search
+
+ include_full_docs: If true, include full document in the response. This is helpful if you want a
+ chatbot to know the full context of the document.
+
+ include_summary: If true, include document summary in the response. This is helpful if you want a
+ chatbot to know the full context of the document.
+
+ limit: Maximum number of results to return
+
+ only_matching_chunks: If true, only return matching chunks without context. Normally, we send the
+ previous and next chunk to provide more context for LLMs. If you only want the
+ matching chunk, set this to true.
+
+ rerank: If true, rerank the results based on the query. This is helpful if you want to
+ ensure the most relevant results are returned.
+
+ rewrite_query: If true, rewrites the query to make it easier to find documents. This increases
+ the latency by about 400ms
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v3/search",
+ body=await async_maybe_transform(
+ {
+ "q": q,
+ "categories_filter": categories_filter,
+ "chunk_threshold": chunk_threshold,
+ "container_tags": container_tags,
+ "doc_id": doc_id,
+ "document_threshold": document_threshold,
+ "filters": filters,
+ "include_full_docs": include_full_docs,
+ "include_summary": include_summary,
+ "limit": limit,
+ "only_matching_chunks": only_matching_chunks,
+ "rerank": rerank,
+ "rewrite_query": rewrite_query,
+ },
+ search_execute_params.SearchExecuteParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SearchExecuteResponse,
+ )
+
async def memories(
self,
*,
@@ -412,6 +611,9 @@ def __init__(self, search: SearchResource) -> None:
self.documents = to_raw_response_wrapper(
search.documents,
)
+ self.execute = to_raw_response_wrapper(
+ search.execute,
+ )
self.memories = to_raw_response_wrapper(
search.memories,
)
@@ -424,6 +626,9 @@ def __init__(self, search: AsyncSearchResource) -> None:
self.documents = async_to_raw_response_wrapper(
search.documents,
)
+ self.execute = async_to_raw_response_wrapper(
+ search.execute,
+ )
self.memories = async_to_raw_response_wrapper(
search.memories,
)
@@ -436,6 +641,9 @@ def __init__(self, search: SearchResource) -> None:
self.documents = to_streamed_response_wrapper(
search.documents,
)
+ self.execute = to_streamed_response_wrapper(
+ search.execute,
+ )
self.memories = to_streamed_response_wrapper(
search.memories,
)
@@ -448,6 +656,9 @@ def __init__(self, search: AsyncSearchResource) -> None:
self.documents = async_to_streamed_response_wrapper(
search.documents,
)
+ self.execute = async_to_streamed_response_wrapper(
+ search.execute,
+ )
self.memories = async_to_streamed_response_wrapper(
search.memories,
)
diff --git a/src/supermemory/types/__init__.py b/src/supermemory/types/__init__.py
index ecb3052c..e0ca3f8c 100644
--- a/src/supermemory/types/__init__.py
+++ b/src/supermemory/types/__init__.py
@@ -9,11 +9,13 @@
from .memory_list_response import MemoryListResponse as MemoryListResponse
from .memory_update_params import MemoryUpdateParams as MemoryUpdateParams
from .setting_get_response import SettingGetResponse as SettingGetResponse
+from .search_execute_params import SearchExecuteParams as SearchExecuteParams
from .setting_update_params import SettingUpdateParams as SettingUpdateParams
from .connection_list_params import ConnectionListParams as ConnectionListParams
from .memory_update_response import MemoryUpdateResponse as MemoryUpdateResponse
from .search_memories_params import SearchMemoriesParams as SearchMemoriesParams
from .search_documents_params import SearchDocumentsParams as SearchDocumentsParams
+from .search_execute_response import SearchExecuteResponse as SearchExecuteResponse
from .setting_update_response import SettingUpdateResponse as SettingUpdateResponse
from .connection_create_params import ConnectionCreateParams as ConnectionCreateParams
from .connection_import_params import ConnectionImportParams as ConnectionImportParams
diff --git a/src/supermemory/types/search_execute_params.py b/src/supermemory/types/search_execute_params.py
new file mode 100644
index 00000000..db48a814
--- /dev/null
+++ b/src/supermemory/types/search_execute_params.py
@@ -0,0 +1,93 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable
+from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict
+
+from .._utils import PropertyInfo
+
+__all__ = ["SearchExecuteParams", "Filters", "FiltersUnionMember0"]
+
+
+class SearchExecuteParams(TypedDict, total=False):
+ q: Required[str]
+ """Search query string"""
+
+ categories_filter: Annotated[
+ List[Literal["technology", "science", "business", "health"]], PropertyInfo(alias="categoriesFilter")
+ ]
+ """Optional category filters"""
+
+ chunk_threshold: Annotated[float, PropertyInfo(alias="chunkThreshold")]
+ """Threshold / sensitivity for chunk selection.
+
+ 0 is least sensitive (returns most chunks, more results), 1 is most sensitive
+ (returns fewer chunks, accurate results)
+ """
+
+ container_tags: Annotated[List[str], PropertyInfo(alias="containerTags")]
+ """Optional tags this search should be containerized by.
+
+ This can be an ID for your user, a project ID, or any other identifier you wish
+ to use to filter memories.
+ """
+
+ doc_id: Annotated[str, PropertyInfo(alias="docId")]
+ """Optional document ID to search within.
+
+ You can use this to find chunks in a very large document.
+ """
+
+ document_threshold: Annotated[float, PropertyInfo(alias="documentThreshold")]
+ """Threshold / sensitivity for document selection.
+
+ 0 is least sensitive (returns most documents, more results), 1 is most sensitive
+ (returns fewer documents, accurate results)
+ """
+
+ filters: Filters
+ """Optional filters to apply to the search"""
+
+ include_full_docs: Annotated[bool, PropertyInfo(alias="includeFullDocs")]
+ """If true, include full document in the response.
+
+ This is helpful if you want a chatbot to know the full context of the document.
+ """
+
+ include_summary: Annotated[bool, PropertyInfo(alias="includeSummary")]
+ """If true, include document summary in the response.
+
+ This is helpful if you want a chatbot to know the full context of the document.
+ """
+
+ limit: int
+ """Maximum number of results to return"""
+
+ only_matching_chunks: Annotated[bool, PropertyInfo(alias="onlyMatchingChunks")]
+ """If true, only return matching chunks without context.
+
+ Normally, we send the previous and next chunk to provide more context for LLMs.
+ If you only want the matching chunk, set this to true.
+ """
+
+ rerank: bool
+ """If true, rerank the results based on the query.
+
+ This is helpful if you want to ensure the most relevant results are returned.
+ """
+
+ rewrite_query: Annotated[bool, PropertyInfo(alias="rewriteQuery")]
+ """If true, rewrites the query to make it easier to find documents.
+
+ This increases the latency by about 400ms
+ """
+
+
+class FiltersUnionMember0(TypedDict, total=False):
+ and_: Annotated[Iterable[object], PropertyInfo(alias="AND")]
+
+ or_: Annotated[Iterable[object], PropertyInfo(alias="OR")]
+
+
+Filters: TypeAlias = Union[FiltersUnionMember0, Dict[str, object]]
diff --git a/src/supermemory/types/search_execute_response.py b/src/supermemory/types/search_execute_response.py
new file mode 100644
index 00000000..c1844442
--- /dev/null
+++ b/src/supermemory/types/search_execute_response.py
@@ -0,0 +1,61 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+from datetime import datetime
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = ["SearchExecuteResponse", "Result", "ResultChunk"]
+
+
+class ResultChunk(BaseModel):
+ content: str
+ """Content of the matching chunk"""
+
+ is_relevant: bool = FieldInfo(alias="isRelevant")
+ """Whether this chunk is relevant to the query"""
+
+ score: float
+ """Similarity score for this chunk"""
+
+
+class Result(BaseModel):
+ chunks: List[ResultChunk]
+ """Matching content chunks from the document"""
+
+ created_at: datetime = FieldInfo(alias="createdAt")
+ """Document creation date"""
+
+ document_id: str = FieldInfo(alias="documentId")
+ """ID of the matching document"""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Document metadata"""
+
+ score: float
+ """Relevance score of the match"""
+
+ title: Optional[str] = None
+ """Document title"""
+
+ type: Optional[str] = None
+ """Document type"""
+
+ updated_at: datetime = FieldInfo(alias="updatedAt")
+ """Document last update date"""
+
+ content: Optional[str] = None
+ """Full document content (only included when includeFullDocs=true)"""
+
+ summary: Optional[str] = None
+ """Document summary"""
+
+
+class SearchExecuteResponse(BaseModel):
+ results: List[Result]
+
+ timing: float
+
+ total: float
diff --git a/tests/api_resources/test_search.py b/tests/api_resources/test_search.py
index 00f6ad4a..fc79884f 100644
--- a/tests/api_resources/test_search.py
+++ b/tests/api_resources/test_search.py
@@ -10,6 +10,7 @@
from supermemory import Supermemory, AsyncSupermemory
from tests.utils import assert_matches_type
from supermemory.types import (
+ SearchExecuteResponse,
SearchMemoriesResponse,
SearchDocumentsResponse,
)
@@ -90,6 +91,76 @@ def test_streaming_response_documents(self, client: Supermemory) -> None:
assert cast(Any, response.is_closed) is True
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_execute(self, client: Supermemory) -> None:
+ search = client.search.execute(
+ q="machine learning concepts",
+ )
+ assert_matches_type(SearchExecuteResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_execute_with_all_params(self, client: Supermemory) -> None:
+ search = client.search.execute(
+ q="machine learning concepts",
+ categories_filter=["technology", "science"],
+ chunk_threshold=0.5,
+ container_tags=["user_123", "project_123"],
+ doc_id="doc_xyz789",
+ document_threshold=0.5,
+ filters={
+ "and_": [
+ {
+ "key": "group",
+ "negate": False,
+ "value": "jira_users",
+ },
+ {
+ "filterType": "numeric",
+ "key": "timestamp",
+ "negate": False,
+ "numericOperator": ">",
+ "value": "1742745777",
+ },
+ ],
+ "or_": [{}],
+ },
+ include_full_docs=False,
+ include_summary=False,
+ limit=10,
+ only_matching_chunks=False,
+ rerank=False,
+ rewrite_query=False,
+ )
+ assert_matches_type(SearchExecuteResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_execute(self, client: Supermemory) -> None:
+ response = client.search.with_raw_response.execute(
+ q="machine learning concepts",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ search = response.parse()
+ assert_matches_type(SearchExecuteResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_execute(self, client: Supermemory) -> None:
+ with client.search.with_streaming_response.execute(
+ q="machine learning concepts",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ search = response.parse()
+ assert_matches_type(SearchExecuteResponse, search, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
def test_method_memories(self, client: Supermemory) -> None:
@@ -235,6 +306,76 @@ async def test_streaming_response_documents(self, async_client: AsyncSupermemory
assert cast(Any, response.is_closed) is True
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_execute(self, async_client: AsyncSupermemory) -> None:
+ search = await async_client.search.execute(
+ q="machine learning concepts",
+ )
+ assert_matches_type(SearchExecuteResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_execute_with_all_params(self, async_client: AsyncSupermemory) -> None:
+ search = await async_client.search.execute(
+ q="machine learning concepts",
+ categories_filter=["technology", "science"],
+ chunk_threshold=0.5,
+ container_tags=["user_123", "project_123"],
+ doc_id="doc_xyz789",
+ document_threshold=0.5,
+ filters={
+ "and_": [
+ {
+ "key": "group",
+ "negate": False,
+ "value": "jira_users",
+ },
+ {
+ "filterType": "numeric",
+ "key": "timestamp",
+ "negate": False,
+ "numericOperator": ">",
+ "value": "1742745777",
+ },
+ ],
+ "or_": [{}],
+ },
+ include_full_docs=False,
+ include_summary=False,
+ limit=10,
+ only_matching_chunks=False,
+ rerank=False,
+ rewrite_query=False,
+ )
+ assert_matches_type(SearchExecuteResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_execute(self, async_client: AsyncSupermemory) -> None:
+ response = await async_client.search.with_raw_response.execute(
+ q="machine learning concepts",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ search = await response.parse()
+ assert_matches_type(SearchExecuteResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_execute(self, async_client: AsyncSupermemory) -> None:
+ async with async_client.search.with_streaming_response.execute(
+ q="machine learning concepts",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ search = await response.parse()
+ assert_matches_type(SearchExecuteResponse, search, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
async def test_method_memories(self, async_client: AsyncSupermemory) -> None:
From 271467769f2638c0bf0bbf9ca3f8fd988c5da9a9 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 15 Aug 2025 21:41:18 +0000
Subject: [PATCH 4/4] release: 3.0.0-alpha.26
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 10 ++++++++++
pyproject.toml | 2 +-
src/supermemory/_version.py | 2 +-
4 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 20403303..6680703a 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "3.0.0-alpha.25"
+ ".": "3.0.0-alpha.26"
}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 795b48b4..ac06773e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,15 @@
# Changelog
+## 3.0.0-alpha.26 (2025-08-15)
+
+Full Changelog: [v3.0.0-alpha.25...v3.0.0-alpha.26](https://github.com/supermemoryai/python-sdk/compare/v3.0.0-alpha.25...v3.0.0-alpha.26)
+
+### Features
+
+* **api:** manual updates ([c2623b2](https://github.com/supermemoryai/python-sdk/commit/c2623b2b645eefd7e2cbb5027eb5a46cee7b62eb))
+* **api:** manual updates ([9e373ef](https://github.com/supermemoryai/python-sdk/commit/9e373ef0b585eb15cb04b95a1bab46c8c102970c))
+* **api:** manual updates ([fa75aff](https://github.com/supermemoryai/python-sdk/commit/fa75affffb701259be14445da95c77a1cdde512b))
+
## 3.0.0-alpha.25 (2025-08-15)
Full Changelog: [v3.0.0-alpha.24...v3.0.0-alpha.25](https://github.com/supermemoryai/python-sdk/compare/v3.0.0-alpha.24...v3.0.0-alpha.25)
diff --git a/pyproject.toml b/pyproject.toml
index 5d8602cb..64bdb490 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "supermemory"
-version = "3.0.0-alpha.25"
+version = "3.0.0-alpha.26"
description = "The official Python library for the supermemory API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/supermemory/_version.py b/src/supermemory/_version.py
index bf237fac..e1dbbcf7 100644
--- a/src/supermemory/_version.py
+++ b/src/supermemory/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "supermemory"
-__version__ = "3.0.0-alpha.25" # x-release-please-version
+__version__ = "3.0.0-alpha.26" # x-release-please-version