diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 20403303..6680703a 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "3.0.0-alpha.25"
+ ".": "3.0.0-alpha.26"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 476f4440..bc5bbced 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 16
+configured_endpoints: 18
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/supermemory--inc%2Fsupermemory-new-d52acd1a525b4bfe9f4befcc3a645f5d1289d75e7bad999cf1330e539b2ed84e.yml
openapi_spec_hash: c34df5406cfa4d245812d30f99d28116
-config_hash: be10c837d5319a33f30809a3ec223caf
+config_hash: 9b9291a6c872b063900a46386729ba3c
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 795b48b4..ac06773e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,15 @@
# Changelog
+## 3.0.0-alpha.26 (2025-08-15)
+
+Full Changelog: [v3.0.0-alpha.25...v3.0.0-alpha.26](https://github.com/supermemoryai/python-sdk/compare/v3.0.0-alpha.25...v3.0.0-alpha.26)
+
+### Features
+
+* **api:** manual updates ([c2623b2](https://github.com/supermemoryai/python-sdk/commit/c2623b2b645eefd7e2cbb5027eb5a46cee7b62eb))
+* **api:** manual updates ([9e373ef](https://github.com/supermemoryai/python-sdk/commit/9e373ef0b585eb15cb04b95a1bab46c8c102970c))
+* **api:** manual updates ([fa75aff](https://github.com/supermemoryai/python-sdk/commit/fa75affffb701259be14445da95c77a1cdde512b))
+
## 3.0.0-alpha.25 (2025-08-15)
Full Changelog: [v3.0.0-alpha.24...v3.0.0-alpha.25](https://github.com/supermemoryai/python-sdk/compare/v3.0.0-alpha.24...v3.0.0-alpha.25)
diff --git a/README.md b/README.md
index 4c0fefb1..b2851573 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@ client = Supermemory(
api_key=os.environ.get("SUPERMEMORY_API_KEY"), # This is the default and can be omitted
)
-response = client.search.execute(
+response = client.search.documents(
q="documents related to python",
)
print(response.results)
@@ -58,7 +58,7 @@ client = AsyncSupermemory(
async def main() -> None:
- response = await client.search.execute(
+ response = await client.search.documents(
q="documents related to python",
)
print(response.results)
@@ -93,7 +93,7 @@ async def main() -> None:
api_key="My API Key",
http_client=DefaultAioHttpClient(),
) as client:
- response = await client.search.execute(
+ response = await client.search.documents(
q="documents related to python",
)
print(response.results)
@@ -111,6 +111,39 @@ Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typ
Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.
+## Nested params
+
+Nested parameters are dictionaries, typed using `TypedDict`, for example:
+
+```python
+from supermemory import Supermemory
+
+client = Supermemory()
+
+response = client.search.memories(
+ q="machine learning concepts",
+ include={},
+)
+print(response.include)
+```
+
+## File uploads
+
+Request parameters that correspond to file uploads can be passed as `bytes`, or a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`.
+
+```python
+from pathlib import Path
+from supermemory import Supermemory
+
+client = Supermemory()
+
+client.memories.upload_file(
+ file=Path("/path/to/file"),
+)
+```
+
+The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically.
+
## Handling errors
When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `supermemory.APIConnectionError` is raised.
diff --git a/api.md b/api.md
index b20d9d9a..eadef78d 100644
--- a/api.md
+++ b/api.md
@@ -8,6 +8,7 @@ from supermemory.types import (
MemoryListResponse,
MemoryAddResponse,
MemoryGetResponse,
+ MemoryUploadFileResponse,
)
```
@@ -18,18 +19,21 @@ Methods:
- client.memories.delete(id) -> None
- client.memories.add(\*\*params) -> MemoryAddResponse
- client.memories.get(id) -> MemoryGetResponse
+- client.memories.upload_file(\*\*params) -> MemoryUploadFileResponse
# Search
Types:
```python
-from supermemory.types import SearchExecuteResponse
+from supermemory.types import SearchDocumentsResponse, SearchExecuteResponse, SearchMemoriesResponse
```
Methods:
+- client.search.documents(\*\*params) -> SearchDocumentsResponse
- client.search.execute(\*\*params) -> SearchExecuteResponse
+- client.search.memories(\*\*params) -> SearchMemoriesResponse
# Settings
diff --git a/pyproject.toml b/pyproject.toml
index 5d8602cb..64bdb490 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "supermemory"
-version = "3.0.0-alpha.25"
+version = "3.0.0-alpha.26"
description = "The official Python library for the supermemory API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/supermemory/_files.py b/src/supermemory/_files.py
index cc14c14f..ae7c4650 100644
--- a/src/supermemory/_files.py
+++ b/src/supermemory/_files.py
@@ -34,7 +34,7 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None:
if not is_file_content(obj):
prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`"
raise RuntimeError(
- f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead."
+ f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/supermemoryai/python-sdk/tree/main#file-uploads"
) from None
diff --git a/src/supermemory/_version.py b/src/supermemory/_version.py
index bf237fac..e1dbbcf7 100644
--- a/src/supermemory/_version.py
+++ b/src/supermemory/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "supermemory"
-__version__ = "3.0.0-alpha.25" # x-release-please-version
+__version__ = "3.0.0-alpha.26" # x-release-please-version
diff --git a/src/supermemory/resources/memories.py b/src/supermemory/resources/memories.py
index 0b34b676..1cf1c08e 100644
--- a/src/supermemory/resources/memories.py
+++ b/src/supermemory/resources/memories.py
@@ -2,14 +2,14 @@
from __future__ import annotations
-from typing import Dict, List, Union
+from typing import Dict, List, Union, Mapping, cast
from typing_extensions import Literal
import httpx
-from ..types import memory_add_params, memory_list_params, memory_update_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
+from ..types import memory_add_params, memory_list_params, memory_update_params, memory_upload_file_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, FileTypes
+from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -23,6 +23,7 @@
from ..types.memory_get_response import MemoryGetResponse
from ..types.memory_list_response import MemoryListResponse
from ..types.memory_update_response import MemoryUpdateResponse
+from ..types.memory_upload_file_response import MemoryUploadFileResponse
__all__ = ["MemoriesResource", "AsyncMemoriesResource"]
@@ -305,6 +306,51 @@ def get(
cast_to=MemoryGetResponse,
)
+ def upload_file(
+ self,
+ *,
+ file: FileTypes,
+ container_tags: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> MemoryUploadFileResponse:
+ """
+ Upload a file to be processed
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal(
+ {
+ "file": file,
+ "container_tags": container_tags,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return self._post(
+ "/v3/memories/file",
+ body=maybe_transform(body, memory_upload_file_params.MemoryUploadFileParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=MemoryUploadFileResponse,
+ )
+
class AsyncMemoriesResource(AsyncAPIResource):
@cached_property
@@ -584,6 +630,51 @@ async def get(
cast_to=MemoryGetResponse,
)
+ async def upload_file(
+ self,
+ *,
+ file: FileTypes,
+ container_tags: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> MemoryUploadFileResponse:
+ """
+ Upload a file to be processed
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal(
+ {
+ "file": file,
+ "container_tags": container_tags,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return await self._post(
+ "/v3/memories/file",
+ body=await async_maybe_transform(body, memory_upload_file_params.MemoryUploadFileParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=MemoryUploadFileResponse,
+ )
+
class MemoriesResourceWithRawResponse:
def __init__(self, memories: MemoriesResource) -> None:
@@ -604,6 +695,9 @@ def __init__(self, memories: MemoriesResource) -> None:
self.get = to_raw_response_wrapper(
memories.get,
)
+ self.upload_file = to_raw_response_wrapper(
+ memories.upload_file,
+ )
class AsyncMemoriesResourceWithRawResponse:
@@ -625,6 +719,9 @@ def __init__(self, memories: AsyncMemoriesResource) -> None:
self.get = async_to_raw_response_wrapper(
memories.get,
)
+ self.upload_file = async_to_raw_response_wrapper(
+ memories.upload_file,
+ )
class MemoriesResourceWithStreamingResponse:
@@ -646,6 +743,9 @@ def __init__(self, memories: MemoriesResource) -> None:
self.get = to_streamed_response_wrapper(
memories.get,
)
+ self.upload_file = to_streamed_response_wrapper(
+ memories.upload_file,
+ )
class AsyncMemoriesResourceWithStreamingResponse:
@@ -667,3 +767,6 @@ def __init__(self, memories: AsyncMemoriesResource) -> None:
self.get = async_to_streamed_response_wrapper(
memories.get,
)
+ self.upload_file = async_to_streamed_response_wrapper(
+ memories.upload_file,
+ )
diff --git a/src/supermemory/resources/search.py b/src/supermemory/resources/search.py
index 5bb291fb..bb9e8ff9 100644
--- a/src/supermemory/resources/search.py
+++ b/src/supermemory/resources/search.py
@@ -7,7 +7,7 @@
import httpx
-from ..types import search_execute_params
+from ..types import search_execute_params, search_memories_params, search_documents_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
@@ -20,6 +20,8 @@
)
from .._base_client import make_request_options
from ..types.search_execute_response import SearchExecuteResponse
+from ..types.search_memories_response import SearchMemoriesResponse
+from ..types.search_documents_response import SearchDocumentsResponse
__all__ = ["SearchResource", "AsyncSearchResource"]
@@ -44,6 +46,105 @@ def with_streaming_response(self) -> SearchResourceWithStreamingResponse:
"""
return SearchResourceWithStreamingResponse(self)
+ def documents(
+ self,
+ *,
+ q: str,
+ categories_filter: List[Literal["technology", "science", "business", "health"]] | NotGiven = NOT_GIVEN,
+ chunk_threshold: float | NotGiven = NOT_GIVEN,
+ container_tags: List[str] | NotGiven = NOT_GIVEN,
+ doc_id: str | NotGiven = NOT_GIVEN,
+ document_threshold: float | NotGiven = NOT_GIVEN,
+ filters: search_documents_params.Filters | NotGiven = NOT_GIVEN,
+ include_full_docs: bool | NotGiven = NOT_GIVEN,
+ include_summary: bool | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ only_matching_chunks: bool | NotGiven = NOT_GIVEN,
+ rerank: bool | NotGiven = NOT_GIVEN,
+ rewrite_query: bool | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SearchDocumentsResponse:
+ """
+ Search memories with advanced filtering
+
+ Args:
+ q: Search query string
+
+ categories_filter: Optional category filters
+
+ chunk_threshold: Threshold / sensitivity for chunk selection. 0 is least sensitive (returns most
+ chunks, more results), 1 is most sensitive (returns lesser chunks, accurate
+ results)
+
+ container_tags: Optional tags this search should be containerized by. This can be an ID for your
+ user, a project ID, or any other identifier you wish to use to filter memories.
+
+ doc_id: Optional document ID to search within. You can use this to find chunks in a very
+ large document.
+
+ document_threshold: Threshold / sensitivity for document selection. 0 is least sensitive (returns
+ most documents, more results), 1 is most sensitive (returns lesser documents,
+ accurate results)
+
+ filters: Optional filters to apply to the search
+
+ include_full_docs: If true, include full document in the response. This is helpful if you want a
+ chatbot to know the full context of the document.
+
+ include_summary: If true, include document summary in the response. This is helpful if you want a
+ chatbot to know the full context of the document.
+
+ limit: Maximum number of results to return
+
+ only_matching_chunks: If true, only return matching chunks without context. Normally, we send the
+ previous and next chunk to provide more context for LLMs. If you only want the
+ matching chunk, set this to true.
+
+ rerank: If true, rerank the results based on the query. This is helpful if you want to
+ ensure the most relevant results are returned.
+
+ rewrite_query: If true, rewrites the query to make it easier to find documents. This increases
+ the latency by about 400ms
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v3/search",
+ body=maybe_transform(
+ {
+ "q": q,
+ "categories_filter": categories_filter,
+ "chunk_threshold": chunk_threshold,
+ "container_tags": container_tags,
+ "doc_id": doc_id,
+ "document_threshold": document_threshold,
+ "filters": filters,
+ "include_full_docs": include_full_docs,
+ "include_summary": include_summary,
+ "limit": limit,
+ "only_matching_chunks": only_matching_chunks,
+ "rerank": rerank,
+ "rewrite_query": rewrite_query,
+ },
+ search_documents_params.SearchDocumentsParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SearchDocumentsResponse,
+ )
+
def execute(
self,
*,
@@ -143,6 +244,76 @@ def execute(
cast_to=SearchExecuteResponse,
)
+ def memories(
+ self,
+ *,
+ q: str,
+ container_tag: str | NotGiven = NOT_GIVEN,
+ filters: search_memories_params.Filters | NotGiven = NOT_GIVEN,
+ include: search_memories_params.Include | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ rerank: bool | NotGiven = NOT_GIVEN,
+ rewrite_query: bool | NotGiven = NOT_GIVEN,
+ threshold: float | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SearchMemoriesResponse:
+ """
+        Search memory entries - Low latency for conversational use cases
+
+ Args:
+ q: Search query string
+
+ container_tag: Optional tag this search should be containerized by. This can be an ID for your
+ user, a project ID, or any other identifier you wish to use to filter memories.
+
+ filters: Optional filters to apply to the search
+
+ limit: Maximum number of results to return
+
+ rerank: If true, rerank the results based on the query. This is helpful if you want to
+ ensure the most relevant results are returned.
+
+ rewrite_query: If true, rewrites the query to make it easier to find documents. This increases
+ the latency by about 400ms
+
+ threshold: Threshold / sensitivity for memories selection. 0 is least sensitive (returns
+ most memories, more results), 1 is most sensitive (returns lesser memories,
+ accurate results)
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v4/search",
+ body=maybe_transform(
+ {
+ "q": q,
+ "container_tag": container_tag,
+ "filters": filters,
+ "include": include,
+ "limit": limit,
+ "rerank": rerank,
+ "rewrite_query": rewrite_query,
+ "threshold": threshold,
+ },
+ search_memories_params.SearchMemoriesParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SearchMemoriesResponse,
+ )
+
class AsyncSearchResource(AsyncAPIResource):
@cached_property
@@ -164,6 +335,105 @@ def with_streaming_response(self) -> AsyncSearchResourceWithStreamingResponse:
"""
return AsyncSearchResourceWithStreamingResponse(self)
+ async def documents(
+ self,
+ *,
+ q: str,
+ categories_filter: List[Literal["technology", "science", "business", "health"]] | NotGiven = NOT_GIVEN,
+ chunk_threshold: float | NotGiven = NOT_GIVEN,
+ container_tags: List[str] | NotGiven = NOT_GIVEN,
+ doc_id: str | NotGiven = NOT_GIVEN,
+ document_threshold: float | NotGiven = NOT_GIVEN,
+ filters: search_documents_params.Filters | NotGiven = NOT_GIVEN,
+ include_full_docs: bool | NotGiven = NOT_GIVEN,
+ include_summary: bool | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ only_matching_chunks: bool | NotGiven = NOT_GIVEN,
+ rerank: bool | NotGiven = NOT_GIVEN,
+ rewrite_query: bool | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SearchDocumentsResponse:
+ """
+ Search memories with advanced filtering
+
+ Args:
+ q: Search query string
+
+ categories_filter: Optional category filters
+
+ chunk_threshold: Threshold / sensitivity for chunk selection. 0 is least sensitive (returns most
+ chunks, more results), 1 is most sensitive (returns lesser chunks, accurate
+ results)
+
+ container_tags: Optional tags this search should be containerized by. This can be an ID for your
+ user, a project ID, or any other identifier you wish to use to filter memories.
+
+ doc_id: Optional document ID to search within. You can use this to find chunks in a very
+ large document.
+
+ document_threshold: Threshold / sensitivity for document selection. 0 is least sensitive (returns
+ most documents, more results), 1 is most sensitive (returns lesser documents,
+ accurate results)
+
+ filters: Optional filters to apply to the search
+
+ include_full_docs: If true, include full document in the response. This is helpful if you want a
+ chatbot to know the full context of the document.
+
+ include_summary: If true, include document summary in the response. This is helpful if you want a
+ chatbot to know the full context of the document.
+
+ limit: Maximum number of results to return
+
+ only_matching_chunks: If true, only return matching chunks without context. Normally, we send the
+ previous and next chunk to provide more context for LLMs. If you only want the
+ matching chunk, set this to true.
+
+ rerank: If true, rerank the results based on the query. This is helpful if you want to
+ ensure the most relevant results are returned.
+
+ rewrite_query: If true, rewrites the query to make it easier to find documents. This increases
+ the latency by about 400ms
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v3/search",
+ body=await async_maybe_transform(
+ {
+ "q": q,
+ "categories_filter": categories_filter,
+ "chunk_threshold": chunk_threshold,
+ "container_tags": container_tags,
+ "doc_id": doc_id,
+ "document_threshold": document_threshold,
+ "filters": filters,
+ "include_full_docs": include_full_docs,
+ "include_summary": include_summary,
+ "limit": limit,
+ "only_matching_chunks": only_matching_chunks,
+ "rerank": rerank,
+ "rewrite_query": rewrite_query,
+ },
+ search_documents_params.SearchDocumentsParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SearchDocumentsResponse,
+ )
+
async def execute(
self,
*,
@@ -263,38 +533,132 @@ async def execute(
cast_to=SearchExecuteResponse,
)
+ async def memories(
+ self,
+ *,
+ q: str,
+ container_tag: str | NotGiven = NOT_GIVEN,
+ filters: search_memories_params.Filters | NotGiven = NOT_GIVEN,
+ include: search_memories_params.Include | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ rerank: bool | NotGiven = NOT_GIVEN,
+ rewrite_query: bool | NotGiven = NOT_GIVEN,
+ threshold: float | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SearchMemoriesResponse:
+ """
+        Search memory entries - Low latency for conversational use cases
+
+ Args:
+ q: Search query string
+
+ container_tag: Optional tag this search should be containerized by. This can be an ID for your
+ user, a project ID, or any other identifier you wish to use to filter memories.
+
+ filters: Optional filters to apply to the search
+
+ limit: Maximum number of results to return
+
+ rerank: If true, rerank the results based on the query. This is helpful if you want to
+ ensure the most relevant results are returned.
+
+ rewrite_query: If true, rewrites the query to make it easier to find documents. This increases
+ the latency by about 400ms
+
+ threshold: Threshold / sensitivity for memories selection. 0 is least sensitive (returns
+ most memories, more results), 1 is most sensitive (returns lesser memories,
+ accurate results)
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v4/search",
+ body=await async_maybe_transform(
+ {
+ "q": q,
+ "container_tag": container_tag,
+ "filters": filters,
+ "include": include,
+ "limit": limit,
+ "rerank": rerank,
+ "rewrite_query": rewrite_query,
+ "threshold": threshold,
+ },
+ search_memories_params.SearchMemoriesParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=SearchMemoriesResponse,
+ )
+
class SearchResourceWithRawResponse:
def __init__(self, search: SearchResource) -> None:
self._search = search
+ self.documents = to_raw_response_wrapper(
+ search.documents,
+ )
self.execute = to_raw_response_wrapper(
search.execute,
)
+ self.memories = to_raw_response_wrapper(
+ search.memories,
+ )
class AsyncSearchResourceWithRawResponse:
def __init__(self, search: AsyncSearchResource) -> None:
self._search = search
+ self.documents = async_to_raw_response_wrapper(
+ search.documents,
+ )
self.execute = async_to_raw_response_wrapper(
search.execute,
)
+ self.memories = async_to_raw_response_wrapper(
+ search.memories,
+ )
class SearchResourceWithStreamingResponse:
def __init__(self, search: SearchResource) -> None:
self._search = search
+ self.documents = to_streamed_response_wrapper(
+ search.documents,
+ )
self.execute = to_streamed_response_wrapper(
search.execute,
)
+ self.memories = to_streamed_response_wrapper(
+ search.memories,
+ )
class AsyncSearchResourceWithStreamingResponse:
def __init__(self, search: AsyncSearchResource) -> None:
self._search = search
+ self.documents = async_to_streamed_response_wrapper(
+ search.documents,
+ )
self.execute = async_to_streamed_response_wrapper(
search.execute,
)
+ self.memories = async_to_streamed_response_wrapper(
+ search.memories,
+ )
diff --git a/src/supermemory/types/__init__.py b/src/supermemory/types/__init__.py
index 03f53719..e0ca3f8c 100644
--- a/src/supermemory/types/__init__.py
+++ b/src/supermemory/types/__init__.py
@@ -13,12 +13,18 @@
from .setting_update_params import SettingUpdateParams as SettingUpdateParams
from .connection_list_params import ConnectionListParams as ConnectionListParams
from .memory_update_response import MemoryUpdateResponse as MemoryUpdateResponse
+from .search_memories_params import SearchMemoriesParams as SearchMemoriesParams
+from .search_documents_params import SearchDocumentsParams as SearchDocumentsParams
from .search_execute_response import SearchExecuteResponse as SearchExecuteResponse
from .setting_update_response import SettingUpdateResponse as SettingUpdateResponse
from .connection_create_params import ConnectionCreateParams as ConnectionCreateParams
from .connection_import_params import ConnectionImportParams as ConnectionImportParams
from .connection_list_response import ConnectionListResponse as ConnectionListResponse
+from .search_memories_response import SearchMemoriesResponse as SearchMemoriesResponse
+from .memory_upload_file_params import MemoryUploadFileParams as MemoryUploadFileParams
+from .search_documents_response import SearchDocumentsResponse as SearchDocumentsResponse
from .connection_create_response import ConnectionCreateResponse as ConnectionCreateResponse
+from .memory_upload_file_response import MemoryUploadFileResponse as MemoryUploadFileResponse
from .connection_get_by_id_response import ConnectionGetByIDResponse as ConnectionGetByIDResponse
from .connection_get_by_tags_params import ConnectionGetByTagsParams as ConnectionGetByTagsParams
from .connection_get_by_tags_response import ConnectionGetByTagsResponse as ConnectionGetByTagsResponse
diff --git a/src/supermemory/types/memory_upload_file_params.py b/src/supermemory/types/memory_upload_file_params.py
new file mode 100644
index 00000000..ce4dfc40
--- /dev/null
+++ b/src/supermemory/types/memory_upload_file_params.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, Annotated, TypedDict
+
+from .._types import FileTypes
+from .._utils import PropertyInfo
+
+__all__ = ["MemoryUploadFileParams"]
+
+
+class MemoryUploadFileParams(TypedDict, total=False):
+ file: Required[FileTypes]
+
+ container_tags: Annotated[str, PropertyInfo(alias="containerTags")]
diff --git a/src/supermemory/types/memory_upload_file_response.py b/src/supermemory/types/memory_upload_file_response.py
new file mode 100644
index 00000000..f67b958f
--- /dev/null
+++ b/src/supermemory/types/memory_upload_file_response.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["MemoryUploadFileResponse"]
+
+
+class MemoryUploadFileResponse(BaseModel):
+ id: str
+
+ status: str
diff --git a/src/supermemory/types/search_documents_params.py b/src/supermemory/types/search_documents_params.py
new file mode 100644
index 00000000..b1b2d874
--- /dev/null
+++ b/src/supermemory/types/search_documents_params.py
@@ -0,0 +1,93 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable
+from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict
+
+from .._utils import PropertyInfo
+
+__all__ = ["SearchDocumentsParams", "Filters", "FiltersUnionMember0"]
+
+
+class SearchDocumentsParams(TypedDict, total=False):
+ q: Required[str]
+ """Search query string"""
+
+ categories_filter: Annotated[
+ List[Literal["technology", "science", "business", "health"]], PropertyInfo(alias="categoriesFilter")
+ ]
+ """Optional category filters"""
+
+ chunk_threshold: Annotated[float, PropertyInfo(alias="chunkThreshold")]
+ """Threshold / sensitivity for chunk selection.
+
+ 0 is least sensitive (returns most chunks, more results), 1 is most sensitive
+ (returns lesser chunks, accurate results)
+ """
+
+ container_tags: Annotated[List[str], PropertyInfo(alias="containerTags")]
+ """Optional tags this search should be containerized by.
+
+ This can be an ID for your user, a project ID, or any other identifier you wish
+ to use to filter memories.
+ """
+
+ doc_id: Annotated[str, PropertyInfo(alias="docId")]
+ """Optional document ID to search within.
+
+ You can use this to find chunks in a very large document.
+ """
+
+ document_threshold: Annotated[float, PropertyInfo(alias="documentThreshold")]
+ """Threshold / sensitivity for document selection.
+
+ 0 is least sensitive (returns most documents, more results), 1 is most sensitive
+ (returns lesser documents, accurate results)
+ """
+
+ filters: Filters
+ """Optional filters to apply to the search"""
+
+ include_full_docs: Annotated[bool, PropertyInfo(alias="includeFullDocs")]
+ """If true, include full document in the response.
+
+ This is helpful if you want a chatbot to know the full context of the document.
+ """
+
+ include_summary: Annotated[bool, PropertyInfo(alias="includeSummary")]
+ """If true, include document summary in the response.
+
+ This is helpful if you want a chatbot to know the full context of the document.
+ """
+
+ limit: int
+ """Maximum number of results to return"""
+
+ only_matching_chunks: Annotated[bool, PropertyInfo(alias="onlyMatchingChunks")]
+ """If true, only return matching chunks without context.
+
+ Normally, we send the previous and next chunk to provide more context for LLMs.
+ If you only want the matching chunk, set this to true.
+ """
+
+ rerank: bool
+ """If true, rerank the results based on the query.
+
+ This is helpful if you want to ensure the most relevant results are returned.
+ """
+
+ rewrite_query: Annotated[bool, PropertyInfo(alias="rewriteQuery")]
+ """If true, rewrites the query to make it easier to find documents.
+
+ This increases the latency by about 400ms
+ """
+
+
+class FiltersUnionMember0(TypedDict, total=False):
+ and_: Annotated[Iterable[object], PropertyInfo(alias="AND")]
+
+ or_: Annotated[Iterable[object], PropertyInfo(alias="OR")]
+
+
+Filters: TypeAlias = Union[FiltersUnionMember0, Dict[str, object]]
diff --git a/src/supermemory/types/search_documents_response.py b/src/supermemory/types/search_documents_response.py
new file mode 100644
index 00000000..04ce581e
--- /dev/null
+++ b/src/supermemory/types/search_documents_response.py
@@ -0,0 +1,61 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+from datetime import datetime
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = ["SearchDocumentsResponse", "Result", "ResultChunk"]
+
+
+class ResultChunk(BaseModel):
+ content: str
+ """Content of the matching chunk"""
+
+ is_relevant: bool = FieldInfo(alias="isRelevant")
+ """Whether this chunk is relevant to the query"""
+
+ score: float
+ """Similarity score for this chunk"""
+
+
+class Result(BaseModel):
+ chunks: List[ResultChunk]
+ """Matching content chunks from the document"""
+
+ created_at: datetime = FieldInfo(alias="createdAt")
+ """Document creation date"""
+
+ document_id: str = FieldInfo(alias="documentId")
+ """ID of the matching document"""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Document metadata"""
+
+ score: float
+ """Relevance score of the match"""
+
+ title: Optional[str] = None
+ """Document title"""
+
+ type: Optional[str] = None
+ """Document type"""
+
+ updated_at: datetime = FieldInfo(alias="updatedAt")
+ """Document last update date"""
+
+ content: Optional[str] = None
+ """Full document content (only included when includeFullDocs=true)"""
+
+ summary: Optional[str] = None
+ """Document summary"""
+
+
+class SearchDocumentsResponse(BaseModel):
+ results: List[Result]
+
+ timing: float
+
+ total: float
diff --git a/src/supermemory/types/search_memories_params.py b/src/supermemory/types/search_memories_params.py
new file mode 100644
index 00000000..060184d1
--- /dev/null
+++ b/src/supermemory/types/search_memories_params.py
@@ -0,0 +1,66 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable
+from typing_extensions import Required, Annotated, TypeAlias, TypedDict
+
+from .._utils import PropertyInfo
+
+__all__ = ["SearchMemoriesParams", "Filters", "FiltersUnionMember0", "Include"]
+
+
+class SearchMemoriesParams(TypedDict, total=False):
+ q: Required[str]
+ """Search query string"""
+
+ container_tag: Annotated[str, PropertyInfo(alias="containerTag")]
+ """Optional tag this search should be containerized by.
+
+ This can be an ID for your user, a project ID, or any other identifier you wish
+ to use to filter memories.
+ """
+
+ filters: Filters
+ """Optional filters to apply to the search"""
+
+ include: Include
+
+ limit: int
+ """Maximum number of results to return"""
+
+ rerank: bool
+ """If true, rerank the results based on the query.
+
+ This is helpful if you want to ensure the most relevant results are returned.
+ """
+
+ rewrite_query: Annotated[bool, PropertyInfo(alias="rewriteQuery")]
+ """If true, rewrites the query to make it easier to find documents.
+
+ This increases the latency by about 400ms
+ """
+
+ threshold: float
+ """Threshold / sensitivity for memories selection.
+
+ 0 is least sensitive (returns most memories, more results), 1 is most sensitive
+ (returns lesser memories, accurate results)
+ """
+
+
+class FiltersUnionMember0(TypedDict, total=False):
+ and_: Annotated[Iterable[object], PropertyInfo(alias="AND")]
+
+ or_: Annotated[Iterable[object], PropertyInfo(alias="OR")]
+
+
+Filters: TypeAlias = Union[FiltersUnionMember0, Dict[str, object]]
+
+
+class Include(TypedDict, total=False):
+ documents: bool
+
+ related_memories: Annotated[bool, PropertyInfo(alias="relatedMemories")]
+
+ summaries: bool
diff --git a/src/supermemory/types/search_memories_response.py b/src/supermemory/types/search_memories_response.py
new file mode 100644
index 00000000..9e18a749
--- /dev/null
+++ b/src/supermemory/types/search_memories_response.py
@@ -0,0 +1,121 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = [
+ "SearchMemoriesResponse",
+ "Result",
+ "ResultContext",
+ "ResultContextChild",
+ "ResultContextParent",
+ "ResultDocument",
+]
+
+
+class ResultContextChild(BaseModel):
+ memory: str
+ """The contextual memory content"""
+
+ relation: Literal["updates", "extends", "derives"]
+ """Relation type between this memory and its parent/child"""
+
+ updated_at: datetime = FieldInfo(alias="updatedAt")
+ """Contextual memory last update date"""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Contextual memory metadata"""
+
+ version: Optional[float] = None
+ """
+ Relative version distance from the primary memory (+1 for direct child, +2 for
+ grand-child, etc.)
+ """
+
+
+class ResultContextParent(BaseModel):
+ memory: str
+ """The contextual memory content"""
+
+ relation: Literal["updates", "extends", "derives"]
+ """Relation type between this memory and its parent/child"""
+
+ updated_at: datetime = FieldInfo(alias="updatedAt")
+ """Contextual memory last update date"""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Contextual memory metadata"""
+
+ version: Optional[float] = None
+ """
+ Relative version distance from the primary memory (-1 for direct parent, -2 for
+ grand-parent, etc.)
+ """
+
+
+class ResultContext(BaseModel):
+ children: Optional[List[ResultContextChild]] = None
+
+ parents: Optional[List[ResultContextParent]] = None
+
+
+class ResultDocument(BaseModel):
+ id: str
+ """Document ID"""
+
+ created_at: datetime = FieldInfo(alias="createdAt")
+ """Document creation date"""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Document metadata"""
+
+ title: str
+ """Document title"""
+
+ type: str
+ """Document type"""
+
+ updated_at: datetime = FieldInfo(alias="updatedAt")
+ """Document last update date"""
+
+
+class Result(BaseModel):
+ id: str
+ """Memory entry ID"""
+
+ memory: str
+ """The memory content"""
+
+ metadata: Optional[Dict[str, object]] = None
+ """Memory metadata"""
+
+ similarity: float
+ """Similarity score between the query and memory entry"""
+
+ updated_at: datetime = FieldInfo(alias="updatedAt")
+ """Memory last update date"""
+
+ context: Optional[ResultContext] = None
+ """Object containing arrays of parent and child contextual memories"""
+
+ documents: Optional[List[ResultDocument]] = None
+ """Associated documents for this memory entry"""
+
+ version: Optional[float] = None
+ """Version number of this memory entry"""
+
+
+class SearchMemoriesResponse(BaseModel):
+ results: List[Result]
+ """Array of matching memory entries with similarity scores"""
+
+ timing: float
+ """Search execution time in milliseconds"""
+
+ total: float
+ """Total number of results returned"""
diff --git a/tests/api_resources/test_memories.py b/tests/api_resources/test_memories.py
index e4229379..f3d88303 100644
--- a/tests/api_resources/test_memories.py
+++ b/tests/api_resources/test_memories.py
@@ -14,6 +14,7 @@
MemoryGetResponse,
MemoryListResponse,
MemoryUpdateResponse,
+ MemoryUploadFileResponse,
)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -254,6 +255,49 @@ def test_path_params_get(self, client: Supermemory) -> None:
"",
)
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_upload_file(self, client: Supermemory) -> None:
+ memory = client.memories.upload_file(
+ file=b"raw file contents",
+ )
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_upload_file_with_all_params(self, client: Supermemory) -> None:
+ memory = client.memories.upload_file(
+ file=b"raw file contents",
+ container_tags="containerTags",
+ )
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_upload_file(self, client: Supermemory) -> None:
+ response = client.memories.with_raw_response.upload_file(
+ file=b"raw file contents",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ memory = response.parse()
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_upload_file(self, client: Supermemory) -> None:
+ with client.memories.with_streaming_response.upload_file(
+ file=b"raw file contents",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ memory = response.parse()
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
class TestAsyncMemories:
parametrize = pytest.mark.parametrize(
@@ -491,3 +535,46 @@ async def test_path_params_get(self, async_client: AsyncSupermemory) -> None:
await async_client.memories.with_raw_response.get(
"",
)
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_upload_file(self, async_client: AsyncSupermemory) -> None:
+ memory = await async_client.memories.upload_file(
+ file=b"raw file contents",
+ )
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_upload_file_with_all_params(self, async_client: AsyncSupermemory) -> None:
+ memory = await async_client.memories.upload_file(
+ file=b"raw file contents",
+ container_tags="containerTags",
+ )
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_upload_file(self, async_client: AsyncSupermemory) -> None:
+ response = await async_client.memories.with_raw_response.upload_file(
+ file=b"raw file contents",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ memory = await response.parse()
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_upload_file(self, async_client: AsyncSupermemory) -> None:
+ async with async_client.memories.with_streaming_response.upload_file(
+ file=b"raw file contents",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ memory = await response.parse()
+ assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_search.py b/tests/api_resources/test_search.py
index 1caa6b76..fc79884f 100644
--- a/tests/api_resources/test_search.py
+++ b/tests/api_resources/test_search.py
@@ -9,7 +9,11 @@
from supermemory import Supermemory, AsyncSupermemory
from tests.utils import assert_matches_type
-from supermemory.types import SearchExecuteResponse
+from supermemory.types import (
+ SearchExecuteResponse,
+ SearchMemoriesResponse,
+ SearchDocumentsResponse,
+)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -17,6 +21,76 @@
class TestSearch:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_documents(self, client: Supermemory) -> None:
+ search = client.search.documents(
+ q="machine learning concepts",
+ )
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_documents_with_all_params(self, client: Supermemory) -> None:
+ search = client.search.documents(
+ q="machine learning concepts",
+ categories_filter=["technology", "science"],
+ chunk_threshold=0.5,
+ container_tags=["user_123", "project_123"],
+ doc_id="doc_xyz789",
+ document_threshold=0.5,
+ filters={
+ "and_": [
+ {
+ "key": "group",
+ "negate": False,
+ "value": "jira_users",
+ },
+ {
+ "filterType": "numeric",
+ "key": "timestamp",
+ "negate": False,
+ "numericOperator": ">",
+ "value": "1742745777",
+ },
+ ],
+ "or_": [{}],
+ },
+ include_full_docs=False,
+ include_summary=False,
+ limit=10,
+ only_matching_chunks=False,
+ rerank=False,
+ rewrite_query=False,
+ )
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_documents(self, client: Supermemory) -> None:
+ response = client.search.with_raw_response.documents(
+ q="machine learning concepts",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ search = response.parse()
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_documents(self, client: Supermemory) -> None:
+ with client.search.with_streaming_response.documents(
+ q="machine learning concepts",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ search = response.parse()
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
def test_method_execute(self, client: Supermemory) -> None:
@@ -87,12 +161,151 @@ def test_streaming_response_execute(self, client: Supermemory) -> None:
assert cast(Any, response.is_closed) is True
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_memories(self, client: Supermemory) -> None:
+ search = client.search.memories(
+ q="machine learning concepts",
+ )
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_memories_with_all_params(self, client: Supermemory) -> None:
+ search = client.search.memories(
+ q="machine learning concepts",
+ container_tag="user_123",
+ filters={
+ "and_": [
+ {
+ "key": "group",
+ "negate": False,
+ "value": "jira_users",
+ },
+ {
+ "filterType": "numeric",
+ "key": "timestamp",
+ "negate": False,
+ "numericOperator": ">",
+ "value": "1742745777",
+ },
+ ],
+ "or_": [{}],
+ },
+ include={
+ "documents": True,
+ "related_memories": True,
+ "summaries": True,
+ },
+ limit=10,
+ rerank=False,
+ rewrite_query=False,
+ threshold=0.5,
+ )
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_memories(self, client: Supermemory) -> None:
+ response = client.search.with_raw_response.memories(
+ q="machine learning concepts",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ search = response.parse()
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_memories(self, client: Supermemory) -> None:
+ with client.search.with_streaming_response.memories(
+ q="machine learning concepts",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ search = response.parse()
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
class TestAsyncSearch:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_documents(self, async_client: AsyncSupermemory) -> None:
+ search = await async_client.search.documents(
+ q="machine learning concepts",
+ )
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_documents_with_all_params(self, async_client: AsyncSupermemory) -> None:
+ search = await async_client.search.documents(
+ q="machine learning concepts",
+ categories_filter=["technology", "science"],
+ chunk_threshold=0.5,
+ container_tags=["user_123", "project_123"],
+ doc_id="doc_xyz789",
+ document_threshold=0.5,
+ filters={
+ "and_": [
+ {
+ "key": "group",
+ "negate": False,
+ "value": "jira_users",
+ },
+ {
+ "filterType": "numeric",
+ "key": "timestamp",
+ "negate": False,
+ "numericOperator": ">",
+ "value": "1742745777",
+ },
+ ],
+ "or_": [{}],
+ },
+ include_full_docs=False,
+ include_summary=False,
+ limit=10,
+ only_matching_chunks=False,
+ rerank=False,
+ rewrite_query=False,
+ )
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_documents(self, async_client: AsyncSupermemory) -> None:
+ response = await async_client.search.with_raw_response.documents(
+ q="machine learning concepts",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ search = await response.parse()
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_documents(self, async_client: AsyncSupermemory) -> None:
+ async with async_client.search.with_streaming_response.documents(
+ q="machine learning concepts",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ search = await response.parse()
+ assert_matches_type(SearchDocumentsResponse, search, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
async def test_method_execute(self, async_client: AsyncSupermemory) -> None:
@@ -162,3 +375,72 @@ async def test_streaming_response_execute(self, async_client: AsyncSupermemory)
assert_matches_type(SearchExecuteResponse, search, path=["response"])
assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_memories(self, async_client: AsyncSupermemory) -> None:
+ search = await async_client.search.memories(
+ q="machine learning concepts",
+ )
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_memories_with_all_params(self, async_client: AsyncSupermemory) -> None:
+ search = await async_client.search.memories(
+ q="machine learning concepts",
+ container_tag="user_123",
+ filters={
+ "and_": [
+ {
+ "key": "group",
+ "negate": False,
+ "value": "jira_users",
+ },
+ {
+ "filterType": "numeric",
+ "key": "timestamp",
+ "negate": False,
+ "numericOperator": ">",
+ "value": "1742745777",
+ },
+ ],
+ "or_": [{}],
+ },
+ include={
+ "documents": True,
+ "related_memories": True,
+ "summaries": True,
+ },
+ limit=10,
+ rerank=False,
+ rewrite_query=False,
+ threshold=0.5,
+ )
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_memories(self, async_client: AsyncSupermemory) -> None:
+ response = await async_client.search.with_raw_response.memories(
+ q="machine learning concepts",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ search = await response.parse()
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_memories(self, async_client: AsyncSupermemory) -> None:
+ async with async_client.search.with_streaming_response.memories(
+ q="machine learning concepts",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ search = await response.parse()
+ assert_matches_type(SearchMemoriesResponse, search, path=["response"])
+
+ assert cast(Any, response.is_closed) is True