diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index a5147c1d..e36acabe 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "3.0.0-alpha.29"
+ ".": "3.0.0-alpha.30"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index a23fff93..cac7cf67 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 18
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/supermemory--inc%2Fsupermemory-new-de994787885a5ec28fb19f069715a257ea4e4f1bcff2b25c4b33e928779c6454.yml
-openapi_spec_hash: 7b831b4614b8d9b8caddcaa096bf3817
+configured_endpoints: 12
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/supermemory--inc%2Fsupermemory-new-f181eaeb22a42d197dbd9c45fa61bf9a9b78a91d3334fc0f841494dc73d1a203.yml
+openapi_spec_hash: bb8262ebcdea53979cf1cafbc2c68dc8
config_hash: 9b9291a6c872b063900a46386729ba3c
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8e61cfb9..87bd87cc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,26 @@
# Changelog
+## 3.0.0-alpha.30 (2025-09-15)
+
+Full Changelog: [v3.0.0-alpha.29...v3.0.0-alpha.30](https://github.com/supermemoryai/python-sdk/compare/v3.0.0-alpha.29...v3.0.0-alpha.30)
+
+### Features
+
+* **api:** api update ([b7df28e](https://github.com/supermemoryai/python-sdk/commit/b7df28ec025c70d7b8e1544aa1ef0262c0be8a03))
+* **api:** api update ([54cf9c1](https://github.com/supermemoryai/python-sdk/commit/54cf9c13bf3ff378dd6a19a15c9e343e822ab99a))
+* **api:** api update ([4812077](https://github.com/supermemoryai/python-sdk/commit/48120771b2476f2d2863a1614edc222e863ddde4))
+* **api:** api update ([a4f4259](https://github.com/supermemoryai/python-sdk/commit/a4f425943298762bdfb7f3b0421f8d56d2e1473c))
+* **api:** api update ([8412e4d](https://github.com/supermemoryai/python-sdk/commit/8412e4d06b0225fd3707a55b743c401d87b1c0aa))
+* improve future compat with pydantic v3 ([70ea8b7](https://github.com/supermemoryai/python-sdk/commit/70ea8b7206b2e8db3d86f5a1674e7dd2f7a7e67b))
+* **types:** replace List[str] with SequenceNotStr in params ([f4bfda3](https://github.com/supermemoryai/python-sdk/commit/f4bfda34d40ca947eae6a32ea323dafeddf51484))
+
+
+### Chores
+
+* **internal:** add Sequence related utils ([d2b96ed](https://github.com/supermemoryai/python-sdk/commit/d2b96ed43577a3d046ffea7cbc87ba6b877beba7))
+* **internal:** move mypy configurations to `pyproject.toml` file ([31832f5](https://github.com/supermemoryai/python-sdk/commit/31832f5046f7b6384c1bb506680319890e3a5194))
+* **tests:** simplify `get_platform` test ([30d8e46](https://github.com/supermemoryai/python-sdk/commit/30d8e464a5d8ceb5cec41a6197c291962b78b0b5))
+
## 3.0.0-alpha.29 (2025-08-27)
Full Changelog: [v3.0.0-alpha.28...v3.0.0-alpha.29](https://github.com/supermemoryai/python-sdk/compare/v3.0.0-alpha.28...v3.0.0-alpha.29)
diff --git a/README.md b/README.md
index b2851573..05e12990 100644
--- a/README.md
+++ b/README.md
@@ -127,23 +127,6 @@ response = client.search.memories(
print(response.include)
```
-## File uploads
-
-Request parameters that correspond to file uploads can be passed as `bytes`, or a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`.
-
-```python
-from pathlib import Path
-from supermemory import Supermemory
-
-client = Supermemory()
-
-client.memories.upload_file(
- file=Path("/path/to/file"),
-)
-```
-
-The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically.
-
## Handling errors
When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `supermemory.APIConnectionError` is raised.
@@ -160,7 +143,9 @@ from supermemory import Supermemory
client = Supermemory()
try:
- client.memories.add()
+ client.search.documents(
+ q="machine learning concepts",
+ )
except supermemory.APIConnectionError as e:
print("The server could not be reached")
print(e.__cause__) # an underlying Exception, likely raised within httpx.
@@ -203,7 +188,9 @@ client = Supermemory(
)
# Or, configure per-request:
-client.with_options(max_retries=5).memories.add()
+client.with_options(max_retries=5).search.documents(
+ q="machine learning concepts",
+)
```
### Timeouts
@@ -226,7 +213,9 @@ client = Supermemory(
)
# Override per-request:
-client.with_options(timeout=5.0).memories.add()
+client.with_options(timeout=5.0).search.documents(
+ q="machine learning concepts",
+)
```
On timeout, an `APITimeoutError` is thrown.
@@ -267,11 +256,13 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
from supermemory import Supermemory
client = Supermemory()
-response = client.memories.with_raw_response.add()
+response = client.search.with_raw_response.documents(
+ q="machine learning concepts",
+)
print(response.headers.get('X-My-Header'))
-memory = response.parse() # get the object that `memories.add()` would have returned
-print(memory.id)
+search = response.parse() # get the object that `search.documents()` would have returned
+print(search.results)
```
These methods return an [`APIResponse`](https://github.com/supermemoryai/python-sdk/tree/main/src/supermemory/_response.py) object.
@@ -285,7 +276,9 @@ The above interface eagerly reads the full response body when you make the reque
To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
```python
-with client.memories.with_streaming_response.add() as response:
+with client.search.with_streaming_response.documents(
+ q="machine learning concepts",
+) as response:
print(response.headers.get("X-My-Header"))
for line in response.iter_lines():
diff --git a/api.md b/api.md
index c2ec94dd..083ba491 100644
--- a/api.md
+++ b/api.md
@@ -1,26 +1,3 @@
-# Memories
-
-Types:
-
-```python
-from supermemory.types import (
- MemoryUpdateResponse,
- MemoryListResponse,
- MemoryAddResponse,
- MemoryGetResponse,
- MemoryUploadFileResponse,
-)
-```
-
-Methods:
-
-- client.memories.update(id, \*\*params) -> MemoryUpdateResponse
-- client.memories.list(\*\*params) -> MemoryListResponse
-- client.memories.delete(id) -> None
-- client.memories.add(\*\*params) -> MemoryAddResponse
-- client.memories.get(id) -> MemoryGetResponse
-- client.memories.upload_file(\*\*params) -> MemoryUploadFileResponse
-
# Search
Types:
diff --git a/mypy.ini b/mypy.ini
deleted file mode 100644
index 08c8f486..00000000
--- a/mypy.ini
+++ /dev/null
@@ -1,50 +0,0 @@
-[mypy]
-pretty = True
-show_error_codes = True
-
-# Exclude _files.py because mypy isn't smart enough to apply
-# the correct type narrowing and as this is an internal module
-# it's fine to just use Pyright.
-#
-# We also exclude our `tests` as mypy doesn't always infer
-# types correctly and Pyright will still catch any type errors.
-exclude = ^(src/supermemory/_files\.py|_dev/.*\.py|tests/.*)$
-
-strict_equality = True
-implicit_reexport = True
-check_untyped_defs = True
-no_implicit_optional = True
-
-warn_return_any = True
-warn_unreachable = True
-warn_unused_configs = True
-
-# Turn these options off as it could cause conflicts
-# with the Pyright options.
-warn_unused_ignores = False
-warn_redundant_casts = False
-
-disallow_any_generics = True
-disallow_untyped_defs = True
-disallow_untyped_calls = True
-disallow_subclassing_any = True
-disallow_incomplete_defs = True
-disallow_untyped_decorators = True
-cache_fine_grained = True
-
-# By default, mypy reports an error if you assign a value to the result
-# of a function call that doesn't return anything. We do this in our test
-# cases:
-# ```
-# result = ...
-# assert result is None
-# ```
-# Changing this codegen to make mypy happy would increase complexity
-# and would not be worth it.
-disable_error_code = func-returns-value,overload-cannot-match
-
-# https://github.com/python/mypy/issues/12162
-[mypy.overrides]
-module = "black.files.*"
-ignore_errors = true
-ignore_missing_imports = true
diff --git a/pyproject.toml b/pyproject.toml
index bd51fb43..b941471b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "supermemory"
-version = "3.0.0-alpha.29"
+version = "3.0.0-alpha.30"
description = "The official Python library for the supermemory API"
dynamic = ["readme"]
license = "Apache-2.0"
@@ -56,7 +56,6 @@ dev-dependencies = [
"dirty-equals>=0.6.0",
"importlib-metadata>=6.7.0",
"rich>=13.7.1",
- "nest_asyncio==1.6.0",
"pytest-xdist>=3.6.1",
]
@@ -157,6 +156,58 @@ reportOverlappingOverload = false
reportImportCycles = false
reportPrivateUsage = false
+[tool.mypy]
+pretty = true
+show_error_codes = true
+
+# Exclude _files.py because mypy isn't smart enough to apply
+# the correct type narrowing and as this is an internal module
+# it's fine to just use Pyright.
+#
+# We also exclude our `tests` as mypy doesn't always infer
+# types correctly and Pyright will still catch any type errors.
+exclude = ['src/supermemory/_files.py', '_dev/.*.py', 'tests/.*']
+
+strict_equality = true
+implicit_reexport = true
+check_untyped_defs = true
+no_implicit_optional = true
+
+warn_return_any = true
+warn_unreachable = true
+warn_unused_configs = true
+
+# Turn these options off as it could cause conflicts
+# with the Pyright options.
+warn_unused_ignores = false
+warn_redundant_casts = false
+
+disallow_any_generics = true
+disallow_untyped_defs = true
+disallow_untyped_calls = true
+disallow_subclassing_any = true
+disallow_incomplete_defs = true
+disallow_untyped_decorators = true
+cache_fine_grained = true
+
+# By default, mypy reports an error if you assign a value to the result
+# of a function call that doesn't return anything. We do this in our test
+# cases:
+# ```
+# result = ...
+# assert result is None
+# ```
+# Changing this codegen to make mypy happy would increase complexity
+# and would not be worth it.
+disable_error_code = "func-returns-value,overload-cannot-match"
+
+# https://github.com/python/mypy/issues/12162
+[[tool.mypy.overrides]]
+module = "black.files.*"
+ignore_errors = true
+ignore_missing_imports = true
+
+
[tool.ruff]
line-length = 120
output-format = "grouped"
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 722cf701..b46f0c64 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -75,7 +75,6 @@ multidict==6.4.4
mypy==1.14.1
mypy-extensions==1.0.0
# via mypy
-nest-asyncio==1.6.0
nodeenv==1.8.0
# via pyright
nox==2023.4.22
diff --git a/src/supermemory/_base_client.py b/src/supermemory/_base_client.py
index 07c3369e..fbcaf523 100644
--- a/src/supermemory/_base_client.py
+++ b/src/supermemory/_base_client.py
@@ -59,7 +59,7 @@
ModelBuilderProtocol,
)
from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping
-from ._compat import PYDANTIC_V2, model_copy, model_dump
+from ._compat import PYDANTIC_V1, model_copy, model_dump
from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type
from ._response import (
APIResponse,
@@ -232,7 +232,7 @@ def _set_private_attributes(
model: Type[_T],
options: FinalRequestOptions,
) -> None:
- if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None:
+ if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None:
self.__pydantic_private__ = {}
self._model = model
@@ -320,7 +320,7 @@ def _set_private_attributes(
client: AsyncAPIClient,
options: FinalRequestOptions,
) -> None:
- if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None:
+ if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None:
self.__pydantic_private__ = {}
self._model = model
diff --git a/src/supermemory/_client.py b/src/supermemory/_client.py
index 2fdf196b..222d8fa6 100644
--- a/src/supermemory/_client.py
+++ b/src/supermemory/_client.py
@@ -21,7 +21,7 @@
)
from ._utils import is_given, get_async_library
from ._version import __version__
-from .resources import search, memories, settings, connections
+from .resources import search, settings, connections
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
from ._exceptions import APIStatusError, SupermemoryError
from ._base_client import (
@@ -43,7 +43,6 @@
class Supermemory(SyncAPIClient):
- memories: memories.MemoriesResource
search: search.SearchResource
settings: settings.SettingsResource
connections: connections.ConnectionsResource
@@ -104,7 +103,6 @@ def __init__(
_strict_response_validation=_strict_response_validation,
)
- self.memories = memories.MemoriesResource(self)
self.search = search.SearchResource(self)
self.settings = settings.SettingsResource(self)
self.connections = connections.ConnectionsResource(self)
@@ -217,7 +215,6 @@ def _make_status_error(
class AsyncSupermemory(AsyncAPIClient):
- memories: memories.AsyncMemoriesResource
search: search.AsyncSearchResource
settings: settings.AsyncSettingsResource
connections: connections.AsyncConnectionsResource
@@ -278,7 +275,6 @@ def __init__(
_strict_response_validation=_strict_response_validation,
)
- self.memories = memories.AsyncMemoriesResource(self)
self.search = search.AsyncSearchResource(self)
self.settings = settings.AsyncSettingsResource(self)
self.connections = connections.AsyncConnectionsResource(self)
@@ -392,7 +388,6 @@ def _make_status_error(
class SupermemoryWithRawResponse:
def __init__(self, client: Supermemory) -> None:
- self.memories = memories.MemoriesResourceWithRawResponse(client.memories)
self.search = search.SearchResourceWithRawResponse(client.search)
self.settings = settings.SettingsResourceWithRawResponse(client.settings)
self.connections = connections.ConnectionsResourceWithRawResponse(client.connections)
@@ -400,7 +395,6 @@ def __init__(self, client: Supermemory) -> None:
class AsyncSupermemoryWithRawResponse:
def __init__(self, client: AsyncSupermemory) -> None:
- self.memories = memories.AsyncMemoriesResourceWithRawResponse(client.memories)
self.search = search.AsyncSearchResourceWithRawResponse(client.search)
self.settings = settings.AsyncSettingsResourceWithRawResponse(client.settings)
self.connections = connections.AsyncConnectionsResourceWithRawResponse(client.connections)
@@ -408,7 +402,6 @@ def __init__(self, client: AsyncSupermemory) -> None:
class SupermemoryWithStreamedResponse:
def __init__(self, client: Supermemory) -> None:
- self.memories = memories.MemoriesResourceWithStreamingResponse(client.memories)
self.search = search.SearchResourceWithStreamingResponse(client.search)
self.settings = settings.SettingsResourceWithStreamingResponse(client.settings)
self.connections = connections.ConnectionsResourceWithStreamingResponse(client.connections)
@@ -416,7 +409,6 @@ def __init__(self, client: Supermemory) -> None:
class AsyncSupermemoryWithStreamedResponse:
def __init__(self, client: AsyncSupermemory) -> None:
- self.memories = memories.AsyncMemoriesResourceWithStreamingResponse(client.memories)
self.search = search.AsyncSearchResourceWithStreamingResponse(client.search)
self.settings = settings.AsyncSettingsResourceWithStreamingResponse(client.settings)
self.connections = connections.AsyncConnectionsResourceWithStreamingResponse(client.connections)
diff --git a/src/supermemory/_compat.py b/src/supermemory/_compat.py
index 92d9ee61..bdef67f0 100644
--- a/src/supermemory/_compat.py
+++ b/src/supermemory/_compat.py
@@ -12,14 +12,13 @@
_T = TypeVar("_T")
_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel)
-# --------------- Pydantic v2 compatibility ---------------
+# --------------- Pydantic v2, v3 compatibility ---------------
# Pyright incorrectly reports some of our functions as overriding a method when they don't
# pyright: reportIncompatibleMethodOverride=false
-PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
+PYDANTIC_V1 = pydantic.VERSION.startswith("1.")
-# v1 re-exports
if TYPE_CHECKING:
def parse_date(value: date | StrBytesIntFloat) -> date: # noqa: ARG001
@@ -44,90 +43,92 @@ def is_typeddict(type_: type[Any]) -> bool: # noqa: ARG001
...
else:
- if PYDANTIC_V2:
- from pydantic.v1.typing import (
+ # v1 re-exports
+ if PYDANTIC_V1:
+ from pydantic.typing import (
get_args as get_args,
is_union as is_union,
get_origin as get_origin,
is_typeddict as is_typeddict,
is_literal_type as is_literal_type,
)
- from pydantic.v1.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
+ from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
else:
- from pydantic.typing import (
+ from ._utils import (
get_args as get_args,
is_union as is_union,
get_origin as get_origin,
+ parse_date as parse_date,
is_typeddict as is_typeddict,
+ parse_datetime as parse_datetime,
is_literal_type as is_literal_type,
)
- from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
# refactored config
if TYPE_CHECKING:
from pydantic import ConfigDict as ConfigDict
else:
- if PYDANTIC_V2:
- from pydantic import ConfigDict
- else:
+ if PYDANTIC_V1:
# TODO: provide an error message here?
ConfigDict = None
+ else:
+ from pydantic import ConfigDict as ConfigDict
# renamed methods / properties
def parse_obj(model: type[_ModelT], value: object) -> _ModelT:
- if PYDANTIC_V2:
- return model.model_validate(value)
- else:
+ if PYDANTIC_V1:
return cast(_ModelT, model.parse_obj(value)) # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
+ else:
+ return model.model_validate(value)
def field_is_required(field: FieldInfo) -> bool:
- if PYDANTIC_V2:
- return field.is_required()
- return field.required # type: ignore
+ if PYDANTIC_V1:
+ return field.required # type: ignore
+ return field.is_required()
def field_get_default(field: FieldInfo) -> Any:
value = field.get_default()
- if PYDANTIC_V2:
- from pydantic_core import PydanticUndefined
-
- if value == PydanticUndefined:
- return None
+ if PYDANTIC_V1:
return value
+ from pydantic_core import PydanticUndefined
+
+ if value == PydanticUndefined:
+ return None
return value
def field_outer_type(field: FieldInfo) -> Any:
- if PYDANTIC_V2:
- return field.annotation
- return field.outer_type_ # type: ignore
+ if PYDANTIC_V1:
+ return field.outer_type_ # type: ignore
+ return field.annotation
def get_model_config(model: type[pydantic.BaseModel]) -> Any:
- if PYDANTIC_V2:
- return model.model_config
- return model.__config__ # type: ignore
+ if PYDANTIC_V1:
+ return model.__config__ # type: ignore
+ return model.model_config
def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]:
- if PYDANTIC_V2:
- return model.model_fields
- return model.__fields__ # type: ignore
+ if PYDANTIC_V1:
+ return model.__fields__ # type: ignore
+ return model.model_fields
def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT:
- if PYDANTIC_V2:
- return model.model_copy(deep=deep)
- return model.copy(deep=deep) # type: ignore
+ if PYDANTIC_V1:
+ return model.copy(deep=deep) # type: ignore
+ return model.model_copy(deep=deep)
def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
- if PYDANTIC_V2:
- return model.model_dump_json(indent=indent)
- return model.json(indent=indent) # type: ignore
+ if PYDANTIC_V1:
+ return model.json(indent=indent) # type: ignore
+ return model.model_dump_json(indent=indent)
def model_dump(
@@ -139,14 +140,14 @@ def model_dump(
warnings: bool = True,
mode: Literal["json", "python"] = "python",
) -> dict[str, Any]:
- if PYDANTIC_V2 or hasattr(model, "model_dump"):
+ if (not PYDANTIC_V1) or hasattr(model, "model_dump"):
return model.model_dump(
mode=mode,
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
# warnings are not supported in Pydantic v1
- warnings=warnings if PYDANTIC_V2 else True,
+ warnings=True if PYDANTIC_V1 else warnings,
)
return cast(
"dict[str, Any]",
@@ -159,9 +160,9 @@ def model_dump(
def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
- if PYDANTIC_V2:
- return model.model_validate(data)
- return model.parse_obj(data) # pyright: ignore[reportDeprecated]
+ if PYDANTIC_V1:
+ return model.parse_obj(data) # pyright: ignore[reportDeprecated]
+ return model.model_validate(data)
# generic models
@@ -170,17 +171,16 @@ def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
class GenericModel(pydantic.BaseModel): ...
else:
- if PYDANTIC_V2:
+ if PYDANTIC_V1:
+ import pydantic.generics
+
+ class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
+ else:
# there no longer needs to be a distinction in v2 but
# we still have to create our own subclass to avoid
# inconsistent MRO ordering errors
class GenericModel(pydantic.BaseModel): ...
- else:
- import pydantic.generics
-
- class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
-
# cached properties
if TYPE_CHECKING:
diff --git a/src/supermemory/_files.py b/src/supermemory/_files.py
index ae7c4650..cc14c14f 100644
--- a/src/supermemory/_files.py
+++ b/src/supermemory/_files.py
@@ -34,7 +34,7 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None:
if not is_file_content(obj):
prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`"
raise RuntimeError(
- f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/supermemoryai/python-sdk/tree/main#file-uploads"
+ f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead."
) from None
diff --git a/src/supermemory/_models.py b/src/supermemory/_models.py
index 92f7c10b..3a6017ef 100644
--- a/src/supermemory/_models.py
+++ b/src/supermemory/_models.py
@@ -50,7 +50,7 @@
strip_annotated_type,
)
from ._compat import (
- PYDANTIC_V2,
+ PYDANTIC_V1,
ConfigDict,
GenericModel as BaseGenericModel,
get_args,
@@ -81,11 +81,7 @@ class _ConfigProtocol(Protocol):
class BaseModel(pydantic.BaseModel):
- if PYDANTIC_V2:
- model_config: ClassVar[ConfigDict] = ConfigDict(
- extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true"))
- )
- else:
+ if PYDANTIC_V1:
@property
@override
@@ -95,6 +91,10 @@ def model_fields_set(self) -> set[str]:
class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated]
extra: Any = pydantic.Extra.allow # type: ignore
+ else:
+ model_config: ClassVar[ConfigDict] = ConfigDict(
+ extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true"))
+ )
def to_dict(
self,
@@ -215,25 +215,25 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride]
if key not in model_fields:
parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value
- if PYDANTIC_V2:
- _extra[key] = parsed
- else:
+ if PYDANTIC_V1:
_fields_set.add(key)
fields_values[key] = parsed
+ else:
+ _extra[key] = parsed
object.__setattr__(m, "__dict__", fields_values)
- if PYDANTIC_V2:
- # these properties are copied from Pydantic's `model_construct()` method
- object.__setattr__(m, "__pydantic_private__", None)
- object.__setattr__(m, "__pydantic_extra__", _extra)
- object.__setattr__(m, "__pydantic_fields_set__", _fields_set)
- else:
+ if PYDANTIC_V1:
# init_private_attributes() does not exist in v2
m._init_private_attributes() # type: ignore
# copied from Pydantic v1's `construct()` method
object.__setattr__(m, "__fields_set__", _fields_set)
+ else:
+ # these properties are copied from Pydantic's `model_construct()` method
+ object.__setattr__(m, "__pydantic_private__", None)
+ object.__setattr__(m, "__pydantic_extra__", _extra)
+ object.__setattr__(m, "__pydantic_fields_set__", _fields_set)
return m
@@ -243,7 +243,7 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride]
# although not in practice
model_construct = construct
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
# we define aliases for some of the new pydantic v2 methods so
# that we can just document these methods without having to specify
# a specific pydantic version as some users may not know which
@@ -363,10 +363,10 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object:
if value is None:
return field_get_default(field)
- if PYDANTIC_V2:
- type_ = field.annotation
- else:
+ if PYDANTIC_V1:
type_ = cast(type, field.outer_type_) # type: ignore
+ else:
+ type_ = field.annotation # type: ignore
if type_ is None:
raise RuntimeError(f"Unexpected field type is None for {key}")
@@ -375,7 +375,7 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object:
def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None:
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
# TODO
return None
@@ -628,30 +628,30 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any,
for variant in get_args(union):
variant = strip_annotated_type(variant)
if is_basemodel_type(variant):
- if PYDANTIC_V2:
- field = _extract_field_schema_pv2(variant, discriminator_field_name)
- if not field:
+ if PYDANTIC_V1:
+ field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
+ if not field_info:
continue
# Note: if one variant defines an alias then they all should
- discriminator_alias = field.get("serialization_alias")
-
- field_schema = field["schema"]
+ discriminator_alias = field_info.alias
- if field_schema["type"] == "literal":
- for entry in cast("LiteralSchema", field_schema)["expected"]:
+ if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation):
+ for entry in get_args(annotation):
if isinstance(entry, str):
mapping[entry] = variant
else:
- field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
- if not field_info:
+ field = _extract_field_schema_pv2(variant, discriminator_field_name)
+ if not field:
continue
# Note: if one variant defines an alias then they all should
- discriminator_alias = field_info.alias
+ discriminator_alias = field.get("serialization_alias")
- if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation):
- for entry in get_args(annotation):
+ field_schema = field["schema"]
+
+ if field_schema["type"] == "literal":
+ for entry in cast("LiteralSchema", field_schema)["expected"]:
if isinstance(entry, str):
mapping[entry] = variant
@@ -714,7 +714,7 @@ class GenericModel(BaseGenericModel, BaseModel):
pass
-if PYDANTIC_V2:
+if not PYDANTIC_V1:
from pydantic import TypeAdapter as _TypeAdapter
_CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter))
@@ -782,12 +782,12 @@ class FinalRequestOptions(pydantic.BaseModel):
json_data: Union[Body, None] = None
extra_json: Union[AnyMapping, None] = None
- if PYDANTIC_V2:
- model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True)
- else:
+ if PYDANTIC_V1:
class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated]
arbitrary_types_allowed: bool = True
+ else:
+ model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True)
def get_max_retries(self, max_retries: int) -> int:
if isinstance(self.max_retries, NotGiven):
@@ -820,9 +820,9 @@ def construct( # type: ignore
key: strip_not_given(value)
for key, value in values.items()
}
- if PYDANTIC_V2:
- return super().model_construct(_fields_set, **kwargs)
- return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated]
+ if PYDANTIC_V1:
+ return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated]
+ return super().model_construct(_fields_set, **kwargs)
if not TYPE_CHECKING:
# type checkers incorrectly complain about this assignment
diff --git a/src/supermemory/_types.py b/src/supermemory/_types.py
index 8491d513..24417563 100644
--- a/src/supermemory/_types.py
+++ b/src/supermemory/_types.py
@@ -13,10 +13,21 @@
Mapping,
TypeVar,
Callable,
+ Iterator,
Optional,
Sequence,
)
-from typing_extensions import Set, Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable
+from typing_extensions import (
+ Set,
+ Literal,
+ Protocol,
+ TypeAlias,
+ TypedDict,
+ SupportsIndex,
+ overload,
+ override,
+ runtime_checkable,
+)
import httpx
import pydantic
@@ -217,3 +228,26 @@ class _GenericAlias(Protocol):
class HttpxSendArgs(TypedDict, total=False):
auth: httpx.Auth
follow_redirects: bool
+
+
+_T_co = TypeVar("_T_co", covariant=True)
+
+
+if TYPE_CHECKING:
+ # This works because str.__contains__ does not accept object (either in typeshed or at runtime)
+ # https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285
+ class SequenceNotStr(Protocol[_T_co]):
+ @overload
+ def __getitem__(self, index: SupportsIndex, /) -> _T_co: ...
+ @overload
+ def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ...
+ def __contains__(self, value: object, /) -> bool: ...
+ def __len__(self) -> int: ...
+ def __iter__(self) -> Iterator[_T_co]: ...
+ def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ...
+ def count(self, value: Any, /) -> int: ...
+ def __reversed__(self) -> Iterator[_T_co]: ...
+else:
+ # just point this to a normal `Sequence` at runtime to avoid having to special case
+ # deserializing our custom sequence type
+ SequenceNotStr = Sequence
diff --git a/src/supermemory/_utils/__init__.py b/src/supermemory/_utils/__init__.py
index d4fda26f..dc64e29a 100644
--- a/src/supermemory/_utils/__init__.py
+++ b/src/supermemory/_utils/__init__.py
@@ -10,7 +10,6 @@
lru_cache as lru_cache,
is_mapping as is_mapping,
is_tuple_t as is_tuple_t,
- parse_date as parse_date,
is_iterable as is_iterable,
is_sequence as is_sequence,
coerce_float as coerce_float,
@@ -23,7 +22,6 @@
coerce_boolean as coerce_boolean,
coerce_integer as coerce_integer,
file_from_path as file_from_path,
- parse_datetime as parse_datetime,
strip_not_given as strip_not_given,
deepcopy_minimal as deepcopy_minimal,
get_async_library as get_async_library,
@@ -32,12 +30,20 @@
maybe_coerce_boolean as maybe_coerce_boolean,
maybe_coerce_integer as maybe_coerce_integer,
)
+from ._compat import (
+ get_args as get_args,
+ is_union as is_union,
+ get_origin as get_origin,
+ is_typeddict as is_typeddict,
+ is_literal_type as is_literal_type,
+)
from ._typing import (
is_list_type as is_list_type,
is_union_type as is_union_type,
extract_type_arg as extract_type_arg,
is_iterable_type as is_iterable_type,
is_required_type as is_required_type,
+ is_sequence_type as is_sequence_type,
is_annotated_type as is_annotated_type,
is_type_alias_type as is_type_alias_type,
strip_annotated_type as strip_annotated_type,
@@ -55,3 +61,4 @@
function_has_argument as function_has_argument,
assert_signatures_in_sync as assert_signatures_in_sync,
)
+from ._datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
diff --git a/src/supermemory/_utils/_compat.py b/src/supermemory/_utils/_compat.py
new file mode 100644
index 00000000..dd703233
--- /dev/null
+++ b/src/supermemory/_utils/_compat.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import sys
+import typing_extensions
+from typing import Any, Type, Union, Literal, Optional
+from datetime import date, datetime
+from typing_extensions import get_args as _get_args, get_origin as _get_origin
+
+from .._types import StrBytesIntFloat
+from ._datetime_parse import parse_date as _parse_date, parse_datetime as _parse_datetime
+
+_LITERAL_TYPES = {Literal, typing_extensions.Literal}
+
+
+def get_args(tp: type[Any]) -> tuple[Any, ...]:
+ return _get_args(tp)
+
+
+def get_origin(tp: type[Any]) -> type[Any] | None:
+ return _get_origin(tp)
+
+
+def is_union(tp: Optional[Type[Any]]) -> bool:
+ if sys.version_info < (3, 10):
+ return tp is Union # type: ignore[comparison-overlap]
+ else:
+ import types
+
+ return tp is Union or tp is types.UnionType
+
+
+def is_typeddict(tp: Type[Any]) -> bool:
+ return typing_extensions.is_typeddict(tp)
+
+
+def is_literal_type(tp: Type[Any]) -> bool:
+ return get_origin(tp) in _LITERAL_TYPES
+
+
+def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
+ return _parse_date(value)
+
+
+def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
+ return _parse_datetime(value)
diff --git a/src/supermemory/_utils/_datetime_parse.py b/src/supermemory/_utils/_datetime_parse.py
new file mode 100644
index 00000000..7cb9d9e6
--- /dev/null
+++ b/src/supermemory/_utils/_datetime_parse.py
@@ -0,0 +1,136 @@
+"""
+This file contains code from https://github.com/pydantic/pydantic/blob/main/pydantic/v1/datetime_parse.py
+without the Pydantic v1 specific errors.
+"""
+
+from __future__ import annotations
+
+import re
+from typing import Dict, Union, Optional
+from datetime import date, datetime, timezone, timedelta
+
+from .._types import StrBytesIntFloat
+
+date_expr = r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
+time_expr = (
+    r"(?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
+    r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
+    r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
+)
+
+date_re = re.compile(f"{date_expr}$")
+datetime_re = re.compile(f"{date_expr}[T ]{time_expr}")
+
+
+EPOCH = datetime(1970, 1, 1)
+# if greater than this, the number is in ms, if less than or equal it's in seconds
+# (in seconds this is 11th October 2603, in ms it's 20th August 1970)
+MS_WATERSHED = int(2e10)
+# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9
+MAX_NUMBER = int(3e20)
+
+
+def _get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:
+ if isinstance(value, (int, float)):
+ return value
+ try:
+ return float(value)
+ except ValueError:
+ return None
+ except TypeError:
+ raise TypeError(f"invalid type; expected {native_expected_type}, string, bytes, int or float") from None
+
+
+def _from_unix_seconds(seconds: Union[int, float]) -> datetime:
+ if seconds > MAX_NUMBER:
+ return datetime.max
+ elif seconds < -MAX_NUMBER:
+ return datetime.min
+
+ while abs(seconds) > MS_WATERSHED:
+ seconds /= 1000
+ dt = EPOCH + timedelta(seconds=seconds)
+ return dt.replace(tzinfo=timezone.utc)
+
+
+def _parse_timezone(value: Optional[str]) -> Union[None, int, timezone]:
+ if value == "Z":
+ return timezone.utc
+ elif value is not None:
+ offset_mins = int(value[-2:]) if len(value) > 3 else 0
+ offset = 60 * int(value[1:3]) + offset_mins
+ if value[0] == "-":
+ offset = -offset
+ return timezone(timedelta(minutes=offset))
+ else:
+ return None
+
+
+def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
+ """
+ Parse a datetime/int/float/string and return a datetime.datetime.
+
+ This function supports time zone offsets. When the input contains one,
+ the output uses a timezone with a fixed offset from UTC.
+
+ Raise ValueError if the input is well formatted but not a valid datetime.
+ Raise ValueError if the input isn't well formatted.
+ """
+ if isinstance(value, datetime):
+ return value
+
+ number = _get_numeric(value, "datetime")
+ if number is not None:
+ return _from_unix_seconds(number)
+
+ if isinstance(value, bytes):
+ value = value.decode()
+
+ assert not isinstance(value, (float, int))
+
+ match = datetime_re.match(value)
+ if match is None:
+ raise ValueError("invalid datetime format")
+
+ kw = match.groupdict()
+ if kw["microsecond"]:
+ kw["microsecond"] = kw["microsecond"].ljust(6, "0")
+
+ tzinfo = _parse_timezone(kw.pop("tzinfo"))
+ kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
+ kw_["tzinfo"] = tzinfo
+
+ return datetime(**kw_) # type: ignore
+
+
+def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
+ """
+ Parse a date/int/float/string and return a datetime.date.
+
+ Raise ValueError if the input is well formatted but not a valid date.
+ Raise ValueError if the input isn't well formatted.
+ """
+ if isinstance(value, date):
+ if isinstance(value, datetime):
+ return value.date()
+ else:
+ return value
+
+ number = _get_numeric(value, "date")
+ if number is not None:
+ return _from_unix_seconds(number).date()
+
+ if isinstance(value, bytes):
+ value = value.decode()
+
+ assert not isinstance(value, (float, int))
+ match = date_re.match(value)
+ if match is None:
+ raise ValueError("invalid date format")
+
+ kw = {k: int(v) for k, v in match.groupdict().items()}
+
+ try:
+ return date(**kw)
+ except ValueError:
+ raise ValueError("invalid date format") from None
diff --git a/src/supermemory/_utils/_transform.py b/src/supermemory/_utils/_transform.py
index b0cc20a7..c19124f0 100644
--- a/src/supermemory/_utils/_transform.py
+++ b/src/supermemory/_utils/_transform.py
@@ -16,18 +16,20 @@
lru_cache,
is_mapping,
is_iterable,
+ is_sequence,
)
from .._files import is_base64_file_input
+from ._compat import get_origin, is_typeddict
from ._typing import (
is_list_type,
is_union_type,
extract_type_arg,
is_iterable_type,
is_required_type,
+ is_sequence_type,
is_annotated_type,
strip_annotated_type,
)
-from .._compat import get_origin, model_dump, is_typeddict
_T = TypeVar("_T")
@@ -167,6 +169,8 @@ def _transform_recursive(
Defaults to the same value as the `annotation` argument.
"""
+ from .._compat import model_dump
+
if inner_type is None:
inner_type = annotation
@@ -184,6 +188,8 @@ def _transform_recursive(
(is_list_type(stripped_type) and is_list(data))
# Iterable[T]
or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
+ # Sequence[T]
+ or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
):
# dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
# intended as an iterable, so we don't transform it.
@@ -329,6 +335,8 @@ async def _async_transform_recursive(
Defaults to the same value as the `annotation` argument.
"""
+ from .._compat import model_dump
+
if inner_type is None:
inner_type = annotation
@@ -346,6 +354,8 @@ async def _async_transform_recursive(
(is_list_type(stripped_type) and is_list(data))
# Iterable[T]
or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
+ # Sequence[T]
+ or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
):
# dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
# intended as an iterable, so we don't transform it.
diff --git a/src/supermemory/_utils/_typing.py b/src/supermemory/_utils/_typing.py
index 1bac9542..193109f3 100644
--- a/src/supermemory/_utils/_typing.py
+++ b/src/supermemory/_utils/_typing.py
@@ -15,7 +15,7 @@
from ._utils import lru_cache
from .._types import InheritsGeneric
-from .._compat import is_union as _is_union
+from ._compat import is_union as _is_union
def is_annotated_type(typ: type) -> bool:
@@ -26,6 +26,11 @@ def is_list_type(typ: type) -> bool:
return (get_origin(typ) or typ) == list
+def is_sequence_type(typ: type) -> bool:
+ origin = get_origin(typ) or typ
+ return origin == typing_extensions.Sequence or origin == typing.Sequence or origin == _c_abc.Sequence
+
+
def is_iterable_type(typ: type) -> bool:
"""If the given type is `typing.Iterable[T]`"""
origin = get_origin(typ) or typ
diff --git a/src/supermemory/_utils/_utils.py b/src/supermemory/_utils/_utils.py
index ea3cf3f2..f0818595 100644
--- a/src/supermemory/_utils/_utils.py
+++ b/src/supermemory/_utils/_utils.py
@@ -22,7 +22,6 @@
import sniffio
from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike
-from .._compat import parse_date as parse_date, parse_datetime as parse_datetime
_T = TypeVar("_T")
_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
diff --git a/src/supermemory/_version.py b/src/supermemory/_version.py
index 83fc1e35..0e7aa74e 100644
--- a/src/supermemory/_version.py
+++ b/src/supermemory/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "supermemory"
-__version__ = "3.0.0-alpha.29" # x-release-please-version
+__version__ = "3.0.0-alpha.30" # x-release-please-version
diff --git a/src/supermemory/resources/__init__.py b/src/supermemory/resources/__init__.py
index 275ecfbe..de24910f 100644
--- a/src/supermemory/resources/__init__.py
+++ b/src/supermemory/resources/__init__.py
@@ -8,14 +8,6 @@
SearchResourceWithStreamingResponse,
AsyncSearchResourceWithStreamingResponse,
)
-from .memories import (
- MemoriesResource,
- AsyncMemoriesResource,
- MemoriesResourceWithRawResponse,
- AsyncMemoriesResourceWithRawResponse,
- MemoriesResourceWithStreamingResponse,
- AsyncMemoriesResourceWithStreamingResponse,
-)
from .settings import (
SettingsResource,
AsyncSettingsResource,
@@ -34,12 +26,6 @@
)
__all__ = [
- "MemoriesResource",
- "AsyncMemoriesResource",
- "MemoriesResourceWithRawResponse",
- "AsyncMemoriesResourceWithRawResponse",
- "MemoriesResourceWithStreamingResponse",
- "AsyncMemoriesResourceWithStreamingResponse",
"SearchResource",
"AsyncSearchResource",
"SearchResourceWithRawResponse",
diff --git a/src/supermemory/resources/connections.py b/src/supermemory/resources/connections.py
index af94f406..dfd73a81 100644
--- a/src/supermemory/resources/connections.py
+++ b/src/supermemory/resources/connections.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Dict, List, Union, Optional
+from typing import Dict, Union, Optional
from typing_extensions import Literal
import httpx
@@ -15,7 +15,7 @@
connection_list_documents_params,
connection_delete_by_provider_params,
)
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
@@ -61,7 +61,7 @@ def create(
self,
provider: Literal["notion", "google-drive", "onedrive"],
*,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
+ container_tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
document_limit: int | NotGiven = NOT_GIVEN,
metadata: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
redirect_url: str | NotGiven = NOT_GIVEN,
@@ -106,7 +106,7 @@ def create(
def list(
self,
*,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
+ container_tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -174,7 +174,7 @@ def delete_by_provider(
self,
provider: Literal["notion", "google-drive", "onedrive"],
*,
- container_tags: List[str],
+ container_tags: SequenceNotStr[str],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -247,7 +247,7 @@ def get_by_tags(
self,
provider: Literal["notion", "google-drive", "onedrive"],
*,
- container_tags: List[str],
+ container_tags: SequenceNotStr[str],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -286,7 +286,7 @@ def import_(
self,
provider: Literal["notion", "google-drive", "onedrive"],
*,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
+ container_tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -324,7 +324,7 @@ def list_documents(
self,
provider: Literal["notion", "google-drive", "onedrive"],
*,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
+ container_tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -384,7 +384,7 @@ async def create(
self,
provider: Literal["notion", "google-drive", "onedrive"],
*,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
+ container_tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
document_limit: int | NotGiven = NOT_GIVEN,
metadata: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN,
redirect_url: str | NotGiven = NOT_GIVEN,
@@ -429,7 +429,7 @@ async def create(
async def list(
self,
*,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
+ container_tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -499,7 +499,7 @@ async def delete_by_provider(
self,
provider: Literal["notion", "google-drive", "onedrive"],
*,
- container_tags: List[str],
+ container_tags: SequenceNotStr[str],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -572,7 +572,7 @@ async def get_by_tags(
self,
provider: Literal["notion", "google-drive", "onedrive"],
*,
- container_tags: List[str],
+ container_tags: SequenceNotStr[str],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -611,7 +611,7 @@ async def import_(
self,
provider: Literal["notion", "google-drive", "onedrive"],
*,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
+ container_tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -651,7 +651,7 @@ async def list_documents(
self,
provider: Literal["notion", "google-drive", "onedrive"],
*,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
+ container_tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
diff --git a/src/supermemory/resources/memories.py b/src/supermemory/resources/memories.py
deleted file mode 100644
index 2f07229c..00000000
--- a/src/supermemory/resources/memories.py
+++ /dev/null
@@ -1,806 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Mapping, cast
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import memory_add_params, memory_list_params, memory_update_params, memory_upload_file_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, FileTypes
-from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.memory_add_response import MemoryAddResponse
-from ..types.memory_get_response import MemoryGetResponse
-from ..types.memory_list_response import MemoryListResponse
-from ..types.memory_update_response import MemoryUpdateResponse
-from ..types.memory_upload_file_response import MemoryUploadFileResponse
-
-__all__ = ["MemoriesResource", "AsyncMemoriesResource"]
-
-
-class MemoriesResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> MemoriesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/supermemoryai/python-sdk#accessing-raw-response-data-eg-headers
- """
- return MemoriesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> MemoriesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/supermemoryai/python-sdk#with_streaming_response
- """
- return MemoriesResourceWithStreamingResponse(self)
-
- def update(
- self,
- id: str,
- *,
- container_tag: str | NotGiven = NOT_GIVEN,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
- content: str | NotGiven = NOT_GIVEN,
- custom_id: str | NotGiven = NOT_GIVEN,
- metadata: Dict[str, Union[str, float, bool]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MemoryUpdateResponse:
- """
- Update a memory with any content type (text, url, file, etc.) and metadata
-
- Args:
- container_tag: Optional tag this memory should be containerized by. This can be an ID for your
- user, a project ID, or any other identifier you wish to use to group memories.
-
- container_tags: (DEPRECATED: Use containerTag instead) Optional tags this memory should be
- containerized by. This can be an ID for your user, a project ID, or any other
- identifier you wish to use to group memories.
-
- content: The content to extract and process into a memory. This can be a URL to a
- website, a PDF, an image, or a video.
-
- Plaintext: Any plaintext format
-
- URL: A URL to a website, PDF, image, or video
-
- We automatically detect the content type from the url's response format.
-
- custom_id: Optional custom ID of the memory. This could be an ID from your database that
- will uniquely identify this memory.
-
- metadata: Optional metadata for the memory. This is used to store additional information
- about the memory. You can use this to store any additional information you need
- about the memory. Metadata can be filtered through. Keys must be strings and are
- case sensitive. Values can be strings, numbers, or booleans. You cannot nest
- objects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- return self._patch(
- f"/v3/memories/{id}",
- body=maybe_transform(
- {
- "container_tag": container_tag,
- "container_tags": container_tags,
- "content": content,
- "custom_id": custom_id,
- "metadata": metadata,
- },
- memory_update_params.MemoryUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MemoryUpdateResponse,
- )
-
- def list(
- self,
- *,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
- filters: str | NotGiven = NOT_GIVEN,
- include_content: bool | NotGiven = NOT_GIVEN,
- limit: Union[str, float] | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- page: Union[str, float] | NotGiven = NOT_GIVEN,
- sort: Literal["createdAt", "updatedAt"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MemoryListResponse:
- """
- Retrieves a paginated list of memories with their metadata and workflow status
-
- Args:
- container_tags: Optional tags this memory should be containerized by. This can be an ID for your
- user, a project ID, or any other identifier you wish to use to group memories.
-
- filters: Optional filters to apply to the search
-
- include_content: Whether to include the content field in the response. Warning: This can make
- responses significantly larger.
-
- limit: Number of items per page
-
- order: Sort order
-
- page: Page number to fetch
-
- sort: Field to sort by
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v3/memories/list",
- body=maybe_transform(
- {
- "container_tags": container_tags,
- "filters": filters,
- "include_content": include_content,
- "limit": limit,
- "order": order,
- "page": page,
- "sort": sort,
- },
- memory_list_params.MemoryListParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MemoryListResponse,
- )
-
- def delete(
- self,
- id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> None:
- """
- Delete a memory by ID
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._delete(
- f"/v3/memories/{id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- def add(
- self,
- *,
- container_tag: str | NotGiven = NOT_GIVEN,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
- content: str | NotGiven = NOT_GIVEN,
- custom_id: str | NotGiven = NOT_GIVEN,
- metadata: Dict[str, Union[str, float, bool]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MemoryAddResponse:
- """
- Add a memory with any content type (text, url, file, etc.) and metadata
-
- Args:
- container_tag: Optional tag this memory should be containerized by. This can be an ID for your
- user, a project ID, or any other identifier you wish to use to group memories.
-
- container_tags: (DEPRECATED: Use containerTag instead) Optional tags this memory should be
- containerized by. This can be an ID for your user, a project ID, or any other
- identifier you wish to use to group memories.
-
- content: The content to extract and process into a memory. This can be a URL to a
- website, a PDF, an image, or a video.
-
- Plaintext: Any plaintext format
-
- URL: A URL to a website, PDF, image, or video
-
- We automatically detect the content type from the url's response format.
-
- custom_id: Optional custom ID of the memory. This could be an ID from your database that
- will uniquely identify this memory.
-
- metadata: Optional metadata for the memory. This is used to store additional information
- about the memory. You can use this to store any additional information you need
- about the memory. Metadata can be filtered through. Keys must be strings and are
- case sensitive. Values can be strings, numbers, or booleans. You cannot nest
- objects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v3/memories",
- body=maybe_transform(
- {
- "container_tag": container_tag,
- "container_tags": container_tags,
- "content": content,
- "custom_id": custom_id,
- "metadata": metadata,
- },
- memory_add_params.MemoryAddParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MemoryAddResponse,
- )
-
- def get(
- self,
- id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MemoryGetResponse:
- """
- Get a memory by ID
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- return self._get(
- f"/v3/memories/{id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MemoryGetResponse,
- )
-
- def upload_file(
- self,
- *,
- file: FileTypes,
- container_tags: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MemoryUploadFileResponse:
- """
- Upload a file to be processed
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "container_tags": container_tags,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return self._post(
- "/v3/memories/file",
- body=maybe_transform(body, memory_upload_file_params.MemoryUploadFileParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MemoryUploadFileResponse,
- )
-
-
-class AsyncMemoriesResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncMemoriesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/supermemoryai/python-sdk#accessing-raw-response-data-eg-headers
- """
- return AsyncMemoriesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncMemoriesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/supermemoryai/python-sdk#with_streaming_response
- """
- return AsyncMemoriesResourceWithStreamingResponse(self)
-
- async def update(
- self,
- id: str,
- *,
- container_tag: str | NotGiven = NOT_GIVEN,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
- content: str | NotGiven = NOT_GIVEN,
- custom_id: str | NotGiven = NOT_GIVEN,
- metadata: Dict[str, Union[str, float, bool]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MemoryUpdateResponse:
- """
- Update a memory with any content type (text, url, file, etc.) and metadata
-
- Args:
- container_tag: Optional tag this memory should be containerized by. This can be an ID for your
- user, a project ID, or any other identifier you wish to use to group memories.
-
- container_tags: (DEPRECATED: Use containerTag instead) Optional tags this memory should be
- containerized by. This can be an ID for your user, a project ID, or any other
- identifier you wish to use to group memories.
-
- content: The content to extract and process into a memory. This can be a URL to a
- website, a PDF, an image, or a video.
-
- Plaintext: Any plaintext format
-
- URL: A URL to a website, PDF, image, or video
-
- We automatically detect the content type from the url's response format.
-
- custom_id: Optional custom ID of the memory. This could be an ID from your database that
- will uniquely identify this memory.
-
- metadata: Optional metadata for the memory. This is used to store additional information
- about the memory. You can use this to store any additional information you need
- about the memory. Metadata can be filtered through. Keys must be strings and are
- case sensitive. Values can be strings, numbers, or booleans. You cannot nest
- objects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- return await self._patch(
- f"/v3/memories/{id}",
- body=await async_maybe_transform(
- {
- "container_tag": container_tag,
- "container_tags": container_tags,
- "content": content,
- "custom_id": custom_id,
- "metadata": metadata,
- },
- memory_update_params.MemoryUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MemoryUpdateResponse,
- )
-
- async def list(
- self,
- *,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
- filters: str | NotGiven = NOT_GIVEN,
- include_content: bool | NotGiven = NOT_GIVEN,
- limit: Union[str, float] | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- page: Union[str, float] | NotGiven = NOT_GIVEN,
- sort: Literal["createdAt", "updatedAt"] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MemoryListResponse:
- """
- Retrieves a paginated list of memories with their metadata and workflow status
-
- Args:
- container_tags: Optional tags this memory should be containerized by. This can be an ID for your
- user, a project ID, or any other identifier you wish to use to group memories.
-
- filters: Optional filters to apply to the search
-
- include_content: Whether to include the content field in the response. Warning: This can make
- responses significantly larger.
-
- limit: Number of items per page
-
- order: Sort order
-
- page: Page number to fetch
-
- sort: Field to sort by
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v3/memories/list",
- body=await async_maybe_transform(
- {
- "container_tags": container_tags,
- "filters": filters,
- "include_content": include_content,
- "limit": limit,
- "order": order,
- "page": page,
- "sort": sort,
- },
- memory_list_params.MemoryListParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MemoryListResponse,
- )
-
- async def delete(
- self,
- id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> None:
- """
- Delete a memory by ID
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._delete(
- f"/v3/memories/{id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- async def add(
- self,
- *,
- container_tag: str | NotGiven = NOT_GIVEN,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
- content: str | NotGiven = NOT_GIVEN,
- custom_id: str | NotGiven = NOT_GIVEN,
- metadata: Dict[str, Union[str, float, bool]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MemoryAddResponse:
- """
- Add a memory with any content type (text, url, file, etc.) and metadata
-
- Args:
- container_tag: Optional tag this memory should be containerized by. This can be an ID for your
- user, a project ID, or any other identifier you wish to use to group memories.
-
- container_tags: (DEPRECATED: Use containerTag instead) Optional tags this memory should be
- containerized by. This can be an ID for your user, a project ID, or any other
- identifier you wish to use to group memories.
-
- content: The content to extract and process into a memory. This can be a URL to a
- website, a PDF, an image, or a video.
-
- Plaintext: Any plaintext format
-
- URL: A URL to a website, PDF, image, or video
-
- We automatically detect the content type from the url's response format.
-
- custom_id: Optional custom ID of the memory. This could be an ID from your database that
- will uniquely identify this memory.
-
- metadata: Optional metadata for the memory. This is used to store additional information
- about the memory. You can use this to store any additional information you need
- about the memory. Metadata can be filtered through. Keys must be strings and are
- case sensitive. Values can be strings, numbers, or booleans. You cannot nest
- objects.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v3/memories",
- body=await async_maybe_transform(
- {
- "container_tag": container_tag,
- "container_tags": container_tags,
- "content": content,
- "custom_id": custom_id,
- "metadata": metadata,
- },
- memory_add_params.MemoryAddParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MemoryAddResponse,
- )
-
- async def get(
- self,
- id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MemoryGetResponse:
- """
- Get a memory by ID
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- return await self._get(
- f"/v3/memories/{id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MemoryGetResponse,
- )
-
- async def upload_file(
- self,
- *,
- file: FileTypes,
- container_tags: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> MemoryUploadFileResponse:
- """
- Upload a file to be processed
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "file": file,
- "container_tags": container_tags,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- "/v3/memories/file",
- body=await async_maybe_transform(body, memory_upload_file_params.MemoryUploadFileParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=MemoryUploadFileResponse,
- )
-
-
-class MemoriesResourceWithRawResponse:
- def __init__(self, memories: MemoriesResource) -> None:
- self._memories = memories
-
- self.update = to_raw_response_wrapper(
- memories.update,
- )
- self.list = to_raw_response_wrapper(
- memories.list,
- )
- self.delete = to_raw_response_wrapper(
- memories.delete,
- )
- self.add = to_raw_response_wrapper(
- memories.add,
- )
- self.get = to_raw_response_wrapper(
- memories.get,
- )
- self.upload_file = to_raw_response_wrapper(
- memories.upload_file,
- )
-
-
-class AsyncMemoriesResourceWithRawResponse:
- def __init__(self, memories: AsyncMemoriesResource) -> None:
- self._memories = memories
-
- self.update = async_to_raw_response_wrapper(
- memories.update,
- )
- self.list = async_to_raw_response_wrapper(
- memories.list,
- )
- self.delete = async_to_raw_response_wrapper(
- memories.delete,
- )
- self.add = async_to_raw_response_wrapper(
- memories.add,
- )
- self.get = async_to_raw_response_wrapper(
- memories.get,
- )
- self.upload_file = async_to_raw_response_wrapper(
- memories.upload_file,
- )
-
-
-class MemoriesResourceWithStreamingResponse:
- def __init__(self, memories: MemoriesResource) -> None:
- self._memories = memories
-
- self.update = to_streamed_response_wrapper(
- memories.update,
- )
- self.list = to_streamed_response_wrapper(
- memories.list,
- )
- self.delete = to_streamed_response_wrapper(
- memories.delete,
- )
- self.add = to_streamed_response_wrapper(
- memories.add,
- )
- self.get = to_streamed_response_wrapper(
- memories.get,
- )
- self.upload_file = to_streamed_response_wrapper(
- memories.upload_file,
- )
-
-
-class AsyncMemoriesResourceWithStreamingResponse:
- def __init__(self, memories: AsyncMemoriesResource) -> None:
- self._memories = memories
-
- self.update = async_to_streamed_response_wrapper(
- memories.update,
- )
- self.list = async_to_streamed_response_wrapper(
- memories.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- memories.delete,
- )
- self.add = async_to_streamed_response_wrapper(
- memories.add,
- )
- self.get = async_to_streamed_response_wrapper(
- memories.get,
- )
- self.upload_file = async_to_streamed_response_wrapper(
- memories.upload_file,
- )
diff --git a/src/supermemory/resources/search.py b/src/supermemory/resources/search.py
index bb9e8ff9..6d9eecb4 100644
--- a/src/supermemory/resources/search.py
+++ b/src/supermemory/resources/search.py
@@ -8,7 +8,7 @@
import httpx
from ..types import search_execute_params, search_memories_params, search_documents_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
@@ -52,7 +52,7 @@ def documents(
q: str,
categories_filter: List[Literal["technology", "science", "business", "health"]] | NotGiven = NOT_GIVEN,
chunk_threshold: float | NotGiven = NOT_GIVEN,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
+ container_tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
doc_id: str | NotGiven = NOT_GIVEN,
document_threshold: float | NotGiven = NOT_GIVEN,
filters: search_documents_params.Filters | NotGiven = NOT_GIVEN,
@@ -82,7 +82,7 @@ def documents(
results)
container_tags: Optional tags this search should be containerized by. This can be an ID for your
- user, a project ID, or any other identifier you wish to use to filter memories.
+ user, a project ID, or any other identifier you wish to use to filter documents.
doc_id: Optional document ID to search within. You can use this to find chunks in a very
large document.
@@ -151,7 +151,7 @@ def execute(
q: str,
categories_filter: List[Literal["technology", "science", "business", "health"]] | NotGiven = NOT_GIVEN,
chunk_threshold: float | NotGiven = NOT_GIVEN,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
+ container_tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
doc_id: str | NotGiven = NOT_GIVEN,
document_threshold: float | NotGiven = NOT_GIVEN,
filters: search_execute_params.Filters | NotGiven = NOT_GIVEN,
@@ -181,7 +181,7 @@ def execute(
results)
container_tags: Optional tags this search should be containerized by. This can be an ID for your
- user, a project ID, or any other identifier you wish to use to filter memories.
+ user, a project ID, or any other identifier you wish to use to filter documents.
doc_id: Optional document ID to search within. You can use this to find chunks in a very
large document.
@@ -341,7 +341,7 @@ async def documents(
q: str,
categories_filter: List[Literal["technology", "science", "business", "health"]] | NotGiven = NOT_GIVEN,
chunk_threshold: float | NotGiven = NOT_GIVEN,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
+ container_tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
doc_id: str | NotGiven = NOT_GIVEN,
document_threshold: float | NotGiven = NOT_GIVEN,
filters: search_documents_params.Filters | NotGiven = NOT_GIVEN,
@@ -371,7 +371,7 @@ async def documents(
results)
container_tags: Optional tags this search should be containerized by. This can be an ID for your
- user, a project ID, or any other identifier you wish to use to filter memories.
+ user, a project ID, or any other identifier you wish to use to filter documents.
doc_id: Optional document ID to search within. You can use this to find chunks in a very
large document.
@@ -440,7 +440,7 @@ async def execute(
q: str,
categories_filter: List[Literal["technology", "science", "business", "health"]] | NotGiven = NOT_GIVEN,
chunk_threshold: float | NotGiven = NOT_GIVEN,
- container_tags: List[str] | NotGiven = NOT_GIVEN,
+ container_tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
doc_id: str | NotGiven = NOT_GIVEN,
document_threshold: float | NotGiven = NOT_GIVEN,
filters: search_execute_params.Filters | NotGiven = NOT_GIVEN,
@@ -470,7 +470,7 @@ async def execute(
results)
container_tags: Optional tags this search should be containerized by. This can be an ID for your
- user, a project ID, or any other identifier you wish to use to filter memories.
+ user, a project ID, or any other identifier you wish to use to filter documents.
doc_id: Optional document ID to search within. You can use this to find chunks in a very
large document.
diff --git a/src/supermemory/types/__init__.py b/src/supermemory/types/__init__.py
index 7224f980..6a34d7ba 100644
--- a/src/supermemory/types/__init__.py
+++ b/src/supermemory/types/__init__.py
@@ -2,17 +2,10 @@
from __future__ import annotations
-from .memory_add_params import MemoryAddParams as MemoryAddParams
-from .memory_list_params import MemoryListParams as MemoryListParams
-from .memory_add_response import MemoryAddResponse as MemoryAddResponse
-from .memory_get_response import MemoryGetResponse as MemoryGetResponse
-from .memory_list_response import MemoryListResponse as MemoryListResponse
-from .memory_update_params import MemoryUpdateParams as MemoryUpdateParams
from .setting_get_response import SettingGetResponse as SettingGetResponse
from .search_execute_params import SearchExecuteParams as SearchExecuteParams
from .setting_update_params import SettingUpdateParams as SettingUpdateParams
from .connection_list_params import ConnectionListParams as ConnectionListParams
-from .memory_update_response import MemoryUpdateResponse as MemoryUpdateResponse
from .search_memories_params import SearchMemoriesParams as SearchMemoriesParams
from .search_documents_params import SearchDocumentsParams as SearchDocumentsParams
from .search_execute_response import SearchExecuteResponse as SearchExecuteResponse
@@ -21,11 +14,9 @@
from .connection_import_params import ConnectionImportParams as ConnectionImportParams
from .connection_list_response import ConnectionListResponse as ConnectionListResponse
from .search_memories_response import SearchMemoriesResponse as SearchMemoriesResponse
-from .memory_upload_file_params import MemoryUploadFileParams as MemoryUploadFileParams
from .search_documents_response import SearchDocumentsResponse as SearchDocumentsResponse
from .connection_create_response import ConnectionCreateResponse as ConnectionCreateResponse
from .connection_import_response import ConnectionImportResponse as ConnectionImportResponse
-from .memory_upload_file_response import MemoryUploadFileResponse as MemoryUploadFileResponse
from .connection_get_by_id_response import ConnectionGetByIDResponse as ConnectionGetByIDResponse
from .connection_get_by_tags_params import ConnectionGetByTagsParams as ConnectionGetByTagsParams
from .connection_get_by_tags_response import ConnectionGetByTagsResponse as ConnectionGetByTagsResponse
diff --git a/src/supermemory/types/connection_create_params.py b/src/supermemory/types/connection_create_params.py
index 8bb9e071..1c8d823f 100644
--- a/src/supermemory/types/connection_create_params.py
+++ b/src/supermemory/types/connection_create_params.py
@@ -2,16 +2,17 @@
from __future__ import annotations
-from typing import Dict, List, Union, Optional
+from typing import Dict, Union, Optional
from typing_extensions import Annotated, TypedDict
+from .._types import SequenceNotStr
from .._utils import PropertyInfo
__all__ = ["ConnectionCreateParams"]
class ConnectionCreateParams(TypedDict, total=False):
- container_tags: Annotated[List[str], PropertyInfo(alias="containerTags")]
+ container_tags: Annotated[SequenceNotStr[str], PropertyInfo(alias="containerTags")]
document_limit: Annotated[int, PropertyInfo(alias="documentLimit")]
diff --git a/src/supermemory/types/connection_delete_by_provider_params.py b/src/supermemory/types/connection_delete_by_provider_params.py
index 5b7fdaeb..09e136df 100644
--- a/src/supermemory/types/connection_delete_by_provider_params.py
+++ b/src/supermemory/types/connection_delete_by_provider_params.py
@@ -2,14 +2,14 @@
from __future__ import annotations
-from typing import List
from typing_extensions import Required, Annotated, TypedDict
+from .._types import SequenceNotStr
from .._utils import PropertyInfo
__all__ = ["ConnectionDeleteByProviderParams"]
class ConnectionDeleteByProviderParams(TypedDict, total=False):
- container_tags: Required[Annotated[List[str], PropertyInfo(alias="containerTags")]]
+ container_tags: Required[Annotated[SequenceNotStr[str], PropertyInfo(alias="containerTags")]]
"""Optional comma-separated list of container tags to filter connections by"""
diff --git a/src/supermemory/types/connection_get_by_tags_params.py b/src/supermemory/types/connection_get_by_tags_params.py
index a38fa389..51849b90 100644
--- a/src/supermemory/types/connection_get_by_tags_params.py
+++ b/src/supermemory/types/connection_get_by_tags_params.py
@@ -2,14 +2,14 @@
from __future__ import annotations
-from typing import List
from typing_extensions import Required, Annotated, TypedDict
+from .._types import SequenceNotStr
from .._utils import PropertyInfo
__all__ = ["ConnectionGetByTagsParams"]
class ConnectionGetByTagsParams(TypedDict, total=False):
- container_tags: Required[Annotated[List[str], PropertyInfo(alias="containerTags")]]
+ container_tags: Required[Annotated[SequenceNotStr[str], PropertyInfo(alias="containerTags")]]
"""Comma-separated list of container tags to filter connection by"""
diff --git a/src/supermemory/types/connection_import_params.py b/src/supermemory/types/connection_import_params.py
index e25d8b1e..2e19b220 100644
--- a/src/supermemory/types/connection_import_params.py
+++ b/src/supermemory/types/connection_import_params.py
@@ -2,14 +2,14 @@
from __future__ import annotations
-from typing import List
from typing_extensions import Annotated, TypedDict
+from .._types import SequenceNotStr
from .._utils import PropertyInfo
__all__ = ["ConnectionImportParams"]
class ConnectionImportParams(TypedDict, total=False):
- container_tags: Annotated[List[str], PropertyInfo(alias="containerTags")]
+ container_tags: Annotated[SequenceNotStr[str], PropertyInfo(alias="containerTags")]
"""Optional comma-separated list of container tags to filter connections by"""
diff --git a/src/supermemory/types/connection_list_documents_params.py b/src/supermemory/types/connection_list_documents_params.py
index b83f00ff..8673d26d 100644
--- a/src/supermemory/types/connection_list_documents_params.py
+++ b/src/supermemory/types/connection_list_documents_params.py
@@ -2,14 +2,14 @@
from __future__ import annotations
-from typing import List
from typing_extensions import Annotated, TypedDict
+from .._types import SequenceNotStr
from .._utils import PropertyInfo
__all__ = ["ConnectionListDocumentsParams"]
class ConnectionListDocumentsParams(TypedDict, total=False):
- container_tags: Annotated[List[str], PropertyInfo(alias="containerTags")]
+ container_tags: Annotated[SequenceNotStr[str], PropertyInfo(alias="containerTags")]
"""Optional comma-separated list of container tags to filter documents by"""
diff --git a/src/supermemory/types/connection_list_params.py b/src/supermemory/types/connection_list_params.py
index ad7b98eb..2948fa31 100644
--- a/src/supermemory/types/connection_list_params.py
+++ b/src/supermemory/types/connection_list_params.py
@@ -2,14 +2,14 @@
from __future__ import annotations
-from typing import List
from typing_extensions import Annotated, TypedDict
+from .._types import SequenceNotStr
from .._utils import PropertyInfo
__all__ = ["ConnectionListParams"]
class ConnectionListParams(TypedDict, total=False):
- container_tags: Annotated[List[str], PropertyInfo(alias="containerTags")]
+ container_tags: Annotated[SequenceNotStr[str], PropertyInfo(alias="containerTags")]
"""Optional comma-separated list of container tags to filter documents by"""
diff --git a/src/supermemory/types/memory_add_params.py b/src/supermemory/types/memory_add_params.py
deleted file mode 100644
index c822f78e..00000000
--- a/src/supermemory/types/memory_add_params.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union
-from typing_extensions import Annotated, TypedDict
-
-from .._utils import PropertyInfo
-
-__all__ = ["MemoryAddParams"]
-
-
-class MemoryAddParams(TypedDict, total=False):
- container_tag: Annotated[str, PropertyInfo(alias="containerTag")]
- """Optional tag this memory should be containerized by.
-
- This can be an ID for your user, a project ID, or any other identifier you wish
- to use to group memories.
- """
-
- container_tags: Annotated[List[str], PropertyInfo(alias="containerTags")]
- """
- (DEPRECATED: Use containerTag instead) Optional tags this memory should be
- containerized by. This can be an ID for your user, a project ID, or any other
- identifier you wish to use to group memories.
- """
-
- content: str
- """The content to extract and process into a memory.
-
- This can be a URL to a website, a PDF, an image, or a video.
-
- Plaintext: Any plaintext format
-
- URL: A URL to a website, PDF, image, or video
-
- We automatically detect the content type from the url's response format.
- """
-
- custom_id: Annotated[str, PropertyInfo(alias="customId")]
- """Optional custom ID of the memory.
-
- This could be an ID from your database that will uniquely identify this memory.
- """
-
- metadata: Dict[str, Union[str, float, bool]]
- """Optional metadata for the memory.
-
- This is used to store additional information about the memory. You can use this
- to store any additional information you need about the memory. Metadata can be
- filtered through. Keys must be strings and are case sensitive. Values can be
- strings, numbers, or booleans. You cannot nest objects.
- """
diff --git a/src/supermemory/types/memory_add_response.py b/src/supermemory/types/memory_add_response.py
deleted file mode 100644
index 704918e4..00000000
--- a/src/supermemory/types/memory_add_response.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-
-__all__ = ["MemoryAddResponse"]
-
-
-class MemoryAddResponse(BaseModel):
- id: str
-
- status: str
diff --git a/src/supermemory/types/memory_get_response.py b/src/supermemory/types/memory_get_response.py
deleted file mode 100644
index 335eded2..00000000
--- a/src/supermemory/types/memory_get_response.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from datetime import datetime
-from typing_extensions import Literal
-
-from pydantic import Field as FieldInfo
-
-from .._models import BaseModel
-
-__all__ = ["MemoryGetResponse"]
-
-
-class MemoryGetResponse(BaseModel):
- id: str
- """Unique identifier of the memory."""
-
- connection_id: Optional[str] = FieldInfo(alias="connectionId", default=None)
- """Optional ID of connection the memory was created from.
-
- This is useful for identifying the source of the memory.
- """
-
- content: Optional[str] = None
- """The content to extract and process into a memory.
-
- This can be a URL to a website, a PDF, an image, or a video.
-
- Plaintext: Any plaintext format
-
- URL: A URL to a website, PDF, image, or video
-
- We automatically detect the content type from the url's response format.
- """
-
- created_at: datetime = FieldInfo(alias="createdAt")
- """Creation timestamp"""
-
- custom_id: Optional[str] = FieldInfo(alias="customId", default=None)
- """Optional custom ID of the memory.
-
- This could be an ID from your database that will uniquely identify this memory.
- """
-
- metadata: Union[str, float, bool, Dict[str, object], List[object], None] = None
- """Optional metadata for the memory.
-
- This is used to store additional information about the memory. You can use this
- to store any additional information you need about the memory. Metadata can be
- filtered through. Keys must be strings and are case sensitive. Values can be
- strings, numbers, or booleans. You cannot nest objects.
- """
-
- og_image: Optional[str] = FieldInfo(alias="ogImage", default=None)
-
- source: Optional[str] = None
- """Source of the memory"""
-
- status: Literal["unknown", "queued", "extracting", "chunking", "embedding", "indexing", "done", "failed"]
- """Status of the memory"""
-
- summary: Optional[str] = None
- """Summary of the memory content"""
-
- summary_embedding_model: Optional[str] = FieldInfo(alias="summaryEmbeddingModel", default=None)
-
- summary_embedding_model_new: Optional[str] = FieldInfo(alias="summaryEmbeddingModelNew", default=None)
-
- summary_embedding_new: Optional[List[float]] = FieldInfo(alias="summaryEmbeddingNew", default=None)
-
- title: Optional[str] = None
- """Title of the memory"""
-
- type: Literal[
- "text",
- "pdf",
- "tweet",
- "google_doc",
- "google_slide",
- "google_sheet",
- "image",
- "video",
- "notion_doc",
- "webpage",
- "onedrive",
- ]
- """Type of the memory"""
-
- updated_at: datetime = FieldInfo(alias="updatedAt")
- """Last update timestamp"""
-
- container_tags: Optional[List[str]] = FieldInfo(alias="containerTags", default=None)
- """Optional tags this memory should be containerized by.
-
- This can be an ID for your user, a project ID, or any other identifier you wish
- to use to group memories.
- """
-
- raw: None = None
- """Raw content of the memory"""
-
- url: Optional[str] = None
- """URL of the memory"""
diff --git a/src/supermemory/types/memory_list_params.py b/src/supermemory/types/memory_list_params.py
deleted file mode 100644
index 883ea8b9..00000000
--- a/src/supermemory/types/memory_list_params.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union
-from typing_extensions import Literal, Annotated, TypedDict
-
-from .._utils import PropertyInfo
-
-__all__ = ["MemoryListParams"]
-
-
-class MemoryListParams(TypedDict, total=False):
- container_tags: Annotated[List[str], PropertyInfo(alias="containerTags")]
- """Optional tags this memory should be containerized by.
-
- This can be an ID for your user, a project ID, or any other identifier you wish
- to use to group memories.
- """
-
- filters: str
- """Optional filters to apply to the search"""
-
- include_content: Annotated[bool, PropertyInfo(alias="includeContent")]
- """Whether to include the content field in the response.
-
- Warning: This can make responses significantly larger.
- """
-
- limit: Union[str, float]
- """Number of items per page"""
-
- order: Literal["asc", "desc"]
- """Sort order"""
-
- page: Union[str, float]
- """Page number to fetch"""
-
- sort: Literal["createdAt", "updatedAt"]
- """Field to sort by"""
diff --git a/src/supermemory/types/memory_list_response.py b/src/supermemory/types/memory_list_response.py
deleted file mode 100644
index d2fdcb6a..00000000
--- a/src/supermemory/types/memory_list_response.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from datetime import datetime
-from typing_extensions import Literal
-
-from pydantic import Field as FieldInfo
-
-from .._models import BaseModel
-
-__all__ = ["MemoryListResponse", "Memory", "Pagination"]
-
-
-class Memory(BaseModel):
- id: str
- """Unique identifier of the memory."""
-
- connection_id: Optional[str] = FieldInfo(alias="connectionId", default=None)
- """Optional ID of connection the memory was created from.
-
- This is useful for identifying the source of the memory.
- """
-
- created_at: datetime = FieldInfo(alias="createdAt")
- """Creation timestamp"""
-
- custom_id: Optional[str] = FieldInfo(alias="customId", default=None)
- """Optional custom ID of the memory.
-
- This could be an ID from your database that will uniquely identify this memory.
- """
-
- metadata: Union[str, float, bool, Dict[str, object], List[object], None] = None
- """Optional metadata for the memory.
-
- This is used to store additional information about the memory. You can use this
- to store any additional information you need about the memory. Metadata can be
- filtered through. Keys must be strings and are case sensitive. Values can be
- strings, numbers, or booleans. You cannot nest objects.
- """
-
- status: Literal["unknown", "queued", "extracting", "chunking", "embedding", "indexing", "done", "failed"]
- """Status of the memory"""
-
- summary: Optional[str] = None
- """Summary of the memory content"""
-
- title: Optional[str] = None
- """Title of the memory"""
-
- type: Literal[
- "text",
- "pdf",
- "tweet",
- "google_doc",
- "google_slide",
- "google_sheet",
- "image",
- "video",
- "notion_doc",
- "webpage",
- "onedrive",
- ]
- """Type of the memory"""
-
- updated_at: datetime = FieldInfo(alias="updatedAt")
- """Last update timestamp"""
-
- container_tags: Optional[List[str]] = FieldInfo(alias="containerTags", default=None)
- """Optional tags this memory should be containerized by.
-
- This can be an ID for your user, a project ID, or any other identifier you wish
- to use to group memories.
- """
-
- content: Optional[str] = None
- """Content of the memory (only included when includeContent=true)"""
-
-
-class Pagination(BaseModel):
- current_page: float = FieldInfo(alias="currentPage")
-
- limit: float
-
- total_items: float = FieldInfo(alias="totalItems")
-
- total_pages: float = FieldInfo(alias="totalPages")
-
-
-class MemoryListResponse(BaseModel):
- memories: List[Memory]
-
- pagination: Pagination
- """Pagination metadata"""
diff --git a/src/supermemory/types/memory_update_params.py b/src/supermemory/types/memory_update_params.py
deleted file mode 100644
index 0faf4478..00000000
--- a/src/supermemory/types/memory_update_params.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union
-from typing_extensions import Annotated, TypedDict
-
-from .._utils import PropertyInfo
-
-__all__ = ["MemoryUpdateParams"]
-
-
-class MemoryUpdateParams(TypedDict, total=False):
- container_tag: Annotated[str, PropertyInfo(alias="containerTag")]
- """Optional tag this memory should be containerized by.
-
- This can be an ID for your user, a project ID, or any other identifier you wish
- to use to group memories.
- """
-
- container_tags: Annotated[List[str], PropertyInfo(alias="containerTags")]
- """
- (DEPRECATED: Use containerTag instead) Optional tags this memory should be
- containerized by. This can be an ID for your user, a project ID, or any other
- identifier you wish to use to group memories.
- """
-
- content: str
- """The content to extract and process into a memory.
-
- This can be a URL to a website, a PDF, an image, or a video.
-
- Plaintext: Any plaintext format
-
- URL: A URL to a website, PDF, image, or video
-
- We automatically detect the content type from the url's response format.
- """
-
- custom_id: Annotated[str, PropertyInfo(alias="customId")]
- """Optional custom ID of the memory.
-
- This could be an ID from your database that will uniquely identify this memory.
- """
-
- metadata: Dict[str, Union[str, float, bool]]
- """Optional metadata for the memory.
-
- This is used to store additional information about the memory. You can use this
- to store any additional information you need about the memory. Metadata can be
- filtered through. Keys must be strings and are case sensitive. Values can be
- strings, numbers, or booleans. You cannot nest objects.
- """
diff --git a/src/supermemory/types/memory_update_response.py b/src/supermemory/types/memory_update_response.py
deleted file mode 100644
index 132b8cf9..00000000
--- a/src/supermemory/types/memory_update_response.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-
-__all__ = ["MemoryUpdateResponse"]
-
-
-class MemoryUpdateResponse(BaseModel):
- id: str
-
- status: str
diff --git a/src/supermemory/types/memory_upload_file_params.py b/src/supermemory/types/memory_upload_file_params.py
deleted file mode 100644
index ce4dfc40..00000000
--- a/src/supermemory/types/memory_upload_file_params.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, Annotated, TypedDict
-
-from .._types import FileTypes
-from .._utils import PropertyInfo
-
-__all__ = ["MemoryUploadFileParams"]
-
-
-class MemoryUploadFileParams(TypedDict, total=False):
- file: Required[FileTypes]
-
- container_tags: Annotated[str, PropertyInfo(alias="containerTags")]
diff --git a/src/supermemory/types/memory_upload_file_response.py b/src/supermemory/types/memory_upload_file_response.py
deleted file mode 100644
index f67b958f..00000000
--- a/src/supermemory/types/memory_upload_file_response.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-
-__all__ = ["MemoryUploadFileResponse"]
-
-
-class MemoryUploadFileResponse(BaseModel):
- id: str
-
- status: str
diff --git a/src/supermemory/types/search_documents_params.py b/src/supermemory/types/search_documents_params.py
index b1b2d874..13e0a642 100644
--- a/src/supermemory/types/search_documents_params.py
+++ b/src/supermemory/types/search_documents_params.py
@@ -5,6 +5,7 @@
from typing import Dict, List, Union, Iterable
from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict
+from .._types import SequenceNotStr
from .._utils import PropertyInfo
__all__ = ["SearchDocumentsParams", "Filters", "FiltersUnionMember0"]
@@ -26,11 +27,11 @@ class SearchDocumentsParams(TypedDict, total=False):
(returns lesser chunks, accurate results)
"""
- container_tags: Annotated[List[str], PropertyInfo(alias="containerTags")]
+ container_tags: Annotated[SequenceNotStr[str], PropertyInfo(alias="containerTags")]
"""Optional tags this search should be containerized by.
This can be an ID for your user, a project ID, or any other identifier you wish
- to use to filter memories.
+ to use to filter documents.
"""
doc_id: Annotated[str, PropertyInfo(alias="docId")]
diff --git a/src/supermemory/types/search_execute_params.py b/src/supermemory/types/search_execute_params.py
index db48a814..48032181 100644
--- a/src/supermemory/types/search_execute_params.py
+++ b/src/supermemory/types/search_execute_params.py
@@ -5,6 +5,7 @@
from typing import Dict, List, Union, Iterable
from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict
+from .._types import SequenceNotStr
from .._utils import PropertyInfo
__all__ = ["SearchExecuteParams", "Filters", "FiltersUnionMember0"]
@@ -26,11 +27,11 @@ class SearchExecuteParams(TypedDict, total=False):
(returns lesser chunks, accurate results)
"""
- container_tags: Annotated[List[str], PropertyInfo(alias="containerTags")]
+ container_tags: Annotated[SequenceNotStr[str], PropertyInfo(alias="containerTags")]
"""Optional tags this search should be containerized by.
This can be an ID for your user, a project ID, or any other identifier you wish
- to use to filter memories.
+ to use to filter documents.
"""
doc_id: Annotated[str, PropertyInfo(alias="docId")]
diff --git a/src/supermemory/types/search_memories_response.py b/src/supermemory/types/search_memories_response.py
index 9e18a749..224cf83e 100644
--- a/src/supermemory/types/search_memories_response.py
+++ b/src/supermemory/types/search_memories_response.py
@@ -71,17 +71,20 @@ class ResultDocument(BaseModel):
created_at: datetime = FieldInfo(alias="createdAt")
"""Document creation date"""
+ updated_at: datetime = FieldInfo(alias="updatedAt")
+ """Document last update date"""
+
metadata: Optional[Dict[str, object]] = None
- """Document metadata"""
+ """Document metadata (only included when documents=true)"""
- title: str
- """Document title"""
+ summary: Optional[str] = None
+ """Document summary (only included when summaries=true)"""
- type: str
- """Document type"""
+ title: Optional[str] = None
+ """Document title (only included when documents=true)"""
- updated_at: datetime = FieldInfo(alias="updatedAt")
- """Document last update date"""
+ type: Optional[str] = None
+ """Document type (only included when documents=true)"""
class Result(BaseModel):
diff --git a/tests/api_resources/test_memories.py b/tests/api_resources/test_memories.py
deleted file mode 100644
index 7e02d96d..00000000
--- a/tests/api_resources/test_memories.py
+++ /dev/null
@@ -1,586 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from supermemory import Supermemory, AsyncSupermemory
-from tests.utils import assert_matches_type
-from supermemory.types import (
- MemoryAddResponse,
- MemoryGetResponse,
- MemoryListResponse,
- MemoryUpdateResponse,
- MemoryUploadFileResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestMemories:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_update(self, client: Supermemory) -> None:
- memory = client.memories.update(
- id="id",
- )
- assert_matches_type(MemoryUpdateResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_update_with_all_params(self, client: Supermemory) -> None:
- memory = client.memories.update(
- id="id",
- container_tag="user_123",
- container_tags=["user_123", "project_123"],
- content="This is a detailed article about machine learning concepts...",
- custom_id="mem_abc123",
- metadata={
- "category": "technology",
- "isPublic": True,
- "readingTime": 5,
- "source": "web",
- "tag_1": "ai",
- "tag_2": "machine-learning",
- },
- )
- assert_matches_type(MemoryUpdateResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_raw_response_update(self, client: Supermemory) -> None:
- response = client.memories.with_raw_response.update(
- id="id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- memory = response.parse()
- assert_matches_type(MemoryUpdateResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_streaming_response_update(self, client: Supermemory) -> None:
- with client.memories.with_streaming_response.update(
- id="id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- memory = response.parse()
- assert_matches_type(MemoryUpdateResponse, memory, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_path_params_update(self, client: Supermemory) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- client.memories.with_raw_response.update(
- id="",
- )
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_list(self, client: Supermemory) -> None:
- memory = client.memories.list()
- assert_matches_type(MemoryListResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_list_with_all_params(self, client: Supermemory) -> None:
- memory = client.memories.list(
- container_tags=["user_123", "project_123"],
- filters='{"AND":[{"key":"group","negate":false,"value":"jira_users"},{"filterType":"numeric","key":"timestamp","negate":false,"numericOperator":">","value":"1742745777"}]}',
- include_content=False,
- limit=10,
- order="desc",
- page=1,
- sort="createdAt",
- )
- assert_matches_type(MemoryListResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_raw_response_list(self, client: Supermemory) -> None:
- response = client.memories.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- memory = response.parse()
- assert_matches_type(MemoryListResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_streaming_response_list(self, client: Supermemory) -> None:
- with client.memories.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- memory = response.parse()
- assert_matches_type(MemoryListResponse, memory, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_delete(self, client: Supermemory) -> None:
- memory = client.memories.delete(
- "id",
- )
- assert memory is None
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_raw_response_delete(self, client: Supermemory) -> None:
- response = client.memories.with_raw_response.delete(
- "id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- memory = response.parse()
- assert memory is None
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_streaming_response_delete(self, client: Supermemory) -> None:
- with client.memories.with_streaming_response.delete(
- "id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- memory = response.parse()
- assert memory is None
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_path_params_delete(self, client: Supermemory) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- client.memories.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_add(self, client: Supermemory) -> None:
- memory = client.memories.add()
- assert_matches_type(MemoryAddResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_add_with_all_params(self, client: Supermemory) -> None:
- memory = client.memories.add(
- container_tag="user_123",
- container_tags=["user_123", "project_123"],
- content="This is a detailed article about machine learning concepts...",
- custom_id="mem_abc123",
- metadata={
- "category": "technology",
- "isPublic": True,
- "readingTime": 5,
- "source": "web",
- "tag_1": "ai",
- "tag_2": "machine-learning",
- },
- )
- assert_matches_type(MemoryAddResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_raw_response_add(self, client: Supermemory) -> None:
- response = client.memories.with_raw_response.add()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- memory = response.parse()
- assert_matches_type(MemoryAddResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_streaming_response_add(self, client: Supermemory) -> None:
- with client.memories.with_streaming_response.add() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- memory = response.parse()
- assert_matches_type(MemoryAddResponse, memory, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_get(self, client: Supermemory) -> None:
- memory = client.memories.get(
- "id",
- )
- assert_matches_type(MemoryGetResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_raw_response_get(self, client: Supermemory) -> None:
- response = client.memories.with_raw_response.get(
- "id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- memory = response.parse()
- assert_matches_type(MemoryGetResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_streaming_response_get(self, client: Supermemory) -> None:
- with client.memories.with_streaming_response.get(
- "id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- memory = response.parse()
- assert_matches_type(MemoryGetResponse, memory, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_path_params_get(self, client: Supermemory) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- client.memories.with_raw_response.get(
- "",
- )
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_upload_file(self, client: Supermemory) -> None:
- memory = client.memories.upload_file(
- file=b"raw file contents",
- )
- assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_upload_file_with_all_params(self, client: Supermemory) -> None:
- memory = client.memories.upload_file(
- file=b"raw file contents",
- container_tags="containerTags",
- )
- assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_raw_response_upload_file(self, client: Supermemory) -> None:
- response = client.memories.with_raw_response.upload_file(
- file=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- memory = response.parse()
- assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_streaming_response_upload_file(self, client: Supermemory) -> None:
- with client.memories.with_streaming_response.upload_file(
- file=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- memory = response.parse()
- assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncMemories:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_update(self, async_client: AsyncSupermemory) -> None:
- memory = await async_client.memories.update(
- id="id",
- )
- assert_matches_type(MemoryUpdateResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncSupermemory) -> None:
- memory = await async_client.memories.update(
- id="id",
- container_tag="user_123",
- container_tags=["user_123", "project_123"],
- content="This is a detailed article about machine learning concepts...",
- custom_id="mem_abc123",
- metadata={
- "category": "technology",
- "isPublic": True,
- "readingTime": 5,
- "source": "web",
- "tag_1": "ai",
- "tag_2": "machine-learning",
- },
- )
- assert_matches_type(MemoryUpdateResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncSupermemory) -> None:
- response = await async_client.memories.with_raw_response.update(
- id="id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- memory = await response.parse()
- assert_matches_type(MemoryUpdateResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncSupermemory) -> None:
- async with async_client.memories.with_streaming_response.update(
- id="id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- memory = await response.parse()
- assert_matches_type(MemoryUpdateResponse, memory, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_path_params_update(self, async_client: AsyncSupermemory) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- await async_client.memories.with_raw_response.update(
- id="",
- )
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_list(self, async_client: AsyncSupermemory) -> None:
- memory = await async_client.memories.list()
- assert_matches_type(MemoryListResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncSupermemory) -> None:
- memory = await async_client.memories.list(
- container_tags=["user_123", "project_123"],
- filters='{"AND":[{"key":"group","negate":false,"value":"jira_users"},{"filterType":"numeric","key":"timestamp","negate":false,"numericOperator":">","value":"1742745777"}]}',
- include_content=False,
- limit=10,
- order="desc",
- page=1,
- sort="createdAt",
- )
- assert_matches_type(MemoryListResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncSupermemory) -> None:
- response = await async_client.memories.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- memory = await response.parse()
- assert_matches_type(MemoryListResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncSupermemory) -> None:
- async with async_client.memories.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- memory = await response.parse()
- assert_matches_type(MemoryListResponse, memory, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_delete(self, async_client: AsyncSupermemory) -> None:
- memory = await async_client.memories.delete(
- "id",
- )
- assert memory is None
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncSupermemory) -> None:
- response = await async_client.memories.with_raw_response.delete(
- "id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- memory = await response.parse()
- assert memory is None
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncSupermemory) -> None:
- async with async_client.memories.with_streaming_response.delete(
- "id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- memory = await response.parse()
- assert memory is None
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncSupermemory) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- await async_client.memories.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_add(self, async_client: AsyncSupermemory) -> None:
- memory = await async_client.memories.add()
- assert_matches_type(MemoryAddResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_add_with_all_params(self, async_client: AsyncSupermemory) -> None:
- memory = await async_client.memories.add(
- container_tag="user_123",
- container_tags=["user_123", "project_123"],
- content="This is a detailed article about machine learning concepts...",
- custom_id="mem_abc123",
- metadata={
- "category": "technology",
- "isPublic": True,
- "readingTime": 5,
- "source": "web",
- "tag_1": "ai",
- "tag_2": "machine-learning",
- },
- )
- assert_matches_type(MemoryAddResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_raw_response_add(self, async_client: AsyncSupermemory) -> None:
- response = await async_client.memories.with_raw_response.add()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- memory = await response.parse()
- assert_matches_type(MemoryAddResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_streaming_response_add(self, async_client: AsyncSupermemory) -> None:
- async with async_client.memories.with_streaming_response.add() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- memory = await response.parse()
- assert_matches_type(MemoryAddResponse, memory, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_get(self, async_client: AsyncSupermemory) -> None:
- memory = await async_client.memories.get(
- "id",
- )
- assert_matches_type(MemoryGetResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_raw_response_get(self, async_client: AsyncSupermemory) -> None:
- response = await async_client.memories.with_raw_response.get(
- "id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- memory = await response.parse()
- assert_matches_type(MemoryGetResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_streaming_response_get(self, async_client: AsyncSupermemory) -> None:
- async with async_client.memories.with_streaming_response.get(
- "id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- memory = await response.parse()
- assert_matches_type(MemoryGetResponse, memory, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_path_params_get(self, async_client: AsyncSupermemory) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- await async_client.memories.with_raw_response.get(
- "",
- )
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_upload_file(self, async_client: AsyncSupermemory) -> None:
- memory = await async_client.memories.upload_file(
- file=b"raw file contents",
- )
- assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_upload_file_with_all_params(self, async_client: AsyncSupermemory) -> None:
- memory = await async_client.memories.upload_file(
- file=b"raw file contents",
- container_tags="containerTags",
- )
- assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_raw_response_upload_file(self, async_client: AsyncSupermemory) -> None:
- response = await async_client.memories.with_raw_response.upload_file(
- file=b"raw file contents",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- memory = await response.parse()
- assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_streaming_response_upload_file(self, async_client: AsyncSupermemory) -> None:
- async with async_client.memories.with_streaming_response.upload_file(
- file=b"raw file contents",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- memory = await response.parse()
- assert_matches_type(MemoryUploadFileResponse, memory, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/test_client.py b/tests/test_client.py
index 06127d83..0f948871 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -6,13 +6,10 @@
import os
import sys
import json
-import time
import asyncio
import inspect
-import subprocess
import tracemalloc
from typing import Any, Union, cast
-from textwrap import dedent
from unittest import mock
from typing_extensions import Literal
@@ -23,14 +20,17 @@
from supermemory import Supermemory, AsyncSupermemory, APIResponseValidationError
from supermemory._types import Omit
+from supermemory._utils import asyncify
from supermemory._models import BaseModel, FinalRequestOptions
from supermemory._exceptions import APIStatusError, APITimeoutError, SupermemoryError, APIResponseValidationError
from supermemory._base_client import (
DEFAULT_TIMEOUT,
HTTPX_DEFAULT_TIMEOUT,
BaseClient,
+ OtherPlatform,
DefaultHttpxClient,
DefaultAsyncHttpxClient,
+ get_platform,
make_request_options,
)
@@ -724,20 +724,20 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str
@mock.patch("supermemory._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: Supermemory) -> None:
- respx_mock.post("/v3/memories").mock(side_effect=httpx.TimeoutException("Test timeout error"))
+ respx_mock.post("/v3/search").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
- client.memories.with_streaming_response.add().__enter__()
+ client.search.with_streaming_response.documents(q="machine learning concepts").__enter__()
assert _get_open_connections(self.client) == 0
@mock.patch("supermemory._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: Supermemory) -> None:
- respx_mock.post("/v3/memories").mock(return_value=httpx.Response(500))
+ respx_mock.post("/v3/search").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
- client.memories.with_streaming_response.add().__enter__()
+ client.search.with_streaming_response.documents(q="machine learning concepts").__enter__()
assert _get_open_connections(self.client) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@@ -764,9 +764,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.post("/v3/memories").mock(side_effect=retry_handler)
+ respx_mock.post("/v3/search").mock(side_effect=retry_handler)
- response = client.memories.with_raw_response.add()
+ response = client.search.with_raw_response.documents(q="machine learning concepts")
assert response.retries_taken == failures_before_success
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@@ -788,9 +788,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.post("/v3/memories").mock(side_effect=retry_handler)
+ respx_mock.post("/v3/search").mock(side_effect=retry_handler)
- response = client.memories.with_raw_response.add(extra_headers={"x-stainless-retry-count": Omit()})
+ response = client.search.with_raw_response.documents(
+ q="machine learning concepts", extra_headers={"x-stainless-retry-count": Omit()}
+ )
assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
@@ -811,9 +813,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.post("/v3/memories").mock(side_effect=retry_handler)
+ respx_mock.post("/v3/search").mock(side_effect=retry_handler)
- response = client.memories.with_raw_response.add(extra_headers={"x-stainless-retry-count": "42"})
+ response = client.search.with_raw_response.documents(
+ q="machine learning concepts", extra_headers={"x-stainless-retry-count": "42"}
+ )
assert response.http_request.headers.get("x-stainless-retry-count") == "42"
@@ -1541,10 +1545,10 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte
async def test_retrying_timeout_errors_doesnt_leak(
self, respx_mock: MockRouter, async_client: AsyncSupermemory
) -> None:
- respx_mock.post("/v3/memories").mock(side_effect=httpx.TimeoutException("Test timeout error"))
+ respx_mock.post("/v3/search").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
- await async_client.memories.with_streaming_response.add().__aenter__()
+ await async_client.search.with_streaming_response.documents(q="machine learning concepts").__aenter__()
assert _get_open_connections(self.client) == 0
@@ -1553,10 +1557,10 @@ async def test_retrying_timeout_errors_doesnt_leak(
async def test_retrying_status_errors_doesnt_leak(
self, respx_mock: MockRouter, async_client: AsyncSupermemory
) -> None:
- respx_mock.post("/v3/memories").mock(return_value=httpx.Response(500))
+ respx_mock.post("/v3/search").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
- await async_client.memories.with_streaming_response.add().__aenter__()
+ await async_client.search.with_streaming_response.documents(q="machine learning concepts").__aenter__()
assert _get_open_connections(self.client) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@@ -1584,9 +1588,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.post("/v3/memories").mock(side_effect=retry_handler)
+ respx_mock.post("/v3/search").mock(side_effect=retry_handler)
- response = await client.memories.with_raw_response.add()
+ response = await client.search.with_raw_response.documents(q="machine learning concepts")
assert response.retries_taken == failures_before_success
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@@ -1609,9 +1613,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.post("/v3/memories").mock(side_effect=retry_handler)
+ respx_mock.post("/v3/search").mock(side_effect=retry_handler)
- response = await client.memories.with_raw_response.add(extra_headers={"x-stainless-retry-count": Omit()})
+ response = await client.search.with_raw_response.documents(
+ q="machine learning concepts", extra_headers={"x-stainless-retry-count": Omit()}
+ )
assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
@@ -1633,56 +1639,17 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
return httpx.Response(500)
return httpx.Response(200)
- respx_mock.post("/v3/memories").mock(side_effect=retry_handler)
+ respx_mock.post("/v3/search").mock(side_effect=retry_handler)
- response = await client.memories.with_raw_response.add(extra_headers={"x-stainless-retry-count": "42"})
+ response = await client.search.with_raw_response.documents(
+ q="machine learning concepts", extra_headers={"x-stainless-retry-count": "42"}
+ )
assert response.http_request.headers.get("x-stainless-retry-count") == "42"
- def test_get_platform(self) -> None:
- # A previous implementation of asyncify could leave threads unterminated when
- # used with nest_asyncio.
- #
- # Since nest_asyncio.apply() is global and cannot be un-applied, this
- # test is run in a separate process to avoid affecting other tests.
- test_code = dedent("""
- import asyncio
- import nest_asyncio
- import threading
-
- from supermemory._utils import asyncify
- from supermemory._base_client import get_platform
-
- async def test_main() -> None:
- result = await asyncify(get_platform)()
- print(result)
- for thread in threading.enumerate():
- print(thread.name)
-
- nest_asyncio.apply()
- asyncio.run(test_main())
- """)
- with subprocess.Popen(
- [sys.executable, "-c", test_code],
- text=True,
- ) as process:
- timeout = 10 # seconds
-
- start_time = time.monotonic()
- while True:
- return_code = process.poll()
- if return_code is not None:
- if return_code != 0:
- raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code")
-
- # success
- break
-
- if time.monotonic() - start_time > timeout:
- process.kill()
- raise AssertionError("calling get_platform using asyncify resulted in a hung process")
-
- time.sleep(0.1)
+ async def test_get_platform(self) -> None:
+ platform = await asyncify(get_platform)()
+ assert isinstance(platform, (str, OtherPlatform))
async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Test that the proxy environment variables are set correctly
diff --git a/tests/test_models.py b/tests/test_models.py
index c9443074..3afd1f61 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -8,7 +8,7 @@
from pydantic import Field
from supermemory._utils import PropertyInfo
-from supermemory._compat import PYDANTIC_V2, parse_obj, model_dump, model_json
+from supermemory._compat import PYDANTIC_V1, parse_obj, model_dump, model_json
from supermemory._models import BaseModel, construct_type
@@ -294,12 +294,12 @@ class Model(BaseModel):
assert cast(bool, m.foo) is True
m = Model.construct(foo={"name": 3})
- if PYDANTIC_V2:
- assert isinstance(m.foo, Submodel1)
- assert m.foo.name == 3 # type: ignore
- else:
+ if PYDANTIC_V1:
assert isinstance(m.foo, Submodel2)
assert m.foo.name == "3"
+ else:
+ assert isinstance(m.foo, Submodel1)
+ assert m.foo.name == 3 # type: ignore
def test_list_of_unions() -> None:
@@ -426,10 +426,10 @@ class Model(BaseModel):
expected = datetime(2019, 12, 27, 18, 11, 19, 117000, tzinfo=timezone.utc)
- if PYDANTIC_V2:
- expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}'
- else:
+ if PYDANTIC_V1:
expected_json = '{"created_at": "2019-12-27T18:11:19.117000+00:00"}'
+ else:
+ expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}'
model = Model.construct(created_at="2019-12-27T18:11:19.117Z")
assert model.created_at == expected
@@ -531,7 +531,7 @@ class Model2(BaseModel):
assert m4.to_dict(mode="python") == {"created_at": datetime.fromisoformat(time_str)}
assert m4.to_dict(mode="json") == {"created_at": time_str}
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"):
m.to_dict(warnings=False)
@@ -556,7 +556,7 @@ class Model(BaseModel):
assert m3.model_dump() == {"foo": None}
assert m3.model_dump(exclude_none=True) == {}
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"):
m.model_dump(round_trip=True)
@@ -580,10 +580,10 @@ class Model(BaseModel):
assert json.loads(m.to_json()) == {"FOO": "hello"}
assert json.loads(m.to_json(use_api_names=False)) == {"foo": "hello"}
- if PYDANTIC_V2:
- assert m.to_json(indent=None) == '{"FOO":"hello"}'
- else:
+ if PYDANTIC_V1:
assert m.to_json(indent=None) == '{"FOO": "hello"}'
+ else:
+ assert m.to_json(indent=None) == '{"FOO":"hello"}'
m2 = Model()
assert json.loads(m2.to_json()) == {}
@@ -595,7 +595,7 @@ class Model(BaseModel):
assert json.loads(m3.to_json()) == {"FOO": None}
assert json.loads(m3.to_json(exclude_none=True)) == {}
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"):
m.to_json(warnings=False)
@@ -622,7 +622,7 @@ class Model(BaseModel):
assert json.loads(m3.model_dump_json()) == {"foo": None}
assert json.loads(m3.model_dump_json(exclude_none=True)) == {}
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"):
m.model_dump_json(round_trip=True)
@@ -679,12 +679,12 @@ class B(BaseModel):
)
assert isinstance(m, A)
assert m.type == "a"
- if PYDANTIC_V2:
- assert m.data == 100 # type: ignore[comparison-overlap]
- else:
+ if PYDANTIC_V1:
# pydantic v1 automatically converts inputs to strings
# if the expected type is a str
assert m.data == "100"
+ else:
+ assert m.data == 100 # type: ignore[comparison-overlap]
def test_discriminated_unions_unknown_variant() -> None:
@@ -768,12 +768,12 @@ class B(BaseModel):
)
assert isinstance(m, A)
assert m.foo_type == "a"
- if PYDANTIC_V2:
- assert m.data == 100 # type: ignore[comparison-overlap]
- else:
+ if PYDANTIC_V1:
# pydantic v1 automatically converts inputs to strings
# if the expected type is a str
assert m.data == "100"
+ else:
+ assert m.data == 100 # type: ignore[comparison-overlap]
def test_discriminated_unions_overlapping_discriminators_invalid_data() -> None:
@@ -833,7 +833,7 @@ class B(BaseModel):
assert UnionType.__discriminator__ is discriminator
-@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1")
+@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1")
def test_type_alias_type() -> None:
Alias = TypeAliasType("Alias", str) # pyright: ignore
@@ -849,7 +849,7 @@ class Model(BaseModel):
assert m.union == "bar"
-@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1")
+@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1")
def test_field_named_cls() -> None:
class Model(BaseModel):
cls: str
@@ -936,7 +936,7 @@ class Type2(BaseModel):
assert isinstance(model.value, InnerType2)
-@pytest.mark.skipif(not PYDANTIC_V2, reason="this is only supported in pydantic v2 for now")
+@pytest.mark.skipif(PYDANTIC_V1, reason="this is only supported in pydantic v2 for now")
def test_extra_properties() -> None:
class Item(BaseModel):
prop: int
diff --git a/tests/test_transform.py b/tests/test_transform.py
index 8a3a412e..b4c46e41 100644
--- a/tests/test_transform.py
+++ b/tests/test_transform.py
@@ -15,7 +15,7 @@
parse_datetime,
async_transform as _async_transform,
)
-from supermemory._compat import PYDANTIC_V2
+from supermemory._compat import PYDANTIC_V1
from supermemory._models import BaseModel
_T = TypeVar("_T")
@@ -189,7 +189,7 @@ class DateModel(BaseModel):
@pytest.mark.asyncio
async def test_iso8601_format(use_async: bool) -> None:
dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00")
- tz = "Z" if PYDANTIC_V2 else "+00:00"
+ tz = "+00:00" if PYDANTIC_V1 else "Z"
assert await transform({"foo": dt}, DatetimeDict, use_async) == {"foo": "2023-02-23T14:16:36.337692+00:00"} # type: ignore[comparison-overlap]
assert await transform(DatetimeModel(foo=dt), Any, use_async) == {"foo": "2023-02-23T14:16:36.337692" + tz} # type: ignore[comparison-overlap]
@@ -297,11 +297,11 @@ async def test_pydantic_unknown_field(use_async: bool) -> None:
@pytest.mark.asyncio
async def test_pydantic_mismatched_types(use_async: bool) -> None:
model = MyModel.construct(foo=True)
- if PYDANTIC_V2:
+ if PYDANTIC_V1:
+ params = await transform(model, Any, use_async)
+ else:
with pytest.warns(UserWarning):
params = await transform(model, Any, use_async)
- else:
- params = await transform(model, Any, use_async)
assert cast(Any, params) == {"foo": True}
@@ -309,11 +309,11 @@ async def test_pydantic_mismatched_types(use_async: bool) -> None:
@pytest.mark.asyncio
async def test_pydantic_mismatched_object_type(use_async: bool) -> None:
model = MyModel.construct(foo=MyModel.construct(hello="world"))
- if PYDANTIC_V2:
+ if PYDANTIC_V1:
+ params = await transform(model, Any, use_async)
+ else:
with pytest.warns(UserWarning):
params = await transform(model, Any, use_async)
- else:
- params = await transform(model, Any, use_async)
assert cast(Any, params) == {"foo": {"hello": "world"}}
diff --git a/tests/test_utils/test_datetime_parse.py b/tests/test_utils/test_datetime_parse.py
new file mode 100644
index 00000000..f30d48d8
--- /dev/null
+++ b/tests/test_utils/test_datetime_parse.py
@@ -0,0 +1,110 @@
+"""
+Copied from https://github.com/pydantic/pydantic/blob/v1.10.22/tests/test_datetime_parse.py
+with modifications so it works without pydantic v1 imports.
+"""
+
+from typing import Type, Union
+from datetime import date, datetime, timezone, timedelta
+
+import pytest
+
+from supermemory._utils import parse_date, parse_datetime
+
+
+def create_tz(minutes: int) -> timezone:
+ return timezone(timedelta(minutes=minutes))
+
+
+@pytest.mark.parametrize(
+ "value,result",
+ [
+ # Valid inputs
+ ("1494012444.883309", date(2017, 5, 5)),
+ (b"1494012444.883309", date(2017, 5, 5)),
+ (1_494_012_444.883_309, date(2017, 5, 5)),
+ ("1494012444", date(2017, 5, 5)),
+ (1_494_012_444, date(2017, 5, 5)),
+ (0, date(1970, 1, 1)),
+ ("2012-04-23", date(2012, 4, 23)),
+ (b"2012-04-23", date(2012, 4, 23)),
+ ("2012-4-9", date(2012, 4, 9)),
+ (date(2012, 4, 9), date(2012, 4, 9)),
+ (datetime(2012, 4, 9, 12, 15), date(2012, 4, 9)),
+ # Invalid inputs
+ ("x20120423", ValueError),
+ ("2012-04-56", ValueError),
+ (19_999_999_999, date(2603, 10, 11)), # just before watershed
+ (20_000_000_001, date(1970, 8, 20)), # just after watershed
+ (1_549_316_052, date(2019, 2, 4)), # nowish in s
+ (1_549_316_052_104, date(2019, 2, 4)), # nowish in ms
+ (1_549_316_052_104_324, date(2019, 2, 4)), # nowish in μs
+ (1_549_316_052_104_324_096, date(2019, 2, 4)), # nowish in ns
+ ("infinity", date(9999, 12, 31)),
+ ("inf", date(9999, 12, 31)),
+ (float("inf"), date(9999, 12, 31)),
+ ("infinity ", date(9999, 12, 31)),
+ (int("1" + "0" * 100), date(9999, 12, 31)),
+ (1e1000, date(9999, 12, 31)),
+ ("-infinity", date(1, 1, 1)),
+ ("-inf", date(1, 1, 1)),
+ ("nan", ValueError),
+ ],
+)
+def test_date_parsing(value: Union[str, bytes, int, float], result: Union[date, Type[Exception]]) -> None:
+ if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance]
+ with pytest.raises(result):
+ parse_date(value)
+ else:
+ assert parse_date(value) == result
+
+
+@pytest.mark.parametrize(
+ "value,result",
+ [
+ # Valid inputs
+ # values in seconds
+ ("1494012444.883309", datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
+ (1_494_012_444.883_309, datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
+ ("1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
+ (b"1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
+ (1_494_012_444, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
+ # values in ms
+ ("1494012444000.883309", datetime(2017, 5, 5, 19, 27, 24, 883, tzinfo=timezone.utc)),
+ ("-1494012444000.883309", datetime(1922, 8, 29, 4, 32, 35, 999117, tzinfo=timezone.utc)),
+ (1_494_012_444_000, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
+ ("2012-04-23T09:15:00", datetime(2012, 4, 23, 9, 15)),
+ ("2012-4-9 4:8:16", datetime(2012, 4, 9, 4, 8, 16)),
+ ("2012-04-23T09:15:00Z", datetime(2012, 4, 23, 9, 15, 0, 0, timezone.utc)),
+ ("2012-4-9 4:8:16-0320", datetime(2012, 4, 9, 4, 8, 16, 0, create_tz(-200))),
+ ("2012-04-23T10:20:30.400+02:30", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(150))),
+ ("2012-04-23T10:20:30.400+02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(120))),
+ ("2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
+ (b"2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
+ (datetime(2017, 5, 5), datetime(2017, 5, 5)),
+ (0, datetime(1970, 1, 1, 0, 0, 0, tzinfo=timezone.utc)),
+ # Invalid inputs
+ ("x20120423091500", ValueError),
+ ("2012-04-56T09:15:90", ValueError),
+ ("2012-04-23T11:05:00-25:00", ValueError),
+ (19_999_999_999, datetime(2603, 10, 11, 11, 33, 19, tzinfo=timezone.utc)), # just before watershed
+ (20_000_000_001, datetime(1970, 8, 20, 11, 33, 20, 1000, tzinfo=timezone.utc)), # just after watershed
+ (1_549_316_052, datetime(2019, 2, 4, 21, 34, 12, 0, tzinfo=timezone.utc)), # nowish in s
+ (1_549_316_052_104, datetime(2019, 2, 4, 21, 34, 12, 104_000, tzinfo=timezone.utc)), # nowish in ms
+ (1_549_316_052_104_324, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in μs
+ (1_549_316_052_104_324_096, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in ns
+ ("infinity", datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ ("inf", datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ ("inf ", datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ (1e50, datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ (float("inf"), datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ ("-infinity", datetime(1, 1, 1, 0, 0)),
+ ("-inf", datetime(1, 1, 1, 0, 0)),
+ ("nan", ValueError),
+ ],
+)
+def test_datetime_parsing(value: Union[str, bytes, int, float], result: Union[datetime, Type[Exception]]) -> None:
+ if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance]
+ with pytest.raises(result):
+ parse_datetime(value)
+ else:
+ assert parse_datetime(value) == result
diff --git a/tests/utils.py b/tests/utils.py
index 0d6779c4..29f5e5f2 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -4,7 +4,7 @@
import inspect
import traceback
import contextlib
-from typing import Any, TypeVar, Iterator, cast
+from typing import Any, TypeVar, Iterator, Sequence, cast
from datetime import date, datetime
from typing_extensions import Literal, get_args, get_origin, assert_type
@@ -15,10 +15,11 @@
is_list_type,
is_union_type,
extract_type_arg,
+ is_sequence_type,
is_annotated_type,
is_type_alias_type,
)
-from supermemory._compat import PYDANTIC_V2, field_outer_type, get_model_fields
+from supermemory._compat import PYDANTIC_V1, field_outer_type, get_model_fields
from supermemory._models import BaseModel
BaseModelT = TypeVar("BaseModelT", bound=BaseModel)
@@ -27,12 +28,12 @@
def assert_matches_model(model: type[BaseModelT], value: BaseModelT, *, path: list[str]) -> bool:
for name, field in get_model_fields(model).items():
field_value = getattr(value, name)
- if PYDANTIC_V2:
- allow_none = False
- else:
+ if PYDANTIC_V1:
# in v1 nullability was structured differently
# https://docs.pydantic.dev/2.0/migration/#required-optional-and-nullable-fields
allow_none = getattr(field, "allow_none", False)
+ else:
+ allow_none = False
assert_matches_type(
field_outer_type(field),
@@ -71,6 +72,13 @@ def assert_matches_type(
if is_list_type(type_):
return _assert_list_type(type_, value)
+ if is_sequence_type(type_):
+ assert isinstance(value, Sequence)
+ inner_type = get_args(type_)[0]
+ for entry in value: # type: ignore
+ assert_type(inner_type, entry) # type: ignore
+ return
+
if origin == str:
assert isinstance(value, str)
elif origin == int: