From cbc561044498e0937172516e944af5d7a948c6fe Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 26 Feb 2026 01:11:08 +0000 Subject: [PATCH 01/10] Update to MkDocs documentation --- mkdocs/site/packages/evo-blockmodels.html | 78 +++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/mkdocs/site/packages/evo-blockmodels.html b/mkdocs/site/packages/evo-blockmodels.html index 447e2113..c0819c40 100644 --- a/mkdocs/site/packages/evo-blockmodels.html +++ b/mkdocs/site/packages/evo-blockmodels.html @@ -415,6 +415,84 @@

+

+ get_block_model + + + + async + + +

+
get_block_model(bm_id: UUID) -> BlockModel
+
+ +
+ +

Get a block model by ID.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ bm_id + + UUID + +
+

The ID of the block model to retrieve.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ BlockModel + +
+

The BlockModel metadata.

+
+
+ + +
+ + + +
+ +

list_all_block_models From 5cdaff8d5ea44ec318d1bc109f700484f69be834 Mon Sep 17 00:00:00 2001 From: Denis Simo <52428161+denis-simo@users.noreply.github.com> Date: Fri, 27 Feb 2026 08:27:10 +1300 Subject: [PATCH 02/10] Added Kriging task support and official docs for the next release of the evo-sdk. --- CHANGELOG.md | 47 + README.md | 81 +- .../running-kriging-compute/README.md | 2 +- .../running-kriging-compute.ipynb | 344 +- docs/img/pointset-output.png | Bin 0 -> 28851 bytes docs/img/service-manager-widget.png | Bin 0 -> 12087 bytes .../BlockModelAPIClient.md} | 0 .../packages/evo-blockmodels/Introduction.md | 88 + .../packages/evo-blockmodels/TypedObjects.md | 51 + .../ColormapAPIClient.md} | 0 .../docs/packages/evo-compute/Introduction.md | 142 + .../JobClient.md} | 0 .../docs/packages/evo-compute/TypedObjects.md | 56 + .../FileAPIClient.md} | 0 .../docs/packages/evo-objects/Introduction.md | 78 + .../docs/packages/evo-objects/TypedObjects.md | 86 + mkdocs/docs/packages/evo-python-sdk.md | 6 +- .../DiscoveryAPIClient.md} | 0 .../WorkspaceAPIClient.md} | 0 .../docs/packages/evo-widgets/Introduction.md | 57 + mkdocs/gen_api_docs.py | 122 +- .../BlockModelAPIClient.html} | 30 +- .../evo-blockmodels/Introduction.html | 206 + .../evo-blockmodels/TypedObjects.html | 2774 ++++++++++ .../ColormapAPIClient.html} | 30 +- .../packages/evo-compute/Introduction.html | 255 + .../JobClient.html} | 91 +- .../packages/evo-compute/TypedObjects.html | 1801 +++++++ .../FileAPIClient.html} | 30 +- .../evo-objects/DownloadedObject.html | 4 +- .../packages/evo-objects/Introduction.html | 193 + .../packages/evo-objects/ObjectAPIClient.html | 4 +- .../packages/evo-objects/TypedObjects.html | 4761 +++++++++++++++++ mkdocs/site/packages/evo-python-sdk.html | 14 +- .../DiscoveryAPIClient.html} | 30 +- .../WorkspaceAPIClient.html} | 30 +- .../packages/evo-widgets/Introduction.html | 189 + mkdocs/typed_objects.txt | 39 + .../evo-compute/docs/examples/kriging.ipynb | 585 ++ 
.../docs/examples/kriging_multiple.ipynb | 755 +++ packages/evo-compute/pyproject.toml | 3 +- .../evo-compute/src/evo/compute/__init__.py | 2 + .../evo-compute/src/evo/compute/client.py | 26 +- .../src/evo/compute/tasks/__init__.py | 224 + .../src/evo/compute/tasks/common/__init__.py | 50 + .../src/evo/compute/tasks/common/runner.py | 227 + .../src/evo/compute/tasks/common/search.py | 73 + .../evo/compute/tasks/common/source_target.py | 316 ++ .../src/evo/compute/tasks/kriging.py | 694 +++ packages/evo-compute/tests/test_client.py | 98 - .../evo-compute/tests/test_kriging_tasks.py | 778 +++ packages/evo-compute/tests/test_tasks.py | 263 + .../src/evo/objects/typed/attributes.py | 61 +- .../tests/typed/test_attributes.py | 21 +- packages/evo-sdk-common/pyproject.toml | 2 +- .../evo-widgets/src/evo/widgets/__init__.py | 23 + .../evo-widgets/src/evo/widgets/formatters.py | 118 +- packages/evo-widgets/tests/test_formatters.py | 309 ++ pyproject.toml | 6 +- uv.lock | 8 +- 60 files changed, 15818 insertions(+), 465 deletions(-) create mode 100644 docs/img/pointset-output.png create mode 100644 docs/img/service-manager-widget.png rename mkdocs/docs/packages/{evo-blockmodels.md => evo-blockmodels/BlockModelAPIClient.md} (100%) create mode 100644 mkdocs/docs/packages/evo-blockmodels/Introduction.md create mode 100644 mkdocs/docs/packages/evo-blockmodels/TypedObjects.md rename mkdocs/docs/packages/{evo-colormaps.md => evo-colormaps/ColormapAPIClient.md} (100%) create mode 100644 mkdocs/docs/packages/evo-compute/Introduction.md rename mkdocs/docs/packages/{evo-compute.md => evo-compute/JobClient.md} (100%) create mode 100644 mkdocs/docs/packages/evo-compute/TypedObjects.md rename mkdocs/docs/packages/{evo-files.md => evo-files/FileAPIClient.md} (100%) create mode 100644 mkdocs/docs/packages/evo-objects/Introduction.md create mode 100644 mkdocs/docs/packages/evo-objects/TypedObjects.md rename mkdocs/docs/packages/evo-sdk-common/{discovery.md => discovery/DiscoveryAPIClient.md} 
(100%) rename mkdocs/docs/packages/evo-sdk-common/{workspaces.md => workspaces/WorkspaceAPIClient.md} (100%) create mode 100644 mkdocs/docs/packages/evo-widgets/Introduction.md rename mkdocs/site/packages/{evo-blockmodels.html => evo-blockmodels/BlockModelAPIClient.html} (98%) create mode 100644 mkdocs/site/packages/evo-blockmodels/Introduction.html create mode 100644 mkdocs/site/packages/evo-blockmodels/TypedObjects.html rename mkdocs/site/packages/{evo-colormaps.html => evo-colormaps/ColormapAPIClient.html} (96%) create mode 100644 mkdocs/site/packages/evo-compute/Introduction.html rename mkdocs/site/packages/{evo-compute.html => evo-compute/JobClient.html} (87%) create mode 100644 mkdocs/site/packages/evo-compute/TypedObjects.html rename mkdocs/site/packages/{evo-files.html => evo-files/FileAPIClient.html} (97%) create mode 100644 mkdocs/site/packages/evo-objects/Introduction.html create mode 100644 mkdocs/site/packages/evo-objects/TypedObjects.html rename mkdocs/site/packages/evo-sdk-common/{discovery.html => discovery/DiscoveryAPIClient.html} (88%) rename mkdocs/site/packages/evo-sdk-common/{workspaces.html => workspaces/WorkspaceAPIClient.html} (97%) create mode 100644 mkdocs/site/packages/evo-widgets/Introduction.html create mode 100644 mkdocs/typed_objects.txt create mode 100644 packages/evo-compute/docs/examples/kriging.ipynb create mode 100644 packages/evo-compute/docs/examples/kriging_multiple.ipynb create mode 100644 packages/evo-compute/src/evo/compute/tasks/__init__.py create mode 100644 packages/evo-compute/src/evo/compute/tasks/common/__init__.py create mode 100644 packages/evo-compute/src/evo/compute/tasks/common/runner.py create mode 100644 packages/evo-compute/src/evo/compute/tasks/common/search.py create mode 100644 packages/evo-compute/src/evo/compute/tasks/common/source_target.py create mode 100644 packages/evo-compute/src/evo/compute/tasks/kriging.py create mode 100644 packages/evo-compute/tests/test_kriging_tasks.py create mode 100644 
packages/evo-compute/tests/test_tasks.py diff --git a/CHANGELOG.md b/CHANGELOG.md index df79c319..d0553afd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,52 @@ # Changelog +## evo-sdk@v0.2.0 +### What's Changed +#### evo-sdk +* **Typed object interactions** — Simplified interactions with a subset of Evo geoscience objects (Points, Grids, Block Models, Variograms) aimed at geologists and geostatisticians. Typed object interactions abstract API calls and make it easier to access data. +* **evo-widgets** — New `evo-widgets` package with rich HTML rendering of typed objects in Jupyter notebooks. Load via `%load_ext evo.widgets` for pretty-printed output with clickable links to Evo Portal and Viewer. +* **Kriging compute (preview)** — Support for running a preview version of Kriging estimation via `evo-compute`. +* Updated README with Quick start for notebooks, typed object examples, and links to [simplified object interactions](code-samples/geoscience-objects/simplified-object-interactions/) and [running kriging compute](code-samples/geoscience-objects/running-kriging-compute/) notebooks. + +**Full Changelog**: https://github.com/SeequentEvo/evo-python-sdk/compare/evo-sdk@v0.1.20...evo-sdk@v0.2.0 + +## evo-objects@v0.4.0 +### What's Changed +#### evo-objects +* New typed objects support for simplified interactions with geoscience objects (`PointSet`, grids, variograms). Typed objects abstract API calls and provide intuitive Python classes for data access, including `to_dataframe()`, automatic bounding box calculation, and rich HTML display. + +**Full Changelog**: https://github.com/SeequentEvo/evo-python-sdk/compare/evo-objects@v0.3.2...evo-objects@v0.4.0 + +## evo-blockmodels@v0.2.0 +### What's Changed +#### evo-blockmodels +* Typed object support for interactions with block models and block model reports. 
+ +**Full Changelog**: https://github.com/SeequentEvo/evo-python-sdk/compare/evo-blockmodels@v0.1.0...evo-blockmodels@v0.2.0 + +## evo-widgets@v0.2.0 +### What's Changed +#### evo-widgets +* First published release of `evo-widgets` 🎉 +* Rich HTML rendering of typed geoscience objects in Jupyter notebooks. Load via `%load_ext evo.widgets`. +* Supports `PointSet`, `Regular3DGrid`, `TensorGrid`, `BlockModel`, and other typed objects inheriting from `_BaseObject`. +* URL generation for Evo Portal and Viewer links (`get_portal_url_for_object`, `get_viewer_url_for_object`, `get_viewer_url_for_objects`). +* Light/dark mode support via Jupyter theme CSS variables. + +## evo-compute@v0.0.2 +### What's Changed +#### evo-compute +* First preview compute task: Kriging estimation via Evo Compute. + +**Full Changelog**: https://github.com/SeequentEvo/evo-python-sdk/compare/evo-compute@v0.0.1rc3...evo-compute@v0.0.2 + +## evo-sdk-common@v0.5.19 +### What's Changed +#### evo-sdk-common +* Common typed object definitions moved to `evo-sdk-common` for shared use across packages. + +**Full Changelog**: https://github.com/SeequentEvo/evo-python-sdk/compare/evo-sdk-common@v0.5.18...evo-sdk-common@v0.5.19 + ## evo-files@v0.2.4 ## What's Changed diff --git a/README.md b/README.md index 6807f7e9..52b7ac4d 100644 --- a/README.md +++ b/README.md @@ -38,12 +38,81 @@ Before you get started, make sure you have: `evo-python-sdk` is designed for developers, data scientists, and technical users who want to work with Seequent Evo APIs and geoscience data. +This repository contains a number of sub-packages. You may choose to install the `evo-sdk` package, which includes all sub-packages and optional dependencies (e.g. Jupyter notebook support), or choose a specific package to install: + +| Package | Version | Import | Description | +| --- | --- | --- | --- | +| [evo-sdk](README.md) | PyPI - Version | | A metapackage that installs all available Seequent Evo SDKs, including Jupyter notebook examples. 
| +| [evo-sdk-common](packages/evo-sdk-common/README.md) | PyPI - Version | `evo.common`, `evo.notebooks` | A shared library that provides common functionality for integrating with Seequent's client SDKs. | +| [evo-files](packages/evo-files/README.md) | PyPI - Version | `evo.files` | A service client for interacting with the Evo File API. | +| [evo-objects](packages/evo-objects/README.md) | PyPI - Version | `evo.objects` | A geoscience object service client library designed to help get up and running with the Geoscience Object API. | +| [evo-colormaps](packages/evo-colormaps/README.md) | PyPI - Version | `evo.colormaps` | A service client to create colour mappings and associate them to geoscience data with the Colormap API.| +| [evo-blockmodels](packages/evo-blockmodels/README.md) | PyPI - Version | `evo.blockmodels` | The Block Model API provides the ability to manage and report on block models in your Evo workspaces. | +| [evo-widgets](packages/evo-widgets/README.md) | PyPI - Version | `evo.widgets` | Widgets and presentation layer — rich HTML rendering of typed geoscience objects in Jupyter notebooks. | +| [evo-compute](packages/evo-compute/README.md) | PyPI - Version | `evo.compute` | A service client to send jobs to the Compute Tasks API.| + * To quickly learn how to use Evo APIs, start with the [Getting started with Evo code samples](#getting-started-with-evo-code-samples) section, which contains practical, end-to-end Jupyter notebook examples for common workflows. Most new users should begin with this section. * If you are interested in the underlying SDKs or need to understand the implementation details, explore the [Getting started with Evo SDK development](#getting-started-with-evo-sdk-development) section, which contains the source code for each Evo SDK. * To learn about contributing to this repository, take a look at the [Contributing](#contributing) section. 
+## Quick start for notebooks + +Once you have an Evo app registered and the SDK installed, you can load and work with geoscience objects in just a few lines of code: + +```python +# Authenticate with Evo +from evo.notebooks import ServiceManagerWidget + +manager = await ServiceManagerWidget.with_auth_code( + client_id="", + cache_location="./notebook-data", +).login() +``` + +> **Output:** +> +> ![ServiceManagerWidget](docs/img/service-manager-widget.png) +> +> *A browser window opens for authentication. After login, select your organization, hub, and workspace from the dropdowns.* + +```python +# Enable rich HTML display for Evo objects in Jupyter +%load_ext evo.widgets + +# Load an object by file path or UUID +from evo.objects.typed import object_from_uuid, object_from_path + +obj = await object_from_path(manager, "") + +# OR + +obj = await object_from_uuid(manager, "") +obj # Displays object info with links to Evo Portal and Viewer +``` + +> **Output:** +> +> ![PointSet object display](docs/img/pointset-output.png) + +```python +# Get data as a pandas DataFrame +df = await obj.to_dataframe() +df.head() +``` + +> **Output:** +> | | x | y | z | Ag_ppm Values | +> |---|---|---|---|---| +> | 0 | 10584.40 | 100608.98 | 214.70 | 12.5 | +> | 1 | 10590.21 | 100615.43 | 220.15 | 8.3 | +> | ... | ... | ... | ... | ... | + +Typed objects like `PointSet`, `BlockModel`, and `Variogram` provide pretty-printed output in Jupyter with clickable links to view your data in Evo. As support for more geoscience objects is added, geologists and geostatisticians can interact with points, variograms, block models, grids, and more — all through intuitive Python classes. To determine the path or UUID of an object, visit the [Evo Portal](https://evo.seequent.com) or use the `ObjectSearchWidget`. + +For a hands-on introduction, see the [simplified object interactions](code-samples/geoscience-objects/simplified-object-interactions/) notebook. 
For a complete geostatistical workflow including variogram modelling and kriging estimation, see the [running kriging compute](code-samples/geoscience-objects/running-kriging-compute/) notebook. + ## Getting started with Evo code samples For detailed information about creating Evo apps, the authentication setup, available code samples, and step-by-step guides for working with the Jupyter notebooks, please refer to the [**code-samples/README.md**](code-samples/README.md) file. @@ -52,18 +121,6 @@ This comprehensive guide will walk you through everything required to get starte ## Getting started with Evo SDK development -This repository contains a number of sub-packages. You may choose to install the `evo-sdk` package, which includes all -sub-packages and optional dependencies (e.g. Jupyter notebook support), or choose a specific package to install: - -| Package | Version | Description | -| --- | --- | --- | -| [evo-sdk](README.md) | PyPI - Version | A metapackage that installs all available Seequent Evo SDKs, including Jupyter notebook examples. | -| [evo-sdk-common](packages/evo-sdk-common/README.md) | PyPI - Version | A shared library that provides common functionality for integrating with Seequent's client SDKs. | -| [evo-files](packages/evo-files/README.md) | PyPI - Version | A service client for interacting with the Evo File API. | -| [evo-objects](packages/evo-objects/README.md) | PyPI - Version | A geoscience object service client library designed to help get up and running with the Geoscience Object API. | -| [evo-colormaps](packages/evo-colormaps/README.md) | PyPI - Version | A service client to create colour mappings and associate them to geoscience data with the Colormap API.| -| [evo-blockmodels](packages/evo-blockmodels/README.md) | PyPI - Version | The Block Model API provides the ability to manage and report on block models in your Evo workspaces. 
| -| [evo-compute](packages/evo-compute/README.md) | PyPI - Version | A service client to send jobs to the Compute Tasks API.| ### Getting started diff --git a/code-samples/geoscience-objects/running-kriging-compute/README.md b/code-samples/geoscience-objects/running-kriging-compute/README.md index 103e01bd..d0527123 100644 --- a/code-samples/geoscience-objects/running-kriging-compute/README.md +++ b/code-samples/geoscience-objects/running-kriging-compute/README.md @@ -33,7 +33,7 @@ The variogram uses two nested spherical structures aligned with the dominant ori - **Long-range structure**: Contribution 0.51, ranges 250m × 180m × 100m - **Anisotropy**: Dip 70°, Azimuth 15° (NNE strike direction) -## WIP: Kriging Compute +## Kriging Compute The notebook includes work-in-progress sections demonstrating: - Creating a target `BlockModel` for estimation diff --git a/code-samples/geoscience-objects/running-kriging-compute/running-kriging-compute.ipynb b/code-samples/geoscience-objects/running-kriging-compute/running-kriging-compute.ipynb index 443e9681..64137142 100644 --- a/code-samples/geoscience-objects/running-kriging-compute/running-kriging-compute.ipynb +++ b/code-samples/geoscience-objects/running-kriging-compute/running-kriging-compute.ipynb @@ -32,10 +32,8 @@ ] }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], + "cell_type": "code", "source": [ "from evo.notebooks import ServiceManagerWidget\n", "\n", @@ -48,17 +46,19 @@ " redirect_url=redirect_url,\n", " cache_location=\"./notebook-data\",\n", ").login()" - ] + ], + "outputs": [], + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], + "cell_type": "code", "source": [ "# Load the widgets extension for rich HTML display\n", "%load_ext evo.widgets" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -74,9 +74,7 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - 
"outputs": [], "source": [ "import pandas as pd\n", "\n", @@ -85,58 +83,75 @@ "df = pd.read_csv(input_file)\n", "\n", "print(f\"Loaded {len(df)} sample points from {df['Hole ID'].nunique()} downholes\")\n", + "\n", + "# Select only the columns we need and rename coordinates\n", + "df = df[[\"X\", \"Y\", \"Z\", \"CU_pct\"]].rename(columns={\"X\": \"x\", \"Y\": \"y\", \"Z\": \"z\"})\n", + "\n", + "# Remove rows with null values - compute tasks require non-null values\n", + "original_count = len(df)\n", + "df = df.dropna().reset_index(drop=True)\n", + "removed_count = original_count - len(df)\n", + "if removed_count > 0:\n", + " print(f\"\\nRemoved {removed_count} rows with null values\")\n", + "print(f\"Remaining: {len(df)} sample points\")\n", + "\n", + "# Verify no nulls remain\n", + "assert df.isna().sum().sum() == 0, \"DataFrame still contains null values!\"\n", + "\n", "print(\"\\nSpatial extent:\")\n", - "print(f\" X: {df['X'].min():.1f} to {df['X'].max():.1f} ({df['X'].max() - df['X'].min():.1f}m)\")\n", - "print(f\" Y: {df['Y'].min():.1f} to {df['Y'].max():.1f} ({df['Y'].max() - df['Y'].min():.1f}m)\")\n", - "print(f\" Z: {df['Z'].min():.1f} to {df['Z'].max():.1f} ({df['Z'].max() - df['Z'].min():.1f}m)\")\n", + "print(f\" X: {df['x'].min():.1f} to {df['x'].max():.1f} ({df['x'].max() - df['x'].min():.1f}m)\")\n", + "print(f\" Y: {df['y'].min():.1f} to {df['y'].max():.1f} ({df['y'].max() - df['y'].min():.1f}m)\")\n", + "print(f\" Z: {df['z'].min():.1f} to {df['z'].max():.1f} ({df['z'].max() - df['z'].min():.1f}m)\")\n", "print(\"\\nCopper (CU_pct) statistics:\")\n", "print(f\" Mean: {df['CU_pct'].mean():.3f}%, Variance: {df['CU_pct'].var():.3f}\")\n", "df.head()" - ] + ], + "outputs": [], + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], + "cell_type": "code", "source": [ "from evo.objects.typed import EpsgCode, PointSet, PointSetData\n", "\n", - "# Prepare the DataFrame with required column names 
(lowercase x, y, z)\n", - "locations_df = df.rename(columns={\"X\": \"x\", \"Y\": \"y\", \"Z\": \"z\"})\n", + "# Create the pointset with coordinates only first\n", + "coords_df = df[[\"x\", \"y\", \"z\", \"CU_pct\"]].copy()\n", + "print(f\"Points to upload: {len(coords_df)}\")\n", "\n", - "# Create the pointset data\n", "pointset_data = PointSetData(\n", " name=\"WP Drill Hole Assays\",\n", - " description=\"Copper and gold assay data from 55 downholes\",\n", - " locations=locations_df,\n", + " description=\"Copper assay data from 55 downholes\",\n", + " locations=coords_df,\n", " coordinate_reference_system=EpsgCode(32650), # UTM Zone 50N\n", ")\n", "\n", "# Create the pointset in Evo\n", "pointset = await PointSet.create(manager, pointset_data)\n", "print(f\"Created pointset with {pointset.num_points} points\")" - ] + ], + "outputs": [], + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], + "cell_type": "code", "source": [ "# Display the pointset with rich HTML formatting\n", "pointset" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# View available attributes\n", "pointset.attributes" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -167,10 +182,8 @@ ] }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], + "cell_type": "code", "source": [ "from evo.objects.typed import (\n", " Ellipsoid,\n", @@ -230,17 +243,19 @@ "# Create the variogram object in Evo\n", "variogram = await Variogram.create(manager, variogram_data)\n", "print(f\"Created variogram: {variogram.name}\")" - ] + ], + "outputs": [], + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], + "cell_type": "code", "source": [ "# Display the variogram with rich HTML formatting\n", "variogram" - ] + ], + "outputs": 
[], + "execution_count": null }, { "cell_type": "markdown", @@ -251,9 +266,7 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "print(f\"Variogram: {variogram.name}\")\n", "print(f\"Sill: {variogram.sill}\")\n", @@ -272,7 +285,9 @@ " print(\n", " f\" Ranges: major={ranges.get('major')}m, semi_major={ranges.get('semi_major')}m, minor={ranges.get('minor')}m\"\n", " )" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -285,9 +300,7 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Get variogram curves for the three principal directions\n", "major, semi_major, minor = variogram.get_principal_directions()\n", @@ -296,13 +309,13 @@ "print(f\"Semi-major direction: range={semi_major.range_value}m\")\n", "print(f\"Minor direction: range={minor.range_value}m\")\n", "print(f\"Points per curve: {len(major.distance)}\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "import plotly.graph_objects as go\n", "\n", @@ -362,7 +375,9 @@ ")\n", "\n", "fig.show()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -377,9 +392,7 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Get the ellipsoid from the variogram (uses structure with largest volume by default)\n", "var_ellipsoid = variogram.get_ellipsoid()\n", @@ -393,7 +406,9 @@ "search_ellipsoid = var_ellipsoid.scaled(2.0)\n", "\n", "print(f\"\\nSearch ellipsoid (2x): major={search_ellipsoid.ranges.major}m\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -409,9 +424,7 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Get pointset data for visualization\n", "points_df = await pointset.to_dataframe()\n", @@ -423,13 
+436,13 @@ " points_df[\"z\"].mean(),\n", ")\n", "print(f\"Data centroid: ({center[0]:.1f}, {center[1]:.1f}, {center[2]:.1f})\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Generate surface mesh points for visualization\n", "vx, vy, vz = var_ellipsoid.surface_points(center=center, n_points=25)\n", @@ -509,7 +522,9 @@ ")\n", "\n", "fig.show()" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -522,16 +537,16 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "from evo.widgets import get_viewer_url_for_objects\n", "\n", "# Generate a viewer URL to see both objects together\n", "viewer_url = get_viewer_url_for_objects(manager, [pointset, variogram])\n", "print(f\"View in Evo Viewer: {viewer_url}\")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -545,12 +560,11 @@ ] }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], + "cell_type": "code", "source": [ - "from evo.blockmodels import Point3, RegularBlockModel, RegularBlockModelData, Size3d, Size3i, Units\n", + "from evo.blockmodels.typed import Units\n", + "from evo.objects.typed import BlockModel, Point3, RegularBlockModelData, Size3d, Size3i\n", "\n", "# Define block model covering the drill hole extent\n", "bm_data = RegularBlockModelData(\n", @@ -563,157 +577,211 @@ " size_unit_id=Units.METRES,\n", ")\n", "\n", - "block_model = await RegularBlockModel.create(manager, bm_data)\n", + "block_model = await BlockModel.create_regular(manager, bm_data)\n", "print(f\"Created Block Model: {block_model.name}\")\n", - "print(f\"Block Model ID: {block_model.id}\")" - ] + "print(f\"Block Model UUID: {block_model.block_model_uuid}\")" + ], + "outputs": [], + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": 
[], + "cell_type": "code", "source": [ "# Display the block model metadata\n", - "block_model.version" - ] + "block_model" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## WIP. Define Kriging Parameters\n", + "## 9. Define Kriging Parameters\n", "\n", "Configure the kriging search neighborhood and estimation parameters." ] }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ - "# from evo.compute.tasks import SearchNeighborhood\n", - "# from evo.compute.tasks.kriging import KrigingParameters\n", - "#\n", - "# # Use the search ellipsoid we created earlier (2x variogram range)\n", - "# params = KrigingParameters(\n", - "# source=pointset.attributes[\"CU_pct\"], # Source attribute\n", - "# target=block_model.attributes[f\"CU_samples_{max_samples}\"]\n", - "# variogram=variogram,\n", - "# search=SearchNeighborhood(\n", - "# ellipsoid=search_ellipsoid,\n", - "# max_samples=16, # Maximum samples per estimate\n", - "# min_samples=4, # Minimum samples required\n", - "# ),\n", - "# )\n", - "#\n", - "# print(f\"Kriging source: CU_pct from pointset\")\n", - "# print(f\"Search ellipsoid: major={search_ellipsoid.ranges.major}m\")" - ] + "from evo.compute.tasks import SearchNeighborhood\n", + "from evo.compute.tasks.kriging import KrigingParameters\n", + "\n", + "# Use the search ellipsoid we created earlier (2x variogram range)\n", + "params = KrigingParameters(\n", + " source=pointset.attributes[\"CU_pct\"], # Source attribute\n", + " target=block_model.attributes[\"CU_estimate\"], # Target attribute on block model\n", + " variogram=variogram,\n", + " search=SearchNeighborhood(\n", + " ellipsoid=search_ellipsoid,\n", + " max_samples=16, # Maximum samples per estimate\n", + " min_samples=4, # Minimum samples required\n", + " ),\n", + ")\n", + "\n", + "print(\"Kriging source: CU_pct from pointset\")\n", + "print(f\"Search ellipsoid: 
major={search_ellipsoid.ranges.major}m\")" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## WIP. Run Kriging Task\n", + "## 10. Run Kriging Task\n", "\n", "Submit and run the kriging task using Evo Compute." ] }, { "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": [ + "from evo.compute.tasks import run\n", + "\n", + "# Submit kriging task (progress feedback is shown by default)\n", + "print(\"Submitting kriging task...\")\n", + "result = await run(manager, params, preview=True)\n", + "\n", + "print(\"Kriging complete!\")\n", + "print(f\"Result: {result.message}\")" + ], "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "metadata": {}, "source": [ - "# from evo.compute.tasks import run\n", - "#\n", - "# # Submit kriging task (progress feedback is shown by default)\n", - "# print(\"Submitting kriging task...\")\n", - "# results = await run(manager, [params])\n", - "#\n", - "# print(f\"Kriging complete!\")\n", - "# print(f\"Result: {results[0].status}\")" - ] + "# Display the kriging result (pretty-printed in Jupyter)\n", + "result" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## WIP. View Kriging Results\n", + "## 11. View Kriging Results\n", "\n", "Refresh the block model and view the estimated grades." 
] }, { "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Refresh block model to see new attributes\n", - "await block_model.refresh()\n", + "block_model = await block_model.refresh()\n", "\n", - "# Display the block model version (shows updated columns)\n", - "block_model.version" - ] + "# Display the block model (pretty-printed in Jupyter)\n", + "block_model" + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": [ + "# View the block model attributes\n", + "block_model.attributes" + ], "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "metadata": {}, "source": [ "# Get the kriged values as a DataFrame\n", - "results_df = block_model.cell_data\n", + "results_df = await block_model.to_dataframe(columns=[\"CU_estimate\"])\n", "\n", "print(f\"Estimated {len(results_df)} blocks\")\n", "print(\"\\nStatistics for CU_estimate:\")\n", "print(results_df[\"CU_estimate\"].describe())" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## WIP. Running Multiple Kriging Scenarios\n", + "## 12. Running Multiple Kriging Scenarios\n", "\n", "Run multiple kriging tasks concurrently to compare different parameters." 
] }, { "cell_type": "code", - "execution_count": null, "metadata": {}, + "source": [ + "max_samples_values = [5, 10, 15, 20]\n", + "\n", + "# Create parameter sets for each scenario\n", + "parameter_sets = []\n", + "for max_samples in max_samples_values:\n", + " params = KrigingParameters(\n", + " source=pointset.attributes[\"CU_pct\"],\n", + " target=block_model.attributes[f\"CU_samples_{max_samples}\"],\n", + " variogram=variogram,\n", + " search=SearchNeighborhood(\n", + " ellipsoid=search_ellipsoid,\n", + " max_samples=max_samples,\n", + " ),\n", + " )\n", + " parameter_sets.append(params)\n", + " print(f\"Prepared scenario with max_samples={max_samples}\")\n", + "\n", + "print(f\"\\nCreated {len(parameter_sets)} parameter sets\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "# Run all scenarios in parallel\n", + "print(f\"Submitting {len(parameter_sets)} kriging tasks...\")\n", + "results = await run(manager, parameter_sets, preview=True)\n", + "\n", + "print(f\"\\nAll {len(results)} scenarios completed!\")" + ], "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "metadata": {}, "source": [ - "# max_samples_values = [5, 10, 15, 20]\n", - "#\n", - "# # Create parameter sets for each scenario\n", - "# parameter_sets = []\n", - "# for max_samples in max_samples_values:\n", - "# params = KrigingParameters(\n", - "# source=pointset.attributes[\"CU_pct\"],\n", - "# target=block_model.attributes[f\"CU_samples_{max_samples}\"],\n", - "# variogram=variogram,\n", - "# search=SearchNeighborhood(\n", - "# ellipsoid=search_ellipsoid,\n", - "# max_samples=max_samples,\n", - "# ),\n", - "# )\n", - "# parameter_sets.append(params)\n", - "#\n", - "# # Run all scenarios in parallel\n", - "# print(f\"Submitting {len(parameter_sets)} kriging tasks...\")\n", - "# results = await run(manager, parameter_sets)\n", - "# print(f\"All {len(results)} scenarios completed!\")\n" - ] + "# Display 
the results (pretty-printed in Jupyter)\n", + "results" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "# Refresh and view block model with all new attributes\n", + "block_model = await block_model.refresh()\n", + "block_model" + ], + "outputs": [], + "execution_count": null } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -727,7 +795,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.0" + "version": "3.10.19" } }, "nbformat": 4, diff --git a/docs/img/pointset-output.png b/docs/img/pointset-output.png new file mode 100644 index 0000000000000000000000000000000000000000..2e430d8f008117635195c9ce76514841e0292021 GIT binary patch literal 28851 zcmce7Wl&pDv@TL;i&H4>Qlz*CZz)!^KnoN}DPAT0j_u&{9G zu&}Um9^qrod~OHyV}7vR^;BPCRgN+2Vh$eIzR-Gsg;kwEcx!=&IezS_X6%lI^~B@f z7kkBq*#`^DRZacX3j-hX!(~u_!A0INI-?J~yH(x4;;g>x^m&x*Q_5+}qk8;?)M{>W zI~83urzC;6554Oj7t|y+KN`=R*JmPGsd?Y)$(%~&o#G)pn!fsX=nf^T9jBG0`8o;wtv07u;gkQ#>ft_iVW zKC~ySC>(Nf@}#71ib}6uy%JFgjfjehvWmqa#+)6Z!K1@mlq^gjhH2xlq_o4dsr8$t z9l49)KYVz-8b*2$qx|aCCc$|jX<$@E1s`htff7F5aR*AE^Zugh-vw9Wk$LI&~uEt6|c`LFJYdz=h2dF-14)_-0p`tLjA|7%CnSGcN;Z}g4Tr}FRT zdh|y}K+UerrUpkbd_=nT0yzjrmx=?Lanu2WsQ|TrAtyHKHRdViyde;~u{ZfB_Pug3 z`_Y0SZm#`CP;Is6TNz3F``7MwOE!Mk$hX;X1&42qH0|GthU$kQwWJ6P0FJ#|J>tcg z01aOI1*Y!`?OMj?lgLBQy6XVPpH~A(PTz|L@_wEs;0acT*c|MkSJroY1p^Ke1M8!r zOxaX;#s@8KDsVOiM6pRO=4X3X*TAkqXOK{KlJ3DbBsXzjw+^8^S=6WU?x4>yd)%FY zRN^T71BWTeah*$6=A54zJokd`KV-ymown9awQ*T&C(JbKtEgg}8|1t>ZWdPw1<9qT z4HYpDmb{44PSG6n7=ZNvaHaC`va2v;Yvnc;tW;Xz5UQ^V5POuU96jJ30Ioq7z(r1ZAmsSMB zz-N>Mpr&r6hQQ)Kxjs*U%tGB^wogJ5GQ5g|^S9O)>USR*I=MBOM2YxKseI=;yb!3o zxe?9|assllSx*1Jqgx7);WX|~h>TxAJ&&&mrdGtpOTlUZT-gLDiC%5LXu;-QLmDjO z8qaTPRjs46IQx&o7Jcfc%(;m-YKNLZ(i+TxS-lFs`W`vq+o5Kr9!wFNa`3AH#UgrY zw&h+;Gq5vUpl(ff3;P#k5p{P3bktoERoci@&e9j 
zMHM-v79=yS=P*H`ZnsIdd=p!uu<=>VMZu9RP;_3HN3aWLAJ}cpZJO-&cF-%g*Zr|C z{Jf_@=sbN#U;ag@n}`1Ml=$&Azka5!zjUVuXN%lA<;wgi7bL+kP94qp+Gl<`_0^qb#xsW00nzeugddd;gfn?6?#i7^1LXn7{~c zu{Sputcc78%!y)IC3$*sR9Q2|IGbp?C){a~_$@S{5iv%M>}m>tO;fOv1a7~Ca}`IT zvou67=;b0y;@ke^MjT04-}+;$hrYgs9}!D;2W&QSbxq@Ecdp$T2J1E?@iqf?UqfNESPB=(YSkj% z^RN(U^Py{CSjE5hz={lpSfqJSbJV)1nd#|1%eLXi=sQ6TOsJLL+lxDy()W%XuO19a zsJ?RskjwDP_Y~h?4;>@iWZ<_oCAx^!S^N z$lA#3AyzC>V5T$wl-6pZuPUnlg{8{tcaIxkj?T}sy0Kflr+@pj17a+0c+;~{Z9?;= zwn3Kw-fIbZs;7x*{lCY40H{Cg+Z62z{J56iw}rp*kF0U#wnS_f_aAd*+xt)5sN^V( zisfNFtN0dV{h}h$-V_^xo+0d7MS?$=uWgRq%rn|o!26!KeCy@nYMAiq(Yr_ACMhTf zbu82bm`dnt$f+a>Np-bOY^CQ{RLBm$<{y&FfB&8-_jSXTpS2c$kz?{H;*@Dab}Q9K z_$9Fd>eoZcIr|50%@AWhhip%J^3KwIM(*1`S9ZO7(r30`zV$x8mhEe8dh$8!1=IW) z+Gj7z{tGzkrTes453D8rW&i2l#@Nda*XN4N3DQo2)@SgOE!d4MrCU0+V=q^05Qxg{ zKGr{(B<;LB?mGRHar9d_iVm%>??_N;w>Ryl z^Y5_P2y)xZ-e=?!YboXdt$e)(6tX_0^a}VtOLg16$2Ul#HX=DoRg*q7{G}c$!hPZHb~U8*X2AZ*Cm}Buy!zPUx&$_)2#*9s-9jhJ79Y*k z?>aH8dvarv}QQ)|qQX26FeHj^spKP`pW7c&5D`&*oXrM#G`+fXt=5 zX_QVt>cb}yIuT3~m^FQTg}t$BWF2phjd<4=FNvlwK7G+wg&w2_Y+Sz{1LXVPU2gZ( zG8`tHXP=+$9Jy1D<kyI5ov}3ycSd)-Tlp!aIGc(rQ)%3n>atim9N~)${C^CPZ_^RdCN~pPMlT= zomUq4R!MTKkqiTdULRKx}}vft7z)x!qq zrKvx_NkZ)TE>v(D#P+<*$3$S0#R}-W{YP?^Vm?F1_XDEhh2+cwe>>}LrN1<-rEJ$C zr`fwKt~M_&udr>%wgz{iSGOIUh&w=SF1I(AuKV=?;qDA~;Qs7JAeywLvNN5lB(7+G zb{o`o&!oT-)1xCrG@Th6vW1>_Y9`W#;Bw=nzfcS-OKRQ5QjqaTy=%p%a2G5I?j8iu z$o`p-gsraTV2L)BOBFs9GR`j|Cz!Z+FGJ6Co|sW_<@>0?NtAxqJ~cCLh|~6^xmYHB z0IXrGWG970j;ZAFEXJ+vF+W@U_85WI+o`J@+ShXun{xz0bN%Hr9tj6!d@caZS%<^F z8BiAZr&UIU{q1+_F?Er%8M^2xGBX9ILa)Lcz7Ilkn%B!({o(<(^bTMt9IK7Mg+YBl zfGa5srVCjH*BwAo#gEJRMsdd4Ms!x{h;lT6n$hAww!9W%liP|ES5p^U*Sko@qk9U3 zRFqhSk}!g7J;E|Qp)rGw9J!Wa)+x15_DaeJ(Rd2r00!r%eglC7G@Rof7^{3ZOM`CH zp+v&68-wNS7IpY^IDK8ywy!CYgvsn;bH=D`=oOe?7qX$ESWG3x?t=Z)?z0}TaQlE` zTLzp=R}C>*#7Fu${h=qKjjS$P3ctnHG;W`~5u&w3x=L0ZxvO2u*1>3o3qo@AHtQ(^ z77iwMAG_`_q883Q(1S!vHR)$PLq>@-{`Iw=aJpC)8@S}{~>;U9L|-%jAd7t11(4ERWt6R=fV&6;FI0N)0qYC#3De>%?^7W`FH 
za*S2xX#19!gh{;7%NZ-!vv@<35v$Q#*jMp5lxw~&kc`{n$WbH1SlW}`q94Isv5X?O z1uTHgi}K>lI)B5K6H<-tRa3J3tN&z|r*zRc?@FKfN3IU93!)VWQ0Oq`_IvcsSET7? z&87{z&W2s5HO=OX!}V7sy`NFJotOorIOd^qZt)&V)P|aClj0SVvAmRBfM-U)7=YO0 z)ggDn*5f6yaAU%EHA61vLsUzRS-pb%rCrr;+B`G^Jz9nF(##uvahHt0Ra+}HrCc!& z3+~b^a5^gqvbymRgw20S2h_)%*$WW3JTLL-r!)=k<0~T_2k-5cRk;++YcijVX~>gH zNuwSpv|bYTG@CClWh<|hmgvg%F2VA)T?a?s@^wDF>V2`-VfDAj2lKzt_*A60XMiQK z-45WbBNVY6t5}w?tsJhI^=2fsho|D;bXD9okE03111ZIEVcJ|}vEskxHFp?K)?QXw z_{9t_%dH(r$gw!{&t^jLD>PkiZ1W{k%AZ;|S8s}9Q3mzoOHTCN8i*zeq|T2k)Swu` z-4*1mNMGK6PB={qLh)t7rp`012@bgAhj9ULAFIpxrVr&O6dOAWcLEAg5Qm|y=;n>9 z7qw2qd;4aThp`+9Uk6=dqZPyGOZ1;G;`4NgE*Ik7Y}=>XzRA=F3f3I#FZ@+yEU$H8|X@x14^*a$u})3w%`zSpW!Z8D6MAR*{z>=9IdTzcT7ZcVcwW$S(ORx;=aPM zo+2ESpLIzEh^isK)A9x1U{#6x#D;jaWe~USpJr!)agWM2(4+&tpo zGIC$9h_{$E2x2bY1G)Q^astXO3b&OXGh9CtTiI^5pJD0K6szZSr30`=*SozB0+_N6 zR?3JO4n(F6nT^XsO+tn(X52>>4Wl9Ak=y#nr_`$kqV6hwIc^j4=zg5H_6^kRdm^_v ztyq?9<215crS{4^ec?%@Q87G(!}bfFtS??FIBnG$)mTx*y!ShNF8FiB7S4gMTAH0@ z$y9#v)@PC7sLJWtxcj|o5D%dN@##R8>x!P(9Evw3sQbZB(^TfFEk*8E+;!Lq?O5$% zP_3G~z_g}!45ePdQM%lJBIc1eV-YK*;hc$}08T(<64_l_{CEf3qI*l_DXQf}WyRdP zG%((bN8X)Bd4<}S`0n0nmSaY(wDXt2dL!I5km5dnbau&x4Ym;N`8C)$!NMrm5_A9` zzpwc#7bmUJ_J&lf1=t2&bOsKU1irX=$cnmQsHutwmBe-Qv!iPKbv_Y1c3MSoB7DNbKoP#B7JVK4Z+N!T}v#*vPqh2a~$RKs*E84_I zWgAS+l};ZJr(4#wA!qdzvFpq8;%|B``Ozv@eX5aB-Ol!&%@elQ*SLP$=si(@-2PGc2jW_G^~&RXsA*> zB=1iH1p#9LQUyk~_*$T2f)rGMzvt}zNP;^m5TC7T8B!F?mH#ZVGfX!MR~*mPlK<_5 zVsIwLe`iJ=G9czc-Kg~NuAcYl19$DEm9>B9rxh)QbbG%ogb^i<#%3Qf$k49Ob@wu$ zF3ASWe_E?Z5M4cQMjMwZd@ZzEE0fXW_Q>?pNyqq8rlUQ+L8-kB)e$)A>O1dxA{04l zI6EYOlr>Xf8t)9`j2YXX6XZ;iiFQj4$NR9NTye6toZa$})vYvL_A%Q;Wl3J%R2;g?zafej?ExOxV=uXy>sr1GkVaq@flp7MK}vwwQ=AzmyATPB}) zx(3G9@EuCC%NE}#9;VOP4*f8c^rVNvF?;IItcQE8r}D{b{eeKvEg2E&IUtXU@c7R5 zu1mczR+B>m;_hK-vYbGJ1Zrk00Db*-6MH#M>uT&h%%7G%L%F-nZ=um=sD&-QJ+e_d z#M>cC_Xl28V13yvOvQU?Ii@zBV)^%Y({l%7J77*ppw{0v8X8)mfulRpTcwAuY&y@+ zZ2X^eG$Ecn#U9wzZlYj(9a7@=WBI*CPnPWTD5$_r?{?+ca;C0`9Lj6G7lN}0?-hj- 
zaxI*YwjNQ>`vz|NogVK*e8X83hZI>^U@yzul3>64>ae~Uf%EkMGWp@QeZl`O)~^rm z*Rsom7CBxoQSQ{8e3NddS;wE1SVRRP&G8&3P9OU=P;o}@6SLY`Ql~FZc?s^-d^42Q zc8yU%yg0o7Y7PSg-tQ;S+eF76&bMkmjCU?_>rUO7TS%2LoV>kJ3@CKRWwlfj6H20WOVnwGHF{fXCiekH1O{mvCPomqQU#Y7H+drUW+=g4X%{9Aos% zDY!~o0NU7+eQDl6m*(rs6E{Adf3~?UY+jT_s373 zg4X*`Be}emvQdQJlB|nLd*Kofy1&pu1}t;dKtL@6pNE$6VkODpnwj?{>ohw>T* zFnv|hF&T!MzV@Z0#2#p+pc~1jAW51|oc$x9xOM3HfpMj0C(z!+u9>NjstvOetXt=K znRBr(PkC5p zGelb|9C&}0fmCLq*y}fn4m%Ln@{R6lg$@_PBk_XL|pnA2EfxbQomN^?R>OT6l zCb$icL?DidT<+4U?a-2l-s|h@waCkC>wD<^NiTSe6b`%axY|TjH%5>KJ-^-kH|m*T zZ!l40Z|B|G6H8{%&ZqKMmBqQrR*MsU#bkhpCd<3&nUg%sFt@jny+dPNu1JvY9COf| zUpw13IB!rMDCpztqBB}DC9os*jw5%ropivAbfbmS9DD~G!OT}L&ftH!dot6SeE6Jg zNPf!WCQHT%rP&w66bH-O%Q&E6kG&XzR*Np)kM3u>T@LU=7 zlTM54+*Yq_ypy_=Kk(56P2P@hKD}Q2J!6e=jLdP33)iT_$7>V%nj@-Hu+GMnOgNB$LzvYzN=Iz&;Ca={p;@PF*sV%yX-Xv5Sk~b{AvE%ixoCxW2Oo0J1a}) zfU0X43)8DjYu&_vSLq*UiwWD-Y>oL0Nm$Z30Tu4fDeM#;Syi9Q>}TaW!3O(821Dk> zQ&Qy4hAJ{R7R|t=Wq5+@8KyQIInvON>bt4n1`7B@-=5IsT0m(^|M(lz#@`d+j?C$< zKY?eJjz#_f?*b)Pe)&6k*Ol!pYao_9j>bn1!9VJkXlKu64reUXEU8|c`IQPL6Mj7I z_u?dqq11hs?32#5Y(z%?RW1FwB|QmU4Hx$>Xl}1ixaZDR#MVLd2+`LHSbd0Gf)P}w z1p{0hY)up31wCnX@bQIKsq&6R(2xGCK<@sJNc@*3?Z1o0K;R_zV~;q!_f0IP3|zDw zVVeMjc)h=XYunjfm!mUr3xOmv5Rl53`5|#bHSR}uzF7YSpdVFKGswitEp@57 z0W-;{z1|&-u0rpzM!vugB283X0WPcr3+KuFfg-UtT{R{Kb)fB(oc9xcM!(0K_kDcW zY5|j1Y+jRhRLf-@vMHdt z8-d*R;>+iX!FxRf5=^$!bUy|MR)!eCUI@0U|MV30P%SA~Zpy96IdrHJDm#)@yRgnL z8AK3Pc!wvo)(sjt{4@Qivx;wWYiv6 z*AzphJGKIx4x~mmvwyPqNjXG*c{u$1zQ5Vbt`gLHJvJxQv7wJV$cy}vGdHN&^o2er z$)f|lIw|HCijP-e1m~oeGTOJ^LLN0)B!|xCG^5LNcJP)GX4V;90tJ7&JYVF4@#3Pg zeQq8oJ{FaC;IZ{Y5ZPccIDK;l1`O{g7K~=%DKX^(d@;n({o=D0{e%M|z`3Yn*d4=7 zY2S$b0shioc_*Fw)+e-b)X`HHBM;A|IP-`jdl~%>1V)?6b?%Gl?8#c~5sRJgRA18d zKJ!9(Ny~F}H$JZhR6}Q?IJ)}eG}xW`iP%uSVoV}bSv`&q1l4iF0%cB>al_P?JS@NZ znQW$|0_2Y^2G?R9XwOba#ITL-%kB14LYbS#p{BebxevQQ3R|U%5i%JCs;`QRG8B`+0ZCU1`<2*heZDVwv)<0p|mD70aLw zIqU{#wr~EE{G=om7NJ~e(mP3SYfNGgx1iwGI=+4u!-QbZ8AxF7>Y@?2jvY{~u_o-#s0BO!Q!?B8Z&5g+QA7T#z#oD`*f)T61nBGzp)<$e 
zQ;H#AZgzuh6LZx?xlJx{BGCg711(S4v*)Ri#D!E}*`WL&WhR&0J?8_q?Do*{`}e>9 zLOuQICG()m8PjBDXkXH!LhC|t;0G?khUbqZ8@l$^b4V zi7zC&{!7vxAG9s&ktLbYUK8-?rdX?i@tHsD=VIY@#&)!9Mxfk(| z+{2T}kM!BY{;oRbzkRZ-t_|sq9xXbw4#63Y@?)&HL zMy{>Pl)=@x%wF>v!)XOo#gvV$*_wgjR(>ez{zsXwO>PdJtEiPUf{n{6&EN|jxLO+w zuR0u-j`ZH^EJCEJ+g)BfFN1z8fqIrS3TC-)nYF$l7W2#R1g}@gzZfvF^*L71`p`4L zCA9Y5=lF2eXQL50>2|$uStah8tHQ6MCfV%R)E088z1?}**zW!f_gwOg7l<3|Dk@bpFsB8k``#gZu) z_R1vDjMFpXZk9vKnp66hJ+?&&GkDHRM+DN2%KALZl;684P=VViCYd@>hRA2cv^x&= z1(s)Y5~@%W0fnTGn&aH|(4=(#;2V#PYL8e^R8|JCBP@G0H;NYJ4F z$Jl7xm$qrX96O*pp49T|Flb+P+Rdtr`mJNz=xK^juzunPS3<-_43}_9Rys>>qRxcX zqr8B{jvg!Icb{XHO5;qQhnLzBB%QsA@s-7Dxfo@Oym0hOEa|z)$=-s%z}y!%w{fY# z)lw2F?@fpMEu**m%zZMGO-4k3KAAI?$9>cQ?}&C7PeNLAU+*_h|J|5(EN-O+uN`)X z{Ac;ssT%1UQB+jF79m;gia)~klpAS|(a%Xq8yVOeGB88amt7?f1c2!;`8gXz0&iWT zG0UviGOsM*c6dU)m2^8}t}~Z?w)gCH(gT~=je+5Xffp%p2-PQF*#+U1K-5)7b>K#O zf1c+6uRkNjzDpH_B5cL*kyb~B%r|2D3PQghmLeQST&J>}@x-brd)ke9*;dTdegS2B zV86u)JrXeSM&;-w}$o*9#V3tqQ2%Nq&vg(QzJJfza4qGz4IkEsGR%6tPF4msONs0ViN z=r(L>T98y&EfPLiH{p@9CsJ z=JWyIh+nT>m zda}^0gXmMfbcKG)YLefvbeVd>6a)w&eL}IDZ|>wIg>l%PKw-73hKHsc%~DSQPicGHxA}}0_g0bNE$JkS4Ylw#nv1$U2cEw6pX?2JAIO5! z4h3zaEb(eV`HN~69x_8`4ll*V%pTM8U!1Snp; zrDiOD5*D%)`7(lbK(6 zh42W~>uV&xY=^y!f{WPwha2-7*3R;?iH=#>bVF7k%C`@I1r)vTnkxqYr56~t6-jcZ?yhx1cIrpQ|HOHGXDDL;1bgP%3FbxTx8|E)$& zv=rj5SGt_t(iu)~&ml*h!nLVt`rPums(%bTMny^#e%L_YB(}@Qf}rJCkHLd zsn)Jv3Lj0|v@9^MxbIXa{duL}dL$spNz3fbo3qdRpk+th&D(WZO;45BGjiW0ZQ{y?I4}gFtIm&iqDgE%~ z*;7ebOCshz7lm+-G1Vnq(~*(nYuB@<$oJ4rrTM1g+BZUbq4-!EmBp@W8;)q}5EuVS z>=wIQ=Z9fk{>OKZLppE&JV3QRX?pfvf!o;1)+7io_FWJT!l4u5jTSz=sBg+|Hg#k4 z&eB5qK&p`Q!iDds?k?DvoNur(8i9}7J(1VW&BCuZ#-32zvAqc@xtN%;_Uc~fSJlYo zYh*9n_>$}aE=`Bj!$)CF9}|F>RqN|Ww$(2V9$LC;crJqmr8Qg}{(W$<#~F}Y0soP&>O znx3dT>vl&^`-DZDM@kC&q!e>TQ#HBWtJs{b_Lxzo&-%9OZqI_eZN8t;uW+`mk{ofX z2Rsd0GWTl2-?jVnoiOwI5A+o6fSZ6`&SQHl&z7m6IeukfV? 
z3D%+}-dZ7T@|Z0Tq$MI8wMJfhHYR8LYO?#~kLlEGK)?r|htiianhXOZf7fz=;|M=& z($XxS*=q@Uw4peuzEf#`#pi8F@3nX!(QT)##P=IC76>4%WHW7AKclOts;fkkNzZ1t ze}sFIR0#B3(JcKj!(KNs zdMk`uI$ohw%heE(;e!Rui;D2NOv|%s=Y!4YFmfg4sS~Jv|6IJ_?Ay$_Uou2P3iNQE zY+Iq+d0tT-Pdb>kRAWmz{CTniS=L9{k0`pb&c!u?I-qXCkn33F=`{enid@IQvari5 zUwuG+ne*4q)ouhHX}-zVvXl0QnydL|f+E<1yvC#1JYD{bvbVF1x@YyDm`n_{uBl^u z9S8sX@X^0_W=B-vt$g}}%2GC{@Q)q%2{{dzflze*)(VpoY}U+jen7(r<17BSY-tc& zpE`hCgxLF)v%dWX&Z^rg|E)FpI;jg@9<}dw;J{PdqdF`Y%nm9KBk-s4D_E%cl_kcJ z#j4AAw_KLA^If{~sQ*;u__IC-i3+i=CLEMM2JvM8+lo6plZllZ$dgQVl$o|=*V^{z z8obs>w)3gOi?wR-hurX&5+ zz-`hd23-uNNl}E$zn@*8;D#n7p*|4fNSA5TXp2M|CTj`H9V6bn4u_@>%B#eg7o1jS z85oRwKiQZ)TXBK9Za3OW^*$hw1HxNd@2cl%Hl9@X?5RWfZ8qD~K68R9IKN-!PT&q=lJX8~g7fM7ENsuaJz*rih>@OzX2$ zG`AqS5xN-jIH^_4|4Dk^AL}~R5~OjzVe8M^_^It}moxo3RtYMT$p;Qzq@I_b;uAj7 z2e0T-!dQIizD-?!v)=A_`KIounXg=|M%VhDBXl8Nupg;vm{X(CkW3ZcDn7^I{3)#@mVs?8$Ap zol?D{*GYVdG&_@9=hJ;;24!9aN^g3E=Hiar|mecE@D z=q*bzd3w0ktX_n*hXUyaCXNw}%{4gf*}1SAOyd_Z5326Z)xvB~9uE_~VL&}ROUeh2 zrWkWGll7?1P5(cOPY6E_@;q{~!t`Dgo4=5s1-Y_J#rno?Z|q zehGotW)1aSu;z#=Igpn)Gs^TCAL;qeo%fM~|`Pjrh+{m4e}Lh_(MyD*gBSrI*P8FR$OsjB;D6 z?^1rkL3&~=G0evVgKRZRoT4Uzr~v1M9$;x;ZTR z3@5H_^8Mca-uy`qQ~aG9E>no);t9AWgJJ*7s73D84h(3sON{h%o2O+c2wqJ-@S`Aa zw(EaYK+fg=MU=8uc$B)z5jBqn7CRsvC8yV;zWFC6#?9>Rzs z>e<}NXemNfFp0*Em||CgVjYI>t1b<-rQJ&J>i%Gb{)CE_w#2P6S6W6i#$vQMQWW10 z3ynwo*se2X#_@gX_YkSfnO$ZvvXrL8xGoa4R@NqUAV#9UC)a}0YYO6O>5EG9-^xyU zo(!WoM33Z}cXTG^I zQ`psvg4Shok(eA+2%jK*huKSsE+5j1DziOeXKXx49Mzn8ubvK)mf*TLXSc!VeTx*t zTUSeLBIk{3vuN;sx0&GDR5Mj`1xCHaEqtk%e5hKrd!G!LMvYHQz1E# zl8#8EUVU^Qve9lw=VoN?ERl^ht1}?_xBwhls|XBHWKOJ4brBefZs#w@I_~*AcdV54 zJNIciQ%Ek9NRk1Qt(- z8b1B7s!MSR6^b;msrU{69qbKXWbJC=-pU6r{pLEKLdq#@3fP>&*WYzD2i@u{L34Sk z3oCgDKb_=A(%cEIxbOj=>_=}_B&1=qLOri2RKF21-!i`Lz(>EOz zm`Tw3Ep}e=jjf1W9({mztZW^-@uxC2GzpT6Jhi?Wk`>r_wqG>c_n9`N+!9wQ(NDp6 z!;w%A%MxV}k z-(OtY`E`Jolstx-=px_iltc{Z-7(y{TcdvjPKihF@L$vX^+;o7< z+2ukvt8Zn>2Q5RK=ofaFPbTV`sY_<{fj_yGti<9ltemOjV!mkd;~M#mXQY`&JlLxr 
z0vWL#=x3)!rsP9tmpscS$TkCO28}Ln$Ui5^42EKv0M#4~rjkt*$-A;_8jh+DZeKR+ za_k1(Iy%wz2aP#emWz|^*q7KpRAkEU91F9^*D(hUJV6lpLv?0E!>|M1!oCK>tiP>7 zAaMeBW;Pl3GER<*4}D1mtZ!WX(rg5OFTH>ZT{JL!NL>39lhIn?3$(VUIzRGBbB<;G z*^nN@ZTx!v0y)`r<+y{Kx_w2wMfjB`UvH$P2rF!YN!mFm>LR9QO_%W!hT_Me)HN}T zP@2OC@+LALiTN$=|0w%aXoC@(NsbmCX|XIqw#BC~826X>kvV=qYH&#=>UmsExc3bC zgA%zL(rUp{Xkh4*kPfZXu{QAVzr;&5p}!@2z@{FquMxRa6Bvfinl&l`jlnab}wVsrN) zMy(>G_yGp;5c}mb%M7d|bhAyNG4QZwghCq(AnkdY>Rk zh~HpXBjDQMD{T3eYDL+6_0eL0ym??Xb%|dbGEvwynYKRr3hM_l@qwIH5r>UKZN zYVp=8#L>qCu;Qyf1iHGy%aH^nI@Z*NQcL#1L!t(E9+jRzfcKLfYAW6LF54JU1fo|X z=*k4e+pL)6jyf@S??yfBh23}rNuj5}B*D@O3Z`qQqULoEpMA4`S_he4li;m$e<_9) zl&h42fZ=MP}@nUk&E&;{yPJ(}{uf|Gu zhw3Q-)Zcdtwa)bv4~xueTz;)D)`kWKLmMjqI&5weJmWMt!Cm3xxK3d}p9saGH{4%RUs*mg~i3WHjR&0bW7(VWxXvT%G!MAt$f?VZDMj@!aeINu`_M`MD+m8? z6+&mP@D^={k;?xTe3C?nsQ}--en@HiOq(cjq|< z_WnPWR;y@XLKEl4MIU~kKsQJ zjQ(j2L?AB=_xlN!k4gG`$h20;1ho~{yH-{o}8PvjO!Pq|M4n8p#;H zDmU{jHsNPnn+)J11STeRTYpLn&_%?Ge5Rk>RCCIWHK0`jYTz=a%>I^Ew#?VHSr?(J zAfEhV?O5qXes-u86v-&9J=ifKkSD_a!`+>o2o#^34gk00I~RAuKaLJ(VO}t4V+G)L zb8HX^BZp}MW%3N#gVYRZ&6XjI3E5gOA=`q(eE=q>gRSAxzbWn*FY&YbXj{_*epz4f zz1o#9husq-q-RZ(=jcd446@DDru9KtJtE*L<`XS>@Uv{^eed@0OH_8I$1sr+d|Tz^ z%u4K(zwg%f82{ra1&8L!mr~aR{*00CldVibXBX7XsXXI}ZzK`wMKW{+mc6gdh5bb< zfIETw#INx6rEiC7*~cKe@7_r~t=SqHT&@gD;+H}3k@$)awU=YT# z25Mo@ld(Xup(@ojv3;F~Pt#wGYo1R=$3Ddbz)yrz@ck!Yk9JH=UPSW$sHQIy`42`r zSI6_`W+$b!8)VT^{R`)zieiuKW7tfsXPyGF&vTyEEx6l&42$Mfeou^oGMPl~J9)O&^9Zr;DfJgqQ= zKHUMewCcTc?~q0@YpANUb(dqE#OOny(r~L^`O=wJgAe~uBgrI>C|~HOf60y-QROhO zHtj>6eLB0+u&ZPQ_NM+{iyIWPTj38!Xlcjfic*&;;9j zIMkQUvBXIp6|XNUz7kq5?e=?jxANE%D7V&Ym;{Nvw5}KhavgRQMg&jtSSGPmv1A35 z&XY;(rmE|3bV~*`b+!MG*3LSt%5GcxfJz8RNvAZ@-66dI2?41^E>gO?m5`E5VlF^3aEcV-pMzUR!90_5CPDMN1hL7~Xn4Sb( zeD4Lv%wE@BywS_b|JwaUEK}IAN|7a7(YNOdy4-h;G|Ph)p>Fyj9oCCUhEgTK{2#2{E!m)6%o!)vCw z34sUwLap!m@ID66j;IIKKm~)E(R#WOdI1|%Z-G&l6PjyNM|`!YQSVykEm&CAbHg6| z4rs&eo&ZhAQR6i{)x~`g+5DpytyQ9c$^{Hd*fzWed z4ixicyZ;p#=B@2l6;3`rbA4(`e0oO4fmT`Sbs%K@aVOab0MGy~Cm_W9H)9Nlb;wb- 
z9^75zpnpFvrf&R;Bw6Wa*+8mn+G~{A>8{`FV0}ey&YsGphNY99{c3V#Wx_hr0*+m) z9wU7d@GsHKnhSr}Je`rHiY&*O)Nr|MgyFDGS?508xep*Owe4R6v$Boj=RmVzT1H$+L6 zY_*!9DZ>|@Znnl9FtBrN*%4qm0o2qrsP3^}n-;cK(ky&fu0FE#y*6B6Ctj$C2UU_< zK3HA)POQ3F^_Y{yT5Hitl~r7j@RQoE@rTBqC(rpRF+rMVDxn|uh$0V{Jz9f}_8Cv* zgbE5W+-Fx4*GB?-^Jsz28elA`@5}RFN)hYCU(=-VOF!}1)~yXX)X?d472rZB$Y^j3 z>F!z7%k_6!j2sJ*BC4GjS=_~BBUlLK@$n>g;bT2nAWW8U?>R6XezcVyuDM9%n`E?_ zW?pUn7^}}Ib7{R$s$^VvK(r6tIu1aCh;Fgxc`K&vQ93XHZA9iflG0zz(r4}qQyL@S zDcXlgyJG^9UkhQ*I3Fz5c&~b7c>6VGx=rv9$PefF3NC*IM{qy?5_SQAZIStUw ziFUeYF1?R?r3db!WJf5^J&FVqs5A-Zu2$*BLQq9aEA+rh>2*MFt0K2*-IM0CwGo#O z02OpuzIf>lDgnrUv9a9i#t<;eOY`2HWL>F)h^-Mpm+^ZmQW3c3NfC3OlS9kw!;dGB zs~4hFWFJyLZ28|(YA45V)A#JUkVq)il4K?vt}ENEQ?Jj!K(F$Y#?M>k4^PxLuWgF+ zKGben_cWzN9l7zgTv|&LcF2xjb^p~o@u|i>!q9fazW#)EUx&;c{^>={k?bw> z7r`phF~xrAX_2loe3HDKdUS28sdylMTvAS}Of*9$a!Ih3{N}dD%2fhB+9%%p`0M*0 zJ9b@k35Tipl*TtCAz&NWwXVASAm(L@NYK%HhzYc}2-^>^f-}F9EdRjcp>9bTgxnlH znKZKaXzLp`|4>71Iq#W~)>(XO(`=8PHFiAp%gs-@moI*l%9ed#K6AM|Rrt;_%jbPH zR-y)!da; zyrV|MimL#NT)}mnMXMCE7~!@X$DZgv5fPnAVD^V-dv%>&Ie8$yU&J*mPvm--PP^Bz z)ahV!gS)HGS#%yrNaDbM+&yy>v?-*a%9btRCT+(Z02~f1D}YFJyajn8b5Mz?de>Rk zxBmpLR(pMpqVr60VjSULT}gOUn4yemLYRq85cpKLunmSBX@}zIFNp#jTz2=b4|V;mB@=dW>wgpZl6Kxs%Tqvcnr zJ1$m(*_Q`hW+2&ACy0Ni6@HG$?ua&9j9eN5d699)*zug_$HAHRtjb~sk7_qKE!VD2 zDGmJi4ntNwUJ~$9P7_;EZIsf=5WOhlDqW5*e1aR>VjI}`ViMi0sy_1^{Cmd8{$?e* z^YE{AYf0c8zZ)O?Hjc=1%%;10%>dw2GL*V!@X6MDx{O}-4P@>Z(`!Fv8uF0Z)2j@G z>?ahy2~8>FG4q8{fA2Q6Iv*F}eME_u#{Tdd(hty|Da;o;dG3eBug(OpbDamtzJGYW zQ|e0*UuO^NN(vncu}@?$j@ymY<6LLR@9cX(R<<~5nbWXH-OJ^)mt?Vi&Skvcl=ZPP zd`JK!0oL&5@3DNrx9FcD0XLlmpFjhD0t^zA8f9b9xl;+B;=$uJII=EC)AqcpdgYf2 z8MB7s=kz$@=W8+Yx-9rrE^+o0hHi8|nw`CzgokGo(&8z3WLwH=MDC>a<3d}_NLP}v zdcYe?7Z&Ij$bARX^D&X|YPGQa7wF~dX*=0NQzCmx)9lj-^BkES?7{!n;s6NTD*yH6 zz}a8mVDGcr(O@SiJ-+^Tw>Q9&GP%lZ1c;@*&xk~Qn!TfCyYoKdzr6kYuP!wHsqGoC zY8!KG{Q9nr*tK7SJMHo+BKM;}96(R$^?nDKIEkr&28C_yg^H55t*1_q+Yc4;D|dHi|4_}$zz=COyTfjWstMY2@0Fdn4Xr^DdO z<>_KQ_86S0y3FEfMZWEk{b3DB*hIr2ctI@s!i{&R0HW?yKDHao9Di2vbK%V=;aOz0 
zb-gKELD!}kF;$S!D7Jm$*YSpkmo}AFb>-36p5?&*?_8hhs{0IZTbRvBTE4=nfF%oN zYaH{;=s_o^Ri+NPeqr3i30TX;k-6ab11p1kXAmrBesR*TSKv7Zut`oU$@`UM<+ATh zY0;Imuef4^-kx>L9D4D-uxkuvxvPAdR(&^UU_9Y<*UwH<*ZivVjf=nxXGS&md^%#S zTUIBzpgdS!S6DO~fai z+5MWbP*8z_r2aR{ryf?O<92;ZDs2FLsJtWh%Y0kO<&8!Z#1AgoZ%|5wy7nK$hIoz&&*VveDxtQ6;_3tzO+66MD*xg_PzF zLM#wb+C)jQ)oWvUaM{PV0w$D-b(NJ2UHAPjg?Ruxgx+|ZhENi%#L@a0ETf+v4_ny) zxDfi(xD6q~7PMEk&P=m%E=ypkdSrVcV;viZ=PVUWya=714V|sJSRfEE53thIWE|19 zs!R&<;4h0KUli0gyHZ=&NlWVw$Ppzl~D=Ogq99#A7)9=x}B#4 z_cAHfjW%d#ra3oVnk-mRu&edV==?vt_F|bXu1;6CIF(w~H0dm7)>JBUGUBDXJq82Co+L4s=%CXiS);2FHYm#VEI zd>_?5h@m8LelLv>64d8YI~&~Yg&UVztXNMAih3gni~f6-3=!x_Xj7VIAMFct$JKiA zbXWm_gaKFA^0=TLqFA615ISJK+=4jMC(V<;AnQ(atkHnXZLN?l=7ExQV>~K*?j$?9 z58jI$CIXRyw<@g)GPq+UxNOMQif0}9_pSVOJ^4dB&10=+=3Z&3ORfQG#hA&~Yl5wU zgk`pDfX+=A)vW(yFuD{w-q)RvnQE2WHLk8V{_^t+i@4f*`Us9o(@A5OP1jE`#jaoW z91ic>)Bx!FN&he2FI z4lVVK=fLomj!x<|do(4l4C$LJjup|yCy*c;hTKoH+|CZ@Z*l}=!=4YGtKPO^ply1l4|Xs@^CLwd3Q=sAE}yVR3|%d#fyR4{LTH&IL1B5 z1?OmG2?%v4fCxagSkEfdFiJb~$y$`AdF_I37Zh4PH8iMIf6dQC^(rH~^z*f2^YdCh zv?xBLRMQ`uU7%4l?7HSIS5}{eat#~zrkf3wszq3G>$=ujtXHu=(>qgPqUR6@A~^qGYu-mV~p0IgViWAuZ!B0xYM2jl=XKwBSq#*!0l_fyV|VQ3W?z2_$!|^o21is{ zqy#YW-9>5fv1ey<*j5In;4Ea%mi4{SnRjv^RV#Tc#chg5Tayq7;3?XVdxL!15i6SO z^&SQHEvfVu`=O__UiaG96NKVz*X%t$W}cngIElyh>(0zaW1F4#4Z0LWjvx0BC`}CMmEvsl@7{eM z&*T;`vlNKJIWy|8Lf^rQ@YKg8aU~_#J?r1|Z6~ znitLDcUJZHe7XqRFTOpeLhT#Y;%dV8~6 zR=)X36ux1RQ`A8|X&|BK&=BD`{mAgMXjMp$ixUk7-$u+--4(7l0A@A^X54JxZB4xS zwD8sde;0w96uB^tlJ>sP)7;K5&qZZNi z^cxR(kw6cR9F#K zvxur~{{mM5CLD_oE*lReB!U8y?g>A@ z5bWMldRggFkii^AHzyy>ee!|~dZKc#(>hE_j^r}~T8|Mde4!bDPDn*!nnumFSZiRc zj&JEANM}9l#W7Bh0x#2Vh#|2d+`W%^r`>cN{AML`!L0+ucE{#p7eWkRWW%tn`qnr9Z|YiD2;i=8zp@H4|csR!zHaL3*84nnHG7C8n2w^4uA7kJRy z-twEhyHwk#+^;B$Mg7R5b{ak>x>$8)*jy)FyV;?--@P#TterWE3GzI7$>U}>cYd`n zCCyUaT!>IK&3P*}y1+?bXps$@3t}~)qXg6zM{ew76}ak#W+k1-opAspJU!K-kh5X@dP)rh`szjz7N?^4)WRNE@z*)e2m{tgG>^Iqu)4N@<|Yl@yqFt9orFA$@;#&%{U%XOvjv5zlE@65ss 
z-V-w3YRilU>JDO1zwRzzA|XB_a`?1sI)PuHEVRk4~0Khr8wgGw6P~^j>bLR1f>`4b(2qT0QS2KG7z)RQxmf=@^DI$95@gKmJQq zbDJJ@zOb(&WIuPB*g+eAKt&-ifJm@LrQGcP5uyRi!XgN@_r?W9p{MBm4` zXk05Mp0ag0FS7&7A;Z$|@A4;U^>BJNBP)}R@!O|j`|cd?8nHNvZ^Ki?)HjYO<_7Mq z!a~535T;QA3grv;EwwtV`%dS<<9HRG1$483YGJN0Ev3Y|OgP~ordDgLJ(s7uJrUcj z9Q~&F?MXhI6#SAC5-EI6r6Em6j5+%v7|*snPG(rwZ1wk%D(Mq7r02Fz!w*av7#hmL zEALF%UezYJ2@k~$YyP112DI(CEz65C8*S0 zAtBc?v0q`N3idZHL_n^Onf3Y)89P1qf>B&DDy(;&+Z|$BGjF+fj0}6D80N7L)=Evx7}~D9)wAc!pbeD ze^**GPi6F_{$f)!FoLhzEE(ooxbJ@g{E5$F_VHO8N^s}ua*+P5oDb|x_ct;aK_yZU z#o(y>AGO`k@`{JD=9iDw#&dd(bGka61tlg_18ohhTl6V~P`l0A^@-fFA!$i1Gr zc;~fGG}qr%+J`yt$Myp_@S9b^jL^HMscopmBOwH+&Y01R(%;<-6)wz4exE;K%gM)6 zsx|qUyD7Bsj4wcR6vGVR`USj}d?7S=S2L`8gS{pbA6n%+Sxlb?a|y)VV(U9X!VWeJ z8po#?HQRT52z(ZgJLmT3^6c3kFW_BAf0O|&cDnaXFo?3f5{kmFfUsk+W?Phua!ydw z|2Q%%<8(vRRrd>NqvASRTh75hXW>{XILxXOEZ9F}SW-(p@I=h7zt^s#{nhV7Cz!do z?9F}ro7<~NWg^1QDHSnWITik;vMe&2B72@pcOdPNSwAUgy1-$EAt7zie2@t~;S_Z3 z0p$XICC!@2rNNGQUb@Ys*KWm}lab1qnJLdyDYvK3sgxXwbEQf`c$xA}Zu5##B9@Xe zh#lA5Zuoi|f`d&LhlfiT;R$W0NF`-EWfMt~Ipp&MsVZ^iO~`3!wM9sn(XDEMe{LZa z%*p2+kx1Je_ceDNZi8$-!yzt5Kw}S3Qg6|>Hq^VFb65n*eYNUUwS2dn*z*QBCwu_r~dGB@Il5~0bG#ReLpMNy)}+xB~_ z^u7F>C8PJXPO(>07gjzV!UL!tCnfq_G4WycTmV{4pyMC3>uP4@s@Q(AE}av5haXR| zq0z!;#)-C8R7QpDMx?jij!OIu+Wk(>=HX+@$}#^z6xOYGeIrQNGCy6 zUqk7^n=_WlRi~D$9h&lbe)&1pbL)197G%cP2z$@>mV@{=z&SSt3tK+-qj+0l|DeV| zT~t6kTz!IZ@um{5)@~(FJARnaa5smw${mE)JSTa!cWG_X?#8OGjl5#!d;aAXWuG4* zuH2xO22%vy73zk;?0hk4>y{*}(`?KxwlJ-+i*)OH|HRqWboG~qUjn(VTwinEWNUR& zo))H+RpikePBi0u;&ZNiPdPX~O?{d6dG67_=}t?$#TE9(8Kk-j8^^j%Hd)aJ?{1~J zxoDA3@CZ%sHWu4Ncgjgo_e3%x7A+uFSNgT%=*A;Wq?S0mxvIaMQUr&Rxzh5`M^wI| zqLFR)_aPnhEZcB7$>XQI+4$JRzz9FgHQ~~$=Tq;Xd2yg#-?NZF*UzB;CNvNAyMbO3 zJN0Gv80sNl+Ix@l0wGYaA5xVGW{GcRLqliknrzi+xWq{T9ckL z$EYd5aA|qD-ym(Fykiqx5BlG8PvxCK`EYp0qjf{x(<52s)v3wza)%l0pH4}iuw{j# zFDHKRRsHZwiF3Oxu}4csVl&~epb(>%+0f$;kvMyVH^(s-7XSAuJz0( zX`-JBO>?b-T9?is2gp0=%-vmR1lG-#+YHQ52c-2Y zjdNNEw;~2do&m;ptj;tWVDyKwyW=#9-B__Ip>B}`rIA1bzgm26l6wg<&VF9UmFIy%IY 
zI|SI?1QXUj_qW&f{p?w2H`x72930@6HXF0MAPh;{(~vtU>TVJApRcE!aHCN#A3Kig zMA87P0jhmH1%>F}^c)urMC^9qwu%yk#k`$!0=J3+krRlO&{aFH-7br%=-rYV&Wz`6 z7e2X*O{*C~_q(t-=%!M{K6Y6r3dR5Bc0=Brj;K@ zwkR|zH#_6F%Ol1K!wR{A=ilEVzG@0rH$9Y%>={5aIQ~R4sHGzD)|$Vnb>2!Oboxi3 z2rpBfi%Km!=_RKlJ+#33I=#g3B;4rrI(7_80C6)@+Vfw`LnM?KM?!l})~fIQoLz~w zvrT`JY)vA;^QsR-ltwk*Suzapq-P3l4m%|49gl&V64wa7-Vff!fw3B3I~UhJpszKp7t{EyA{zn(t2 zlYYG@8`m#S9V$9l$4iQr=*uQ#+4MA^G}5?kLj_BnVKF1Ms@ zMi&%DUMaWv3f9oL8(%z~J=dgh1`O-ymldYAtroC>2dDa~9iNpxf&E4=R{q;2KQpC} z-o(kjyk>t?I|sYp*fJR|waTsgLr`^)x%oG^LPPif=a6qQ5*u=kEWNecR;r(+BsnDg zgL=PakMuP56ZIdxHc=(7xBR}Ak@ku*1|G;LN?&f@@^4sD*}V>qNj_-Cyz;#S@|vJO z?PY(cqRE{9V^g5;WIMj%1ien5(B3>Lej<(!!|5%W(9lq@Vt3Sx8ewz=jFb8AIM9OR zfo8nJnlyl!qGSN>CHr5#C6}MMtp2uacx3SdME-gP>)ood-c&Je8&~cRE>e>3RR4nc z@@#lK%)#C&bL?>@lVDSVwVX$Y~uX6 zwS!DbmRYt_k=KpWD^V}Dtyc9O155-1pYjeN0r@cjHG!4cTvz>2R19#EYNP6#QDg~i z-`Q$!ofz{DuP-{@UL`XM&h0eKI28L#kII5RZ;Z6YBhDHV{+{Q`h*ZBc$PNpd>M0~s z-3Cdr&;lhm4_IS<7$6RYU?G%=mg1ZIu`EGCHx;+|s}yE7Fl@5XEmu)2bh(h|=)qc+ z>STXJS|P9%&zl(GNnTPEv6SPo$LNKeE6&C zX(=ij@}nP-#fP4O67mTFy@3+9aUZ@@L9PW;;D!edkQx2!&c{Dc&;f}!P)ZthCPMT` zS*Hs=6#h&sq6G-y2kio0@5I+Zcei86)KY&KkQ}D2lP}&{`RG&EKoj}1(!QHZTnHFY zyl%BP=U%xQe_NvBjLPm3@T#g-mzz58A+r;02*lX_Cks7c}o#?rpBy(l;3`k4G) zJAjx|#o?6beTwUL!{*eWs6~eJ7|1d=62Nqw#yHjdoY-OYB99l=9kJ^-sMX^hM5lC3 zYdE4$rqr&PHunL005%o_&XKH=#1L4e%DG|l;HfL1&&OPE^9@?(jwUznSg;i+YiIm2 zP@Ex|4r~Dz&*=`GG{jSPi{F)Z{AsCoQtvw0bE6J*TI2XWloeWW?iIqa$InIn5s!5K zm>oJa>h|^-$_$f=Yqg(Sh17LGh>hvGgiBz_L zvv4|OTx`-AajI8~AQxR~m6ep%`cpG<&;q$N=52muIHf~xdC^ubt;Q375YlA|_gt9Jz;Wg)`> ze~PSkM^v#!$`P7xY-}743m(n>`+jo3Msqdy?`h~$G<+@8^Izrj6r6Ic%BR)O zep&o36&)lEamWju>9^4EkqOKX;LqcG>(ejkarQ+1r}U&@cgqboTv|zi}ciWi_Y=7M)4T_u0SRbvb*n z=abZu?T!_$*Q^L6Q6(z6JaMivht1~q0Ul@CCu`Y$IT z#^!k|zFuWzrO!PydHZB(tPWc3bbX*vW!M$1iq65o;p5Z3zxtT%abIZ2dAwiDnJRyG zcegSLhmno8Ep0c}$Zjop#qfU`M4wE+lJeO-SYn=afZ7gQbvRO$2ZJCA_D z$&-J?Sq(dbH&CehhlX~o*I(l{-u3#v)7w(q=9bdY)h#VAH(pIA3x8dFHJVuaQ003b zQmY-7mX;>ybtL$@TWVSH@N8veJux{w{lkVn7+0csEO*G1jPU88ym}Y}G9Oz4INcm+ 
z>*(lEfDe$VsHouKpHi=TFXglF!$wRF=Ff7knu8s(ubTTahbi@BnP69DWMMIvoYy^C zz6E2S8oP!iClkTOIFf`Xl*k+~(T5Y$ZNlx!)K2@a9b$4b-eMSfwSL>+Z!mYt^A-r7 zAaQbO$xBiaV^y@i>*c!oX=~sWR0h_ZnVmKHPlhHqRVyp{Py0%z7x%lGJ)Z`<{kXRLnkn>Fj$RMeyNr21{->zoOC_Zk2XyCt>Mmu#@Ry9SJohMm%bBlgv($IduWj$55E_|c?9dF9tp zPazVryC7s@Xecalz4E=^XcuX@-qOVjzu)yC5_=GaHAmG01l_n+?wy`Qv(itwe@(6f z?!|R<^(QE=&d;eKjRgHe>kAtPbL9+t8n33TZu?&lHMe?^{^DHo5$ZMoCp`)SecPN< zH9dN?mRmMsYM)!)2CvN^1Uw+vbG?iS8<&$Ewh)y@p5>eQlg5~mvWVbGukE}=B#ofp zb!0S0nz3IjIr(J95Jf{hi_^811o>)>5k)jn<0?(bf@8p6<$xDu1bHCt73v!$;*Ybf zcnOW)c<25hdBTx|sZh}U?jD$d`^$@w;M#LZL~o92^1(RM4mU%fT_9pax~< zBg8iBz2q74hiwe`^_G|gOQl-s-e$$!cT~RNlioSG7Y(NA1Oh@0b_qr0v$K57dI!~{ zn$6nCbv{r$TK`wECoiC~z4x>Aqee;txh= zU6-AWDE1fYQ5xqG2Y)2n>BV3qcZ3gX%h-4naE#48dRjQf*0xH5G>hH>asn%7<}b*M zp<_-oUV7OHZ$ARSTAqgrUP%x&_>FjOgRE?jn7+ndf%&2!U4R&o5~uGPKlc7Xu=6zJ zoqHZTQZkQ$fdM;%OLM;YMAq!L_2^s)aF5h0)6p35@2 zZ;Nh9({0@)b(6KZ)_t$(mOxB*^%f$*D|f<~Jp%YZ>$X?7;>0B#&Q~Vo8h?c5d2eP$ zb=s#fu$RK$j%>662^{#rs)4H?vj#JW-_SP4VcCBV2$d)L$+C&WOw|$d&__nnwG&yD z#{tr$f4lw)FLVj6-~MnH6KynA@HY7Nc3>O9XYR9t6bTCtIz?|uCbp!O@z`lG-pnn$ z&_sp#uSQ;8Gjx)83DgGc30Q!Q$rp(%|EAHUUg4G7drKe?n^r6b#b@r**sdSqgMN_% zl*;42;8`lipWKs#2KA$R%6YZVxDx|x;V98_0wfMM_*R9QHUNI;WJL|fB8Ge_&duST z_LPIqB3)H74!=ZVmnB2ABnQD@un||P{MFu6zMLpmM>U?8z!=Ue34oR8ZF;}wFO!dD zo40dFHxz4N~KxZ88dkcr^V?rML>XPb|me#V+ zucv){elaUc$astCS8}qVe-~$l4kJoiF z@_kPuPsJlt_l94I&>*Mfno>d8jjMc3w832f|ShMqnpv8u!ocECq5ecTtF;Z;Yq?4(EMg({Pn- z5AO!GnUa-mQ|7a$NwQ(c3pJ}w#^5=CzSegWk2PEP=b4*79@o$9DZi8#zckN?d2Zh^ z{+-IjVPlSM<-1!-#OO%Q;2RC}bv@9fedo}Xrt@NJ8|aa0VxL)UnCCxa$y99y08K_*?DzZrOL)wDcQ&DJ}ELqle&$P4h4tq=IxN_UDKPB9zc;T=)}Q2 z{m?C0#jz5_uZQMq)+K!6BF5c!NBD>ztkgx|m8GeU3Pqo91IA3&nH(UeMN{i02rm@u z(t*Dk;zt3Oc^7kFZ#I4UNHm=w`A!$ce)1ko2P|?q$2JU zY$7-HjX<;+clhw^wC|0{~g>*JTh3!v-bSIhle zJB6d>4jl`@mMBkqJL9$VQ!d{ZuW#RYr*>c7vZg_f#N=TUZW=Xf$40y7O2XLNy}5Sm z;v;?68A?%?cZzRfvSmLJ;PTVEBGKg+%5gT8(WRRIyw!=XU0|@FC3+Exs{_FnD_OcXeRebblVIk;LwuXbLiiflgtFF5wxhqiV~Tw|4)L_cF>%^KJJ9vUCFX~{ 
z7Dv;+A8smH=5X}UAnUMSfe#oP4ENG4?2u*vk3xzYaN*5iVxJ(p{sQw_Q6_z4mg;6= za&@)n0ibQ43qaEA^&2tkJd!i2$*DUGI&*Gv+2jPVPbQ2^pPOK|AGL)}T!o zO3114iw71!rvw18N;&3)W&;E4;m}IC`bim#uc$`S-rl~^EUiYYh-Z;sQsyQ%|AN7M5z>iD){nVlL@e4^%~I2Gs`PZ<%~Mg`h59niJE)+>2jCIqp#8# zqkeY+dbjLMLFG~{D%l6n^S@5R&5P)P9v?@=#KbH|Gz}>%z>PXypIyTE|x~I}DkCxnz zCZ91Fen0JH7*0v-z7fcxn_3ClQHrjA(AReka!r*YZ$=S;I~?{FV8c2A7#_r~*hHu6fe zJ+M?-nd@+f4Ruk)HGhJZm(vU7RG9!50)0*kXX`*y#{+v&?)ICG)S;?~Zc)--&R?{` z-ojv5Hxg;*0q87N@)s6HcMOPpbbaJ96&R-2*4trgR=qNO5JPHlax8r}oZBBgi+bca zH%+hmihBm$_QnZ(<5!jZax2KYu3@pS0n6NtgyRa@@-bU`v{R{@Loj$j8u{H)TS!{W z? zi)*0ImF%w#zhK93Vu+)QEH}p6F_0R~t(aIu4!HY$e0)51QnTZMdjv4v1q0^2zW>nq z2XBXGP3TbjmD4*7jXCzPoA!st%hM$txZ1F|uyCMrp{Sqib-hljXq3#;({mF7 z>C4B z=ltR#PqW$plZtRK8eng_wqkhEjc_|tZ+`pgm;A52Zres(EuxkZC8=<*|HSyV024;@ z>&xr0+XgIbuIL$DY1cEBy{YYcS`%DR@0+V(l(-OF2_I*dv=1->ba(F?vk%A~1m3Pg z&YQKot`AIYc5+|=pIBcE?=OLDAFpTQeQw*<9YxMy%sM=Jp}4Otzp*iqqw6W|*U ztTOEs{&!!()5}3U*9vE_HG`kr_WB$uv0^)j?f=IRus8q!u?2^n4gXV|9B&U?oI+t* zz`4Q)*tEdy&Hw6v29u{4VN4yB`r~gz*x;eY{2v}#$M#QdbvsV0{fbI$*Rs~(O%@?c z*W;SZ%*=c+kMae!Gr<~M#bvi5XzW-_+kB!bB_*}`(2YvnA2q#t_-6(3Uy>M17{QhB zz(-?;%^*st|4uFYUmi*jVQiND%AfvgQQ5o#S75prWh9ME&Lfx_OUpZP#SP3Ou;t`V_)OcD*_A`EPGQE1H`3jY5$9DcCC|@^PAIYzX;BmCHW=>5U zybi{7|J~5QJ3T!eIl%7P*wF^-b2_`@J}d5%ARauHhL~^4AHjIPS;svZ{fWGP$=CmC zz+wsgd~sFxbKLvWNfp@Z*H2R=5&I?qlFJ2`vcQ$yzbRuQ`lMy$%eX;{8S*{FNUSNm ze}!2|CAN=PN6A|glYFB{%vQrt!rtK4_{Vw9pD;O7p7%xl$KYsBfq> zFKdX+>=|dSbq9MuBB znAF_VnLm{%(oi?mWn|DIh&as7w+8G&RA7=?@{;?nR%85fS_QSK!--|N@#>gwOZlk> z?Tw1{Y|g(a<{O&knml>cGBtNKcJf=f52tTPLyv_Q-865+g}g;2+qQ(FuVuW+%T|Hz zrv}C<#j=JZ1-!O-qk=vITF`#aM$6wtytWCy4O?ZPr{kLGNrUAoX+;e~JQ^Aijf+{d zg_cvymo`LT>r&O!7~P4X4EaXqTg617At&rUKaGks_6+rVdxlb2n=QjJ&HFZ`vIt=0#2`Jj=N7>t}zR+!v*x*+@`s!RH`%qiB zTA{aW!02kK6o)>;TNo9o30fb~ygy~tJg4C+w8T3ex_MeYm!09|vpR5Uv~0>%X!KQ+ zXvQZXD1<{QnVnTPZq&E3D(j%g`5lp|f7MfXf*T<%q>~@HW)iJuej|#e(&-SEA!UZsE9ivQahBAH|%04UEZ9l#QB@3 z+BodxIEO4%5?p}UXz5;B);jI>rqOaVvyhiff(`JaL?kKq0}K&KI<&N96=oKhZ3~C) 
z!FrxRm{j0qU@0=5NkygEQ&_qO_$CC*$Ok0PJI5uQPVS68@+9yxu2CW%0}KnT?)Su* zZ4_%o;SyD4pN)&tpfyxLLzBEYvWM@MTqpL4qj#}%tV&Bi^_dE*j?{UOu~nyD-KIN_ z!!=_BJIz2cpw?DS!aSPHkmR=9!d7qC0IVWOhKLv7SJRK)K%RMIQZeV2F>1yD6{QuS zBk@a1xZQs>c+hHIkMz1PLnb_`(>pm)T3&gH$;NLt;y|$`U7?|o?pE6Ig z1by{X(**eOEyhy|(11EK)5|6-2v3C+jNH)Gq*DY_o04Xe--VM9{`CB7QzvFamc+cM z3nvPHe^X3{zfI7;d)Oektb7RO2C@^vzH)laD8-kMid?xOYle z341Zt;-X`>Z;+$}0W=bV%eR0IA#R`OT%hEPkdf!C-{6nR|^H#2IFwyaWf}vVo+$$LS9x14zpbqpZ<3GwRP; zHF5{^-|E?+dUXC-`YTwb`B?{kkKyF{yX;OHS31oiw`ne4Oaw!YI~mS?S(c zubQ+gQjKWs@t025+LgO5UwPjk;>-AZOir4A>2Ef*>V)rX^Bz_|_o9SyvVKVF-`Sun@m&{%5WdqBhG^=(zA?ws zOjR^7ub;f3k8fUeqGt42gVHUC-lDviLpuzN@45`5k(=594o0hh-+K zNejZu-z}>D^lBmyNIs@*5?`NGcv1O$FL^DZD_Dqgk){VE5A)0IJ0}B-IK8~2NHumU zSo2x?{}}w}#x_14j+;3Z2ChV9qitrD-4VX2IzR_1D=>-!A7x;JFUzNk?$R}dj72@H z{O#Xk1xTn5d+-RTp)O@v+%4t4i>m~ne9V?0Z=od(#nCB@L< za5;0ooN_ks__G}-Z`D*X;d%ICt%H&6;HTMGdA6Q^t^K_ z>O1LJek?ty^Sf|QJA(8}bjD$AsVxWwqY>}FrTZU>_~EjWu_IpjMq!5)x_MeQRNv8igom=J6~iRDg`~bR!m?m8uSTM ztO99|YwhH2(@su;q#k#B$%~ytJjX#Jz}2Z$%Mu0iOO3(5zpT_#?5KM4GX#T-O4j-+g>npK;%R1{-%1rV zq1}QVD{g)n0QgPiH76;8uB@Cq*CCpt^Lyskij(hpOd22t?7CTiosgksv`Lw^$~?F2 zAJXePpX;>|mNH)3Khq7`R=a?NOX>FshZii;{G|m$eMifYDT$@g#FQ;dVIc0mCk9T(OMLg8WH8T0EV z8BHn0XK6~;*H(($&>Qm){c#=Fazto=fn@5~t9QmB(btzL&7hGr-NU~-pPiO{860_9 zCQ`Z5sk(AQ- z$`|0ELDEq%#+nyHG^yhc5*AuT(?-U=?&1f@+)vCLfwaedUlCMjJ!3yAQkv>#{F2jE8>!%}y2S#E zWJpURJi=U*+%5b8&#{!Eu3=t0d>`cwSYInDInXe((gg?z5& zLoCuzAOv_%MtV*&k5;TM$c12pQ_KOM&txzd1&hhH8nD`u5@Y#9R;EvSjQWf4mCeb~ zo*M$Lf{8;#Eyyk!?Iw);MT7?V$6WCX`hm9ATC5{vpib=$dq>HF!ot`jSW$#qY5DPL zkzE)Jg6qtvUp-v5_WnpLuw=H3Q5|sREfh^FU9y@e|0~T`IL0*JsCn@8U{Pq9W+$T> z%~r9TrdBbVMH|My3zjA`=v$gM>SBE?@Q*4Gy_vsx<~s?4JgVA0R<9>*HtGX;+Rc~6 zfa}>+i*4@JwIynW)tIVj{3@p8oj)p^w%V6?rPR{+Wd}G`3ohJu%N67xbBqdVOOVvE zvVkQVrMT$^i$qUg#9^a)>EEHsA#cT^r55kv(F|iD9HTDgZwf?;Wlv4Vj)jQ=+ zs|<5e%W}jN1_j6@(a-?^9iu{euG@=bY0(NvsBc47SS-mw!S!z0H6B~FsAp#?_a{=%&hWiOEIv{%o`J#iMriPEGK4Kg%&Z{nzZY@ zD0Ysncc_`NHvlno3!K?*>+S{1cpB5F>3Oe{Ok9rlUko6HUW{B1>bQ|CAL-f~^46qZ6O 
zl>|Osl>aB8GO;O5b24XAh)@Q_5FbRFmY^!A)jL%}L!FOLypr6~(qa!iEI;1s1W)(I z&5pC!cD;A?^3h=(@|(2=5?iT?m|MwuABf5==zA&AS)|F{#j`y`mq-Qi@-ZHVLx(WZ z)!U0o#bxfJi4G>#>Jj@ZGLs_OkI7^7)(Ej5E`$bT^=pyEwcjp@q6}%5LvZl@Ojt(HO`ru@ z!Hw;8&zx9;&EwNa{kR~OVsQs23xuoYz(tg(^)XO|O`)tzP;JbgStkT* z_WI?^glcMQmX=>!Q9~flELq#7MrU80-oRl@`lfG7o56W?#qJz69A;!TO){pqDOR0Z z7voq1@*ktNq_suHM8^u-2>v94vclb+2OeWKC9l5eJ`zDWC|N6WQUIzNS~Zc#vy^x# zwqI;*C)Kg$Aee6~|&Z(K_A>TAfrigb2Ino;bci zL=1%|P|rpv$#0bqFg|-hlgF}4_terdD&X%-Dn*pe{QX~OA#77tccc+k?k{ldCM>0C#IeOjq7vI1nax`tKL~K8O_PnrykdsI3ng0J#UlWg3I$Cj1N6KsCgdbF{wm}v6EES&=UyMJ^|>1 zm&d=r`~NCc``Z0V*uAgy^yKe{3Yt7#z2+870%`>>5e-zn?+g2Ym{2$}xFR9AJ|J!<4 zn6E^h!BL7&9AnLj2o-nWV9B=taW~Eai)?oy%eBf*G7_pyT~}IJSqKCs`C_7=uLQZ zyrjW*Qb_@@9WU@Rn3-}Gc>`;48s~7L(*k};k_#cByz0Q?L>YP5KNH#oP~ya2Bv-cY ztq&({yNq7py?=SKq+R~e7338#; z%e*w*GPMy7H!OrvEVp?_GBo*CEwfdx-bExeKWBu1E795PtR1wA2Q=+Datgj<%mu$I zz=#RxRtEj;>p-$8oxQbzM@6@_zHLM-uHoQ`J|Kwgx3PGA1R!!=a@dN;KV~dSiI`WM z?&=XvRF%-P*C#D$uq$poi@Q7}Rh%wDk$yDBukB*pz-Z;2u(vW%H~#2G4=_R>@Nkci zSeVe{?a)MXQbqJ>Qu*-GZ!k5K4udnSzviYnx19$k%Jk>{J36XF$AdjO4;xmRt81D; z?m)Q&%>)M%sw;1k22l?M--kB@>O%Jvd7V|*sC3=yT9D625q~i$d*tMbri&eUx_mmG zeI(U3EQ+4F)AjvD#`a-;$oTA6J2x>~XQ9KA8HFRnoVg3k0?Rv8_<*1;Iofji@Km8N zL494&eiua^r#T;o)M~!&NT%_TbO6;^;JFICqX1`UvRH$vnwq_ig`;;+xowKIw5jW3 zF2FkG$Yf2X3~OcsAt)0Q<)@u&N4?Q!qQUASVBcBYKRMxOL|K1d@M`l&{2;D<1v`x4 z_|ejwWYiJJMg~5;CnVttMf)^jAHO(l=YdWNs$0-QE;_}bhr`3$f|1FYCDyZ?#phG^*Gm$ll;C0P1QhSqYO)n z*TuyONr=4kt?O>lUZSW>64Ojt5wa9rNkPcv+*61QGqu$p=<}X=?|zYLOj>NN+X+kn zHpn(g{ZM6Zp5d}};&tO@G;et+qA?h$@=r!RXFGgbH;znxr5?JpIOXEO0;0u$X{A5C zPUC978Ls>FN+cKcH9dY5X6pLB=)TN*b7`*)1XvyTB;N1(B);y-^ZCoBOgC`ybuDm~ zE6>c9lqNkdG}JuLUeQ<@VGjwsWbQ0YArQ_ApsOf^frkL$Q8;El&_`wWPPG+jRc>S(FKS>o<`VOU zZg&s^2|8ZdBT~a`jVyG@B}eXb#@FjbU~flD;q+>5Ge!0M3L+MTnFK<@%6CaS<(Gpa zc5Fl%FVlzwT*FIt-&1~P!MNyNu_9RzX!Say5hJ~1hiDJ7|C?D~SOsgES1w?7zbvCR zlRpo!`3Y~h~Cxj_5z3tMlKE4CMmo6*%=idaFDY?_$|ruPND4JnU>G7!1ghX z6dx)WKOfH!UWYpgA@m8w2__9!xg5DD1PbE 
zDk^UwgHf=;WhEfY^l)zJ_uMmAUGar!zO9wF|LmmjDp+0czj|w!o6@Xyb)xkB`}=46 zt4mV}pT1$ysy21^p`;MptuOm0u3x?W_nfd{XFd1Qyu1&+eSJ|dkJk}IF6>gGJK-qf zuo?m@qaB~hm;TRwEHZvf`_n3{7g%ZYKgUf!Lg=4I$`30%{l7B%pW9vuuQ&u*QPi1V UcizH^-Qi@U6hBuPyPI - Version | A metapackage that installs all available Seequent Evo SDKs, including Jupyter notebook examples. | | evo-sdk-common ([discovery](evo-python-sdk/evo-sdk-common/discovery) and [workspaces](evo-python-sdk/evo-sdk-common/workspaces)) | PyPI - Version | A shared library that provides common functionality for integrating with Seequent's client SDKs. | | [evo-files](evo-python-sdk/evo-files) | PyPI - Version | A service client for interacting with the Evo File API. | -| evo-objects | PyPI - Version | A geoscience object service client library designed to help get up and running with the Geoscience Object API. | +| [evo-objects](evo-python-sdk/evo-objects) | PyPI - Version | A geoscience object service client library designed to help get up and running with the Geoscience Object API. | | [evo-colormaps](evo-python-sdk/evo-colormaps) | PyPI - Version | A service client to create colour mappings and associate them to geoscience data with the Colormap API.| | [evo-blockmodels](evo-python-sdk/evo-blockmodels) | PyPI - Version | The Block Model API provides the ability to manage and report on block models in your Evo workspaces. | +| [evo-widgets](evo-python-sdk/evo-widgets) | PyPI - Version | Widgets and presentation layer — rich HTML rendering of typed geoscience objects in Jupyter notebooks. 
| | [evo-compute](evo-python-sdk/evo-compute) | PyPI - Version | A service client to send jobs to the Compute Tasks API.| ### Getting started @@ -57,7 +58,8 @@ For next steps and more information about using Evo, see: * `evo-sdk-common` ([`discovery`](evo-python-sdk/evo-sdk-common/discovery) and [`workspaces`](evo-python-sdk/evo-sdk-common/workspaces)): providing the foundation for all Evo SDKs, as well as tools for performing arbitrary Seequent Evo API requests * [`evo-files`](evo-python-sdk/evo-files): for interacting with the File API -* `evo-objects`: for interacting with the Geoscience Object API +* [`evo-objects`](evo-python-sdk/evo-objects): for interacting with the Geoscience Object API * [`evo-colormaps`](evo-python-sdk/evo-colormaps): for interacting with the Colormap API * [`evo-blockmodels`](evo-python-sdk/evo-blockmodels): for interacting with the Block Model API +* [`evo-widgets`](evo-python-sdk/evo-widgets): for rich HTML rendering of typed geoscience objects in Jupyter notebooks * [`evo-compute`](evo-python-sdk/evo-compute): for interacting with the Compute Tasks API diff --git a/mkdocs/docs/packages/evo-sdk-common/discovery.md b/mkdocs/docs/packages/evo-sdk-common/discovery/DiscoveryAPIClient.md similarity index 100% rename from mkdocs/docs/packages/evo-sdk-common/discovery.md rename to mkdocs/docs/packages/evo-sdk-common/discovery/DiscoveryAPIClient.md diff --git a/mkdocs/docs/packages/evo-sdk-common/workspaces.md b/mkdocs/docs/packages/evo-sdk-common/workspaces/WorkspaceAPIClient.md similarity index 100% rename from mkdocs/docs/packages/evo-sdk-common/workspaces.md rename to mkdocs/docs/packages/evo-sdk-common/workspaces/WorkspaceAPIClient.md diff --git a/mkdocs/docs/packages/evo-widgets/Introduction.md b/mkdocs/docs/packages/evo-widgets/Introduction.md new file mode 100644 index 00000000..3b7f5fd1 --- /dev/null +++ b/mkdocs/docs/packages/evo-widgets/Introduction.md @@ -0,0 +1,57 @@ +# evo-widgets + +[GitHub 
source](https://github.com/SeequentEvo/evo-python-sdk/blob/main/packages/evo-widgets/src/evo/widgets/__init__.py) + +Widgets and presentation layer for the Evo Python SDK — HTML rendering, URL generation, and IPython formatters for Jupyter notebooks. + +## Usage + +Load the IPython extension in your notebook to enable rich HTML rendering for all Evo SDK typed objects: + +```python +%load_ext evo.widgets +``` + +After loading, typed objects like `PointSet`, `Regular3DGrid`, `TensorGrid`, and `BlockModel` will automatically render with formatted metadata tables, clickable Portal/Viewer links, and bounding box information. + +## URL Functions + +Generate URLs to view objects in the Evo Portal and Viewer: + +```python +from evo.widgets import ( + get_portal_url_for_object, + get_viewer_url_for_object, + get_viewer_url_for_objects, +) + +# Get Portal URL for a single object +portal_url = get_portal_url_for_object(grid) + +# Get Viewer URL for a single object +viewer_url = get_viewer_url_for_object(grid) + +# View multiple objects together in the Viewer +url = get_viewer_url_for_objects(manager, [grid, pointset, tensor_grid]) +``` + +## Formatters + +Rich HTML representations for all typed geoscience objects: + +- `PointSet`, `Regular3DGrid`, `TensorGrid`, `BlockModel` +- `Variogram` +- `Attributes` collections +- `Report` and `ReportResult` +- `TaskResult` and `TaskResults` (compute results) + +All formatters are registered automatically when you load the extension with `%load_ext evo.widgets`. They support light/dark mode via Jupyter theme CSS variables. + +## How It Works + +When you run `%load_ext evo.widgets`, the extension registers HTML formatters with IPython using `for_type_by_name`. This approach: + +1. **Avoids hard dependencies** — The widgets package doesn't import model classes directly +2. **Works with all typed objects** — Formatters are registered for the base class, so all subclasses are covered +3. 
**Lazy loading** — Formatters only activate when the relevant types are actually used + diff --git a/mkdocs/gen_api_docs.py b/mkdocs/gen_api_docs.py index fdf3018c..ad07361f 100644 --- a/mkdocs/gen_api_docs.py +++ b/mkdocs/gen_api_docs.py @@ -18,26 +18,27 @@ log = logging.getLogger("mkdocs.gen_api_docs") -def on_startup(command: str, dirty: bool) -> None: - mkdocs_dir = Path(__file__).parent - docs_packages_dir = mkdocs_dir / "docs" / "packages" - - api_clients_file = mkdocs_dir / "api_clients.txt" - api_clients = [line.strip() for line in api_clients_file.read_text().splitlines() if line.strip()] - log.info(f"Loaded {len(api_clients)} API clients from {api_clients_file.relative_to(mkdocs_dir)}") - - for old_md in docs_packages_dir.rglob("*.md"): - if old_md.name != "evo-python-sdk.md": - old_md.unlink() - log.info(f"Deleted old doc: {old_md.relative_to(mkdocs_dir)}") +def _parse_api_entries(lines: list[str]) -> dict[str, list[tuple[str, str, str, str]]]: + """Parse api_clients.txt lines into (class_name, module_path, github_url, namespace) grouped by doc_dir. - entries_by_dir = defaultdict(list) - for module_path in api_clients: + The doc_dir determines the directory/file structure: + - evo-sdk-common entries use ``/`` (e.g. ``evo-sdk-common/discovery``) + - All other entries use the package name (e.g. ``evo-objects``, ``evo-blockmodels``) + """ + entries_by_dir: dict[str, list[tuple[str, str, str, str]]] = defaultdict(list) + for module_path in lines: module_parts = module_path.split(".") - _, package, _, _, sub_package, *rest = module_parts - doc_dir = f"{package}/{sub_package}" if package == "evo-sdk-common" else package + # packages..src.<...>. + package = module_parts[1] # e.g. 
"evo-objects", "evo-sdk-common" class_name = module_parts[-1] + if package == "evo-sdk-common": + # evo-sdk-common uses sub-package directories: evo-sdk-common/discovery, evo-sdk-common/workspaces + sub_package = module_parts[4] # packages.evo-sdk-common.src.evo..* + doc_dir = f"{package}/{sub_package}" + else: + doc_dir = package + file_path_parts = module_parts[:-1] source_file_path = "/".join(file_path_parts) + ".py" github_url = f"{GITHUB_BASE_URL}/{source_file_path}" @@ -45,14 +46,89 @@ def on_startup(command: str, dirty: bool) -> None: src_idx = module_parts.index("src") namespace = ".".join(module_parts[src_idx + 1 :]) entries_by_dir[doc_dir].append((class_name, module_path, github_url, namespace)) + return entries_by_dir + + +def _parse_typed_entries(lines: list[str]) -> dict[str, list[tuple[str, str]]]: + """Parse typed_objects.txt lines into (class_name, namespace) grouped by package name. + + All typed object entries for a package are collected into a single TypedObjects.md. + """ + entries_by_package: dict[str, list[tuple[str, str]]] = defaultdict(list) + for module_path in lines: + module_parts = module_path.split(".") + package = module_parts[1] # e.g. 
"evo-objects", "evo-blockmodels" + class_name = module_parts[-1] + + src_idx = module_parts.index("src") + namespace = ".".join(module_parts[src_idx + 1 :]) + entries_by_package[package].append((class_name, namespace)) + return entries_by_package + - for doc_dir, entries in entries_by_dir.items(): +def on_startup(command: str, dirty: bool) -> None: + mkdocs_dir = Path(__file__).parent + docs_packages_dir = mkdocs_dir / "docs" / "packages" + + # --- Load API clients --- + api_clients_file = mkdocs_dir / "api_clients.txt" + api_clients = [line.strip() for line in api_clients_file.read_text().splitlines() if line.strip()] + log.info(f"Loaded {len(api_clients)} API clients from {api_clients_file.relative_to(mkdocs_dir)}") + api_entries = _parse_api_entries(api_clients) + + # --- Load typed objects --- + typed_objects_file = mkdocs_dir / "typed_objects.txt" + typed_objects: list[str] = [] + if typed_objects_file.exists(): + typed_objects = [line.strip() for line in typed_objects_file.read_text().splitlines() if line.strip()] + log.info(f"Loaded {len(typed_objects)} typed objects from {typed_objects_file.relative_to(mkdocs_dir)}") + typed_entries = _parse_typed_entries(typed_objects) + + # --- Compute all auto-generated paths --- + auto_generated_paths: set[Path] = set() + + # API client docs: always placed inside package directories as .md + for doc_dir, entries in api_entries.items(): + for class_name, *_ in entries: + doc_path = docs_packages_dir / f"{doc_dir}/{class_name}.md" + auto_generated_paths.add(doc_path.resolve()) + + # Typed object docs: one TypedObjects.md per package directory + for package in typed_entries: + doc_path = docs_packages_dir / f"{package}/TypedObjects.md" + auto_generated_paths.add(doc_path.resolve()) + + # --- Clean up only auto-generated files --- + for old_md in docs_packages_dir.rglob("*.md"): + if old_md.name == "evo-python-sdk.md": + continue + if old_md.resolve() in auto_generated_paths: + old_md.unlink() + log.info(f"Deleted 
auto-generated doc: {old_md.relative_to(mkdocs_dir)}") + else: + log.info(f"Preserved manual doc: {old_md.relative_to(mkdocs_dir)}") + + # --- Generate API client docs --- + for doc_dir, entries in api_entries.items(): for class_name, module_path, github_url, namespace in entries: - doc_path = ( - docs_packages_dir / f"{doc_dir}.md" - if len(entries) == 1 - else docs_packages_dir / f"{doc_dir}/{class_name}.md" - ) + doc_path = docs_packages_dir / f"{doc_dir}/{class_name}.md" doc_path.parent.mkdir(parents=True, exist_ok=True) doc_path.write_text(f"[GitHub source]({github_url})\n::: {namespace}\n") - log.info(f"Generated: {doc_path.relative_to(mkdocs_dir)}") + log.info(f"Generated API doc: {doc_path.relative_to(mkdocs_dir)}") + + # --- Generate typed object docs --- + for package, entries in typed_entries.items(): + doc_path = docs_packages_dir / f"{package}/TypedObjects.md" + doc_path.parent.mkdir(parents=True, exist_ok=True) + + lines = ["# Typed Objects\n"] + for class_name, namespace in entries: + lines.append(f"::: {namespace}") + lines.append(" options:") + lines.append(" show_root_heading: true") + lines.append(" show_source: false") + lines.append("") + + doc_path.write_text("\n".join(lines)) + log.info(f"Generated typed objects doc: {doc_path.relative_to(mkdocs_dir)}") + diff --git a/mkdocs/site/packages/evo-blockmodels.html b/mkdocs/site/packages/evo-blockmodels/BlockModelAPIClient.html similarity index 98% rename from mkdocs/site/packages/evo-blockmodels.html rename to mkdocs/site/packages/evo-blockmodels/BlockModelAPIClient.html index c0819c40..8e2b2ab7 100644 --- a/mkdocs/site/packages/evo-blockmodels.html +++ b/mkdocs/site/packages/evo-blockmodels/BlockModelAPIClient.html @@ -7,17 +7,17 @@ - - Evo blockmodels - Evo Python SDK - - - - - - + + BlockModelAPIClient - Evo Python SDK + + + + + + - + @@ -25,7 +25,7 @@ + +
+
+
+
+ +

evo-blockmodels

+

GitHub source

+

The evo-blockmodels package provides both a low-level API client and typed Python classes for working with block models in Evo.

+

!!! tip "Using block models from typed objects" + The full functionality of evo-blockmodels — creating, retrieving, updating attributes, running reports — is accessible directly from the BlockModel object in evo.objects.typed. When evo-blockmodels is installed, BlockModel acts as a proxy and delegates data operations to the Block Model Service automatically.

+
```python
+from evo.objects.typed import object_from_path
+
+# Load any block model — full evo-blockmodels functionality is available
+bm = await object_from_path(manager, "my-folder/block-model")
+df = await bm.to_dataframe()
+await bm.add_attribute(data_df, "new_col")
+report = await bm.create_report(spec)
+```
+
+See the [evo-objects Introduction](../evo-objects/Introduction.md#blockmodel-via-evo-blockmodels) for the full API.
+
+

See the Typed Objects page for the full typed API reference.

+

Typed Block Models

+

The typed module provides intuitive classes for creating, retrieving, and updating regular block models with pandas DataFrame support.

+

Creating a block model

+
from evo.blockmodels.typed import RegularBlockModel, RegularBlockModelData, Point3, Size3d, Size3i
+
+data = RegularBlockModelData(
+    name="My Block Model",
+    origin=Point3(0, 0, 0),
+    n_blocks=Size3i(10, 10, 10),
+    block_size=Size3d(1.0, 1.0, 1.0),
+    cell_data=my_dataframe,
+)
+block_model = await RegularBlockModel.create(context, data)
+
+

Retrieving a block model

+
block_model = await RegularBlockModel.get(context, block_model_id)
+df = block_model.cell_data  # pandas DataFrame with all cell attributes
+
+

Updating attributes

+
new_version = await block_model.update_attributes(
+    updated_dataframe,
+    new_columns=["new_col"],
+)
+
+

Reports

+

Reports provide resource estimation summaries for block models — calculating tonnages, grades, and metal content grouped by category (e.g., geological domains, rock types).

+

Creating and running a report

+
from evo.blockmodels.typed import (
+    Report, ReportSpecificationData, ReportColumnSpec, ReportCategorySpec,
+    Aggregation, MassUnits, Units,
+)
+
+spec = ReportSpecificationData(
+    name="Grade Report",
+    category=ReportCategorySpec(column_name="domain"),
+    columns=[
+        ReportColumnSpec(
+            column_name="Au",
+            aggregation=Aggregation.MASS_AVERAGE,
+            output_unit_id="g/t",
+        ),
+    ],
+    mass_units=MassUnits.TONNES,
+)
+
+report = await Report.create(context, block_model, spec)
+result = await report.run(context)
+df = result.to_dataframe()  # Tonnages and grades by domain
+
+
+
+ +
+
+

Documentation built with MkDocs.

+
+ + + + + + + + diff --git a/mkdocs/site/packages/evo-blockmodels/TypedObjects.html b/mkdocs/site/packages/evo-blockmodels/TypedObjects.html new file mode 100644 index 00000000..b458f44c --- /dev/null +++ b/mkdocs/site/packages/evo-blockmodels/TypedObjects.html @@ -0,0 +1,2774 @@ + + + + + + + + + + + Typed Objects - Evo Python SDK + + + + + + + + + + + + + + + + +
+
+
+
+ +

Typed Objects

+ + +
+ + + +

+ evo.blockmodels.typed.regular_block_model.RegularBlockModel + + +

+ + +
+ + + +

A typed wrapper for regular block models providing pandas DataFrame access.

+

This class provides a high-level interface for creating, retrieving, and updating +regular block models with typed access to grid properties and cell data.

+

Example usage:

+
# Create a new block model
+data = RegularBlockModelData(
+    name="My Block Model",
+    origin=Point3(0, 0, 0),
+    n_blocks=Size3i(10, 10, 10),
+    block_size=Size3d(1.0, 1.0, 1.0),
+    cell_data=my_dataframe,
+)
+block_model = await RegularBlockModel.create(context, data)
+
+# Retrieve an existing block model
+block_model = await RegularBlockModel.get(context, bm_id)
+df = block_model.cell_data
+
+# Update attributes
+new_version = await block_model.update_attributes(
+    updated_dataframe,
+    new_columns=["new_col"],
+)
+
+ + + + + + + + + + + +
+ + + + + + + +
+ + + +

+ n_blocks + + + + property + + +

+
n_blocks: Size3i
+
+ +
+ +

The number of blocks in each dimension.

+ +
+ +
+ +
+ + + +

+ block_size + + + + property + + +

+
block_size: Size3d
+
+ +
+ +

The size of each block in each dimension.

+ +
+ +
+ +
+ + + +

+ rotations + + + + property + + +

+
rotations: list[tuple[RotationAxis, float]]
+
+ +
+ +

The rotations applied to the block model.

+ +
+ +
+ + + + +
+ + +

+ __init__ + + +

+
__init__(
+    client: BlockModelAPIClient,
+    metadata: BlockModel,
+    version: Version,
+    cell_data: DataFrame,
+    context: IContext | None = None,
+) -> None
+
+ +
+ +

Initialize a RegularBlockModel instance.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ client + + BlockModelAPIClient + +
+

The BlockModelAPIClient used for API operations.

+
+
+ required +
+ metadata + + BlockModel + +
+

The block model metadata.

+
+
+ required +
+ version + + Version + +
+

The current version information.

+
+
+ required +
+ cell_data + + DataFrame + +
+

The cell data as a pandas DataFrame.

+
+
+ required +
+ context + + IContext | None + +
+

Optional IContext for report and other operations.

+
+
+ None +
+ + +
+ +
+ +
+ + +

+ create + + + + async + classmethod + + +

+
create(
+    context: IContext, data: RegularBlockModelData, path: str | None = None, fb: IFeedback = NoFeedback
+) -> RegularBlockModel
+
+ +
+ +

Create a new regular block model.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ context + + IContext + +
+

The context containing environment, connector, and cache.

+
+
+ required +
+ data + + RegularBlockModelData + +
+

The data defining the block model to create.

+
+
+ required +
+ path + + str | None + +
+

Optional path for the block model in the workspace.

+
+
+ None +
+ fb + + IFeedback + +
+

Optional feedback interface for progress reporting.

+
+
+ NoFeedback +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ RegularBlockModel + +
+

A RegularBlockModel instance representing the created block model.

+
+
+ + +

Raises:

+ + + + + + + + + + + + + +
TypeDescription
+ ValueError + +
+

If the data is invalid.

+
+
+ + +
+ +
+ +
+ + +

+ get + + + + async + classmethod + + +

+
get(
+    context: IContext,
+    bm_id: UUID,
+    version_id: UUID | None = None,
+    columns: list[str] | None = None,
+    bbox: BBox | BBoxXYZ | None = None,
+    fb: IFeedback = NoFeedback,
+) -> RegularBlockModel
+
+ +
+ +

Retrieve an existing regular block model.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ context + + IContext + +
+

The context containing environment, connector, and cache.

+
+
+ required +
+ bm_id + + UUID + +
+

The UUID of the block model to retrieve.

+
+
+ required +
+ version_id + + UUID | None + +
+

Optional version UUID. Defaults to the latest version.

+
+
+ None +
+ columns + + list[str] | None + +
+

Optional list of columns to retrieve. Defaults to all columns ["*"].

+
+
+ None +
+ bbox + + BBox | BBoxXYZ | None + +
+

Optional bounding box to filter the data.

+
+
+ None +
+ fb + + IFeedback + +
+

Optional feedback interface for progress reporting.

+
+
+ NoFeedback +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ RegularBlockModel + +
+

A RegularBlockModel instance.

+
+
+ + +

Raises:

+ + + + + + + + + + + + + +
TypeDescription
+ ValueError + +
+

If the block model is not a regular grid.

+
+
+ + +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ evo.blockmodels.typed.regular_block_model.RegularBlockModelData + + + + dataclass + + +

+ + +
+ + + +

Data class for creating a new regular block model.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ name + + str + +
+

The name of the block model.

+
+
+ required +
+ origin + + Point3 + +
+

The origin point of the block model grid.

+
+
+ required +
+ n_blocks + + Size3i + +
+

The number of blocks in each dimension (nx, ny, nz).

+
+
+ required +
+ block_size + + Size3d + +
+

The size of each block in each dimension (dx, dy, dz).

+
+
+ required +
+ rotations + + list[tuple[RotationAxis, float]] + +
+

List of rotations as (axis, angle) tuples. Angle is in degrees, positive angles indicate clockwise rotation when looking down the axis.

+
+
+ list() +
+ cell_data + + DataFrame | None + +
+

Optional DataFrame containing block attribute data. Must include geometry columns (i, j, k) or (x, y, z) and attribute columns.

+
+
+ None +
+ description + + str | None + +
+

Optional description of the block model.

+
+
+ None +
+ coordinate_reference_system + + str | None + +
+

Optional coordinate reference system (e.g., "EPSG:4326").

+
+
+ None +
+ size_unit_id + + str | None + +
+

Optional unit identifier for block sizes (e.g., "m").

+
+
+ None +
+ units + + dict[str, str] + +
+

Optional dictionary mapping column names to unit identifiers.

+
+
+ dict() +
+ + + + + + + + + + + +
+ + + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ evo.blockmodels.typed.report.Report + + +

+ + +
+ + + +

A typed wrapper for block model report specifications.

+

Reports provide resource estimation summaries for block models. They calculate +tonnages, grades, and metal content grouped by categories (e.g., geological domains).

+

Example usage:

+
# Create a report from a block model
+report = await block_model.create_report(ReportSpecificationData(
+    name="Resource Report",
+    columns=[ReportColumnSpec(column_name="Au", output_unit_id="g/t")],
+    categories=[ReportCategorySpec(column_name="domain")],
+    mass_unit_id="t",
+    density_value=2.7,
+    density_unit_id="t/m3",
+))
+
+# Pretty-print shows BlockSync link
+report
+
+# Get the latest result
+result = await report.get_latest_result()
+df = result.to_dataframe()
+
+ + + + + + + + + + + +
+ + + + + + + +
+ + + +

+ id + + + + property + + +

+
id: UUID
+
+ +
+ +

The unique identifier of the report specification.

+ +
+ +
+ +
+ + + +

+ name + + + + property + + +

+
name: str
+
+ +
+ +

The name of the report.

+ +
+ +
+ +
+ + + +

+ description + + + + property + + +

+
description: str | None
+
+ +
+ +

The description of the report.

+ +
+ +
+ +
+ + + +

+ block_model_uuid + + + + property + + +

+
block_model_uuid: UUID
+
+ +
+ +

The UUID of the block model this report is for.

+ +
+ +
+ +
+ + + +

+ revision + + + + property + + +

+
revision: int
+
+ +
+ +

The revision number of the report specification.

+ +
+ +
+ + + + +
+ + +

+ __init__ + + +

+
__init__(
+    context: IContext,
+    block_model_uuid: UUID,
+    specification: ReportSpecificationWithLastRunInfo | ReportSpecificationWithJobUrl,
+    block_model_name: str | None = None,
+) -> None
+
+ +
+ +

Initialize a Report instance.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ context + + IContext + +
+

The context containing environment, connector, and cache.

+
+
+ required +
+ block_model_uuid + + UUID + +
+

The UUID of the block model this report is for.

+
+
+ required +
+ specification + + ReportSpecificationWithLastRunInfo | ReportSpecificationWithJobUrl + +
+

The report specification from the API.

+
+
+ required +
+ block_model_name + + str | None + +
+

The name of the block model (for display purposes).

+
+
+ None +
+ + +
+ +
+ +
+ + +

+ create + + + + async + classmethod + + +

+
create(
+    context: IContext,
+    block_model_uuid: UUID,
+    data: ReportSpecificationData,
+    column_id_map: dict[str, UUID],
+    fb: IFeedback = NoFeedback,
+    block_model_name: str | None = None,
+) -> Report
+
+ +
+ +

Create a new report specification.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ context + + IContext + +
+

The context containing environment, connector, and cache.

+
+
+ required +
+ block_model_uuid + + UUID + +
+

The UUID of the block model to create the report for.

+
+
+ required +
+ data + + ReportSpecificationData + +
+

The report specification data.

+
+
+ required +
+ column_id_map + + dict[str, UUID] + +
+

Mapping of column names to their UUIDs in the block model.

+
+
+ required +
+ fb + + IFeedback + +
+

Optional feedback interface for progress reporting.

+
+
+ NoFeedback +
+ block_model_name + + str | None + +
+

The name of the block model (for display purposes).

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ Report + +
+

A Report instance representing the created report.

+
+
+ + +
+ +
+ +
+ + +

+ run + + + + async + + +

+
run(version_uuid: UUID | None = None, fb: IFeedback = NoFeedback) -> ReportResult
+
+ +
+ +

Run the report to generate a new result.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ version_uuid + + UUID | None + +
+

Optional specific version UUID to run the report on. If None, runs on the latest version.

+
+
+ None +
+ fb + + IFeedback + +
+

Optional feedback interface for progress reporting.

+
+
+ NoFeedback +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ ReportResult + +
+

The generated report result.

+
+
+ + +
+ +
+ +
+ + +

+ refresh + + + + async + + +

+
refresh(fb: IFeedback = NoFeedback, timeout_seconds: float = 120.0, poll_interval_seconds: float = 2.0) -> ReportResult
+
+ +
+ +

Get the most recent result for this report, waiting if necessary.

+

If no results exist yet (e.g., report is still running), this method will +poll until a result is available or the timeout is reached.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ fb + + IFeedback + +
+

Optional feedback interface for progress reporting.

+
+
+ NoFeedback +
+ timeout_seconds + + float + +
+

Maximum time to wait for results (default 120 seconds).

+
+
+ 120.0 +
+ poll_interval_seconds + + float + +
+

Time between polling attempts (default 2 seconds).

+
+
+ 2.0 +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ ReportResult + +
+

The latest report result.

+
+
+ + +

Raises:

+ + + + + + + + + + + + + +
TypeDescription
+ TimeoutError + +
+

If no results are available within the timeout period.

+
+
+ + +
+ +
+ +
+ + +

+ list_results + + + + async + + +

+
list_results(limit: int = 50, fb: IFeedback = NoFeedback) -> list[ReportResult]
+
+ +
+ +

List all results for this report.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ limit + + int + +
+

Maximum number of results to return.

+
+
+ 50 +
+ fb + + IFeedback + +
+

Optional feedback interface for progress reporting.

+
+
+ NoFeedback +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ list[ReportResult] + +
+

List of report results, ordered newest first.

+
+
+ + +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ evo.blockmodels.typed.report.ReportSpecificationData + + + + dataclass + + +

+ + +
+ + + +

Data for creating a report specification.

+

A report specification defines how to calculate resource estimates from a block model. +It includes which columns to report on, how to categorize blocks, and density/mass settings.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ name + + str + +
+

The name of the report.

+
+
+ required +
+ columns + + list[ReportColumnSpec] + +
+

List of columns to include in the report with their aggregation settings. Use ReportColumnSpec to define each column.

+
+
+ required +
+ mass_unit_id + + str + +
+

Unit ID for mass output. Common values: - "t" (tonnes) - use MassUnits.TONNES - "kg" (kilograms) - use MassUnits.KILOGRAMS - "oz" (ounces) - use MassUnits.OUNCES

+
+
+ required +
+ categories + + list[ReportCategorySpec] + +
+

List of category columns for grouping blocks. Use ReportCategorySpec to define each category.

+
+
+ list() +
+ description + + str | None + +
+

Optional description of the report.

+
+
+ None +
+ density_value + + float | None + +
+

Fixed density value (requires density_unit_id). Do NOT use with density_column_name.

+
+
+ None +
+ density_unit_id + + str | None + +
+

Unit ID for fixed density (e.g., "t/m3"). Only use with density_value, NOT with density_column_name.

+
+
+ None +
+ density_column_name + + str | None + +
+

Name of the column containing block densities. Do NOT use with density_value or density_unit_id.

+
+
+ None +
+ cutoff_column_name + + str | None + +
+

Name of the column to use for cut-off evaluation.

+
+
+ None +
+ cutoff_values + + list[float] | None + +
+

List of cut-off values to evaluate.

+
+
+ None +
+ autorun + + bool + +
+

Whether to automatically run the report when block model is updated.

+
+
+ True +
+ run_now + + bool + +
+

Whether to run the report immediately after creation. Example with density column: >>> data = ReportSpecificationData( ... name="Gold Resource Report", ... columns=[ ... ReportColumnSpec( ... column_name="Au", ... aggregation="MASS_AVERAGE", # Use for grades ... label="Au Grade", ... output_unit_id="g/t", ... ), ... ], ... categories=[ ... ReportCategorySpec(column_name="domain", label="Domain"), ... ], ... mass_unit_id=MassUnits.TONNES, ... density_column_name="density", # Unit comes from column ... ) Example with fixed density: >>> data = ReportSpecificationData( ... name="Gold Resource Report", ... columns=[...], ... categories=[...], ... mass_unit_id=MassUnits.TONNES, ... density_value=2.7, # Fixed density ... density_unit_id="t/m3", # Required with density_value ... )

+
+
+ True +
+ + + + + + + + + + + +
+ + + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ evo.blockmodels.typed.report.ReportResult + + + + dataclass + + +

+ + +
+ + + +

A result from running a report.

+

Contains the calculated values for each category and cut-off combination.

+ + + + + + + + + + + +
+ + + + + + + + + + +
+ + +

+ to_dataframe + + +

+
to_dataframe() -> pd.DataFrame
+
+ +
+ +

Convert the report result to a pandas DataFrame.

+

Returns a DataFrame with one row per category/cut-off combination, +containing the aggregated values for each report column.

+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ DataFrame + +
+

DataFrame with report results.

+
+
+ + +
+ +
+ +
+ + +

+ __repr__ + + +

+
__repr__() -> str
+
+ +
+ +

Return a string representation of the report result.

+ + +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ evo.blockmodels.typed.report.ReportColumnSpec + + + + dataclass + + +

+ + +
+ + + +

Specification for a column in a report.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ column_name + + str + +
+

The name of the column in the block model.

+
+
+ required +
+ aggregation + + Aggregation + +
+

How to aggregate the column values. Use Aggregation enum: - Aggregation.MASS_AVERAGE - Mass-weighted average (for grades) - Aggregation.SUM - Sum of values (for metal content)

+
+
+ SUM +
+ label + + str | None + +
+

Display label for the column in the report.

+
+
+ None +
+ output_unit_id + + str | None + +
+

Unit ID for the output values. Use Units class constants: - Units.GRAMS_PER_TONNE - g/t (grades) - Units.PERCENT - % (grades) - Units.PPM - ppm (grades) - Units.KILOGRAMS - kg (metal content) - Units.TONNES - t (metal content) - Units.TROY_OUNCES - oz_tr (metal content) Example: >>> from evo.blockmodels import Units >>> from evo.blockmodels.typed import Aggregation, ReportColumnSpec >>> >>> # For grade columns, use MASS_AVERAGE >>> grade_col = ReportColumnSpec( ... column_name="Au", ... aggregation=Aggregation.MASS_AVERAGE, ... label="Au Grade", ... output_unit_id=Units.GRAMS_PER_TONNE, ... ) >>> # For metal content columns, use SUM >>> metal_col = ReportColumnSpec( ... column_name="Au_metal", ... aggregation=Aggregation.SUM, ... label="Au Metal", ... output_unit_id=Units.KILOGRAMS, ... )

+
+
+ None +
+ + + + + + + + + + + +
+ + + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ evo.blockmodels.typed.report.ReportCategorySpec + + + + dataclass + + +

+ + +
+ + + +

Specification for a category column in a report.

+

Category columns are used to group blocks for reporting (e.g., by domain, rock type).

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
+ column_name + + str + +
+

The name of the category column in the block model.

+
+
+ required +
+ label + + str | None + +
+

Display label for the category in the report.

+
+
+ None +
+ values + + list[str] | None + +
+

Optional list of category values to include. If None, all values are included.

+
+
+ None +
+ + + + + + + + + + + +
+ + + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ evo.blockmodels.typed.report.Aggregation + + +

+ + +
+ + + +

Aggregation methods for report columns.

+

Use these values for the aggregation parameter in ReportColumnSpec.

+

Example: + >>> col = ReportColumnSpec( + ... column_name="Au", + ... aggregation=Aggregation.MASS_AVERAGE, + ... output_unit_id="g/t", + ... )

+ + + + + + + + + + + +
+ + + + + + + +
+ + + +

+ SUM + + + + class-attribute + instance-attribute + + +

+
SUM = 'SUM'
+
+ +
+ +

Sum of values - use for metal content, volume, tonnage, etc.

+ +
+ +
+ +
+ + + +

+ MASS_AVERAGE + + + + class-attribute + instance-attribute + + +

+
MASS_AVERAGE = 'MASS_AVERAGE'
+
+ +
+ +

Mass-weighted average - use for grades, densities, quality metrics, etc.

+ +
+ +
+ + + + + + +
+ +
+ +
+ +
+ + + +

+ evo.blockmodels.typed.units.Units + + +

+ + +
+ + + +

Common unit IDs for block model attributes.

+

These are the most commonly used unit IDs. For a complete list, +use get_available_units() to query the Block Model Service.

+

Example usage: + from evo.blockmodels.typed import Units

+
# Create block model with units
+bm_data = RegularBlockModelData(
+    ...
+    units={
+        "grade": Units.GRAMS_PER_TONNE,
+        "density": Units.TONNES_PER_CUBIC_METRE,
+    },
+)
+
+# Add attribute with unit
+await bm_ref.add_attribute(df, "metal_content", unit=Units.KILOS_PER_CUBIC_METRE)
+
+ + + + + + + + + + + +
+ + + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ evo.blockmodels.typed.units.UnitInfo + + + + dataclass + + +

+ + +
+ + + +

Information about a unit.

+ + + + + + + + + + + +
+ + + + + + + +
+ + + +

+ unit_id + + + + instance-attribute + + +

+
unit_id: str
+
+ +
+ +

The unit ID to use when setting column units.

+ +
+ +
+ +
+ + + +

+ symbol + + + + instance-attribute + + +

+
symbol: str
+
+ +
+ +

Display symbol for the unit.

+ +
+ +
+ +
+ + + +

+ description + + + + instance-attribute + + +

+
description: str
+
+ +
+ +

Human-readable description of the unit.

+ +
+ +
+ +
+ + + +

+ unit_type + + + + instance-attribute + + +

+
unit_type: UnitType
+
+ +
+ +

The type/category of this unit.

+ +
+ +
+ +
+ + + +

+ conversion_factor + + + + instance-attribute + + +

+
conversion_factor: float
+
+ +
+ +

Conversion factor to the reference unit for this unit type.

+ +
+ +
+ + + + + + +
+ +
+ +
+
+
+ +
+
+

Documentation built with MkDocs.

+
+ + + + + + + + diff --git a/mkdocs/site/packages/evo-colormaps.html b/mkdocs/site/packages/evo-colormaps/ColormapAPIClient.html similarity index 96% rename from mkdocs/site/packages/evo-colormaps.html rename to mkdocs/site/packages/evo-colormaps/ColormapAPIClient.html index e3e31dbf..5fb28f83 100644 --- a/mkdocs/site/packages/evo-colormaps.html +++ b/mkdocs/site/packages/evo-colormaps/ColormapAPIClient.html @@ -7,17 +7,17 @@ - - Evo colormaps - Evo Python SDK - - - - - - + + ColormapAPIClient - Evo Python SDK + + + + + + - + @@ -25,7 +25,7 @@ + +
+
+
+
+ +

evo-compute

+

GitHub source

+

The evo-compute package provides a client for running compute tasks on Evo. Tasks are submitted to the Compute Tasks API and polled for results.

+

See the Typed Objects page for the full typed API reference.

+

Running Compute Tasks

+

The run() function is the main entry point for executing compute tasks. It supports running a single task or multiple tasks concurrently.

+

Single task

+
from evo.compute.tasks import run, SearchNeighborhood, Ellipsoid, EllipsoidRanges
+from evo.compute.tasks.kriging import KrigingParameters
+
+params = KrigingParameters(
+    source=pointset.attributes["grade"],
+    target=block_model.attributes["kriged_grade"],  # Creates if new, updates if exists
+    variogram=variogram,
+    search=SearchNeighborhood(
+        ellipsoid=Ellipsoid(ranges=EllipsoidRanges(200, 150, 100)),
+        max_samples=20,
+    ),
+)
+result = await run(manager, params, preview=True)
+
+

Multiple tasks

+

Run multiple kriging tasks concurrently — for example, estimating different attributes or using different parameters:

+
from evo.compute.tasks import run, SearchNeighborhood
+from evo.compute.tasks.kriging import KrigingParameters
+
+results = await run(manager, [
+    KrigingParameters(
+        source=pointset.attributes["Au"],
+        target=block_model.attributes["Au_kriged"],
+        variogram=au_variogram,
+        search=SearchNeighborhood(...),
+    ),
+    KrigingParameters(
+        source=pointset.attributes["Cu"],
+        target=block_model.attributes["Cu_kriged"],
+        variogram=cu_variogram,
+        search=SearchNeighborhood(...),
+    ),
+], preview=True)
+
+results[0]  # First kriging result
+results[1]  # Second kriging result
+
+

Working with results

+

Task results provide convenient methods to access the output:

+
# Pretty-print the result
+result  # Shows ✓ Kriging Result with target and attribute info
+
+# Get the target object
+target = await result.get_target_object()
+
+# Get data as a DataFrame
+df = await result.to_dataframe()
+
+

For complete examples, see the kriging notebook and the multiple kriging notebook.

+

FAQ

+

How do I run parallel tasks that update the same attribute?

+

You can set the target for a compute task using block_model.attributes["name"]. If the attribute does not yet exist, it will be created; if it already exists, it will be updated. This is determined by the local state of the object.

+

When the first task creates a new attribute on the server, your local object doesn't know about it yet. If you then try to run more tasks targeting the same attribute name, the local object still thinks it doesn't exist and will try to create it again — causing a conflict.

+

To avoid this:

+
    +
  1. Run the first task to create the attribute.
  2. +
  3. Refresh the local object so it sees the newly created attribute.
  4. +
  5. Run the remaining tasks — now block_model.attributes["kriged_grade"] resolves to the existing attribute and will update it.
  6. +
+
from evo.compute.tasks import run, SearchNeighborhood
+from evo.compute.tasks.kriging import KrigingParameters, RegionFilter
+
+# Step 1: Run the first task — attribute "kriged_grade" does not exist yet, so it is created
+first_result = await run(manager, KrigingParameters(
+    source=pointset.attributes["grade"],
+    target=block_model.attributes["kriged_grade"],
+    variogram=variogram,
+    search=SearchNeighborhood(...),
+    target_region_filter=RegionFilter(
+        attribute=block_model.attributes["domain"],
+        names=["LMS1"],
+    ),
+), preview=True)
+
+# Step 2: Refresh so the local object recognises the newly created attribute
+block_model = await block_model.refresh()
+
+# Step 3: Now "kriged_grade" exists locally — remaining tasks will update it
+results = await run(manager, [
+    KrigingParameters(
+        source=pointset.attributes["grade"],
+        target=block_model.attributes["kriged_grade"],  # Exists → update
+        variogram=variogram,
+        search=SearchNeighborhood(...),
+        target_region_filter=RegionFilter(
+            attribute=block_model.attributes["domain"],
+            names=["LMS2"],
+        ),
+    ),
+    KrigingParameters(
+        source=pointset.attributes["grade"],
+        target=block_model.attributes["kriged_grade"],  # Exists → update
+        variogram=variogram,
+        search=SearchNeighborhood(...),
+        target_region_filter=RegionFilter(
+            attribute=block_model.attributes["domain"],
+            names=["LMS3"],
+        ),
+    ),
+], preview=True)
+
+

!!! tip + If each task writes to a different attribute name, they can all run in parallel without refreshing — the compute service handles concurrent attribute creation on the same target object. See the multiple kriging notebook for an example.

+

!!! note "Preview APIs" + Kriging and other compute tasks are currently preview features. You must pass preview=True when calling run(). + Preview APIs may change between releases. For more details, see:

+
- [Preview APIs](https://developer.seequent.com/docs/api/fundamentals/preview-apis) — how to opt in and what to expect
+- [API Lifecycle](https://developer.seequent.com/docs/api/fundamentals/lifecycle) — how Evo APIs evolve from preview to stable
+
+
+
+ +
+
+

Documentation built with MkDocs.

+
+ + + + + + + + diff --git a/mkdocs/site/packages/evo-compute.html b/mkdocs/site/packages/evo-compute/JobClient.html similarity index 87% rename from mkdocs/site/packages/evo-compute.html rename to mkdocs/site/packages/evo-compute/JobClient.html index 730d54a4..8c9b232a 100644 --- a/mkdocs/site/packages/evo-compute.html +++ b/mkdocs/site/packages/evo-compute/JobClient.html @@ -7,17 +7,17 @@ - - Evo compute - Evo Python SDK - - - - - - + + JobClient - Evo Python SDK + + + + + + - + @@ -25,7 +25,7 @@ + +
+
+
+
+ +

Typed Objects

+ + +
+ + + +

+ evo.compute.tasks.kriging.KrigingParameters + + + + dataclass + + +

+ + +
+ + + +

Parameters for the kriging task.

+

Defines all inputs needed to run a kriging interpolation task.

+

Example: + >>> from evo.compute.tasks import run, SearchNeighborhood, Ellipsoid, EllipsoidRanges + >>> from evo.compute.tasks.kriging import KrigingParameters, RegionFilter + >>> + >>> params = KrigingParameters( + ... source=pointset.attributes["grade"], # Source attribute + ... target=block_model.attributes["kriged_grade"], # Target attribute (creates if doesn't exist) + ... variogram=variogram, # Variogram model + ... search=SearchNeighborhood( + ... ellipsoid=Ellipsoid(ranges=EllipsoidRanges(200, 150, 100)), + ... max_samples=20, + ... ), + ... # method defaults to ordinary kriging + ... ) + >>> + >>> # With region filter to restrict kriging to specific categories on target: + >>> params_filtered = KrigingParameters( + ... source=pointset.attributes["grade"], + ... target=block_model.attributes["kriged_grade"], + ... variogram=variogram, + ... search=SearchNeighborhood(...), + ... target_region_filter=RegionFilter( + ... attribute=block_model.attributes["domain"], + ... names=["LMS1", "LMS2"], + ... ), + ... )

+ + + + + + + + + + + +
+ + + + + + + +
+ + + +

+ source + + + + instance-attribute + + +

+
source: Source = source
+
+ +
+ +

The source object and attribute containing known values.

+ +
+ +
+ +
+ + + +

+ target + + + + instance-attribute + + +

+
target: Target = target
+
+ +
+ +

The target object and attribute to create or update with kriging results.

+ +
+ +
+ +
+ + + +

+ variogram + + + + instance-attribute + + +

+
variogram: GeoscienceObjectReference = variogram
+
+ +
+ +

Model of the covariance within the domain (Variogram object or reference).

+ +
+ +
+ +
+ + + +

+ search + + + + instance-attribute + + +

+
search: SearchNeighborhood = search
+
+ +
+ +

Search neighborhood parameters.

+ +
+ +
+ +
+ + + +

+ method + + + + class-attribute + instance-attribute + + +

+
method: SimpleKriging | OrdinaryKriging | None = method or OrdinaryKriging()
+
+ +
+ +

The kriging method to use. Defaults to ordinary kriging if not specified.

+ +
+ +
+ +
+ + + +

+ target_region_filter + + + + class-attribute + instance-attribute + + +

+
target_region_filter: RegionFilter | None = target_region_filter
+
+ +
+ +

Optional region filter to restrict kriging to specific categories on the target object.

+ +
+ +
+ +
+ + + +

+ block_discretisation + + + + class-attribute + instance-attribute + + +

+
block_discretisation: BlockDiscretisation | None = block_discretisation
+
+ +
+ +

Optional sub-block discretisation for block kriging.

+

When provided, each target block is subdivided into nx × ny × nz sub-cells +and the kriged value is averaged across these sub-cells. When omitted, +point kriging is performed. Only applicable when the target is a 3D grid +or block model.

+ +
+ +
+ + + + +
+ + +

+ to_dict + + +

+
to_dict() -> dict[str, Any]
+
+ +
+ +

Serialize to dictionary.

+ + +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ evo.compute.tasks.kriging.SimpleKriging + + + + dataclass + + +

+ + +
+ + + +

Simple kriging method with a known constant mean.

+

Use when the mean of the variable is known and constant across the domain.

+

Example: + >>> method = SimpleKriging(mean=100.0)

+ + + + + + + + + + + +
+ + + + + + + +
+ + + +

+ mean + + + + instance-attribute + + +

+
mean: float = mean
+
+ +
+ +

The mean value, assumed to be constant across the domain.

+ +
+ +
+ + + + +
+ + +

+ to_dict + + +

+
to_dict() -> dict[str, Any]
+
+ +
+ +

Serialize to dictionary.

+ + +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ evo.compute.tasks.kriging.OrdinaryKriging + + + + dataclass + + +

+ + +
+ + + +

Ordinary kriging method with unknown local mean.

+

The most common kriging method. Estimates the local mean from nearby samples. +This is the default kriging method if none is specified.

+ + + + + + + + + + + +
+ + + + + + + + + + +
+ + +

+ to_dict + + +

+
to_dict() -> dict[str, Any]
+
+ +
+ +

Serialize to dictionary.

+ + +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ evo.compute.tasks.kriging.BlockDiscretisation + + + + dataclass + + +

+ + +
+ + + +

Sub-block discretisation for block kriging.

+

When provided, each target block is subdivided into nx * ny * nz +sub-cells and the kriged value is averaged across these sub-cells. +When omitted (None), point kriging is performed.

+

Only applicable when the target is a 3D grid or block model.

+

Each dimension must be an integer between 1 and 9 (inclusive). +The default value of 1 in every direction is equivalent to point kriging.

+

Example: + >>> discretisation = BlockDiscretisation(nx=3, ny=3, nz=2)

+ + + + + + + + + + + +
+ + + + + + + +
+ + + +

+ nx + + + + instance-attribute + + +

+
nx: int = nx
+
+ +
+ +

Number of subdivisions in the x direction (1–9).

+ +
+ +
+ +
+ + + +

+ ny + + + + instance-attribute + + +

+
ny: int = ny
+
+ +
+ +

Number of subdivisions in the y direction (1–9).

+ +
+ +
+ +
+ + + +

+ nz + + + + instance-attribute + + +

+
nz: int = nz
+
+ +
+ +

Number of subdivisions in the z direction (1–9).

+ +
+ +
+ + + + +
+ + +

+ to_dict + + +

+
to_dict() -> dict[str, Any]
+
+ +
+ +

Serialize to dictionary.

+ + +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ evo.compute.tasks.kriging.RegionFilter + + + + dataclass + + +

+ + +
+ + + +

Region filter for restricting kriging to specific categories on the target.

+

Use either names OR values, not both: +- names: Category names (strings) - used for CategoryAttribute with string lookup +- values: Integer values - used for integer-indexed categories or BlockModel integer columns

+

Example: + >>> # Filter by category names (string lookup) + >>> filter_by_name = RegionFilter( + ... attribute=block_model.attributes["domain"], + ... names=["LMS1", "LMS2"], + ... ) + >>> + >>> # Filter by integer values (direct index matching) + >>> filter_by_value = RegionFilter( + ... attribute=block_model.attributes["domain"], + ... values=[1, 2, 3], + ... )

+ + + + + + + + + + + +
+ + + + + + + +
+ + + +

+ attribute + + + + instance-attribute + + +

+
attribute: Any = attribute
+
+ +
+ +

The category attribute to filter on (from target object).

+ +
+ +
+ +
+ + + +

+ names + + + + class-attribute + instance-attribute + + +

+
names: list[str] | None = names
+
+ +
+ +

Category names to include (mutually exclusive with values).

+ +
+ +
+ +
+ + + +

+ values + + + + class-attribute + instance-attribute + + +

+
values: list[int] | None = values
+
+ +
+ +

Integer category keys to include (mutually exclusive with names).

+ +
+ +
+ + + + +
+ + +

+ to_dict + + +

+
to_dict() -> dict[str, Any]
+
+ +
+ +

Serialize to dictionary for the compute task API.

+ + +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ evo.compute.tasks.kriging.KrigingResult + + +

+ + +
+ + + +

Result of a kriging task.

+

Contains information about the completed kriging operation and provides +convenient methods to access the target object and its data.

+

Example: + >>> result = await run(manager, params) + >>> result # Pretty-prints the result + >>> + >>> # Get data directly as DataFrame (simplest approach) + >>> df = await result.to_dataframe() + >>> + >>> # Or load the target object for more control + >>> target = await result.get_target_object()

+ + + + + + + + + + + +
+ + + + + + + + + + +
+ + +

+ __init__ + + +

+
__init__(message: str, target: _KrigingTarget)
+
+ +
+ +

Initialize a KrigingResult.

+

Args: + message: A message describing what happened in the task. + target: The target information from the kriging result.

+ + +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ evo.compute.tasks.kriging.TaskResult + + +

+ + +
+ + + +

Base class for compute task results.

+

Provides common functionality for all task results including: +- Pretty-printing in Jupyter notebooks +- Portal URL extraction +- Access to target object and data

+ + + + + + + + + + + +
+ + + + + + + +
+ + + +

+ message + + + + instance-attribute + + +

+
message: str = message
+
+ +
+ +

A message describing what happened in the task.

+ +
+ +
+ +
+ + + +

+ target_name + + + + property + + +

+
target_name: str
+
+ +
+ +

The name of the target object.

+ +
+ +
+ +
+ + + +

+ target_reference + + + + property + + +

+
target_reference: str
+
+ +
+ +

Reference URL to the target object.

+ +
+ +
+ +
+ + + +

+ attribute_name + + + + property + + +

+
attribute_name: str
+
+ +
+ +

The name of the attribute that was created/updated.

+ +
+ +
+ +
+ + + +

+ schema_type + + + + property + + +

+
schema_type: str
+
+ +
+ +

The schema type of the target object (e.g., 'regular-masked-3d-grid').

+ +
+ +
+ + + + +
+ + +

+ get_target_object + + + + async + + +

+
get_target_object(context: IContext | None = None)
+
+ +
+ +

Load and return the target geoscience object.

+

Args: + context: Optional context to use. If not provided, uses the context + from when the task was run.

+

Returns: + The typed geoscience object (e.g., Regular3DGrid, RegularMasked3DGrid, BlockModel)

+

Example: + >>> result = await run(manager, params) + >>> target = await result.get_target_object() + >>> target # Pretty-prints with Portal/Viewer links

+ + +
+ +
+ +
+ + +

+ to_dataframe + + + + async + + +

+
to_dataframe(context: IContext | None = None, columns: list[str] | None = None)
+
+ +
+ +

Get the task results as a DataFrame.

+

This is the simplest way to access the task output data. It loads +the target object and returns its data as a pandas DataFrame.

+

Args: + context: Optional context to use. If not provided, uses the context + from when the task was run. + columns: Optional list of column names to include. If None, includes + all columns. Use ["*"] to explicitly request all columns.

+

Returns: + A pandas DataFrame containing the task results.

+

Example: + >>> result = await run(manager, params) + >>> df = await result.to_dataframe() + >>> df.head()

+ + +
+ +
+ +
+ + +

+ __repr__ + + +

+
__repr__() -> str
+
+ +
+ +

String representation.

+ + +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ evo.compute.tasks.kriging.TaskResults + + +

+ + +
+ + + +

Container for multiple task results with pretty-printing support.

+

Provides iteration and indexing support for accessing individual results.

+

Example: + >>> results = await run(manager, [params1, params2, params3]) + >>> results # Pretty-prints all results + >>> results[0] # Access first result + >>> for result in results: + ... print(result.attribute_name)

+ + + + + + + + + + + +
+ + + + + + + +
+ + + +

+ results + + + + property + + +

+
results: list[TaskResult]
+
+ +
+ +

The list of task results.

+ +
+ +
+ + + + +
+ + +

+ __repr__ + + +

+
__repr__() -> str
+
+ +
+ +

String representation.

+ + +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ evo.compute.tasks.common.search.SearchNeighborhood + + + + dataclass + + +

+ + +
+ + + +

Search neighborhood parameters for geostatistical operations.

+

Defines how to find nearby samples when performing spatial interpolation +or estimation. Used by kriging, simulation, and other geostatistical tasks.

+

The search neighborhood is defined by an ellipsoid (spatial extent and +orientation) and constraints on the number of samples to use.

+

Example: + >>> search = SearchNeighborhood( + ... ellipsoid=Ellipsoid( + ... ranges=EllipsoidRanges(major=200.0, semi_major=150.0, minor=100.0), + ... rotation=Rotation(dip_azimuth=45.0), + ... ), + ... max_samples=20, + ... )

+ + + + + + + + + + + +
+ + + + + + + +
+ + + +

+ ellipsoid + + + + instance-attribute + + +

+
ellipsoid: Ellipsoid = ellipsoid
+
+ +
+ +

The ellipsoid defining the spatial extent to search for samples.

+ +
+ +
+ +
+ + + +

+ max_samples + + + + instance-attribute + + +

+
max_samples: int = max_samples
+
+ +
+ +

The maximum number of samples to use for each evaluation point.

+ +
+ +
+ +
+ + + +

+ min_samples + + + + class-attribute + instance-attribute + + +

+
min_samples: int | None = min_samples
+
+ +
+ +

The minimum number of samples required. If fewer are found, the point may be skipped.

+ +
+ +
+ + + + +
+ + +

+ to_dict + + +

+
to_dict() -> dict[str, Any]
+
+ +
+ +

Serialize to dictionary.

+ + +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ evo.compute.tasks.common.source_target.Source + + + + dataclass + + +

+ + +
+ + + +

The source object and attribute containing known values.

+

Used to specify where input data comes from for geostatistical operations. +Can be initialized directly, or more commonly from a typed object's attribute.

+

Example: + >>> # From a typed object attribute (preferred): + >>> source = pointset.attributes["grade"] + >>> + >>> # Or explicitly: + >>> source = Source(object=pointset, attribute="grade")

+ + + + + + + + + + + +
+ + + + + + + +
+ + + +

+ object + + + + instance-attribute + + +

+
object: GeoscienceObjectReference = object
+
+ +
+ +

Reference to the source geoscience object.

+ +
+ +
+ +
+ + + +

+ attribute + + + + instance-attribute + + +

+
attribute: str = attribute
+
+ +
+ +

Name of the attribute on the source object.

+ +
+ +
+ + + + +
+ + +

+ to_dict + + +

+
to_dict() -> dict[str, Any]
+
+ +
+ +

Serialize to dictionary.

+ + +
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ evo.compute.tasks.common.source_target.Target + + + + dataclass + + +

+ + +
+ + + +

The target object and attribute to create or update with results.

+

Used to specify where output data should be written for geostatistical operations.

+

Example: + >>> # Create a new attribute on a target object: + >>> target = Target.new_attribute(block_model, "kriged_grade") + >>> + >>> # Or update an existing attribute: + >>> target = Target(object=grid, attribute=UpdateAttribute("existing_ref"))

+ + + + + + + + + + + +
+ + + + + + + +
+ + + +

+ object + + + + instance-attribute + + +

+
object: GeoscienceObjectReference = object
+
+ +
+ +

Object to write results onto.

+ +
+ +
+ +
+ + + +

+ attribute + + + + instance-attribute + + +

+
attribute: CreateAttribute | UpdateAttribute = attribute
+
+ +
+ +

Attribute specification (create new or update existing).

+ +
+ +
+ + + + +
+ + +

+ new_attribute + + + + classmethod + + +

+
new_attribute(object: GeoscienceObjectReference, attribute_name: str) -> Target
+
+ +
+ +

Create a Target that will create a new attribute on the target object.

+

Args: + object: The target object to write results onto. + attribute_name: The name of the new attribute to create.

+

Returns: + A Target instance configured to create a new attribute.

+

Example: + >>> target = Target.new_attribute(block_model, "kriged_grade")

+ + +
+ +
+ +
+ + +

+ to_dict + + +

+
to_dict() -> dict[str, Any]
+
+ +
+ +

Serialize to dictionary.

+ + +
+ +
+ + + +
+ +
+ +
+
+
+ +
+
+

Documentation built with MkDocs.

+
+ + + + + + + + diff --git a/mkdocs/site/packages/evo-files.html b/mkdocs/site/packages/evo-files/FileAPIClient.html similarity index 97% rename from mkdocs/site/packages/evo-files.html rename to mkdocs/site/packages/evo-files/FileAPIClient.html index b0817e07..91ce8a00 100644 --- a/mkdocs/site/packages/evo-files.html +++ b/mkdocs/site/packages/evo-files/FileAPIClient.html @@ -7,17 +7,17 @@ - - Evo files - Evo Python SDK - - - - - - + + FileAPIClient - Evo Python SDK + + + + + + - + @@ -25,7 +25,7 @@ + +
+
+
+
+ +

evo-objects

+

GitHub source

+

The evo-objects package provides both a low-level API client and typed Python classes for working with geoscience objects in Evo.

+

Typed Objects

+

The typed objects module provides intuitive Python classes for working with Evo geoscience objects. Instead of dealing with raw API responses, you work with PointSet, Regular3DGrid, Variogram, and other domain-specific types that provide:

+
    +
  • Simple property access (e.g., pointset.num_points, grid.bounding_box)
  • +
  • to_dataframe() for getting data as pandas DataFrames
  • +
  • Rich HTML display in Jupyter notebooks (via %load_ext evo.widgets)
  • +
  • Clickable links to Evo Portal and Viewer
  • +
+

See the Typed Objects page for the full API reference.

+

Loading objects

+

Three convenience functions let you load any typed object by reference, path, or UUID:

+
from evo.objects.typed import object_from_path, object_from_uuid, object_from_reference
+
+# By file path in the workspace
+obj = await object_from_path(manager, "my-folder/assay-data")
+
+# By UUID
+obj = await object_from_uuid(manager, "b208a6c9-6881-4b97-b02d-acb5d81299bb")
+
+# By full object reference URL
+obj = await object_from_reference(manager, reference_url)
+
+

The correct typed class (PointSet, Regular3DGrid, etc.) is selected automatically based on the object's schema.

+

BlockModel (via evo-blockmodels)

+

The BlockModel type is a geoscience object that acts as a proxy to the Block Model Service. When evo-blockmodels is installed (pip install evo-objects[blockmodels]), the full range of block model operations is available directly on the BlockModel object — no need to use the low-level BlockModelAPIClient.

+
from evo.objects.typed import BlockModel, RegularBlockModelData, Point3, Size3d, Size3i
+
+data = RegularBlockModelData(
+    name="My Block Model",
+    origin=Point3(x=0, y=0, z=0),
+    n_blocks=Size3i(nx=10, ny=10, nz=5),
+    block_size=Size3d(dx=2.5, dy=5.0, dz=5.0),
+    cell_data=my_dataframe,
+)
+bm = await BlockModel.create_regular(manager, data)
+
+
# Load an existing block model
+bm = await object_from_path(manager, "my-folder/block-model")
+
+# Get data as a DataFrame
+df = await bm.to_dataframe()
+
+# Add a new attribute
+await bm.add_attribute(data_df, "new_attribute", unit="g/t")
+
+# Create and run a report
+report = await bm.create_report(spec)
+result = await report.run(manager)
+df = result.to_dataframe()
+
+

After a compute task (e.g., kriging) adds attributes on the server, call refresh() to update the local object:

+
bm = await bm.refresh()
+bm.attributes  # Now shows newly added attributes
+
+

For the full evo-blockmodels typed API (RegularBlockModel, Reports, Units), see the evo-blockmodels documentation.

+
+
+ +
+
+

Documentation built with MkDocs.

+
+ + + + + + + + diff --git a/mkdocs/site/packages/evo-objects/ObjectAPIClient.html b/mkdocs/site/packages/evo-objects/ObjectAPIClient.html index 51285cd6..175b3e19 100644 --- a/mkdocs/site/packages/evo-objects/ObjectAPIClient.html +++ b/mkdocs/site/packages/evo-objects/ObjectAPIClient.html @@ -36,12 +36,12 @@

diff --git a/mkdocs/site/packages/evo-sdk-common/discovery.html b/mkdocs/site/packages/evo-sdk-common/discovery/DiscoveryAPIClient.html similarity index 88% rename from mkdocs/site/packages/evo-sdk-common/discovery.html rename to mkdocs/site/packages/evo-sdk-common/discovery/DiscoveryAPIClient.html index 1cac4d8a..22855970 100644 --- a/mkdocs/site/packages/evo-sdk-common/discovery.html +++ b/mkdocs/site/packages/evo-sdk-common/discovery/DiscoveryAPIClient.html @@ -7,17 +7,17 @@ - - Discovery - Evo Python SDK - - - - - - + + DiscoveryAPIClient - Evo Python SDK + + + + + + - + @@ -25,7 +25,7 @@ + +
+
+
+
+ +

evo-widgets

+

GitHub source

+

Widgets and presentation layer for the Evo Python SDK — HTML rendering, URL generation, and IPython formatters for Jupyter notebooks.

+

Usage

+

Load the IPython extension in your notebook to enable rich HTML rendering for all Evo SDK typed objects:

+
%load_ext evo.widgets
+
+

After loading, typed objects like PointSet, Regular3DGrid, TensorGrid, and BlockModel will automatically render with formatted metadata tables, clickable Portal/Viewer links, and bounding box information.

+

URL Functions

+

Generate URLs to view objects in the Evo Portal and Viewer:

+
from evo.widgets import (
+    get_portal_url_for_object,
+    get_viewer_url_for_object,
+    get_viewer_url_for_objects,
+)
+
+# Get Portal URL for a single object
+portal_url = get_portal_url_for_object(grid)
+
+# Get Viewer URL for a single object
+viewer_url = get_viewer_url_for_object(grid)
+
+# View multiple objects together in the Viewer
+url = get_viewer_url_for_objects(manager, [grid, pointset, tensor_grid])
+
+

Formatters

+

Rich HTML representations for all typed geoscience objects:

+
    +
  • PointSet, Regular3DGrid, TensorGrid, BlockModel
  • +
  • Variogram
  • +
  • Attributes collections
  • +
  • Report and ReportResult
  • +
  • TaskResult and TaskResults (compute results)
  • +
+

All formatters are registered automatically when you load the extension with %load_ext evo.widgets. They support light/dark mode via Jupyter theme CSS variables.

+

How It Works

+

When you run %load_ext evo.widgets, the extension registers HTML formatters with IPython using for_type_by_name. This approach:

+
    +
  1. Avoids hard dependencies — The widgets package doesn't import model classes directly
  2. +
  3. Works with all typed objects — Formatters are registered for the base class, so all subclasses are covered
  4. +
  5. Lazy loading — Formatters only activate when the relevant types are actually used
  6. +
+
+
+ + + + + + + + + + diff --git a/mkdocs/typed_objects.txt b/mkdocs/typed_objects.txt new file mode 100644 index 00000000..77330470 --- /dev/null +++ b/mkdocs/typed_objects.txt @@ -0,0 +1,39 @@ +packages.evo-objects.src.evo.objects.typed.base.object_from_path +packages.evo-objects.src.evo.objects.typed.base.object_from_uuid +packages.evo-objects.src.evo.objects.typed.base.object_from_reference +packages.evo-objects.src.evo.objects.typed.pointset.PointSet +packages.evo-objects.src.evo.objects.typed.pointset.PointSetData +packages.evo-objects.src.evo.objects.typed.regular_grid.Regular3DGrid +packages.evo-objects.src.evo.objects.typed.regular_grid.Regular3DGridData +packages.evo-objects.src.evo.objects.typed.regular_masked_grid.RegularMasked3DGrid +packages.evo-objects.src.evo.objects.typed.regular_masked_grid.RegularMasked3DGridData +packages.evo-objects.src.evo.objects.typed.tensor_grid.Tensor3DGrid +packages.evo-objects.src.evo.objects.typed.tensor_grid.Tensor3DGridData +packages.evo-objects.src.evo.objects.typed.variogram.Variogram +packages.evo-objects.src.evo.objects.typed.variogram.VariogramData +packages.evo-objects.src.evo.objects.typed.block_model_ref.BlockModel +packages.evo-objects.src.evo.objects.typed.attributes.Attributes +packages.evo-objects.src.evo.objects.typed.attributes.Attribute +packages.evo-objects.src.evo.objects.typed.types.BoundingBox +packages.evo-blockmodels.src.evo.blockmodels.typed.regular_block_model.RegularBlockModel +packages.evo-blockmodels.src.evo.blockmodels.typed.regular_block_model.RegularBlockModelData +packages.evo-blockmodels.src.evo.blockmodels.typed.report.Report +packages.evo-blockmodels.src.evo.blockmodels.typed.report.ReportSpecificationData +packages.evo-blockmodels.src.evo.blockmodels.typed.report.ReportResult +packages.evo-blockmodels.src.evo.blockmodels.typed.report.ReportColumnSpec +packages.evo-blockmodels.src.evo.blockmodels.typed.report.ReportCategorySpec 
+packages.evo-blockmodels.src.evo.blockmodels.typed.report.Aggregation +packages.evo-blockmodels.src.evo.blockmodels.typed.units.Units +packages.evo-blockmodels.src.evo.blockmodels.typed.units.UnitInfo +packages.evo-compute.src.evo.compute.tasks.kriging.KrigingParameters +packages.evo-compute.src.evo.compute.tasks.kriging.SimpleKriging +packages.evo-compute.src.evo.compute.tasks.kriging.OrdinaryKriging +packages.evo-compute.src.evo.compute.tasks.kriging.BlockDiscretisation +packages.evo-compute.src.evo.compute.tasks.kriging.RegionFilter +packages.evo-compute.src.evo.compute.tasks.kriging.KrigingResult +packages.evo-compute.src.evo.compute.tasks.kriging.TaskResult +packages.evo-compute.src.evo.compute.tasks.kriging.TaskResults +packages.evo-compute.src.evo.compute.tasks.common.search.SearchNeighborhood +packages.evo-compute.src.evo.compute.tasks.common.source_target.Source +packages.evo-compute.src.evo.compute.tasks.common.source_target.Target + diff --git a/packages/evo-compute/docs/examples/kriging.ipynb b/packages/evo-compute/docs/examples/kriging.ipynb new file mode 100644 index 00000000..83b4f482 --- /dev/null +++ b/packages/evo-compute/docs/examples/kriging.ipynb @@ -0,0 +1,585 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "3fd8937c5ca952ad", + "metadata": {}, + "source": [ + "# Kriging Compute Task\n", + "\n", + "This notebook demonstrates how to run kriging compute tasks using the `evo-compute` package.\n", + "\n", + "Kriging is a geostatistical interpolation technique that estimates values at unsampled locations\n", + "using weighted averages of nearby known values, based on a variogram model of spatial correlation." 
+ ] + }, + { + "cell_type": "markdown", + "id": "16a20624c61f0a8c", + "metadata": {}, + "source": [ + "## Authentication\n", + "\n", + "First, authenticate using the `ServiceManagerWidget`:" + ] + }, + { + "cell_type": "code", + "id": "35b04526197d68a9", + "metadata": {}, + "source": [ + "from evo.notebooks import ServiceManagerWidget\n", + "\n", + "manager = await ServiceManagerWidget.with_auth_code(\n", + " client_id=\"your-client-id\", cache_location=\"./notebook-data\"\n", + ").login()" + ], + "outputs": [], + "execution_count": null + }, + { + "metadata": {}, + "cell_type": "code", + "source": [ + "# Load the widgets extension for rich HTML display\n", + "%load_ext evo.widgets" + ], + "id": "4877a0a895adf9d1", + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "f371e3b7f89aff3f", + "metadata": {}, + "source": [ + "## Example 1: Run Kriging on Existing Objects\n", + "\n", + "This example shows how to run kriging using existing geoscience objects (source pointset, target grid, and variogram)." 
+ ] + }, + { + "cell_type": "markdown", + "id": "7ab0cc0f0b9d79ed", + "metadata": {}, + "source": "### Load the Source PointSet, Target Grid, and Variogram" + }, + { + "cell_type": "code", + "id": "fe9afe5aac33694b", + "metadata": {}, + "source": [ + "from evo.objects.typed import object_from_uuid\n", + "\n", + "# Load objects by UUID (replace with your actual UUIDs)\n", + "source_pointset = await object_from_uuid(manager, \"9100d7dc-44e9-4e61-b427-159635dea22f\")\n", + "# Alternative: load by path\n", + "# source_pointset = await object_from_path(manager, \"path/to/pointset.json\")\n", + "\n", + "target_grid = await object_from_uuid(manager, \"df9c3705-c82e-4f57-af94-b3346b5d58cf\")\n", + "# Alternative: load by path\n", + "# target_grid = await object_from_path(manager, \"path/to/grid.json\")\n", + "\n", + "variogram = await object_from_uuid(manager, \"72cd9b83-90f4-4cb0-9691-95728e3f9cbb\")\n", + "# Alternative: load by path\n", + "# variogram = await object_from_path(manager, \"path/to/variogram.json\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "3aeeda68814e89b9", + "metadata": {}, + "source": [ + "# Pretty-print the source pointset (includes Portal/Viewer links)\n", + "source_pointset" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "ed2f2f59ebe818e5", + "metadata": {}, + "source": [ + "# View the source pointset attributes\n", + "source_pointset.attributes" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "c35f77a13fd03c22", + "metadata": {}, + "source": [ + "# Pretty-print the target grid\n", + "target_grid" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "887e895070f8c74d", + "metadata": {}, + "source": [ + "# Pretty-print the variogram\n", + "variogram" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "9700289296c85afb", + "metadata": {}, + "source": 
"### Run Kriging Task" + }, + { + "cell_type": "code", + "id": "49a9cec09a87b702", + "metadata": {}, + "source": [ + "from evo.compute.tasks import SearchNeighborhood, run\n", + "from evo.compute.tasks.kriging import KrigingParameters\n", + "\n", + "# Get ellipsoid from the variogram structure with largest range (default)\n", + "var_ell = variogram.get_ellipsoid()\n", + "\n", + "# Create search ellipsoid by scaling the variogram ellipsoid by 2x\n", + "search_ellipsoid = var_ell.scaled(2.0)" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "332fd7c8e06a7fd6", + "metadata": {}, + "source": [ + "# Visualize variogram and search ellipsoids with pointset data\n", + "import plotly.graph_objects as go\n", + "\n", + "# Get pointset data for center calculation and scatter plot\n", + "pts = await source_pointset.to_dataframe()\n", + "center = (pts[\"x\"].mean(), pts[\"y\"].mean(), pts[\"z\"].mean())\n", + "\n", + "# Generate mesh surface points for visualization\n", + "vx, vy, vz = var_ell.surface_points(center=center)\n", + "sx, sy, sz = search_ellipsoid.surface_points(center=center)\n", + "\n", + "# Build visualization\n", + "var_mesh = go.Mesh3d(x=vx, y=vy, z=vz, alphahull=0, opacity=0.3, color=\"blue\", name=\"Variogram Ellipsoid\")\n", + "search_mesh = go.Mesh3d(x=sx, y=sy, z=sz, alphahull=0, opacity=0.2, color=\"gold\", name=\"Search Ellipsoid (2x)\")\n", + "scatter = go.Scatter3d(\n", + " x=pts[\"x\"],\n", + " y=pts[\"y\"],\n", + " z=pts[\"z\"],\n", + " mode=\"markers\",\n", + " marker=dict(size=2, color=pts[\"Ag_ppm Values\"], colorscale=\"Viridis\", showscale=True),\n", + " name=\"Sample Points\",\n", + ")\n", + "\n", + "fig = go.Figure(data=[var_mesh, search_mesh, scatter])\n", + "fig.update_layout(title=\"Kriging Inputs: Variogram & Search Ellipsoids\", scene=dict(aspectmode=\"data\"), showlegend=True)\n", + "fig.show()" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": 
"7f375586017fb524", + "metadata": {}, + "source": [ + "# Create kriging parameters\n", + "# Note: method defaults to ordinary kriging, so we don't need to specify it\n", + "kriging_params = KrigingParameters(\n", + " source=source_pointset.attributes[\"Ag_ppm Values\"],\n", + " target=target_grid.attributes[\"kriged_grade 5\"],\n", + " variogram=variogram,\n", + " search=SearchNeighborhood(\n", + " ellipsoid=search_ellipsoid,\n", + " max_samples=20,\n", + " ),\n", + ")\n", + "\n", + "# Run the kriging task (progress feedback is shown by default)\n", + "print(\"Submitting kriging task...\")\n", + "result = await run(manager, kriging_params, preview=True)\n", + "\n", + "# Display the kriging result (pretty-printed)\n", + "result" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "e984ded1ca23d299", + "metadata": {}, + "source": [ + "# Get the data directly as a DataFrame (simplest approach)\n", + "df = await result.to_dataframe()\n", + "print(f\"Retrieved {len(df)} rows\")\n", + "df.head()" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "9ab8a80fe9a8ef02", + "metadata": {}, + "source": [ + "# Or load the target object for more control\n", + "target_grid = await result.get_target_object()\n", + "\n", + "# Pretty-print the updated target grid\n", + "target_grid" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "c2ad59a5687ef8e0", + "metadata": {}, + "source": [ + "# Or get the data as a DataFrame directly\n", + "df = await result.to_dataframe()\n", + "print(f\"Retrieved {len(df)} cells\")\n", + "df.head()" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "8a4093575dfd0d7e", + "metadata": {}, + "source": [ + "## Example 2: Create Objects and Run Kriging\n", + "\n", + "This example shows how to create the input objects from scratch and then run kriging." 
+ ] + }, + { + "cell_type": "markdown", + "id": "fa713815e4d6dabe", + "metadata": {}, + "source": "### Create the Source PointSet" + }, + { + "cell_type": "code", + "id": "3b8e81ed282ffd14", + "metadata": {}, + "source": [ + "import uuid\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "from evo.objects.typed import EpsgCode, PointSet, PointSetData\n", + "\n", + "# Generate sample point data\n", + "n_points = 100\n", + "np.random.seed(42)\n", + "\n", + "# Create points in a 1000x1000x100 domain\n", + "x = np.random.uniform(0, 1000, n_points)\n", + "y = np.random.uniform(0, 1000, n_points)\n", + "z = np.random.uniform(0, 100, n_points)\n", + "\n", + "# Create an elevation attribute (z + some noise)\n", + "elevation = z + np.random.normal(0, 5, n_points)\n", + "\n", + "# Create pointset using PointSetData\n", + "pointset_data = PointSetData(\n", + " name=f\"Sample Source Points - {uuid.uuid4()}\",\n", + " coordinate_reference_system=EpsgCode(32632),\n", + " locations=pd.DataFrame({\"x\": x, \"y\": y, \"z\": z, \"elevation\": elevation}),\n", + ")\n", + "\n", + "# Create the pointset object\n", + "source_pointset_created = await PointSet.create(manager, pointset_data)\n", + "\n", + "print(f\"Created source pointset: {source_pointset_created.name}\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "1c6e9dad1ca4c4c5", + "metadata": {}, + "source": [ + "# Pretty-print the created pointset (includes Portal/Viewer links)\n", + "source_pointset_created" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "ba4715009b908d2e", + "metadata": {}, + "source": [ + "# View the created pointset attributes\n", + "source_pointset_created.attributes" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "a29c16aecbcf54ab", + "metadata": {}, + "source": [ + "### Create a Variogram\n", + "\n", + "Create a variogram using the typed `Variogram` class." 
+ ] + }, + { + "metadata": {}, + "cell_type": "code", + "outputs": [], + "execution_count": null, + "source": [ + "from evo.objects.typed import (\n", + " Ellipsoid,\n", + " EllipsoidRanges,\n", + " Rotation,\n", + " SphericalStructure,\n", + " Variogram,\n", + " VariogramData,\n", + ")\n", + "\n", + "# Define a spherical variogram model using typed classes\n", + "variogram_data = VariogramData(\n", + " name=f\"Sample Variogram - {uuid.uuid4()}\",\n", + " sill=1.0,\n", + " nugget=0.1,\n", + " is_rotation_fixed=True,\n", + " modelling_space=\"data\", # Required for kriging\n", + " data_variance=1.0, # Should match sill for non-normalized data\n", + " structures=[\n", + " SphericalStructure(\n", + " contribution=0.9,\n", + " anisotropy=Ellipsoid(\n", + " ranges=EllipsoidRanges(major=200.0, semi_major=200.0, minor=100.0),\n", + " rotation=Rotation(dip_azimuth=10.0, dip=20.0, pitch=30.0),\n", + " ),\n", + " )\n", + " ],\n", + " attribute=\"elevation\",\n", + " domain=\"all\",\n", + ")\n", + "\n", + "variogram_created = await Variogram.create(manager, variogram_data)\n", + "\n", + "print(f\"Created variogram: {variogram_created.name}\")" + ], + "id": "cd6189c97bbbf5ac" + }, + { + "metadata": {}, + "cell_type": "code", + "outputs": [], + "execution_count": null, + "source": [ + "# Pretty-print the created variogram\n", + "variogram_created" + ], + "id": "449afd3af83c5914" + }, + { + "cell_type": "markdown", + "id": "401d111e4247a9bd", + "metadata": {}, + "source": [ + "### Visualize the Variogram\n", + "\n", + "Use the variogram visualization tool to inspect the directional variogram curves and anisotropy ellipsoids.\n", + "This helps verify that the variogram parameters are correct before running kriging." 
+ ] + }, + { + "cell_type": "code", + "id": "151352364fe3b6ff", + "metadata": {}, + "source": [ + "# Visualize variogram and search ellipsoids with pointset data\n", + "import plotly.graph_objects as go\n", + "\n", + "# Get pointset data for center calculation and scatter plot\n", + "pts = await source_pointset_created.to_dataframe()\n", + "center = (pts[\"x\"].mean(), pts[\"y\"].mean(), pts[\"z\"].mean())\n", + "\n", + "# Create ellipsoid from variogram (first structure)\n", + "var_ell = variogram_created.get_ellipsoid()\n", + "vx, vy, vz = var_ell.surface_points(center=center)\n", + "\n", + "# Create search ellipsoid scaled by 2x\n", + "search_ell = var_ell.scaled(2.0)\n", + "sx, sy, sz = search_ell.surface_points(center=center)\n", + "\n", + "# Build visualization\n", + "var_mesh = go.Mesh3d(x=vx, y=vy, z=vz, alphahull=0, opacity=0.3, color=\"blue\", name=\"Variogram Ellipsoid\")\n", + "search_mesh = go.Mesh3d(x=sx, y=sy, z=sz, alphahull=0, opacity=0.2, color=\"gold\", name=\"Search Ellipsoid (2x)\")\n", + "scatter = go.Scatter3d(\n", + " x=pts[\"x\"],\n", + " y=pts[\"y\"],\n", + " z=pts[\"z\"],\n", + " mode=\"markers\",\n", + " marker=dict(size=2, color=pts[\"elevation\"], colorscale=\"Viridis\", showscale=True),\n", + " name=\"Sample Points\",\n", + ")\n", + "\n", + "fig = go.Figure(data=[var_mesh, search_mesh, scatter])\n", + "fig.update_layout(title=\"Kriging Inputs: Variogram & Search Ellipsoids\", scene=dict(aspectmode=\"data\"), showlegend=True)\n", + "fig.show()" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "2223118aa75b50bb", + "metadata": {}, + "source": "### Create the Target Grid" + }, + { + "cell_type": "code", + "id": "66f708dfcd8c3f6f", + "metadata": {}, + "source": [ + "from evo.objects.typed import Point3, RegularMasked3DGrid, RegularMasked3DGridData, Size3d, Size3i\n", + "from evo.objects.typed import Rotation as GridRotation\n", + "\n", + "# Define grid dimensions\n", + "nx, ny, nz = 20, 20, 10 # 
Number of cells in each direction\n", + "cell_size = 50.0 # Size of each cell\n", + "\n", + "# Create a mask for the grid (all cells active in this example)\n", + "total_cells = nx * ny * nz\n", + "mask = np.ones(total_cells, dtype=bool)\n", + "\n", + "# Optionally, mask out some cells to create a more interesting shape\n", + "for zi in range(nz // 2):\n", + " for yi in range(ny // 2):\n", + " for xi in range(nx // 2):\n", + " idx = xi + yi * nx + zi * nx * ny\n", + " mask[idx] = False\n", + "\n", + "# Create masked grid using RegularMasked3DGridData\n", + "grid_data = RegularMasked3DGridData(\n", + " name=f\"Target Masked Grid - {uuid.uuid4()}\",\n", + " coordinate_reference_system=EpsgCode(32632),\n", + " origin=Point3(0, 0, 0),\n", + " size=Size3i(nx, ny, nz),\n", + " cell_size=Size3d(cell_size, cell_size, cell_size),\n", + " rotation=GridRotation(0, 0, 0),\n", + " mask=mask,\n", + " cell_data=None, # No attributes yet, kriging will add them\n", + ")\n", + "\n", + "# Create the grid object\n", + "target_grid_created = await RegularMasked3DGrid.create(manager, grid_data)\n", + "\n", + "print(f\"Created target grid: {target_grid_created.name}\")\n", + "print(f\" Total cells: {nx} x {ny} x {nz} = {total_cells}\")\n", + "print(f\" Active cells: {int(mask.sum())}\")\n", + "print(f\" Bounding box: {target_grid_created.bounding_box}\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "64fec5ce612b7d75", + "metadata": {}, + "source": [ + "# Pretty-print the created grid (includes Portal/Viewer links)\n", + "target_grid_created" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "3c9f833f2804b56e", + "metadata": {}, + "source": "### Run Kriging on Created Objects" + }, + { + "metadata": {}, + "cell_type": "code", + "outputs": [], + "execution_count": null, + "source": [ + "from evo.objects.typed import Ellipsoid, EllipsoidRanges, Rotation\n", + "\n", + "from evo.compute.tasks import 
SearchNeighborhood, Target, run\n", + "from evo.compute.tasks.kriging import KrigingParameters\n", + "\n", + "# Create kriging parameters using typed Attribute access\n", + "# source_pointset_created.locations.attributes[\"elevation\"] gives us an Attribute object\n", + "kriging_params = KrigingParameters(\n", + " source=source_pointset_created.locations.attributes[\"elevation\"],\n", + " target=Target.new_attribute(target_grid_created, attribute_name=\"kriged_elevation2\"),\n", + " variogram=variogram_created,\n", + " search=SearchNeighborhood(\n", + " ellipsoid=Ellipsoid(\n", + " ranges=EllipsoidRanges(major=134.0, semi_major=90.0, minor=40.0),\n", + " rotation=Rotation(dip_azimuth=100.0, dip=65.0, pitch=75.0),\n", + " ),\n", + " max_samples=20,\n", + " ),\n", + ")\n", + "\n", + "# Run the kriging task\n", + "print(\"Submitting kriging task...\")\n", + "result = await run(manager, kriging_params, preview=True)\n", + "\n", + "print(\"Task completed!\")" + ], + "id": "4d085c0c9c1f30e4" + }, + { + "metadata": {}, + "cell_type": "code", + "outputs": [], + "execution_count": null, + "source": [ + "# Display the kriging result (pretty-printed)\n", + "result" + ], + "id": "acba8b49cfbcc650" + }, + { + "cell_type": "code", + "id": "b23eaf4887cd4de1", + "metadata": {}, + "source": [ + "# Get the data directly as a DataFrame\n", + "df = await result.to_dataframe()\n", + "print(f\"Retrieved {len(df)} cells with kriged values\")\n", + "df.head()" + ], + "outputs": [], + "execution_count": null + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/packages/evo-compute/docs/examples/kriging_multiple.ipynb b/packages/evo-compute/docs/examples/kriging_multiple.ipynb new file mode 100644 index 00000000..c73e9f28 --- /dev/null +++ b/packages/evo-compute/docs/examples/kriging_multiple.ipynb @@ -0,0 +1,755 @@ +{ + "cells": [ + { + "cell_type": 
"markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Running Multiple Kriging Tasks\n", + "\n", + "This notebook demonstrates how to run multiple kriging compute tasks concurrently\n", + "using the `run` function with a list of parameters. This is useful for:\n", + "\n", + "- Scenario analysis with different parameters\n", + "- Sensitivity studies varying neighborhood settings\n", + "- Batch processing multiple attributes\n", + "\n", + "All kriging results are stored as attributes in a single Block Model." + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, + "source": [ + "## Authentication" + ] + }, + { + "cell_type": "code", + "id": "2", + "metadata": {}, + "source": [ + "from evo.notebooks import ServiceManagerWidget\n", + "\n", + "manager = await ServiceManagerWidget.with_auth_code(\n", + " client_id=\"your-client-id\", cache_location=\"./notebook-data\"\n", + ").login()" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "3", + "metadata": {}, + "source": [ + "# Load the widgets extension for rich HTML display\n", + "%load_ext evo.widgets" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "## Load Source PointSet and Variogram\n", + "\n", + "Load the source pointset and variogram using `object_from_uuid`." 
+ ] + }, + { + "cell_type": "code", + "id": "5", + "metadata": {}, + "source": [ + "from evo.objects.typed import object_from_uuid\n", + "\n", + "# Load by UUID (replace with your actual UUIDs)\n", + "source_pointset = await object_from_uuid(manager, \"9100d7dc-44e9-4e61-b427-159635dea22f\")\n", + "# Alternative: load by path\n", + "# source_pointset = await object_from_path(manager, \"path/to/pointset.json\")\n", + "\n", + "# Display the pointset (pretty-printed in Jupyter)\n", + "source_pointset" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "6", + "metadata": {}, + "source": [ + "# View the pointset attributes\n", + "source_pointset.attributes" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "7", + "metadata": {}, + "source": [ + "# Load the variogram\n", + "variogram = await object_from_uuid(manager, \"72cd9b83-90f4-4cb0-9691-95728e3f9cbb\")\n", + "\n", + "# Alternative: load by path\n", + "# variogram = await object_from_path(manager, \"path/to/variogram.json\")\n", + "\n", + "# Display the variogram (pretty-printed in Jupyter)\n", + "variogram" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "8", + "metadata": {}, + "source": [ + "print(f\"Source: {source_pointset.name}\")\n", + "print(f\"Variogram: {variogram.name}\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "## Create Target Block Model\n", + "\n", + "Create a single Block Model to hold all scenario results as attributes.\n", + "The Block Model Service manages concurrent attribute creation." 
+ ] + }, + { + "cell_type": "code", + "id": "10", + "metadata": {}, + "source": [ + "import uuid\n", + "\n", + "from evo.blockmodels.typed import Units\n", + "from evo.objects.typed import BlockModel, Point3, RegularBlockModelData, Size3d, Size3i\n", + "\n", + "run_uuid = uuid.uuid4()\n", + "\n", + "# Create a Block Model to hold all scenario results\n", + "# Adjust origin, n_blocks, and block_size to match your data domain\n", + "bm_data = RegularBlockModelData(\n", + " name=f\"Kriging Scenarios - {run_uuid}\",\n", + " description=\"Block model with kriging results for different max_samples scenarios\",\n", + " origin=Point3(x=10000, y=100000, z=200),\n", + " n_blocks=Size3i(nx=40, ny=40, nz=40),\n", + " block_size=Size3d(dx=25.0, dy=25.0, dz=10.0),\n", + " coordinate_reference_system=\"EPSG:32632\",\n", + " size_unit_id=Units.METRES,\n", + ")\n", + "\n", + "block_model = await BlockModel.create_regular(manager, bm_data)\n", + "\n", + "print(f\"Created Block Model: {block_model.name}\")\n", + "print(f\"Block Model UUID: {block_model.block_model_uuid}\")\n", + "print(f\"Bounding Box: {block_model.bounding_box}\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "11", + "metadata": {}, + "source": [ + "# Use existing block model instead\n", + "# block_model = await object_from_uuid(manager, \"9e19c1e7-3a52-452a-978f-73dc9440dbbe\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "12", + "metadata": {}, + "source": [ + "# Display the block model (pretty-printed in Jupyter)\n", + "block_model" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "13", + "metadata": {}, + "source": [ + "## Define Kriging Scenarios\n", + "\n", + "Create multiple parameter sets varying the `max_samples` parameter to study its effect.\n", + "All scenarios target the same Block Model, creating different attributes.\n", + "\n", + "Each scenario also uses
`BlockDiscretisation` to subdivide target blocks into 3×3×2 sub-cells\n", + "for volume-averaged block kriging. Omit `block_discretisation` (or pass `None`) for point kriging." + ] + }, + { + "cell_type": "code", + "id": "14", + "metadata": {}, + "source": [ + "from evo.compute.tasks import BlockDiscretisation, SearchNeighborhood\n", + "from evo.compute.tasks.kriging import KrigingParameters\n", + "\n", + "# Define different max_samples values to test\n", + "max_samples_values = [5, 10, 15, 20]\n", + "# Get ellipsoid from the variogram structure with largest range (default)\n", + "var_ell = variogram.get_ellipsoid()\n", + "\n", + "# Create search ellipsoid by scaling the variogram ellipsoid by 2x\n", + "search_ellipsoid = var_ell.scaled(2.0)" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "15", + "metadata": {}, + "source": [ + "# Visualize variogram and search ellipsoids with pointset data\n", + "import plotly.graph_objects as go\n", + "\n", + "# Get pointset data for center calculation and scatter plot\n", + "pts = await source_pointset.to_dataframe()\n", + "center = (pts[\"x\"].mean(), pts[\"y\"].mean(), pts[\"z\"].mean())\n", + "\n", + "# Generate mesh surface points for visualization\n", + "vx, vy, vz = var_ell.surface_points(center=center)\n", + "sx, sy, sz = search_ellipsoid.surface_points(center=center)\n", + "\n", + "# Build visualization\n", + "var_mesh = go.Mesh3d(x=vx, y=vy, z=vz, alphahull=0, opacity=0.3, color=\"blue\", name=\"Variogram Ellipsoid\")\n", + "search_mesh = go.Mesh3d(x=sx, y=sy, z=sz, alphahull=0, opacity=0.2, color=\"gold\", name=\"Search Ellipsoid (2x)\")\n", + "scatter = go.Scatter3d(\n", + " x=pts[\"x\"],\n", + " y=pts[\"y\"],\n", + " z=pts[\"z\"],\n", + " mode=\"markers\",\n", + " marker=dict(size=2, color=pts[\"Ag_ppm Values\"], colorscale=\"Viridis\", showscale=True),\n", + " name=\"Sample Points\",\n", + ")\n", + "\n", + "fig = go.Figure(data=[var_mesh, search_mesh, scatter])\n", + 
"fig.update_layout(title=\"Kriging Inputs: Variogram & Search Ellipsoids\", scene=dict(aspectmode=\"data\"), showlegend=True)\n", + "fig.show()" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "16", + "metadata": {}, + "source": [ + "# Base source configuration\n", + "source = source_pointset.attributes[\"Ag_ppm Values\"]\n", + "\n", + "# Block discretisation subdivides each target block into nx * ny * nz sub-cells.\n", + "# The kriged value is averaged across these sub-cells, producing a more accurate\n", + "# volume-averaged estimate (block kriging) compared to point kriging (no discretisation).\n", + "discretisation = BlockDiscretisation(nx=3, ny=3, nz=2)\n", + "\n", + "# Create parameter sets for each scenario, all targeting the same Block Model\n", + "# Note: method defaults to ordinary kriging, so we don't need to specify it\n", + "parameter_sets = []\n", + "for max_samples in max_samples_values:\n", + " params = KrigingParameters(\n", + " source=source,\n", + " target=block_model.attributes[f\"Samples={max_samples}\"],\n", + " variogram=variogram,\n", + " search=SearchNeighborhood(ellipsoid=search_ellipsoid, max_samples=max_samples),\n", + " block_discretisation=discretisation,\n", + " )\n", + " parameter_sets.append(params)\n", + " print(f\"Prepared scenario with max_samples={max_samples}\")\n", + "\n", + "print(f\"\\nCreated {len(parameter_sets)} parameter sets\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "17", + "metadata": {}, + "source": [ + "## Run Multiple Kriging Tasks\n", + "\n", + "Execute all scenarios concurrently using `run` with a list of parameters.\n", + "Progress is aggregated across all tasks." 
+ ] + }, + { + "cell_type": "code", + "id": "18", + "metadata": {}, + "source": [ + "from evo.compute.tasks import run\n", + "\n", + "# Run all scenarios in parallel (progress feedback is shown by default)\n", + "print(f\"Submitting {len(parameter_sets)} kriging tasks in parallel...\")\n", + "\n", + "results = await run(manager, parameter_sets, preview=True)\n", + "\n", + "print(f\"\\nAll {len(results)} scenarios completed!\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "19", + "metadata": {}, + "source": [ + "## View Block Model Attributes\n", + "\n", + "Display the block model attributes to see all the newly created scenario columns.\n", + "\n", + "> **Note:** The block model object needs to be refreshed to see the newly added attributes." + ] + }, + { + "cell_type": "code", + "id": "20", + "metadata": {}, + "source": [ + "# Refresh the block model to see the new attributes added by kriging\n", + "block_model = await block_model.refresh()\n", + "\n", + "# Pretty-print the block model to see its current state\n", + "block_model" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "21", + "metadata": {}, + "source": [ + "# View just the attributes (pretty-printed table in Jupyter)\n", + "block_model.attributes" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "22", + "metadata": {}, + "source": [ + "## Query Results from Block Model\n", + "\n", + "Get all scenario results from the Block Model." 
+ ] + }, + { + "cell_type": "code", + "id": "23", + "metadata": {}, + "source": [ + "# Query the Block Model for all scenario columns using to_dataframe()\n", + "scenario_columns = [f\"Samples={ms}\" for ms in max_samples_values]\n", + "\n", + "print(\"Querying Block Model for results...\")\n", + "df = await block_model.to_dataframe(columns=scenario_columns)\n", + "\n", + "print(f\"Retrieved {len(df)} blocks with {len(scenario_columns)} scenario columns\")\n", + "df.head(10)" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "24", + "metadata": {}, + "source": [ + "## Display Results" + ] + }, + { + "cell_type": "code", + "id": "25", + "metadata": {}, + "source": [ + "# Pretty-print the Block Model with all scenarios (includes Portal/Viewer links)\n", + "block_model" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "26", + "metadata": {}, + "source": [ + "# Show individual job result messages\n", + "for i, (job_result, max_samples) in enumerate(zip(results, max_samples_values)):\n", + " print(f\"Scenario {i + 1}: max_samples={max_samples} - {job_result.message}\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "27", + "metadata": {}, + "source": [ + "# Display first result (pretty-printed)\n", + "results[0]" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "28", + "metadata": {}, + "source": [ + "## Analyze Results\n", + "\n", + "Compare the kriging results across different max_samples values." 
+ ] + }, + { + "cell_type": "code", + "id": "29", + "metadata": {}, + "source": [ + "# Show statistics for each scenario\n", + "print(\"Statistics by max_samples:\")\n", + "print(df[scenario_columns].describe())" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "30", + "metadata": {}, + "source": [ + "# Optional: Visualize the differences using plotly\n", + "try:\n", + " import plotly.express as px\n", + "\n", + " # Melt the data for box plot comparison\n", + " df_melted = df[scenario_columns].melt(var_name=\"Scenario\", value_name=\"value\")\n", + "\n", + " fig = px.box(\n", + " df_melted,\n", + " x=\"Scenario\",\n", + " y=\"value\",\n", + " title=\"Kriging Values by Max Samples\",\n", + " )\n", + " fig.show()\n", + "except ImportError:\n", + " print(\"Install plotly for visualization: pip install plotly\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "31", + "metadata": {}, + "source": [ + "## Create a Report on the Block Model\n", + "\n", + "After running kriging, we can create a resource report on the block model.\n", + "\n", + "Reports require:\n", + "1. Columns to have units defined\n", + "2. At least one category column for grouping results" + ] + }, + { + "cell_type": "markdown", + "id": "32", + "metadata": {}, + "source": [ + "### Add a Domain Column\n", + "\n", + "First, let's add a category column for grouping. We'll create simple geological domains\n", + "by slicing the block model into three zones based on elevation (z-coordinate)." 
+ ] + }, + { + "cell_type": "code", + "id": "33", + "metadata": {}, + "source": [ + "# Get block model data\n", + "df = await block_model.to_dataframe()\n", + "\n", + "# Create domain column based on z-coordinate (elevation)\n", + "# Divide into 3 domains: LMS1 (lower), LMS2 (middle), LMS3 (upper)\n", + "z_min, z_max = df[\"z\"].min(), df[\"z\"].max()\n", + "z_range = z_max - z_min\n", + "\n", + "\n", + "def assign_domain(z):\n", + " if z < z_min + z_range / 3:\n", + " return \"LMS1\" # Lower zone\n", + " elif z < z_min + 2 * z_range / 3:\n", + " return \"LMS2\" # Middle zone\n", + " else:\n", + " return \"LMS3\" # Upper zone\n", + "\n", + "\n", + "df[\"domain\"] = df[\"z\"].apply(assign_domain)\n", + "\n", + "# Add the domain column to the block model\n", + "domain_data = df[[\"x\", \"y\", \"z\", \"domain\"]]\n", + "version = await block_model.add_attribute(domain_data, \"domain\")\n", + "print(f\"Added domain column. New version: {version.version_id}\")\n", + "\n", + "# Check domain distribution\n", + "print(\"\\nDomain distribution:\")\n", + "print(df[\"domain\"].value_counts())" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "34", + "metadata": {}, + "source": [ + "# Refresh to see the new attribute\n", + "block_model = await block_model.refresh()\n", + "block_model.attributes" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "35", + "metadata": {}, + "source": [ + "### Set Units on Kriged Attributes\n", + "\n", + "Reports require columns to have units defined. The kriged columns may not have units set,\n", + "so we need to set them before creating a report." 
+ ] + }, + { + "cell_type": "code", + "id": "36", + "metadata": {}, + "source": [ + "from evo.blockmodels.typed import Units\n", + "\n", + "# Set units on the kriged attribute columns\n", + "# Use the first scenario column name as an example\n", + "first_scenario_col = scenario_columns[0]\n", + "\n", + "block_model = await block_model.set_attribute_units(\n", + " {\n", + " first_scenario_col: Units.GRAMS_PER_TONNE, # Set appropriate unit for your data\n", + " }\n", + ")\n", + "print(f\"Set units on {first_scenario_col}\")\n", + "\n", + "# View updated attributes\n", + "block_model.attributes" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "id": "37", + "metadata": {}, + "source": [ + "### Create and Run the Report\n", + "\n", + "Now we can create a report specification that will calculate tonnages and grades by domain.\n", + "\n", + "**Key classes for reports:**\n", + "- `Aggregation` - Enum: `MASS_AVERAGE` (for grades), `SUM` (for metal content)\n", + "- `Units` - Constants for output units (e.g., `Units.GRAMS_PER_TONNE`)\n", + "- `MassUnits` - Constants for mass output (e.g., `MassUnits.TONNES`)" + ] + }, + { + "cell_type": "code", + "id": "38", + "metadata": {}, + "source": [ + "from evo.blockmodels.typed import (\n", + " Aggregation,\n", + " MassUnits,\n", + " ReportCategorySpec,\n", + " ReportColumnSpec,\n", + " ReportSpecificationData,\n", + ")\n", + "\n", + "# Define the report\n", + "report_data = ReportSpecificationData(\n", + " name=\"Kriging Results Report\",\n", + " description=\"Resource estimate by domain using kriged grades\",\n", + " columns=[\n", + " ReportColumnSpec(\n", + " column_name=first_scenario_col,\n", + " aggregation=Aggregation.MASS_AVERAGE, # Use MASS_AVERAGE for grades\n", + " label=\"Kriged Grade\",\n", + " output_unit_id=Units.GRAMS_PER_TONNE, # Use Units class for discoverability\n", + " ),\n", + " ],\n", + " categories=[\n", + " ReportCategorySpec(\n", + " column_name=\"domain\",\n", + " 
label=\"Domain\",\n",
+            "            values=[\"LMS1\", \"LMS2\", \"LMS3\"],\n",
+            "        ),\n",
+            "    ],\n",
+            "    mass_unit_id=MassUnits.TONNES,  # Use MassUnits class\n",
+            "    density_value=2.7,  # Fixed density (or use density_column_name)\n",
+            "    density_unit_id=Units.TONNES_PER_CUBIC_METRE,\n",
+            "    run_now=True,  # Run immediately\n",
+            ")\n",
+            "\n",
+            "# Create the report\n",
+            "report = await block_model.create_report(report_data)\n",
+            "print(f\"Created report: {report.name}\")"
+        ],
+        "outputs": [],
+        "execution_count": null
+    },
+    {
+        "cell_type": "code",
+        "id": "39",
+        "metadata": {},
+        "source": [
+            "# Pretty-print the report (shows BlockSync link)\n",
+            "report"
+        ],
+        "outputs": [],
+        "execution_count": null
+    },
+    {
+        "cell_type": "markdown",
+        "id": "40",
+        "metadata": {},
+        "source": [
+            "### View Report Results\n",
+            "\n",
+            "Get the report results (waits if report is still running)."
+        ]
+    },
+    {
+        "cell_type": "code",
+        "id": "41",
+        "metadata": {},
+        "source": [
+            "# Get the latest report result\n",
+            "result = await report.refresh()\n",
+            "\n",
+            "# Pretty-print the result (displays table in Jupyter)\n",
+            "result"
+        ],
+        "outputs": [],
+        "execution_count": null
+    },
+    {
+        "cell_type": "markdown",
+        "id": "42",
+        "metadata": {},
+        "source": [
+            "### Filter kriging results on a category column\n",
+            "\n",
+            "Kriging can be run with a filter on a category column, which allows only a specific area to be updated rather than the entire block model.\n"
+        ]
+    },
+    {
+        "cell_type": "code",
+        "id": "43",
+        "metadata": {},
+        "source": [
+            "from evo.compute.tasks import RegionFilter\n",
+            "\n",
+            "# Base source configuration\n",
+            "source = source_pointset.attributes[\"Ag_ppm Values\"]\n",
+            "\n",
+            "# Create parameter sets for each scenario, all targeting the same Block Model\n",
+            "# Note: method defaults to ordinary kriging, so we don't need to specify it\n",
+            "parameter_sets = []\n",
+            "for max_samples in max_samples_values:\n",
+            "    params = KrigingParameters(\n",
+            
" source=source,\n", + " target=block_model.attributes[f\"Samples={max_samples}\"],\n", + " variogram=variogram,\n", + " search=SearchNeighborhood(ellipsoid=search_ellipsoid, max_samples=max_samples),\n", + " target_region_filter=RegionFilter(\n", + " attribute=block_model.attributes[\"domain\"],\n", + " names=[\"LMS1\"], # Filter by category name\n", + " ),\n", + " )\n", + " print(params.to_dict())\n", + " parameter_sets.append(params)\n", + " print(f\"Prepared scenario with max_samples={max_samples}\")\n", + "\n", + "# Run all scenarios in parallel (progress feedback is shown by default)\n", + "print(f\"Submitting {len(parameter_sets)} kriging tasks in parallel...\")\n", + "\n", + "results = await run(manager, parameter_sets, preview=True)\n", + "\n", + "print(f\"\\nAll {len(results)} scenarios completed!\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "44", + "metadata": {}, + "source": [ + "# Refresh the block model to see the new attributes added by kriging\n", + "block_model = await block_model.refresh()\n", + "\n", + "# Pretty-print the block model to see its current state\n", + "block_model" + ], + "outputs": [], + "execution_count": null + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.0" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/packages/evo-compute/pyproject.toml b/packages/evo-compute/pyproject.toml index 1f46aa99..6b6f33ea 100644 --- a/packages/evo-compute/pyproject.toml +++ b/packages/evo-compute/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "evo-compute" -version = "0.0.1rc3" +version = "0.0.2" requires-python = ">=3.10" license-files = ["LICENSE.md"] 
dynamic = ["readme"] @@ -11,6 +11,7 @@ authors = [ dependencies = [ "evo-sdk-common", "pydantic>=2", + "typing_extensions>=4.0", ] [project.urls] diff --git a/packages/evo-compute/src/evo/compute/__init__.py b/packages/evo-compute/src/evo/compute/__init__.py index 71e6b33e..2ceae317 100644 --- a/packages/evo-compute/src/evo/compute/__init__.py +++ b/packages/evo-compute/src/evo/compute/__init__.py @@ -9,6 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from . import tasks from .client import JobClient from .data import JobProgress, JobStatusEnum @@ -16,4 +17,5 @@ "JobClient", "JobProgress", "JobStatusEnum", + "tasks", ] diff --git a/packages/evo-compute/src/evo/compute/client.py b/packages/evo-compute/src/evo/compute/client.py index fbbde6af..6b2e47c2 100644 --- a/packages/evo-compute/src/evo/compute/client.py +++ b/packages/evo-compute/src/evo/compute/client.py @@ -84,7 +84,6 @@ def __init__( task: str, job_id: UUID, result_type: type[T_Result] = dict, - preview: bool = False, ) -> None: """ :param connector: The API connector to use. @@ -93,7 +92,6 @@ def __init__( :param task: The task to be executed. :param job_id: The job ID. :param result_type: The type to validate the result against. - :param preview: Whether to use preview mode and include necessary header information. """ self._connector = connector self._org_id = org_id @@ -104,7 +102,6 @@ def __init__( self._type_adapter = TypeAdapter(result_type) self._mutex = asyncio.Lock() self._results: T_Result | JobError | None = None - self._preview = preview @property def id(self) -> UUID: @@ -129,17 +126,8 @@ def url(self) -> str: def __repr__(self) -> str: return self.url - def _get_headers(self) -> dict[str, str]: - """Get the headers dictionary for API requests. - - :return: Headers dictionary with preview header if enabled. 
- """ - return {"API-Preview": "opt-in"} if self._preview else {} - @staticmethod - def from_url( - connector: APIConnector, url: str, result_type: type[T_Result] = dict, preview: bool = False - ) -> JobClient[T_Result]: + def from_url(connector: APIConnector, url: str, result_type: type[T_Result] = dict) -> JobClient[T_Result]: """Create a job client from a status URL. The URL hostname must match the connector base URL. @@ -147,7 +135,6 @@ def from_url( :param connector: The API connector to use. :param url: The status URL of a submitted job. :param result_type: The type to validate the result against. - :param preview: Whether to use preview mode and include necessary header information. :return: A client for managing the referenced job. """ @@ -163,7 +150,7 @@ def from_url( except ValueError: raise ValueError(f"Invalid {key.removesuffix('_id')} ID in URL: {url}") from None - return JobClient(connector=connector, **path_params, result_type=result_type, preview=preview) + return JobClient(connector=connector, **path_params, result_type=result_type) @staticmethod async def submit( @@ -173,7 +160,6 @@ async def submit( task: str, parameters: Mapping[str, Any], result_type: type[T_Result] = dict, - preview: bool = False, ) -> JobClient[T_Result]: """Trigger an asynchronous task within a specific topic with the given parameters. @@ -182,20 +168,17 @@ async def submit( :param task: The task to be executed. :param parameters: The parameters for the task. :param result_type: The type to validate the result against. - :param preview: Whether to use preview mode and include necessary header information. :return: The job that was created. :raises UnknownResponseError: If the Location header is missing or invalid. 
""" - async with connector: response = await TasksApi(connector).execute_task( org_id=str(org_id), topic=topic, task=task, execute_task_request={"parameters": dict(parameters)}, - additional_headers={"API-Preview": "opt-in"} if preview else {}, ) # Location header is the status endpoint of the created job. @@ -203,7 +186,7 @@ async def submit( try: job_url = response.headers["Location"] job_url = connector.base_url + job_url.removeprefix(connector.base_url).removeprefix("/") - return JobClient.from_url(connector, job_url, result_type, preview=preview) + return JobClient.from_url(connector, job_url, result_type) except (KeyError, ValueError): raise UnknownResponseError( status=response.status, reason=response.reason, content=None, headers=response.headers @@ -220,7 +203,6 @@ async def get_status(self) -> JobProgress: topic=self._topic, task=self._task, job_id=self._job_id, - additional_headers=self._get_headers(), ) if response.error: @@ -259,7 +241,6 @@ async def _get_results(self) -> T_Result | JobError: topic=self._topic, task=self._task, job_id=self._job_id, - additional_headers=self._get_headers(), ) with _validating_pydantic_model(response): @@ -313,7 +294,6 @@ async def cancel(self) -> None: topic=self._topic, task=self._task, job_id=self._job_id, - additional_headers=self._get_headers(), ) async def wait_for_results( diff --git a/packages/evo-compute/src/evo/compute/tasks/__init__.py b/packages/evo-compute/src/evo/compute/tasks/__init__.py new file mode 100644 index 00000000..b3eab7bd --- /dev/null +++ b/packages/evo-compute/src/evo/compute/tasks/__init__.py @@ -0,0 +1,224 @@ +# Copyright © 2025 Bentley Systems, Incorporated +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Task-specific clients for Evo Compute. + +This module provides a unified interface for running compute tasks. Tasks are +dispatched based on their parameter types using a registry system. + +Example: + >>> from evo.compute.tasks import run, SearchNeighborhood, Target + >>> from evo.compute.tasks.kriging import KrigingParameters + >>> + >>> # Run a single task (preview=True required for preview APIs like kriging) + >>> result = await run(manager, KrigingParameters(...), preview=True) + >>> + >>> # Run multiple tasks (same or different types) + >>> results = await run(manager, [ + ... KrigingParameters(...), + ... KrigingParameters(...), + ... ], preview=True) +""" + +from __future__ import annotations + +from typing import Any, overload + +from evo.common import IContext +from evo.common.interfaces import IFeedback +from evo.common.utils import NoFeedback, split_feedback + +# Import kriging module to trigger registration +from . 
import kriging as _kriging_module # noqa: F401 + +# Shared components from common module +from .common import ( + CreateAttribute, + Ellipsoid, + EllipsoidRanges, + Rotation, + SearchNeighborhood, + Source, + Target, + UpdateAttribute, +) + +# Result types from kriging (these are general enough for other tasks too) +from .kriging import ( + BlockDiscretisation, + KrigingResult, + RegionFilter, + TaskResult, + TaskResults, +) + + +class _DefaultFeedback: + """Marker class to indicate default feedback should be used.""" + + pass + + +DEFAULT_FEEDBACK = _DefaultFeedback() + + +@overload +async def run( + context: IContext, + parameters: Any, + *, + preview: bool = ..., + fb: IFeedback | _DefaultFeedback = ..., +) -> TaskResult: ... + + +@overload +async def run( + context: IContext, + parameters: list[Any], + *, + preview: bool = ..., + fb: IFeedback | _DefaultFeedback = ..., +) -> TaskResults: ... + + +async def run( + context: IContext, + parameters: Any | list[Any], + *, + preview: bool = False, + fb: IFeedback | _DefaultFeedback = DEFAULT_FEEDBACK, +) -> TaskResult | TaskResults: + """ + Run one or more compute tasks. + + Tasks are dispatched to the appropriate runner based on the parameter type. + This allows running different task types together in a single call. + + Args: + context: The context providing connector and org_id + parameters: A single parameter object or list of parameters (can be mixed types) + preview: If True, sets the ``API-Preview: opt-in`` header on requests. + Required for tasks that are still in preview (e.g. kriging). + Defaults to False. + fb: Feedback interface for progress updates. If not provided, uses default + feedback showing "Running x/y..." + + Returns: + TaskResult for a single task, or TaskResults for multiple tasks + + Example (single task): + >>> from evo.compute.tasks import run, SearchNeighborhood, Target + >>> from evo.compute.tasks.kriging import KrigingParameters + >>> + >>> params = KrigingParameters( + ... 
source=pointset.attributes["grade"], + ... target=Target.new_attribute(block_model, "kriged_grade"), + ... variogram=variogram, + ... search=SearchNeighborhood( + ... ellipsoid=var_ell.scaled(2.0), + ... max_samples=20, + ... ), + ... ) + >>> result = await run(manager, params, preview=True) + + Example (multiple tasks): + >>> results = await run(manager, [ + ... KrigingParameters(...), + ... KrigingParameters(...), + ... ], preview=True) + >>> results[0] # Access first result + """ + import asyncio + + from .common.runner import _registry + + # Convert single parameter to list for uniform handling + is_single = not isinstance(parameters, list) + param_list = [parameters] if is_single else parameters + + if len(param_list) == 0: + return TaskResults([]) + + total = len(param_list) + + # Create default feedback widget + if isinstance(fb, _DefaultFeedback): + try: + from evo.notebooks import FeedbackWidget + + actual_fb: IFeedback = FeedbackWidget(label="Tasks") + except ImportError: + actual_fb = NoFeedback + else: + actual_fb = fb + + # Validate all parameters have registered runners upfront + runners = [] + for params in param_list: + runner = _registry.get_runner_for_params(params) + runners.append(runner) + + # Split feedback across tasks for proper progress aggregation + per_task_fb = split_feedback(actual_fb, [1.0] * total) + + async def _run_one(i: int, params: Any, runner, task_fb: IFeedback) -> tuple[int, Any]: + result = await runner(context, params, preview=preview) + # Mark this task's portion as complete (progress bar updates automatically via split_feedback) + task_fb.progress(1.0) + return i, result + + tasks = [ + asyncio.create_task(_run_one(i, params, runner, per_task_fb[i])) + for i, (params, runner) in enumerate(zip(param_list, runners)) + ] + + results: list[Any | None] = [None] * total + done_count = 0 + + for fut in asyncio.as_completed(tasks): + try: + i, res = await fut + results[i] = res + done_count += 1 + # Update message with correct 
count (progress bar is handled by split_feedback) + actual_fb.progress(done_count / total, f"Running {done_count}/{total}...") + except Exception: + # Cancel remaining to fail fast + for t in tasks: + t.cancel() + raise + + # Final completion message + actual_fb.progress(1.0, f"Completed {total}/{total}") + + # Return single result or wrapped results + if is_single: + return results[0] + return TaskResults([r for r in results if r is not None]) + + +__all__ = [ + "BlockDiscretisation", + "CreateAttribute", + "Ellipsoid", + "EllipsoidRanges", + "KrigingResult", + "RegionFilter", + "Rotation", + "SearchNeighborhood", + "Source", + "Target", + "TaskResult", + "TaskResults", + "UpdateAttribute", + "run", +] diff --git a/packages/evo-compute/src/evo/compute/tasks/common/__init__.py b/packages/evo-compute/src/evo/compute/tasks/common/__init__.py new file mode 100644 index 00000000..c817e5c4 --- /dev/null +++ b/packages/evo-compute/src/evo/compute/tasks/common/__init__.py @@ -0,0 +1,50 @@ +# Copyright © 2025 Bentley Systems, Incorporated +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Common primitives shared across geostatistics compute tasks.""" + +from evo.objects.typed.types import Ellipsoid, EllipsoidRanges, Rotation + +from .runner import TaskRegistry, get_task_runner, register_task_runner, run_tasks +from .search import SearchNeighborhood +from .source_target import ( + CreateAttribute, + GeoscienceObjectReference, + Source, + Target, + UpdateAttribute, + get_attribute_expression, + is_typed_attribute, + serialize_object_reference, + source_from_attribute, + target_from_attribute, +) + +__all__ = [ + "CreateAttribute", + "Ellipsoid", + "EllipsoidRanges", + "GeoscienceObjectReference", + "Rotation", + "SearchNeighborhood", + "Source", + "Target", + "TaskRegistry", + "UpdateAttribute", + "get_attribute_expression", + "get_task_runner", + "is_typed_attribute", + "register_task_runner", + "run_tasks", + "serialize_object_reference", + "source_from_attribute", + "target_from_attribute", +] diff --git a/packages/evo-compute/src/evo/compute/tasks/common/runner.py b/packages/evo-compute/src/evo/compute/tasks/common/runner.py new file mode 100644 index 00000000..57e714b7 --- /dev/null +++ b/packages/evo-compute/src/evo/compute/tasks/common/runner.py @@ -0,0 +1,227 @@ +# Copyright © 2025 Bentley Systems, Incorporated +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Task runner registry for dispatching tasks based on parameter types. + +This module provides a registry-based system for running compute tasks. 
Each task +type registers its parameter class and runner function, allowing the unified `run()` +function to dispatch to the correct runner based on the parameter type. + +This enables running multiple different task types together in a single call. + +Example: + >>> from evo.compute.tasks import run + >>> from evo.compute.tasks.kriging import KrigingParameters + >>> from evo.compute.tasks.simulation import SimulationParameters # future + >>> + >>> # Run mixed task types together + >>> results = await run(manager, [ + ... KrigingParameters(...), + ... SimulationParameters(...), + ... KrigingParameters(...), + ... ], preview=True) +""" + +from __future__ import annotations + +import asyncio +from typing import Any, Awaitable, Callable, TypeVar + +from evo.common import IContext +from evo.common.interfaces import IFeedback +from evo.common.utils import NoFeedback, split_feedback + +__all__ = [ + "TaskRegistry", + "get_task_runner", + "register_task_runner", + "run_tasks", +] + + +# Type for task results +TResult = TypeVar("TResult") + +# Type for runner functions: async (context, params, *, preview) -> result +RunnerFunc = Callable[..., Awaitable[Any]] + + +class TaskRegistry: + """Registry mapping parameter types to their runner functions. + + This is a singleton that stores the mapping from parameter class types + to their corresponding async runner functions. + """ + + _instance: "TaskRegistry | None" = None + _runners: dict[type, RunnerFunc] + + def __new__(cls) -> "TaskRegistry": + if cls._instance is None: + cls._instance = super().__new__(cls) + cls._instance._runners = {} + return cls._instance + + def register(self, param_type: type, runner: RunnerFunc) -> None: + """Register a runner function for a parameter type. 
+ + Args: + param_type: The parameter class (e.g., KrigingParameters) + runner: Async function with signature (context, params) -> result + """ + self._runners[param_type] = runner + + def get_runner(self, param_type: type) -> RunnerFunc | None: + """Get the runner function for a parameter type. + + Args: + param_type: The parameter class to look up + + Returns: + The registered runner function, or None if not found + """ + return self._runners.get(param_type) + + def get_runner_for_params(self, params: Any) -> RunnerFunc: + """Get the runner function for a parameter instance. + + Args: + params: A parameter object instance + + Returns: + The registered runner function + + Raises: + TypeError: If no runner is registered for the parameter type + """ + param_type = type(params) + runner = self._runners.get(param_type) + if runner is None: + registered = ", ".join(t.__name__ for t in self._runners.keys()) + raise TypeError( + f"No task runner registered for parameter type '{param_type.__name__}'. " + f"Registered types: {registered or 'none'}" + ) + return runner + + def clear(self) -> None: + """Clear all registered runners (mainly for testing).""" + self._runners.clear() + + +# Global registry instance +_registry = TaskRegistry() + + +def register_task_runner(param_type: type, runner: RunnerFunc) -> None: + """Register a task runner function for a parameter type. + + This function is called by task modules to register their runners. + + Args: + param_type: The parameter class (e.g., KrigingParameters) + runner: Async function with signature (context, params) -> result + + Example: + >>> from evo.compute.tasks.common.runner import register_task_runner + >>> + >>> async def _run_kriging(context, params): + ... # implementation + ... pass + >>> + >>> register_task_runner(KrigingParameters, _run_kriging) + """ + _registry.register(param_type, runner) + + +def get_task_runner(param_type: type) -> RunnerFunc | None: + """Get the registered runner for a parameter type. 
+ + Args: + param_type: The parameter class to look up + + Returns: + The registered runner function, or None if not found + """ + return _registry.get_runner(param_type) + + +async def run_tasks( + context: IContext, + parameters: list[Any], + *, + fb: IFeedback = NoFeedback, + preview: bool = False, +) -> list[Any]: + """Run multiple tasks concurrently, dispatching based on parameter types. + + This function looks up the appropriate runner for each parameter based on + its type, allowing different task types to be run together. + + Args: + context: The context providing connector and org_id + parameters: List of parameter objects (can be mixed types) + fb: Feedback interface for progress updates + preview: If True, sets the ``API-Preview: opt-in`` header on requests. + Required for tasks that are still in preview. Defaults to False. + + Returns: + List of results in the same order as the input parameters + + Raises: + TypeError: If any parameter type doesn't have a registered runner + + Example: + >>> # Run mixed task types + >>> results = await run_tasks(manager, [ + ... KrigingParameters(...), + ... SimulationParameters(...), # future task type + ... 
], preview=True) + """ + if len(parameters) == 0: + return [] + + total = len(parameters) + + # Validate all parameters have registered runners upfront + runners = [] + for params in parameters: + runner = _registry.get_runner_for_params(params) + runners.append(runner) + + # Split feedback across tasks + per_task_fb = split_feedback(fb, [1.0] * total) + + async def _run_one(i: int, params: Any, runner: RunnerFunc) -> tuple[int, Any]: + result = await runner(context, params, preview=preview) + return i, result + + tasks = [ + asyncio.create_task(_run_one(i, params, runner)) for i, (params, runner) in enumerate(zip(parameters, runners)) + ] + + results: list[Any | None] = [None] * total + + done_count = 0 + for fut in asyncio.as_completed(tasks): + try: + i, res = await fut + results[i] = res + done_count += 1 + per_task_fb[i].progress(1.0, f"Completed {done_count}/{total}") + except Exception: + done_count += 1 + # Cancel remaining to fail fast + for t in tasks: + t.cancel() + raise + + return [r for r in results if r is not None] diff --git a/packages/evo-compute/src/evo/compute/tasks/common/search.py b/packages/evo-compute/src/evo/compute/tasks/common/search.py new file mode 100644 index 00000000..29dbd359 --- /dev/null +++ b/packages/evo-compute/src/evo/compute/tasks/common/search.py @@ -0,0 +1,73 @@ +# Copyright © 2025 Bentley Systems, Incorporated +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Search neighborhood parameters for geostatistical operations.""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any + +from evo.objects.typed.types import Ellipsoid + +__all__ = [ + "SearchNeighborhood", +] + + +@dataclass +class SearchNeighborhood: + """Search neighborhood parameters for geostatistical operations. + + Defines how to find nearby samples when performing spatial interpolation + or estimation. Used by kriging, simulation, and other geostatistical tasks. + + The search neighborhood is defined by an ellipsoid (spatial extent and + orientation) and constraints on the number of samples to use. + + Example: + >>> search = SearchNeighborhood( + ... ellipsoid=Ellipsoid( + ... ranges=EllipsoidRanges(major=200.0, semi_major=150.0, minor=100.0), + ... rotation=Rotation(dip_azimuth=45.0), + ... ), + ... max_samples=20, + ... ) + """ + + ellipsoid: Ellipsoid + """The ellipsoid defining the spatial extent to search for samples.""" + + max_samples: int + """The maximum number of samples to use for each evaluation point.""" + + min_samples: int | None = None + """The minimum number of samples required. 
If fewer are found, the point may be skipped.""" + + def __init__( + self, + ellipsoid: Ellipsoid, + max_samples: int, + min_samples: int | None = None, + ): + self.ellipsoid = ellipsoid + self.max_samples = max_samples + self.min_samples = min_samples + + def to_dict(self) -> dict[str, Any]: + """Serialize to dictionary.""" + result = { + "ellipsoid": self.ellipsoid.to_dict(), + "max_samples": self.max_samples, + } + if self.min_samples is not None: + result["min_samples"] = self.min_samples + return result diff --git a/packages/evo-compute/src/evo/compute/tasks/common/source_target.py b/packages/evo-compute/src/evo/compute/tasks/common/source_target.py new file mode 100644 index 00000000..4e99da1a --- /dev/null +++ b/packages/evo-compute/src/evo/compute/tasks/common/source_target.py @@ -0,0 +1,316 @@ +# Copyright © 2025 Bentley Systems, Incorporated +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Source and target specifications for compute tasks.""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, Union + +from evo.objects.typed.attributes import ( + Attribute, + BlockModelAttribute, + BlockModelPendingAttribute, + PendingAttribute, +) +from typing_extensions import TypeAlias + +__all__ = [ + "CreateAttribute", + "GeoscienceObjectReference", + "Source", + "Target", + "UpdateAttribute", + "get_attribute_expression", + "is_typed_attribute", + "serialize_object_reference", + "source_from_attribute", + "target_from_attribute", +] + +# All typed attribute types that compute tasks can work with. +TYPED_ATTRIBUTE_TYPES = (Attribute, PendingAttribute, BlockModelAttribute, BlockModelPendingAttribute) + +# Type alias for any object that can be serialized to a geoscience object reference URL +# Supports: str, ObjectReference, BaseObject, DownloadedObject, ObjectMetadata +GeoscienceObjectReference: TypeAlias = Union[str, Any] + + +def is_typed_attribute(value: Any) -> bool: + """Check if a value is a typed attribute object from evo.objects.typed.""" + return isinstance(value, TYPED_ATTRIBUTE_TYPES) + + +def get_attribute_expression( + attr: Attribute | PendingAttribute | BlockModelAttribute | BlockModelPendingAttribute, +) -> str: + """Get the JMESPath expression to access an attribute from its parent object. + + For ``Attribute`` (existing, from a DownloadedObject): uses the schema path context + and key-based lookup, e.g. ``"locations.attributes[?key=='abc']"``. + + For ``PendingAttribute``, ``BlockModelAttribute``, or ``BlockModelPendingAttribute``: + uses name-based lookup, e.g. ``"attributes[?name=='grade']"``. + + Args: + attr: A typed attribute object. + + Returns: + A JMESPath expression string. + + Raises: + TypeError: If the attribute type is not recognised. 
+ """ + if isinstance(attr, Attribute): + base_path = attr._context.schema_path or "attributes" + return f"{base_path}[?key=='{attr.key}']" + elif isinstance(attr, (PendingAttribute, BlockModelAttribute, BlockModelPendingAttribute)): + return f"attributes[?name=='{attr.name}']" + else: + raise TypeError(f"Cannot get expression for attribute type {type(attr).__name__}") + + +def serialize_object_reference(value: GeoscienceObjectReference) -> str: + """ + Serialize an object reference to a string URL. + + Supports: + - str: returned as-is + - ObjectReference: str(value) + - BaseObject (typed objects like PointSet): value.metadata.url + - DownloadedObject: value.metadata.url + - ObjectMetadata: value.url + + Args: + value: The value to serialize + + Returns: + String URL of the object reference + + Raises: + TypeError: If the value type is not supported + """ + if isinstance(value, str): + return value + + # Check for ObjectReference (has __str__ that returns the URL) + type_name = type(value).__name__ + if type_name == "ObjectReference": + return str(value) + + # Check for typed objects (BaseObject subclasses like PointSet, Regular3DGrid) + if hasattr(value, "metadata") and hasattr(value.metadata, "url"): + return value.metadata.url + + # Check for ObjectMetadata + if hasattr(value, "url") and isinstance(value.url, str): + return value.url + + raise TypeError(f"Cannot serialize object reference of type {type(value)}") + + +@dataclass +class Source: + """The source object and attribute containing known values. + + Used to specify where input data comes from for geostatistical operations. + Can be initialized directly, or more commonly from a typed object's attribute. 
+ + Example: + >>> # From a typed object attribute (preferred): + >>> source = pointset.attributes["grade"] + >>> + >>> # Or explicitly: + >>> source = Source(object=pointset, attribute="grade") + """ + + object: GeoscienceObjectReference + """Reference to the source geoscience object.""" + + attribute: str + """Name of the attribute on the source object.""" + + def __init__(self, object: GeoscienceObjectReference, attribute: str): + self.object = object + self.attribute = attribute + + def to_dict(self) -> dict[str, Any]: + """Serialize to dictionary.""" + return { + "object": serialize_object_reference(self.object), + "attribute": self.attribute, + } + + +@dataclass +class CreateAttribute: + """Specification for creating a new attribute on a target object.""" + + name: str + """The name of the attribute to create.""" + + def __init__(self, name: str): + self.name = name + + def to_dict(self) -> dict[str, Any]: + """Serialize to dictionary.""" + return { + "operation": "create", + "name": self.name, + } + + +@dataclass +class UpdateAttribute: + """Specification for updating an existing attribute on a target object.""" + + reference: str + """Reference to an existing attribute to update.""" + + def __init__(self, reference: str): + self.reference = reference + + def to_dict(self) -> dict[str, Any]: + """Serialize to dictionary.""" + return { + "operation": "update", + "reference": self.reference, + } + + +@dataclass +class Target: + """The target object and attribute to create or update with results. + + Used to specify where output data should be written for geostatistical operations. 
+ + Example: + >>> # Create a new attribute on a target object: + >>> target = Target.new_attribute(block_model, "kriged_grade") + >>> + >>> # Or update an existing attribute: + >>> target = Target(object=grid, attribute=UpdateAttribute("existing_ref")) + """ + + object: GeoscienceObjectReference + """Object to write results onto.""" + + attribute: CreateAttribute | UpdateAttribute + """Attribute specification (create new or update existing).""" + + def __init__(self, object: GeoscienceObjectReference, attribute: CreateAttribute | UpdateAttribute): + self.object = object + self.attribute = attribute + + @classmethod + def new_attribute(cls, object: GeoscienceObjectReference, attribute_name: str) -> Target: + """ + Create a Target that will create a new attribute on the target object. + + Args: + object: The target object to write results onto. + attribute_name: The name of the new attribute to create. + + Returns: + A Target instance configured to create a new attribute. + + Example: + >>> target = Target.new_attribute(block_model, "kriged_grade") + """ + return cls(object=object, attribute=CreateAttribute(name=attribute_name)) + + def to_dict(self) -> dict[str, Any]: + """Serialize to dictionary.""" + if hasattr(self.attribute, "to_dict"): + attribute_value = self.attribute.to_dict() + elif isinstance(self.attribute, dict): + attribute_value = self.attribute + else: + attribute_value = self.attribute + + return { + "object": serialize_object_reference(self.object), + "attribute": attribute_value, + } + + +# ============================================================================= +# Typed attribute → Source / Target conversion +# ============================================================================= + + +def source_from_attribute(attr: Attribute) -> Source: + """Convert a typed ``Attribute`` to a :class:`Source`. + + Only ``Attribute`` (an existing attribute on a DownloadedObject) can be used + as a source, since source data must already exist. 
+ + Args: + attr: An existing ``Attribute`` from a DownloadedObject. + + Returns: + A :class:`Source` referencing the parent object and attribute expression. + + Raises: + TypeError: If *attr* is not an ``Attribute`` instance. + """ + if not isinstance(attr, Attribute): + raise TypeError(f"Only Attribute (from a DownloadedObject) can be used as a source, got {type(attr).__name__}") + + return Source( + object=str(attr._obj.metadata.url), + attribute=get_attribute_expression(attr), + ) + + +def target_from_attribute( + attr: Attribute | PendingAttribute | BlockModelAttribute | BlockModelPendingAttribute, +) -> Target: + """Convert a typed attribute object to a :class:`Target`. + + Handles ``Attribute``, ``PendingAttribute``, ``BlockModelAttribute``, and + ``BlockModelPendingAttribute`` from ``evo.objects.typed.attributes``. + + For existing attributes, returns an update operation referencing the attribute. + For pending attributes, returns a create operation with the attribute name. + + Args: + attr: A typed attribute object. Must have a non-``None`` ``_obj`` + reference to its parent object. + + Returns: + A :class:`Target` configured based on the attribute. + + Raises: + TypeError: If *attr* is not a recognised typed attribute, or if it has + no ``_obj`` reference to its parent object. + """ + if not is_typed_attribute(attr): + raise TypeError( + f"Cannot convert {type(attr).__name__} to a Target. " + "Expected Attribute, PendingAttribute, BlockModelAttribute, or BlockModelPendingAttribute." + ) + + if attr._obj is None: + raise TypeError( + f"Cannot determine target object from attribute type {type(attr).__name__}. " + "Attribute must have an _obj reference to its parent object." 
+ ) + + if attr.exists: + attr_spec: CreateAttribute | UpdateAttribute = UpdateAttribute( + reference=get_attribute_expression(attr), + ) + else: + attr_spec = CreateAttribute(name=attr.name) + + return Target(object=attr._obj, attribute=attr_spec) diff --git a/packages/evo-compute/src/evo/compute/tasks/kriging.py b/packages/evo-compute/src/evo/compute/tasks/kriging.py new file mode 100644 index 00000000..3d7f6931 --- /dev/null +++ b/packages/evo-compute/src/evo/compute/tasks/kriging.py @@ -0,0 +1,694 @@ +# Copyright © 2025 Bentley Systems, Incorporated +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Kriging compute task client. + +This module provides typed dataclass models and convenience functions for running +the Kriging task (geostatistics/kriging). + +Example: + >>> from evo.compute.tasks import run, SearchNeighborhood, Ellipsoid, EllipsoidRanges + >>> from evo.compute.tasks.kriging import KrigingParameters + >>> + >>> params = KrigingParameters( + ... source=pointset.attributes["grade"], + ... target=Target.new_attribute(block_model, "kriged_grade"), + ... variogram=variogram, + ... search=SearchNeighborhood( + ... ellipsoid=Ellipsoid(ranges=EllipsoidRanges(200, 150, 100)), + ... max_samples=20, + ... ), + ... 
) + >>> result = await run(manager, params, preview=True) +""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, TypeVar + +from evo.common import IContext +from evo.common.interfaces import IFeedback +from evo.common.utils import NoFeedback, Retry +from evo.objects.typed.attributes import Attribute + +from ..client import JobClient + +# Import shared components +from .common import ( + GeoscienceObjectReference, + SearchNeighborhood, + Source, + Target, + get_attribute_expression, + is_typed_attribute, + serialize_object_reference, + source_from_attribute, + target_from_attribute, +) +from .common.runner import register_task_runner + +__all__ = [ + # Kriging-specific (users import from evo.compute.tasks.kriging) + "BlockDiscretisation", + "KrigingMethod", + "KrigingParameters", + "OrdinaryKriging", + "RegionFilter", + "SimpleKriging", +] + + +# Type variable for generic result type +TResult = TypeVar("TResult", bound="TaskResult") + + +# ============================================================================= +# Kriging Method Types +# ============================================================================= + + +@dataclass +class SimpleKriging: + """Simple kriging method with a known constant mean. + + Use when the mean of the variable is known and constant across the domain. + + Example: + >>> method = SimpleKriging(mean=100.0) + """ + + mean: float + """The mean value, assumed to be constant across the domain.""" + + def __init__(self, mean: float): + self.mean = mean + + def to_dict(self) -> dict[str, Any]: + """Serialize to dictionary.""" + return { + "type": "simple", + "mean": self.mean, + } + + +@dataclass +class OrdinaryKriging: + """Ordinary kriging method with unknown local mean. + + The most common kriging method. Estimates the local mean from nearby samples. + This is the default kriging method if none is specified. 
+ """ + + def to_dict(self) -> dict[str, Any]: + """Serialize to dictionary.""" + return { + "type": "ordinary", + } + + +class KrigingMethod: + """Factory for kriging methods. + + Provides convenient access to kriging method types. + + Example: + >>> # Use ordinary kriging (most common) + >>> method = KrigingMethod.ORDINARY + >>> + >>> # Use simple kriging with known mean + >>> method = KrigingMethod.simple(mean=100.0) + """ + + ORDINARY: OrdinaryKriging = OrdinaryKriging() + """Ordinary kriging - estimates local mean from nearby samples.""" + + @staticmethod + def simple(mean: float) -> SimpleKriging: + """Create a simple kriging method with the given mean. + + Args: + mean: The known constant mean value across the domain. + + Returns: + SimpleKriging instance configured with the given mean. + """ + return SimpleKriging(mean) + + +# ============================================================================= +# Block Discretisation +# ============================================================================= + + +@dataclass +class BlockDiscretisation: + """Sub-block discretisation for block kriging. + + When provided, each target block is subdivided into ``nx * ny * nz`` + sub-cells and the kriged value is averaged across these sub-cells. + When omitted (``None``), point kriging is performed. + + Only applicable when the target is a 3D grid or block model. + + Each dimension must be an integer between 1 and 9 (inclusive). + The default value of 1 in every direction is equivalent to point kriging. 
+ + Example: + >>> discretisation = BlockDiscretisation(nx=3, ny=3, nz=2) + """ + + nx: int + """Number of subdivisions in the x direction (1–9).""" + + ny: int + """Number of subdivisions in the y direction (1–9).""" + + nz: int + """Number of subdivisions in the z direction (1–9).""" + + def __init__(self, nx: int = 1, ny: int = 1, nz: int = 1): + for name, value in [("nx", nx), ("ny", ny), ("nz", nz)]: + if not isinstance(value, int): + raise TypeError(f"{name} must be an integer, got {type(value).__name__}") + if value < 1 or value > 9: + raise ValueError(f"{name} must be between 1 and 9, got {value}") + self.nx = nx + self.ny = ny + self.nz = nz + + def to_dict(self) -> dict[str, Any]: + """Serialize to dictionary.""" + return { + "nx": self.nx, + "ny": self.ny, + "nz": self.nz, + } + + +# ============================================================================= +# Region Filter +# ============================================================================= + + +@dataclass +class RegionFilter: + """Region filter for restricting kriging to specific categories on the target. + + Use either `names` OR `values`, not both: + - `names`: Category names (strings) - used for CategoryAttribute with string lookup + - `values`: Integer values - used for integer-indexed categories or BlockModel integer columns + + Example: + >>> # Filter by category names (string lookup) + >>> filter_by_name = RegionFilter( + ... attribute=block_model.attributes["domain"], + ... names=["LMS1", "LMS2"], + ... ) + >>> + >>> # Filter by integer values (direct index matching) + >>> filter_by_value = RegionFilter( + ... attribute=block_model.attributes["domain"], + ... values=[1, 2, 3], + ... 
) + """ + + attribute: Any + """The category attribute to filter on (from target object).""" + + names: list[str] | None = None + """Category names to include (mutually exclusive with values).""" + + values: list[int] | None = None + """Integer category keys to include (mutually exclusive with names).""" + + def __init__( + self, + attribute: Any, + names: list[str] | None = None, + values: list[int] | None = None, + ): + if names is not None and values is not None: + raise ValueError("Only one of 'names' or 'values' may be provided, not both.") + if names is None and values is None: + raise ValueError("One of 'names' or 'values' must be provided.") + + self.attribute = attribute + self.names = names + self.values = values + + def to_dict(self) -> dict[str, Any]: + """Serialize to dictionary for the compute task API.""" + if is_typed_attribute(self.attribute): + attribute_expr = get_attribute_expression(self.attribute) + elif isinstance(self.attribute, str): + attribute_expr = self.attribute + else: + raise TypeError(f"Cannot serialize region filter attribute of type {type(self.attribute)}") + + result: dict[str, Any] = {"attribute": attribute_expr} + + if self.names is not None: + result["names"] = self.names + if self.values is not None: + result["values"] = self.values + + return result + + +# ============================================================================= +# Kriging Parameters +# ============================================================================= + + +@dataclass +class KrigingParameters: + """Parameters for the kriging task. + + Defines all inputs needed to run a kriging interpolation task. + + Example: + >>> from evo.compute.tasks import run, SearchNeighborhood, Ellipsoid, EllipsoidRanges + >>> from evo.compute.tasks.kriging import KrigingParameters, RegionFilter + >>> + >>> params = KrigingParameters( + ... source=pointset.attributes["grade"], # Source attribute + ... 
target=block_model.attributes["kriged_grade"], # Target attribute (creates if doesn't exist) + ... variogram=variogram, # Variogram model + ... search=SearchNeighborhood( + ... ellipsoid=Ellipsoid(ranges=EllipsoidRanges(200, 150, 100)), + ... max_samples=20, + ... ), + ... # method defaults to ordinary kriging + ... ) + >>> + >>> # With region filter to restrict kriging to specific categories on target: + >>> params_filtered = KrigingParameters( + ... source=pointset.attributes["grade"], + ... target=block_model.attributes["kriged_grade"], + ... variogram=variogram, + ... search=SearchNeighborhood(...), + ... target_region_filter=RegionFilter( + ... attribute=block_model.attributes["domain"], + ... names=["LMS1", "LMS2"], + ... ), + ... ) + """ + + source: Source + """The source object and attribute containing known values.""" + + target: Target + """The target object and attribute to create or update with kriging results.""" + + variogram: GeoscienceObjectReference + """Model of the covariance within the domain (Variogram object or reference).""" + + search: SearchNeighborhood + """Search neighborhood parameters.""" + + method: SimpleKriging | OrdinaryKriging | None = None + """The kriging method to use. Defaults to ordinary kriging if not specified.""" + + target_region_filter: RegionFilter | None = None + """Optional region filter to restrict kriging to specific categories on the target object.""" + + block_discretisation: BlockDiscretisation | None = None + """Optional sub-block discretisation for block kriging. + + When provided, each target block is subdivided into nx × ny × nz sub-cells + and the kriged value is averaged across these sub-cells. When omitted, + point kriging is performed. Only applicable when the target is a 3D grid + or block model. 
+ """ + + def __init__( + self, + source: Source | Any, # Also accepts Attribute from evo.objects.typed + target: Target | Any, # Also accepts Attribute/PendingAttribute from evo.objects.typed + variogram: GeoscienceObjectReference, + search: SearchNeighborhood, + method: SimpleKriging | OrdinaryKriging | None = None, + target_region_filter: RegionFilter | None = None, + block_discretisation: BlockDiscretisation | None = None, + ): + # Handle Attribute types from evo.objects.typed.attributes + if isinstance(source, Attribute): + source = source_from_attribute(source) + + # Handle target attribute types (Attribute, PendingAttribute, BlockModelAttribute, BlockModelPendingAttribute) + if is_typed_attribute(target): + target = target_from_attribute(target) + + self.source = source + self.target = target + self.variogram = variogram + self.search = search + self.method = method or OrdinaryKriging() + self.target_region_filter = target_region_filter + self.block_discretisation = block_discretisation + + def to_dict(self) -> dict[str, Any]: + """Serialize to dictionary.""" + target_dict = self.target.to_dict() + + # Add region filter to target if provided + if self.target_region_filter is not None: + target_dict["region_filter"] = self.target_region_filter.to_dict() + + result = { + "source": self.source.to_dict(), + "target": target_dict, + "variogram": serialize_object_reference(self.variogram), + "neighborhood": self.search.to_dict(), + "kriging_method": self.method.to_dict(), + } + + # Add block discretisation if provided (omit for point kriging) + if self.block_discretisation is not None: + result["block_discretisation"] = self.block_discretisation.to_dict() + + return result + + +# ============================================================================= +# Kriging Result Types +# ============================================================================= + + +@dataclass +class _KrigingAttribute: + """Attribute containing the kriging result (internal).""" + + 
reference: str + name: str + + +@dataclass +class _KrigingTarget: + """The target that was created or updated (internal).""" + + reference: str + name: str + description: Any + schema_id: str + attribute: _KrigingAttribute + + +# ============================================================================= +# Base Task Result Classes +# ============================================================================= + + +class TaskResult: + """Base class for compute task results. + + Provides common functionality for all task results including: + - Pretty-printing in Jupyter notebooks + - Portal URL extraction + - Access to target object and data + """ + + message: str + """A message describing what happened in the task.""" + + _target: _KrigingTarget + """Internal target information.""" + + _context: IContext | None = None + """The context used to run the task (for convenience methods).""" + + def __init__(self, message: str, target: _KrigingTarget): + self.message = message + self._target = target + self._context = None + + @property + def target_name(self) -> str: + """The name of the target object.""" + return self._target.name + + @property + def target_reference(self) -> str: + """Reference URL to the target object.""" + return self._target.reference + + @property + def attribute_name(self) -> str: + """The name of the attribute that was created/updated.""" + return self._target.attribute.name + + @property + def schema_type(self) -> str: + """The schema type of the target object (e.g., 'regular-masked-3d-grid').""" + schema = self._target.schema_id + if "/" in schema: + parts = schema.split("/") + for part in parts: + if part and not part.startswith("objects") and "." not in part and part[0].isalpha(): + return part + return schema + + async def get_target_object(self, context: IContext | None = None): + """Load and return the target geoscience object. + + Args: + context: Optional context to use. If not provided, uses the context + from when the task was run. 
+ + Returns: + The typed geoscience object (e.g., Regular3DGrid, RegularMasked3DGrid, BlockModel) + + Example: + >>> result = await run(manager, params) + >>> target = await result.get_target_object() + >>> target # Pretty-prints with Portal/Viewer links + """ + from evo.objects.typed import object_from_reference + + ctx = context or self._context + if ctx is None: + raise ValueError( + "No context available. Either pass a context to get_target_object() " + "or ensure the result was returned from run()." + ) + return await object_from_reference(ctx, self._target.reference) + + async def to_dataframe(self, context: IContext | None = None, columns: list[str] | None = None): + """Get the task results as a DataFrame. + + This is the simplest way to access the task output data. It loads + the target object and returns its data as a pandas DataFrame. + + Args: + context: Optional context to use. If not provided, uses the context + from when the task was run. + columns: Optional list of column names to include. If None, includes + all columns. Use ["*"] to explicitly request all columns. + + Returns: + A pandas DataFrame containing the task results. + + Example: + >>> result = await run(manager, params) + >>> df = await result.to_dataframe() + >>> df.head() + """ + target_obj = await self.get_target_object(context) + + # Try different methods to get the dataframe based on object type + if hasattr(target_obj, "to_dataframe"): + # BlockModel, PointSet, and similar objects with to_dataframe + if columns is not None: + return await target_obj.to_dataframe(columns=columns) + return await target_obj.to_dataframe() + elif hasattr(target_obj, "cells") and hasattr(target_obj.cells, "to_dataframe"): + # Grid objects (Regular3DGrid, RegularMasked3DGrid, etc.) + return await target_obj.cells.to_dataframe() + else: + raise TypeError( + f"Don't know how to get DataFrame from {type(target_obj).__name__}. " + "Use get_target_object() and access the data manually." 
+ ) + + def _get_result_type_name(self) -> str: + """Get the display name for this result type.""" + return "Task" + + def __repr__(self) -> str: + """String representation.""" + lines = [ + f"✓ {self._get_result_type_name()} Result", + f" Message: {self.message}", + f" Target: {self.target_name}", + f" Attribute: {self.attribute_name}", + ] + return "\n".join(lines) + + +class TaskResults: + """Container for multiple task results with pretty-printing support. + + Provides iteration and indexing support for accessing individual results. + + Example: + >>> results = await run(manager, [params1, params2, params3]) + >>> results # Pretty-prints all results + >>> results[0] # Access first result + >>> for result in results: + ... print(result.attribute_name) + """ + + def __init__(self, results: list[TaskResult]): + self._results = results + + @property + def results(self) -> list[TaskResult]: + """The list of task results.""" + return self._results + + def __len__(self) -> int: + return len(self._results) + + def __iter__(self): + return iter(self._results) + + def __getitem__(self, index: int) -> TaskResult: + return self._results[index] + + def __repr__(self) -> str: + """String representation.""" + if not self._results: + return "TaskResults([])" + result_type = self._results[0]._get_result_type_name() + lines = [f"✓ {len(self._results)} {result_type} Results:"] + for i, result in enumerate(self._results): + lines.append(f" [{i}] {result.target_name} → {result.attribute_name}") + return "\n".join(lines) + + +class KrigingResult(TaskResult): + """Result of a kriging task. + + Contains information about the completed kriging operation and provides + convenient methods to access the target object and its data. 
+ + Example: + >>> result = await run(manager, params) + >>> result # Pretty-prints the result + >>> + >>> # Get data directly as DataFrame (simplest approach) + >>> df = await result.to_dataframe() + >>> + >>> # Or load the target object for more control + >>> target = await result.get_target_object() + """ + + def __init__(self, message: str, target: _KrigingTarget): + """Initialize a KrigingResult. + + Args: + message: A message describing what happened in the task. + target: The target information from the kriging result. + """ + super().__init__(message=message, target=target) + + def _get_result_type_name(self) -> str: + """Get the display name for this result type.""" + return "Kriging" + + +# ============================================================================= +# Run Functions +# ============================================================================= + + +def _parse_kriging_result(data: dict[str, Any]) -> KrigingResult: + """Parse the kriging result from the API response.""" + target_data = data["target"] + attr_data = target_data["attribute"] + + attribute = _KrigingAttribute( + reference=attr_data["reference"], + name=attr_data["name"], + ) + target = _KrigingTarget( + reference=target_data["reference"], + name=target_data["name"], + description=target_data.get("description"), + schema_id=target_data["schema_id"], + attribute=attribute, + ) + return KrigingResult(message=data["message"], target=target) + + +async def _run_single_kriging( + context: IContext, + parameters: KrigingParameters, + *, + preview: bool = False, + polling_interval_seconds: float = 0.5, + retry: Retry | None = None, + fb: IFeedback = NoFeedback, +) -> KrigingResult: + """Internal function to run a single kriging task.""" + connector = context.get_connector() + org_id = context.get_org_id() + + # Add API-Preview header when the caller has opted into preview APIs + if preview: + if connector._additional_headers is None: + connector._additional_headers = {} + 
connector._additional_headers["API-Preview"] = "opt-in" + + params_dict = parameters.to_dict() + + # Submit the job + job = await JobClient.submit( + connector=connector, + org_id=org_id, + topic="geostatistics", + task="kriging", + parameters=params_dict, + result_type=dict, # Get raw dict, we'll parse it ourselves + ) + + # Wait for results + raw_result = await job.wait_for_results( + polling_interval_seconds=polling_interval_seconds, + retry=retry, + fb=fb, + ) + + # Parse and return + result = _parse_kriging_result(raw_result) + result._context = context + return result + + +async def _run_kriging_for_registry( + context: IContext, + parameters: KrigingParameters, + *, + preview: bool = False, +) -> KrigingResult: + """Simplified runner function for task registry (no extra options). + + This is the function registered with the TaskRegistry. For more control + over polling and retry behavior, use the full `run()` function. + """ + return await _run_single_kriging(context, parameters, preview=preview) + + +# Register kriging task runner with the task registry + +register_task_runner(KrigingParameters, _run_kriging_for_registry) diff --git a/packages/evo-compute/tests/test_client.py b/packages/evo-compute/tests/test_client.py index 0bc7b78f..34c06782 100644 --- a/packages/evo-compute/tests/test_client.py +++ b/packages/evo-compute/tests/test_client.py @@ -366,101 +366,3 @@ async def test_wait_for_result_cancelled(self) -> None: self.task_path + f"/{self.job.id}", headers={"Accept": "application/json"}, ) - - -class TestJobClientPreview(TestWithConnector): - """Tests for the preview argument functionality.""" - - def setUp(self) -> None: - super().setUp() - self.job = JobClient( - connector=self.connector, - org_id=TEST_ORG.id, - topic=TEST_TOPIC, - task=TEST_TASK, - job_id=TEST_JOB_ID, - preview=True, - ) - self.setup_universal_headers(get_header_metadata(JobClient.__module__)) - - @property - def task_path(self) -> str: - return 
f"/compute/orgs/{TEST_ORG.id}/{self.job.topic}/{self.job.task}" - - @property - def job_url(self) -> str: - return self.connector.base_url.rstrip("/") + self.task_path + f"/{self.job.id}/status" - - def test_from_url_with_preview(self) -> None: - """Test that a job can be constructed from a URL with preview enabled.""" - job = JobClient.from_url(self.connector, self.job_url, preview=True) - self.assertEqual(TEST_JOB_ID, job.id) - self.assertEqual(TEST_TOPIC, job.topic) - self.assertEqual(TEST_TASK, job.task) - - async def test_submit_with_preview(self) -> None: - """Test that a job can be submitted with preview header.""" - with self.transport.set_http_response(status_code=303, headers={"Location": self.job_url}): - job = await JobClient.submit( - connector=self.connector, - org_id=TEST_ORG.id, - topic=TEST_TOPIC, - task=TEST_TASK, - parameters={"foo": "bar"}, - preview=True, - ) - self.assert_request_made( - RequestMethod.POST, - self.task_path, - headers={"Content-Type": "application/json", "API-Preview": "opt-in"}, - body={"parameters": {"foo": "bar"}}, - ) - self.assertEqual(TEST_JOB_ID, job.id) - - async def test_get_status_with_preview(self) -> None: - """Test that get_status includes the preview header when enabled.""" - response_data = load_test_data("job-response-in-progress.json") - response_data.pop("results", None) - response_json = json.dumps(response_data) - - with self.transport.set_http_response( - status_code=202, - content=response_json, - headers={"Content-Type": "application/json"}, - ): - await self.job.get_status() - - self.assert_request_made( - RequestMethod.GET, - self.task_path + f"/{self.job.id}/status", - headers={"Accept": "application/json", "API-Preview": "opt-in"}, - ) - - async def test_get_results_with_preview(self) -> None: - """Test that get_results includes the preview header when enabled.""" - response_data = load_test_data("job-response-succeeded.json") - response_json = json.dumps(response_data) - - with 
self.transport.set_http_response( - status_code=200, - content=response_json, - headers={"Content-Type": "application/json"}, - ): - await self.job.get_results() - - self.assert_request_made( - RequestMethod.GET, - self.task_path + f"/{self.job.id}", - headers={"Accept": "application/json", "API-Preview": "opt-in"}, - ) - - async def test_cancel_with_preview(self) -> None: - """Test that cancel includes the preview header when enabled.""" - with self.transport.set_http_response(status_code=204): - await self.job.cancel() - - self.assert_request_made( - RequestMethod.DELETE, - self.task_path + f"/{self.job.id}", - headers={"API-Preview": "opt-in"}, - ) diff --git a/packages/evo-compute/tests/test_kriging_tasks.py b/packages/evo-compute/tests/test_kriging_tasks.py new file mode 100644 index 00000000..172dcb89 --- /dev/null +++ b/packages/evo-compute/tests/test_kriging_tasks.py @@ -0,0 +1,778 @@ +# Copyright © 2025 Bentley Systems, Incorporated +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for kriging task parameter handling.""" + +from unittest import TestCase +from unittest.mock import MagicMock + +from evo.objects.typed.attributes import ( + Attribute, + BlockModelAttribute, + BlockModelPendingAttribute, + PendingAttribute, +) + +from evo.compute.tasks import ( + BlockDiscretisation, + CreateAttribute, + RegionFilter, + SearchNeighborhood, + Source, + Target, + UpdateAttribute, +) +from evo.compute.tasks.common import ( + Ellipsoid, + EllipsoidRanges, + get_attribute_expression, + is_typed_attribute, + source_from_attribute, + target_from_attribute, +) +from evo.compute.tasks.kriging import KrigingParameters + + +def _create_mock_source_attribute(name: str, key: str, object_url: str, schema_path: str = "") -> MagicMock: + """Create a mock Attribute (existing) that passes isinstance checks. + + Uses ``spec=Attribute`` so ``isinstance(mock, Attribute)`` returns True. + Sets the underlying properties that the adapter functions inspect. + """ + attr = MagicMock(spec=Attribute) + attr.name = name + attr.key = key + attr.exists = True + + # ModelContext-like _context + mock_context = MagicMock() + mock_context.schema_path = schema_path + attr._context = mock_context + + # Parent object + mock_obj = MagicMock() + mock_obj.metadata.url = object_url + attr._obj = mock_obj + + return attr + + +def _create_pending_attribute(name: str, parent_obj: MagicMock | None = None) -> PendingAttribute: + """Create a real PendingAttribute with an optional mock parent.""" + mock_parent = MagicMock() + mock_parent._obj = parent_obj + return PendingAttribute(mock_parent, name) + + +class TestAttributeAdapters(TestCase): + """Tests for the attribute adapter functions in source_target.""" + + # ---- is_typed_attribute ---- + + def test_is_typed_attribute_with_attribute(self): + attr = _create_mock_source_attribute("grade", "abc-key", "https://example.com/obj") + self.assertTrue(is_typed_attribute(attr)) + + def 
test_is_typed_attribute_with_pending_attribute(self): + pending = _create_pending_attribute("new_attr") + self.assertTrue(is_typed_attribute(pending)) + + def test_is_typed_attribute_with_block_model_attribute(self): + bm_attr = BlockModelAttribute(name="grade", attribute_type="Float64") + self.assertTrue(is_typed_attribute(bm_attr)) + + def test_is_typed_attribute_with_block_model_pending_attribute(self): + bm_pending = BlockModelPendingAttribute(obj=None, name="new_col") + self.assertTrue(is_typed_attribute(bm_pending)) + + def test_is_typed_attribute_with_string(self): + self.assertFalse(is_typed_attribute("some_string")) + + def test_is_typed_attribute_with_source(self): + source = Source(object="https://example.com/obj", attribute="grade") + self.assertFalse(is_typed_attribute(source)) + + # ---- get_attribute_expression ---- + + def test_expression_for_attribute_with_schema_path(self): + attr = _create_mock_source_attribute( + "grade", "abc-key", "https://example.com/obj", schema_path="locations.attributes" + ) + result = get_attribute_expression(attr) + self.assertEqual(result, "locations.attributes[?key=='abc-key']") + + def test_expression_for_attribute_without_schema_path(self): + attr = _create_mock_source_attribute("grade", "abc-key", "https://example.com/obj", schema_path="") + result = get_attribute_expression(attr) + self.assertEqual(result, "attributes[?key=='abc-key']") + + def test_expression_for_pending_attribute(self): + pending = _create_pending_attribute("my_attribute") + result = get_attribute_expression(pending) + self.assertEqual(result, "attributes[?name=='my_attribute']") + + def test_expression_for_block_model_attribute(self): + bm_attr = BlockModelAttribute(name="grade", attribute_type="Float64") + result = get_attribute_expression(bm_attr) + self.assertEqual(result, "attributes[?name=='grade']") + + def test_expression_for_block_model_pending_attribute(self): + bm_pending = BlockModelPendingAttribute(obj=None, name="new_col") + result 
= get_attribute_expression(bm_pending) + self.assertEqual(result, "attributes[?name=='new_col']") + + def test_expression_raises_for_invalid_type(self): + with self.assertRaises(TypeError): + get_attribute_expression("not_an_attribute") + + # ---- source_from_attribute ---- + + def test_source_from_existing_attribute(self): + attr = _create_mock_source_attribute( + "grade", "abc-key", "https://example.com/pointset", schema_path="locations.attributes" + ) + result = source_from_attribute(attr) + self.assertIsInstance(result, Source) + result_dict = result.to_dict() + self.assertEqual(result_dict["object"], "https://example.com/pointset") + self.assertEqual(result_dict["attribute"], "locations.attributes[?key=='abc-key']") + + def test_source_from_attribute_without_schema_path(self): + attr = _create_mock_source_attribute("grade", "abc-key", "https://example.com/pointset", schema_path="") + result = source_from_attribute(attr) + result_dict = result.to_dict() + self.assertEqual(result_dict["object"], "https://example.com/pointset") + self.assertEqual(result_dict["attribute"], "attributes[?key=='abc-key']") + + def test_source_from_attribute_raises_for_pending(self): + pending = _create_pending_attribute("new_attr") + with self.assertRaises(TypeError): + source_from_attribute(pending) + + def test_source_from_attribute_raises_for_block_model_attribute(self): + bm_attr = BlockModelAttribute(name="grade", attribute_type="Float64") + with self.assertRaises(TypeError): + source_from_attribute(bm_attr) + + def test_source_from_attribute_raises_for_string(self): + with self.assertRaises(TypeError): + source_from_attribute("not_an_attribute") + + # ---- target_from_attribute ---- + + def test_target_from_existing_attribute(self): + attr = _create_mock_source_attribute( + "grade", "abc-key", "https://example.com/obj", schema_path="locations.attributes" + ) + result = target_from_attribute(attr) + self.assertIsInstance(result, Target) + result_dict = result.to_dict() + 
self.assertEqual(result_dict["attribute"]["operation"], "update") + self.assertEqual(result_dict["attribute"]["reference"], "locations.attributes[?key=='abc-key']") + + def test_target_from_pending_attribute(self): + mock_obj = MagicMock() + mock_obj.metadata.url = "https://example.com/grid" + pending = _create_pending_attribute("new_column", parent_obj=mock_obj) + result = target_from_attribute(pending) + self.assertIsInstance(result, Target) + result_dict = result.to_dict() + self.assertEqual(result_dict["attribute"]["operation"], "create") + self.assertEqual(result_dict["attribute"]["name"], "new_column") + + def test_target_from_block_model_existing_attribute(self): + mock_bm = MagicMock() + mock_bm.metadata.url = "https://example.com/blockmodel" + bm_attr = BlockModelAttribute(name="grade", attribute_type="Float64", obj=mock_bm) + result = target_from_attribute(bm_attr) + self.assertIsInstance(result, Target) + result_dict = result.to_dict() + self.assertEqual(result_dict["attribute"]["operation"], "update") + self.assertEqual(result_dict["attribute"]["reference"], "attributes[?name=='grade']") + + def test_target_from_block_model_pending_attribute(self): + mock_bm = MagicMock() + mock_bm.metadata.url = "https://example.com/blockmodel" + bm_pending = BlockModelPendingAttribute(obj=mock_bm, name="new_col") + result = target_from_attribute(bm_pending) + self.assertIsInstance(result, Target) + result_dict = result.to_dict() + self.assertEqual(result_dict["attribute"]["operation"], "create") + self.assertEqual(result_dict["attribute"]["name"], "new_col") + + def test_target_from_attribute_raises_for_invalid_type(self): + with self.assertRaises(TypeError): + target_from_attribute("not_an_attribute") + + def test_target_from_attribute_raises_for_none_obj(self): + bm_pending = BlockModelPendingAttribute(obj=None, name="new_col") + with self.assertRaises(TypeError): + target_from_attribute(bm_pending) + + +class TestKrigingParametersWithAttributes(TestCase): + 
"""Tests for KrigingParameters handling of typed attribute objects.""" + + def test_kriging_params_with_pending_attribute_target(self): + """Test KrigingParameters accepts PendingAttribute as target.""" + source = Source(object="https://example.com/pointset", attribute="locations.attributes[?name=='grade']") + + mock_obj = MagicMock() + mock_obj.metadata.url = "https://example.com/grid" + target_attr = _create_pending_attribute("kriged_grade", parent_obj=mock_obj) + + variogram = "https://example.com/variogram" + search = SearchNeighborhood( + ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)), + max_samples=20, + ) + + params = KrigingParameters( + source=source, + target=target_attr, + variogram=variogram, + search=search, + ) + + params_dict = params.to_dict() + self.assertEqual(params_dict["target"]["object"], "https://example.com/grid") + self.assertEqual(params_dict["target"]["attribute"]["operation"], "create") + self.assertEqual(params_dict["target"]["attribute"]["name"], "kriged_grade") + + def test_kriging_params_with_existing_attribute_target(self): + """Test KrigingParameters accepts existing Attribute as target.""" + source = Source(object="https://example.com/pointset", attribute="locations.attributes[?name=='grade']") + target_attr = _create_mock_source_attribute( + name="existing_attr", + key="exist-key", + object_url="https://example.com/grid", + schema_path="locations.attributes", + ) + + variogram = "https://example.com/variogram" + search = SearchNeighborhood( + ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)), + max_samples=20, + ) + + params = KrigingParameters( + source=source, + target=target_attr, + variogram=variogram, + search=search, + ) + + params_dict = params.to_dict() + self.assertEqual(params_dict["target"]["object"], "https://example.com/grid") + self.assertEqual(params_dict["target"]["attribute"]["operation"], "update") + self.assertIn("reference", params_dict["target"]["attribute"]) + + def 
test_kriging_params_with_block_model_pending_attribute(self): + """Test KrigingParameters accepts BlockModelPendingAttribute as target.""" + source = Source(object="https://example.com/pointset", attribute="locations.attributes[?name=='grade']") + + mock_bm = MagicMock() + mock_bm.metadata.url = "https://example.com/blockmodel" + target_attr = BlockModelPendingAttribute(obj=mock_bm, name="new_bm_attr") + + variogram = "https://example.com/variogram" + search = SearchNeighborhood( + ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)), + max_samples=20, + ) + + params = KrigingParameters( + source=source, + target=target_attr, + variogram=variogram, + search=search, + ) + + params_dict = params.to_dict() + self.assertEqual(params_dict["target"]["object"], "https://example.com/blockmodel") + self.assertEqual(params_dict["target"]["attribute"]["operation"], "create") + self.assertEqual(params_dict["target"]["attribute"]["name"], "new_bm_attr") + + def test_kriging_params_with_block_model_existing_attribute(self): + """Test KrigingParameters accepts existing BlockModelAttribute as target.""" + source = Source(object="https://example.com/pointset", attribute="locations.attributes[?name=='grade']") + + mock_bm = MagicMock() + mock_bm.metadata.url = "https://example.com/blockmodel" + target_attr = BlockModelAttribute( + name="existing_bm_attr", + attribute_type="Float64", + obj=mock_bm, + ) + + variogram = "https://example.com/variogram" + search = SearchNeighborhood( + ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)), + max_samples=20, + ) + + params = KrigingParameters( + source=source, + target=target_attr, + variogram=variogram, + search=search, + ) + + params_dict = params.to_dict() + self.assertEqual(params_dict["target"]["object"], "https://example.com/blockmodel") + self.assertEqual(params_dict["target"]["attribute"]["operation"], "update") + self.assertIn("reference", params_dict["target"]["attribute"]) + + def 
test_kriging_params_with_explicit_target(self): + """Test KrigingParameters still works with explicit Target object.""" + source = Source(object="https://example.com/pointset", attribute="locations.attributes[?name=='grade']") + target = Target.new_attribute("https://example.com/grid", "kriged_grade") + variogram = "https://example.com/variogram" + search = SearchNeighborhood( + ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)), + max_samples=20, + ) + + params = KrigingParameters( + source=source, + target=target, + variogram=variogram, + search=search, + ) + + params_dict = params.to_dict() + self.assertEqual(params_dict["target"]["object"], "https://example.com/grid") + self.assertEqual(params_dict["target"]["attribute"]["operation"], "create") + self.assertEqual(params_dict["target"]["attribute"]["name"], "kriged_grade") + + def test_kriging_params_source_attribute_conversion(self): + """Test KrigingParameters converts source Attribute correctly.""" + source_attr = _create_mock_source_attribute( + name="grade", + key="grade-key", + object_url="https://example.com/pointset", + schema_path="locations.attributes", + ) + + target = Target.new_attribute("https://example.com/grid", "kriged_grade") + variogram = "https://example.com/variogram" + search = SearchNeighborhood( + ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)), + max_samples=20, + ) + + params = KrigingParameters( + source=source_attr, + target=target, + variogram=variogram, + search=search, + ) + + params_dict = params.to_dict() + self.assertEqual(params_dict["source"]["object"], "https://example.com/pointset") + self.assertEqual(params_dict["source"]["attribute"], "locations.attributes[?key=='grade-key']") + + +class TestTargetSerialization(TestCase): + """Tests for Target serialization with different attribute types.""" + + def test_target_with_create_attribute(self): + """Test Target serializes CreateAttribute correctly.""" + target = Target( + object="https://example.com/grid", + 
attribute=CreateAttribute(name="new_attr"), + ) + + result = target.to_dict() + + self.assertEqual(result["object"], "https://example.com/grid") + self.assertEqual(result["attribute"]["operation"], "create") + self.assertEqual(result["attribute"]["name"], "new_attr") + + def test_target_with_update_attribute(self): + """Test Target serializes UpdateAttribute correctly.""" + target = Target( + object="https://example.com/grid", + attribute=UpdateAttribute(reference="cell_attributes[?name=='existing']"), + ) + + result = target.to_dict() + + self.assertEqual(result["object"], "https://example.com/grid") + self.assertEqual(result["attribute"]["operation"], "update") + self.assertEqual(result["attribute"]["reference"], "cell_attributes[?name=='existing']") + + def test_target_with_dict_attribute(self): + """Test Target serializes dict attribute correctly.""" + target = Target( + object="https://example.com/grid", + attribute={"operation": "create", "name": "dict_attr"}, + ) + + result = target.to_dict() + + self.assertEqual(result["object"], "https://example.com/grid") + self.assertEqual(result["attribute"]["operation"], "create") + self.assertEqual(result["attribute"]["name"], "dict_attr") + + def test_target_new_attribute_factory(self): + """Test Target.new_attribute factory method.""" + target = Target.new_attribute("https://example.com/grid", "new_attr") + + result = target.to_dict() + + self.assertEqual(result["object"], "https://example.com/grid") + self.assertEqual(result["attribute"]["operation"], "create") + self.assertEqual(result["attribute"]["name"], "new_attr") + + +class TestRegionFilter(TestCase): + """Tests for RegionFilter class.""" + + def test_region_filter_with_names(self): + """Test RegionFilter with category names.""" + region_filter = RegionFilter( + attribute="domain_attribute", + names=["LMS1", "LMS2"], + ) + + result = region_filter.to_dict() + + self.assertEqual(result["attribute"], "domain_attribute") + self.assertEqual(result["names"], 
["LMS1", "LMS2"]) + self.assertNotIn("values", result) + + def test_region_filter_with_values(self): + """Test RegionFilter with integer values.""" + region_filter = RegionFilter( + attribute="domain_code_attribute", + values=[1, 2, 3], + ) + + result = region_filter.to_dict() + + self.assertEqual(result["attribute"], "domain_code_attribute") + self.assertEqual(result["values"], [1, 2, 3]) + self.assertNotIn("names", result) + + def test_region_filter_with_block_model_attribute(self): + """Test RegionFilter with a real BlockModelAttribute.""" + bm_attr = BlockModelAttribute(name="domain", attribute_type="category") + + region_filter = RegionFilter( + attribute=bm_attr, + names=["Zone1"], + ) + + result = region_filter.to_dict() + + self.assertEqual(result["attribute"], "attributes[?name=='domain']") + self.assertEqual(result["names"], ["Zone1"]) + + def test_region_filter_with_pointset_attribute(self): + """Test RegionFilter with a PointSet Attribute (mock with spec).""" + mock_attr = _create_mock_source_attribute( + name="domain", + key="domain-key", + object_url="https://example.com/pointset", + schema_path="locations.attributes", + ) + + region_filter = RegionFilter( + attribute=mock_attr, + names=["Domain1"], + ) + + result = region_filter.to_dict() + + self.assertEqual(result["attribute"], "locations.attributes[?key=='domain-key']") + self.assertEqual(result["names"], ["Domain1"]) + + def test_region_filter_with_pending_attribute(self): + """Test RegionFilter with a PendingAttribute.""" + pending = _create_pending_attribute("domain") + + region_filter = RegionFilter( + attribute=pending, + names=["Zone1"], + ) + + result = region_filter.to_dict() + + self.assertEqual(result["attribute"], "attributes[?name=='domain']") + self.assertEqual(result["names"], ["Zone1"]) + + def test_region_filter_cannot_have_both_names_and_values(self): + """Test RegionFilter raises error when both names and values are provided.""" + with self.assertRaises(ValueError) as context: + 
RegionFilter( + attribute="domain_attribute", + names=["LMS1"], + values=[1], + ) + + self.assertIn("Only one of 'names' or 'values' may be provided", str(context.exception)) + + def test_region_filter_must_have_names_or_values(self): + """Test RegionFilter raises error when neither names nor values are provided.""" + with self.assertRaises(ValueError) as context: + RegionFilter( + attribute="domain_attribute", + ) + + self.assertIn("One of 'names' or 'values' must be provided", str(context.exception)) + + def test_region_filter_raises_for_unsupported_type(self): + """Test RegionFilter raises TypeError for unsupported attribute type.""" + with self.assertRaises(TypeError): + region_filter = RegionFilter(attribute=12345, names=["Zone1"]) + region_filter.to_dict() + + +class TestKrigingParametersWithRegionFilter(TestCase): + """Tests for KrigingParameters with target region filter support.""" + + def test_kriging_params_with_target_region_filter_names(self): + """Test KrigingParameters with target region filter using category names.""" + source = Source(object="https://example.com/pointset", attribute="grade") + target = Target.new_attribute("https://example.com/grid", "kriged_grade") + variogram = "https://example.com/variogram" + search = SearchNeighborhood( + ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)), + max_samples=20, + ) + region_filter = RegionFilter( + attribute="domain_attribute", + names=["LMS1", "LMS2"], + ) + + params = KrigingParameters( + source=source, + target=target, + variogram=variogram, + search=search, + target_region_filter=region_filter, + ) + + params_dict = params.to_dict() + + # Verify region filter is in target + self.assertIn("region_filter", params_dict["target"]) + self.assertEqual(params_dict["target"]["region_filter"]["attribute"], "domain_attribute") + self.assertEqual(params_dict["target"]["region_filter"]["names"], ["LMS1", "LMS2"]) + + def test_kriging_params_with_target_region_filter_values(self): + """Test 
KrigingParameters with target region filter using integer values.""" + source = Source(object="https://example.com/pointset", attribute="grade") + target = Target.new_attribute("https://example.com/grid", "kriged_grade") + variogram = "https://example.com/variogram" + search = SearchNeighborhood( + ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)), + max_samples=20, + ) + region_filter = RegionFilter( + attribute="domain_code", + values=[1, 2, 3], + ) + + params = KrigingParameters( + source=source, + target=target, + variogram=variogram, + search=search, + target_region_filter=region_filter, + ) + + params_dict = params.to_dict() + + # Verify region filter is in target + self.assertIn("region_filter", params_dict["target"]) + self.assertEqual(params_dict["target"]["region_filter"]["attribute"], "domain_code") + self.assertEqual(params_dict["target"]["region_filter"]["values"], [1, 2, 3]) + + def test_kriging_params_without_target_region_filter(self): + """Test KrigingParameters without target region filter (default behavior).""" + source = Source(object="https://example.com/pointset", attribute="grade") + target = Target.new_attribute("https://example.com/grid", "kriged_grade") + variogram = "https://example.com/variogram" + search = SearchNeighborhood( + ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)), + max_samples=20, + ) + + params = KrigingParameters( + source=source, + target=target, + variogram=variogram, + search=search, + ) + + params_dict = params.to_dict() + + # Verify region filter is not present + self.assertNotIn("region_filter", params_dict["target"]) + + +class TestBlockDiscretisation(TestCase): + """Tests for BlockDiscretisation class.""" + + def test_default_values(self): + """Test BlockDiscretisation defaults to 1x1x1.""" + bd = BlockDiscretisation() + + self.assertEqual(bd.nx, 1) + self.assertEqual(bd.ny, 1) + self.assertEqual(bd.nz, 1) + + def test_custom_values(self): + """Test BlockDiscretisation with custom values.""" + bd 
= BlockDiscretisation(nx=3, ny=4, nz=2) + + self.assertEqual(bd.nx, 3) + self.assertEqual(bd.ny, 4) + self.assertEqual(bd.nz, 2) + + def test_maximum_values(self): + """Test BlockDiscretisation with maximum values (9).""" + bd = BlockDiscretisation(nx=9, ny=9, nz=9) + + self.assertEqual(bd.nx, 9) + self.assertEqual(bd.ny, 9) + self.assertEqual(bd.nz, 9) + + def test_to_dict(self): + """Test BlockDiscretisation serializes correctly.""" + bd = BlockDiscretisation(nx=3, ny=3, nz=2) + + result = bd.to_dict() + + self.assertEqual(result, {"nx": 3, "ny": 3, "nz": 2}) + + def test_to_dict_defaults(self): + """Test BlockDiscretisation serializes default values.""" + bd = BlockDiscretisation() + + result = bd.to_dict() + + self.assertEqual(result, {"nx": 1, "ny": 1, "nz": 1}) + + def test_validation_nx_too_low(self): + """Test BlockDiscretisation rejects nx < 1.""" + with self.assertRaises(ValueError) as ctx: + BlockDiscretisation(nx=0) + + self.assertIn("nx", str(ctx.exception)) + self.assertIn("between 1 and 9", str(ctx.exception)) + + def test_validation_ny_too_high(self): + """Test BlockDiscretisation rejects ny > 9.""" + with self.assertRaises(ValueError) as ctx: + BlockDiscretisation(ny=10) + + self.assertIn("ny", str(ctx.exception)) + self.assertIn("between 1 and 9", str(ctx.exception)) + + def test_validation_nz_negative(self): + """Test BlockDiscretisation rejects negative nz.""" + with self.assertRaises(ValueError) as ctx: + BlockDiscretisation(nz=-1) + + self.assertIn("nz", str(ctx.exception)) + self.assertIn("between 1 and 9", str(ctx.exception)) + + def test_validation_non_integer_type(self): + """Test BlockDiscretisation rejects non-integer types.""" + with self.assertRaises(TypeError) as ctx: + BlockDiscretisation(nx=2.5) + + self.assertIn("nx", str(ctx.exception)) + self.assertIn("integer", str(ctx.exception)) + + +class TestKrigingParametersWithBlockDiscretisation(TestCase): + """Tests for KrigingParameters with block_discretisation support.""" + + def 
test_kriging_params_with_block_discretisation(self): + """Test KrigingParameters includes block_discretisation in to_dict.""" + source = Source(object="https://example.com/pointset", attribute="grade") + target = Target.new_attribute("https://example.com/grid", "kriged_grade") + variogram = "https://example.com/variogram" + search = SearchNeighborhood( + ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)), + max_samples=20, + ) + bd = BlockDiscretisation(nx=3, ny=3, nz=2) + + params = KrigingParameters( + source=source, + target=target, + variogram=variogram, + search=search, + block_discretisation=bd, + ) + + params_dict = params.to_dict() + + self.assertIn("block_discretisation", params_dict) + self.assertEqual(params_dict["block_discretisation"], {"nx": 3, "ny": 3, "nz": 2}) + + def test_kriging_params_without_block_discretisation(self): + """Test KrigingParameters omits block_discretisation when None (default).""" + source = Source(object="https://example.com/pointset", attribute="grade") + target = Target.new_attribute("https://example.com/grid", "kriged_grade") + variogram = "https://example.com/variogram" + search = SearchNeighborhood( + ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)), + max_samples=20, + ) + + params = KrigingParameters( + source=source, + target=target, + variogram=variogram, + search=search, + ) + + params_dict = params.to_dict() + + self.assertNotIn("block_discretisation", params_dict) + + def test_kriging_params_block_discretisation_with_region_filter(self): + """Test KrigingParameters with both block_discretisation and region filter.""" + source = Source(object="https://example.com/pointset", attribute="grade") + target = Target.new_attribute("https://example.com/grid", "kriged_grade") + variogram = "https://example.com/variogram" + search = SearchNeighborhood( + ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)), + max_samples=20, + ) + bd = BlockDiscretisation(nx=2, ny=2, nz=2) + region_filter = RegionFilter( + 
attribute="domain_attribute", + names=["LMS1"], + ) + + params = KrigingParameters( + source=source, + target=target, + variogram=variogram, + search=search, + block_discretisation=bd, + target_region_filter=region_filter, + ) + + params_dict = params.to_dict() + + # Both should be present + self.assertIn("block_discretisation", params_dict) + self.assertEqual(params_dict["block_discretisation"], {"nx": 2, "ny": 2, "nz": 2}) + self.assertIn("region_filter", params_dict["target"]) + self.assertEqual(params_dict["target"]["region_filter"]["names"], ["LMS1"]) diff --git a/packages/evo-compute/tests/test_tasks.py b/packages/evo-compute/tests/test_tasks.py new file mode 100644 index 00000000..26dc11ed --- /dev/null +++ b/packages/evo-compute/tests/test_tasks.py @@ -0,0 +1,263 @@ +# Copyright © 2025 Bentley Systems, Incorporated +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for the compute tasks module imports and basic functionality.""" + +import inspect +import unittest +from unittest.mock import AsyncMock, MagicMock, patch + +from evo.compute.tasks.common.runner import get_task_runner, run_tasks +from evo.compute.tasks.kriging import KrigingParameters, _run_single_kriging + + +class TestTaskRegistry(unittest.TestCase): + """Tests for the task registry system.""" + + def test_kriging_parameters_registered(self): + """KrigingParameters should be registered with the task registry.""" + from evo.compute.tasks.common.runner import get_task_runner + from evo.compute.tasks.kriging import KrigingParameters + + runner = get_task_runner(KrigingParameters) + self.assertIsNotNone(runner) + + def test_unregistered_type_returns_none(self): + """Unregistered types should return None from get_task_runner.""" + from evo.compute.tasks.common.runner import get_task_runner + + class UnregisteredParams: + pass + + runner = get_task_runner(UnregisteredParams) + self.assertIsNone(runner) + + def test_registry_get_runner_for_params_raises_on_unknown(self): + """get_runner_for_params should raise TypeError for unregistered types.""" + from evo.compute.tasks.common.runner import TaskRegistry + + registry = TaskRegistry() + + class UnknownParams: + pass + + with self.assertRaises(TypeError) as ctx: + registry.get_runner_for_params(UnknownParams()) + + self.assertIn("UnknownParams", str(ctx.exception)) + + +class TestPreviewFlagSignatures(unittest.TestCase): + """Tests for the preview flag signatures on run() and runner functions.""" + + def test_registered_runner_accepts_preview_kwarg(self): + """The registered kriging runner should accept a 'preview' keyword argument.""" + + runner = get_task_runner(KrigingParameters) + sig = inspect.signature(runner) + self.assertIn("preview", sig.parameters) + param = sig.parameters["preview"] + self.assertEqual(param.default, False) + self.assertEqual(param.kind, inspect.Parameter.KEYWORD_ONLY) + + def 
test_run_single_kriging_accepts_preview_kwarg(self): + """_run_single_kriging should accept a 'preview' keyword argument defaulting to False.""" + sig = inspect.signature(_run_single_kriging) + self.assertIn("preview", sig.parameters) + self.assertEqual(sig.parameters["preview"].default, False) + + def test_run_function_accepts_preview_kwarg(self): + """The public run() function should accept a 'preview' keyword argument defaulting to False.""" + from evo.compute.tasks import run + + sig = inspect.signature(run) + self.assertIn("preview", sig.parameters) + self.assertEqual(sig.parameters["preview"].default, False) + + def test_run_tasks_accepts_preview_kwarg(self): + """run_tasks() should accept a 'preview' keyword argument defaulting to False.""" + sig = inspect.signature(run_tasks) + self.assertIn("preview", sig.parameters) + self.assertEqual(sig.parameters["preview"].default, False) + + +def _mock_kriging_context(): + """Create a mock context + connector for kriging preview tests.""" + mock_connector = MagicMock() + mock_connector._additional_headers = None + + mock_context = MagicMock() + mock_context.get_connector.return_value = mock_connector + mock_context.get_org_id.return_value = "test-org-id" + return mock_context, mock_connector + + +def _mock_kriging_job(): + """Create a mock job that returns a valid kriging result.""" + mock_job = AsyncMock() + mock_job.wait_for_results.return_value = { + "message": "ok", + "target": { + "reference": "ref", + "name": "t", + "description": None, + "schema_id": "s", + "attribute": {"reference": "ar", "name": "an"}, + }, + } + return mock_job + + +class TestPreviewFlagBehavior(unittest.IsolatedAsyncioTestCase): + """Tests for the preview flag runtime behavior on _run_single_kriging.""" + + async def test_run_single_kriging_sets_header_when_preview_true(self): + """_run_single_kriging should set API-Preview header when preview=True.""" + mock_context, mock_connector = _mock_kriging_context() + mock_params = 
MagicMock(spec=KrigingParameters) + mock_params.to_dict.return_value = {"source": {}, "target": {}} + + with patch( + "evo.compute.tasks.kriging.JobClient.submit", new_callable=AsyncMock, return_value=_mock_kriging_job() + ): + await _run_single_kriging(mock_context, mock_params, preview=True) + + # Verify the header was set + self.assertIsNotNone(mock_connector._additional_headers) + self.assertEqual(mock_connector._additional_headers["API-Preview"], "opt-in") + + async def test_run_single_kriging_does_not_set_header_when_preview_false(self): + """_run_single_kriging should NOT set API-Preview header when preview=False.""" + mock_context, mock_connector = _mock_kriging_context() + mock_params = MagicMock(spec=KrigingParameters) + mock_params.to_dict.return_value = {"source": {}, "target": {}} + + with patch( + "evo.compute.tasks.kriging.JobClient.submit", new_callable=AsyncMock, return_value=_mock_kriging_job() + ): + await _run_single_kriging(mock_context, mock_params, preview=False) + + # Verify the header was NOT set + self.assertIsNone(mock_connector._additional_headers) + + async def test_run_single_kriging_default_preview_is_false(self): + """_run_single_kriging should default to preview=False and not set the header.""" + mock_context, mock_connector = _mock_kriging_context() + mock_params = MagicMock(spec=KrigingParameters) + mock_params.to_dict.return_value = {"source": {}, "target": {}} + + with patch( + "evo.compute.tasks.kriging.JobClient.submit", new_callable=AsyncMock, return_value=_mock_kriging_job() + ): + # Call without preview kwarg — should default to False + await _run_single_kriging(mock_context, mock_params) + + # Verify the header was NOT set + self.assertIsNone(mock_connector._additional_headers) + + +class TestKrigingResultInheritance(unittest.TestCase): + """Tests that KrigingResult inherits from TaskResult.""" + + def test_kriging_result_inherits_from_task_result(self): + """KrigingResult should be a subclass of TaskResult.""" + from 
evo.compute.tasks import KrigingResult, TaskResult + + self.assertTrue(issubclass(KrigingResult, TaskResult)) + + +class TestTaskResultsContainer(unittest.TestCase): + """Tests for the TaskResults container class.""" + + def test_task_results_iteration(self): + """TaskResults should support iteration.""" + from evo.compute.tasks.kriging import KrigingResult, TaskResults, _KrigingAttribute, _KrigingTarget + + # Create mock results + attr = _KrigingAttribute(reference="ref1", name="attr1") + target = _KrigingTarget( + reference="ref1", + name="target1", + description="desc", + schema_id="schema/1.0.0", + attribute=attr, + ) + result1 = KrigingResult(message="msg1", target=target) + result2 = KrigingResult(message="msg2", target=target) + + results = TaskResults([result1, result2]) + + # Test len + self.assertEqual(len(results), 2) + + # Test iteration + items = list(results) + self.assertEqual(len(items), 2) + self.assertEqual(items[0].message, "msg1") + self.assertEqual(items[1].message, "msg2") + + # Test indexing + self.assertEqual(results[0].message, "msg1") + self.assertEqual(results[1].message, "msg2") + + def test_task_results_results_property(self): + """TaskResults should expose results via .results property.""" + from evo.compute.tasks.kriging import KrigingResult, TaskResults, _KrigingAttribute, _KrigingTarget + + attr = _KrigingAttribute(reference="ref1", name="attr1") + target = _KrigingTarget( + reference="ref1", + name="target1", + description="desc", + schema_id="schema/1.0.0", + attribute=attr, + ) + result = KrigingResult(message="msg", target=target) + + results = TaskResults([result]) + + self.assertEqual(results.results, [result]) + + +class TestKrigingMethod(unittest.TestCase): + """Tests for kriging method classes.""" + + def test_ordinary_kriging_singleton(self): + """KrigingMethod.ORDINARY should be an OrdinaryKriging instance.""" + from evo.compute.tasks.kriging import KrigingMethod, OrdinaryKriging + + 
self.assertIsInstance(KrigingMethod.ORDINARY, OrdinaryKriging) + + def test_simple_kriging_factory(self): + """KrigingMethod.simple() should create a SimpleKriging instance.""" + from evo.compute.tasks.kriging import KrigingMethod, SimpleKriging + + method = KrigingMethod.simple(mean=100.0) + self.assertIsInstance(method, SimpleKriging) + self.assertEqual(method.mean, 100.0) + + def test_ordinary_kriging_to_dict(self): + """OrdinaryKriging should serialize to dict with type='ordinary'.""" + from evo.compute.tasks.kriging import OrdinaryKriging + + d = OrdinaryKriging().to_dict() + self.assertEqual(d, {"type": "ordinary"}) + + def test_simple_kriging_to_dict(self): + """SimpleKriging should serialize to dict with type='simple' and mean.""" + from evo.compute.tasks.kriging import SimpleKriging + + d = SimpleKriging(mean=50.0).to_dict() + self.assertEqual(d, {"type": "simple", "mean": 50.0}) + + +if __name__ == "__main__": + unittest.main() diff --git a/packages/evo-objects/src/evo/objects/typed/attributes.py b/packages/evo-objects/src/evo/objects/typed/attributes.py index 97ce0661..38412d21 100644 --- a/packages/evo-objects/src/evo/objects/typed/attributes.py +++ b/packages/evo-objects/src/evo/objects/typed/attributes.py @@ -42,6 +42,7 @@ "BlockModelAttribute", "BlockModelAttributes", "BlockModelPendingAttribute", + "PendingAttribute", ] @@ -100,6 +101,14 @@ def attribute_type(self) -> str: """The type of this attribute.""" return self._attribute_type + @property + def exists(self) -> bool: + """Whether this attribute exists on the object. + + :return: True for existing attributes. + """ + return True + async def to_dataframe(self, fb: IFeedback = NoFeedback) -> pd.DataFrame: """Load a DataFrame containing the values for this attribute from the object. @@ -153,13 +162,53 @@ async def _upload_attribute_values( attr_doc["nan_description"] = {"values": []} +class PendingAttribute: + """A placeholder for an attribute that doesn't exist yet on a Geoscience Object. 
+ + This is returned when accessing an attribute by name that doesn't exist. + It can be used as a target for compute tasks, which will create the attribute. + """ + + def __init__(self, parent: "Attributes", name: str) -> None: + """ + :param parent: The Attributes collection this pending attribute belongs to. + :param name: The name of the attribute to create. + """ + self._parent = parent + self._name = name + + @property + def name(self) -> str: + """The name of this attribute.""" + return self._name + + @property + def exists(self) -> bool: + """Whether this attribute exists on the object. + + :return: False for pending attributes. + """ + return False + + @property + def _obj(self) -> "DownloadedObject | None": + """The DownloadedObject containing this attribute's parent object. + + Delegates to the parent Attributes collection. + """ + return self._parent._obj + + def __repr__(self) -> str: + return f"PendingAttribute(name={self._name!r}, exists=False)" + + class Attributes(SchemaList[Attribute]): """A collection of Geoscience Object Attributes""" _schema_path: str | None = None """The full JMESPath to this attributes list within the parent object schema.""" - def __getitem__(self, index_or_name: int | str) -> Attribute: + def __getitem__(self, index_or_name: int | str) -> Attribute | PendingAttribute: """Get an attribute by index or name. :param index_or_name: Either an integer index or the name/key of the attribute. 
@@ -172,6 +221,8 @@ def __getitem__(self, index_or_name: int | str) -> Attribute: for attr in self: if attr.name == index_or_name or attr.key == index_or_name: return attr + # Return a PendingAttribute for non-existent attributes accessed by name + return PendingAttribute(self, index_or_name) return super().__getitem__(index_or_name) @classmethod @@ -421,6 +472,14 @@ def __init__(self, attributes: list[BlockModelAttribute], block_model: BlockMode ) self._attributes.append(attr_with_obj) + @property + def exists(self) -> bool: + """Whether this attribute exists on the block model. + + :return: True for existing attributes. + """ + return True + @classmethod def from_schema(cls, attributes_list: list[dict], block_model: BlockModel | None = None) -> BlockModelAttributes: """Parse block model attributes from the schema format. diff --git a/packages/evo-objects/tests/typed/test_attributes.py b/packages/evo-objects/tests/typed/test_attributes.py index 63a1e26a..c8928cec 100644 --- a/packages/evo-objects/tests/typed/test_attributes.py +++ b/packages/evo-objects/tests/typed/test_attributes.py @@ -16,7 +16,7 @@ import pandas as pd from parameterized import parameterized -from evo.objects.typed.attributes import UnSupportedDataTypeError, _infer_attribute_type_from_series +from evo.objects.typed.attributes import PendingAttribute, UnSupportedDataTypeError, _infer_attribute_type_from_series class TestAttributeTypeInference(TestCase): @@ -46,3 +46,22 @@ def test_unsupported_dtype(self): series = pd.Series([1 + 2j, 3 + 4j], dtype="complex128") with self.assertRaises(UnSupportedDataTypeError): _infer_attribute_type_from_series(series) + + +class TestPendingAttribute(TestCase): + """Tests for PendingAttribute class.""" + + def test_pending_attribute_name(self): + """Test that PendingAttribute stores the name correctly.""" + pending = PendingAttribute(None, "test_attr") + self.assertEqual(pending.name, "test_attr") + + def test_pending_attribute_exists_is_false(self): + """Test 
that PendingAttribute.exists returns False.""" + pending = PendingAttribute(None, "test_attr") + self.assertFalse(pending.exists) + + def test_pending_attribute_repr(self): + """Test that PendingAttribute has a useful repr.""" + pending = PendingAttribute(None, "test_attr") + self.assertEqual(repr(pending), "PendingAttribute(name='test_attr', exists=False)") diff --git a/packages/evo-sdk-common/pyproject.toml b/packages/evo-sdk-common/pyproject.toml index feef4140..efa6636d 100644 --- a/packages/evo-sdk-common/pyproject.toml +++ b/packages/evo-sdk-common/pyproject.toml @@ -1,7 +1,7 @@ [project] name = "evo-sdk-common" description = "Python package that establishes a common framework for use by client libraries that interact with Seequent Evo APIs" -version = "0.5.18" +version = "0.5.19" requires-python = ">=3.10" license-files = ["LICENSE.md"] dynamic = ["readme"] diff --git a/packages/evo-widgets/src/evo/widgets/__init__.py b/packages/evo-widgets/src/evo/widgets/__init__.py index f66583f9..97f5d502 100644 --- a/packages/evo-widgets/src/evo/widgets/__init__.py +++ b/packages/evo-widgets/src/evo/widgets/__init__.py @@ -41,6 +41,8 @@ format_block_model_version, format_report, format_report_result, + format_task_result, + format_task_results, format_variogram, ) from .urls import ( @@ -71,6 +73,8 @@ "format_block_model_version", "format_report", "format_report_result", + "format_task_result", + "format_task_results", "format_variogram", "get_blocksync_base_url", "get_blocksync_block_model_url", @@ -155,6 +159,25 @@ def _register_formatters(ipython: InteractiveShell) -> None: format_block_model_attributes, ) + # Register formatters for compute task results + html_formatter.for_type_by_name( + "evo.compute.tasks.kriging", + "TaskResult", + format_task_result, + ) + + html_formatter.for_type_by_name( + "evo.compute.tasks.kriging", + "TaskResults", + format_task_results, + ) + + html_formatter.for_type_by_name( + "evo.compute.tasks.kriging", + "KrigingResult", + 
format_task_result, + ) + def _unregister_formatters(ipython: InteractiveShell) -> None: """Unregister HTML formatters for Evo SDK types. diff --git a/packages/evo-widgets/src/evo/widgets/formatters.py b/packages/evo-widgets/src/evo/widgets/formatters.py index 553a8992..765968de 100644 --- a/packages/evo-widgets/src/evo/widgets/formatters.py +++ b/packages/evo-widgets/src/evo/widgets/formatters.py @@ -26,7 +26,12 @@ build_table_row_vtop, build_title, ) -from .urls import get_blocksync_block_model_url_from_environment, get_portal_url_for_object, get_viewer_url_for_object +from .urls import ( + get_blocksync_block_model_url_from_environment, + get_portal_url_for_object, + get_portal_url_from_reference, + get_viewer_url_for_object, +) __all__ = [ "format_attributes_collection", @@ -36,6 +41,8 @@ "format_block_model_version", "format_report", "format_report_result", + "format_task_result", + "format_task_results", "format_variogram", ] @@ -561,3 +568,112 @@ def format_block_model(obj: Any) -> str: html += "" return html + + +# ============================================================================= +# Compute Task Result Formatters +# ============================================================================= + + +def _get_task_result_portal_url(result: Any) -> str | None: + """Extract Portal URL from a task result's target reference. + + :param result: A TaskResult object with _target.reference attribute. + :return: Portal URL string or None if not available. 
+ """ + # Check if result has _target attribute + target = getattr(result, "_target", None) + if target is None: + return None + + # Check if target has reference attribute + ref = getattr(target, "reference", None) + if not ref or not isinstance(ref, str): + return None + + # Try to generate portal URL from reference + try: + return get_portal_url_from_reference(ref) + except ValueError: + # Invalid reference URL format + return None + + +def format_task_result(result: Any) -> str: + """Format a TaskResult as HTML. + + This formatter handles TaskResult and KrigingResult objects from evo-compute, + displaying the task completion status, target information, and Portal links. + + :param result: A TaskResult object with message, target_name, schema_type, + attribute_name, and _target attributes. + :return: HTML string for the task result. + """ + portal_url = _get_task_result_portal_url(result) + links = [("Portal", portal_url)] if portal_url else None + + # Get result type name (Task, Kriging, etc.) + result_type = result._get_result_type_name() if hasattr(result, "_get_result_type_name") else "Task" + title = f"✓ {result_type} Result" + + rows = [ + ("Target:", result.target_name), + ("Schema:", result.schema_type), + ("Attribute:", f'{result.attribute_name}'), + ] + + table_rows = [build_table_row(label, value) for label, value in rows] + + html = STYLESHEET + html += '
' + html += build_title(title, links) + html += f'
{result.message}
' + html += f"{''.join(table_rows)}
" + html += "
" + + return html + + +def format_task_results(results: Any) -> str: + """Format a TaskResults collection as HTML. + + This formatter handles TaskResults objects from evo-compute, + displaying a table of all completed tasks with their status and Portal links. + + :param results: A TaskResults object with _results list of TaskResult objects. + :return: HTML string for the task results collection. + """ + result_list = results._results + + if not result_list: + return "
No results
" + + # Get result type from first result + result_type = result_list[0]._get_result_type_name() if hasattr(result_list[0], "_get_result_type_name") else "Task" + title = f"✓ {len(result_list)} {result_type} Results" + + # Build table data + headers = ["#", "Target", "Attribute", "Schema", "Link"] + rows = [] + for i, result in enumerate(result_list): + portal_url = _get_task_result_portal_url(result) + link_html = f'Portal' if portal_url else "N/A" + rows.append( + [ + str(i + 1), + result.target_name, + f'{result.attribute_name}', + result.schema_type, + link_html, + ] + ) + + table = build_nested_table(headers, rows) + + html = STYLESHEET + html += '
' + html += build_title(title) + html += table + html += "
" + + return html diff --git a/packages/evo-widgets/tests/test_formatters.py b/packages/evo-widgets/tests/test_formatters.py index 6e81bd0d..cb0a105e 100644 --- a/packages/evo-widgets/tests/test_formatters.py +++ b/packages/evo-widgets/tests/test_formatters.py @@ -20,6 +20,7 @@ _format_bounding_box, _format_crs, _get_base_metadata, + _get_task_result_portal_url, format_attributes_collection, format_base_object, format_block_model, @@ -27,6 +28,8 @@ format_block_model_version, format_report, format_report_result, + format_task_result, + format_task_results, format_variogram, ) @@ -939,5 +942,311 @@ def test_formats_report_result_table(self): self.assertIn("2.5", html) +class TestFormatTaskResult(unittest.TestCase): + """Tests for the format_task_result function.""" + + def _create_mock_task_result(self, **kwargs): + """Create a mock TaskResult object.""" + defaults = { + "message": "Task completed successfully", + "target_name": "Test Grid", + "schema_type": "objects/regular-3d-grid/v1.0.0", + "attribute_name": "kriged_grade", + "target_reference": ( + "https://350mt.api.seequent.com/geoscience-object" + "/orgs/12345678-1234-1234-1234-123456789abc" + "/workspaces/87654321-4321-4321-4321-abcdef123456" + "/objects/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + ), + } + defaults.update(kwargs) + + obj = MagicMock() + obj.message = defaults["message"] + obj.target_name = defaults["target_name"] + obj.schema_type = defaults["schema_type"] + obj.attribute_name = defaults["attribute_name"] + + # Mock _target with reference for portal URL + obj._target = MagicMock() + obj._target.reference = defaults["target_reference"] + + # Mock _get_result_type_name + obj._get_result_type_name = MagicMock(return_value="Kriging") + + return obj + + def test_formats_task_result_basic_info(self): + """Test formatting a task result with basic information.""" + obj = self._create_mock_task_result() + + html = format_task_result(obj) + + self.assertIn("Kriging Result", html) + self.assertIn("Test 
Grid", html) + self.assertIn("objects/regular-3d-grid/v1.0.0", html) + self.assertIn("kriged_grade", html) + self.assertIn("Task completed successfully", html) + self.assertIn("attr-highlight", html) # Attribute should be highlighted + + def test_formats_task_result_with_portal_link(self): + """Test formatting a task result includes portal link.""" + obj = self._create_mock_task_result() + + html = format_task_result(obj) + + self.assertIn("Portal", html) + self.assertIn("href=", html) + + def test_formats_task_result_without_portal_link(self): + """Test formatting a task result without reference doesn't fail.""" + obj = self._create_mock_task_result(target_reference=None) + + html = format_task_result(obj) + + # Should still render without crashing + self.assertIn("Kriging Result", html) + self.assertIn("Test Grid", html) + + def test_formats_task_result_checkmark(self): + """Test formatting a task result shows checkmark for success.""" + obj = self._create_mock_task_result() + + html = format_task_result(obj) + + self.assertIn("✓", html) + + def test_formats_task_result_target_row(self): + """Test formatting includes Target row.""" + obj = self._create_mock_task_result() + + html = format_task_result(obj) + + self.assertIn("Target:", html) + self.assertIn("Test Grid", html) + + def test_formats_task_result_schema_row(self): + """Test formatting includes Schema row.""" + obj = self._create_mock_task_result() + + html = format_task_result(obj) + + self.assertIn("Schema:", html) + + def test_formats_task_result_attribute_row(self): + """Test formatting includes Attribute row.""" + obj = self._create_mock_task_result() + + html = format_task_result(obj) + + self.assertIn("Attribute:", html) + + def test_formats_task_result_without_get_result_type_name(self): + """Test formatting a task result that doesn't have _get_result_type_name.""" + obj = self._create_mock_task_result() + del obj._get_result_type_name + + html = format_task_result(obj) + + # Should fall back to 
"Task" + self.assertIn("Task Result", html) + + +class TestFormatTaskResults(unittest.TestCase): + """Tests for the format_task_results function.""" + + def _create_mock_task_result(self, **kwargs): + """Create a mock TaskResult object.""" + defaults = { + "message": "Task completed successfully", + "target_name": "Test Grid", + "schema_type": "objects/regular-3d-grid/v1.0.0", + "attribute_name": "kriged_grade", + "target_reference": ( + "https://350mt.api.seequent.com/geoscience-object" + "/orgs/12345678-1234-1234-1234-123456789abc" + "/workspaces/87654321-4321-4321-4321-abcdef123456" + "/objects/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + ), + "result_type": "Kriging", + } + defaults.update(kwargs) + + obj = MagicMock() + obj.message = defaults["message"] + obj.target_name = defaults["target_name"] + obj.schema_type = defaults["schema_type"] + obj.attribute_name = defaults["attribute_name"] + obj._target = MagicMock() + obj._target.reference = defaults["target_reference"] + obj._get_result_type_name = MagicMock(return_value=defaults["result_type"]) + + return obj + + def test_formats_empty_results(self): + """Test formatting an empty results collection.""" + obj = MagicMock() + obj._results = [] + + html = format_task_results(obj) + + self.assertIn("No results", html) + + def test_formats_single_result(self): + """Test formatting a collection with one result.""" + result1 = self._create_mock_task_result(target_name="Grid 1", attribute_name="attr_1") + + obj = MagicMock() + obj._results = [result1] + + html = format_task_results(obj) + + self.assertIn("1 Kriging Results", html) + self.assertIn("Grid 1", html) + self.assertIn("attr_1", html) + self.assertIn("✓", html) + + def test_formats_multiple_results(self): + """Test formatting a collection with multiple results.""" + result1 = self._create_mock_task_result(target_name="Grid 1", attribute_name="attr_1") + result2 = self._create_mock_task_result(target_name="Grid 2", attribute_name="attr_2") + result3 = 
self._create_mock_task_result(target_name="Grid 3", attribute_name="attr_3") + + obj = MagicMock() + obj._results = [result1, result2, result3] + + html = format_task_results(obj) + + self.assertIn("3 Kriging Results", html) + self.assertIn("Grid 1", html) + self.assertIn("Grid 2", html) + self.assertIn("Grid 3", html) + self.assertIn("attr_1", html) + self.assertIn("attr_2", html) + self.assertIn("attr_3", html) + + def test_formats_results_with_table_headers(self): + """Test formatting includes proper table headers.""" + result1 = self._create_mock_task_result() + + obj = MagicMock() + obj._results = [result1] + + html = format_task_results(obj) + + self.assertIn("#", html) + self.assertIn("Target", html) + self.assertIn("Attribute", html) + self.assertIn("Schema", html) + self.assertIn("Link", html) + + def test_formats_results_with_portal_links(self): + """Test formatting includes portal links for each result.""" + result1 = self._create_mock_task_result(target_name="Grid 1") + result2 = self._create_mock_task_result(target_name="Grid 2") + + obj = MagicMock() + obj._results = [result1, result2] + + html = format_task_results(obj) + + # Should have portal links + self.assertIn("Portal", html) + self.assertIn("href=", html) + + def test_formats_results_without_portal_link(self): + """Test formatting handles results without references.""" + result1 = self._create_mock_task_result(target_reference=None) + + obj = MagicMock() + obj._results = [result1] + + html = format_task_results(obj) + + self.assertIn("N/A", html) + + def test_formats_results_row_numbers(self): + """Test formatting includes sequential row numbers.""" + result1 = self._create_mock_task_result(target_name="Grid 1") + result2 = self._create_mock_task_result(target_name="Grid 2") + + obj = MagicMock() + obj._results = [result1, result2] + + html = format_task_results(obj) + + # Row numbers + self.assertIn(">1<", html) + self.assertIn(">2<", html) + + +class 
TestGetTaskResultPortalUrl(unittest.TestCase): + """Tests for the _get_task_result_portal_url helper function.""" + + def test_extracts_portal_url_from_valid_reference(self): + """Test extracting portal URL from a valid object reference.""" + result = MagicMock() + result._target = MagicMock() + result._target.reference = ( + "https://350mt.api.seequent.com/geoscience-object" + "/orgs/12345678-1234-1234-1234-123456789abc" + "/workspaces/87654321-4321-4321-4321-abcdef123456" + "/objects/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + ) + + url = _get_task_result_portal_url(result) + + self.assertIsNotNone(url) + self.assertIn("evo.seequent.com", url) + + def test_returns_none_for_no_reference(self): + """Test returns None when target has no reference.""" + result = MagicMock() + result._target = MagicMock() + result._target.reference = None + + url = _get_task_result_portal_url(result) + + self.assertIsNone(url) + + def test_returns_none_for_invalid_reference(self): + """Test returns None for invalid reference URL.""" + result = MagicMock() + result._target = MagicMock() + result._target.reference = "not-a-valid-url" + + url = _get_task_result_portal_url(result) + + self.assertIsNone(url) + + def test_returns_none_when_no_target(self): + """Test returns None when result has no _target attribute.""" + result = MagicMock(spec=[]) # Empty spec means no attributes + + url = _get_task_result_portal_url(result) + + self.assertIsNone(url) + + def test_returns_none_for_non_string_reference(self): + """Test returns None when reference is not a string.""" + result = MagicMock() + result._target = MagicMock() + result._target.reference = 12345 # Not a string + + url = _get_task_result_portal_url(result) + + self.assertIsNone(url) + + def test_returns_none_for_empty_string_reference(self): + """Test returns None when reference is an empty string.""" + result = MagicMock() + result._target = MagicMock() + result._target.reference = "" + + url = _get_task_result_portal_url(result) + + 
self.assertIsNone(url) + + if __name__ == "__main__": unittest.main() diff --git a/pyproject.toml b/pyproject.toml index 626ae14a..f9cbfc62 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,16 +1,16 @@ [project] name = "evo-sdk" -version = "0.1.20" +version = "0.2.0" description = "Python SDK for using Seequent Evo" requires-python = ">=3.10" dependencies = [ - "evo-sdk-common[aiohttp,notebooks,jmespath]>=0.5.12", + "evo-sdk-common[aiohttp,notebooks,jmespath]>=0.5.19", "evo-widgets>=0.2.0", "evo-blockmodels[aiohttp,notebooks,pyarrow]>=0.2.0", "evo-objects[aiohttp,notebooks,utils]>=0.4.0", "evo-files[aiohttp,notebooks]>=0.2.3", "evo-colormaps[aiohttp,notebooks]>=0.0.2", - "evo-compute[aiohttp,notebooks]>=0.0.1rc2", + "evo-compute[aiohttp,notebooks]>=0.0.2", "jupyter", ] dynamic = ["readme"] diff --git a/uv.lock b/uv.lock index 5cf4cc7c..36e13c10 100644 --- a/uv.lock +++ b/uv.lock @@ -874,11 +874,12 @@ test = [ [[package]] name = "evo-compute" -version = "0.0.1rc3" +version = "0.0.2" source = { editable = "packages/evo-compute" } dependencies = [ { name = "evo-sdk-common" }, { name = "pydantic" }, + { name = "typing-extensions" }, ] [package.optional-dependencies] @@ -910,6 +911,7 @@ requires-dist = [ { name = "evo-sdk-common", extras = ["aiohttp"], marker = "extra == 'aiohttp'", editable = "packages/evo-sdk-common" }, { name = "evo-sdk-common", extras = ["notebooks"], marker = "extra == 'notebooks'", editable = "packages/evo-sdk-common" }, { name = "pydantic", specifier = ">=2" }, + { name = "typing-extensions", specifier = ">=4.0" }, ] provides-extras = ["aiohttp", "notebooks"] @@ -1079,7 +1081,7 @@ test = [ [[package]] name = "evo-sdk" -version = "0.1.20" +version = "0.2.0" source = { editable = "." 
} dependencies = [ { name = "evo-blockmodels", extra = ["aiohttp", "notebooks"] }, @@ -1144,7 +1146,7 @@ test = [ [[package]] name = "evo-sdk-common" -version = "0.5.18" +version = "0.5.19" source = { editable = "packages/evo-sdk-common" } dependencies = [ { name = "pure-interface" }, From 8204ddfda0e9f8c1df109edc35ee59d8774669be Mon Sep 17 00:00:00 2001 From: Denis Simo <52428161+denis-simo@users.noreply.github.com> Date: Fri, 27 Feb 2026 09:05:47 +1300 Subject: [PATCH 03/10] Updating dependencies, as compute now depends on typed objects for tests. --- packages/evo-compute/pyproject.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/evo-compute/pyproject.toml b/packages/evo-compute/pyproject.toml index 6b6f33ea..010c90df 100644 --- a/packages/evo-compute/pyproject.toml +++ b/packages/evo-compute/pyproject.toml @@ -9,6 +9,8 @@ authors = [ ] dependencies = [ + "evo-blockmodels[utils]", + "evo-objects[utils,blockmodels]", "evo-sdk-common", "pydantic>=2", "typing_extensions>=4.0", From 6efacb94fa1c56e8a1ab3492fca9fa8dd1ce5331 Mon Sep 17 00:00:00 2001 From: Denis Simo <52428161+denis-simo@users.noreply.github.com> Date: Fri, 27 Feb 2026 09:38:29 +1300 Subject: [PATCH 04/10] Fixed up documentation links --- mkdocs/docs/packages/evo-python-sdk.md | 83 +++++++++++--- .../evo-blockmodels/Introduction.html | 13 ++- .../packages/evo-compute/Introduction.html | 19 ++-- mkdocs/site/packages/evo-python-sdk.html | 102 +++++++++++++----- 4 files changed, 159 insertions(+), 58 deletions(-) diff --git a/mkdocs/docs/packages/evo-python-sdk.md b/mkdocs/docs/packages/evo-python-sdk.md index 66357424..774de02a 100644 --- a/mkdocs/docs/packages/evo-python-sdk.md +++ b/mkdocs/docs/packages/evo-python-sdk.md @@ -2,6 +2,47 @@ [GitHub repository](https://github.com/SeequentEvo/evo-python-sdk) +`evo-python-sdk` is designed for developers, data scientists, geologists, and geostatisticians who want to work with Seequent Evo APIs and geoscience data. 
+ +## Quick start for notebooks + +Once you have an Evo app registered and the SDK installed, you can load and work with geoscience objects in just a few lines of code: + +```python +# Authenticate with Evo +from evo.notebooks import ServiceManagerWidget + +manager = await ServiceManagerWidget.with_auth_code( + client_id="", + cache_location="./notebook-data", +).login() +``` + +```python +# Enable rich HTML display for Evo objects in Jupyter +%load_ext evo.widgets + +# Load an object by file path or UUID +from evo.objects.typed import object_from_uuid, object_from_path + +obj = await object_from_path(manager, "") + +# OR + +obj = await object_from_uuid(manager, "") +obj # Displays object info with links to Evo Portal and Viewer +``` + +```python +# Get data as a pandas DataFrame +df = await obj.to_dataframe() +df.head() +``` + +Typed objects like `PointSet`, `BlockModel`, and `Variogram` provide pretty-printed output in Jupyter with clickable links to view your data in Evo. As support for more geoscience objects is added, geologists and geostatisticians can interact with points, variograms, block models, grids, and more — all through intuitive Python classes. + +For a hands-on introduction, see the [simplified object interactions](https://github.com/SeequentEvo/evo-python-sdk/tree/main/code-samples/geoscience-objects/simplified-object-interactions/) notebook. For a complete geostatistical workflow including variogram modelling and kriging estimation, see the [running kriging compute](https://github.com/SeequentEvo/evo-python-sdk/tree/main/code-samples/geoscience-objects/running-kriging-compute/) notebook. 
+ ## Getting started with Evo code samples For detailed information about creating Evo apps, the authentication setup, available code samples, and step-by-step guides for working with the Jupyter notebooks, please refer to the [**Quick start guide**](https://developer.seequent.com/docs/guides/getting-started/quick-start-guide), or [**code-samples**](https://github.com/SeequentEvo/evo-python-sdk/tree/main/code-samples) section of the repository. @@ -16,15 +57,15 @@ sub-packages and optional dependencies (e.g. Jupyter notebook support), or choos | Package | Version | Description | | --- | --- | --- | | evo-sdk | PyPI - Version | A metapackage that installs all available Seequent Evo SDKs, including Jupyter notebook examples. | -| evo-sdk-common ([discovery](evo-python-sdk/evo-sdk-common/discovery) and [workspaces](evo-python-sdk/evo-sdk-common/workspaces)) | PyPI - Version | A shared library that provides common functionality for integrating with Seequent's client SDKs. | -| [evo-files](evo-python-sdk/evo-files) | PyPI - Version | A service client for interacting with the Evo File API. | -| [evo-objects](evo-python-sdk/evo-objects) | PyPI - Version | A geoscience object service client library designed to help get up and running with the Geoscience Object API. | -| [evo-colormaps](evo-python-sdk/evo-colormaps) | PyPI - Version | A service client to create colour mappings and associate them to geoscience data with the Colormap API.| -| [evo-blockmodels](evo-python-sdk/evo-blockmodels) | PyPI - Version | The Block Model API provides the ability to manage and report on block models in your Evo workspaces. | -| [evo-widgets](evo-python-sdk/evo-widgets) | PyPI - Version | Widgets and presentation layer — rich HTML rendering of typed geoscience objects in Jupyter notebooks. 
| -| [evo-compute](evo-python-sdk/evo-compute) | PyPI - Version | A service client to send jobs to the Compute Tasks API.| +| evo-sdk-common ([discovery](evo-sdk-common/discovery/DiscoveryAPIClient.md) and [workspaces](evo-sdk-common/workspaces/WorkspaceAPIClient.md)) | PyPI - Version | A shared library that provides common functionality for integrating with Seequent's client SDKs. | +| evo-files ([api](evo-files/FileAPIClient.md)) | PyPI - Version | A service client for interacting with the Evo File API. | +| evo-objects ([introduction](evo-objects/Introduction.md), [typed objects](evo-objects/TypedObjects.md), [api](evo-objects/ObjectAPIClient.md)) | PyPI - Version | Typed Python classes and an API client for geoscience objects — points, grids, variograms, and more. | +| evo-colormaps ([api](evo-colormaps/ColormapAPIClient.md)) | PyPI - Version | A service client to create colour mappings and associate them to geoscience data with the Colormap API.| +| evo-blockmodels ([introduction](evo-blockmodels/Introduction.md), [typed objects](evo-blockmodels/TypedObjects.md), [api](evo-blockmodels/BlockModelAPIClient.md)) | PyPI - Version | Typed block model interactions, reports, and an API client for managing block models in Evo. | +| evo-widgets ([introduction](evo-widgets/Introduction.md)) | PyPI - Version | Widgets and presentation layer — rich HTML rendering of typed geoscience objects in Jupyter notebooks. | +| evo-compute ([introduction](evo-compute/Introduction.md), [typed objects](evo-compute/TypedObjects.md), [api](evo-compute/JobClient.md)) | PyPI - Version | Run compute tasks (e.g. 
kriging estimation) via the Compute Tasks API.| -### Getting started +### Getting started with SDK development Now that you have installed the Evo SDK, you can get started by configuring your API connector, and performing a basic API call to list the organizations that you have access to: @@ -53,13 +94,21 @@ async def discovery(): asyncio.run(main()) ``` -For next steps and more information about using Evo, see: +For next steps, start with the packages most relevant to your workflow: + +**Getting started — typed objects & visualisation:** + +* [`evo-objects`](evo-objects/Introduction.md): load and work with points, grids, variograms, and other geoscience objects as typed Python classes +* [`evo-blockmodels`](evo-blockmodels/Introduction.md): create, query, and report on block models with typed interactions +* [`evo-compute`](evo-compute/Introduction.md): run compute tasks such as kriging estimation +* [`evo-widgets`](evo-widgets/Introduction.md): rich HTML rendering of typed geoscience objects in Jupyter notebooks + +**API clients [For developers]:** -* `evo-sdk-common` ([`discovery`](evo-python-sdk/evo-sdk-common/discovery) and [`workspaces`](evo-python-sdk/evo-sdk-common/workspaces)): providing the foundation for all Evo SDKs, as well as tools - for performing arbitrary Seequent Evo API requests -* [`evo-files`](evo-python-sdk/evo-files): for interacting with the File API -* [`evo-objects`](evo-python-sdk/evo-objects): for interacting with the Geoscience Object API -* [`evo-colormaps`](evo-python-sdk/evo-colormaps): for interacting with the Colormap API -* [`evo-blockmodels`](evo-python-sdk/evo-blockmodels): for interacting with the Block Model API -* [`evo-widgets`](evo-python-sdk/evo-widgets): for rich HTML rendering of typed geoscience objects in Jupyter notebooks -* [`evo-compute`](evo-python-sdk/evo-compute): for interacting with the Compute Tasks API +* `evo-sdk-common` ([`discovery`](evo-sdk-common/discovery/DiscoveryAPIClient.md) and 
[`workspaces`](evo-sdk-common/workspaces/WorkspaceAPIClient.md)): foundation for all Evo SDKs, including arbitrary API requests +* [`evo-files`](evo-files/FileAPIClient.md): low-level File API client +* [`evo-objects` API](evo-objects/ObjectAPIClient.md): low-level Geoscience Object API client +* [`evo-colormaps`](evo-colormaps/ColormapAPIClient.md): Colormap API client +* [`evo-blockmodels` API](evo-blockmodels/BlockModelAPIClient.md): low-level Block Model API client +* [`evo-compute` API](evo-compute/JobClient.md): low-level Compute Tasks API client +* [Seequent Developer Portal](https://developer.seequent.com/docs/guides/getting-started/quick-start-guide): guides, tutorials, and API references diff --git a/mkdocs/site/packages/evo-blockmodels/Introduction.html b/mkdocs/site/packages/evo-blockmodels/Introduction.html index c9442068..a0ac6b15 100644 --- a/mkdocs/site/packages/evo-blockmodels/Introduction.html +++ b/mkdocs/site/packages/evo-blockmodels/Introduction.html @@ -83,20 +83,19 @@

evo-blockmodels

GitHub source

The evo-blockmodels package provides both a low-level API client and typed Python classes for working with block models in Evo.

-

!!! tip "Using block models from typed objects" - The full functionality of evo-blockmodels — creating, retrieving, updating attributes, running reports — is accessible directly from the BlockModel object in evo.objects.typed. When evo-blockmodels is installed, BlockModel acts as a proxy and delegates data operations to the Block Model Service automatically.

-
```python
-from evo.objects.typed import object_from_path
+
+

Using block models from typed objects

+

The full functionality of evo-blockmodels — creating, retrieving, updating attributes, running reports — is accessible directly from the BlockModel object in evo.objects.typed. When evo-blockmodels is installed, BlockModel acts as a proxy and delegates data operations to the Block Model Service automatically.

+
from evo.objects.typed import object_from_path
 
 # Load any block model — full evo-blockmodels functionality is available
 bm = await object_from_path(manager, "my-folder/block-model")
 df = await bm.to_dataframe()
 await bm.add_attribute(data_df, "new_col")
 report = await bm.create_report(spec)
-```
-
-See the [evo-objects Introduction](../evo-objects/Introduction.md#blockmodel-via-evo-blockmodels) for the full API.
 
+

See the evo-objects Introduction for the full API.

+

See the Typed Objects page for the full typed API reference.

Typed Block Models

The typed module provides intuitive classes for creating, retrieving, and updating regular block models with pandas DataFrame support.

diff --git a/mkdocs/site/packages/evo-compute/Introduction.html b/mkdocs/site/packages/evo-compute/Introduction.html index d5803d8d..36aec429 100644 --- a/mkdocs/site/packages/evo-compute/Introduction.html +++ b/mkdocs/site/packages/evo-compute/Introduction.html @@ -188,14 +188,19 @@

How do I run ), ], preview=True)

-

!!! tip - If each task writes to a different attribute name, they can all run in parallel without refreshing — the compute service handles concurrent attribute creation on the same target object. See the multiple kriging notebook for an example.

-

!!! note "Preview APIs" - Kriging and other compute tasks are currently preview features. You must pass preview=True when calling run(). +

+

Tip

+

If each task writes to a different attribute name, they can all run in parallel without refreshing — the compute service handles concurrent attribute creation on the same target object. See the multiple kriging notebook for an example.

+
+
+

Preview APIs

+

Kriging and other compute tasks are currently preview features. You must pass preview=True when calling run(). Preview APIs may change between releases. For more details, see:

-
- [Preview APIs](https://developer.seequent.com/docs/api/fundamentals/preview-apis) — how to opt in and what to expect
-- [API Lifecycle](https://developer.seequent.com/docs/api/fundamentals/lifecycle) — how Evo APIs evolve from preview to stable
-
+ + diff --git a/mkdocs/site/packages/evo-python-sdk.html b/mkdocs/site/packages/evo-python-sdk.html index 98d5a9e8..e1455b15 100644 --- a/mkdocs/site/packages/evo-python-sdk.html +++ b/mkdocs/site/packages/evo-python-sdk.html @@ -4,9 +4,9 @@ - - - + + + evo-python-sdk - Evo Python SDK @@ -19,7 +19,7 @@ - + @@ -41,7 +41,7 @@ @@ -59,12 +59,16 @@ - + @@ -232,3 +279,4 @@ + From ee0ad5482baed4fafe22d56c93ddc857f1b7b983 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 26 Feb 2026 20:39:28 +0000 Subject: [PATCH 05/10] Update to MkDocs documentation --- .../evo-blockmodels/Introduction.html | 13 ++++---- .../packages/evo-compute/Introduction.html | 19 +++++------- mkdocs/site/packages/evo-python-sdk.html | 31 +++++++------------ uv.lock | 4 +++ 4 files changed, 30 insertions(+), 37 deletions(-) diff --git a/mkdocs/site/packages/evo-blockmodels/Introduction.html b/mkdocs/site/packages/evo-blockmodels/Introduction.html index a0ac6b15..c9442068 100644 --- a/mkdocs/site/packages/evo-blockmodels/Introduction.html +++ b/mkdocs/site/packages/evo-blockmodels/Introduction.html @@ -83,19 +83,20 @@

evo-blockmodels

GitHub source

The evo-blockmodels package provides both a low-level API client and typed Python classes for working with block models in Evo.

-
-

Using block models from typed objects

-

The full functionality of evo-blockmodels — creating, retrieving, updating attributes, running reports — is accessible directly from the BlockModel object in evo.objects.typed. When evo-blockmodels is installed, BlockModel acts as a proxy and delegates data operations to the Block Model Service automatically.

-
from evo.objects.typed import object_from_path
+

!!! tip "Using block models from typed objects" + The full functionality of evo-blockmodels — creating, retrieving, updating attributes, running reports — is accessible directly from the BlockModel object in evo.objects.typed. When evo-blockmodels is installed, BlockModel acts as a proxy and delegates data operations to the Block Model Service automatically.

+
```python
+from evo.objects.typed import object_from_path
 
 # Load any block model — full evo-blockmodels functionality is available
 bm = await object_from_path(manager, "my-folder/block-model")
 df = await bm.to_dataframe()
 await bm.add_attribute(data_df, "new_col")
 report = await bm.create_report(spec)
+```
+
+See the [evo-objects Introduction](../evo-objects/Introduction.md#blockmodel-via-evo-blockmodels) for the full API.
 
-

See the evo-objects Introduction for the full API.

-

See the Typed Objects page for the full typed API reference.

Typed Block Models

The typed module provides intuitive classes for creating, retrieving, and updating regular block models with pandas DataFrame support.

diff --git a/mkdocs/site/packages/evo-compute/Introduction.html b/mkdocs/site/packages/evo-compute/Introduction.html index 36aec429..d5803d8d 100644 --- a/mkdocs/site/packages/evo-compute/Introduction.html +++ b/mkdocs/site/packages/evo-compute/Introduction.html @@ -188,19 +188,14 @@

How do I run ), ], preview=True) -
-

Tip

-

If each task writes to a different attribute name, they can all run in parallel without refreshing — the compute service handles concurrent attribute creation on the same target object. See the multiple kriging notebook for an example.

-
-
-

Preview APIs

-

Kriging and other compute tasks are currently preview features. You must pass preview=True when calling run(). +

!!! tip + If each task writes to a different attribute name, they can all run in parallel without refreshing — the compute service handles concurrent attribute creation on the same target object. See the multiple kriging notebook for an example.

+

!!! note "Preview APIs" + Kriging and other compute tasks are currently preview features. You must pass preview=True when calling run(). Preview APIs may change between releases. For more details, see:

- -
+
- [Preview APIs](https://developer.seequent.com/docs/api/fundamentals/preview-apis) — how to opt in and what to expect
+- [API Lifecycle](https://developer.seequent.com/docs/api/fundamentals/lifecycle) — how Evo APIs evolve from preview to stable
+
diff --git a/mkdocs/site/packages/evo-python-sdk.html b/mkdocs/site/packages/evo-python-sdk.html index e1455b15..8da6562b 100644 --- a/mkdocs/site/packages/evo-python-sdk.html +++ b/mkdocs/site/packages/evo-python-sdk.html @@ -4,9 +4,9 @@ - - - + + + evo-python-sdk - Evo Python SDK @@ -19,7 +19,7 @@ - + @@ -41,7 +41,7 @@ @@ -59,10 +59,10 @@ - + @@ -88,38 +92,6 @@

evo-python-sdk

GitHub repository

evo-python-sdk is designed for developers, data scientists, geologists, and geostatisticians who want to work with Seequent Evo APIs and geoscience data.

-

Quick start for notebooks

-

Once you have an Evo app registered and the SDK installed, you can load and work with geoscience objects in just a few lines of code:

-
# Authenticate with Evo
-from evo.notebooks import ServiceManagerWidget
-
-manager = await ServiceManagerWidget.with_auth_code(
-    client_id="<your-client-id>",
-    cache_location="./notebook-data",
-).login()
-
- -
# Enable rich HTML display for Evo objects in Jupyter
-%load_ext evo.widgets
-
-# Load an object by file path or UUID
-from evo.objects.typed import object_from_uuid, object_from_path
-
-obj = await object_from_path(manager, "<your-object-path>")
-
-# OR
-
-obj = await object_from_uuid(manager, "<your-object-uuid>")
-obj  # Displays object info with links to Evo Portal and Viewer
-
- -
# Get data as a pandas DataFrame
-df = await obj.to_dataframe()
-df.head()
-
- -

Typed objects like PointSet, BlockModel, and Variogram provide pretty-printed output in Jupyter with clickable links to view your data in Evo. As support for more geoscience objects is added, geologists and geostatisticians can interact with points, variograms, block models, grids, and more — all through intuitive Python classes.

-

For a hands-on introduction, see the simplified object interactions notebook. For a complete geostatistical workflow including variogram modelling and kriging estimation, see the running kriging compute notebook.

Getting started with Evo code samples

For detailed information about creating Evo apps, the authentication setup, available code samples, and step-by-step guides for working with the Jupyter notebooks, please refer to the Quick start guide, or code-samples section of the repository.

@@ -179,6 +151,40 @@

Getting started with the Evo SDK

+ +

Quick start for notebooks

+

Once you have an Evo app registered and the SDK installed, you can load and work with geoscience objects in just a few lines of code:

+
# Authenticate with Evo
+from evo.notebooks import ServiceManagerWidget
+
+manager = await ServiceManagerWidget.with_auth_code(
+    client_id="<your-client-id>",
+    cache_location="./notebook-data",
+).login()
+
+ +
# Enable rich HTML display for Evo objects in Jupyter
+%load_ext evo.widgets
+
+# Load an object by file path or UUID
+from evo.objects.typed import object_from_uuid, object_from_path
+
+obj = await object_from_path(manager, "<your-object-path>")
+
+# OR
+
+obj = await object_from_uuid(manager, "<your-object-uuid>")
+obj  # Displays object info with links to Evo Portal and Viewer
+
+ +
# Get data as a pandas DataFrame
+df = await obj.to_dataframe()
+df.head()
+
+ +

Typed objects like PointSet, BlockModel, and Variogram provide pretty-printed output in Jupyter with clickable links to view your data in Evo. As support for more geoscience objects is added, geologists and geostatisticians can interact with points, variograms, block models, grids, and more — all through intuitive Python classes.

+

For a hands-on introduction, see the simplified object interactions notebook. For a complete geostatistical workflow including variogram modelling and kriging estimation, see the running kriging compute notebook.

+

Getting started with SDK development

Now that you have installed the Evo SDK, you can get started by configuring your API connector, and performing a basic API call to list the organizations that you have access to:

From 0e93bf9344ea1179f3d61c310fd75753d7a3ad41 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 26 Feb 2026 20:46:02 +0000 Subject: [PATCH 07/10] Update to MkDocs documentation --- mkdocs/site/packages/evo-python-sdk.html | 41 +++++++----------------- 1 file changed, 12 insertions(+), 29 deletions(-) diff --git a/mkdocs/site/packages/evo-python-sdk.html b/mkdocs/site/packages/evo-python-sdk.html index d435ad38..724de352 100644 --- a/mkdocs/site/packages/evo-python-sdk.html +++ b/mkdocs/site/packages/evo-python-sdk.html @@ -4,9 +4,9 @@ - - - + + + evo-python-sdk - Evo Python SDK @@ -19,7 +19,7 @@ - + @@ -41,7 +41,7 @@ @@ -59,10 +59,10 @@ - +