diff --git a/mkdocs/site/packages/evo-widgets/Introduction.html b/mkdocs/site/packages/evo-widgets/Introduction.html
new file mode 100644
index 00000000..dd82a8be
--- /dev/null
+++ b/mkdocs/site/packages/evo-widgets/Introduction.html
@@ -0,0 +1,189 @@
+
+
+
+
+
+
+
+
+
+
+
evo-widgets - Evo Python SDK
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
GitHub source
+
Widgets and presentation layer for the Evo Python SDK — HTML rendering, URL generation, and IPython formatters for Jupyter notebooks.
+
Usage
+
Load the IPython extension in your notebook to enable rich HTML rendering for all Evo SDK typed objects:
+
%load_ext evo.widgets
+
+
After loading, typed objects like PointSet, Regular3DGrid, TensorGrid, and BlockModel will automatically render with formatted metadata tables, clickable Portal/Viewer links, and bounding box information.
+
URL Functions
+
Generate URLs to view objects in the Evo Portal and Viewer:
+
from evo.widgets import (
+ get_portal_url_for_object,
+ get_viewer_url_for_object,
+ get_viewer_url_for_objects,
+)
+
+# Get Portal URL for a single object
+portal_url = get_portal_url_for_object(grid)
+
+# Get Viewer URL for a single object
+viewer_url = get_viewer_url_for_object(grid)
+
+# View multiple objects together in the Viewer
+url = get_viewer_url_for_objects(manager, [grid, pointset, tensor_grid])
+
+
+
Rich HTML representations for all typed geoscience objects:
+
+PointSet, Regular3DGrid, TensorGrid, BlockModel
+Variogram
+Attributes collections
+Report and ReportResult
+TaskResult and TaskResults (compute results)
+
+
All formatters are registered automatically when you load the extension with %load_ext evo.widgets. They support light/dark mode via Jupyter theme CSS variables.
+
How It Works
+
When you run %load_ext evo.widgets, the extension registers HTML formatters with IPython using for_type_by_name. This approach:
+
+Avoids hard dependencies — The widgets package doesn't import model classes directly
+Works with all typed objects — Formatters are registered for the base class, so all subclasses are covered
+Lazy loading — Formatters only activate when the relevant types are actually used
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Keys
+ Action
+
+
+
+
+ ?
+ Open this help
+
+
+ n
+ Next page
+
+
+ p
+ Previous page
+
+
+ s
+ Search
+
+
+
+
+
+
+
+
+
+
+
diff --git a/mkdocs/typed_objects.txt b/mkdocs/typed_objects.txt
new file mode 100644
index 00000000..e32a773e
--- /dev/null
+++ b/mkdocs/typed_objects.txt
@@ -0,0 +1,39 @@
+packages.evo-objects.src.evo.objects.typed.base.object_from_path
+packages.evo-objects.src.evo.objects.typed.base.object_from_uuid
+packages.evo-objects.src.evo.objects.typed.base.object_from_reference
+packages.evo-objects.src.evo.objects.typed.pointset.PointSet
+packages.evo-objects.src.evo.objects.typed.pointset.PointSetData
+packages.evo-objects.src.evo.objects.typed.regular_grid.Regular3DGrid
+packages.evo-objects.src.evo.objects.typed.regular_grid.Regular3DGridData
+packages.evo-objects.src.evo.objects.typed.regular_masked_grid.RegularMasked3DGrid
+packages.evo-objects.src.evo.objects.typed.regular_masked_grid.RegularMasked3DGridData
+packages.evo-objects.src.evo.objects.typed.tensor_grid.Tensor3DGrid
+packages.evo-objects.src.evo.objects.typed.tensor_grid.Tensor3DGridData
+packages.evo-objects.src.evo.objects.typed.variogram.Variogram
+packages.evo-objects.src.evo.objects.typed.variogram.VariogramData
+packages.evo-objects.src.evo.objects.typed.block_model_ref.BlockModel
+packages.evo-objects.src.evo.objects.typed.attributes.Attributes
+packages.evo-objects.src.evo.objects.typed.attributes.Attribute
+packages.evo-objects.src.evo.objects.typed.types.BoundingBox
+packages.evo-blockmodels.src.evo.blockmodels.typed.regular_block_model.RegularBlockModel
+packages.evo-blockmodels.src.evo.blockmodels.typed.regular_block_model.RegularBlockModelData
+packages.evo-blockmodels.src.evo.blockmodels.typed.report.Report
+packages.evo-blockmodels.src.evo.blockmodels.typed.report.ReportSpecificationData
+packages.evo-blockmodels.src.evo.blockmodels.typed.report.ReportResult
+packages.evo-blockmodels.src.evo.blockmodels.typed.report.ReportColumnSpec
+packages.evo-blockmodels.src.evo.blockmodels.typed.report.ReportCategorySpec
+packages.evo-blockmodels.src.evo.blockmodels.typed.report.Aggregation
+packages.evo-blockmodels.src.evo.blockmodels.typed.units.Units
+packages.evo-blockmodels.src.evo.blockmodels.typed.units.UnitInfo
+packages.evo-compute.src.evo.compute.tasks.kriging.KrigingParameters
+packages.evo-compute.src.evo.compute.tasks.kriging.SimpleKriging
+packages.evo-compute.src.evo.compute.tasks.kriging.OrdinaryKriging
+packages.evo-compute.src.evo.compute.tasks.kriging.BlockDiscretisation
+packages.evo-compute.src.evo.compute.tasks.kriging.RegionFilter
+packages.evo-compute.src.evo.compute.tasks.kriging.KrigingResult
+packages.evo-compute.src.evo.compute.tasks.common.results.TaskResult
+packages.evo-compute.src.evo.compute.tasks.common.results.TaskResults
+packages.evo-compute.src.evo.compute.tasks.common.search.SearchNeighborhood
+packages.evo-compute.src.evo.compute.tasks.common.source_target.Source
+packages.evo-compute.src.evo.compute.tasks.common.source_target.Target
+
diff --git a/packages/evo-compute/docs/examples/kriging.ipynb b/packages/evo-compute/docs/examples/kriging.ipynb
new file mode 100644
index 00000000..83b4f482
--- /dev/null
+++ b/packages/evo-compute/docs/examples/kriging.ipynb
@@ -0,0 +1,585 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "3fd8937c5ca952ad",
+ "metadata": {},
+ "source": [
+ "# Kriging Compute Task\n",
+ "\n",
+ "This notebook demonstrates how to run kriging compute tasks using the `evo-compute` package.\n",
+ "\n",
+ "Kriging is a geostatistical interpolation technique that estimates values at unsampled locations\n",
+ "using weighted averages of nearby known values, based on a variogram model of spatial correlation."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "16a20624c61f0a8c",
+ "metadata": {},
+ "source": [
+ "## Authentication\n",
+ "\n",
+ "First, authenticate using the `ServiceManagerWidget`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "35b04526197d68a9",
+ "metadata": {},
+ "source": [
+ "from evo.notebooks import ServiceManagerWidget\n",
+ "\n",
+ "manager = await ServiceManagerWidget.with_auth_code(\n",
+ " client_id=\"your-client-id\", cache_location=\"./notebook-data\"\n",
+ ").login()"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "metadata": {},
+ "cell_type": "code",
+ "source": [
+ "# Load the widgets extension for rich HTML display\n",
+ "%load_ext evo.widgets"
+ ],
+ "id": "4877a0a895adf9d1",
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f371e3b7f89aff3f",
+ "metadata": {},
+ "source": [
+ "## Example 1: Run Kriging on Existing Objects\n",
+ "\n",
+ "This example shows how to run kriging using existing geoscience objects (source pointset, target grid, and variogram)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7ab0cc0f0b9d79ed",
+ "metadata": {},
+ "source": "### Load the Source PointSet, Target Grid, and Variogram"
+ },
+ {
+ "cell_type": "code",
+ "id": "fe9afe5aac33694b",
+ "metadata": {},
+ "source": [
+ "from evo.objects.typed import object_from_uuid\n",
+ "\n",
+ "# Load objects by UUID (replace with your actual UUIDs)\n",
+ "source_pointset = await object_from_uuid(manager, \"9100d7dc-44e9-4e61-b427-159635dea22f\")\n",
+ "# Alternative: load by path\n",
+ "# source_pointset = await object_from_path(manager, \"path/to/pointset.json\")\n",
+ "\n",
+ "target_grid = await object_from_uuid(manager, \"df9c3705-c82e-4f57-af94-b3346b5d58cf\")\n",
+ "# Alternative: load by path\n",
+ "# target_grid = await object_from_path(manager, \"path/to/grid.json\")\n",
+ "\n",
+ "variogram = await object_from_uuid(manager, \"72cd9b83-90f4-4cb0-9691-95728e3f9cbb\")\n",
+ "# Alternative: load by path\n",
+ "# variogram = await object_from_path(manager, \"path/to/variogram.json\")"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "3aeeda68814e89b9",
+ "metadata": {},
+ "source": [
+ "# Pretty-print the source pointset (includes Portal/Viewer links)\n",
+ "source_pointset"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "ed2f2f59ebe818e5",
+ "metadata": {},
+ "source": [
+ "# View the source pointset attributes\n",
+ "source_pointset.attributes"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "c35f77a13fd03c22",
+ "metadata": {},
+ "source": [
+ "# Pretty-print the target grid\n",
+ "target_grid"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "887e895070f8c74d",
+ "metadata": {},
+ "source": [
+ "# Pretty-print the variogram\n",
+ "variogram"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9700289296c85afb",
+ "metadata": {},
+ "source": "### Run Kriging Task"
+ },
+ {
+ "cell_type": "code",
+ "id": "49a9cec09a87b702",
+ "metadata": {},
+ "source": [
+ "from evo.compute.tasks import SearchNeighborhood, run\n",
+ "from evo.compute.tasks.kriging import KrigingParameters\n",
+ "\n",
+ "# Get ellipsoid from the variogram structure with largest range (default)\n",
+ "var_ell = variogram.get_ellipsoid()\n",
+ "\n",
+ "# Create search ellipsoid by scaling the variogram ellipsoid by 2x\n",
+ "search_ellipsoid = var_ell.scaled(2.0)"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "332fd7c8e06a7fd6",
+ "metadata": {},
+ "source": [
+ "# Visualize variogram and search ellipsoids with pointset data\n",
+ "import plotly.graph_objects as go\n",
+ "\n",
+ "# Get pointset data for center calculation and scatter plot\n",
+ "pts = await source_pointset.to_dataframe()\n",
+ "center = (pts[\"x\"].mean(), pts[\"y\"].mean(), pts[\"z\"].mean())\n",
+ "\n",
+ "# Generate mesh surface points for visualization\n",
+ "vx, vy, vz = var_ell.surface_points(center=center)\n",
+ "sx, sy, sz = search_ellipsoid.surface_points(center=center)\n",
+ "\n",
+ "# Build visualization\n",
+ "var_mesh = go.Mesh3d(x=vx, y=vy, z=vz, alphahull=0, opacity=0.3, color=\"blue\", name=\"Variogram Ellipsoid\")\n",
+ "search_mesh = go.Mesh3d(x=sx, y=sy, z=sz, alphahull=0, opacity=0.2, color=\"gold\", name=\"Search Ellipsoid (2x)\")\n",
+ "scatter = go.Scatter3d(\n",
+ " x=pts[\"x\"],\n",
+ " y=pts[\"y\"],\n",
+ " z=pts[\"z\"],\n",
+ " mode=\"markers\",\n",
+ " marker=dict(size=2, color=pts[\"Ag_ppm Values\"], colorscale=\"Viridis\", showscale=True),\n",
+ " name=\"Sample Points\",\n",
+ ")\n",
+ "\n",
+ "fig = go.Figure(data=[var_mesh, search_mesh, scatter])\n",
+ "fig.update_layout(title=\"Kriging Inputs: Variogram & Search Ellipsoids\", scene=dict(aspectmode=\"data\"), showlegend=True)\n",
+ "fig.show()"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "7f375586017fb524",
+ "metadata": {},
+ "source": [
+ "# Create kriging parameters\n",
+ "# Note: method defaults to ordinary kriging, so we don't need to specify it\n",
+ "kriging_params = KrigingParameters(\n",
+ " source=source_pointset.attributes[\"Ag_ppm Values\"],\n",
+ " target=target_grid.attributes[\"kriged_grade 5\"],\n",
+ " variogram=variogram,\n",
+ " search=SearchNeighborhood(\n",
+ " ellipsoid=search_ellipsoid,\n",
+ " max_samples=20,\n",
+ " ),\n",
+ ")\n",
+ "\n",
+ "# Run the kriging task (progress feedback is shown by default)\n",
+ "print(\"Submitting kriging task...\")\n",
+ "result = await run(manager, kriging_params, preview=True)\n",
+ "\n",
+ "# Display the kriging result (pretty-printed)\n",
+ "result"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "e984ded1ca23d299",
+ "metadata": {},
+ "source": [
+ "# Get the data directly as a DataFrame (simplest approach)\n",
+ "df = await result.to_dataframe()\n",
+ "print(f\"Retrieved {len(df)} rows\")\n",
+ "df.head()"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "9ab8a80fe9a8ef02",
+ "metadata": {},
+ "source": [
+ "# Or load the target object for more control\n",
+ "target_grid = await result.get_target_object()\n",
+ "\n",
+ "# Pretty-print the updated target grid\n",
+ "target_grid"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "c2ad59a5687ef8e0",
+ "metadata": {},
+ "source": [
+ "# Or get the data as a DataFrame directly\n",
+ "df = await result.to_dataframe()\n",
+ "print(f\"Retrieved {len(df)} cells\")\n",
+ "df.head()"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8a4093575dfd0d7e",
+ "metadata": {},
+ "source": [
+ "## Example 2: Create Objects and Run Kriging\n",
+ "\n",
+ "This example shows how to create the input objects from scratch and then run kriging."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "fa713815e4d6dabe",
+ "metadata": {},
+ "source": "### Create the Source PointSet"
+ },
+ {
+ "cell_type": "code",
+ "id": "3b8e81ed282ffd14",
+ "metadata": {},
+ "source": [
+ "import uuid\n",
+ "\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "from evo.objects.typed import EpsgCode, PointSet, PointSetData\n",
+ "\n",
+ "# Generate sample point data\n",
+ "n_points = 100\n",
+ "np.random.seed(42)\n",
+ "\n",
+ "# Create points in a 1000x1000x100 domain\n",
+ "x = np.random.uniform(0, 1000, n_points)\n",
+ "y = np.random.uniform(0, 1000, n_points)\n",
+ "z = np.random.uniform(0, 100, n_points)\n",
+ "\n",
+ "# Create an elevation attribute (z + some noise)\n",
+ "elevation = z + np.random.normal(0, 5, n_points)\n",
+ "\n",
+ "# Create pointset using PointSetData\n",
+ "pointset_data = PointSetData(\n",
+ " name=f\"Sample Source Points - {uuid.uuid4()}\",\n",
+ " coordinate_reference_system=EpsgCode(32632),\n",
+ " locations=pd.DataFrame({\"x\": x, \"y\": y, \"z\": z, \"elevation\": elevation}),\n",
+ ")\n",
+ "\n",
+ "# Create the pointset object\n",
+ "source_pointset_created = await PointSet.create(manager, pointset_data)\n",
+ "\n",
+ "print(f\"Created source pointset: {source_pointset_created.name}\")"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "1c6e9dad1ca4c4c5",
+ "metadata": {},
+ "source": [
+ "# Pretty-print the created pointset (includes Portal/Viewer links)\n",
+ "source_pointset_created"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "ba4715009b908d2e",
+ "metadata": {},
+ "source": [
+ "# View the created pointset attributes\n",
+ "source_pointset_created.attributes"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a29c16aecbcf54ab",
+ "metadata": {},
+ "source": [
+ "### Create a Variogram\n",
+ "\n",
+ "Create a variogram using the typed `Variogram` class."
+ ]
+ },
+ {
+ "metadata": {},
+ "cell_type": "code",
+ "outputs": [],
+ "execution_count": null,
+ "source": [
+ "from evo.objects.typed import (\n",
+ " Ellipsoid,\n",
+ " EllipsoidRanges,\n",
+ " Rotation,\n",
+ " SphericalStructure,\n",
+ " Variogram,\n",
+ " VariogramData,\n",
+ ")\n",
+ "\n",
+ "# Define a spherical variogram model using typed classes\n",
+ "variogram_data = VariogramData(\n",
+ " name=f\"Sample Variogram - {uuid.uuid4()}\",\n",
+ " sill=1.0,\n",
+ " nugget=0.1,\n",
+ " is_rotation_fixed=True,\n",
+ " modelling_space=\"data\", # Required for kriging\n",
+ " data_variance=1.0, # Should match sill for non-normalized data\n",
+ " structures=[\n",
+ " SphericalStructure(\n",
+ " contribution=0.9,\n",
+ " anisotropy=Ellipsoid(\n",
+ " ranges=EllipsoidRanges(major=200.0, semi_major=200.0, minor=100.0),\n",
+ " rotation=Rotation(dip_azimuth=10.0, dip=20.0, pitch=30.0),\n",
+ " ),\n",
+ " )\n",
+ " ],\n",
+ " attribute=\"elevation\",\n",
+ " domain=\"all\",\n",
+ ")\n",
+ "\n",
+ "variogram_created = await Variogram.create(manager, variogram_data)\n",
+ "\n",
+ "print(f\"Created variogram: {variogram_created.name}\")"
+ ],
+ "id": "cd6189c97bbbf5ac"
+ },
+ {
+ "metadata": {},
+ "cell_type": "code",
+ "outputs": [],
+ "execution_count": null,
+ "source": [
+ "# Pretty-print the created variogram\n",
+ "variogram_created"
+ ],
+ "id": "449afd3af83c5914"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "401d111e4247a9bd",
+ "metadata": {},
+ "source": [
+ "### Visualize the Variogram\n",
+ "\n",
+ "Use the variogram visualization tool to inspect the directional variogram curves and anisotropy ellipsoids.\n",
+ "This helps verify that the variogram parameters are correct before running kriging."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "151352364fe3b6ff",
+ "metadata": {},
+ "source": [
+ "# Visualize variogram and search ellipsoids with pointset data\n",
+ "import plotly.graph_objects as go\n",
+ "\n",
+ "# Get pointset data for center calculation and scatter plot\n",
+ "pts = await source_pointset_created.to_dataframe()\n",
+ "center = (pts[\"x\"].mean(), pts[\"y\"].mean(), pts[\"z\"].mean())\n",
+ "\n",
+ "# Create ellipsoid from variogram (first structure)\n",
+ "var_ell = variogram_created.get_ellipsoid()\n",
+ "vx, vy, vz = var_ell.surface_points(center=center)\n",
+ "\n",
+ "# Create search ellipsoid scaled by 2x\n",
+ "search_ell = var_ell.scaled(2.0)\n",
+ "sx, sy, sz = search_ell.surface_points(center=center)\n",
+ "\n",
+ "# Build visualization\n",
+ "var_mesh = go.Mesh3d(x=vx, y=vy, z=vz, alphahull=0, opacity=0.3, color=\"blue\", name=\"Variogram Ellipsoid\")\n",
+ "search_mesh = go.Mesh3d(x=sx, y=sy, z=sz, alphahull=0, opacity=0.2, color=\"gold\", name=\"Search Ellipsoid (2x)\")\n",
+ "scatter = go.Scatter3d(\n",
+ " x=pts[\"x\"],\n",
+ " y=pts[\"y\"],\n",
+ " z=pts[\"z\"],\n",
+ " mode=\"markers\",\n",
+ " marker=dict(size=2, color=pts[\"elevation\"], colorscale=\"Viridis\", showscale=True),\n",
+ " name=\"Sample Points\",\n",
+ ")\n",
+ "\n",
+ "fig = go.Figure(data=[var_mesh, search_mesh, scatter])\n",
+ "fig.update_layout(title=\"Kriging Inputs: Variogram & Search Ellipsoids\", scene=dict(aspectmode=\"data\"), showlegend=True)\n",
+ "fig.show()"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2223118aa75b50bb",
+ "metadata": {},
+ "source": "### Create the Target Grid"
+ },
+ {
+ "cell_type": "code",
+ "id": "66f708dfcd8c3f6f",
+ "metadata": {},
+ "source": [
+ "from evo.objects.typed import Point3, RegularMasked3DGrid, RegularMasked3DGridData, Size3d, Size3i\n",
+ "from evo.objects.typed import Rotation as GridRotation\n",
+ "\n",
+ "# Define grid dimensions\n",
+ "nx, ny, nz = 20, 20, 10 # Number of cells in each direction\n",
+ "cell_size = 50.0 # Size of each cell\n",
+ "\n",
+ "# Create a mask for the grid (all cells active in this example)\n",
+ "total_cells = nx * ny * nz\n",
+ "mask = np.ones(total_cells, dtype=bool)\n",
+ "\n",
+ "# Optionally, mask out some cells to create a more interesting shape\n",
+ "for zi in range(nz // 2):\n",
+ " for yi in range(ny // 2):\n",
+ " for xi in range(nx // 2):\n",
+ " idx = xi + yi * nx + zi * nx * ny\n",
+ " mask[idx] = False\n",
+ "\n",
+ "# Create masked grid using RegularMasked3DGridData\n",
+ "grid_data = RegularMasked3DGridData(\n",
+ " name=f\"Target Masked Grid - {uuid.uuid4()}\",\n",
+ " coordinate_reference_system=EpsgCode(32632),\n",
+ " origin=Point3(0, 0, 0),\n",
+ " size=Size3i(nx, ny, nz),\n",
+ " cell_size=Size3d(cell_size, cell_size, cell_size),\n",
+ " rotation=GridRotation(0, 0, 0),\n",
+ " mask=mask,\n",
+ " cell_data=None, # No attributes yet, kriging will add them\n",
+ ")\n",
+ "\n",
+ "# Create the grid object\n",
+ "target_grid_created = await RegularMasked3DGrid.create(manager, grid_data)\n",
+ "\n",
+ "print(f\"Created target grid: {target_grid_created.name}\")\n",
+ "print(f\" Total cells: {nx} x {ny} x {nz} = {total_cells}\")\n",
+ "print(f\" Active cells: {int(mask.sum())}\")\n",
+ "print(f\" Bounding box: {target_grid_created.bounding_box}\")"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "64fec5ce612b7d75",
+ "metadata": {},
+ "source": [
+ "# Pretty-print the created grid (includes Portal/Viewer links)\n",
+ "target_grid_created"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3c9f833f2804b56e",
+ "metadata": {},
+ "source": "### Run Kriging on Created Objects"
+ },
+ {
+ "metadata": {},
+ "cell_type": "code",
+ "outputs": [],
+ "execution_count": null,
+ "source": [
+ "from evo.objects.typed import Ellipsoid, EllipsoidRanges, Rotation\n",
+ "\n",
+ "from evo.compute.tasks import SearchNeighborhood, Target, run\n",
+ "from evo.compute.tasks.kriging import KrigingParameters\n",
+ "\n",
+ "# Create kriging parameters using typed Attribute access\n",
+ "# source_pointset_created.locations.attributes[\"elevation\"] gives us an Attribute object\n",
+ "kriging_params = KrigingParameters(\n",
+ " source=source_pointset_created.locations.attributes[\"elevation\"],\n",
+ " target=Target.new_attribute(target_grid_created, attribute_name=\"kriged_elevation2\"),\n",
+ " variogram=variogram_created,\n",
+ " search=SearchNeighborhood(\n",
+ " ellipsoid=Ellipsoid(\n",
+ " ranges=EllipsoidRanges(major=134.0, semi_major=90.0, minor=40.0),\n",
+ " rotation=Rotation(dip_azimuth=100.0, dip=65.0, pitch=75.0),\n",
+ " ),\n",
+ " max_samples=20,\n",
+ " ),\n",
+ ")\n",
+ "\n",
+ "# Run the kriging task\n",
+ "print(\"Submitting kriging task...\")\n",
+ "result = await run(manager, kriging_params, preview=True)\n",
+ "\n",
+ "print(\"Task completed!\")"
+ ],
+ "id": "4d085c0c9c1f30e4"
+ },
+ {
+ "metadata": {},
+ "cell_type": "code",
+ "outputs": [],
+ "execution_count": null,
+ "source": [
+ "# Display the kriging result (pretty-printed)\n",
+ "result"
+ ],
+ "id": "acba8b49cfbcc650"
+ },
+ {
+ "cell_type": "code",
+ "id": "b23eaf4887cd4de1",
+ "metadata": {},
+ "source": [
+ "# Get the data directly as a DataFrame\n",
+ "df = await result.to_dataframe()\n",
+ "print(f\"Retrieved {len(df)} cells with kriged values\")\n",
+ "df.head()"
+ ],
+ "outputs": [],
+ "execution_count": null
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/packages/evo-compute/docs/examples/kriging_multiple.ipynb b/packages/evo-compute/docs/examples/kriging_multiple.ipynb
new file mode 100644
index 00000000..c73e9f28
--- /dev/null
+++ b/packages/evo-compute/docs/examples/kriging_multiple.ipynb
@@ -0,0 +1,755 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "0",
+ "metadata": {},
+ "source": [
+ "# Running Multiple Kriging Tasks\n",
+ "\n",
+ "This notebook demonstrates how to run multiple kriging compute tasks concurrently\n",
+ "using the `run` function with a list of parameters. This is useful for:\n",
+ "\n",
+ "- Scenario analysis with different parameters\n",
+ "- Sensitivity studies varying neighborhood settings\n",
+ "- Batch processing multiple attributes\n",
+ "\n",
+ "All kriging results are stored as attributes in a single Block Model."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1",
+ "metadata": {},
+ "source": [
+ "## Authentication"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "2",
+ "metadata": {},
+ "source": [
+ "from evo.notebooks import ServiceManagerWidget\n",
+ "\n",
+ "manager = await ServiceManagerWidget.with_auth_code(\n",
+ " client_id=\"your-client-id\", cache_location=\"./notebook-data\"\n",
+ ").login()"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "3",
+ "metadata": {},
+ "source": [
+ "# Load the widgets extension for rich HTML display\n",
+ "%load_ext evo.widgets"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4",
+ "metadata": {},
+ "source": [
+ "## Load Source PointSet and Variogram\n",
+ "\n",
+ "Load the source pointset and variogram using `object_from_uuid`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "5",
+ "metadata": {},
+ "source": [
+ "from evo.objects.typed import object_from_uuid\n",
+ "\n",
+ "# Load by UUID (replace with your actual UUIDs)\n",
+ "source_pointset = await object_from_uuid(manager, \"9100d7dc-44e9-4e61-b427-159635dea22f\")\n",
+ "# Alternative: load by path\n",
+ "# source_pointset = await object_from_path(manager, \"path/to/pointset.json\")\n",
+ "\n",
+ "# Display the pointset (pretty-printed in Jupyter)\n",
+ "source_pointset"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "6",
+ "metadata": {},
+ "source": [
+ "# View the pointset attributes\n",
+ "source_pointset.attributes"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "7",
+ "metadata": {},
+ "source": [
+ "# Load the variogram\n",
+ "variogram = await object_from_uuid(manager, \"72cd9b83-90f4-4cb0-9691-95728e3f9cbb\")\n",
+ "\n",
+ "# Alternative: load by path\n",
+ "# variogram = await object_from_path(manager, \"path/to/variogram.json\")\n",
+ "\n",
+ "# Display the variogram (pretty-printed in Jupyter)\n",
+ "variogram"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "8",
+ "metadata": {},
+ "source": [
+ "print(f\"Source: {source_pointset.name}\")\n",
+ "print(f\"Variogram: {variogram.name}\")"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9",
+ "metadata": {},
+ "source": [
+ "## Create Target Block Model\n",
+ "\n",
+ "Create a single Block Model to hold all scenario results as attributes.\n",
+ "The Block Model Service manages concurrent attribute creation."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "10",
+ "metadata": {},
+ "source": [
+ "import uuid\n",
+ "\n",
+ "from evo.blockmodels.typed import Units\n",
+ "from evo.objects.typed import BlockModel, Point3, RegularBlockModelData, Size3d, Size3i\n",
+ "\n",
+ "run_uuid = uuid.uuid4()\n",
+ "\n",
+ "# Create a Block Model to hold all scenario results\n",
+ "# Adjust origin, n_blocks, and block_size to match your data domain\n",
+ "bm_data = RegularBlockModelData(\n",
+ " name=f\"Kriging Scenarios - {run_uuid}\",\n",
+ " description=\"Block model with kriging results for different max_samples scenarios\",\n",
+ " origin=Point3(x=10000, y=100000, z=200),\n",
+ " n_blocks=Size3i(nx=40, ny=40, nz=40),\n",
+ " block_size=Size3d(dx=25.0, dy=25.0, dz=10.0),\n",
+ " coordinate_reference_system=\"EPSG:32632\",\n",
+ " size_unit_id=Units.METRES,\n",
+ ")\n",
+ "\n",
+ "block_model = await BlockModel.create_regular(manager, bm_data)\n",
+ "\n",
+ "print(f\"Created Block Model: {block_model.name}\")\n",
+ "print(f\"Block Model UUID: {block_model.block_model_uuid}\")\n",
+ "print(f\"Bounding Box: {block_model.bounding_box}\")"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "11",
+ "metadata": {},
+ "source": [
+ "# Use existing block model instead\n",
+ "# block_model = await object_from_uuid(manager, \"9e19c1e7-3a52-452a-978f-73dc9440dbbe\")"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "12",
+ "metadata": {},
+ "source": [
+ "# Display the block model (pretty-printed in Jupyter)\n",
+ "block_model"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "13",
+ "metadata": {},
+ "source": [
+ "## Define Kriging Scenarios\n",
+ "\n",
+ "Create multiple parameter sets varying the `max_samples` parameter to study its effect.\n",
+ "All scenarios target the same Block Model, creating different attributes.\n",
+ "\n",
+ "Each scenario also uses `BlockDiscretisation` to subdivide target blocks into 3×3×2 sub-cells\n",
+ "for volume-averaged block kriging. Omit `block_discretisation` (or pass `None`) for point kriging."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "14",
+ "metadata": {},
+ "source": [
+ "from evo.compute.tasks import BlockDiscretisation, SearchNeighborhood\n",
+ "from evo.compute.tasks.kriging import KrigingParameters\n",
+ "\n",
+ "# Define different max_samples values to test\n",
+ "max_samples_values = [5, 10, 15, 20]\n",
+ "# Get ellipsoid from the variogram structure with largest range (default)\n",
+ "var_ell = variogram.get_ellipsoid()\n",
+ "\n",
+ "# Create search ellipsoid by scaling the variogram ellipsoid by 2x\n",
+ "search_ellipsoid = var_ell.scaled(2.0)"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "15",
+ "metadata": {},
+ "source": [
+ "# Visualize variogram and search ellipsoids with pointset data\n",
+ "import plotly.graph_objects as go\n",
+ "\n",
+ "# Get pointset data for center calculation and scatter plot\n",
+ "pts = await source_pointset.to_dataframe()\n",
+ "center = (pts[\"x\"].mean(), pts[\"y\"].mean(), pts[\"z\"].mean())\n",
+ "\n",
+ "# Generate mesh surface points for visualization\n",
+ "vx, vy, vz = var_ell.surface_points(center=center)\n",
+ "sx, sy, sz = search_ellipsoid.surface_points(center=center)\n",
+ "\n",
+ "# Build visualization\n",
+ "var_mesh = go.Mesh3d(x=vx, y=vy, z=vz, alphahull=0, opacity=0.3, color=\"blue\", name=\"Variogram Ellipsoid\")\n",
+ "search_mesh = go.Mesh3d(x=sx, y=sy, z=sz, alphahull=0, opacity=0.2, color=\"gold\", name=\"Search Ellipsoid (2x)\")\n",
+ "scatter = go.Scatter3d(\n",
+ " x=pts[\"x\"],\n",
+ " y=pts[\"y\"],\n",
+ " z=pts[\"z\"],\n",
+ " mode=\"markers\",\n",
+ " marker=dict(size=2, color=pts[\"Ag_ppm Values\"], colorscale=\"Viridis\", showscale=True),\n",
+ " name=\"Sample Points\",\n",
+ ")\n",
+ "\n",
+ "fig = go.Figure(data=[var_mesh, search_mesh, scatter])\n",
+ "fig.update_layout(title=\"Kriging Inputs: Variogram & Search Ellipsoids\", scene=dict(aspectmode=\"data\"), showlegend=True)\n",
+ "fig.show()"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "16",
+ "metadata": {},
+ "source": [
+ "# Base source configuration\n",
+ "source = source_pointset.attributes[\"Ag_ppm Values\"]\n",
+ "\n",
+ "# Block discretisation subdivides each target block into nx * ny * nz sub-cells.\n",
+ "# The kriged value is averaged across these sub-cells, producing a more accurate\n",
+ "# volume-averaged estimate (block kriging) compared to point kriging (no discretisation).\n",
+ "discretisation = BlockDiscretisation(nx=3, ny=3, nz=2)\n",
+ "\n",
+ "# Create parameter sets for each scenario, all targeting the same Block Model\n",
+ "# Note: method defaults to ordinary kriging, so we don't need to specify it\n",
+ "parameter_sets = []\n",
+ "for max_samples in max_samples_values:\n",
+ " params = KrigingParameters(\n",
+ " source=source,\n",
+ " target=block_model.attributes[f\"Samples={max_samples}\"],\n",
+ " variogram=variogram,\n",
+ " search=SearchNeighborhood(ellipsoid=search_ellipsoid, max_samples=max_samples),\n",
+ " block_discretisation=discretisation,\n",
+ " )\n",
+ " parameter_sets.append(params)\n",
+ " print(f\"Prepared scenario with max_samples={max_samples}\")\n",
+ "\n",
+ "print(f\"\\nCreated {len(parameter_sets)} parameter sets\")"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "17",
+ "metadata": {},
+ "source": [
+ "## Run Multiple Kriging Tasks\n",
+ "\n",
+ "Execute all scenarios concurrently using `run` with a list of parameters.\n",
+ "Progress is aggregated across all tasks."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "18",
+ "metadata": {},
+ "source": [
+ "from evo.compute.tasks import run\n",
+ "\n",
+ "# Run all scenarios in parallel (progress feedback is shown by default)\n",
+ "print(f\"Submitting {len(parameter_sets)} kriging tasks in parallel...\")\n",
+ "\n",
+ "results = await run(manager, parameter_sets, preview=True)\n",
+ "\n",
+ "print(f\"\\nAll {len(results)} scenarios completed!\")"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "19",
+ "metadata": {},
+ "source": [
+ "## View Block Model Attributes\n",
+ "\n",
+ "Display the block model attributes to see all the newly created scenario columns.\n",
+ "\n",
+ "> **Note:** The block model object needs to be refreshed to see the newly added attributes."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "20",
+ "metadata": {},
+ "source": [
+ "# Refresh the block model to see the new attributes added by kriging\n",
+ "block_model = await block_model.refresh()\n",
+ "\n",
+ "# Pretty-print the block model to see its current state\n",
+ "block_model"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "21",
+ "metadata": {},
+ "source": [
+ "# View just the attributes (pretty-printed table in Jupyter)\n",
+ "block_model.attributes"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "22",
+ "metadata": {},
+ "source": [
+ "## Query Results from Block Model\n",
+ "\n",
+ "Get all scenario results from the Block Model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "23",
+ "metadata": {},
+ "source": [
+ "# Query the Block Model for all scenario columns using to_dataframe()\n",
+ "scenario_columns = [f\"Samples={ms}\" for ms in max_samples_values]\n",
+ "\n",
+ "print(\"Querying Block Model for results...\")\n",
+ "df = await block_model.to_dataframe(columns=scenario_columns)\n",
+ "\n",
+ "print(f\"Retrieved {len(df)} blocks with {len(scenario_columns)} scenario columns\")\n",
+ "df.head(10)"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "24",
+ "metadata": {},
+ "source": [
+ "## Display Results"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "25",
+ "metadata": {},
+ "source": [
+ "# Pretty-print the Block Model with all scenarios (includes Portal/Viewer links)\n",
+ "block_model"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "26",
+ "metadata": {},
+ "source": [
+ "# Show individual job result messages\n",
+ "for i, (job_result, max_samples) in enumerate(zip(results, max_samples_values)):\n",
+ " print(f\"Scenario {i + 1}: max_samples={max_samples} - {job_result.message}\")"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "27",
+ "metadata": {},
+ "source": [
+ "# Display first result (pretty-printed)\n",
+ "results[0]"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "28",
+ "metadata": {},
+ "source": [
+ "## Analyze Results\n",
+ "\n",
+ "Compare the kriging results across different max_samples values."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "29",
+ "metadata": {},
+ "source": [
+ "# Show statistics for each scenario\n",
+ "print(\"Statistics by max_samples:\")\n",
+ "print(df[scenario_columns].describe())"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "30",
+ "metadata": {},
+ "source": [
+ "# Optional: Visualize the differences using plotly\n",
+ "try:\n",
+ " import plotly.express as px\n",
+ "\n",
+ " # Melt the data for box plot comparison\n",
+ " df_melted = df[scenario_columns].melt(var_name=\"Scenario\", value_name=\"value\")\n",
+ "\n",
+ " fig = px.box(\n",
+ " df_melted,\n",
+ " x=\"Scenario\",\n",
+ " y=\"value\",\n",
+ " title=\"Kriging Values by Max Samples\",\n",
+ " )\n",
+ " fig.show()\n",
+ "except ImportError:\n",
+ " print(\"Install plotly for visualization: pip install plotly\")"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "31",
+ "metadata": {},
+ "source": [
+ "## Create a Report on the Block Model\n",
+ "\n",
+ "After running kriging, we can create a resource report on the block model.\n",
+ "\n",
+ "Reports require:\n",
+ "1. Columns to have units defined\n",
+ "2. At least one category column for grouping results"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "32",
+ "metadata": {},
+ "source": [
+ "### Add a Domain Column\n",
+ "\n",
+ "First, let's add a category column for grouping. We'll create simple geological domains\n",
+ "by slicing the block model into three zones based on elevation (z-coordinate)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "33",
+ "metadata": {},
+ "source": [
+ "# Get block model data\n",
+ "df = await block_model.to_dataframe()\n",
+ "\n",
+ "# Create domain column based on z-coordinate (elevation)\n",
+ "# Divide into 3 domains: LMS1 (lower), LMS2 (middle), LMS3 (upper)\n",
+ "z_min, z_max = df[\"z\"].min(), df[\"z\"].max()\n",
+ "z_range = z_max - z_min\n",
+ "\n",
+ "\n",
+ "def assign_domain(z):\n",
+ " if z < z_min + z_range / 3:\n",
+ " return \"LMS1\" # Lower zone\n",
+ " elif z < z_min + 2 * z_range / 3:\n",
+ " return \"LMS2\" # Middle zone\n",
+ " else:\n",
+ " return \"LMS3\" # Upper zone\n",
+ "\n",
+ "\n",
+ "df[\"domain\"] = df[\"z\"].apply(assign_domain)\n",
+ "\n",
+ "# Add the domain column to the block model\n",
+ "domain_data = df[[\"x\", \"y\", \"z\", \"domain\"]]\n",
+ "version = await block_model.add_attribute(domain_data, \"domain\")\n",
+ "print(f\"Added domain column. New version: {version.version_id}\")\n",
+ "\n",
+ "# Check domain distribution\n",
+ "print(\"\\nDomain distribution:\")\n",
+ "print(df[\"domain\"].value_counts())"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "34",
+ "metadata": {},
+ "source": [
+ "# Refresh to see the new attribute\n",
+ "block_model = await block_model.refresh()\n",
+ "block_model.attributes"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "35",
+ "metadata": {},
+ "source": [
+ "### Set Units on Kriged Attributes\n",
+ "\n",
+ "Reports require columns to have units defined. The kriged columns may not have units set,\n",
+ "so we need to set them before creating a report."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "36",
+ "metadata": {},
+ "source": [
+ "from evo.blockmodels.typed import Units\n",
+ "\n",
+ "# Set units on the kriged attribute columns\n",
+ "# Use the first scenario column name as an example\n",
+ "first_scenario_col = scenario_columns[0]\n",
+ "\n",
+ "block_model = await block_model.set_attribute_units(\n",
+ " {\n",
+ " first_scenario_col: Units.GRAMS_PER_TONNE, # Set appropriate unit for your data\n",
+ " }\n",
+ ")\n",
+ "print(f\"Set units on {first_scenario_col}\")\n",
+ "\n",
+ "# View updated attributes\n",
+ "block_model.attributes"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "37",
+ "metadata": {},
+ "source": [
+ "### Create and Run the Report\n",
+ "\n",
+ "Now we can create a report specification that will calculate tonnages and grades by domain.\n",
+ "\n",
+ "**Key classes for reports:**\n",
+ "- `Aggregation` - Enum: `MASS_AVERAGE` (for grades), `SUM` (for metal content)\n",
+ "- `Units` - Constants for output units (e.g., `Units.GRAMS_PER_TONNE`)\n",
+ "- `MassUnits` - Constants for mass output (e.g., `MassUnits.TONNES`)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "38",
+ "metadata": {},
+ "source": [
+ "from evo.blockmodels.typed import (\n",
+ " Aggregation,\n",
+ " MassUnits,\n",
+ " ReportCategorySpec,\n",
+ " ReportColumnSpec,\n",
+ " ReportSpecificationData,\n",
+ ")\n",
+ "\n",
+ "# Define the report\n",
+ "report_data = ReportSpecificationData(\n",
+ " name=\"Kriging Results Report\",\n",
+ " description=\"Resource estimate by domain using kriged grades\",\n",
+ " columns=[\n",
+ " ReportColumnSpec(\n",
+ " column_name=first_scenario_col,\n",
+ " aggregation=Aggregation.MASS_AVERAGE, # Use MASS_AVERAGE for grades\n",
+ " label=\"Kriged Grade\",\n",
+ " output_unit_id=Units.GRAMS_PER_TONNE, # Use Units class for discoverability\n",
+ " ),\n",
+ " ],\n",
+ " categories=[\n",
+ " ReportCategorySpec(\n",
+ " column_name=\"domain\",\n",
+ " label=\"Domain\",\n",
+ " values=[\"LMS1\", \"LMS2\", \"LMS3\"],\n",
+ " ),\n",
+ " ],\n",
+ " mass_unit_id=MassUnits.TONNES, # Use MassUnits class\n",
+ " density_value=2.7, # Fixed density (or use density_column_name)\n",
+ " density_unit_id=Units.TONNES_PER_CUBIC_METRE,\n",
+ " run_now=True, # Run immediately\n",
+ ")\n",
+ "\n",
+ "# Create the report\n",
+ "report = await block_model.create_report(report_data)\n",
+ "print(f\"Created report: {report.name}\")"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "39",
+ "metadata": {},
+ "source": [
+ "# Pretty-print the report (shows BlockSync link)\n",
+ "report"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "40",
+ "metadata": {},
+ "source": [
+ "### View Report Results\n",
+ "\n",
+ "Get the report results (waits if report is still running)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "41",
+ "metadata": {},
+ "source": [
+ "# Get the latest report result\n",
+ "result = await report.refresh()\n",
+ "\n",
+ "# Pretty-print the result (displays table in Jupyter)\n",
+ "result"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "markdown",
+ "id": "42",
+ "metadata": {},
+ "source": [
+ "### Filter kriging results on a category column\n",
+ "\n",
+ "Kriging can be run with a filter on a category column, which allows only a specific area to be updated rather than the entire block model.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "id": "43",
+ "metadata": {},
+ "source": [
+ "from evo.compute.tasks import RegionFilter\n",
+ "\n",
+ "# Base source configuration\n",
+ "source = source_pointset.attributes[\"Ag_ppm Values\"]\n",
+ "\n",
+ "# Create parameter sets for each scenario, all targeting the same Block Model\n",
+ "# Note: method defaults to ordinary kriging, so we don't need to specify it\n",
+ "parameter_sets = []\n",
+ "for max_samples in max_samples_values:\n",
+ " params = KrigingParameters(\n",
+ " source=source,\n",
+ " target=block_model.attributes[f\"Samples={max_samples}\"],\n",
+ " variogram=variogram,\n",
+ " search=SearchNeighborhood(ellipsoid=search_ellipsoid, max_samples=max_samples),\n",
+ " target_region_filter=RegionFilter(\n",
+ " attribute=block_model.attributes[\"domain\"],\n",
+ " names=[\"LMS1\"], # Filter by category name\n",
+ " ),\n",
+ " )\n",
+ " print(params.to_dict())\n",
+ " parameter_sets.append(params)\n",
+ " print(f\"Prepared scenario with max_samples={max_samples}\")\n",
+ "\n",
+ "# Run all scenarios in parallel (progress feedback is shown by default)\n",
+ "print(f\"Submitting {len(parameter_sets)} kriging tasks in parallel...\")\n",
+ "\n",
+ "results = await run(manager, parameter_sets, preview=True)\n",
+ "\n",
+ "print(f\"\\nAll {len(results)} scenarios completed!\")"
+ ],
+ "outputs": [],
+ "execution_count": null
+ },
+ {
+ "cell_type": "code",
+ "id": "44",
+ "metadata": {},
+ "source": [
+ "# Refresh the block model to see the new attributes added by kriging\n",
+ "block_model = await block_model.refresh()\n",
+ "\n",
+ "# Pretty-print the block model to see its current state\n",
+ "block_model"
+ ],
+ "outputs": [],
+ "execution_count": null
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.0"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/packages/evo-compute/pyproject.toml b/packages/evo-compute/pyproject.toml
index 1f46aa99..010c90df 100644
--- a/packages/evo-compute/pyproject.toml
+++ b/packages/evo-compute/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "evo-compute"
-version = "0.0.1rc3"
+version = "0.0.2"
requires-python = ">=3.10"
license-files = ["LICENSE.md"]
dynamic = ["readme"]
@@ -9,8 +9,11 @@ authors = [
]
dependencies = [
+ "evo-blockmodels[utils]",
+ "evo-objects[utils,blockmodels]",
"evo-sdk-common",
"pydantic>=2",
+ "typing_extensions>=4.0",
]
[project.urls]
diff --git a/packages/evo-compute/src/evo/compute/__init__.py b/packages/evo-compute/src/evo/compute/__init__.py
index 71e6b33e..2ceae317 100644
--- a/packages/evo-compute/src/evo/compute/__init__.py
+++ b/packages/evo-compute/src/evo/compute/__init__.py
@@ -9,6 +9,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from . import tasks
from .client import JobClient
from .data import JobProgress, JobStatusEnum
@@ -16,4 +17,5 @@
"JobClient",
"JobProgress",
"JobStatusEnum",
+ "tasks",
]
diff --git a/packages/evo-compute/src/evo/compute/tasks/__init__.py b/packages/evo-compute/src/evo/compute/tasks/__init__.py
new file mode 100644
index 00000000..64d86fa1
--- /dev/null
+++ b/packages/evo-compute/src/evo/compute/tasks/__init__.py
@@ -0,0 +1,185 @@
+# Copyright © 2025 Bentley Systems, Incorporated
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Task-specific clients for Evo Compute.
+
+This module provides a unified interface for running compute tasks. Tasks are
+dispatched based on their parameter types using a registry system.
+
+Example:
+ >>> from evo.compute.tasks import run, SearchNeighborhood, Target
+ >>> from evo.compute.tasks.kriging import KrigingParameters
+ >>>
+ >>> # Run a single task (preview=True required for preview APIs like kriging)
+ >>> result = await run(manager, KrigingParameters(...), preview=True)
+ >>>
+ >>> # Run multiple tasks (same or different types)
+ >>> results = await run(manager, [
+ ... KrigingParameters(...),
+ ... KrigingParameters(...),
+ ... ], preview=True)
+"""
+
+from __future__ import annotations
+
+from typing import Any, overload
+
+from evo.common import IContext
+from evo.common.interfaces import IFeedback
+from evo.common.utils import NoFeedback
+
+# Import kriging module to trigger registration
+from . import kriging as _kriging_module # noqa: F401
+
+# Shared components from common module
+from .common import (
+ CreateAttribute,
+ Ellipsoid,
+ EllipsoidRanges,
+ Rotation,
+ SearchNeighborhood,
+ Source,
+ Target,
+ UpdateAttribute,
+)
+
+# Result types from common (generic base classes)
+from .common.results import TaskResult, TaskResults
+
+# Kriging-specific result types
+from .kriging import (
+ BlockDiscretisation,
+ KrigingResult,
+ RegionFilter,
+)
+
+
+class _DefaultFeedback:
+ """Marker class to indicate default feedback should be used."""
+
+ pass
+
+
+DEFAULT_FEEDBACK = _DefaultFeedback()
+
+
@overload
async def run(
    context: IContext,
    parameters: Any,
    *,
    preview: bool = ...,
    fb: IFeedback | _DefaultFeedback = ...,
) -> TaskResult: ...


@overload
async def run(
    context: IContext,
    parameters: list[Any],
    *,
    preview: bool = ...,
    fb: IFeedback | _DefaultFeedback = ...,
) -> TaskResults: ...


async def run(
    context: IContext,
    parameters: Any | list[Any],
    *,
    preview: bool = False,
    fb: IFeedback | _DefaultFeedback = DEFAULT_FEEDBACK,
) -> TaskResult | TaskResults:
    """Run one or more compute tasks and wait for their results.

    Each parameter object is dispatched to the runner registered for its type,
    so a single call may mix different task kinds.

    Args:
        context: The context providing connector and org_id.
        parameters: One parameter object, or a list of them (mixed types allowed).
        preview: If True, sets the ``API-Preview: opt-in`` header on requests.
            Required for tasks that are still in preview (e.g. kriging).
            Defaults to False.
        fb: Feedback interface for progress updates. When omitted, a notebook
            feedback widget is used if ``evo.notebooks`` is importable,
            otherwise progress reporting is disabled.

    Returns:
        A TaskResult when a single parameter object was given, otherwise a
        TaskResults container (also returned, empty, for an empty list).

    Example (single task):
        >>> from evo.compute.tasks import run, SearchNeighborhood, Target
        >>> from evo.compute.tasks.kriging import KrigingParameters
        >>>
        >>> params = KrigingParameters(
        ...     source=pointset.attributes["grade"],
        ...     target=Target.new_attribute(block_model, "kriged_grade"),
        ...     variogram=variogram,
        ...     search=SearchNeighborhood(
        ...         ellipsoid=var_ell.scaled(2.0),
        ...         max_samples=20,
        ...     ),
        ... )
        >>> result = await run(manager, params, preview=True)

    Example (multiple tasks):
        >>> results = await run(manager, [
        ...     KrigingParameters(...),
        ...     KrigingParameters(...),
        ... ], preview=True)
        >>> results[0]  # Access first result
    """
    # Imported inside the function body, mirroring the module's layout —
    # presumably to avoid an import cycle; confirm before hoisting.
    from .common.runner import run_tasks

    single = not isinstance(parameters, list)
    batch: list[Any] = [parameters] if single else parameters

    if not batch:
        return TaskResults([])

    # Resolve the feedback sink: an explicit one wins; otherwise prefer the
    # optional notebook widget and fall back to no-op feedback.
    if isinstance(fb, _DefaultFeedback):
        try:
            from evo.notebooks import FeedbackWidget

            feedback: IFeedback = FeedbackWidget(label="Tasks")
        except ImportError:
            feedback = NoFeedback
    else:
        feedback = fb

    outcomes = await run_tasks(context, batch, fb=feedback, preview=preview)
    return outcomes[0] if single else TaskResults(outcomes)
+
+
# Public API of evo.compute.tasks: task primitives, result containers, and the
# unified run() entry point.
__all__ = [
    "BlockDiscretisation",
    "CreateAttribute",
    "Ellipsoid",
    "EllipsoidRanges",
    "KrigingResult",
    "RegionFilter",
    "Rotation",
    "SearchNeighborhood",
    "Source",
    "Target",
    "TaskResult",
    "TaskResults",
    "UpdateAttribute",
    "run",
]
diff --git a/packages/evo-compute/src/evo/compute/tasks/common/__init__.py b/packages/evo-compute/src/evo/compute/tasks/common/__init__.py
new file mode 100644
index 00000000..ed74bda4
--- /dev/null
+++ b/packages/evo-compute/src/evo/compute/tasks/common/__init__.py
@@ -0,0 +1,56 @@
+# Copyright © 2025 Bentley Systems, Incorporated
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Common primitives shared across geostatistics compute tasks."""
+
+from evo.objects.typed.types import Ellipsoid, EllipsoidRanges, Rotation
+
+from .results import TaskAttribute, TaskResult, TaskResults, TaskTarget, parse_task_target
+from .runner import TaskRegistry, get_task_runner, register_task_runner, run_tasks
+from .search import SearchNeighborhood
+from .source_target import (
+ CreateAttribute,
+ GeoscienceObjectReference,
+ Source,
+ Target,
+ UpdateAttribute,
+ get_attribute_expression,
+ is_typed_attribute,
+ serialize_object_reference,
+ source_from_attribute,
+ target_from_attribute,
+)
+
# Public names re-exported by evo.compute.tasks.common (drawn from the
# results, runner, search, and source_target submodules plus evo.objects types).
__all__ = [
    "CreateAttribute",
    "Ellipsoid",
    "EllipsoidRanges",
    "GeoscienceObjectReference",
    "Rotation",
    "SearchNeighborhood",
    "Source",
    "Target",
    "TaskAttribute",
    "TaskRegistry",
    "TaskResult",
    "TaskResults",
    "TaskTarget",
    "UpdateAttribute",
    "get_attribute_expression",
    "get_task_runner",
    "is_typed_attribute",
    "parse_task_target",
    "register_task_runner",
    "run_tasks",
    "serialize_object_reference",
    "source_from_attribute",
    "target_from_attribute",
]
diff --git a/packages/evo-compute/src/evo/compute/tasks/common/results.py b/packages/evo-compute/src/evo/compute/tasks/common/results.py
new file mode 100644
index 00000000..0bb6e79e
--- /dev/null
+++ b/packages/evo-compute/src/evo/compute/tasks/common/results.py
@@ -0,0 +1,247 @@
+# Copyright © 2025 Bentley Systems, Incorporated
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Common result types for compute tasks.
+
+This module provides base result classes that all compute task types can inherit
+from. These were originally defined in the kriging module but are generic enough
+for any task type.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any
+
+from evo.common import IContext
+from evo.objects.data import ObjectSchema
+from evo.objects.exceptions import SchemaIDFormatError
+from evo.objects.typed import object_from_reference
+
# Public names of this module.
# NOTE(review): parse_task_target is imported and re-exported by
# evo.compute.tasks.common despite not being listed here — confirm whether
# the omission is intentional.
__all__ = [
    "TaskAttribute",
    "TaskResult",
    "TaskResults",
    "TaskTarget",
]
+
+
@dataclass
class TaskAttribute:
    """Attribute information from a task result."""

    # Reference URL identifying the attribute within the target object.
    reference: str
    # Name of the attribute the task created or updated.
    name: str
+
+
@dataclass
class TaskTarget:
    """Target information from a task result."""

    # Reference URL to the target geoscience object.
    reference: str
    # Display name of the target object.
    name: str
    # Description from the API response; may be None (parsed with dict.get).
    description: Any
    # Full schema ID string of the target (parsed by TaskResult.schema_type).
    schema_id: str
    # The attribute the task created or updated on the target.
    attribute: TaskAttribute
+
+
class TaskResult:
    """Base class for compute task results.

    Provides common functionality for all task results including:
    - Pretty-printing in Jupyter notebooks
    - Portal URL extraction
    - Access to target object and data
    """

    message: str
    """A message describing what happened in the task."""

    _target: TaskTarget
    """Internal target information."""

    _context: IContext | None = None
    """The context used to run the task (for convenience methods)."""

    def __init__(self, message: str, target: TaskTarget):
        self.message = message
        self._target = target
        # NOTE(review): initialised to None here; get_target_object() relies on
        # it being populated after construction, presumably by the task runner —
        # confirm against run().
        self._context = None

    @property
    def target_name(self) -> str:
        """The name of the target object."""
        return self._target.name

    @property
    def target_reference(self) -> str:
        """Reference URL to the target object."""
        return self._target.reference

    @property
    def attribute_name(self) -> str:
        """The name of the attribute that was created/updated."""
        return self._target.attribute.name

    @property
    def schema_type(self) -> str:
        """The schema type of the target object (e.g., 'regular-masked-3d-grid').

        Uses ``ObjectSchema.from_id`` to parse the schema ID. Falls back to the
        raw ``schema_id`` string when it cannot be parsed.
        """
        schema = self._target.schema_id
        try:
            parsed = ObjectSchema.from_id(schema)
            return parsed.sub_classification
        except SchemaIDFormatError:
            # Unparseable IDs are surfaced verbatim rather than raising.
            return schema

    async def get_target_object(self, context: IContext | None = None):
        """Load and return the target geoscience object.

        Args:
            context: Optional context to use. If not provided, uses the context
                from when the task was run.

        Returns:
            The typed geoscience object (e.g., Regular3DGrid, RegularMasked3DGrid, BlockModel)

        Raises:
            ValueError: If no context was passed and none was stored on the result.

        Example:
            >>> result = await run(manager, params)
            >>> target = await result.get_target_object()
            >>> target  # Pretty-prints with Portal/Viewer links
        """
        ctx = context or self._context
        if ctx is None:
            raise ValueError(
                "No context available. Either pass a context to get_target_object() "
                "or ensure the result was returned from run()."
            )
        return await object_from_reference(ctx, self._target.reference)

    async def to_dataframe(self, context: IContext | None = None, columns: list[str] | None = None):
        """Get the task results as a DataFrame.

        This is the simplest way to access the task output data. It loads
        the target object and returns its data as a pandas DataFrame.

        Args:
            context: Optional context to use. If not provided, uses the context
                from when the task was run.
            columns: Optional list of column names to include. If None, includes
                all columns. Use ["*"] to explicitly request all columns.

        Returns:
            A pandas DataFrame containing the task results.

        Raises:
            TypeError: If the target object exposes no known DataFrame accessor.

        Example:
            >>> result = await run(manager, params)
            >>> df = await result.to_dataframe()
            >>> df.head()
        """
        target_obj = await self.get_target_object(context)

        # Try different methods to get the dataframe based on object type.
        # The own-method check comes first, so objects exposing both a
        # to_dataframe and a cells accessor use their own to_dataframe.
        if hasattr(target_obj, "to_dataframe"):
            # BlockModel, PointSet, and similar objects with to_dataframe
            if columns is not None:
                return await target_obj.to_dataframe(columns=columns)
            return await target_obj.to_dataframe()
        elif hasattr(target_obj, "cells") and hasattr(target_obj.cells, "to_dataframe"):
            # Grid objects (Regular3DGrid, RegularMasked3DGrid, etc.)
            # NOTE: columns is not forwarded on this path — cells.to_dataframe
            # is called without arguments.
            return await target_obj.cells.to_dataframe()
        else:
            raise TypeError(
                f"Don't know how to get DataFrame from {type(target_obj).__name__}. "
                "Use get_target_object() and access the data manually."
            )

    def _get_result_type_name(self) -> str:
        """Get the display name for this result type (overridden by subclasses)."""
        return "Task"

    def __repr__(self) -> str:
        """String representation."""
        lines = [
            f"✓ {self._get_result_type_name()} Result",
            f" Message: {self.message}",
            f" Target: {self.target_name}",
            f" Attribute: {self.attribute_name}",
        ]
        return "\n".join(lines)
+
+
class TaskResults:
    """Sequence-like wrapper around a batch of task results.

    Supports ``len()``, iteration, and integer indexing, and renders a compact
    one-line-per-result summary when printed.

    Example:
        >>> results = await run(manager, [params1, params2, params3])
        >>> results          # summary of every result
        >>> results[0]       # first result
        >>> for result in results:
        ...     print(result.attribute_name)
    """

    def __init__(self, results: list[TaskResult]):
        self._items = results

    @property
    def results(self) -> list[TaskResult]:
        """The underlying list of task results."""
        return self._items

    def __len__(self) -> int:
        return len(self._items)

    def __iter__(self):
        return iter(self._items)

    def __getitem__(self, index: int) -> TaskResult:
        return self._items[index]

    def __repr__(self) -> str:
        """Compact summary: a header line plus one line per result."""
        if not self._items:
            return "TaskResults([])"
        header = f"✓ {len(self._items)} {self._items[0]._get_result_type_name()} Results:"
        rows = [f" [{i}] {r.target_name} → {r.attribute_name}" for i, r in enumerate(self._items)]
        return "\n".join([header, *rows])
+
+
def parse_task_target(data: dict[str, Any]) -> TaskTarget:
    """Build a :class:`TaskTarget` from a raw API response dictionary.

    Args:
        data: Response dict containing a ``target`` sub-dict, which in turn
            holds an ``attribute`` sub-dict.

    Returns:
        The populated :class:`TaskTarget`.
    """
    target = data["target"]
    attribute = target["attribute"]

    return TaskTarget(
        reference=target["reference"],
        name=target["name"],
        description=target.get("description"),
        schema_id=target["schema_id"],
        attribute=TaskAttribute(
            reference=attribute["reference"],
            name=attribute["name"],
        ),
    )
diff --git a/packages/evo-compute/src/evo/compute/tasks/common/runner.py b/packages/evo-compute/src/evo/compute/tasks/common/runner.py
new file mode 100644
index 00000000..bd35c32a
--- /dev/null
+++ b/packages/evo-compute/src/evo/compute/tasks/common/runner.py
@@ -0,0 +1,231 @@
+# Copyright © 2025 Bentley Systems, Incorporated
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Task runner registry for dispatching tasks based on parameter types.
+
+This module provides a registry-based system for running compute tasks. Each task
+type registers its parameter class and runner function, allowing the unified `run()`
+function to dispatch to the correct runner based on the parameter type.
+
+This enables running multiple different task types together in a single call.
+
+Example:
+ >>> from evo.compute.tasks import run
+ >>> from evo.compute.tasks.kriging import KrigingParameters
+ >>> from evo.compute.tasks.simulation import SimulationParameters # future
+ >>>
+ >>> # Run mixed task types together
+ >>> results = await run(manager, [
+ ... KrigingParameters(...),
+ ... SimulationParameters(...),
+ ... KrigingParameters(...),
+ ... ], preview=True)
+"""
+
+from __future__ import annotations
+
+import asyncio
+from typing import Any, Awaitable, Callable, TypeVar
+
+from evo.common import IContext
+from evo.common.interfaces import IFeedback
+from evo.common.utils import NoFeedback, split_feedback
+
+__all__ = [
+ "TaskRegistry",
+ "get_task_runner",
+ "register_task_runner",
+ "run_tasks",
+]
+
+
+# Type for task results
+TResult = TypeVar("TResult")
+
+# Type for runner functions: async (context, params, *, preview) -> result
+RunnerFunc = Callable[..., Awaitable[Any]]
+
+
class TaskRegistry:
    """Singleton registry mapping parameter classes to runner coroutines.

    Every instantiation yields the same object, which owns the mapping from
    parameter types to their registered async runner functions.
    """

    _instance: "TaskRegistry | None" = None
    _runners: dict[type, RunnerFunc]

    def __new__(cls) -> "TaskRegistry":
        inst = cls._instance
        if inst is None:
            inst = super().__new__(cls)
            inst._runners = {}
            cls._instance = inst
        return inst

    def register(self, param_type: type, runner: RunnerFunc) -> None:
        """Associate a runner function with a parameter type.

        Args:
            param_type: The parameter class (e.g., KrigingParameters)
            runner: Async function with signature (context, params) -> result
        """
        self._runners[param_type] = runner

    def get_runner(self, param_type: type) -> RunnerFunc | None:
        """Return the runner registered for ``param_type``, or None when absent.

        Args:
            param_type: The parameter class to look up
        """
        return self._runners.get(param_type)

    def get_runner_for_params(self, params: Any) -> RunnerFunc:
        """Return the runner for a parameter *instance*.

        Args:
            params: A parameter object instance

        Returns:
            The registered runner function

        Raises:
            TypeError: If no runner is registered for the instance's type
        """
        kind = type(params)
        runner = self._runners.get(kind)
        if runner is not None:
            return runner
        registered = ", ".join(t.__name__ for t in self._runners.keys())
        raise TypeError(
            f"No task runner registered for parameter type '{kind.__name__}'. "
            f"Registered types: {registered or 'none'}"
        )

    def clear(self) -> None:
        """Drop every registered runner (mainly for testing)."""
        self._runners.clear()
+
+
+# Global registry instance
+_registry = TaskRegistry()
+
+
def register_task_runner(param_type: type, runner: RunnerFunc) -> None:
    """Register ``runner`` as the handler for parameters of type ``param_type``.

    Task modules call this at import time so the unified ``run()`` entry point
    can dispatch their parameter objects.

    Args:
        param_type: The parameter class (e.g., KrigingParameters)
        runner: Async function with signature (context, params) -> result

    Example:
        >>> from evo.compute.tasks.common.runner import register_task_runner
        >>>
        >>> async def _run_kriging(context, params):
        ...     # implementation
        ...     pass
        >>>
        >>> register_task_runner(KrigingParameters, _run_kriging)
    """
    _registry.register(param_type, runner)
+
+
def get_task_runner(param_type: type) -> RunnerFunc | None:
    """Look up the runner registered for ``param_type``.

    Args:
        param_type: The parameter class to look up

    Returns:
        The registered runner function, or None if not found
    """
    return _registry.get_runner(param_type)
+
+
async def run_tasks(
    context: IContext,
    parameters: list[Any],
    *,
    fb: IFeedback = NoFeedback,
    preview: bool = False,
) -> list[Any]:
    """Run multiple tasks concurrently, dispatching based on parameter types.

    This function looks up the appropriate runner for each parameter based on
    its type, allowing different task types to be run together.

    Args:
        context: The context providing connector and org_id
        parameters: List of parameter objects (can be mixed types)
        fb: Feedback interface for progress updates
        preview: If True, sets the ``API-Preview: opt-in`` header on requests.
            Required for tasks that are still in preview. Defaults to False.

    Returns:
        List of results in the same order as the input parameters

    Raises:
        TypeError: If any parameter type doesn't have a registered runner

    Example:
        >>> # Run mixed task types
        >>> results = await run_tasks(manager, [
        ...     KrigingParameters(...),
        ...     SimulationParameters(...),  # future task type
        ... ], preview=True)
    """
    if len(parameters) == 0:
        return []

    total = len(parameters)

    # Validate all parameters have registered runners upfront, so a bad type
    # fails before any work is submitted.
    runners = [_registry.get_runner_for_params(params) for params in parameters]

    # Split feedback evenly across tasks.
    per_task_fb = split_feedback(fb, [1.0] * total)

    async def _run_one(i: int, params: Any, runner: RunnerFunc, task_fb: IFeedback) -> tuple[int, Any]:
        # Carries the input index so results can be placed back in input order.
        result = await runner(context, params, preview=preview)
        task_fb.progress(1.0)
        return i, result

    tasks = [
        asyncio.create_task(_run_one(i, params, runner, per_task_fb[i]))
        for i, (params, runner) in enumerate(zip(parameters, runners))
    ]

    results: list[Any] = [None] * total

    done_count = 0
    for fut in asyncio.as_completed(tasks):
        try:
            i, res = await fut
        except Exception:
            # Fail fast: cancel the remaining tasks, then wait for the
            # cancellations to settle so no coroutine is left pending and no
            # exception goes unretrieved.
            for t in tasks:
                t.cancel()
            await asyncio.gather(*tasks, return_exceptions=True)
            raise
        results[i] = res
        done_count += 1
        fb.progress(done_count / total, f"Running {done_count}/{total}...")

    fb.progress(1.0, f"Completed {total}/{total}")

    # Every slot was filled above. Return as-is rather than filtering out None,
    # which would silently drop a runner's legitimate None result and break the
    # documented input-order guarantee.
    return results
diff --git a/packages/evo-compute/src/evo/compute/tasks/common/search.py b/packages/evo-compute/src/evo/compute/tasks/common/search.py
new file mode 100644
index 00000000..29dbd359
--- /dev/null
+++ b/packages/evo-compute/src/evo/compute/tasks/common/search.py
@@ -0,0 +1,73 @@
+# Copyright © 2025 Bentley Systems, Incorporated
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Search neighborhood parameters for geostatistical operations."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any
+
+from evo.objects.typed.types import Ellipsoid
+
+__all__ = [
+ "SearchNeighborhood",
+]
+
+
@dataclass
class SearchNeighborhood:
    """Search neighborhood parameters for geostatistical operations.

    Describes how candidate samples are located around each evaluation point
    during spatial interpolation or estimation (kriging, simulation, etc.).

    The neighborhood combines a search ellipsoid (spatial extent and
    orientation) with limits on how many samples may be used.

    Example:
        >>> search = SearchNeighborhood(
        ...     ellipsoid=Ellipsoid(
        ...         ranges=EllipsoidRanges(major=200.0, semi_major=150.0, minor=100.0),
        ...         rotation=Rotation(dip_azimuth=45.0),
        ...     ),
        ...     max_samples=20,
        ... )
    """

    ellipsoid: Ellipsoid
    """The ellipsoid defining the spatial extent to search for samples."""

    max_samples: int
    """The maximum number of samples to use for each evaluation point."""

    min_samples: int | None = None
    """The minimum number of samples required. If fewer are found, the point may be skipped."""

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dictionary, omitting ``min_samples`` when unset."""
        payload: dict[str, Any] = {
            "ellipsoid": self.ellipsoid.to_dict(),
            "max_samples": self.max_samples,
        }
        if self.min_samples is not None:
            payload["min_samples"] = self.min_samples
        return payload
diff --git a/packages/evo-compute/src/evo/compute/tasks/common/source_target.py b/packages/evo-compute/src/evo/compute/tasks/common/source_target.py
new file mode 100644
index 00000000..419831b8
--- /dev/null
+++ b/packages/evo-compute/src/evo/compute/tasks/common/source_target.py
@@ -0,0 +1,311 @@
+# Copyright © 2025 Bentley Systems, Incorporated
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Source and target specifications for compute tasks."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any, Union
+
+from evo.objects.typed.attributes import (
+ Attribute,
+ BlockModelAttribute,
+ BlockModelPendingAttribute,
+ PendingAttribute,
+)
+from typing_extensions import TypeAlias
+
+__all__ = [
+ "CreateAttribute",
+ "GeoscienceObjectReference",
+ "Source",
+ "Target",
+ "UpdateAttribute",
+ "get_attribute_expression",
+ "is_typed_attribute",
+ "serialize_object_reference",
+ "source_from_attribute",
+ "target_from_attribute",
+]
+
+# All typed attribute types that compute tasks can work with.
+TYPED_ATTRIBUTE_TYPES = (Attribute, PendingAttribute, BlockModelAttribute, BlockModelPendingAttribute)
+
+# Type alias for any object that can be serialized to a geoscience object reference URL
+# Supports: str, ObjectReference, BaseObject, DownloadedObject, ObjectMetadata
+GeoscienceObjectReference: TypeAlias = Union[str, Any]
+
+
def is_typed_attribute(value: Any) -> bool:
    """Return True when *value* is one of the typed attribute classes from ``evo.objects.typed``."""
    # Equivalent to isinstance(value, TYPED_ATTRIBUTE_TYPES) with a tuple.
    return any(isinstance(value, kind) for kind in TYPED_ATTRIBUTE_TYPES)
+
+
def get_attribute_expression(
    attr: Attribute | PendingAttribute | BlockModelAttribute | BlockModelPendingAttribute,
) -> str:
    """Build the JMESPath expression that selects *attr* on its parent object.

    An existing ``Attribute`` (from a DownloadedObject) is addressed by key
    within its schema path context, e.g. ``"locations.attributes[?key=='abc']"``.
    Pending and block-model attribute types are addressed by name, e.g.
    ``"attributes[?name=='grade']"``.

    Args:
        attr: A typed attribute object.

    Returns:
        A JMESPath expression string.

    Raises:
        TypeError: If the attribute type is not recognised.
    """
    if isinstance(attr, Attribute):
        # Key-based lookup, scoped to the attribute's schema path when present.
        prefix = attr._context.schema_path or "attributes"
        return f"{prefix}[?key=='{attr.key}']"
    if isinstance(attr, (PendingAttribute, BlockModelAttribute, BlockModelPendingAttribute)):
        # Pending/block-model attributes are looked up by display name.
        return f"attributes[?name=='{attr.name}']"
    raise TypeError(f"Cannot get expression for attribute type {type(attr).__name__}")
+
+
def serialize_object_reference(value: GeoscienceObjectReference) -> str:
    """
    Serialize an object reference to a string URL.

    Supports:
    - str: returned as-is
    - ObjectReference: str(value)
    - BaseObject (typed objects like PointSet): value.metadata.url
    - DownloadedObject: value.metadata.url
    - ObjectMetadata: value.url

    Args:
        value: The value to serialize

    Returns:
        String URL of the object reference

    Raises:
        TypeError: If the value type is not supported
    """
    if isinstance(value, str):
        return value

    # Typed objects (BaseObject subclasses like PointSet, Regular3DGrid) and
    # DownloadedObject expose the URL via their metadata. The URL may itself
    # be a reference object rather than a plain str (callers elsewhere wrap it
    # in str(), e.g. source_from_attribute), so coerce explicitly to honour
    # the declared `-> str` return type.
    if hasattr(value, "metadata") and hasattr(value.metadata, "url"):
        return str(value.metadata.url)

    # ObjectMetadata carries the URL directly as a string.
    if hasattr(value, "url") and isinstance(value.url, str):
        return value.url

    # NOTE(review): the docstring advertises ObjectReference -> str(value);
    # that type reaches this path only if it lacks .metadata/.url — confirm
    # against the ObjectReference definition.
    raise TypeError(f"Cannot serialize object reference of type {type(value)}")
+
+
@dataclass
class Source:
    """The source object and attribute containing known values.

    Identifies where the input data for a geostatistical operation lives.
    Usually obtained from a typed object's attribute rather than built by hand.

    Example:
        >>> # From a typed object attribute (preferred):
        >>> source = pointset.attributes["grade"]
        >>>
        >>> # Or explicitly:
        >>> source = Source(object=pointset, attribute="grade")
    """

    object: GeoscienceObjectReference
    """Reference to the source geoscience object."""

    attribute: str
    """Name of the attribute on the source object."""

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dictionary for the compute API."""
        return {
            "object": serialize_object_reference(self.object),
            "attribute": self.attribute,
        }
+
+
@dataclass
class CreateAttribute:
    """Specification for creating a new attribute on a target object."""

    name: str
    """The name of the attribute to create."""

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a dictionary tagged with the ``create`` operation."""
        return {"operation": "create", "name": self.name}
+
+
@dataclass
class UpdateAttribute:
    """Specification for updating an existing attribute on a target object."""

    reference: str
    """Reference to an existing attribute to update."""

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a dictionary tagged with the ``update`` operation."""
        return {"operation": "update", "reference": self.reference}
+
+
@dataclass
class Target:
    """The target object and attribute to create or update with results.

    Used to specify where output data should be written for geostatistical operations.

    Example:
        >>> # Create a new attribute on a target object:
        >>> target = Target.new_attribute(block_model, "kriged_grade")
        >>>
        >>> # Or update an existing attribute:
        >>> target = Target(object=grid, attribute=UpdateAttribute("existing_ref"))
    """

    object: GeoscienceObjectReference
    """Object to write results onto."""

    attribute: CreateAttribute | UpdateAttribute
    """Attribute specification (create new or update existing)."""

    def __init__(self, object: GeoscienceObjectReference, attribute: CreateAttribute | UpdateAttribute):
        self.object = object
        self.attribute = attribute

    @classmethod
    def new_attribute(cls, object: GeoscienceObjectReference, attribute_name: str) -> Target:
        """
        Create a Target that will create a new attribute on the target object.

        Args:
            object: The target object to write results onto.
            attribute_name: The name of the new attribute to create.

        Returns:
            A Target instance configured to create a new attribute.

        Example:
            >>> target = Target.new_attribute(block_model, "kriged_grade")
        """
        return cls(object=object, attribute=CreateAttribute(name=attribute_name))

    def to_dict(self) -> dict[str, Any]:
        """Serialize to dictionary.

        The attribute spec is serialized via its ``to_dict`` when available;
        anything else (e.g. a pre-built dict) is passed through unchanged.
        """
        # The previous elif-isinstance(dict)/else branches both passed the
        # value through unchanged, so they collapse to a single fallback.
        if hasattr(self.attribute, "to_dict"):
            attribute_value = self.attribute.to_dict()
        else:
            attribute_value = self.attribute

        return {
            "object": serialize_object_reference(self.object),
            "attribute": attribute_value,
        }
+
+
+# =============================================================================
+# Typed attribute → Source / Target conversion
+# =============================================================================
+
+
def source_from_attribute(attr: Attribute) -> Source:
    """Build a :class:`Source` from an existing typed ``Attribute``.

    Only ``Attribute`` (an attribute that already exists on a DownloadedObject)
    is accepted, because source data must already be present.

    Args:
        attr: An existing ``Attribute`` from a DownloadedObject.

    Returns:
        A :class:`Source` referencing the parent object and attribute expression.

    Raises:
        TypeError: If *attr* is not an ``Attribute`` instance.
    """
    if isinstance(attr, Attribute):
        return Source(
            object=str(attr._obj.metadata.url),
            attribute=get_attribute_expression(attr),
        )
    raise TypeError(f"Only Attribute (from a DownloadedObject) can be used as a source, got {type(attr).__name__}")
+
+
def target_from_attribute(
    attr: Attribute | PendingAttribute | BlockModelAttribute | BlockModelPendingAttribute,
) -> Target:
    """Build a :class:`Target` from any typed attribute object.

    Accepts ``Attribute``, ``PendingAttribute``, ``BlockModelAttribute``, and
    ``BlockModelPendingAttribute`` from ``evo.objects.typed.attributes``.

    An attribute that already exists maps to an update operation referencing
    it by expression; a pending attribute maps to a create operation using
    its name.

    Args:
        attr: A typed attribute object. Must carry a non-``None`` ``_obj``
            reference to its parent object.

    Returns:
        A :class:`Target` configured based on the attribute.

    Raises:
        TypeError: If *attr* is not a recognised typed attribute, or if it has
            no ``_obj`` reference to its parent object.
    """
    if not is_typed_attribute(attr):
        raise TypeError(
            f"Cannot convert {type(attr).__name__} to a Target. "
            "Expected Attribute, PendingAttribute, BlockModelAttribute, or BlockModelPendingAttribute."
        )

    if attr._obj is None:
        raise TypeError(
            f"Cannot determine target object from attribute type {type(attr).__name__}. "
            "Attribute must have an _obj reference to its parent object."
        )

    # Existing attributes are updated in place; pending ones are created by name.
    spec: CreateAttribute | UpdateAttribute
    if attr.exists:
        spec = UpdateAttribute(reference=get_attribute_expression(attr))
    else:
        spec = CreateAttribute(name=attr.name)

    return Target(object=attr._obj, attribute=spec)
diff --git a/packages/evo-compute/src/evo/compute/tasks/kriging.py b/packages/evo-compute/src/evo/compute/tasks/kriging.py
new file mode 100644
index 00000000..0c0dea16
--- /dev/null
+++ b/packages/evo-compute/src/evo/compute/tasks/kriging.py
@@ -0,0 +1,491 @@
+# Copyright © 2025 Bentley Systems, Incorporated
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Kriging compute task client.
+
+This module provides typed dataclass models and convenience functions for running
+the Kriging task (geostatistics/kriging).
+
+Example:
    >>> from evo.compute.tasks import run, Target, SearchNeighborhood, Ellipsoid, EllipsoidRanges
+ >>> from evo.compute.tasks.kriging import KrigingParameters
+ >>>
+ >>> params = KrigingParameters(
+ ... source=pointset.attributes["grade"],
+ ... target=Target.new_attribute(block_model, "kriged_grade"),
+ ... variogram=variogram,
+ ... search=SearchNeighborhood(
+ ... ellipsoid=Ellipsoid(ranges=EllipsoidRanges(200, 150, 100)),
+ ... max_samples=20,
+ ... ),
+ ... )
+ >>> result = await run(manager, params, preview=True)
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any
+
+from evo.common import IContext
+from evo.common.interfaces import IFeedback
+from evo.common.utils import NoFeedback, Retry
+from evo.objects.typed.attributes import Attribute
+
+from ..client import JobClient
+
+# Import shared components
+from .common import (
+ GeoscienceObjectReference,
+ SearchNeighborhood,
+ Source,
+ Target,
+ get_attribute_expression,
+ is_typed_attribute,
+ serialize_object_reference,
+ source_from_attribute,
+ target_from_attribute,
+)
+from .common.results import TaskAttribute, TaskResult, TaskResults, TaskTarget, parse_task_target
+from .common.runner import register_task_runner
+
+__all__ = [
+ # Kriging-specific (users import from evo.compute.tasks.kriging)
+ "BlockDiscretisation",
+ "KrigingMethod",
+ "KrigingParameters",
+ "KrigingResult",
+ "OrdinaryKriging",
+ "RegionFilter",
+ "SimpleKriging",
+ # Re-exported from common for backwards compatibility
+ "TaskResult",
+ "TaskResults",
+]
+
+
# Backwards-compatible aliases for the renamed internal dataclasses.
# NOTE(review): underscore-prefixed names are nominally private; kept so any
# older call sites importing them keep working — confirm before removing.
_KrigingAttribute = TaskAttribute
_KrigingTarget = TaskTarget
+
+
+# =============================================================================
+# Kriging Method Types
+# =============================================================================
+
+
@dataclass
class SimpleKriging:
    """Simple kriging method with a known constant mean.

    Choose this when the variable's mean is known and constant over the domain.

    Example:
        >>> method = SimpleKriging(mean=100.0)
    """

    mean: float
    """The mean value, assumed to be constant across the domain."""

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a dictionary tagged as the ``simple`` method."""
        return {"type": "simple", "mean": self.mean}
+
+
@dataclass
class OrdinaryKriging:
    """Ordinary kriging method with unknown local mean.

    The workhorse kriging method: the local mean is estimated from nearby
    samples. Kriging tasks fall back to this when no method is given.
    """

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a dictionary tagged as the ``ordinary`` method."""
        return {"type": "ordinary"}
+
+
class KrigingMethod:
    """Convenience namespace for choosing a kriging method.

    Example:
        >>> # Use ordinary kriging (most common)
        >>> method = KrigingMethod.ORDINARY
        >>>
        >>> # Use simple kriging with known mean
        >>> method = KrigingMethod.simple(mean=100.0)
    """

    ORDINARY: OrdinaryKriging = OrdinaryKriging()
    """Ordinary kriging - estimates local mean from nearby samples."""

    @staticmethod
    def simple(mean: float) -> SimpleKriging:
        """Build a simple kriging method with a known constant mean.

        Args:
            mean: The known constant mean value across the domain.

        Returns:
            SimpleKriging instance configured with the given mean.
        """
        return SimpleKriging(mean=mean)
+
+
+# =============================================================================
+# Block Discretisation
+# =============================================================================
+
+
@dataclass
class BlockDiscretisation:
    """Sub-block discretisation for block kriging.

    When supplied, every target block is split into ``nx * ny * nz`` sub-cells
    and the kriged value is averaged over them; when omitted (``None``), point
    kriging is performed. Only applicable when the target is a 3D grid or
    block model.

    Each dimension must be an integer in the inclusive range 1–9; the default
    of 1 per axis is equivalent to point kriging.

    Example:
        >>> discretisation = BlockDiscretisation(nx=3, ny=3, nz=2)
    """

    nx: int
    """Number of subdivisions in the x direction (1–9)."""

    ny: int
    """Number of subdivisions in the y direction (1–9)."""

    nz: int
    """Number of subdivisions in the z direction (1–9)."""

    def __init__(self, nx: int = 1, ny: int = 1, nz: int = 1):
        # Validate every axis before assigning anything.
        # NOTE(review): bool is a subclass of int, so True/False slip past the
        # isinstance check (True == 1 is then in range) — confirm acceptable.
        for axis, count in (("nx", nx), ("ny", ny), ("nz", nz)):
            if not isinstance(count, int):
                raise TypeError(f"{axis} must be an integer, got {type(count).__name__}")
            if not 1 <= count <= 9:
                raise ValueError(f"{axis} must be between 1 and 9, got {count}")
        self.nx = nx
        self.ny = ny
        self.nz = nz

    def to_dict(self) -> dict[str, Any]:
        """Serialize to dictionary."""
        return {"nx": self.nx, "ny": self.ny, "nz": self.nz}
+
+
+# =============================================================================
+# Region Filter
+# =============================================================================
+
+
@dataclass
class RegionFilter:
    """Region filter restricting kriging to chosen categories on the target.

    Supply exactly one of `names` or `values`:
    - `names`: Category names (strings) - used for CategoryAttribute with string lookup
    - `values`: Integer values - used for integer-indexed categories or BlockModel integer columns

    Example:
        >>> # Filter by category names (string lookup)
        >>> filter_by_name = RegionFilter(
        ...     attribute=block_model.attributes["domain"],
        ...     names=["LMS1", "LMS2"],
        ... )
        >>>
        >>> # Filter by integer values (direct index matching)
        >>> filter_by_value = RegionFilter(
        ...     attribute=block_model.attributes["domain"],
        ...     values=[1, 2, 3],
        ... )
    """

    attribute: Any
    """The category attribute to filter on (from target object)."""

    names: list[str] | None = None
    """Category names to include (mutually exclusive with values)."""

    values: list[int] | None = None
    """Integer category keys to include (mutually exclusive with names)."""

    def __init__(
        self,
        attribute: Any,
        names: list[str] | None = None,
        values: list[int] | None = None,
    ):
        # Exactly one selector must be supplied.
        supplied = (names is not None) + (values is not None)
        if supplied > 1:
            raise ValueError("Only one of 'names' or 'values' may be provided, not both.")
        if supplied == 0:
            raise ValueError("One of 'names' or 'values' must be provided.")

        self.attribute = attribute
        self.names = names
        self.values = values

    def to_dict(self) -> dict[str, Any]:
        """Serialize to dictionary for the compute task API."""
        if is_typed_attribute(self.attribute):
            expression = get_attribute_expression(self.attribute)
        elif isinstance(self.attribute, str):
            # Assume the caller supplied a ready-made expression string.
            expression = self.attribute
        else:
            raise TypeError(f"Cannot serialize region filter attribute of type {type(self.attribute)}")

        payload: dict[str, Any] = {"attribute": expression}
        if self.names is not None:
            payload["names"] = self.names
        if self.values is not None:
            payload["values"] = self.values
        return payload
+
+
+# =============================================================================
+# Kriging Parameters
+# =============================================================================
+
+
@dataclass
class KrigingParameters:
    """Parameters for the kriging task.

    Bundles every input the kriging interpolation task needs. ``source`` and
    ``target`` may be given either as explicit :class:`Source`/:class:`Target`
    instances or as typed attribute objects from ``evo.objects.typed`` — the
    latter are adapted automatically in ``__init__``.

    Example:
        >>> from evo.compute.tasks import run, SearchNeighborhood, Ellipsoid, EllipsoidRanges
        >>> from evo.compute.tasks.kriging import KrigingParameters, RegionFilter
        >>>
        >>> params = KrigingParameters(
        ...     source=pointset.attributes["grade"],  # Source attribute
        ...     target=block_model.attributes["kriged_grade"],  # created if missing
        ...     variogram=variogram,  # Variogram model
        ...     search=SearchNeighborhood(
        ...         ellipsoid=Ellipsoid(ranges=EllipsoidRanges(200, 150, 100)),
        ...         max_samples=20,
        ...     ),
        ...     # method defaults to ordinary kriging
        ... )
        >>>
        >>> # Restrict kriging to particular categories on the target:
        >>> params_filtered = KrigingParameters(
        ...     source=pointset.attributes["grade"],
        ...     target=block_model.attributes["kriged_grade"],
        ...     variogram=variogram,
        ...     search=SearchNeighborhood(...),
        ...     target_region_filter=RegionFilter(
        ...         attribute=block_model.attributes["domain"],
        ...         names=["LMS1", "LMS2"],
        ...     ),
        ... )
    """

    source: Source
    """The source object and attribute containing known values."""

    target: Target
    """The target object and attribute to create or update with kriging results."""

    variogram: GeoscienceObjectReference
    """Model of the covariance within the domain (Variogram object or reference)."""

    search: SearchNeighborhood
    """Search neighborhood parameters."""

    method: SimpleKriging | OrdinaryKriging | None = None
    """The kriging method to use. Defaults to ordinary kriging if not specified."""

    target_region_filter: RegionFilter | None = None
    """Optional region filter to restrict kriging to specific categories on the target object."""

    block_discretisation: BlockDiscretisation | None = None
    """Optional sub-block discretisation for block kriging.

    When provided, each target block is subdivided into nx × ny × nz sub-cells
    and the kriged value is averaged across these sub-cells. When omitted,
    point kriging is performed. Only applicable when the target is a 3D grid
    or block model.
    """

    def __init__(
        self,
        source: Source | Any,  # also accepts Attribute from evo.objects.typed
        target: Target | Any,  # also accepts Attribute/PendingAttribute variants
        variogram: GeoscienceObjectReference,
        search: SearchNeighborhood,
        method: SimpleKriging | OrdinaryKriging | None = None,
        target_region_filter: RegionFilter | None = None,
        block_discretisation: BlockDiscretisation | None = None,
    ):
        # Adapt typed attribute objects into Source/Target specifications.
        if isinstance(source, Attribute):
            source = source_from_attribute(source)
        if is_typed_attribute(target):
            target = target_from_attribute(target)

        self.source = source
        self.target = target
        self.variogram = variogram
        self.search = search
        # Ordinary kriging is the documented default method.
        self.method = method or OrdinaryKriging()
        self.target_region_filter = target_region_filter
        self.block_discretisation = block_discretisation

    def to_dict(self) -> dict[str, Any]:
        """Serialize to the wire-format dictionary expected by the task API."""
        target_payload = self.target.to_dict()
        # The region filter rides along inside the target specification.
        if self.target_region_filter is not None:
            target_payload["region_filter"] = self.target_region_filter.to_dict()

        payload: dict[str, Any] = {
            "source": self.source.to_dict(),
            "target": target_payload,
            "variogram": serialize_object_reference(self.variogram),
            "neighborhood": self.search.to_dict(),
            "kriging_method": self.method.to_dict(),
        }
        # Omitted entirely for point kriging.
        if self.block_discretisation is not None:
            payload["block_discretisation"] = self.block_discretisation.to_dict()
        return payload
+
+
+# =============================================================================
+# Kriging Result Types
+# =============================================================================
+
+
class KrigingResult(TaskResult):
    """Result of a kriging task.

    Wraps the completed kriging operation's outcome and exposes the shared
    :class:`TaskResult` helpers for reaching the target object and its data.

    Example:
        >>> result = await run(manager, params)
        >>> result  # Pretty-prints the result
        >>>
        >>> # Get data directly as DataFrame (simplest approach)
        >>> df = await result.to_dataframe()
        >>>
        >>> # Or load the target object for more control
        >>> target = await result.get_target_object()
    """

    def __init__(self, message: str, target: TaskTarget):
        """Initialize with the task message and the parsed target info.

        Args:
            message: A message describing what happened in the task.
            target: The target information from the kriging result.
        """
        super().__init__(message=message, target=target)

    def _get_result_type_name(self) -> str:
        """Display name used when rendering this result."""
        return "Kriging"
+
+
+# =============================================================================
+# Run Functions
+# =============================================================================
+
+
def _parse_kriging_result(data: dict[str, Any]) -> KrigingResult:
    """Build a :class:`KrigingResult` from the raw API response payload."""
    return KrigingResult(message=data["message"], target=parse_task_target(data))
+
+
async def _run_single_kriging(
    context: IContext,
    parameters: KrigingParameters,
    *,
    preview: bool = False,
    polling_interval_seconds: float = 0.5,
    retry: Retry | None = None,
    fb: IFeedback = NoFeedback,
) -> KrigingResult:
    """Submit one kriging job, wait for completion, and parse the result."""
    # Submit the job to the geostatistics/kriging task endpoint.
    job = await JobClient.submit(
        connector=context.get_connector(),
        org_id=context.get_org_id(),
        topic="geostatistics",
        task="kriging",
        parameters=parameters.to_dict(),
        result_type=dict,  # keep the raw dict; parsed locally below
        preview=preview,
    )

    # Poll until the job completes (or fails), reporting progress via fb.
    raw = await job.wait_for_results(
        polling_interval_seconds=polling_interval_seconds,
        retry=retry,
        fb=fb,
    )

    parsed = _parse_kriging_result(raw)
    # Make the originating context available to the result object so its
    # helper methods can reach back to the service.
    parsed._context = context
    return parsed
+
+
async def _run_kriging_for_registry(
    context: IContext,
    parameters: KrigingParameters,
    *,
    preview: bool = False,
) -> KrigingResult:
    """Minimal runner registered with the TaskRegistry.

    Exposes no polling/retry knobs; use the full ``run()`` function when
    finer control over those behaviours is needed.
    """
    return await _run_single_kriging(context, parameters, preview=preview)
+
+
# Register kriging task runner with the task registry so the generic
# run()/run_tasks() dispatchers can route KrigingParameters to this module.

register_task_runner(KrigingParameters, _run_kriging_for_registry)
diff --git a/packages/evo-compute/tests/test_kriging_tasks.py b/packages/evo-compute/tests/test_kriging_tasks.py
new file mode 100644
index 00000000..172dcb89
--- /dev/null
+++ b/packages/evo-compute/tests/test_kriging_tasks.py
@@ -0,0 +1,778 @@
+# Copyright © 2025 Bentley Systems, Incorporated
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for kriging task parameter handling."""
+
+from unittest import TestCase
+from unittest.mock import MagicMock
+
+from evo.objects.typed.attributes import (
+ Attribute,
+ BlockModelAttribute,
+ BlockModelPendingAttribute,
+ PendingAttribute,
+)
+
+from evo.compute.tasks import (
+ BlockDiscretisation,
+ CreateAttribute,
+ RegionFilter,
+ SearchNeighborhood,
+ Source,
+ Target,
+ UpdateAttribute,
+)
+from evo.compute.tasks.common import (
+ Ellipsoid,
+ EllipsoidRanges,
+ get_attribute_expression,
+ is_typed_attribute,
+ source_from_attribute,
+ target_from_attribute,
+)
+from evo.compute.tasks.kriging import KrigingParameters
+
+
def _create_mock_source_attribute(name: str, key: str, object_url: str, schema_path: str = "") -> MagicMock:
    """Build a MagicMock standing in for an existing ``Attribute``.

    ``spec=Attribute`` makes ``isinstance(mock, Attribute)`` succeed, and the
    mock carries the private state (``_context``, ``_obj``) that the adapter
    functions inspect.
    """
    # ModelContext-like stand-in carrying the schema path.
    context_stub = MagicMock()
    context_stub.schema_path = schema_path

    # Parent object stand-in exposing only its metadata URL.
    parent_stub = MagicMock()
    parent_stub.metadata.url = object_url

    attribute = MagicMock(spec=Attribute)
    attribute.name = name
    attribute.key = key
    attribute.exists = True
    attribute._context = context_stub
    attribute._obj = parent_stub
    return attribute
+
+
def _create_pending_attribute(name: str, parent_obj: MagicMock | None = None) -> PendingAttribute:
    """Return a real PendingAttribute whose owning collection is a mock.

    The mock collection's ``_obj`` is set to *parent_obj* (or ``None``) so
    adapter code can reach the owning object when it needs a target URL.
    """
    collection_stub = MagicMock()
    collection_stub._obj = parent_obj
    return PendingAttribute(collection_stub, name)
+
+
class TestAttributeAdapters(TestCase):
    """Tests for the attribute adapter functions in source_target.

    Exercises ``is_typed_attribute``, ``get_attribute_expression``,
    ``source_from_attribute`` and ``target_from_attribute`` across existing
    and pending attributes on both point-set-style and block-model objects,
    plus rejection of unsupported inputs.
    """

    # ---- is_typed_attribute ----

    def test_is_typed_attribute_with_attribute(self):
        attr = _create_mock_source_attribute("grade", "abc-key", "https://example.com/obj")
        self.assertTrue(is_typed_attribute(attr))

    def test_is_typed_attribute_with_pending_attribute(self):
        pending = _create_pending_attribute("new_attr")
        self.assertTrue(is_typed_attribute(pending))

    def test_is_typed_attribute_with_block_model_attribute(self):
        bm_attr = BlockModelAttribute(name="grade", attribute_type="Float64")
        self.assertTrue(is_typed_attribute(bm_attr))

    def test_is_typed_attribute_with_block_model_pending_attribute(self):
        bm_pending = BlockModelPendingAttribute(obj=None, name="new_col")
        self.assertTrue(is_typed_attribute(bm_pending))

    def test_is_typed_attribute_with_string(self):
        # Plain expression strings are not typed attribute objects.
        self.assertFalse(is_typed_attribute("some_string"))

    def test_is_typed_attribute_with_source(self):
        # An already-adapted Source wrapper must not be re-detected.
        source = Source(object="https://example.com/obj", attribute="grade")
        self.assertFalse(is_typed_attribute(source))

    # ---- get_attribute_expression ----

    def test_expression_for_attribute_with_schema_path(self):
        # Existing attributes are addressed by key, scoped by their schema path.
        attr = _create_mock_source_attribute(
            "grade", "abc-key", "https://example.com/obj", schema_path="locations.attributes"
        )
        result = get_attribute_expression(attr)
        self.assertEqual(result, "locations.attributes[?key=='abc-key']")

    def test_expression_for_attribute_without_schema_path(self):
        attr = _create_mock_source_attribute("grade", "abc-key", "https://example.com/obj", schema_path="")
        result = get_attribute_expression(attr)
        self.assertEqual(result, "attributes[?key=='abc-key']")

    def test_expression_for_pending_attribute(self):
        # Pending attributes have no key yet, so they are addressed by name.
        pending = _create_pending_attribute("my_attribute")
        result = get_attribute_expression(pending)
        self.assertEqual(result, "attributes[?name=='my_attribute']")

    def test_expression_for_block_model_attribute(self):
        bm_attr = BlockModelAttribute(name="grade", attribute_type="Float64")
        result = get_attribute_expression(bm_attr)
        self.assertEqual(result, "attributes[?name=='grade']")

    def test_expression_for_block_model_pending_attribute(self):
        bm_pending = BlockModelPendingAttribute(obj=None, name="new_col")
        result = get_attribute_expression(bm_pending)
        self.assertEqual(result, "attributes[?name=='new_col']")

    def test_expression_raises_for_invalid_type(self):
        with self.assertRaises(TypeError):
            get_attribute_expression("not_an_attribute")

    # ---- source_from_attribute ----

    def test_source_from_existing_attribute(self):
        attr = _create_mock_source_attribute(
            "grade", "abc-key", "https://example.com/pointset", schema_path="locations.attributes"
        )
        result = source_from_attribute(attr)
        self.assertIsInstance(result, Source)
        result_dict = result.to_dict()
        self.assertEqual(result_dict["object"], "https://example.com/pointset")
        self.assertEqual(result_dict["attribute"], "locations.attributes[?key=='abc-key']")

    def test_source_from_attribute_without_schema_path(self):
        attr = _create_mock_source_attribute("grade", "abc-key", "https://example.com/pointset", schema_path="")
        result = source_from_attribute(attr)
        result_dict = result.to_dict()
        self.assertEqual(result_dict["object"], "https://example.com/pointset")
        self.assertEqual(result_dict["attribute"], "attributes[?key=='abc-key']")

    def test_source_from_attribute_raises_for_pending(self):
        # A pending attribute has no data yet, so it cannot act as a source.
        pending = _create_pending_attribute("new_attr")
        with self.assertRaises(TypeError):
            source_from_attribute(pending)

    def test_source_from_attribute_raises_for_block_model_attribute(self):
        bm_attr = BlockModelAttribute(name="grade", attribute_type="Float64")
        with self.assertRaises(TypeError):
            source_from_attribute(bm_attr)

    def test_source_from_attribute_raises_for_string(self):
        with self.assertRaises(TypeError):
            source_from_attribute("not_an_attribute")

    # ---- target_from_attribute ----

    def test_target_from_existing_attribute(self):
        # Existing attributes become an 'update' target referencing their key.
        attr = _create_mock_source_attribute(
            "grade", "abc-key", "https://example.com/obj", schema_path="locations.attributes"
        )
        result = target_from_attribute(attr)
        self.assertIsInstance(result, Target)
        result_dict = result.to_dict()
        self.assertEqual(result_dict["attribute"]["operation"], "update")
        self.assertEqual(result_dict["attribute"]["reference"], "locations.attributes[?key=='abc-key']")

    def test_target_from_pending_attribute(self):
        # Pending attributes become a 'create' target named after the attribute.
        mock_obj = MagicMock()
        mock_obj.metadata.url = "https://example.com/grid"
        pending = _create_pending_attribute("new_column", parent_obj=mock_obj)
        result = target_from_attribute(pending)
        self.assertIsInstance(result, Target)
        result_dict = result.to_dict()
        self.assertEqual(result_dict["attribute"]["operation"], "create")
        self.assertEqual(result_dict["attribute"]["name"], "new_column")

    def test_target_from_block_model_existing_attribute(self):
        mock_bm = MagicMock()
        mock_bm.metadata.url = "https://example.com/blockmodel"
        bm_attr = BlockModelAttribute(name="grade", attribute_type="Float64", obj=mock_bm)
        result = target_from_attribute(bm_attr)
        self.assertIsInstance(result, Target)
        result_dict = result.to_dict()
        self.assertEqual(result_dict["attribute"]["operation"], "update")
        self.assertEqual(result_dict["attribute"]["reference"], "attributes[?name=='grade']")

    def test_target_from_block_model_pending_attribute(self):
        mock_bm = MagicMock()
        mock_bm.metadata.url = "https://example.com/blockmodel"
        bm_pending = BlockModelPendingAttribute(obj=mock_bm, name="new_col")
        result = target_from_attribute(bm_pending)
        self.assertIsInstance(result, Target)
        result_dict = result.to_dict()
        self.assertEqual(result_dict["attribute"]["operation"], "create")
        self.assertEqual(result_dict["attribute"]["name"], "new_col")

    def test_target_from_attribute_raises_for_invalid_type(self):
        with self.assertRaises(TypeError):
            target_from_attribute("not_an_attribute")

    def test_target_from_attribute_raises_for_none_obj(self):
        # Without a parent object there is no URL to build a target from.
        bm_pending = BlockModelPendingAttribute(obj=None, name="new_col")
        with self.assertRaises(TypeError):
            target_from_attribute(bm_pending)
+
+
class TestKrigingParametersWithAttributes(TestCase):
    """KrigingParameters should accept typed attribute objects for source and target.

    Every test builds parameters with the same variogram URL and search
    neighborhood, then inspects the serialized parameter dict.
    """

    _VARIOGRAM = "https://example.com/variogram"

    @staticmethod
    def _search_neighborhood() -> SearchNeighborhood:
        """Search neighborhood shared by every test in this class."""
        return SearchNeighborhood(
            ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)),
            max_samples=20,
        )

    @staticmethod
    def _pointset_source() -> Source:
        """Canonical explicit Source used when the target is under test."""
        return Source(object="https://example.com/pointset", attribute="locations.attributes[?name=='grade']")

    def _serialize(self, source, target) -> dict:
        """Build KrigingParameters from *source*/*target* and return its dict form."""
        return KrigingParameters(
            source=source,
            target=target,
            variogram=self._VARIOGRAM,
            search=self._search_neighborhood(),
        ).to_dict()

    def test_kriging_params_with_pending_attribute_target(self):
        """A PendingAttribute target serializes as a 'create' operation."""
        grid = MagicMock()
        grid.metadata.url = "https://example.com/grid"
        pending_target = _create_pending_attribute("kriged_grade", parent_obj=grid)

        out = self._serialize(self._pointset_source(), pending_target)

        self.assertEqual(out["target"]["object"], "https://example.com/grid")
        self.assertEqual(out["target"]["attribute"]["operation"], "create")
        self.assertEqual(out["target"]["attribute"]["name"], "kriged_grade")

    def test_kriging_params_with_existing_attribute_target(self):
        """An existing Attribute target serializes as an 'update' operation."""
        existing_target = _create_mock_source_attribute(
            name="existing_attr",
            key="exist-key",
            object_url="https://example.com/grid",
            schema_path="locations.attributes",
        )

        out = self._serialize(self._pointset_source(), existing_target)

        self.assertEqual(out["target"]["object"], "https://example.com/grid")
        self.assertEqual(out["target"]["attribute"]["operation"], "update")
        self.assertIn("reference", out["target"]["attribute"])

    def test_kriging_params_with_block_model_pending_attribute(self):
        """A BlockModelPendingAttribute target serializes as a 'create' operation."""
        block_model = MagicMock()
        block_model.metadata.url = "https://example.com/blockmodel"
        pending_target = BlockModelPendingAttribute(obj=block_model, name="new_bm_attr")

        out = self._serialize(self._pointset_source(), pending_target)

        self.assertEqual(out["target"]["object"], "https://example.com/blockmodel")
        self.assertEqual(out["target"]["attribute"]["operation"], "create")
        self.assertEqual(out["target"]["attribute"]["name"], "new_bm_attr")

    def test_kriging_params_with_block_model_existing_attribute(self):
        """An existing BlockModelAttribute target serializes as an 'update' operation."""
        block_model = MagicMock()
        block_model.metadata.url = "https://example.com/blockmodel"
        existing_target = BlockModelAttribute(
            name="existing_bm_attr",
            attribute_type="Float64",
            obj=block_model,
        )

        out = self._serialize(self._pointset_source(), existing_target)

        self.assertEqual(out["target"]["object"], "https://example.com/blockmodel")
        self.assertEqual(out["target"]["attribute"]["operation"], "update")
        self.assertIn("reference", out["target"]["attribute"])

    def test_kriging_params_with_explicit_target(self):
        """An explicit Target object continues to work unchanged."""
        explicit_target = Target.new_attribute("https://example.com/grid", "kriged_grade")

        out = self._serialize(self._pointset_source(), explicit_target)

        self.assertEqual(out["target"]["object"], "https://example.com/grid")
        self.assertEqual(out["target"]["attribute"]["operation"], "create")
        self.assertEqual(out["target"]["attribute"]["name"], "kriged_grade")

    def test_kriging_params_source_attribute_conversion(self):
        """A typed source Attribute is converted to object URL + key expression."""
        attribute_source = _create_mock_source_attribute(
            name="grade",
            key="grade-key",
            object_url="https://example.com/pointset",
            schema_path="locations.attributes",
        )
        explicit_target = Target.new_attribute("https://example.com/grid", "kriged_grade")

        out = self._serialize(attribute_source, explicit_target)

        self.assertEqual(out["source"]["object"], "https://example.com/pointset")
        self.assertEqual(out["source"]["attribute"], "locations.attributes[?key=='grade-key']")
+
+
class TestTargetSerialization(TestCase):
    """Target.to_dict serialization for each supported attribute form."""

    def test_target_with_create_attribute(self):
        """A CreateAttribute serializes with operation 'create' plus its name."""
        serialized = Target(
            object="https://example.com/grid",
            attribute=CreateAttribute(name="new_attr"),
        ).to_dict()

        self.assertEqual(serialized["object"], "https://example.com/grid")
        self.assertEqual(serialized["attribute"]["operation"], "create")
        self.assertEqual(serialized["attribute"]["name"], "new_attr")

    def test_target_with_update_attribute(self):
        """An UpdateAttribute serializes with operation 'update' plus its reference."""
        serialized = Target(
            object="https://example.com/grid",
            attribute=UpdateAttribute(reference="cell_attributes[?name=='existing']"),
        ).to_dict()

        self.assertEqual(serialized["object"], "https://example.com/grid")
        self.assertEqual(serialized["attribute"]["operation"], "update")
        self.assertEqual(serialized["attribute"]["reference"], "cell_attributes[?name=='existing']")

    def test_target_with_dict_attribute(self):
        """A plain dict attribute passes through to the serialized form."""
        serialized = Target(
            object="https://example.com/grid",
            attribute={"operation": "create", "name": "dict_attr"},
        ).to_dict()

        self.assertEqual(serialized["object"], "https://example.com/grid")
        self.assertEqual(serialized["attribute"]["operation"], "create")
        self.assertEqual(serialized["attribute"]["name"], "dict_attr")

    def test_target_new_attribute_factory(self):
        """Target.new_attribute builds a 'create' target for the given name."""
        serialized = Target.new_attribute("https://example.com/grid", "new_attr").to_dict()

        self.assertEqual(serialized["object"], "https://example.com/grid")
        self.assertEqual(serialized["attribute"]["operation"], "create")
        self.assertEqual(serialized["attribute"]["name"], "new_attr")
+
+
class TestRegionFilter(TestCase):
    """Tests for RegionFilter class.

    A RegionFilter selects a subset of a categorical attribute either by
    category ``names`` or by integer ``values``; exactly one of the two
    selectors must be provided.
    """

    def test_region_filter_with_names(self):
        """Test RegionFilter with category names."""
        region_filter = RegionFilter(
            attribute="domain_attribute",
            names=["LMS1", "LMS2"],
        )

        result = region_filter.to_dict()

        self.assertEqual(result["attribute"], "domain_attribute")
        self.assertEqual(result["names"], ["LMS1", "LMS2"])
        # The unused selector key must be omitted entirely, not emitted as None.
        self.assertNotIn("values", result)

    def test_region_filter_with_values(self):
        """Test RegionFilter with integer values."""
        region_filter = RegionFilter(
            attribute="domain_code_attribute",
            values=[1, 2, 3],
        )

        result = region_filter.to_dict()

        self.assertEqual(result["attribute"], "domain_code_attribute")
        self.assertEqual(result["values"], [1, 2, 3])
        self.assertNotIn("names", result)

    def test_region_filter_with_block_model_attribute(self):
        """Test RegionFilter with a real BlockModelAttribute."""
        bm_attr = BlockModelAttribute(name="domain", attribute_type="category")

        region_filter = RegionFilter(
            attribute=bm_attr,
            names=["Zone1"],
        )

        result = region_filter.to_dict()

        # Typed attributes are converted to a name-based expression.
        self.assertEqual(result["attribute"], "attributes[?name=='domain']")
        self.assertEqual(result["names"], ["Zone1"])

    def test_region_filter_with_pointset_attribute(self):
        """Test RegionFilter with a PointSet Attribute (mock with spec)."""
        mock_attr = _create_mock_source_attribute(
            name="domain",
            key="domain-key",
            object_url="https://example.com/pointset",
            schema_path="locations.attributes",
        )

        region_filter = RegionFilter(
            attribute=mock_attr,
            names=["Domain1"],
        )

        result = region_filter.to_dict()

        # Existing attributes are converted to a key-based expression.
        self.assertEqual(result["attribute"], "locations.attributes[?key=='domain-key']")
        self.assertEqual(result["names"], ["Domain1"])

    def test_region_filter_with_pending_attribute(self):
        """Test RegionFilter with a PendingAttribute."""
        pending = _create_pending_attribute("domain")

        region_filter = RegionFilter(
            attribute=pending,
            names=["Zone1"],
        )

        result = region_filter.to_dict()

        self.assertEqual(result["attribute"], "attributes[?name=='domain']")
        self.assertEqual(result["names"], ["Zone1"])

    def test_region_filter_cannot_have_both_names_and_values(self):
        """Test RegionFilter raises error when both names and values are provided."""
        with self.assertRaises(ValueError) as context:
            RegionFilter(
                attribute="domain_attribute",
                names=["LMS1"],
                values=[1],
            )

        self.assertIn("Only one of 'names' or 'values' may be provided", str(context.exception))

    def test_region_filter_must_have_names_or_values(self):
        """Test RegionFilter raises error when neither names nor values are provided."""
        with self.assertRaises(ValueError) as context:
            RegionFilter(
                attribute="domain_attribute",
            )

        self.assertIn("One of 'names' or 'values' must be provided", str(context.exception))

    def test_region_filter_raises_for_unsupported_type(self):
        """Test RegionFilter raises TypeError for unsupported attribute type."""
        with self.assertRaises(TypeError):
            # to_dict() is also invoked in case validation is deferred to
            # serialization rather than performed in the constructor.
            region_filter = RegionFilter(attribute=12345, names=["Zone1"])
            region_filter.to_dict()
+
+
class TestKrigingParametersWithRegionFilter(TestCase):
    """KrigingParameters should carry an optional target region filter."""

    @staticmethod
    def _serialized(region_filter=None) -> dict:
        """Build standard kriging parameters (optionally filtered) and serialize them."""
        extra = {} if region_filter is None else {"target_region_filter": region_filter}
        return KrigingParameters(
            source=Source(object="https://example.com/pointset", attribute="grade"),
            target=Target.new_attribute("https://example.com/grid", "kriged_grade"),
            variogram="https://example.com/variogram",
            search=SearchNeighborhood(
                ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)),
                max_samples=20,
            ),
            **extra,
        ).to_dict()

    def test_kriging_params_with_target_region_filter_names(self):
        """A names-based filter appears under target['region_filter']."""
        out = self._serialized(RegionFilter(attribute="domain_attribute", names=["LMS1", "LMS2"]))

        self.assertIn("region_filter", out["target"])
        self.assertEqual(out["target"]["region_filter"]["attribute"], "domain_attribute")
        self.assertEqual(out["target"]["region_filter"]["names"], ["LMS1", "LMS2"])

    def test_kriging_params_with_target_region_filter_values(self):
        """A values-based filter appears under target['region_filter']."""
        out = self._serialized(RegionFilter(attribute="domain_code", values=[1, 2, 3]))

        self.assertIn("region_filter", out["target"])
        self.assertEqual(out["target"]["region_filter"]["attribute"], "domain_code")
        self.assertEqual(out["target"]["region_filter"]["values"], [1, 2, 3])

    def test_kriging_params_without_target_region_filter(self):
        """With no filter supplied, the target dict carries no 'region_filter' key."""
        out = self._serialized()

        self.assertNotIn("region_filter", out["target"])
+
+
class TestBlockDiscretisation(TestCase):
    """Behavioral tests for BlockDiscretisation: defaults, bounds, serialization."""

    def test_default_values(self):
        """With no arguments the discretisation is 1x1x1."""
        bd = BlockDiscretisation()
        self.assertEqual((bd.nx, bd.ny, bd.nz), (1, 1, 1))

    def test_custom_values(self):
        """Explicit per-axis subdivision counts are stored as given."""
        bd = BlockDiscretisation(nx=3, ny=4, nz=2)
        self.assertEqual((bd.nx, bd.ny, bd.nz), (3, 4, 2))

    def test_maximum_values(self):
        """The upper bound of 9 subdivisions per axis is accepted."""
        bd = BlockDiscretisation(nx=9, ny=9, nz=9)
        self.assertEqual((bd.nx, bd.ny, bd.nz), (9, 9, 9))

    def test_to_dict(self):
        """Serialization emits the three per-axis counts."""
        self.assertEqual(BlockDiscretisation(nx=3, ny=3, nz=2).to_dict(), {"nx": 3, "ny": 3, "nz": 2})

    def test_to_dict_defaults(self):
        """Default construction serializes to all ones."""
        self.assertEqual(BlockDiscretisation().to_dict(), {"nx": 1, "ny": 1, "nz": 1})

    def test_validation_nx_too_low(self):
        """nx below the minimum of 1 is rejected with a descriptive message."""
        with self.assertRaises(ValueError) as ctx:
            BlockDiscretisation(nx=0)
        message = str(ctx.exception)
        self.assertIn("nx", message)
        self.assertIn("between 1 and 9", message)

    def test_validation_ny_too_high(self):
        """ny above the maximum of 9 is rejected with a descriptive message."""
        with self.assertRaises(ValueError) as ctx:
            BlockDiscretisation(ny=10)
        message = str(ctx.exception)
        self.assertIn("ny", message)
        self.assertIn("between 1 and 9", message)

    def test_validation_nz_negative(self):
        """A negative nz is rejected like any other out-of-range value."""
        with self.assertRaises(ValueError) as ctx:
            BlockDiscretisation(nz=-1)
        message = str(ctx.exception)
        self.assertIn("nz", message)
        self.assertIn("between 1 and 9", message)

    def test_validation_non_integer_type(self):
        """Non-integer subdivision counts raise TypeError."""
        with self.assertRaises(TypeError) as ctx:
            BlockDiscretisation(nx=2.5)
        message = str(ctx.exception)
        self.assertIn("nx", message)
        self.assertIn("integer", message)
+
+
class TestKrigingParametersWithBlockDiscretisation(TestCase):
    """Tests for KrigingParameters with block_discretisation support.

    ``block_discretisation`` is optional: when supplied it is serialized at
    the top level of the parameter dict; when omitted the key is absent.
    """

    def test_kriging_params_with_block_discretisation(self):
        """Test KrigingParameters includes block_discretisation in to_dict."""
        source = Source(object="https://example.com/pointset", attribute="grade")
        target = Target.new_attribute("https://example.com/grid", "kriged_grade")
        variogram = "https://example.com/variogram"
        search = SearchNeighborhood(
            ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)),
            max_samples=20,
        )
        bd = BlockDiscretisation(nx=3, ny=3, nz=2)

        params = KrigingParameters(
            source=source,
            target=target,
            variogram=variogram,
            search=search,
            block_discretisation=bd,
        )

        params_dict = params.to_dict()

        self.assertIn("block_discretisation", params_dict)
        self.assertEqual(params_dict["block_discretisation"], {"nx": 3, "ny": 3, "nz": 2})

    def test_kriging_params_without_block_discretisation(self):
        """Test KrigingParameters omits block_discretisation when None (default)."""
        source = Source(object="https://example.com/pointset", attribute="grade")
        target = Target.new_attribute("https://example.com/grid", "kriged_grade")
        variogram = "https://example.com/variogram"
        search = SearchNeighborhood(
            ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)),
            max_samples=20,
        )

        params = KrigingParameters(
            source=source,
            target=target,
            variogram=variogram,
            search=search,
        )

        params_dict = params.to_dict()

        # Verify region filter is not present
        self.assertNotIn("block_discretisation", params_dict)

    def test_kriging_params_block_discretisation_with_region_filter(self):
        """Test KrigingParameters with both block_discretisation and region filter."""
        source = Source(object="https://example.com/pointset", attribute="grade")
        target = Target.new_attribute("https://example.com/grid", "kriged_grade")
        variogram = "https://example.com/variogram"
        search = SearchNeighborhood(
            ellipsoid=Ellipsoid(ranges=EllipsoidRanges(100, 100, 50)),
            max_samples=20,
        )
        bd = BlockDiscretisation(nx=2, ny=2, nz=2)
        region_filter = RegionFilter(
            attribute="domain_attribute",
            names=["LMS1"],
        )

        params = KrigingParameters(
            source=source,
            target=target,
            variogram=variogram,
            search=search,
            block_discretisation=bd,
            target_region_filter=region_filter,
        )

        params_dict = params.to_dict()

        # Both should be present: discretisation at top level, filter nested
        # inside the target.
        self.assertIn("block_discretisation", params_dict)
        self.assertEqual(params_dict["block_discretisation"], {"nx": 2, "ny": 2, "nz": 2})
        self.assertIn("region_filter", params_dict["target"])
        self.assertEqual(params_dict["target"]["region_filter"]["names"], ["LMS1"])
diff --git a/packages/evo-compute/tests/test_tasks.py b/packages/evo-compute/tests/test_tasks.py
new file mode 100644
index 00000000..3546d4f8
--- /dev/null
+++ b/packages/evo-compute/tests/test_tasks.py
@@ -0,0 +1,298 @@
+# Copyright © 2025 Bentley Systems, Incorporated
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for the compute tasks module imports and basic functionality."""
+
+import inspect
+import unittest
+from unittest.mock import AsyncMock, MagicMock, patch
+
+from evo.compute.tasks.common.runner import get_task_runner, run_tasks
+from evo.compute.tasks.kriging import KrigingParameters, _run_single_kriging
+
+
+class TestTaskRegistry(unittest.TestCase):
+ """Tests for the task registry system."""
+
+ def test_kriging_parameters_registered(self):
+ """KrigingParameters should be registered with the task registry."""
+ from evo.compute.tasks.common.runner import get_task_runner
+ from evo.compute.tasks.kriging import KrigingParameters
+
+ runner = get_task_runner(KrigingParameters)
+ self.assertIsNotNone(runner)
+
+ def test_unregistered_type_returns_none(self):
+ """Unregistered types should return None from get_task_runner."""
+ from evo.compute.tasks.common.runner import get_task_runner
+
+ class UnregisteredParams:
+ pass
+
+ runner = get_task_runner(UnregisteredParams)
+ self.assertIsNone(runner)
+
+ def test_registry_get_runner_for_params_raises_on_unknown(self):
+ """get_runner_for_params should raise TypeError for unregistered types."""
+ from evo.compute.tasks.common.runner import TaskRegistry
+
+ registry = TaskRegistry()
+
+ class UnknownParams:
+ pass
+
+ with self.assertRaises(TypeError) as ctx:
+ registry.get_runner_for_params(UnknownParams())
+
+ self.assertIn("UnknownParams", str(ctx.exception))
+
+
+class TestPreviewFlagSignatures(unittest.TestCase):
+ """Tests for the preview flag signatures on run() and runner functions."""
+
+ def test_registered_runner_accepts_preview_kwarg(self):
+ """The registered kriging runner should accept a 'preview' keyword argument."""
+
+ runner = get_task_runner(KrigingParameters)
+ sig = inspect.signature(runner)
+ self.assertIn("preview", sig.parameters)
+ param = sig.parameters["preview"]
+ self.assertEqual(param.default, False)
+ self.assertEqual(param.kind, inspect.Parameter.KEYWORD_ONLY)
+
+ def test_run_single_kriging_accepts_preview_kwarg(self):
+ """_run_single_kriging should accept a 'preview' keyword argument defaulting to False."""
+ sig = inspect.signature(_run_single_kriging)
+ self.assertIn("preview", sig.parameters)
+ self.assertEqual(sig.parameters["preview"].default, False)
+
+ def test_run_function_accepts_preview_kwarg(self):
+ """The public run() function should accept a 'preview' keyword argument defaulting to False."""
+ from evo.compute.tasks import run
+
+ sig = inspect.signature(run)
+ self.assertIn("preview", sig.parameters)
+ self.assertEqual(sig.parameters["preview"].default, False)
+
+ def test_run_tasks_accepts_preview_kwarg(self):
+ """run_tasks() should accept a 'preview' keyword argument defaulting to False."""
+ sig = inspect.signature(run_tasks)
+ self.assertIn("preview", sig.parameters)
+ self.assertEqual(sig.parameters["preview"].default, False)
+
+
+def _mock_kriging_context():
+ """Create a mock context + connector for kriging preview tests."""
+ mock_connector = MagicMock()
+
+ mock_context = MagicMock()
+ mock_context.get_connector.return_value = mock_connector
+ mock_context.get_org_id.return_value = "test-org-id"
+ return mock_context, mock_connector
+
+
+def _mock_kriging_job():
+ """Create a mock job that returns a valid kriging result."""
+ mock_job = AsyncMock()
+ mock_job.wait_for_results.return_value = {
+ "message": "ok",
+ "target": {
+ "reference": "ref",
+ "name": "t",
+ "description": None,
+ "schema_id": "s",
+ "attribute": {"reference": "ar", "name": "an"},
+ },
+ }
+ return mock_job
+
+
+class TestPreviewFlagBehavior(unittest.IsolatedAsyncioTestCase):
+ """Tests for the preview flag runtime behavior on _run_single_kriging."""
+
+ async def test_run_single_kriging_passes_preview_true_to_submit(self):
+ """_run_single_kriging should pass preview=True to JobClient.submit."""
+ mock_context, mock_connector = _mock_kriging_context()
+ mock_params = MagicMock(spec=KrigingParameters)
+ mock_params.to_dict.return_value = {"source": {}, "target": {}}
+
+ with patch(
+ "evo.compute.tasks.kriging.JobClient.submit", new_callable=AsyncMock, return_value=_mock_kriging_job()
+ ) as mock_submit:
+ await _run_single_kriging(mock_context, mock_params, preview=True)
+
+ # Verify preview=True was passed to JobClient.submit
+ mock_submit.assert_called_once()
+ _, kwargs = mock_submit.call_args
+ self.assertTrue(kwargs.get("preview", False))
+
+ async def test_run_single_kriging_passes_preview_false_to_submit(self):
+ """_run_single_kriging should pass preview=False to JobClient.submit when preview=False."""
+ mock_context, mock_connector = _mock_kriging_context()
+ mock_params = MagicMock(spec=KrigingParameters)
+ mock_params.to_dict.return_value = {"source": {}, "target": {}}
+
+ with patch(
+ "evo.compute.tasks.kriging.JobClient.submit", new_callable=AsyncMock, return_value=_mock_kriging_job()
+ ) as mock_submit:
+ await _run_single_kriging(mock_context, mock_params, preview=False)
+
+ # Verify preview=False was passed to JobClient.submit
+ mock_submit.assert_called_once()
+ _, kwargs = mock_submit.call_args
+ self.assertFalse(kwargs.get("preview", True))
+
+ async def test_run_single_kriging_default_preview_is_false(self):
+ """_run_single_kriging should default to preview=False when not specified."""
+ mock_context, mock_connector = _mock_kriging_context()
+ mock_params = MagicMock(spec=KrigingParameters)
+ mock_params.to_dict.return_value = {"source": {}, "target": {}}
+
+ with patch(
+ "evo.compute.tasks.kriging.JobClient.submit", new_callable=AsyncMock, return_value=_mock_kriging_job()
+ ) as mock_submit:
+ # Call without preview kwarg — should default to False
+ await _run_single_kriging(mock_context, mock_params)
+
+ # Verify preview=False was passed to JobClient.submit
+ mock_submit.assert_called_once()
+ _, kwargs = mock_submit.call_args
+ self.assertFalse(kwargs.get("preview", True))
+
+
+class TestKrigingResultInheritance(unittest.TestCase):
+ """Tests that KrigingResult inherits from TaskResult."""
+
+ def test_kriging_result_inherits_from_task_result(self):
+ """KrigingResult should be a subclass of TaskResult."""
+ from evo.compute.tasks import KrigingResult, TaskResult
+
+ self.assertTrue(issubclass(KrigingResult, TaskResult))
+
+
+class TestTaskResultsContainer(unittest.TestCase):
+ """Tests for the TaskResults container class."""
+
+ def test_task_results_iteration(self):
+ """TaskResults should support iteration."""
+ from evo.compute.tasks.kriging import KrigingResult, TaskResults, _KrigingAttribute, _KrigingTarget
+
+ # Create mock results
+ attr = _KrigingAttribute(reference="ref1", name="attr1")
+ target = _KrigingTarget(
+ reference="ref1",
+ name="target1",
+ description="desc",
+ schema_id="/objects/regular-masked-3d-grid/1.0.0/regular-masked-3d-grid.schema.json",
+ attribute=attr,
+ )
+ result1 = KrigingResult(message="msg1", target=target)
+ result2 = KrigingResult(message="msg2", target=target)
+
+ results = TaskResults([result1, result2])
+
+ # Test len
+ self.assertEqual(len(results), 2)
+
+ # Test iteration
+ items = list(results)
+ self.assertEqual(len(items), 2)
+ self.assertEqual(items[0].message, "msg1")
+ self.assertEqual(items[1].message, "msg2")
+
+ # Test indexing
+ self.assertEqual(results[0].message, "msg1")
+ self.assertEqual(results[1].message, "msg2")
+
+ def test_task_results_results_property(self):
+ """TaskResults should expose results via .results property."""
+ from evo.compute.tasks.kriging import KrigingResult, TaskResults, _KrigingAttribute, _KrigingTarget
+
+ attr = _KrigingAttribute(reference="ref1", name="attr1")
+ target = _KrigingTarget(
+ reference="ref1",
+ name="target1",
+ description="desc",
+ schema_id="/objects/regular-masked-3d-grid/1.0.0/regular-masked-3d-grid.schema.json",
+ attribute=attr,
+ )
+ result = KrigingResult(message="msg", target=target)
+
+ results = TaskResults([result])
+
+ self.assertEqual(results.results, [result])
+
+
+class TestTaskResultSchemaType(unittest.TestCase):
+ """Tests for schema_type property using ObjectSchema parsing."""
+
+ def _make_result(self, schema_id: str):
+ from evo.compute.tasks.common.results import TaskAttribute, TaskResult, TaskTarget
+
+ attr = TaskAttribute(reference="ref", name="attr")
+ target = TaskTarget(reference="ref", name="target", description=None, schema_id=schema_id, attribute=attr)
+ return TaskResult(message="ok", target=target)
+
+ def test_schema_type_parses_valid_schema_id(self):
+ """schema_type should return the sub_classification for a valid schema ID."""
+ result = self._make_result("/objects/regular-masked-3d-grid/1.0.0/regular-masked-3d-grid.schema.json")
+ self.assertEqual(result.schema_type, "regular-masked-3d-grid")
+
+ def test_schema_type_parses_different_schema(self):
+ """schema_type should handle different object schema types."""
+ result = self._make_result("/objects/block-model/2.1.0/block-model.schema.json")
+ self.assertEqual(result.schema_type, "block-model")
+
+ def test_schema_type_falls_back_for_malformed_id(self):
+ """schema_type should return the raw schema_id when it cannot be parsed."""
+ result = self._make_result("some-unparseable-string")
+ self.assertEqual(result.schema_type, "some-unparseable-string")
+
+ def test_schema_type_falls_back_for_partial_id(self):
+ """schema_type should fall back gracefully for partial schema paths."""
+ result = self._make_result("schema/1.0.0")
+ self.assertEqual(result.schema_type, "schema/1.0.0")
+
+
+class TestKrigingMethod(unittest.TestCase):
+ """Tests for kriging method classes."""
+
+ def test_ordinary_kriging_singleton(self):
+ """KrigingMethod.ORDINARY should be an OrdinaryKriging instance."""
+ from evo.compute.tasks.kriging import KrigingMethod, OrdinaryKriging
+
+ self.assertIsInstance(KrigingMethod.ORDINARY, OrdinaryKriging)
+
+ def test_simple_kriging_factory(self):
+ """KrigingMethod.simple() should create a SimpleKriging instance."""
+ from evo.compute.tasks.kriging import KrigingMethod, SimpleKriging
+
+ method = KrigingMethod.simple(mean=100.0)
+ self.assertIsInstance(method, SimpleKriging)
+ self.assertEqual(method.mean, 100.0)
+
+ def test_ordinary_kriging_to_dict(self):
+ """OrdinaryKriging should serialize to dict with type='ordinary'."""
+ from evo.compute.tasks.kriging import OrdinaryKriging
+
+ d = OrdinaryKriging().to_dict()
+ self.assertEqual(d, {"type": "ordinary"})
+
+ def test_simple_kriging_to_dict(self):
+ """SimpleKriging should serialize to dict with type='simple' and mean."""
+ from evo.compute.tasks.kriging import SimpleKriging
+
+ d = SimpleKriging(mean=50.0).to_dict()
+ self.assertEqual(d, {"type": "simple", "mean": 50.0})
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/packages/evo-objects/src/evo/objects/typed/attributes.py b/packages/evo-objects/src/evo/objects/typed/attributes.py
index 97ce0661..38412d21 100644
--- a/packages/evo-objects/src/evo/objects/typed/attributes.py
+++ b/packages/evo-objects/src/evo/objects/typed/attributes.py
@@ -42,6 +42,7 @@
"BlockModelAttribute",
"BlockModelAttributes",
"BlockModelPendingAttribute",
+ "PendingAttribute",
]
@@ -100,6 +101,14 @@ def attribute_type(self) -> str:
"""The type of this attribute."""
return self._attribute_type
+ @property
+ def exists(self) -> bool:
+ """Whether this attribute exists on the object.
+
+ :return: True for existing attributes.
+ """
+ return True
+
async def to_dataframe(self, fb: IFeedback = NoFeedback) -> pd.DataFrame:
"""Load a DataFrame containing the values for this attribute from the object.
@@ -153,13 +162,53 @@ async def _upload_attribute_values(
attr_doc["nan_description"] = {"values": []}
+class PendingAttribute:
+ """A placeholder for an attribute that doesn't exist yet on a Geoscience Object.
+
+ This is returned when accessing an attribute by name that doesn't exist.
+ It can be used as a target for compute tasks, which will create the attribute.
+ """
+
+ def __init__(self, parent: "Attributes", name: str) -> None:
+ """
+ :param parent: The Attributes collection this pending attribute belongs to.
+ :param name: The name of the attribute to create.
+ """
+ self._parent = parent
+ self._name = name
+
+ @property
+ def name(self) -> str:
+ """The name of this attribute."""
+ return self._name
+
+ @property
+ def exists(self) -> bool:
+ """Whether this attribute exists on the object.
+
+ :return: False for pending attributes.
+ """
+ return False
+
+ @property
+ def _obj(self) -> "DownloadedObject | None":
+ """The DownloadedObject containing this attribute's parent object.
+
+ Delegates to the parent Attributes collection.
+ """
+ return self._parent._obj
+
+ def __repr__(self) -> str:
+ return f"PendingAttribute(name={self._name!r}, exists=False)"
+
+
class Attributes(SchemaList[Attribute]):
"""A collection of Geoscience Object Attributes"""
_schema_path: str | None = None
"""The full JMESPath to this attributes list within the parent object schema."""
- def __getitem__(self, index_or_name: int | str) -> Attribute:
+ def __getitem__(self, index_or_name: int | str) -> Attribute | PendingAttribute:
"""Get an attribute by index or name.
:param index_or_name: Either an integer index or the name/key of the attribute.
@@ -172,6 +221,8 @@ def __getitem__(self, index_or_name: int | str) -> Attribute:
for attr in self:
if attr.name == index_or_name or attr.key == index_or_name:
return attr
+ # Return a PendingAttribute for non-existent attributes accessed by name
+ return PendingAttribute(self, index_or_name)
return super().__getitem__(index_or_name)
@classmethod
@@ -421,6 +472,14 @@ def __init__(self, attributes: list[BlockModelAttribute], block_model: BlockMode
)
self._attributes.append(attr_with_obj)
+ @property
+ def exists(self) -> bool:
+ """Whether this attribute exists on the block model.
+
+ :return: True for existing attributes.
+ """
+ return True
+
@classmethod
def from_schema(cls, attributes_list: list[dict], block_model: BlockModel | None = None) -> BlockModelAttributes:
"""Parse block model attributes from the schema format.
diff --git a/packages/evo-objects/tests/typed/test_attributes.py b/packages/evo-objects/tests/typed/test_attributes.py
index 63a1e26a..c8928cec 100644
--- a/packages/evo-objects/tests/typed/test_attributes.py
+++ b/packages/evo-objects/tests/typed/test_attributes.py
@@ -16,7 +16,7 @@
import pandas as pd
from parameterized import parameterized
-from evo.objects.typed.attributes import UnSupportedDataTypeError, _infer_attribute_type_from_series
+from evo.objects.typed.attributes import PendingAttribute, UnSupportedDataTypeError, _infer_attribute_type_from_series
class TestAttributeTypeInference(TestCase):
@@ -46,3 +46,22 @@ def test_unsupported_dtype(self):
series = pd.Series([1 + 2j, 3 + 4j], dtype="complex128")
with self.assertRaises(UnSupportedDataTypeError):
_infer_attribute_type_from_series(series)
+
+
+class TestPendingAttribute(TestCase):
+ """Tests for PendingAttribute class."""
+
+ def test_pending_attribute_name(self):
+ """Test that PendingAttribute stores the name correctly."""
+ pending = PendingAttribute(None, "test_attr")
+ self.assertEqual(pending.name, "test_attr")
+
+ def test_pending_attribute_exists_is_false(self):
+ """Test that PendingAttribute.exists returns False."""
+ pending = PendingAttribute(None, "test_attr")
+ self.assertFalse(pending.exists)
+
+ def test_pending_attribute_repr(self):
+ """Test that PendingAttribute has a useful repr."""
+ pending = PendingAttribute(None, "test_attr")
+ self.assertEqual(repr(pending), "PendingAttribute(name='test_attr', exists=False)")
diff --git a/packages/evo-sdk-common/pyproject.toml b/packages/evo-sdk-common/pyproject.toml
index feef4140..efa6636d 100644
--- a/packages/evo-sdk-common/pyproject.toml
+++ b/packages/evo-sdk-common/pyproject.toml
@@ -1,7 +1,7 @@
[project]
name = "evo-sdk-common"
description = "Python package that establishes a common framework for use by client libraries that interact with Seequent Evo APIs"
-version = "0.5.18"
+version = "0.5.19"
requires-python = ">=3.10"
license-files = ["LICENSE.md"]
dynamic = ["readme"]
diff --git a/packages/evo-widgets/src/evo/widgets/__init__.py b/packages/evo-widgets/src/evo/widgets/__init__.py
index f66583f9..8cb56a89 100644
--- a/packages/evo-widgets/src/evo/widgets/__init__.py
+++ b/packages/evo-widgets/src/evo/widgets/__init__.py
@@ -41,6 +41,8 @@
format_block_model_version,
format_report,
format_report_result,
+ format_task_result,
+ format_task_results,
format_variogram,
)
from .urls import (
@@ -71,6 +73,8 @@
"format_block_model_version",
"format_report",
"format_report_result",
+ "format_task_result",
+ "format_task_results",
"format_variogram",
"get_blocksync_base_url",
"get_blocksync_block_model_url",
@@ -155,6 +159,25 @@ def _register_formatters(ipython: InteractiveShell) -> None:
format_block_model_attributes,
)
+ # Register formatters for compute task results
+ html_formatter.for_type_by_name(
+ "evo.compute.tasks.common.results",
+ "TaskResult",
+ format_task_result,
+ )
+
+ html_formatter.for_type_by_name(
+ "evo.compute.tasks.common.results",
+ "TaskResults",
+ format_task_results,
+ )
+
+ html_formatter.for_type_by_name(
+ "evo.compute.tasks.kriging",
+ "KrigingResult",
+ format_task_result,
+ )
+
def _unregister_formatters(ipython: InteractiveShell) -> None:
"""Unregister HTML formatters for Evo SDK types.
diff --git a/packages/evo-widgets/src/evo/widgets/formatters.py b/packages/evo-widgets/src/evo/widgets/formatters.py
index 553a8992..765968de 100644
--- a/packages/evo-widgets/src/evo/widgets/formatters.py
+++ b/packages/evo-widgets/src/evo/widgets/formatters.py
@@ -26,7 +26,12 @@
build_table_row_vtop,
build_title,
)
-from .urls import get_blocksync_block_model_url_from_environment, get_portal_url_for_object, get_viewer_url_for_object
+from .urls import (
+ get_blocksync_block_model_url_from_environment,
+ get_portal_url_for_object,
+ get_portal_url_from_reference,
+ get_viewer_url_for_object,
+)
__all__ = [
"format_attributes_collection",
@@ -36,6 +41,8 @@
"format_block_model_version",
"format_report",
"format_report_result",
+ "format_task_result",
+ "format_task_results",
"format_variogram",
]
@@ -561,3 +568,112 @@ def format_block_model(obj: Any) -> str:
+    html += "</div>"
+
+    return html
+
+
+# =============================================================================
+# Compute Task Result Formatters
+# =============================================================================
+
+
+def _get_task_result_portal_url(result: Any) -> str | None:
+ """Extract Portal URL from a task result's target reference.
+
+ :param result: A TaskResult object with _target.reference attribute.
+ :return: Portal URL string or None if not available.
+ """
+ # Check if result has _target attribute
+ target = getattr(result, "_target", None)
+ if target is None:
+ return None
+
+ # Check if target has reference attribute
+ ref = getattr(target, "reference", None)
+ if not ref or not isinstance(ref, str):
+ return None
+
+ # Try to generate portal URL from reference
+ try:
+ return get_portal_url_from_reference(ref)
+ except ValueError:
+ # Invalid reference URL format
+ return None
+
+
+def format_task_result(result: Any) -> str:
+ """Format a TaskResult as HTML.
+
+ This formatter handles TaskResult and KrigingResult objects from evo-compute,
+ displaying the task completion status, target information, and Portal links.
+
+ :param result: A TaskResult object with message, target_name, schema_type,
+ attribute_name, and _target attributes.
+ :return: HTML string for the task result.
+ """
+ portal_url = _get_task_result_portal_url(result)
+ links = [("Portal", portal_url)] if portal_url else None
+
+ # Get result type name (Task, Kriging, etc.)
+ result_type = result._get_result_type_name() if hasattr(result, "_get_result_type_name") else "Task"
+ title = f"✓ {result_type} Result"
+
+ rows = [
+ ("Target:", result.target_name),
+ ("Schema:", result.schema_type),
+        ("Attribute:", f'<span class="attr-highlight">{result.attribute_name}</span>'),
+    ]
+
+ table_rows = [build_table_row(label, value) for label, value in rows]
+
+    html = STYLESHEET
+    html += '<div class="evo-widget">'
+    html += build_title(title, links)
+    html += "<table>" + "".join(table_rows) + "</table>"
+    html += f"<div>{result.message}</div>"
+    html += "</div>"
+
+    return html
+
+
+def format_task_results(results: Any) -> str:
+ """Format a TaskResults collection as HTML.
+
+ This formatter handles TaskResults objects from evo-compute,
+ displaying a table of all completed tasks with their status and Portal links.
+
+ :param results: A TaskResults object with _results list of TaskResult objects.
+ :return: HTML string for the task results collection.
+ """
+ result_list = results._results
+
+ if not result_list:
+        return "<em>No results</em>"
+
+ # Get result type from first result
+ result_type = result_list[0]._get_result_type_name() if hasattr(result_list[0], "_get_result_type_name") else "Task"
+ title = f"✓ {len(result_list)} {result_type} Results"
+
+ # Build table data
+ headers = ["#", "Target", "Attribute", "Schema", "Link"]
+ rows = []
+ for i, result in enumerate(result_list):
+ portal_url = _get_task_result_portal_url(result)
+        link_html = f'<a href="{portal_url}" target="_blank">Portal</a>' if portal_url else "N/A"
+ rows.append(
+ [
+ str(i + 1),
+ result.target_name,
+                f'<span class="attr-highlight">{result.attribute_name}</span>',
+ result.schema_type,
+ link_html,
+ ]
+ )
+
+ table = build_nested_table(headers, rows)
+
+ html = STYLESHEET
+    html += '<div class="evo-widget">'
+    html += build_title(title)
+    html += table
+    html += "</div>"
+
+ return html
diff --git a/packages/evo-widgets/tests/test_formatters.py b/packages/evo-widgets/tests/test_formatters.py
index 6e81bd0d..cb0a105e 100644
--- a/packages/evo-widgets/tests/test_formatters.py
+++ b/packages/evo-widgets/tests/test_formatters.py
@@ -20,6 +20,7 @@
_format_bounding_box,
_format_crs,
_get_base_metadata,
+ _get_task_result_portal_url,
format_attributes_collection,
format_base_object,
format_block_model,
@@ -27,6 +28,8 @@
format_block_model_version,
format_report,
format_report_result,
+ format_task_result,
+ format_task_results,
format_variogram,
)
@@ -939,5 +942,311 @@ def test_formats_report_result_table(self):
self.assertIn("2.5", html)
+class TestFormatTaskResult(unittest.TestCase):
+ """Tests for the format_task_result function."""
+
+ def _create_mock_task_result(self, **kwargs):
+ """Create a mock TaskResult object."""
+ defaults = {
+ "message": "Task completed successfully",
+ "target_name": "Test Grid",
+ "schema_type": "objects/regular-3d-grid/v1.0.0",
+ "attribute_name": "kriged_grade",
+ "target_reference": (
+ "https://350mt.api.seequent.com/geoscience-object"
+ "/orgs/12345678-1234-1234-1234-123456789abc"
+ "/workspaces/87654321-4321-4321-4321-abcdef123456"
+ "/objects/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
+ ),
+ }
+ defaults.update(kwargs)
+
+ obj = MagicMock()
+ obj.message = defaults["message"]
+ obj.target_name = defaults["target_name"]
+ obj.schema_type = defaults["schema_type"]
+ obj.attribute_name = defaults["attribute_name"]
+
+ # Mock _target with reference for portal URL
+ obj._target = MagicMock()
+ obj._target.reference = defaults["target_reference"]
+
+ # Mock _get_result_type_name
+ obj._get_result_type_name = MagicMock(return_value="Kriging")
+
+ return obj
+
+ def test_formats_task_result_basic_info(self):
+ """Test formatting a task result with basic information."""
+ obj = self._create_mock_task_result()
+
+ html = format_task_result(obj)
+
+ self.assertIn("Kriging Result", html)
+ self.assertIn("Test Grid", html)
+ self.assertIn("objects/regular-3d-grid/v1.0.0", html)
+ self.assertIn("kriged_grade", html)
+ self.assertIn("Task completed successfully", html)
+ self.assertIn("attr-highlight", html) # Attribute should be highlighted
+
+ def test_formats_task_result_with_portal_link(self):
+ """Test formatting a task result includes portal link."""
+ obj = self._create_mock_task_result()
+
+ html = format_task_result(obj)
+
+ self.assertIn("Portal", html)
+ self.assertIn("href=", html)
+
+ def test_formats_task_result_without_portal_link(self):
+ """Test formatting a task result without reference doesn't fail."""
+ obj = self._create_mock_task_result(target_reference=None)
+
+ html = format_task_result(obj)
+
+ # Should still render without crashing
+ self.assertIn("Kriging Result", html)
+ self.assertIn("Test Grid", html)
+
+ def test_formats_task_result_checkmark(self):
+ """Test formatting a task result shows checkmark for success."""
+ obj = self._create_mock_task_result()
+
+ html = format_task_result(obj)
+
+ self.assertIn("✓", html)
+
+ def test_formats_task_result_target_row(self):
+ """Test formatting includes Target row."""
+ obj = self._create_mock_task_result()
+
+ html = format_task_result(obj)
+
+ self.assertIn("Target:", html)
+ self.assertIn("Test Grid", html)
+
+ def test_formats_task_result_schema_row(self):
+ """Test formatting includes Schema row."""
+ obj = self._create_mock_task_result()
+
+ html = format_task_result(obj)
+
+ self.assertIn("Schema:", html)
+
+ def test_formats_task_result_attribute_row(self):
+ """Test formatting includes Attribute row."""
+ obj = self._create_mock_task_result()
+
+ html = format_task_result(obj)
+
+ self.assertIn("Attribute:", html)
+
+ def test_formats_task_result_without_get_result_type_name(self):
+ """Test formatting a task result that doesn't have _get_result_type_name."""
+ obj = self._create_mock_task_result()
+ del obj._get_result_type_name
+
+ html = format_task_result(obj)
+
+ # Should fall back to "Task"
+ self.assertIn("Task Result", html)
+
+
+class TestFormatTaskResults(unittest.TestCase):
+ """Tests for the format_task_results function."""
+
+ def _create_mock_task_result(self, **kwargs):
+ """Create a mock TaskResult object."""
+ defaults = {
+ "message": "Task completed successfully",
+ "target_name": "Test Grid",
+ "schema_type": "objects/regular-3d-grid/v1.0.0",
+ "attribute_name": "kriged_grade",
+ "target_reference": (
+ "https://350mt.api.seequent.com/geoscience-object"
+ "/orgs/12345678-1234-1234-1234-123456789abc"
+ "/workspaces/87654321-4321-4321-4321-abcdef123456"
+ "/objects/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
+ ),
+ "result_type": "Kriging",
+ }
+ defaults.update(kwargs)
+
+ obj = MagicMock()
+ obj.message = defaults["message"]
+ obj.target_name = defaults["target_name"]
+ obj.schema_type = defaults["schema_type"]
+ obj.attribute_name = defaults["attribute_name"]
+ obj._target = MagicMock()
+ obj._target.reference = defaults["target_reference"]
+ obj._get_result_type_name = MagicMock(return_value=defaults["result_type"])
+
+ return obj
+
+ def test_formats_empty_results(self):
+ """Test formatting an empty results collection."""
+ obj = MagicMock()
+ obj._results = []
+
+ html = format_task_results(obj)
+
+ self.assertIn("No results", html)
+
+ def test_formats_single_result(self):
+ """Test formatting a collection with one result."""
+ result1 = self._create_mock_task_result(target_name="Grid 1", attribute_name="attr_1")
+
+ obj = MagicMock()
+ obj._results = [result1]
+
+ html = format_task_results(obj)
+
+ self.assertIn("1 Kriging Results", html)
+ self.assertIn("Grid 1", html)
+ self.assertIn("attr_1", html)
+ self.assertIn("✓", html)
+
+ def test_formats_multiple_results(self):
+ """Test formatting a collection with multiple results."""
+ result1 = self._create_mock_task_result(target_name="Grid 1", attribute_name="attr_1")
+ result2 = self._create_mock_task_result(target_name="Grid 2", attribute_name="attr_2")
+ result3 = self._create_mock_task_result(target_name="Grid 3", attribute_name="attr_3")
+
+ obj = MagicMock()
+ obj._results = [result1, result2, result3]
+
+ html = format_task_results(obj)
+
+ self.assertIn("3 Kriging Results", html)
+ self.assertIn("Grid 1", html)
+ self.assertIn("Grid 2", html)
+ self.assertIn("Grid 3", html)
+ self.assertIn("attr_1", html)
+ self.assertIn("attr_2", html)
+ self.assertIn("attr_3", html)
+
+ def test_formats_results_with_table_headers(self):
+ """Test formatting includes proper table headers."""
+ result1 = self._create_mock_task_result()
+
+ obj = MagicMock()
+ obj._results = [result1]
+
+ html = format_task_results(obj)
+
+ self.assertIn("#", html)
+ self.assertIn("Target", html)
+ self.assertIn("Attribute", html)
+ self.assertIn("Schema", html)
+ self.assertIn("Link", html)
+
+ def test_formats_results_with_portal_links(self):
+ """Test formatting includes portal links for each result."""
+ result1 = self._create_mock_task_result(target_name="Grid 1")
+ result2 = self._create_mock_task_result(target_name="Grid 2")
+
+ obj = MagicMock()
+ obj._results = [result1, result2]
+
+ html = format_task_results(obj)
+
+ # Should have portal links
+ self.assertIn("Portal", html)
+ self.assertIn("href=", html)
+
+ def test_formats_results_without_portal_link(self):
+ """Test formatting handles results without references."""
+ result1 = self._create_mock_task_result(target_reference=None)
+
+ obj = MagicMock()
+ obj._results = [result1]
+
+ html = format_task_results(obj)
+
+ self.assertIn("N/A", html)
+
+ def test_formats_results_row_numbers(self):
+ """Test formatting includes sequential row numbers."""
+ result1 = self._create_mock_task_result(target_name="Grid 1")
+ result2 = self._create_mock_task_result(target_name="Grid 2")
+
+ obj = MagicMock()
+ obj._results = [result1, result2]
+
+ html = format_task_results(obj)
+
+ # Row numbers
+ self.assertIn(">1<", html)
+ self.assertIn(">2<", html)
+
+
+class TestGetTaskResultPortalUrl(unittest.TestCase):
+ """Tests for the _get_task_result_portal_url helper function."""
+
+ def test_extracts_portal_url_from_valid_reference(self):
+ """Test extracting portal URL from a valid object reference."""
+ result = MagicMock()
+ result._target = MagicMock()
+ result._target.reference = (
+ "https://350mt.api.seequent.com/geoscience-object"
+ "/orgs/12345678-1234-1234-1234-123456789abc"
+ "/workspaces/87654321-4321-4321-4321-abcdef123456"
+ "/objects/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
+ )
+
+ url = _get_task_result_portal_url(result)
+
+ self.assertIsNotNone(url)
+ self.assertIn("evo.seequent.com", url)
+
+ def test_returns_none_for_no_reference(self):
+ """Test returns None when target has no reference."""
+ result = MagicMock()
+ result._target = MagicMock()
+ result._target.reference = None
+
+ url = _get_task_result_portal_url(result)
+
+ self.assertIsNone(url)
+
+ def test_returns_none_for_invalid_reference(self):
+ """Test returns None for invalid reference URL."""
+ result = MagicMock()
+ result._target = MagicMock()
+ result._target.reference = "not-a-valid-url"
+
+ url = _get_task_result_portal_url(result)
+
+ self.assertIsNone(url)
+
+ def test_returns_none_when_no_target(self):
+ """Test returns None when result has no _target attribute."""
+ result = MagicMock(spec=[]) # Empty spec means no attributes
+
+ url = _get_task_result_portal_url(result)
+
+ self.assertIsNone(url)
+
+ def test_returns_none_for_non_string_reference(self):
+ """Test returns None when reference is not a string."""
+ result = MagicMock()
+ result._target = MagicMock()
+ result._target.reference = 12345 # Not a string
+
+ url = _get_task_result_portal_url(result)
+
+ self.assertIsNone(url)
+
+ def test_returns_none_for_empty_string_reference(self):
+ """Test returns None when reference is an empty string."""
+ result = MagicMock()
+ result._target = MagicMock()
+ result._target.reference = ""
+
+ url = _get_task_result_portal_url(result)
+
+ self.assertIsNone(url)
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/pyproject.toml b/pyproject.toml
index 626ae14a..f9cbfc62 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,16 +1,16 @@
[project]
name = "evo-sdk"
-version = "0.1.20"
+version = "0.2.0"
description = "Python SDK for using Seequent Evo"
requires-python = ">=3.10"
dependencies = [
- "evo-sdk-common[aiohttp,notebooks,jmespath]>=0.5.12",
+ "evo-sdk-common[aiohttp,notebooks,jmespath]>=0.5.19",
"evo-widgets>=0.2.0",
"evo-blockmodels[aiohttp,notebooks,pyarrow]>=0.2.0",
"evo-objects[aiohttp,notebooks,utils]>=0.4.0",
"evo-files[aiohttp,notebooks]>=0.2.3",
"evo-colormaps[aiohttp,notebooks]>=0.0.2",
- "evo-compute[aiohttp,notebooks]>=0.0.1rc2",
+ "evo-compute[aiohttp,notebooks]>=0.0.2",
"jupyter",
]
dynamic = ["readme"]
diff --git a/uv.lock b/uv.lock
index 5cf4cc7c..178308fe 100644
--- a/uv.lock
+++ b/uv.lock
@@ -874,11 +874,14 @@ test = [
[[package]]
name = "evo-compute"
-version = "0.0.1rc3"
+version = "0.0.2"
source = { editable = "packages/evo-compute" }
dependencies = [
+ { name = "evo-blockmodels", extra = ["utils"] },
+ { name = "evo-objects", extra = ["blockmodels", "utils"] },
{ name = "evo-sdk-common" },
{ name = "pydantic" },
+ { name = "typing-extensions" },
]
[package.optional-dependencies]
@@ -906,10 +909,13 @@ test = [
[package.metadata]
requires-dist = [
+ { name = "evo-blockmodels", extras = ["utils"], editable = "packages/evo-blockmodels" },
+ { name = "evo-objects", extras = ["utils", "blockmodels"], editable = "packages/evo-objects" },
{ name = "evo-sdk-common", editable = "packages/evo-sdk-common" },
{ name = "evo-sdk-common", extras = ["aiohttp"], marker = "extra == 'aiohttp'", editable = "packages/evo-sdk-common" },
{ name = "evo-sdk-common", extras = ["notebooks"], marker = "extra == 'notebooks'", editable = "packages/evo-sdk-common" },
{ name = "pydantic", specifier = ">=2" },
+ { name = "typing-extensions", specifier = ">=4.0" },
]
provides-extras = ["aiohttp", "notebooks"]
@@ -1079,7 +1085,7 @@ test = [
[[package]]
name = "evo-sdk"
-version = "0.1.20"
+version = "0.2.0"
source = { editable = "." }
dependencies = [
{ name = "evo-blockmodels", extra = ["aiohttp", "notebooks"] },
@@ -1144,7 +1150,7 @@ test = [
[[package]]
name = "evo-sdk-common"
-version = "0.5.18"
+version = "0.5.19"
source = { editable = "packages/evo-sdk-common" }
dependencies = [
{ name = "pure-interface" },