Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
49 changes: 49 additions & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# Environment files
.env
.env.*
*.env

# Version control
.git
.gitignore
.gitattributes

# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
env/
venv/
.venv/
.pytest_cache/
.coverage
htmlcov/
.tox/
.eggs/
*.egg-info/

# IDE specific files
.idea/
.vscode/
*.swp
*.swo
.DS_Store

# Log files
*.log
logs/

# Local development files
docker-compose.override.yml
docker-compose.*.yml

# Documentation
docs/
*.md
LICENSE

# Test files
tests/
test/
153 changes: 153 additions & 0 deletions bioimageio/engine/model_tester.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,153 @@
import os

import ray
from hypha_rpc import connect_to_server
from ray import serve


@serve.deployment(
    ray_actor_options={"num_gpus": 1, "num_cpus": 1},
    max_ongoing_requests=1,
    num_replicas=1,
    max_replicas_per_node=1,
    max_queued_requests=10,
)
class ModelTester:
    """Ray Serve deployment that downloads, validates and test-runs bioimage.io models.

    Heavy imports (aiohttp, bioimageio.*) are deferred to method bodies so they
    resolve inside the replica's runtime environment, not at deploy time.
    """

    def __init__(self):
        pass

    async def _download_model(self, model_url: str, package_path: str):
        """Download a zipped model package from ``model_url`` and unpack it.

        Parameters:
            model_url: HTTP(S) URL of a zip archive containing the model package.
            package_path: Destination folder; the archive is staged next to it
                as ``<package_path>.zip`` and removed after extraction.

        Returns:
            pathlib.Path pointing at the unpacked package folder.

        Raises:
            RuntimeError: if the server does not answer with HTTP 200.
        """
        import os
        import zipfile
        from pathlib import Path

        import aiohttp

        archive_path = package_path + ".zip"
        # open() does not create intermediate directories -- make sure the
        # staging directory (e.g. /tmp/bioengine) exists before writing.
        os.makedirs(os.path.dirname(archive_path), exist_ok=True)

        async with aiohttp.ClientSession() as session:
            async with session.get(model_url) as response:
                if response.status != 200:
                    raise RuntimeError(f"Failed to download model from {model_url}")
                # Stream to disk in 1 MiB chunks instead of buffering the
                # whole (potentially large) model archive in memory.
                with open(archive_path, "wb") as f:
                    async for chunk in response.content.iter_chunked(1024 * 1024):
                        f.write(chunk)

        # Unzip the archive into the package folder.
        with zipfile.ZipFile(archive_path, "r") as zip_ref:
            zip_ref.extractall(package_path)

        # Cleanup the archive file now that it is extracted.
        os.remove(archive_path)

        return Path(package_path)

    async def validate(self, rdf_dict):
        """Validate an RDF dict against the bioimage.io spec (no file I/O checks).

        Returns a dict with ``success`` (bool) and ``details`` (formatted summary).
        """
        from bioimageio.spec import ValidationContext, validate_format

        ctx = ValidationContext(perform_io_checks=False)
        summary = validate_format(rdf_dict, context=ctx)
        return {"success": summary.status == "passed", "details": summary.format()}

    async def __call__(self, model_id, model_url=None):
        """Fetch the model package (by URL or by id) and run the bioimage.io test suite.

        Parameters:
            model_id: bioimage.io model identifier; also used as the temp folder name.
            model_url: optional direct URL to a zipped package; when given it
                takes precedence over resolving ``model_id``.

        Returns:
            The ``test_model`` summary serialized via ``model_dump(mode="json")``.
        """
        import shutil
        from pathlib import Path

        from bioimageio.core import test_model
        from bioimageio.spec import save_bioimageio_package_as_folder

        package_path = f"/tmp/bioengine/{model_id}"

        if model_url is not None:
            package_path = await self._download_model(model_url, package_path)
        else:
            # NOTE(review): assumed to return a path-like to the unpacked
            # folder -- normalized to Path for the checks below; confirm.
            package_path = Path(
                save_bioimageio_package_as_folder(model_id, output_path=package_path)
            )

        # Explicit check instead of `assert`, which is stripped under -O.
        if not package_path.exists():
            raise RuntimeError(f"Model package not found at {package_path}")

        try:
            result = test_model(package_path / "rdf.yaml").model_dump(mode="json")
        finally:
            # Always clean up the unpacked package, even when the test run
            # raises; ignore_errors so cleanup never masks the real error.
            shutil.rmtree(str(package_path), ignore_errors=True)

        return result


async def ping(context):
    """Liveness probe exposed via Hypha; always answers with the fixed payload."""
    reply = "pong"
    return reply


async def test_model(model_id, model_url=None, context=None):
    """Proxy a model-test request to the deployed "bioengine" Serve application.

    Delegates to ``ModelTester.__call__`` on the Serve replica and returns
    its JSON-serializable test summary.
    """
    print(f"Running model '{model_id}'")

    handle = ray.serve.get_app_handle("bioengine")
    result = await handle.remote(model_id, model_url)
    return result


async def validate(rdf_dict, context=None):
    """Proxy RDF validation to ``ModelTester.validate`` on the "bioengine" app."""
    print("Validating RDF dict")

    handle = ray.serve.get_app_handle("bioengine")
    outcome = await handle.validate.remote(rdf_dict)
    return outcome


async def register_service():
    """Connect to a Ray cluster, deploy the ModelTester app, and expose it on Hypha.

    Configuration comes from environment variables (with defaults):
    RAY_ADDRESS, HYPHA_SERVER_URL, HYPHA_WORKSPACE, HYPHA_SERVICE_ID, and the
    mandatory HYPHA_TOKEN. Blocks forever serving requests.

    Raises:
        RuntimeError: if HYPHA_TOKEN is not set.
    """
    RAY_ADDRESS = os.getenv("RAY_ADDRESS", "ray://raycluster-kuberay-head-svc.ray-cluster.svc.cluster.local:10001")
    HYPHA_SERVER_URL = os.getenv("HYPHA_SERVER_URL", "https://hypha.aicell.io")
    HYPHA_WORKSPACE = os.getenv("HYPHA_WORKSPACE", "bioimage-io")
    HYPHA_SERVICE_ID = os.getenv("HYPHA_SERVICE_ID", "bioimageio-model-runner")
    HYPHA_TOKEN = os.getenv("HYPHA_TOKEN")
    # Explicit raise instead of `assert` -- asserts are stripped under -O.
    if not HYPHA_TOKEN:
        raise RuntimeError("Please set the HYPHA_TOKEN environment variable")

    # Connect to Ray head node; the runtime_env pins the ML stack installed
    # on the worker replicas so model tests are reproducible.
    print(f"Initializing Ray with address: {RAY_ADDRESS}")
    ray.init(
        address=RAY_ADDRESS,
        runtime_env={
            "pip": [
                "torch==2.5.1",
                "torchvision==0.20.1",
                "tensorflow==2.16.1",
                "onnxruntime==1.20.1",
                "bioimageio.core==0.7.0",
                "hypha-rpc",
            ],
        },
    )

    # Bind the arguments to the deployment and return an Application.
    app = ModelTester.bind()

    # Deploy the application (no HTTP route; accessed via app handle only).
    ray.serve.run(app, name="bioengine", route_prefix=None)

    client = await connect_to_server(
        {"server_url": HYPHA_SERVER_URL, "workspace": HYPHA_WORKSPACE, "token": HYPHA_TOKEN}
    )

    # Register the service
    service_info = await client.register_service(
        {
            "id": HYPHA_SERVICE_ID,
            "config": {
                "visibility": "public",
                "require_context": True,
            },
            # Exposed functions:
            "ping": ping,
            "test_model": test_model,
            "validate": validate,
        }
    )
    sid = service_info["id"]

    print(f"Service registered with ID: {sid}")

    # Block forever, serving incoming Hypha requests.
    await client.serve()


# Backwards-compatible alias for the original (misspelled) function name.
register_resvice = register_service


if __name__ == "__main__":
    import asyncio

    # Entry point: deploy the tester on Ray and serve it via Hypha until stopped.
    asyncio.run(register_resvice())
43 changes: 37 additions & 6 deletions environments/bioengine/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,6 +1,37 @@
FROM ghcr.io/amun-ai/hypha:0.20.35.post2
COPY . /app
USER root
RUN pip install -r /app/requirements.txt
RUN pip install /app
USER hypha
# Use an official slim Python 3.11 image
# NOTE(review): the stage is named "build" but no later stage references it --
# presumably a leftover from a planned multi-stage build; confirm or drop "AS build".
FROM --platform=linux/amd64 python:3.11.9-slim AS build


# Set the working directory
WORKDIR /app/

# Install necessary system dependencies
RUN apt-get update && apt-get install -y \
    curl \
    jq \
    sudo \
    && rm -rf /var/lib/apt/lists/*

# Create a non-root user with sudo privileges
# NOTE(review): passwordless sudo effectively makes this user root, defeating
# the point of dropping privileges -- confirm sudo is actually needed at runtime.
RUN groupadd -r bioengine && useradd -r -g bioengine --create-home bioengine \
    && usermod -aG sudo bioengine \
    && echo "bioengine ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/bioengine

# Copy requirements first for efficient layer caching
COPY ./requirements.txt /app/requirements.txt

# Install Python dependencies with --no-cache-dir to reduce image size
RUN pip install --upgrade pip \
    && pip install --no-cache-dir -r /app/requirements.txt

# Copy application files last to leverage Docker's build cache
COPY . /app/

# Change ownership to the non-root user and ensure scripts are executable
RUN chown -R bioengine:bioengine /app/

# Switch to the non-root user
USER bioengine

# Default entrypoint for running the application
ENTRYPOINT ["python", "/app/start_hypha_service.py"]
9 changes: 6 additions & 3 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
hypha-rpc
hypha>=0.15.45
hypha-rpc==0.20.47
hypha>=0.20.47.post12
PyYAML==6.0.1
hypha-launcher
pyotritonclient
simpervisor
simpervisor
numpy==1.26.4
python-dotenv==1.0.1
ray[serve]==2.33.0
24 changes: 24 additions & 0 deletions scripts/create_workspace.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@


import os
import asyncio
from hypha_rpc import login, connect_to_server

async def start_server(server_url):
    """Create the persistent "bioengine-apps" workspace on a Hypha server.

    Performs an interactive login, reconnects with the obtained token, then
    creates the workspace and prints its name.
    """
    print(f"Connecting to server at {server_url}")
    token = await login({"server_url": server_url})
    connection = {"server_url": server_url, "token": token}
    server = await connect_to_server(connection)

    workspace_config = {
        "name": "bioengine-apps",
        "description": "Workspace for bioengine",
        "persistent": True,
    }
    ws = await server.create_workspace(workspace_config)

    print(f"Workspace created: {ws['name']}")


if __name__ == '__main__':
    # Targets the production Hypha deployment; edit here to use another server.
    server_url = 'https://hypha.aicell.io'
    print(f"Starting server with URL: {server_url}")
    asyncio.run(start_server(server_url))
72 changes: 72 additions & 0 deletions scripts/register_resnet.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
import asyncio
from hypha_rpc import connect_to_server
from pydantic import Field

async def main():
    """Register a ResNet-18 classifier script with the Ray function registry,
    then run it once on a sample image as a smoke test.
    """
    # Hypha connection coordinates.
    server_url = "https://hypha.aicell.io"
    workspace_id = "bioengine-apps"
    service_id = "ray-function-registry"

    server = await connect_to_server({"server_url": server_url})

    # Look up the Ray Function Registry service in the target workspace.
    registry = await server.get_service(f"{workspace_id}/{service_id}")

    # Script executed remotely: downloads an image, preprocesses it, and
    # classifies it with a pretrained ResNet-18 from torch hub.
    example_script = """
import torch
import torchvision.transforms as transforms
from PIL import Image
import requests
from io import BytesIO

def execute(image_url: str):
    # Load the image from the URL
    response = requests.get(image_url)
    img = Image.open(BytesIO(response.content))

    # Define the transform to match what the model expects
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # Apply the preprocessing
    img_tensor = preprocess(img)
    img_tensor = img_tensor.unsqueeze(0) # Add a batch dimension

    # Load the ResNet model
    model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18', pretrained=True)
    model.eval()

    # Perform the inference
    with torch.no_grad():
        output = model(img_tensor)

    # Load the labels for ImageNet
    labels = requests.get("https://raw.githubusercontent.com/anishathalye/imagenet-simple-labels/master/imagenet-simple-labels.json").json()

    # Find the top predicted class
    _, predicted_idx = torch.max(output, 1)
    predicted_label = labels[predicted_idx.item()]

    return predicted_label
"""

    # Register the script together with the packages its worker needs.
    pip_requirements = ['torch', 'torchvision', 'requests', 'pillow']
    function_id = await registry.register_function(name="ResNet image classifier", script=example_script, pip_requirements=pip_requirements)
    print(f"Registered ResNet function with id: {function_id}")

    # Smoke-test the freshly registered function on a sample image.
    image_url = "https://cdn2.thecatapi.com/images/9vs.jpg"
    result = await registry.run_function(function_id=function_id, args=[image_url])
    print("Classification Result:", result)

if __name__ == "__main__":
    # Entry point: register the classifier, then run it once as a smoke test.
    asyncio.run(main())
24 changes: 24 additions & 0 deletions scripts/run_resnet.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import asyncio
from hypha_rpc import connect_to_server

async def main():
    """Look up the registered "ResNet image classifier" on Hypha and run it
    on a sample image, printing the predicted label.

    Raises:
        RuntimeError: if the classifier function has not been registered.
    """
    # Connect to the Hypha server
    server_url = "https://hypha.aicell.io"
    workspace_id = "bioengine-apps"
    service_id = "ray-function-registry"

    server = await connect_to_server({"server_url": server_url})

    # Retrieve the Ray Function Registry service
    svc = await server.get_service(f"{workspace_id}/{service_id}")
    # Example image URL
    image_url = "https://i.natgeofe.com/n/d7c8f811-670c-434e-b451-5d08793dade0/NationalGeographic_2802033.jpg"
    functions = await svc.list_functions()
    # Find the function named "ResNet image classifier"
    resnet_function = next((f for f in functions if f["name"] == "ResNet image classifier"), None)
    # Fail with a clear message instead of a TypeError on None when the
    # function was never registered (e.g. register_resnet.py not run yet).
    if resnet_function is None:
        raise RuntimeError(
            "Function 'ResNet image classifier' is not registered; "
            "run scripts/register_resnet.py first"
        )
    # Run the ResNet function
    result = await svc.run_function(function_id=resnet_function["id"], args=[image_url])
    print("Classification Result:", result)

if __name__ == "__main__":
    # Entry point: run the remote classifier once against the sample image.
    asyncio.run(main())
Loading