40 changes: 40 additions & 0 deletions .github/workflows/benchmark.yml
@@ -0,0 +1,40 @@
name: Benchmark
on:
  push:
    branches: [ "main" ]
    paths:
      - '**.py'
      - 'benchmarks/**'

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install uv
        run: |
          curl -LsSf https://astral.sh/uv/install.sh | sh

      - name: Create venv and install dependencies
        run: |
          uv venv
          source .venv/bin/activate
          uv pip install -e .
          uv pip install numpy matplotlib

      - name: Run benchmarks
        run: |
          source .venv/bin/activate
          python benchmarks/run_benchmarks.py

      - name: Store benchmark results
        uses: actions/upload-artifact@v3
        with:
          name: benchmark-results
          path: benchmarks/results/
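Note that the uploaded artifact is simply the JSON written by benchmarks/run_benchmarks.py (see below). As a rough sketch that is not part of this change, a downloaded copy could be summarized with a few lines of Python, assuming benchmark_results.json has been extracted into the working directory:

# summarize_artifact.py -- illustrative sketch only, not part of this PR.
# Assumes benchmark_results.json from the CI artifact sits in the working directory.
import json

with open("benchmark_results.json") as f:
    results = json.load(f)

meta = results["metadata"]
print(f"{meta['platform']}, Python {meta['python_version']}, {meta['num_runs']} runs")

# Print mean/std timing for every benchmark case.
for suite, cases in results["benchmarks"].items():
    for case, stats in cases.items():
        print(f"{suite}/{case}: {stats['mean_time']:.3f}s +/- {stats['std_time']:.3f}s")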
2 changes: 2 additions & 0 deletions .gitignore
@@ -3,6 +3,8 @@
doc_source/notebooks/Matisse/outlines/*
*.DS_Store

benchmarks/results/*

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
3 changes: 3 additions & 0 deletions Makefile
@@ -29,6 +29,9 @@ html:
	sphinx-build -M html doc_source docs
	rsync -a docs/html/ docs/
	rm -r docs/html

benchmark:
	python benchmarks/run_benchmarks.py

all:
	# Running autopep8
53 changes: 53 additions & 0 deletions benchmarks/benchmark_cw.py
@@ -0,0 +1,53 @@
"""Benchmarks for CW complex computations"""
import numpy as np
import time
from ect import ECT, create_example_cw
import json
from pathlib import Path


def benchmark_cw_ect(num_runs=5):
    """Benchmark ECT computation on CW complexes"""
    results = {}

    configs = [
        (8, 10),      # Small
        (36, 36),     # Medium
        (360, 360),   # Large
    ]

    for num_dir, num_thresh in configs:
        times = []
        K = create_example_cw()

        print(
            f"\nTesting ECT with {num_dir} directions, {num_thresh} thresholds")
        for _ in range(num_runs):
            start_time = time.time()

            myect = ECT(num_dirs=num_dir, num_thresh=num_thresh)
            myect.calculateECT(K)

            execution_time = time.time() - start_time
            times.append(execution_time)

        results[f'dirs_{num_dir}_thresh_{num_thresh}'] = {
            'mean_time': float(np.mean(times)),
            'std_time': float(np.std(times)),
            'min_time': float(np.min(times)),
            'max_time': float(np.max(times))
        }

    return results


if __name__ == "__main__":
    print("Running CW complex benchmarks...")
    results = benchmark_cw_ect()

    # Save results
    output_dir = Path("benchmarks/results")
    output_dir.mkdir(parents=True, exist_ok=True)

    with open(output_dir / "cw_results.json", "w") as f:
        json.dump(results, f, indent=2)
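Because every configuration records mean, std, min, and max times, the saved JSON can also double as a baseline for spotting slowdowns between runs. A minimal sketch of that idea, assuming a previously saved copy named baseline_cw_results.json and a hypothetical 20% tolerance, neither of which exists in this PR:

# compare_cw_results.py -- illustrative sketch only, not part of this PR.
# Compares a fresh run against a previously saved baseline JSON and flags
# any configuration whose mean time grew by more than a chosen tolerance.
import json

TOLERANCE = 1.20  # hypothetical 20% slowdown threshold

with open("baseline_cw_results.json") as f:
    baseline = json.load(f)
with open("cw_results.json") as f:
    current = json.load(f)

for config, stats in current.items():
    old = baseline.get(config)
    if old is None:
        continue
    ratio = stats["mean_time"] / old["mean_time"]
    flag = "REGRESSION" if ratio > TOLERANCE else "ok"
    print(f"{config}: {ratio:.2f}x baseline ({flag})")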
79 changes: 79 additions & 0 deletions benchmarks/benchmark_graph.py
@@ -0,0 +1,79 @@
"""Benchmarks for graph-based ECT computations"""
import numpy as np
import time
from ect import ECT, EmbeddedGraph


def create_test_shape(num_points=1000, complexity=1):
    """Create test shape with varying complexity"""
    t = np.linspace(0, 2*np.pi, num_points)
    x = np.cos(t)
    y = np.sin(t)

    for i in range(2, complexity + 2):
        x += (1/i) * np.cos(i*t)
        y += (1/i) * np.sin(i*t)

    return np.column_stack([x, y])


def benchmark_graph_ect(num_runs=5):
    """Benchmark ECT computation on graphs"""
    results = {}

    configs = [
        (100, 1),
        (1000, 1),
        (100, 3),
        (1000, 3),
        (10000, 3),
    ]

    for points, complexity in configs:
        shape = create_test_shape(points, complexity)
        G = EmbeddedGraph()
        G.add_cycle(shape)

        times = []
        print(
            f"\nTesting shape with {points} points and complexity {complexity}")

        for _ in range(num_runs):
            start_time = time.time()
            myect = ECT(num_dirs=360, num_thresh=360)
            myect.calculateECT(G)
            times.append(time.time() - start_time)

        results[f'points_{points}_complexity_{complexity}'] = {
            'mean_time': float(np.mean(times)),
            'std_time': float(np.std(times)),
            'min_time': float(np.min(times)),
            'max_time': float(np.max(times))
        }

    return results


def benchmark_g_omega(num_runs=5):
    """Benchmark g_omega computation"""
    results = {}

    sizes = [100, 500, 1000]
    for size in sizes:
        shape = create_test_shape(size)
        G = EmbeddedGraph()
        G.add_cycle(shape)

        times = []
        for _ in range(num_runs):
            start_time = time.time()
            for theta in np.linspace(0, 2*np.pi, 360):
                G.g_omega(theta)
            times.append(time.time() - start_time)

        results[f'size_{size}'] = {
            'mean_time': float(np.mean(times)),
            'std_time': float(np.std(times))
        }

    return results
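The workflow installs matplotlib, but none of the benchmark scripts use it yet. One plausible use, sketched here purely as an illustration, is a bar chart of the mean timings returned by benchmark_graph_ect (run from the benchmarks/ directory so the import resolves):

# plot_graph_times.py -- illustrative sketch only, not part of this PR.
# Plots the mean ECT time per configuration from benchmark_graph_ect's output.
import matplotlib.pyplot as plt

from benchmark_graph import benchmark_graph_ect

results = benchmark_graph_ect(num_runs=3)
labels = list(results.keys())
means = [results[k]["mean_time"] for k in labels]
stds = [results[k]["std_time"] for k in labels]

plt.figure(figsize=(8, 4))
plt.bar(labels, means, yerr=stds)
plt.ylabel("mean ECT time (s)")
plt.xticks(rotation=45, ha="right")
plt.tight_layout()
plt.savefig("graph_ect_times.png")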
48 changes: 48 additions & 0 deletions benchmarks/run_benchmarks.py
@@ -0,0 +1,48 @@
"""Main benchmark runner for ECT package"""
import time
from pathlib import Path
import json
from benchmark_graph import benchmark_graph_ect, benchmark_g_omega
from benchmark_cw import benchmark_cw_ect
import platform


def run_all_benchmarks(num_runs=5):
    """Run all benchmarks and collect results"""
    results = {
        'metadata': {
            'num_runs': num_runs,
            'timestamp': time.strftime('%Y-%m-%d %H:%M:%S'),
            'platform': platform.platform(),
            'python_version': platform.python_version()
        },
        'benchmarks': {}
    }

    print("\nRunning graph ECT benchmarks...")
    results['benchmarks']['graph_ect'] = benchmark_graph_ect(num_runs=num_runs)

    print("\nRunning CW complex benchmarks...")
    results['benchmarks']['cw_ect'] = benchmark_cw_ect(num_runs=num_runs)

    print("\nRunning g_omega benchmarks...")
    results['benchmarks']['g_omega'] = benchmark_g_omega(num_runs=num_runs)

    return results


def save_results(results, output_dir="benchmarks/results"):
    """Save benchmark results to JSON file"""
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    with open(output_dir / "benchmark_results.json", "w") as f:
        json.dump(results, f, indent=2)

    print(f"\nResults saved to {output_dir}/benchmark_results.json")


if __name__ == "__main__":
    results = run_all_benchmarks()
    save_results(results)
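Besides running make benchmark from the repository root, the runner can be driven directly for a quick smoke test. A minimal sketch, run from inside the benchmarks/ directory, with the single-pass run count and scratch output directory chosen only for illustration:

# quick_check.py -- illustrative sketch only, not part of this PR.
from run_benchmarks import run_all_benchmarks, save_results

results = run_all_benchmarks(num_runs=1)             # one timing pass per configuration
save_results(results, output_dir="/tmp/ect-bench")   # writes benchmark_results.json there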