# NOTE: The following header text was residue from copying this file out of the
# GitHub Actions web UI ("Skip to content" / run title / "Workflow file for this
# run"); it is preserved here as comments so the file parses as valid YAML.
#
# Gsd/auto bench
# Gsd/auto bench #1
# Workflow file for this run
# Benchmark CI Pipeline
#
# Runs after the "Tests" workflow succeeds on main. Executes the full benchmark
# suite on Python 3.13 and pushes results to gh-pages at /dev/bench/ via
# github-action-benchmark.
#
# PRs receive a benchmark comparison table in Job Summary and fail on
# regressions beyond 150% (PR-01, PR-02, PR-03).
#
# To enforce the merge gate, enable branch protection requiring the
# 'Benchmarks' check to pass: Settings > Branches > Branch protection rules.
#
# CI-04: Release/tag events do NOT get a separate benchmark run. Every push to
# main updates the gh-pages dashboard, so releases inherit the latest baseline.
#
# CI-01: github-action-benchmark with auto-push: true auto-creates the gh-pages
# branch on first run. GitHub Pages must be manually enabled once:
# Settings > Pages > Source: Deploy from a branch > gh-pages / root.
name: Benchmarks

on:
  # Main-branch runs: triggered only after the "Tests" workflow completes.
  workflow_run:
    workflows: ["Tests"]
    types: [completed]
    branches: [main]
  # PR runs: compare against the gh-pages baseline without saving data.
  pull_request:
    types: [opened, synchronize]

# contents: write is required for auto-push to gh-pages;
# deployments: write is used by github-action-benchmark's Pages integration.
permissions:
  contents: write
  deployments: write

# One benchmark run per PR (or per main-branch sha); newer runs cancel older ones.
concurrency:
  group: benchmark-${{ github.event.pull_request.number || github.sha }}
  cancel-in-progress: true

jobs:
  benchmark:
    runs-on: ubuntu-latest
    # PRs always run; workflow_run events only proceed when Tests succeeded.
    if: github.event_name == 'pull_request' || github.event.workflow_run.conclusion == 'success'
    services:
      redis:
        image: redis:7
        ports:
          # Quoted to avoid YAML 1.1 colon-separated scalar ambiguity.
          - "6379:6379"
        options: >-
          --health-cmd "redis-cli ping"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
      mongodb:
        image: mongo:7
        env:
          MONGO_INITDB_ROOT_USERNAME: root
          MONGO_INITDB_ROOT_PASSWORD: example
        ports:
          - "27017:27017"
        options: >-
          --health-cmd "mongosh --eval 'db.runCommand(\"ping\").ok' --quiet"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - uses: actions/checkout@v4

      - name: Install uv and set the python version
        uses: astral-sh/setup-uv@v5
        with:
          python-version: "3.13"

      - name: Install package
        run: |
          uv sync --all-extras --dev

      - name: Run benchmarks
        run: |
          uv run pytest -m benchmark --benchmark-only --benchmark-json=benchmark_results.json

      # Main branch: persist results to gh-pages at /dev/bench/ (CI-01).
      - name: Store benchmark results (main)
        if: github.event_name == 'workflow_run'
        uses: benchmark-action/github-action-benchmark@v1
        with:
          tool: "pytest"
          output-file-path: benchmark_results.json
          gh-pages-branch: gh-pages
          benchmark-data-dir-path: dev/bench
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: true

      # PRs: compare against the stored baseline, comment + fail on >150%
      # regressions, but never write data back (PR-01..PR-03).
      - name: Compare benchmark results (PR)
        if: github.event_name == 'pull_request'
        uses: benchmark-action/github-action-benchmark@v1
        with:
          tool: "pytest"
          output-file-path: benchmark_results.json
          gh-pages-branch: gh-pages
          benchmark-data-dir-path: dev/bench
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: false
          save-data-file: false
          summary-always: true
          comment-on-alert: true
          fail-on-alert: true
          alert-threshold: "150%"