-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathjustfile
More file actions
95 lines (63 loc) · 2.97 KB
/
justfile
File metadata and controls
95 lines (63 loc) · 2.97 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
# Run every recipe line under bash with strict-ish flags:
#   -u           error on unset variables (catches typos in interpolations)
#   -o pipefail  a failing command inside a pipe fails the whole line
#   -c           required last: just appends the recipe line as the command string
set shell := ["bash", "-uo", "pipefail", "-c"]

# Bare `just` runs the first recipe. The original `default := help` was a
# variable assignment whose RHS referenced an undefined variable `help`
# (recipe names are not variables in just), which made every invocation fail.
# A `default` recipe depending on `help` is the idiomatic form.
default: help

# List all available recipes.
help:
    @just --list
# Reformat all Rust sources in place with rustfmt.
fmt:
    cargo fmt

# CI-friendly formatting gate: fail (non-zero) if anything is not rustfmt-clean.
fmt-check:
    cargo fmt -- --check

# Lint every target and feature combination; only the `correctness` and
# `suspicious` clippy groups are promoted to hard errors.
clippy:
    cargo clippy --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious

# Type-check all targets/features without producing final artifacts.
check:
    cargo check --all-targets --all-features
# Full test suite across all targets and features.
test:
    cargo test --all-targets --all-features

# Faster inner loop: workspace lib + integration tests, skipping the slow
# SADF test (run separately via `just test-slow`).
test-fast:
    cargo test --workspace --lib --tests --all-features -- --skip test_sadf_test

# Only the long-running SADF structural-breaks test; `--ignored` is passed to
# the harness, so the test is presumably marked #[ignore] — confirm in-source.
test-slow:
    cargo test -p openquant --test structural_breaks test_sadf_test -- --ignored

# Aggregate lint gate: formatting check plus clippy.
lint: fmt-check clippy
# Run every criterion benchmark in the workspace.
bench:
    cargo bench --all-features

# Micro-benchmarks for known hot code paths.
bench-hotspots:
    cargo bench -p openquant --bench perf_hotspots

# End-to-end pipeline benchmark over a synthetic ticker stream.
bench-synthetic:
    cargo bench -p openquant --bench synthetic_ticker_pipeline

# Both named benches in one cargo invocation.
bench-all:
    cargo bench -p openquant --bench perf_hotspots --bench synthetic_ticker_pipeline

# Harvest criterion output into benchmarks/latest_benchmarks.json, restricted
# to benchmarks listed in the manifest. Run after a `bench*` recipe.
bench-collect:
    python3 scripts/collect_bench_results.py --criterion-dir target/criterion --out benchmarks/latest_benchmarks.json --allow-list benchmarks/benchmark_manifest.json

# Compare latest results against the checked-in baseline; fail on regressions
# beyond 35% unless overridden per-benchmark in threshold_overrides.json.
bench-check:
    python3 scripts/check_bench_thresholds.py --baseline benchmarks/baseline_benchmarks.json --latest benchmarks/latest_benchmarks.json --max-regression-pct 35 --overrides benchmarks/threshold_overrides.json
# Build the pyopenquant bindings and install them editable into .venv.
# Requires `just py-setup` to have created the venv first.
py-develop:
    uv run --python .venv/bin/python maturin develop --manifest-path crates/pyopenquant/Cargo.toml

# Build a release wheel for the Python bindings into dist/.
py-build:
    uv run --python .venv/bin/python maturin build --manifest-path crates/pyopenquant/Cargo.toml --out dist

# Cheapest possible check that the compiled bindings import at all.
py-import-smoke:
    uv run --python .venv/bin/python python -c "import openquant; print('openquant bindings OK')"

# Python-side test suite (quiet output).
py-test:
    uv run --python .venv/bin/python pytest python/tests -q

# One-time environment bootstrap: create the 3.13 venv, then install the
# dev dependency group. Run this before any other py-* recipe.
py-setup:
    uv venv --python 3.13 .venv
    uv sync --group dev
# Python pipeline benchmark: 30 iterations over 2048 bars.
py-bench:
    uv run --python .venv/bin/python python python/benchmarks/benchmark_pipeline.py --iterations 30 --bars 2048

# Data-processing benchmark (4 symbols x 200k rows, 7 iterations); writes
# results to benchmarks/data_processing/latest.json.
py-bench-data:
    uv run --python .venv/bin/python python python/benchmarks/benchmark_data_processing.py --rows-per-symbol 200000 --symbols 4 --iterations 7 --out benchmarks/data_processing/latest.json

# Same benchmark, additionally compared against the checked-in baseline JSON.
py-bench-data-compare:
    uv run --python .venv/bin/python python python/benchmarks/benchmark_data_processing.py --rows-per-symbol 200000 --symbols 4 --iterations 7 --out benchmarks/data_processing/latest.json --baseline benchmarks/data_processing/baseline.json
# Run the experiment pipeline with the oil-futures baseline config; artifacts
# land in experiments/artifacts.
exp-run:
    uv run --python .venv/bin/python python experiments/run_pipeline.py --config experiments/configs/futures_oil_baseline.toml --out experiments/artifacts

# Execute every notebook script headlessly as a smoke test.
notebook-smoke:
    uv run --python .venv/bin/python python notebooks/python/scripts/smoke_all.py

# Full research smoke gate: rebuild bindings, then notebooks, then pipeline.
# Order matters — py-develop must come first so the others see fresh bindings.
research-smoke: py-develop notebook-smoke exp-run
# Thin wrappers around the AFML docs-loop helper script; each recipe forwards
# a single subcommand. See the script itself for subcommand semantics.
docs-loop-init:
    skills/afml-docs-loop/scripts/run_afml_docs_loop.sh init

docs-loop-status:
    skills/afml-docs-loop/scripts/run_afml_docs_loop.sh status

# Advance to the next docs-loop item and print its prompt to stdout.
docs-loop-next:
    skills/afml-docs-loop/scripts/run_afml_docs_loop.sh next --print-prompt

docs-loop-export:
    skills/afml-docs-loop/scripts/run_afml_docs_loop.sh export

docs-loop-evidence:
    skills/afml-docs-loop/scripts/run_afml_docs_loop.sh evidence