-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
79 lines (66 loc) · 3.22 KB
/
.env.example
File metadata and controls
79 lines (66 loc) · 3.22 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
# LabTrust-Gym environment variables (optional)
# Copy to .env; the CLI loads .env from the current working directory and from the repo root. Default runs need no env.
# See docs/getting-started/installation.md for API keys and LABTRUST_DOTENV_PATH.
# -----------------------------------------------------------------------------
# Policy and partner
# -----------------------------------------------------------------------------
# Path to policy directory (overrides package/repo). Must exist. Run from repo root or set this.
# LABTRUST_POLICY_DIR=
# Partner overlay ID (e.g. hsl_like). Same as --partner on CLI.
# LABTRUST_PARTNER=
# -----------------------------------------------------------------------------
# Live LLM (only when using --pipeline-mode llm_live --allow-network)
# -----------------------------------------------------------------------------
# OpenAI (--llm-backend openai_live). Required for live OpenAI calls.
# OPENAI_API_KEY=
# Anthropic (--llm-backend anthropic_live). Required for live Anthropic calls.
# ANTHROPIC_API_KEY=
# Prime Intellect Inference (--llm-backend prime_intellect_live). PRIME_API_KEY is also accepted (see docs).
# PRIME_INTELLECT_API_KEY=
# LABTRUST_PRIME_INTELLECT_MODEL=meta-llama/llama-3.1-70b-instruct
# LABTRUST_PRIME_INTELLECT_BASE_URL=https://api.pinference.ai/api/v1
# LABTRUST_PRIME_TEAM_ID=
# LABTRUST_PRIME_INTELLECT_FALLBACK_MODEL=
# Optional overrides for LLM backends
# LABTRUST_OPENAI_MODEL=gpt-4o-mini
# LABTRUST_ANTHROPIC_MODEL=claude-3-5-haiku-20241022
# LABTRUST_LLM_TIMEOUT_S=30
# LABTRUST_LLM_RETRIES=2
# When running from a directory other than repo root, point to your .env (e.g. /path/to/repo/.env).
# LABTRUST_DOTENV_PATH=
# Local LLM / Ollama (--llm-backend ollama_live)
# LABTRUST_LOCAL_LLM_URL=http://localhost:11434
# LABTRUST_LOCAL_LLM_MODEL=llama3.2
# LABTRUST_LOCAL_LLM_TIMEOUT=60
# Allow network for live LLM (or use CLI --allow-network). Set to 1 when using llm_live.
# LABTRUST_ALLOW_NETWORK=0
# -----------------------------------------------------------------------------
# Online serve (labtrust serve)
# -----------------------------------------------------------------------------
# LABTRUST_AUTH_MODE=off
# LABTRUST_AUTH_KEY=
# LABTRUST_SERVE_HOST=127.0.0.1
# LABTRUST_SERVE_PORT=8765
# -----------------------------------------------------------------------------
# Development / CI / smoke
# -----------------------------------------------------------------------------
# LABTRUST_POLICY_CACHE=1
# LABTRUST_STRICT_SIGNATURES=0
# LABTRUST_STRICT_REASON_CODES=0
# LABTRUST_REPRO_SMOKE=0
# LABTRUST_PAPER_SMOKE=0
# LABTRUST_OFFICIAL_PACK_SMOKE=0
# LABTRUST_RUN_GOLDEN=0
# LABTRUST_STRICT_COVERAGE=0
# LABTRUST_STRICT_COORD_CONTRACT=0
# -----------------------------------------------------------------------------
# PPO / MARL (eval-agent with PPOAgent)
# -----------------------------------------------------------------------------
# Path to trained model (e.g. model.zip). Default: labtrust_runs/ppo_out/model.zip
# LABTRUST_PPO_MODEL=
# -----------------------------------------------------------------------------
# Optional, advanced: tracing, runs directory (request cache and artifact encryption are documented separately)
# -----------------------------------------------------------------------------
# LABTRUST_LLM_TRACE=0
# LABTRUST_LLM_TRACE_FILE=
# LABTRUST_RUNS_DIR=