---
# =============================================================================
# Hypeman Server Configuration (Linux)
# =============================================================================
# Copy this file to /etc/hypeman/config.yaml (or ~/.config/hypeman/config.yaml)
# and customize for your environment.
#
# Configuration precedence (highest to lowest):
#   1. Environment variables (e.g., JWT_SECRET, PORT)
#   2. This YAML config file
#   3. Built-in defaults
#
# NOTE(review): the commented example keys below are shown with their nesting
# reconstructed (the published copy had lost leading indentation inside the
# comments). Confirm the exact structure against the server's config loader
# before uncommenting.
# =============================================================================
# Required - used to sign and verify API tokens.
# Use a long, randomly generated value. May also be supplied via the
# JWT_SECRET environment variable, which overrides this file (see precedence).
jwt_secret: ""
# Data directory (default: /var/lib/hypeman)
data_dir: /var/lib/hypeman
# Server configuration
# port: 8080  # HTTP listen port; the PORT environment variable overrides this
# =============================================================================
# Hypervisor Configuration
# =============================================================================
# hypervisor:
#   default: cloud-hypervisor
#   # Optional: use a custom Firecracker binary path instead of the embedded one.
#   # firecracker_binary_path: /usr/local/bin/firecracker
#   memory:
#     enabled: false
#     # performance: init_on_alloc=0 init_on_free=0 (better density)
#     # hardened: init_on_alloc=1 init_on_free=1 (stronger hardening)
#     kernel_page_init_mode: hardened
#     reclaim_enabled: true
#     vz_balloon_required: true
#     active_ballooning:
#       enabled: false
#       poll_interval: 2s
#       pressure_high_watermark_available_percent: 10
#       pressure_low_watermark_available_percent: 15
#       protected_floor_percent: 50
#       protected_floor_min_bytes: 536870912  # 512 MiB
#       min_adjustment_bytes: 67108864        # 64 MiB
#       per_vm_max_step_bytes: 268435456      # 256 MiB
#       per_vm_cooldown: 5s
# =============================================================================
# Network Configuration
# =============================================================================
# network:
#   bridge_name: vmbr0
#   subnet_cidr: 10.100.0.0/16
#   subnet_gateway: ""    # empty = derived from subnet_cidr
#   uplink_interface: ""  # empty = auto-detect from default route
#   dns_server: 1.1.1.1
# =============================================================================
# Logging
# =============================================================================
# logging:
#   level: info  # debug, info, warn, error
# =============================================================================
# Images
# =============================================================================
# images:
#   auto_delete:
#     enabled: false    # server-wide automatic deletion of cached converted images
#     unused_for: 720h  # delete only after no instances or snapshots reference the image for this long
#     allowed:          # safety gate: only delete repositories matching one of these patterns
#       - docker.io/library/*  # match normalized repository names
#       - ghcr.io/kernel/*     # use ["*"] to allow deletion for every repository
#     # only affects data_dir/images, not the shared OCI cache
# =============================================================================
# Caddy / Ingress Configuration
# =============================================================================
# caddy:
#   listen_address: 0.0.0.0
#   admin_address: 127.0.0.1
#   admin_port: 0         # 0 = random (for dev); install script sets to 2019 for production
#   internal_dns_port: 0  # 0 = random (for dev); install script sets to 5353 for production
#   stop_on_shutdown: false  # Set to true if you want Caddy to stop when hypeman stops
# =============================================================================
# TLS / ACME Configuration (for HTTPS ingresses)
# =============================================================================
# Required for TLS ingresses:
# acme:
#   email: admin@example.com
#   dns_provider: cloudflare
#
#   # IMPORTANT: You must specify which domains are allowed for TLS certificates.
#   # This prevents typos and ensures you only request certificates for domains you control.
#   allowed_domains: "*.example.com,api.other.com"
#   # Supports:
#   #   - Exact matches: api.example.com
#   #   - Wildcard subdomains: *.example.com (matches foo.example.com, NOT foo.bar.example.com)
#   # If not set, no TLS ingresses are allowed.
#
#   # Optional ACME settings:
#   ca: ""  # empty = Let's Encrypt production
#   # Use https://acme-staging-v02.api.letsencrypt.org/directory for testing
#
#   # DNS propagation settings (applies to all providers):
#   dns_propagation_timeout: 2m       # Max time to wait for DNS propagation
#   dns_resolvers: "1.1.1.1,8.8.8.8"  # Custom DNS resolvers for propagation checking
#
#   # Cloudflare DNS Provider (dns_provider: cloudflare)
#   cloudflare_api_token: your-api-token
#   # Token needs Zone:DNS:Edit permissions for the domains you want certificates for
# =============================================================================
# OpenTelemetry Configuration
# =============================================================================
# metrics:
#   listen_address: 127.0.0.1
#   port: 9464
#   vm_label_budget: 200
#   resource_refresh_interval: 120s
#
# otel:
#   enabled: false
#   endpoint: 127.0.0.1:4317
#   service_name: hypeman
#   service_instance_id: ""  # default: hostname
#   insecure: true
#   metric_export_interval: 60s
#   env: dev  # deployment environment
# =============================================================================
# Build Configuration
# =============================================================================
# build:
#   builder_image: ""  # empty = built from Dockerfile on first run
#   docker_socket: /var/run/docker.sock
#   max_concurrent_source_builds: 2
#   timeout: 600  # NOTE(review): unit not stated — presumably seconds; confirm
# =============================================================================
# Resource Limits
# =============================================================================
# limits:
#   max_vcpus_per_instance: 16
#   max_memory_per_instance: 32GB
#   max_total_volume_storage: ""  # 0 or empty = unlimited
#   max_concurrent_builds: 1
#   max_overlay_size: 100GB