forked from DreamLab-AI/origin-logseq-AR
-
Notifications
You must be signed in to change notification settings - Fork 19
Expand file tree
/
Copy pathDockerfile.production
More file actions
201 lines (164 loc) · 7.95 KB
/
Dockerfile.production
File metadata and controls
201 lines (164 loc) · 7.95 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
# =============================================================================
# Production Dockerfile - Multi-stage build (CachyOS)
# Optimized layer ordering: toolchain → deps → CUDA PTX → source
# Code-only changes skip dependency download and PTX compilation entirely.
# =============================================================================
# ---------------------------------------------------------------------------
# Stage 1: Toolchain — cached until base image or tool versions change
# ---------------------------------------------------------------------------
# SECURITY: Pin to digest for production builds (e.g., cachyos/cachyos-v3@sha256:abc123...)
FROM cachyos/cachyos-v3:latest AS toolchain
ARG CUDA_ARCH=86
ENV RUST_LOG=warn \
    PATH="/root/.cargo/bin:/opt/cuda/bin:${PATH}" \
    CUDA_HOME=/opt/cuda \
    CUDA_PATH=/opt/cuda \
    CUDA_ARCH=${CUDA_ARCH} \
    LD_LIBRARY_PATH="/opt/cuda/lib64:${LD_LIBRARY_PATH}"
# Fail on the first broken command in a pipeline (hadolint DL4006): without
# pipefail, the `curl | sh` and `curl | tar` steps below would report success
# even when the download itself fails.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
# Initialize pacman keyring (required for CachyOS signature verification)
RUN pacman-key --init && \
    pacman-key --populate archlinux cachyos && \
    pacman -Sy --noconfirm archlinux-keyring cachyos-keyring && \
    pacman -Syu --noconfirm && \
    rm -rf /var/cache/pacman/pkg/*
# Install build dependencies (with retry logic for flaky mirrors)
RUN for attempt in 1 2 3; do \
      echo "=== Build deps install attempt $attempt/3 ===" && \
      pacman -S --noconfirm --needed \
        base-devel curl gcc git openssl \
      && break || { echo "Attempt $attempt failed, retrying in 10s..."; sleep 10; pacman -Syy --noconfirm; }; \
    done && rm -rf /var/cache/pacman/pkg/*
# Install CUDA via pacman (CachyOS installs to /opt/cuda)
RUN for attempt in 1 2 3; do \
      echo "=== CUDA install attempt $attempt/3 ===" && \
      pacman -S --noconfirm --needed cuda \
      && break || { echo "Attempt $attempt failed, retrying in 10s..."; sleep 10; pacman -Syy --noconfirm; }; \
    done && rm -rf /var/cache/pacman/pkg/*
# Install Rust via rustup.
# NOTE(review): `curl | sh` is authenticated only by TLS — consider pinning a
# specific rustup-init release and verifying its sha256 for reproducibility.
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
# Install Node.js 20.x LTS via tarball, verified against the published
# SHASUMS256.txt so a truncated or corrupted download cannot be extracted.
RUN NODE_VERSION=v20.18.3 && \
    NODE_TARBALL="node-${NODE_VERSION}-linux-x64.tar.xz" && \
    curl -fsSLO "https://nodejs.org/dist/${NODE_VERSION}/${NODE_TARBALL}" && \
    curl -fsSL "https://nodejs.org/dist/${NODE_VERSION}/SHASUMS256.txt" | \
      grep " ${NODE_TARBALL}" | sha256sum -c - && \
    tar -xJf "${NODE_TARBALL}" -C /usr/local --strip-components=1 && \
    rm -f "${NODE_TARBALL}"
# ---------------------------------------------------------------------------
# Stage 2: Dependency pre-compilation — cached until Cargo.toml/lock change
# ---------------------------------------------------------------------------
FROM toolchain AS deps
WORKDIR /app
# Copy ONLY dependency manifests + path dependencies (whelk-rs + workspace crates)
COPY Cargo.toml Cargo.lock ./
COPY whelk-rs ./whelk-rs
# Workspace members required for cargo resolution — see ADR-053
COPY crates ./crates
# Stub out every binary/example target so cargo can resolve and compile the
# full dependency graph without the real sources. The dummy lib must reference
# whelk so the path dependency is compiled as well.
RUN mkdir -p src/bin examples && \
    echo 'pub use whelk;' > src/lib.rs && \
    for stub in src/main.rs \
                src/bin/generate_types.rs \
                src/bin/sync_local.rs \
                src/bin/sync_github.rs \
                src/bin/load_ontology.rs \
                src/bin/test_tcp_connection_fixed.rs \
                examples/constraint_integration_debug.rs \
                examples/metadata_debug.rs \
                examples/ontology_constraints_example.rs \
                examples/ontology_validation_example.rs; do \
      echo 'fn main() {}' > "$stub"; \
    done
# Pre-build all 200+ dependencies in release mode (cached until Cargo.toml changes)
# Using a stub build.rs that skips CUDA to avoid needing .cu files at this stage
RUN echo 'fn main() { println!("cargo:warning=Stub build - deps only"); }' > build.rs
# `|| true` is deliberate: the stub targets may not fully link, but only the
# pre-built dependency artifacts left in target/ matter for later stages.
RUN cargo build --release 2>&1 || true && \
    cargo build --release --lib 2>&1 || true
# ---------------------------------------------------------------------------
# Stage 3: CUDA PTX compilation — cached until .cu files change
# ---------------------------------------------------------------------------
FROM toolchain AS cuda-ptx
ARG CUDA_ARCH=86
WORKDIR /app
# Copy ONLY CUDA source files — PTX layer rebuilds only when .cu files change
COPY src/utils/*.cu src/utils/
# BUGFIX: a shell `for` loop exits with the status of its LAST iteration, so an
# nvcc failure on any earlier .cu file was silently ignored as long as the
# final file compiled. `|| exit 1` makes every compilation failure fatal.
RUN mkdir -p /app/ptx && \
    for cu_file in src/utils/*.cu; do \
      name=$(basename "$cu_file" .cu); \
      echo "Compiling $name.ptx..."; \
      nvcc -ptx -arch sm_${CUDA_ARCH} -o /app/ptx/${name}.ptx "$cu_file" --use_fast_math -O3 || exit 1; \
    done && \
    echo "All PTX compilation complete"
# ---------------------------------------------------------------------------
# Stage 4: Frontend build — cached until client/ files change
# ---------------------------------------------------------------------------
FROM toolchain AS frontend
WORKDIR /app/client
# Build-time flags baked into the Vite bundle; VITE_JSS_WS_URL is the
# websocket notification endpoint compiled into the client.
ENV DOCKER_BUILD=1 \
    VITE_JSS_WS_URL=wss://www.visionflow.info/solid/.notifications
# Copy package manifests and scripts needed by preinstall hook.
# Kept separate from the full source copy so `npm ci` stays cached until the
# lockfile (or the hook scripts) actually change.
COPY client/package.json client/package-lock.json ./
COPY client/scripts ./scripts
RUN npm ci
# Copy source and build — only this layer invalidates on client code changes
COPY client/ ./
# Run vite build directly - skip type generation (types are pre-committed)
RUN npx vite build
# ---------------------------------------------------------------------------
# Stage 5: Backend build — only Rust source changes trigger recompilation
# ---------------------------------------------------------------------------
FROM deps AS builder
ARG CUDA_ARCH=86
ARG REBUILD_PTX=false
# Bring in the pre-compiled PTX kernels from the cuda-ptx stage
COPY --from=cuda-ptx /app/ptx/ /app/src/utils/ptx/
# Overlay the REAL sources on top of the stubs created in the deps stage
COPY build.rs ./
COPY src ./src
COPY schema ./schema
# The deps stage cached a stub build.rs; remove only the webxr crate's own
# build artifacts so cargo re-runs the real build.rs while every pre-built
# dependency in target/ stays cached. Paths are relative to WORKDIR /app.
RUN rm -rf target/release/build/webxr-* \
           target/release/deps/webxr* \
           target/release/deps/libwebxr* && \
    cargo build --release && \
    cp target/release/webxr /app/webxr
# ---------------------------------------------------------------------------
# Runtime stage — same CachyOS base, only runtime packages
# ---------------------------------------------------------------------------
# SECURITY: Pin to digest for production builds (e.g., cachyos/cachyos-v3@sha256:abc123...)
FROM cachyos/cachyos-v3:latest
ENV RUST_LOG=${RUST_LOG:-info} \
    PATH="/opt/cuda/bin:${PATH}" \
    NVIDIA_VISIBLE_DEVICES=all \
    NVIDIA_DRIVER_CAPABILITIES=all \
    CUDA_HOME=/opt/cuda \
    NODE_ENV=production
# Initialize pacman keyring for runtime stage
RUN pacman-key --init && \
    pacman-key --populate archlinux cachyos && \
    pacman -Sy --noconfirm archlinux-keyring cachyos-keyring && \
    pacman -Syu --noconfirm && \
    rm -rf /var/cache/pacman/pkg/*
# Runtime dependencies only (no base-devel, no compiler toolchain); retried
# because mirrors are occasionally flaky.
# NOTE(review): the full `cuda` package ships the entire toolkit — if the
# binary only dlopens the runtime libraries, a slimmer package would cut
# the image size substantially. Confirm against what /app/webxr links.
RUN for attempt in 1 2 3; do \
      echo "=== Runtime packages install attempt $attempt/3 ===" && \
      pacman -S --noconfirm --needed \
        cuda curl nginx openbsd-netcat openssl \
      && break || { echo "Attempt $attempt failed, retrying in 10s..."; sleep 10; pacman -Syy --noconfirm; }; \
    done && rm -rf /var/cache/pacman/pkg/*
# Writable directories the app and nginx expect at runtime
WORKDIR /app
RUN mkdir -p \
      /app/data/markdown \
      /app/data/metadata \
      /app/data/runtime \
      /app/user_settings \
      /app/logs \
      /var/log/nginx \
      /var/run/nginx
# Cache-buster AFTER heavy installs: pass --build-arg CACHE_BUST=$(date +%s) to
# force re-copy of artifacts from builder. Placed here so it only invalidates the
# COPY layers below, NOT the pacman/CUDA install layers above (~2.2 GB).
# NOTE(review): under BuildKit an ARG that is never referenced may not
# invalidate subsequent layers — if busting stops working, consume it
# explicitly (e.g. RUN echo "bust: ${CACHE_BUST}") before the COPYs.
ARG CACHE_BUST=0
# Built artifacts from the parallel build stages
COPY --from=builder /app/webxr /app/webxr
COPY --from=frontend /app/client/dist /app/client/dist
COPY --from=cuda-ptx /app/ptx /app/src/utils/ptx
# Configuration and the startup script
COPY data/settings.yaml /app/settings.yaml
COPY nginx.production.conf /etc/nginx/nginx.conf
COPY scripts/production-startup.sh /app/start.sh
RUN chmod +x /app/start.sh
# NOTE(review): the container runs as root (no USER directive) — nginx setup
# may require it, but confirm and drop privileges where possible.
# NOTE(review): consider a HEALTHCHECK once a cheap HTTP health endpoint on
# port 3001 is confirmed (curl is installed in this image).
EXPOSE 3001
ENTRYPOINT ["/app/start.sh"]