-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathDockerfile.libs
More file actions
254 lines (213 loc) · 11.8 KB
/
Dockerfile.libs
File metadata and controls
254 lines (213 loc) · 11.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
# syntax=docker/dockerfile:1
# Dockerfile.libs — build linux-amd64 static libraries for llama.cpp and whisper.cpp
#
# Usage (CPU, default):
#   docker build -f Dockerfile.libs -o ./out .
#
# Usage (CUDA):
#   docker build -f Dockerfile.libs --build-arg GPU_BACKEND=cuda -o ./out .
#
# Usage (Vulkan):
#   docker build -f Dockerfile.libs --build-arg GPU_BACKEND=vulkan -o ./out .
#
# Build + link test (ensures .a files link correctly):
#   docker build -f Dockerfile.libs --target build-test .
#
# This extracts prebuilt .a files + headers into ./out/ on the host.

# GPU_BACKEND selects which builder / test-base stage is used: cpu (default),
# cuda, or vulkan. Declared before the first FROM so it can parameterize FROM
# lines; a stage that needs its *value* in RUN steps must redeclare it
# (see the build-test stage below).
ARG GPU_BACKEND=cpu
# ============================================================================
# Stage: Download sources (shared by all backends)
# ============================================================================
FROM golang:1.24-bookworm AS sources

RUN apt-get update && apt-get install -y --no-install-recommends \
    wget && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /src

# The pinned upstream versions live in Go code — copy just enough of the
# module to run the version printer.
COPY go.mod ./
COPY version.go ./
COPY cmd/versioncmd/ ./cmd/versioncmd/

# Fetch the llama.cpp release tarball at the version the Go module pins,
# unpack into llama-src/, and drop the archive in the same layer.
RUN mkdir -p llama-src && \
    LLAMA_VERSION=$(go run ./cmd/versioncmd llama.cpp) && \
    echo "Downloading llama.cpp ${LLAMA_VERSION}..." && \
    wget -qO llama.cpp.tar.gz "https://github.com/ggerganov/llama.cpp/archive/refs/tags/${LLAMA_VERSION}.tar.gz" && \
    tar xzf llama.cpp.tar.gz --strip-components=1 -C llama-src && \
    rm llama.cpp.tar.gz

# Same procedure for whisper.cpp.
RUN mkdir -p whisper-src && \
    WHISPER_VERSION=$(go run ./cmd/versioncmd whisper.cpp) && \
    echo "Downloading whisper.cpp ${WHISPER_VERSION}..." && \
    wget -qO whisper.cpp.tar.gz "https://github.com/ggerganov/whisper.cpp/archive/refs/tags/${WHISPER_VERSION}.tar.gz" && \
    tar xzf whisper.cpp.tar.gz --strip-components=1 -C whisper-src && \
    rm whisper.cpp.tar.gz
# ============================================================================
# Builder: CPU (default)
# ============================================================================
FROM golang:1.24-bookworm AS builder-cpu

RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential cmake && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /src
COPY --from=sources /src/llama-src llama-src
COPY --from=sources /src/whisper-src whisper-src

# Build llama.cpp (CPU). cmake -S/-B replaces the former `cd` idiom
# (hadolint DL3003) while producing the same build/ tree.
RUN cmake -S llama-src -B llama-src/build -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF && \
    cmake --build llama-src/build --config Release -j$(nproc)

# Build whisper.cpp (CPU)
RUN cmake -S whisper-src -B whisper-src/build -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF && \
    cmake --build whisper-src/build --config Release -j$(nproc)

# Collect llama.cpp artifacts and strip debug symbols
# (--strip-debug keeps the symbol table the linker needs, drops DWARF only)
RUN mkdir -p /out/llama.cpp/linux-amd64 /out/llama.cpp/include /out/llama.cpp/ggml/include /out/llama.cpp/common && \
    find llama-src/build -name "*.a" -exec cp {} /out/llama.cpp/linux-amd64/ \; && \
    find /out/llama.cpp/linux-amd64 -name "*.a" -exec strip --strip-debug {} \; && \
    cp llama-src/include/*.h /out/llama.cpp/include/ && \
    cp llama-src/ggml/include/*.h /out/llama.cpp/ggml/include/ && \
    cp llama-src/common/common.h /out/llama.cpp/common/ && \
    cp llama-src/common/sampling.h /out/llama.cpp/common/

# Collect whisper.cpp artifacts and strip debug symbols
RUN mkdir -p /out/whisper.cpp/linux-amd64 /out/whisper.cpp/include /out/whisper.cpp/ggml/include && \
    find whisper-src/build -name "*.a" -exec cp {} /out/whisper.cpp/linux-amd64/ \; && \
    find /out/whisper.cpp/linux-amd64 -name "*.a" -exec strip --strip-debug {} \; && \
    cp whisper-src/include/*.h /out/whisper.cpp/include/ && \
    cp whisper-src/ggml/include/*.h /out/whisper.cpp/ggml/include/
# ============================================================================
# Builder: CUDA
# ============================================================================
FROM nvidia/cuda:12.8.0-devel-ubuntu24.04 AS builder-cuda

# NOTE(review): wget is installed but no step in this stage appears to use
# it — kept to avoid breaking anything outside this view; confirm and drop.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential cmake wget && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /src
COPY --from=sources /src/llama-src llama-src
COPY --from=sources /src/whisper-src whisper-src

# Build llama.cpp (CUDA). cmake -S/-B replaces the former `cd` idiom
# (hadolint DL3003) while producing the same build/ tree.
RUN cmake -S llama-src -B llama-src/build -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=ON && \
    cmake --build llama-src/build --config Release -j$(nproc)

# Build whisper.cpp (CUDA)
RUN cmake -S whisper-src -B whisper-src/build -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=ON && \
    cmake --build whisper-src/build --config Release -j$(nproc)

# Collect llama.cpp artifacts (CUDA variant) and strip debug symbols
RUN mkdir -p /out/llama.cpp/linux-amd64-cuda /out/llama.cpp/include /out/llama.cpp/ggml/include /out/llama.cpp/common && \
    find llama-src/build -name "*.a" -exec cp {} /out/llama.cpp/linux-amd64-cuda/ \; && \
    find /out/llama.cpp/linux-amd64-cuda -name "*.a" -exec strip --strip-debug {} \; && \
    cp llama-src/include/*.h /out/llama.cpp/include/ && \
    cp llama-src/ggml/include/*.h /out/llama.cpp/ggml/include/ && \
    cp llama-src/common/common.h /out/llama.cpp/common/ && \
    cp llama-src/common/sampling.h /out/llama.cpp/common/

# Collect whisper.cpp artifacts (CUDA variant) and strip debug symbols
RUN mkdir -p /out/whisper.cpp/linux-amd64-cuda /out/whisper.cpp/include /out/whisper.cpp/ggml/include && \
    find whisper-src/build -name "*.a" -exec cp {} /out/whisper.cpp/linux-amd64-cuda/ \; && \
    find /out/whisper.cpp/linux-amd64-cuda -name "*.a" -exec strip --strip-debug {} \; && \
    cp whisper-src/include/*.h /out/whisper.cpp/include/ && \
    cp whisper-src/ggml/include/*.h /out/whisper.cpp/ggml/include/
# ============================================================================
# Builder: Vulkan
# ============================================================================
# Use Ubuntu 24.04 for Vulkan — bookworm's Vulkan 1.3.239 is too old
# (llama.cpp b8220+ needs VK_EXT_layer_settings from Vulkan 1.3.261+)
FROM ubuntu:24.04 AS builder-vulkan

RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential cmake wget ca-certificates libvulkan-dev glslang-tools glslc && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /src
COPY --from=sources /src/llama-src llama-src
COPY --from=sources /src/whisper-src whisper-src

# Build llama.cpp (Vulkan). cmake -S/-B replaces the former `cd` idiom
# (hadolint DL3003) while producing the same build/ tree.
RUN cmake -S llama-src -B llama-src/build -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF -DGGML_VULKAN=ON && \
    cmake --build llama-src/build --config Release -j$(nproc)

# Build whisper.cpp (Vulkan)
RUN cmake -S whisper-src -B whisper-src/build -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF -DGGML_VULKAN=ON && \
    cmake --build whisper-src/build --config Release -j$(nproc)

# Collect llama.cpp artifacts (Vulkan variant) and strip debug symbols
RUN mkdir -p /out/llama.cpp/linux-amd64-vulkan /out/llama.cpp/include /out/llama.cpp/ggml/include /out/llama.cpp/common && \
    find llama-src/build -name "*.a" -exec cp {} /out/llama.cpp/linux-amd64-vulkan/ \; && \
    find /out/llama.cpp/linux-amd64-vulkan -name "*.a" -exec strip --strip-debug {} \; && \
    cp llama-src/include/*.h /out/llama.cpp/include/ && \
    cp llama-src/ggml/include/*.h /out/llama.cpp/ggml/include/ && \
    cp llama-src/common/common.h /out/llama.cpp/common/ && \
    cp llama-src/common/sampling.h /out/llama.cpp/common/

# Collect whisper.cpp artifacts (Vulkan variant) and strip debug symbols
RUN mkdir -p /out/whisper.cpp/linux-amd64-vulkan /out/whisper.cpp/include /out/whisper.cpp/ggml/include && \
    find whisper-src/build -name "*.a" -exec cp {} /out/whisper.cpp/linux-amd64-vulkan/ \; && \
    find /out/whisper.cpp/linux-amd64-vulkan -name "*.a" -exec strip --strip-debug {} \; && \
    cp whisper-src/include/*.h /out/whisper.cpp/include/ && \
    cp whisper-src/ggml/include/*.h /out/whisper.cpp/ggml/include/
# ============================================================================
# Dynamic backend selection — picks the right builder stage
# ============================================================================
# Resolves to builder-cpu, builder-cuda, or builder-vulkan. The file-level
# ARG GPU_BACKEND is usable here because ARGs declared before the first FROM
# are in scope for FROM lines (and only FROM lines).
FROM builder-${GPU_BACKEND} AS builder
# ============================================================================
# Build test bases — provide the right link libraries per backend
# ============================================================================

# CPU test base: the golang image already ships the Go toolchain.
FROM golang:1.24-bookworm AS build-test-base-cpu
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential libgomp1 && \
    rm -rf /var/lib/apt/lists/*

# CUDA test base: non-Go base image, so install Go from the upstream tarball.
# GO_VERSION is an ARG so a Go bump is a single --build-arg instead of
# editing two hardcoded URLs; the default matches the previous 1.24.6.
FROM nvidia/cuda:12.8.0-devel-ubuntu24.04 AS build-test-base-cuda
ARG GO_VERSION=1.24.6
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential libgomp1 wget && \
    rm -rf /var/lib/apt/lists/* && \
    wget -qO go.tar.gz "https://go.dev/dl/go${GO_VERSION}.linux-amd64.tar.gz" && \
    tar -C /usr/local -xzf go.tar.gz && rm go.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"

# Vulkan test base: plain Ubuntu, likewise installs Go manually.
FROM ubuntu:24.04 AS build-test-base-vulkan
ARG GO_VERSION=1.24.6
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential libgomp1 libvulkan-dev wget ca-certificates && \
    rm -rf /var/lib/apt/lists/* && \
    wget -qO go.tar.gz "https://go.dev/dl/go${GO_VERSION}.linux-amd64.tar.gz" && \
    tar -C /usr/local -xzf go.tar.gz && rm go.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"
# ============================================================================
# Build test — verifies the .a files link correctly with Go CGO
# ============================================================================
FROM build-test-base-${GPU_BACKEND} AS build-test
# Redeclare so RUN steps in this stage can read the value (pre-FROM ARGs are
# only visible in FROM lines).
ARG GPU_BACKEND=cpu
WORKDIR /src
COPY . .

# Pull the freshly built .a files + headers from the selected builder.
# The linux-amd64*/ glob matches whichever variant directory the builder
# produced (linux-amd64, linux-amd64-cuda, or linux-amd64-vulkan).
COPY --from=builder /out/llama.cpp/linux-amd64*/ /tmp/llama-libs/
COPY --from=builder /out/llama.cpp/include/ /src/ggml/llamacpp/third_party/include/
COPY --from=builder /out/llama.cpp/ggml/include/ /src/ggml/llamacpp/third_party/ggml/include/
COPY --from=builder /out/llama.cpp/common/ /src/ggml/llamacpp/third_party/common/
COPY --from=builder /out/whisper.cpp/linux-amd64*/ /tmp/whisper-libs/
COPY --from=builder /out/whisper.cpp/include/ /src/ggml/whispercpp/third_party/include/
COPY --from=builder /out/whisper.cpp/ggml/include/ /src/ggml/whispercpp/third_party/ggml/include/

# Copy .a files to the prebuilt directory matching GPU_BACKEND. The suffix is
# computed in the same RUN that uses it — shell variables do not survive
# across RUN instructions, so a separate "determine suffix" layer would (and
# previously did) compute a value that was immediately discarded.
RUN SUFFIX="" && \
    if [ "$GPU_BACKEND" = "cuda" ]; then SUFFIX="-cuda"; fi && \
    if [ "$GPU_BACKEND" = "vulkan" ]; then SUFFIX="-vulkan"; fi && \
    echo "Using prebuilt suffix: linux-amd64${SUFFIX}" && \
    mkdir -p /src/ggml/llamacpp/third_party/prebuilt/linux-amd64${SUFFIX} && \
    mkdir -p /src/ggml/whispercpp/third_party/prebuilt/linux-amd64${SUFFIX} && \
    cp /tmp/llama-libs/*.a /src/ggml/llamacpp/third_party/prebuilt/linux-amd64${SUFFIX}/ && \
    cp /tmp/whisper-libs/*.a /src/ggml/whispercpp/third_party/prebuilt/linux-amd64${SUFFIX}/

# Build twice per package tree: tag-less stub builds must compile with
# CGO disabled; tagged builds exercise CGO linking against the new .a files.
RUN LLAMA_TAGS="llamacpp" && \
    WHISPER_TAGS="whispercpp" && \
    if [ "$GPU_BACKEND" = "cuda" ]; then LLAMA_TAGS="llamacpp,cuda"; WHISPER_TAGS="whispercpp,cuda"; fi && \
    if [ "$GPU_BACKEND" = "vulkan" ]; then LLAMA_TAGS="llamacpp,vulkan"; WHISPER_TAGS="whispercpp,vulkan"; fi && \
    echo "Build tags: llama=${LLAMA_TAGS} whisper=${WHISPER_TAGS}" && \
    CGO_ENABLED=0 go build ./ggml/llamacpp/... && \
    CGO_ENABLED=0 go build ./ggml/whispercpp/... && \
    echo "stub builds OK" && \
    CGO_ENABLED=1 go build -tags "${LLAMA_TAGS}" ./ggml/llamacpp/... && \
    echo "llamacpp CGO build OK (${LLAMA_TAGS})" && \
    CGO_ENABLED=1 go build -tags "${WHISPER_TAGS}" ./ggml/whispercpp/... && \
    echo "whispercpp CGO build OK (${WHISPER_TAGS})"

# Run stub tests (CGO disabled, so they run on any backend's base image)
RUN CGO_ENABLED=0 go test ./ggml/llamacpp/... && \
    CGO_ENABLED=0 go test ./ggml/whispercpp/... && \
    echo "all tests passed"
# Output stage — docker build -o extracts from here
# As the last (default) stage, `docker build -o DIR` exports this stage's
# filesystem to DIR on the host; scratch keeps the export to exactly /out/.
FROM scratch
COPY --from=builder /out/ /