
Commit 52b548c

Initial ollama DAppNode package
Separated the ollama package into amd/cpu/nvidia variants.

13 files changed: 145 additions & 0 deletions

.github/workflows/auto_check.yml

Lines changed: 20 additions & 0 deletions
name: Bump upstream version

on:
  schedule:
    - cron: "00 */4 * * *"
  workflow_dispatch:
  push:
    branches:
      - "master"

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run: npx @dappnode/dappnodesdk github-action bump-upstream --use_variants
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          PINATA_API_KEY: ${{ secrets.PINATA_API_KEY }}
          PINATA_SECRET_API_KEY: ${{ secrets.PINATA_SECRET_API_KEY }}
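
This workflow polls upstream every four hours (cron "00 */4 * * *") and lets the DAppNode SDK open a version-bump PR when ollama/ollama tags a new release; the --use_variants flag propagates the bump to the variant manifests as well. Because workflow_dispatch is declared, the check can also be kicked off by hand, for example with the GitHub CLI (a sketch, assuming the workflow file keeps this name):

    gh workflow run auto_check.yml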

.github/workflows/main.yml

Lines changed: 28 additions & 0 deletions
name: "Main"
on:
  repository_dispatch:
  pull_request:
  push:
    branches:
      - "main"
      - "v[0-9]+.[0-9]+.[0-9]+"
    paths-ignore:
      - "README.md"

jobs:
  release:
    name: Release
    runs-on: ipfs-dev-gateway
    if: github.event_name == 'push' || github.event_name == 'repository_dispatch'
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '22'
      - name: Log in to GitHub Container Registry
        run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
      - name: Publish
        run: npx @dappnode/dappnodesdk publish patch --github_release --timeout 1h --content_provider=http://10.200.200.7:5001 --eth_provider=https://web3.dappnode.net --all-variants
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          DEVELOPER_ADDRESS: "0xf35960302a07022aba880dffaec2fdd64d5bf1c1"
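
The Release job runs on a self-hosted ipfs-dev-gateway runner so the publish step can pin the build to the local IPFS node at http://10.200.200.7:5001 and resolve the registry through https://web3.dappnode.net; --all-variants publishes all variants in one pass. A hedged local sanity check before pushing (the SDK's build command exists, though its variants flag spelling may differ by version):

    npx @dappnode/dappnodesdk build --all-variants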

.gitignore

Lines changed: 3 additions & 0 deletions
*build*
releases.json
*tmp*

avatar-ollama.png

12 KB

dappnode_package.json

Lines changed: 18 additions & 0 deletions
{
  "name": "ollama.dnp.dappnode.eth",
  "version": "0.1.0",
  "upstream": [
    {
      "repo": "ollama/ollama",
      "version": "v0.17.7",
      "arg": "OLLAMA_VERSION"
    }
  ],
  "shortDescription": "Local LLM inference engine with GPU acceleration",
  "description": "Run large language models locally on your DAppNode with GPU acceleration. Ollama provides a fast and efficient LLM inference engine with AMD ROCm support.\n\n**Features:**\n- AMD GPU acceleration via ROCm\n- Complete privacy - all processing stays local\n- Support for multiple LLM models (Llama, Mistral, CodeLlama, etc.)\n\n**Requirements:**\n- AMD GPU with ROCm support\n- At least 8GB RAM (16GB+ recommended)\n- Sufficient storage for models (10GB+ recommended)",
  "type": "service",
  "author": "DAppNode Association <admin@dappnode.io> (https://github.com/dappnode)",
  "license": "GPL-3.0",
  "categories": ["AI"],
  "architectures": ["linux/amd64"]
}
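
The upstream block is what the auto_check workflow edits: its version field is the source of truth, and the arg name ties it to the Docker build. Tracing the value through the files in this commit:

    dappnode_package.json   "version": "v0.17.7", "arg": "OLLAMA_VERSION"    (bumped by the workflow)
    docker-compose.yml      OLLAMA_VERSION: v0.17.7                          (passed as a build arg)
    ollama/Dockerfile       FROM ollama/ollama:${OLLAMA_VERSION#v}           (leading "v" stripped, tag 0.17.7)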

docker-compose.yml

Lines changed: 19 additions & 0 deletions
version: "3.5"
services:
  ollama:
    build:
      context: ollama
      args:
        OLLAMA_VERSION: v0.17.7
    container_name: ollama.ollama.dnp.dappnode.eth
    volumes:
      - ollama:/root/.ollama
    restart: unless-stopped
    environment:
      - OLLAMA_LOG_LEVEL=debug
      - OLLAMA_METRICS=1
      - OLLAMA_LOG_FORMAT=json
      - OLLAMA_TELEMETRY=0
      - OLLAMA_CONTEXT_LENGTH=64000
volumes:
  ollama: {}
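
A minimal usage sketch once the package is running, assuming DAppNode's internal DNS resolves the container name and that Ollama listens on its default port 11434 (neither is declared in this compose file):

    # pull an example model inside the running container (llama3.2 is illustrative)
    docker exec -it ollama.ollama.dnp.dappnode.eth ollama pull llama3.2

    # query the HTTP API from another container on the same network
    curl http://ollama.ollama.dnp.dappnode.eth:11434/api/generate \
      -d '{"model": "llama3.2", "prompt": "Say hello", "stream": false}'

Note that OLLAMA_CONTEXT_LENGTH=64000 raises the context window well above Ollama's default, which increases memory use per loaded model.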

ollama/Dockerfile

Lines changed: 7 additions & 0 deletions
# syntax=docker/dockerfile:1
ARG OLLAMA_VERSION
ARG OLLAMA_SUFFIX=""

FROM ollama/ollama:${OLLAMA_VERSION#v}${OLLAMA_SUFFIX}

RUN mkdir -p /root/.ollama
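
The FROM line relies on shell-style parameter expansion, which the dockerfile:1 syntax supports: "#v" strips a leading "v" so the upstream tag v0.17.7 maps to the image tag 0.17.7, and OLLAMA_SUFFIX selects a variant tag. The same expansion can be verified in any POSIX shell:

    OLLAMA_VERSION=v0.17.7
    echo "${OLLAMA_VERSION#v}"          # 0.17.7
    echo "${OLLAMA_VERSION#v}-rocm"     # 0.17.7-rocm, the AMD variant's tag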
Lines changed: 5 additions & 0 deletions
{
  "name": "ollama-amd.dnp.dappnode.eth",
  "version": "0.1.0",
  "description": "Run large language models locally on your DAppNode with GPU acceleration. Ollama with AMD ROCm support for GPU inference.\n\n**Features:**\n- AMD GPU acceleration via ROCm\n- Complete privacy - all processing stays local\n- Support for multiple LLM models (Llama, Mistral, CodeLlama, etc.)\n\n**Requirements:**\n- AMD GPU with ROCm support\n- At least 8GB RAM (16GB+ recommended)\n- Sufficient storage for models (10GB+ recommended)\n"
}
Lines changed: 12 additions & 0 deletions
version: "3.5"

services:
  ollama:
    build:
      context: ollama
      args:
        OLLAMA_SUFFIX: -rocm
    devices:
      - /dev/kfd
      - /dev/dri
    container_name: ollama.ollama-amd.dnp.dappnode.eth
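
This override swaps the base image to the -rocm tag and passes through the two device nodes ROCm needs: /dev/kfd (the kernel compute interface) and /dev/dri (the GPU render nodes). A hedged way to confirm the GPU is visible inside the variant container, assuming the rocm base image ships rocminfo (if it does not, Ollama's startup log reports the detected GPU instead):

    docker exec ollama.ollama-amd.dnp.dappnode.eth rocminfo | grep -i gfx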
Lines changed: 6 additions & 0 deletions
{
  "name": "ollama-cpu.dnp.dappnode.eth",
  "version": "0.1.0",
  "architectures": ["linux/amd64", "linux/arm64"],
  "description": "Run large language models locally on your DAppNode. Ollama inference engine with CPU-only support.\n\n**Features:**\n- CPU acceleration for inference\n- Complete privacy - all processing stays local\n- Support for multiple LLM models (Llama, Mistral, CodeLlama, etc.)\n\n**Requirements:**\n- At least 8GB RAM (16GB+ recommended)\n- Sufficient storage for models (10GB+ recommended)\n"
}
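
The CPU variant drops the GPU requirements and widens architectures to include linux/arm64, which relies on the upstream ollama/ollama image being multi-arch. A quick check against the pinned tag (a sketch; requires pull access to Docker Hub):

    docker manifest inspect ollama/ollama:0.17.7 | grep -B1 arm64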
