-
-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathsetup.sh
More file actions
248 lines (211 loc) · 7.4 KB
/
setup.sh
File metadata and controls
248 lines (211 loc) · 7.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
#!/usr/bin/env bash
# ============================================
# NPU-STACK Setup Script (Linux/macOS)
# Made by Fanalogy - Powered by Nirvana
# ============================================
# Strict-ish mode: fail on unset variables and on any failing pipeline stage.
# NOTE: -e is deliberately NOT enabled — later steps inspect the install
# commands' exit status to fall back gracefully instead of aborting.
set -uo pipefail
echo ""
echo " ============================================"
echo " NPU-STACK - Neural Processor Toolkit"
echo " Made by Fanalogy - Powered by Nirvana"
echo " ============================================"
echo ""
# Absolute path of the directory containing this script (works regardless
# of the caller's cwd or how the script was invoked).
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
VENV_DIR="$ROOT/.venv"   # isolated Python environment location
ENV_FILE="$ROOT/.env"    # generated configuration file
PYTHON_CMD=""            # resolved interpreter; set in step 1
# =============================================
# STEP 1: Find Python 3.10+
# =============================================
echo "[1/7] Checking for Python..."
if [ -f "$VENV_DIR/bin/python" ]; then
  echo " [OK] Virtual environment already exists at .venv/"
  PYTHON_CMD="$VENV_DIR/bin/python"
else
  # Try python3 first, then python; take the first interpreter >= 3.10.
  for cmd in python3 python; do
    if command -v "$cmd" &>/dev/null; then
      PY_VER=$("$cmd" --version 2>&1)
      echo " Found $PY_VER"
      # Ask the interpreter itself whether it meets the minimum version —
      # more reliable than parsing the --version string.
      if "$cmd" -c "import sys; exit(0 if sys.version_info >= (3, 10) else 1)" 2>/dev/null; then
        echo " [OK] $cmd is compatible (3.10+)"
        PYTHON_CMD="$cmd"
        break
      else
        echo " [!!] $cmd is too old (need 3.10+)"
      fi
    fi
  done
  if [ -z "$PYTHON_CMD" ]; then
    echo ""
    echo " [ERROR] No Python 3.10+ found."
    echo " Please install Python 3.10 or newer:"
    echo " Ubuntu/Debian: sudo apt install python3 python3-venv python3-pip"
    echo " Fedora: sudo dnf install python3"
    echo " macOS: brew install python@3.11"
    echo " Or download from https://python.org"
    echo ""
    exit 1
  fi
  # =============================================
  # STEP 2: Create Virtual Environment
  # =============================================
  echo ""
  echo "[2/7] Creating isolated virtual environment..."
  # 'if ! cmd' tests the command directly instead of checking $? afterwards;
  # the interpreter path is quoted in case it contains spaces.
  if ! "$PYTHON_CMD" -m venv "$VENV_DIR"; then
    echo " [ERROR] Failed to create venv. Install python3-venv:"
    echo " sudo apt install python3-venv"
    exit 1
  fi
  echo " [OK] Created .venv/"
fi
# Activate the freshly-resolved virtual environment and record the
# absolute paths of its pip/python for the remaining steps.
PIP="$VENV_DIR/bin/pip"
PYTHON="$VENV_DIR/bin/python"
source "$VENV_DIR/bin/activate"
printf '%s\n' " [OK] Python: $PYTHON" ""
# =============================================
# STEP 3: Install Dependencies
# =============================================
echo "[3/7] Installing backend dependencies..."
echo " This will take several minutes (PyTorch, OpenVINO, etc.)"
echo ""
"$PIP" install --upgrade pip setuptools wheel --quiet
echo " Installing core ML dependencies (Torch 2.9.1+cu130)..."
"$PIP" uninstall torch torchvision torchaudio -y >/dev/null 2>&1
# Prefer the CUDA 13.0 wheels from the PyTorch index. BUGFIX: the original
# printed a "falling back" warning but never actually installed a fallback;
# now the standard PyPI builds are installed when the CUDA wheels fail.
if ! "$PIP" install torch==2.9.1 torchvision==0.24.1 torchaudio==2.9.1 --index-url https://download.pytorch.org/whl/cu130; then
  echo " [WARN] Optimized PyTorch install failed, falling back to standard..."
  "$PIP" install torch==2.9.1 torchvision==0.24.1 torchaudio==2.9.1
fi
echo " Installing llama-cpp-python optimized for CUDA..."
"$PIP" uninstall llama-cpp-python -y >/dev/null 2>&1
# Build against CUDA first; if the local toolchain cannot compile it, fall
# back to a pre-built wheel; if that also fails, degrade gracefully.
if ! CMAKE_ARGS="-DGGML_CUDA=on" "$PIP" install llama-cpp-python --no-cache-dir; then
  echo " [WARN] CUDA llama-cpp-python install failed, falling back to pre-built binary..."
  if ! "$PIP" install llama-cpp-python --prefer-binary; then
    echo " [WARN] llama-cpp-python could not be installed."
    echo " [WARN] GGUF inference features will be unavailable."
    echo " [WARN] To fix: ensure gcc, g++, and cmake are installed, then re-run:"
    echo " [WARN] pip install llama-cpp-python"
    echo " [WARN] Alternatively, use Docker: docker compose up --build"
  fi
fi
echo " Installing remaining requirements..."
# Non-fatal: the core platform still works if optional GPU/NPU extras fail.
if ! "$PIP" install -r "$ROOT/backend/requirements.txt"; then
  echo ""
  echo " [WARNING] Some packages may have failed."
  echo " Core platform will still work. GPU/NPU features may be limited."
  echo ""
fi
echo ""
echo " [OK] Dependencies installed."
echo ""
# =============================================
# STEP 4: Download GGUF Tools
# =============================================
echo "[4/7] Downloading GGUF Tools..."
# Non-fatal: the platform works without the llama.cpp conversion tools.
if ! "$PYTHON" "$ROOT/scripts/download_llama_cpp_tools.py"; then
  echo " [WARN] llama.cpp tools download failed or was skipped."
  echo " [WARN] GGUF conversion features may be unavailable."
  echo " [WARN] You can retry manually: python scripts/download_llama_cpp_tools.py"
fi
echo ""
# =============================================
# STEP 5: Generate .env File
# =============================================
echo "[5/7] Generating .env configuration..."
# Only write a fresh .env when none exists, so user edits survive re-runs.
if [ ! -f "$ENV_FILE" ]; then
  # Quoted delimiter ('ENVEOF'): the here-doc is written out literally,
  # with no variable or command expansion.
  cat > "$ENV_FILE" << 'ENVEOF'
# NPU-STACK Environment Configuration
# Generated by setup.sh
# --- Server ---
BACKEND_HOST=0.0.0.0
BACKEND_PORT=8000
FRONTEND_PORT=3000
# --- Database ---
DATABASE_URL=sqlite:///data/npu_stack.db
# --- Model Storage ---
MODEL_STORE_PATH=./data/models
DATASET_CACHE_PATH=./data/datasets
MAX_UPLOAD_SIZE_MB=500
# --- HuggingFace ---
HUGGINGFACE_TOKEN=
HUGGINGFACE_CACHE_DIR=./data/hf_cache
# --- Training ---
DEFAULT_DEVICE=cpu
TORCH_HOME=./data/torch_cache
CUDA_VISIBLE_DEVICES=0
# --- OpenVINO / NPU ---
OPENVINO_LOG_LEVEL=WARNING
NPU_DEVICE_NAME=NPU
# --- CORS ---
CORS_ORIGINS=http://localhost:5173,http://localhost:3000,http://127.0.0.1:5173,http://127.0.0.1:3000
# --- Logging ---
LOG_LEVEL=INFO
DEBUG=false
# --- Branding ---
APP_NAME=NPU-STACK
APP_VERSION=1.0.0
BRAND=Fanalogy
POWERED_BY=Nirvana
ENVEOF
  echo " [OK] Created .env"
else
  echo " [OK] .env already exists, skipping. Delete it to regenerate."
fi
echo ""
# =============================================
# STEP 6: Create Data Directories
# =============================================
echo "[6/7] Creating data directories..."
# Single mkdir call; -p creates missing parents and tolerates existing dirs.
mkdir -p "$ROOT/backend/data/models" \
         "$ROOT/backend/data/datasets" \
         "$ROOT/backend/data/hf_cache"
echo " [OK] Data directories ready."
echo ""
# =============================================
# STEP 7: Install Frontend Dependencies
# =============================================
echo "[7/7] Installing frontend dependencies..."
if command -v npm &>/dev/null; then
  # Run npm inside a subshell so a failed cd can never leave us running
  # 'npm install' in the wrong directory (SC2164), and the caller's cwd
  # is restored automatically when the subshell exits.
  (
    cd "$ROOT/frontend" || { echo " [ERROR] frontend/ directory not found."; exit 1; }
    if [ ! -d "node_modules" ]; then
      npm install
    else
      echo " [OK] node_modules already exists."
    fi
  )
else
  echo " [WARNING] npm not found. Install Node.js to run the frontend:"
  echo " Ubuntu: sudo apt install nodejs npm"
  echo " macOS: brew install node"
  echo " Or use: https://nodejs.org"
fi
echo ""
# =============================================
# Make launcher scripts executable
# =============================================
# Best-effort: missing scripts are ignored (errors suppressed on purpose).
chmod +x "$ROOT/run-backend.sh" "$ROOT/run-frontend.sh" "$ROOT/run-all.sh" 2>/dev/null || true
# =============================================
# DONE
# =============================================
# One expanding here-doc instead of a run of echo calls; the unquoted
# delimiter lets $PYTHON expand, and the output is byte-identical.
cat <<DONEEOF
 ============================================
 Setup Complete!
 ============================================

 Python: $PYTHON
 Config: .env

 Quick Start:
 ./run-backend.sh - Start API server
 ./run-frontend.sh - Start React dev server
 ./run-all.sh - Start both

 Or with Docker:
 docker compose up --build

 ============================================

DONEEOF