diff --git a/.dockerignore b/.dockerignore index 6c8e6fb5..2c97c009 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,2 +1,5 @@ /create-a-container/data/database.sqlite -/create-a-container/certs/* \ No newline at end of file +/create-a-container/certs/* +/create-a-container/.envcompose.override.yml +/mie-opensource-landing/build +*/node_modules diff --git a/.github/workflows/docker-build-push.yml b/.github/workflows/docker-build-push.yml index 9db13405..66087122 100644 --- a/.github/workflows/docker-build-push.yml +++ b/.github/workflows/docker-build-push.yml @@ -2,11 +2,6 @@ name: Build and Push Docker Image on: push: - branches: - - main - pull_request: - branches: - - main env: REGISTRY: ghcr.io @@ -34,10 +29,6 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Extract branch name - id: branch - run: echo "name=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT - - name: Extract metadata id: meta uses: docker/metadata-action@v5 @@ -54,7 +45,5 @@ jobs: push: ${{ github.event_name != 'pull_request' }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} - build-args: | - OPENSOURCE_SERVER_BRANCH=${{ steps.branch.outputs.name }} cache-from: type=gha cache-to: type=gha,mode=max diff --git a/Dockerfile b/Dockerfile index c0a3a2df..7920f89b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,7 +23,7 @@ RUN apt update && apt -y install curl gnupg2 ca-certificates lsb-release debian- && cat /etc/apt/preferences.d/99nginx \ && apt update \ && apt install -y nginx ssl-cert \ - && systemctl enable nginx + && echo 'disable nginx-debug.service' >/etc/systemd/system-preset/00-nginx.preset # Install DNSMasq and configure it to only get its config from our pull-config RUN apt update && apt -y install dnsmasq && systemctl enable dnsmasq @@ -39,19 +39,18 @@ ARG LEGO_VERSION=v4.28.1 RUN curl -fsSL "https://github.com/go-acme/lego/releases/download/${LEGO_VERSION}/lego_${LEGO_VERSION}_linux_amd64.tar.gz" \ | tar -xz -C /usr/local/bin lego +# Install Postgres 18 from the PGDG repository +RUN apt update && apt -y install postgresql-common \ + && /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y \ + && apt -y install postgresql-18 + +# We install the nodesource repo for newer versions of NPM, fixing compatibility +# with unprivileged containers. This sets up 24.x, which is the current LTS +RUN curl -fsSL https://deb.nodesource.com/setup_24.x | bash - + # Install prerequisites: git for updating the software, make and npm for installation # and management. -RUN apt update && apt -y install git make npm - -# Install the software. We include the .git directory so that the software can -# update itself without replacing the entire container. -ARG OPENSOURCE_SERVER_BRANCH=main -RUN git clone \ - --branch=${OPENSOURCE_SERVER_BRANCH} \ - https://github.com/mieweb/opensource-server.git \ - /opt/opensource-server \ - && cd /opt/opensource-server \ - && make install +RUN apt update && apt -y install git make nodejs sudo # Install the ldap-gateway package ARG LDAP_GATEWAY_BRANCH=main @@ -67,6 +66,12 @@ RUN git clone \ && cp /opt/ldap-gateway/nfpm/systemd/ldap-gateway.service /etc/systemd/system/ldap-gateway.service \ && systemctl enable ldap-gateway +# Install the software. We include the .git directory so that the software can +# update itself without replacing the entire container. +COPY . 
/opt/opensource-server +RUN cd /opt/opensource-server \ + && make install + # Tag the exposed ports for services handled by the container # NGINX (http, https, quic) EXPOSE 80 @@ -81,4 +86,4 @@ EXPOSE 686 # Configure systemd to run properly in a container. This isn't necessary for LXC # in Proxmox, but is useful for testing with Docker directly. STOPSIGNAL SIGRTMIN+3 -ENTRYPOINT [ "/sbin/init" ] \ No newline at end of file +ENTRYPOINT [ "/sbin/init" ] diff --git a/Makefile b/Makefile index 376c0564..be8b61e2 100644 --- a/Makefile +++ b/Makefile @@ -12,13 +12,15 @@ help: install: install-create-container install-pull-config install-docs +SYSTEMD_DIR := create-a-container/systemd +SERVICES := $(wildcard $(SYSTEMD_DIR)/*.service) install-create-container: - cd create-a-container && npm install --production - cd create-a-container && npm run db:migrate - install -m644 -oroot -groot create-a-container/systemd/container-creator.service /etc/systemd/system/container-creator.service + cd create-a-container && npm install --omit=dev + install -m 644 -o root -g root $(SERVICES) /etc/systemd/system/ systemctl daemon-reload || true - systemctl enable container-creator.service - systemctl start container-creator.service || true + @for service in $(notdir $(SERVICES)); do \ systemctl enable $$service; \ done install-pull-config: cd pull-config && bash install.sh diff --git a/create-a-container/README.md b/create-a-container/README.md index a89352e1..95894d09 100644 --- a/create-a-container/README.md +++ b/create-a-container/README.md @@ -7,13 +7,17 @@ A web application for managing LXC container creation, configuration, and lifecy ```mermaid erDiagram Node ||--o{ Container : "hosts" + Node ||--o{ Volume : "stores" Container ||--o{ Service : "exposes" + Container ||--o{ ContainerVolume : "mounts" + ContainerVolume }o--|| Volume : "references" Node { int id PK string name UK "Proxmox node name" string apiUrl "Proxmox API URL" boolean tlsVerify "Verify TLS certificates" + int placeholderCtId "VMID for volume storage" datetime createdAt datetime updatedAt } @@ -22,16 +26,39 @@ erDiagram int id PK string hostname UK "FQDN hostname" string username "Owner username" - string osRelease "OS distribution" + string status "pending,creating,running,failed" + string template "Template name" + int creationJobId FK "References Job" int nodeId FK "References Node" int containerId UK "Proxmox VMID" - string macAddress UK "MAC address" - string ipv4Address UK "IPv4 address" + string macAddress UK "MAC address (nullable)" + string ipv4Address UK "IPv4 address (nullable)" string aiContainer "Node type flag" datetime createdAt datetime updatedAt } + Volume { + int id PK + string name "User-friendly name" + string username "Owner username" + string proxmoxVolume "Proxmox reference" + int sizeGb "Size in GB" + int siteId FK "References Site" + int nodeId FK "References Node" + datetime createdAt + datetime updatedAt + } + + ContainerVolume { + int id PK + int containerId FK "References Container" + int volumeId FK "References Volume" + string mountPath "Mount point path" + datetime createdAt + datetime updatedAt + } + Service { int id PK int containerId FK "References Container" @@ -49,6 +76,9 @@ erDiagram - `(Node.name)` - Unique - `(Container.hostname)` - Unique - `(Container.nodeId, Container.containerId)` - Unique (same VMID can exist on different nodes) +- `(Volume.username, Volume.name, Volume.siteId)` - Unique (volume names unique per user per site) +- `(ContainerVolume.containerId, ContainerVolume.volumeId)` - 
Unique (one attachment per volume per container) +- `(ContainerVolume.containerId, ContainerVolume.mountPath)` - Unique (mount paths unique per container) - `(Service.externalHostname)` - Unique when type='http' - `(Service.type, Service.externalPort)` - Unique when type='tcp' or type='udp' @@ -56,6 +86,8 @@ erDiagram - **User Authentication** - Proxmox VE authentication integration - **Container Management** - Create, list, and track LXC containers +- **Docker/OCI Support** - Pull and deploy containers from Docker Hub, GHCR, or any OCI registry +- **Persistent Volumes** - Named volumes that survive container deletion for data persistence - **Service Registry** - Track HTTP/TCP/UDP services running on containers - **Dynamic Nginx Config** - Generate nginx reverse proxy configurations on-demand - **Real-time Progress** - SSE (Server-Sent Events) for container creation progress @@ -206,12 +238,14 @@ List all containers for authenticated user Display container creation form #### `POST /containers` -Create or register a container -- **Query Parameter**: `init` (boolean) - If true, requires auth and spawns container creation -- **Body (init=true)**: `{ hostname, osRelease, httpPort, aiContainer }` -- **Body (init=false)**: Container registration data (for scripts) -- **Returns (init=true)**: Redirect to status page -- **Returns (init=false)**: `{ containerId, message }` +Create a container asynchronously via a background job +- **Body**: `{ hostname, template, customTemplate, services }` where: + - `hostname`: Container hostname + - `template`: Template selection in format "nodeName,vmid" OR "custom" for Docker images + - `customTemplate`: Docker image reference when template="custom" (e.g., `nginx`, `nginx:alpine`, `myorg/myapp:v1`, `ghcr.io/org/image:tag`) + - `services`: Object of service definitions +- **Returns**: Redirect to containers list with flash message +- **Process**: Creates pending container, services, and job in a single transaction. Docker image references are normalized to full format (`host/org/image:tag`). The job-runner executes the actual Proxmox operations. #### `DELETE /containers/:id` (Auth Required) Delete a container from both Proxmox and the database @@ -402,6 +436,75 @@ SELECT id, status FROM Jobs WHERE id = <id>; - Add batching or file-based logs for high-volume output to reduce DB pressure - Implement job timeout/deadline and automatic cancellation +### Volume Management Routes + +#### `GET /sites/:siteId/volumes` (Auth Required) +List all volumes owned by the authenticated user in a site +- **Returns**: HTML page with volume list + +#### `GET /sites/:siteId/volumes/new` (Auth Required) +Display volume creation form +- **Returns**: HTML page with form + +#### `POST /sites/:siteId/volumes` (Auth Required) +Create a new persistent volume +- **Body**: `{ name, nodeId }` + - `name`: Volume name (alphanumeric, dash, underscore only) + - `nodeId`: Node where the volume should be created +- **Process**: + 1. Allocates disk on the node's storage + 2. Attaches to the node's placeholder container + 3. Creates Volume record in database +- **Returns**: Redirect to volumes list + +#### `DELETE /sites/:siteId/volumes/:id` (Auth Required) +Delete a volume permanently +- **Path Parameter**: `id` - Volume database ID +- **Authorization**: User can only delete their own volumes +- **Validation**: Volume must not be attached to any container +- **Process**: + 1. Detaches from placeholder container + 2. Deletes disk from Proxmox storage + 3. 
Removes Volume record from database +- **Returns**: `{ success: true, message: "Volume deleted successfully" }` +- **Errors**: + - `400` - Volume is currently attached to a container + - `403` - User doesn't own the volume + - `404` - Volume not found + +### Volume Attachment + +Volumes can be attached to containers during container creation: + +#### During `POST /sites/:siteId/containers` +- **Additional Body Fields**: + - `volumes`: Array of volume attachments + - `volumes[N][volumeId]`: Volume ID to attach + - `volumes[N][mountPath]`: Mount point inside container (e.g., `/data`) +- **Process**: + 1. Validates all volumes exist and are owned by user + 2. Validates volumes are on the same node as the target container + 3. Creates container and ContainerVolume records + 4. Job runner moves volumes from placeholder to new container +- **Note**: Cross-node volume attachment requires manual migration + +#### During `DELETE /sites/:siteId/containers/:id` +When a container with attached volumes is deleted: +1. All attached volumes are transferred to the placeholder container +2. ContainerVolume records are deleted +3. Volume records are preserved (data persists) +4. Volumes can be reattached to new containers + +### Placeholder Container + +Each Proxmox node has a "placeholder container" for volume storage: + +- **Purpose**: Holds volumes not attached to user containers +- **Auto-creation**: Created when node is registered +- **Configuration**: Minimal Alpine, 16MB RAM, no network, protection enabled +- **VMID**: Stored in `Node.placeholderCtId` +- **Never started**: Exists only to own volumes + ### Configuration Routes #### `GET /nginx.conf` @@ -430,8 +533,11 @@ Test email configuration (development/testing) id INT PRIMARY KEY AUTO_INCREMENT hostname VARCHAR(255) UNIQUE NOT NULL username VARCHAR(255) NOT NULL -osRelease VARCHAR(255) -containerId INT UNSIGNED UNIQUE +status VARCHAR(20) NOT NULL DEFAULT 'pending' +template VARCHAR(255) +creationJobId INT FOREIGN KEY REFERENCES Jobs(id) +nodeId INT FOREIGN KEY REFERENCES Nodes(id) +containerId INT UNSIGNED NOT NULL macAddress VARCHAR(17) UNIQUE ipv4Address VARCHAR(45) UNIQUE aiContainer VARCHAR(50) DEFAULT 'N' diff --git a/create-a-container/bin/create-container.js b/create-a-container/bin/create-container.js new file mode 100755 index 00000000..e20cf082 --- /dev/null +++ b/create-a-container/bin/create-container.js @@ -0,0 +1,653 @@ +#!/usr/bin/env node +/** + * create-container.js + * + * Background job script that performs the actual Proxmox container creation. + * This script is executed by the job-runner after a pending container record + * has been created in the database. + * + * Usage: node bin/create-container.js --container-id=<id> + * + * The script will: + * 1. Load the container record from the database + * 2. Either clone a Proxmox template OR pull a Docker image and create from it + * 3. Configure the container (cores, memory, network) + * 4. Start the container + * 5. Query MAC address from Proxmox config + * 6. Query IP address from Proxmox interfaces API + * 7. Update the container record with MAC, IP, and status='running' + * + * Docker images are detected by the presence of '/' in the template field. + * Format: host/org/image:tag (e.g., docker.io/library/nginx:latest) + * + * All output is logged to STDOUT for capture by the job-runner. + * Exit code 0 = success, non-zero = failure. 
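+ *
+ * For example, if the pending container's database row had id 42 (a
+ * hypothetical value), the job-runner would invoke:
+ *
+ *   node bin/create-container.js --container-id=42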
+ */ + +const path = require('path'); +const https = require('https'); + +// Load models from parent directory +const db = require(path.join(__dirname, '..', 'models')); +const { Container, Node, Site, Volume, ContainerVolume } = db; + +// Load utilities +const { parseArgs } = require(path.join(__dirname, '..', 'utils', 'cli')); + +/** + * Fetch JSON from a URL with optional headers + * @param {string} url - The URL to fetch + * @param {object} headers - Optional headers + * @returns {Promise<object>} Parsed JSON response + */ +function fetchJson(url, headers = {}) { + return new Promise((resolve, reject) => { + const req = https.get(url, { headers }, (res) => { + let data = ''; + res.on('data', chunk => data += chunk); + res.on('end', () => { + if (res.statusCode >= 400) { + reject(new Error(`HTTP ${res.statusCode}: ${data}`)); + } else { + try { + resolve(JSON.parse(data)); + } catch (e) { + reject(new Error(`Failed to parse JSON: ${e.message}`)); + } + } + }); + }); + req.on('error', reject); + }); +} + +/** + * Get the digest (sha256 hash) of a Docker/OCI image from the registry + * Handles both single-arch and multi-arch (manifest list) images + * @param {string} registry - Registry hostname (e.g., 'docker.io') + * @param {string} repo - Repository (e.g., 'library/nginx') + * @param {string} tag - Tag (e.g., 'latest') + * @returns {Promise<string>} Short digest (first 12 chars of sha256) + */ +async function getImageDigest(registry, repo, tag) { + let headers = {}; + + // Docker Hub requires auth token + if (registry === 'docker.io' || registry === 'registry-1.docker.io') { + const tokenUrl = `https://auth.docker.io/token?service=registry.docker.io&scope=repository:${repo}:pull`; + const tokenData = await fetchJson(tokenUrl); + headers['Authorization'] = `Bearer ${tokenData.token}`; + } + + const registryHost = registry === 'docker.io' ? 'registry-1.docker.io' : registry; + + // Fetch manifest - accept both single manifest and manifest list + headers['Accept'] = [ + 'application/vnd.docker.distribution.manifest.v2+json', + 'application/vnd.oci.image.manifest.v1+json', + 'application/vnd.docker.distribution.manifest.list.v2+json', + 'application/vnd.oci.image.index.v1+json' + ].join(', '); + + const manifestUrl = `https://${registryHost}/v2/${repo}/manifests/${tag}`; + let manifest = await fetchJson(manifestUrl, headers); + + // Handle manifest list (multi-arch) - select amd64/linux + if (manifest.manifests && Array.isArray(manifest.manifests)) { + const amd64Manifest = manifest.manifests.find(m => + m.platform?.architecture === 'amd64' && m.platform?.os === 'linux' + ); + if (!amd64Manifest) { + throw new Error('No amd64/linux manifest found in manifest list'); + } + + // Fetch the actual manifest for amd64 + headers['Accept'] = 'application/vnd.docker.distribution.manifest.v2+json, application/vnd.oci.image.manifest.v1+json'; + const archManifestUrl = `https://${registryHost}/v2/${repo}/manifests/${amd64Manifest.digest}`; + manifest = await fetchJson(archManifestUrl, headers); + } + + // Get config digest from manifest + const configDigest = manifest.config?.digest; + if (!configDigest) { + throw new Error('No config digest in manifest'); + } + + // Return short hash (sha256:abc123... -> abc123...) 
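+  // (A 12-character prefix is sufficient here: the digest is only used to
+  //  build unique cached-image filenames for cache busting, not for
+  //  cryptographic verification.)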
+ const hash = configDigest.replace('sha256:', ''); + return hash.substring(0, 12); +} + +/** + * Check if a template is a Docker image reference (contains '/') + * @param {string} template - The template string + * @returns {boolean} True if Docker image, false if Proxmox template + */ +function isDockerImage(template) { + return template.includes('/'); +} + +/** + * Parse a normalized Docker image reference into components + * Format: host/org/image:tag + * @param {string} ref - The normalized Docker reference + * @returns {object} Parsed components: { registry, namespace, image, tag } + */ +function parseDockerRef(ref) { + // Split off tag + const [imagePart, tag] = ref.split(':'); + const parts = imagePart.split('/'); + + // Format is always host/org/image after normalization + const registry = parts[0]; + const image = parts[parts.length - 1]; + const namespace = parts.slice(1, -1).join('/'); + + return { registry, namespace, image, tag }; +} + +/** + * Generate a filename for a pulled Docker image + * Replaces special chars with underscores, includes digest for cache busting + * Note: Proxmox automatically appends .tar, so we don't include it here + * @param {object} parsed - Parsed Docker ref components + * @param {string} digest - Short digest hash + * @returns {string} Sanitized filename (e.g., "docker.io_library_nginx_latest_abc123def456") + */ +function generateImageFilename(parsed, digest) { + const { registry, namespace, image, tag } = parsed; + const sanitized = `${registry}_${namespace}_${image}_${tag}_${digest}`.replace(/[/:]/g, '_'); + return sanitized; +} + +/** + * Main function + */ +async function main() { + const args = parseArgs(); + + if (!args['container-id']) { + console.error('Usage: node create-container.js --container-id=<id>'); + process.exit(1); + } + + const containerId = parseInt(args['container-id'], 10); + console.log(`Starting container creation for container ID: ${containerId}`); + + // Parse new volume arguments + const newVolumes = []; + for (const key in args) { + if (key === 'new-volume') { + const values = Array.isArray(args[key]) ? args[key] : [args[key]]; + for (const val of values) { + try { + newVolumes.push(JSON.parse(decodeURIComponent(val))); + } catch (e) { + console.error(`Failed to parse new-volume argument: ${val}`); + } + } + } + } + + if (newVolumes.length > 0) { + console.log(`Will create ${newVolumes.length} new volume(s)`); + } + + // Load the container record with its node and site + const container = await Container.findByPk(containerId, { + include: [{ + model: Node, + as: 'node', + include: [{ + model: Site, + as: 'site' + }] + }] + }); + + if (!container) { + console.error(`Container with ID ${containerId} not found`); + process.exit(1); + } + + if (container.status !== 'pending') { + console.error(`Container is not in pending status (current: ${container.status})`); + process.exit(1); + } + + const node = container.node; + + if (!node) { + console.error('Container has no associated node'); + process.exit(1); + } + + const site = node.site; + + if (!site) { + console.error('Node has no associated site'); + process.exit(1); + } + + console.log(`Container: ${container.hostname}`); + console.log(`Node: ${node.name}`); + console.log(`Site: ${site.name} (${site.internalDomain})`); + console.log(`Template: ${container.template}`); + + const isDocker = isDockerImage(container.template); + console.log(`Template type: ${isDocker ? 
'Docker image' : 'Proxmox template'}`); + + try { + // Update status to 'creating' + await container.update({ status: 'creating' }); + console.log('Status updated to: creating'); + + // Get the Proxmox API client + const client = await node.api(); + console.log('Proxmox API client initialized'); + + // Allocate VMID right before creating to minimize race condition window + console.log('Allocating VMID from Proxmox...'); + const vmid = await client.nextId(); + console.log(`Allocated VMID: ${vmid}`); + + if (isDocker) { + // Docker image: pull from OCI registry, then create container + const parsed = parseDockerRef(container.template); + console.log(`Docker image: ${parsed.registry}/${parsed.namespace}/${parsed.image}:${parsed.tag}`); + + const storage = node.imageStorage || 'local'; + console.log(`Using storage: ${storage}`); + + // Get image digest from registry to create unique filename + const repo = parsed.namespace ? `${parsed.namespace}/${parsed.image}` : parsed.image; + console.log(`Fetching digest for ${parsed.registry}/${repo}:${parsed.tag}...`); + const digest = await getImageDigest(parsed.registry, repo, parsed.tag); + console.log(`Image digest: ${digest}`); + + const filename = generateImageFilename(parsed, digest); + console.log(`Target filename: ${filename}`); + + // Check if image already exists in storage + const existingContents = await client.storageContents(node.name, storage, 'vztmpl'); + const expectedVolid = `${storage}:vztmpl/${filename}.tar`; + const imageExists = existingContents.some(item => item.volid === expectedVolid); + + if (imageExists) { + console.log(`Image already exists in storage: ${expectedVolid}`); + } else { + // Pull the image from OCI registry + const imageRef = container.template; + console.log(`Pulling image ${imageRef}...`); + const pullUpid = await client.pullOciImage(node.name, storage, { + reference: imageRef, + filename + }); + console.log(`Pull task started: ${pullUpid}`); + + // Wait for pull to complete + await client.waitForTask(node.name, pullUpid); + console.log('Image pulled successfully'); + } + + // Create container from the pulled image (Proxmox adds .tar to the filename) + console.log(`Creating container from ${filename}.tar...`); + const ostemplate = `${storage}:vztmpl/${filename}.tar`; + const createUpid = await client.createLxc(node.name, { + vmid, + hostname: container.hostname, + ostemplate, + description: `Created from Docker image ${container.template}`, + cores: 4, + features: 'nesting=1,keyctl=1,fuse=1', + memory: 4096, + net0: 'name=eth0,ip=dhcp,bridge=vmbr0,host-managed=1', + searchdomain: site.internalDomain, + swap: 0, + onboot: 1, + tags: container.username, + unprivileged: 1, + storage: 'local-lvm' + }); + console.log(`Create task started: ${createUpid}`); + + // Wait for create to complete + await client.waitForTask(node.name, createUpid); + console.log('Container created successfully'); + + } else { + // Proxmox template: clone existing container + console.log(`Looking for template: ${container.template}`); + const templates = await client.getLxcTemplates(node.name); + const templateContainer = templates.find(t => t.name === container.template); + + if (!templateContainer) { + throw new Error(`Template "${container.template}" not found on node ${node.name}`); + } + + const templateVmid = templateContainer.vmid; + console.log(`Found template VMID: ${templateVmid}`); + + // Clone the template + console.log(`Cloning template ${templateVmid} to VMID ${vmid}...`); + const cloneUpid = await client.cloneLxc(node.name, 
templateVmid, vmid, { + hostname: container.hostname, + description: `Cloned from template ${container.template}`, + full: 1 + }); + console.log(`Clone task started: ${cloneUpid}`); + + // Wait for clone to complete + await client.waitForTask(node.name, cloneUpid); + console.log('Clone completed successfully'); + + // Configure the container (Docker containers are configured at creation time) + console.log('Configuring container...'); + await client.updateLxcConfig(node.name, vmid, { + cores: 4, + features: 'nesting=1,keyctl=1,fuse=1', + memory: 4096, + net0: 'name=eth0,ip=dhcp,bridge=vmbr0', + searchdomain: site.internalDomain, + swap: 0, + onboot: 1, + tags: container.username + }); + console.log('Container configured'); + } + + // Apply environment variables and entrypoint + // First read defaults from the image, then merge with user-specified values + const defaultConfig = await client.lxcConfig(node.name, vmid); + const defaultEntrypoint = defaultConfig['entrypoint'] || null; + const defaultEnvStr = defaultConfig['env'] || null; + + // Parse default env vars + let mergedEnvVars = {}; + if (defaultEnvStr) { + const pairs = defaultEnvStr.split('\0'); + for (const pair of pairs) { + const eqIndex = pair.indexOf('='); + if (eqIndex > 0) { + mergedEnvVars[pair.substring(0, eqIndex)] = pair.substring(eqIndex + 1); + } + } + } + + // Merge user-specified env vars (user values override defaults) + const userEnvVars = container.environmentVars ? JSON.parse(container.environmentVars) : {}; + mergedEnvVars = { ...mergedEnvVars, ...userEnvVars }; + + // Use user entrypoint if specified, otherwise keep default + const finalEntrypoint = container.entrypoint || defaultEntrypoint; + + // Build config to apply + const envConfig = {}; + if (finalEntrypoint) { + envConfig.entrypoint = finalEntrypoint; + } + if (Object.keys(mergedEnvVars).length > 0) { + envConfig.env = Object.entries(mergedEnvVars) + .map(([key, value]) => `${key}=${value}`) + .join('\0'); + } + + if (Object.keys(envConfig).length > 0) { + console.log('Applying environment variables and entrypoint...'); + if (defaultEntrypoint) console.log(`Default entrypoint: ${defaultEntrypoint}`); + if (defaultEnvStr) console.log(`Default env vars: ${Object.keys(mergedEnvVars).length - Object.keys(userEnvVars).length} from image`); + if (Object.keys(userEnvVars).length > 0) console.log(`User env vars: ${Object.keys(userEnvVars).length} overrides`); + await client.updateLxcConfig(node.name, vmid, envConfig); + console.log('Environment/entrypoint configuration applied'); + } + + // Store the VMID now that creation succeeded + await container.update({ containerId: vmid }); + console.log(`Container VMID ${vmid} stored in database`); + + // Create new volumes if any were requested + if (newVolumes.length > 0) { + console.log(`Creating ${newVolumes.length} new volume(s)...`); + + // Check node has placeholder + if (!node.placeholderCtId) { + throw new Error(`Node ${node.name} does not have a placeholder container. 
Create one first.`); + } + + // Find storage for volumes + const storages = await client.datastores(node.name, 'rootdir', true); + if (storages.length === 0) { + throw new Error('No storage available for volumes'); + } + const volumeStorage = storages[0].storage; + + for (const volSpec of newVolumes) { + console.log(` Creating volume "${volSpec.name}" (${volSpec.sizeGb}GB)...`); + + // Allocate the disk on the placeholder + const volumeId = await client.allocateDisk( + node.name, + volumeStorage, + node.placeholderCtId, + volSpec.sizeGb + ); + console.log(` Allocated: ${volumeId}`); + + // Create volume record in database + const volume = await Volume.create({ + name: volSpec.name, + username: container.username, + proxmoxVolume: volumeId, + sizeGb: volSpec.sizeGb, + siteId: site.id, + nodeId: node.id + }); + console.log(` Volume record created: ID ${volume.id}`); + + // Attach to placeholder temporarily (disable protection first) + await client.updateLxcConfig(node.name, node.placeholderCtId, { protection: 0 }); + const placeholderMp = await client.findNextMountPoint(node.name, node.placeholderCtId); + const placeholderMountPath = `/${container.username}/${volSpec.name}`; + await client.updateLxcConfig(node.name, node.placeholderCtId, { + [placeholderMp]: `${volumeId},mp=${placeholderMountPath}` + }); + await client.updateLxcConfig(node.name, node.placeholderCtId, { protection: 1 }); + console.log(` Attached to placeholder at ${placeholderMp}`); + + // Create ContainerVolume record for attachment + await ContainerVolume.create({ + containerId: container.id, + volumeId: volume.id, + mountPath: volSpec.mountPath + }); + console.log(` Queued for attachment at ${volSpec.mountPath}`); + } + } + + // Attach volumes if any were requested (including newly created ones) + const volumeAttachments = await ContainerVolume.findAll({ + where: { containerId: container.id }, + include: [{ + model: Volume, + as: 'volume', + include: [{ model: Node, as: 'node' }] + }] + }); + + if (volumeAttachments.length > 0) { + console.log(`Attaching ${volumeAttachments.length} volume(s)...`); + + for (const attachment of volumeAttachments) { + const volume = attachment.volume; + const mountPath = attachment.mountPath; + + console.log(` Attaching volume "${volume.name}" at ${mountPath}`); + + // Check if volume is on same node + if (volume.nodeId !== node.id) { + console.log(` Volume is on different node (${volume.node.name}), migrating to ${node.name}...`); + + // Get the source node + const sourceNode = await Node.findByPk(volume.nodeId); + if (!sourceNode || !sourceNode.placeholderCtId) { + throw new Error(`Source node for volume "${volume.name}" not found or has no placeholder`); + } + + // Get API client for source node + const sourceClient = await sourceNode.api(); + + // Find the volume on source placeholder + const sourceMp = await sourceClient.findMountPointForVolume(sourceNode.name, sourceNode.placeholderCtId, volume.proxmoxVolume); + if (!sourceMp) { + throw new Error(`Volume "${volume.name}" not found on source placeholder container`); + } + + // Strategy: Move volume to a temporary minimal container, migrate it, then extract + // For now, we'll use a simpler approach: create the volume fresh on target and warn about data loss + // TODO: Implement proper storage-level migration when Proxmox supports it better + + // Alternative: Use pct move command with --target-node option (requires shared storage) + // For local storage, we need to: + // 1. Create a temp container with just this volume on source + // 2. 
Migrate the temp container to target + // 3. Move volume from temp to target placeholder + // 4. Delete temp container + + // Check if target node has placeholder + if (!node.placeholderCtId) { + throw new Error(`Target node ${node.name} does not have a placeholder container configured`); + } + + // For MVP: Use backup/restore approach through shared storage or error out + // This is complex and depends on infrastructure setup + throw new Error( + `Cross-node volume migration for "${volume.name}" requires manual intervention. ` + + `Volume is on node "${sourceNode.name}" but container is being created on "${node.name}". ` + + `Please create the container on the same node as the volume, or migrate the volume manually using Proxmox.` + ); + } + + // Find the mount point on the placeholder container + const placeholderCtId = node.placeholderCtId; + if (!placeholderCtId) { + throw new Error(`Node ${node.name} does not have a placeholder container configured`); + } + + const sourceMp = await client.findMountPointForVolume(node.name, placeholderCtId, volume.proxmoxVolume); + if (!sourceMp) { + throw new Error(`Volume "${volume.name}" not found on placeholder container`); + } + + // Find next available mount point on target container + const targetMp = await client.findNextMountPoint(node.name, vmid); + + // Move volume from placeholder to new container (disable protection first) + await client.updateLxcConfig(node.name, placeholderCtId, { protection: 0 }); + console.log(` Moving ${sourceMp} from placeholder CT ${placeholderCtId} to ${targetMp} on CT ${vmid}`); + const moveUpid = await client.moveVolume(node.name, placeholderCtId, sourceMp, vmid, targetMp); + await client.waitForTask(node.name, moveUpid); + await client.updateLxcConfig(node.name, placeholderCtId, { protection: 1 }); + + // Update the mount path on the target container + await client.updateLxcConfig(node.name, vmid, { + [targetMp]: `${volume.proxmoxVolume},mp=${mountPath}` + }); + + console.log(` Volume "${volume.name}" attached at ${mountPath}`); + } + + console.log('All volumes attached successfully'); + } + + // Start the container + console.log('Starting container...'); + const startUpid = await client.startLxc(node.name, vmid); + console.log(`Start task started: ${startUpid}`); + + // Wait for start to complete + await client.waitForTask(node.name, startUpid); + console.log('Container started successfully'); + + // Get MAC address from config + const macAddress = await client.getLxcMacAddress(node.name, vmid); + + if (!macAddress) { + throw new Error('Could not extract MAC address from container configuration'); + } + + // Read back entrypoint and environment variables from config + console.log('Querying container configuration...'); + const config = await client.lxcConfig(node.name, vmid); + const actualEntrypoint = config['entrypoint'] || null; + const actualEnv = config['env'] || null; + + // Parse NUL-separated env string back to JSON object + let environmentVars = {}; + if (actualEnv) { + const pairs = actualEnv.split('\0'); + for (const pair of pairs) { + const eqIndex = pair.indexOf('='); + if (eqIndex > 0) { + const key = pair.substring(0, eqIndex); + const value = pair.substring(eqIndex + 1); + environmentVars[key] = value; + } + } + } + + if (actualEntrypoint) { + console.log(`Entrypoint: ${actualEntrypoint}`); + } + if (Object.keys(environmentVars).length > 0) { + console.log(`Environment variables: ${Object.keys(environmentVars).length} vars`); + } + + // Get IP address from Proxmox interfaces API + const 
ipv4Address = await client.getLxcIpAddress(node.name, vmid); + + if (!ipv4Address) { + throw new Error('Could not get IP address from Proxmox interfaces API'); + } + + // Update the container record + console.log('Updating container record...'); + await container.update({ + macAddress, + ipv4Address, + entrypoint: actualEntrypoint, + environmentVars: JSON.stringify(environmentVars), + status: 'running' + }); + + console.log('Container creation completed successfully!'); + console.log(` Hostname: ${container.hostname}`); + console.log(` VMID: ${vmid}`); + console.log(` MAC: ${macAddress}`); + console.log(` IP: ${ipv4Address}`); + console.log(` Status: running`); + + process.exit(0); + } catch (err) { + console.error('Container creation failed:', err.message); + + // Log axios error details if available + if (err.response?.data) { + console.error('API Error Details:', JSON.stringify(err.response.data, null, 2)); + } + + // Update status to failed + try { + await container.update({ status: 'failed' }); + console.log('Status updated to: failed'); + } catch (updateErr) { + console.error('Failed to update container status:', updateErr.message); + } + + process.exit(1); + } +} + +// Run the main function +main().catch(err => { + console.error('Unhandled error:', err); + process.exit(1); +}); diff --git a/create-a-container/bin/create-placeholder.js b/create-a-container/bin/create-placeholder.js new file mode 100644 index 00000000..aecc9642 --- /dev/null +++ b/create-a-container/bin/create-placeholder.js @@ -0,0 +1,206 @@ +#!/usr/bin/env node + +/** + * Create placeholder container for volume storage on a Proxmox node + * + * Usage: node bin/create-placeholder.js --node-id=<id> + * + * This script: + * 1. Checks for an existing Alpine template + * 2. Downloads Alpine template if not present + * 3. Creates a minimal placeholder container + * 4. 
Updates the Node record with the placeholder VMID + */ + +const path = require('path'); +require('dotenv').config({ path: path.join(__dirname, '..', '.env') }); + +const { Node } = require('../models'); + +async function main() { + // Parse arguments + const args = process.argv.slice(2); + const nodeIdArg = args.find(a => a.startsWith('--node-id=')); + + if (!nodeIdArg) { + console.error('Usage: node bin/create-placeholder.js --node-id=<id>'); + process.exit(1); + } + + const nodeId = parseInt(nodeIdArg.split('=')[1], 10); + + console.log(`Creating placeholder container for node ID ${nodeId}`); + + // Load node + const node = await Node.findByPk(nodeId); + if (!node) { + console.error(`Node ${nodeId} not found`); + process.exit(1); + } + + console.log(`Node: ${node.name}`); + + if (!node.apiUrl || !node.tokenId || !node.secret) { + console.error('Node does not have API credentials configured'); + process.exit(1); + } + + if (node.placeholderCtId) { + console.log(`Node already has placeholder container: CT ${node.placeholderCtId}`); + + // Verify it still exists + const client = await node.api(); + const exists = await client.lxcExists(node.name, node.placeholderCtId); + if (exists) { + console.log('Placeholder container verified'); + process.exit(0); + } + console.log('Placeholder container no longer exists, recreating...'); + } + + const client = await node.api(); + + // Step 1: Find template storage + console.log('Finding template storage...'); + const storages = await client.datastores(node.name, 'vztmpl', true); + if (storages.length === 0) { + console.error('No storage available for container templates'); + process.exit(1); + } + + const templateStorage = storages[0].storage; + console.log(`Using template storage: ${templateStorage}`); + + // Step 2: Look for existing template + console.log('Checking for existing templates...'); + let ostemplate = null; + + for (const storage of storages) { + const contents = await client.storageContents(node.name, storage.storage, 'vztmpl'); + console.log(`Storage ${storage.storage}: ${contents.length} templates`); + + const alpineTemplate = contents.find(c => c.volid.toLowerCase().includes('alpine')); + if (alpineTemplate) { + ostemplate = alpineTemplate.volid; + console.log(`Found Alpine template: ${ostemplate}`); + break; + } + // Fallback to any template + if (!ostemplate && contents.length > 0) { + ostemplate = contents[0].volid; + console.log(`Fallback to template: ${ostemplate}`); + } + } + + // Step 3: Download template if needed + if (!ostemplate) { + console.log('No templates found, downloading from Proxmox repository...'); + + const availableTemplates = await client.getAvailableTemplates(node.name); + console.log(`Repository has ${availableTemplates.length} templates available`); + + // Find Alpine LXC template + const alpineTemplate = availableTemplates.find(t => + t.template && t.template.toLowerCase().includes('alpine') && t.type === 'lxc' + ); + + // Or fallback to any small template + const targetTemplate = alpineTemplate || availableTemplates.find(t => t.type === 'lxc'); + + if (!targetTemplate) { + console.error('No LXC templates available in Proxmox repository'); + process.exit(1); + } + + console.log(`Downloading template: ${targetTemplate.template}`); + const downloadUpid = await client.downloadTemplate(node.name, templateStorage, targetTemplate.template); + console.log(`Download task: ${downloadUpid}`); + + await client.waitForTask(node.name, downloadUpid); + console.log('Template download complete'); + + // Get the template volid + 
const contents = await client.storageContents(node.name, templateStorage, 'vztmpl'); + if (contents.length === 0) { + console.error('Template download succeeded but no templates found in storage'); + process.exit(1); + } + ostemplate = contents.find(c => c.volid.toLowerCase().includes('alpine'))?.volid || contents[0].volid; + console.log(`Using template: ${ostemplate}`); + } + + // Step 4: Get next VMID + const vmid = await client.nextId(); + console.log(`Allocated VMID: ${vmid}`); + + // Step 5: Find rootfs storage that supports container directories + console.log('Finding rootfs storage...'); + const allStorages = await client.datastores(node.name, 'rootdir', true); + let rootfsStorage = null; + + // Prefer node.imageStorage if it supports rootdir + if (node.imageStorage) { + const preferred = allStorages.find(s => s.storage === node.imageStorage); + if (preferred) { + rootfsStorage = node.imageStorage; + } + } + + // Otherwise use first available storage that supports rootdir + if (!rootfsStorage && allStorages.length > 0) { + rootfsStorage = allStorages[0].storage; + } + + if (!rootfsStorage) { + console.error('No storage available that supports container rootfs (rootdir content type)'); + console.error('Available storages:', allStorages.map(s => s.storage)); + process.exit(1); + } + + console.log(`Using rootfs storage: ${rootfsStorage}`); + + // Step 6: Create placeholder container + console.log('Creating placeholder container...'); + + const createOptions = { + vmid, + ostemplate, + hostname: 'mie-volume-placeholder', + memory: 16, + swap: 0, + cores: 1, + unprivileged: 1, + start: 0, + rootfs: `${rootfsStorage}:2`, + net0: 'name=eth0,bridge=vmbr0', + protection: 1, + description: 'MIE Opensource: Volume placeholder container. DO NOT DELETE.' + }; + + console.log('Create options:', JSON.stringify(createOptions, null, 2)); + + try { + const createUpid = await client.createLxc(node.name, createOptions); + console.log(`Create task: ${createUpid}`); + await client.waitForTask(node.name, createUpid); + } catch (createErr) { + console.error('Create LXC error:', createErr.message); + if (createErr.response?.data) { + console.error('Proxmox error:', JSON.stringify(createErr.response.data, null, 2)); + } + throw createErr; + } + + // Step 7: Update node record + await node.update({ placeholderCtId: vmid }); + console.log(`Placeholder container CT ${vmid} created successfully`); + console.log('Node record updated'); + + process.exit(0); +} + +main().catch(err => { + console.error('Error:', err.message); + console.error(err.stack); + process.exit(1); +}); diff --git a/create-a-container/bin/json-to-sql.js b/create-a-container/bin/json-to-sql.js index 6f9958e4..5dba9845 100644 --- a/create-a-container/bin/json-to-sql.js +++ b/create-a-container/bin/json-to-sql.js @@ -177,7 +177,7 @@ async function run() { console.log(`Container: hostname=${hostname}`); console.log(` ipv4Address=${obj.ip}`); console.log(` username=${obj.user}`); - console.log(` osRelease=${obj.os_release}`); + console.log(` template=${obj.template || 'N/A'}`); console.log(` containerId=${obj.ctid}`); console.log(` macAddress=${obj.mac}`); if (obj.ports) { @@ -196,13 +196,12 @@ async function run() { for (const [hostname, obj] of Object.entries(data)) { // If fields missing and Proxmox creds provided, try to fill them - if ((obj.user === undefined || obj.os_release === undefined || obj.ctid === undefined || obj.mac === undefined || obj.ip === undefined) && (PROXMOX_URL && PROXMOX_USER && PROXMOX_PASSWORD)) { + if ((obj.user === 
undefined || obj.ctid === undefined || obj.mac === undefined || obj.ip === undefined) && (PROXMOX_URL && PROXMOX_USER && PROXMOX_PASSWORD)) { const pmx = await lookupProxmoxByHostname(hostname); if (pmx.ctid && obj.ctid === undefined) obj.ctid = pmx.ctid; if (pmx.mac && obj.mac === undefined) obj.mac = pmx.mac; if (pmx.ip && obj.ip === undefined) obj.ip = pmx.ip; if (pmx.user && obj.user === undefined) obj.user = pmx.user; - if (pmx.os_release && obj.os_release === undefined) obj.os_release = pmx.os_release; } // Upsert Container by hostname @@ -215,7 +214,8 @@ async function run() { hostname, ipv4Address: obj.ip, username: obj.user || '', - osRelease: obj.os_release, + status: 'running', + template: obj.template || null, containerId: obj.ctid, macAddress: obj.mac }); @@ -224,7 +224,8 @@ async function run() { await container.update({ ipv4Address: obj.ip, username: obj.user || '', - osRelease: obj.os_release, + status: container.status || 'running', + template: obj.template || container.template, containerId: obj.ctid, macAddress: obj.mac }); diff --git a/create-a-container/bin/reconfigure-container.js b/create-a-container/bin/reconfigure-container.js new file mode 100644 index 00000000..b08026fe --- /dev/null +++ b/create-a-container/bin/reconfigure-container.js @@ -0,0 +1,172 @@ +#!/usr/bin/env node +/** + * reconfigure-container.js + * + * Background job script that applies configuration changes and restarts a container. + * This script is executed by the job-runner when environment variables or entrypoint + * are changed on an existing container. + * + * Usage: node bin/reconfigure-container.js --container-id=<id> + * + * The script will: + * 1. Load the container record from the database + * 2. Apply env and entrypoint config via Proxmox API + * 3. Stop the container + * 4. Start the container + * 5. Update the container status to 'running' + * + * All output is logged to STDOUT for capture by the job-runner. + * Exit code 0 = success, non-zero = failure. 
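+ *
+ * For example, to reapply configuration to the container whose database row
+ * has id 42 (a hypothetical value):
+ *
+ *   node bin/reconfigure-container.js --container-id=42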
+ */ + +const path = require('path'); + +// Load models from parent directory +const db = require(path.join(__dirname, '..', 'models')); +const { Container, Node, Site } = db; + +// Load utilities +const { parseArgs } = require(path.join(__dirname, '..', 'utils', 'cli')); + +/** + * Main function + */ +async function main() { + const args = parseArgs(); + + if (!args['container-id']) { + console.error('Usage: node reconfigure-container.js --container-id=<id>'); + process.exit(1); + } + + const containerId = parseInt(args['container-id'], 10); + console.log(`Starting container reconfiguration for container ID: ${containerId}`); + + // Load the container record with its node and site + const container = await Container.findByPk(containerId, { + include: [{ + model: Node, + as: 'node', + include: [{ + model: Site, + as: 'site' + }] + }] + }); + + if (!container) { + console.error(`Container with ID ${containerId} not found`); + process.exit(1); + } + + if (!container.containerId) { + console.error('Container has no Proxmox VMID - cannot reconfigure'); + process.exit(1); + } + + const node = container.node; + + if (!node) { + console.error('Container has no associated node'); + process.exit(1); + } + + console.log(`Container: ${container.hostname}`); + console.log(`Node: ${node.name}`); + console.log(`VMID: ${container.containerId}`); + + try { + // Get the Proxmox API client + const client = await node.api(); + console.log('Proxmox API client initialized'); + + // Build config from environment variables and entrypoint + const lxcConfig = container.buildLxcEnvConfig(); + + if (Object.keys(lxcConfig).length > 0) { + console.log('Applying LXC configuration...'); + console.log('Config:', JSON.stringify(lxcConfig, null, 2)); + await client.updateLxcConfig(node.name, container.containerId, lxcConfig); + console.log('Configuration applied'); + } else { + console.log('No configuration changes to apply'); + } + + // Check container status before stop/start cycle + const lxcStatus = await client.getLxcStatus(node.name, container.containerId); + console.log(`Container current status: ${lxcStatus.status}`); + + // Only stop if the container is running + if (lxcStatus.status === 'running') { + console.log('Stopping container...'); + const stopUpid = await client.stopLxc(node.name, container.containerId); + console.log(`Stop task started: ${stopUpid}`); + + // Wait for stop to complete (shorter timeout for stop/start) + await client.waitForTask(node.name, stopUpid, 2000, 60000); + console.log('Container stopped'); + } else { + console.log('Container not running, skipping stop'); + } + + // Start the container + console.log('Starting container...'); + const startUpid = await client.startLxc(node.name, container.containerId); + console.log(`Start task started: ${startUpid}`); + + // Wait for start to complete + await client.waitForTask(node.name, startUpid, 2000, 60000); + console.log('Container started'); + + // Get MAC address from config (in case it wasn't captured during failed create) + const macAddress = await client.getLxcMacAddress(node.name, container.containerId); + + if (!macAddress) { + throw new Error('Could not get MAC address from container configuration'); + } + + // Get IP address from Proxmox interfaces API + const ipv4Address = await client.getLxcIpAddress(node.name, container.containerId); + + if (!ipv4Address) { + throw new Error('Could not get IP address from Proxmox interfaces API'); + } + + // Update container record with MAC/IP and running status + await container.update({ + status: 
'running', + macAddress, + ipv4Address + }); + + console.log('Status updated to: running'); + console.log(` MAC: ${macAddress}`); + console.log(` IP: ${ipv4Address}`); + + console.log('Container reconfiguration completed successfully!'); + process.exit(0); + } catch (err) { + console.error('Container reconfiguration failed:', err.message); + + // Log axios error details if available + if (err.response?.data) { + console.error('API Error Details:', JSON.stringify(err.response.data, null, 2)); + } + + // Update status to failed + try { + await container.update({ status: 'failed' }); + console.log('Status updated to: failed'); + } catch (updateErr) { + console.error('Failed to update container status:', updateErr.message); + } + + process.exit(1); + } +} + +// Run the main function +main().catch(err => { + console.error('Unhandled error:', err); + process.exit(1); +}); diff --git a/create-a-container/test-api-key.sh b/create-a-container/bin/test-api-key.sh similarity index 100% rename from create-a-container/test-api-key.sh rename to create-a-container/bin/test-api-key.sh diff --git a/create-a-container/compose.yml b/create-a-container/compose.yml new file mode 100644 index 00000000..d3a6a344 --- /dev/null +++ b/create-a-container/compose.yml @@ -0,0 +1,15 @@ +--- +services: + postgres: + image: postgres:18 + volumes: + - postgres-data:/var/lib/postgresql + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?Missing password} + POSTGRES_USER: ${POSTGRES_USER:?Missing user} + POSTGRES_DB: ${POSTGRES_DATABASE:?Missing database} + ports: + - 5432:5432 + +volumes: + postgres-data: \ No newline at end of file diff --git a/create-a-container/create-container-wrapper.sh b/create-a-container/create-container-wrapper.sh deleted file mode 100644 index 1817f58d..00000000 --- a/create-a-container/create-container-wrapper.sh +++ /dev/null @@ -1,223 +0,0 @@ -#!/bin/bash -# Wrapper for non-interactive container creation -# Reads all inputs from environment variables and validates them -# Exits with error messages if invalid/missing - -set -euo pipefail - -GH_ACTION="${GH_ACTION:-}" - -RESET="\033[0m" -BOLD="\033[1m" -MAGENTA='\033[35m' - -outputError() { - echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - echo -e "${BOLD}${MAGENTA}❌ Script Failed. Exiting... 
${RESET}" - echo -e "$1" - echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - exit 1 -} - -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" -echo -e "${BOLD}${MAGENTA}📦 MIE Container Creation Script (Wrapper)${RESET}" -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - -# Required variables, fail if not set or empty -: "${PROXMOX_USERNAME:?Environment variable PROXMOX_USERNAME is required}" -: "${PROXMOX_PASSWORD:?Environment variable PROXMOX_PASSWORD is required}" -: "${CONTAINER_NAME:?Environment variable CONTAINER_NAME is required}" -: "${LINUX_DISTRIBUTION:?Environment variable LINUX_DISTRIBUTION is required}" -: "${HTTP_PORT:?Environment variable HTTP_PORT is required}" -: "${DEPLOY_ON_START:=n}" # default to "n" if not set - -# Convert container name and linux distribution to lowercase -CONTAINER_NAME="${CONTAINER_NAME,,}" -LINUX_DISTRIBUTION="${LINUX_DISTRIBUTION,,}" -DEPLOY_ON_START="${DEPLOY_ON_START,,}" - -# Optional: AI_CONTAINER (default to "N" if not set) -AI_CONTAINER="${AI_CONTAINER:-N}" -AI_CONTAINER="${AI_CONTAINER^^}" # normalize - -# Validate allowed values -if [[ "$AI_CONTAINER" != "N" && "$AI_CONTAINER" != "PHOENIX" && "$AI_CONTAINER" != "FORTWAYNE" ]]; then - outputError "AI_CONTAINER must be one of: N, PHOENIX, FORTWAYNE." -fi - - -# Validate Proxmox credentials using your Node.js authenticateUser -USER_AUTHENTICATED=$(node /root/bin/js/runner.js authenticateUser "$PROXMOX_USERNAME" "$PROXMOX_PASSWORD") -if [ "$USER_AUTHENTICATED" != "true" ]; then - outputError "Invalid Proxmox Credentials." -fi - -echo "🎉 Proxmox user '$PROXMOX_USERNAME' authenticated." - -# Validate container name: alphanumeric + dash only -if ! [[ "$CONTAINER_NAME" =~ ^[a-z0-9-]+$ ]]; then - outputError "Invalid container name: Only lowercase letters, numbers, and dashes are allowed." -fi - -# Check if hostname already exists remotely -HOST_NAME_EXISTS=$(ssh root@10.15.20.69 "node /etc/nginx/checkHostnameRunner.js checkHostnameExists ${CONTAINER_NAME}") -if [ "$HOST_NAME_EXISTS" == "true" ]; then - outputError "Container hostname '$CONTAINER_NAME' already exists." -fi -echo "✅ Container name '$CONTAINER_NAME' is available." - -# Validate Linux distribution choice -if [[ "$LINUX_DISTRIBUTION" != "debian" && "$LINUX_DISTRIBUTION" != "rocky" ]]; then - outputError "Linux distribution must be 'debian' or 'rocky'." -fi - -# Validate HTTP_PORT: integer between 80 and 60000 -if ! [[ "$HTTP_PORT" =~ ^[0-9]+$ ]] || [ "$HTTP_PORT" -lt 80 ] || [ "$HTTP_PORT" -gt 60000 ]; then - outputError "HTTP_PORT must be a number between 80 and 60000." -fi - -echo "✅ HTTP port set to $HTTP_PORT." - -# Public key optional -if [ -n "${PUBLIC_KEY-}" ]; then - # Validate public key format (simple check) - if echo "$PUBLIC_KEY" | ssh-keygen -l -f - &>/dev/null; then - AUTHORIZED_KEYS="/root/.ssh/authorized_keys" - echo "$PUBLIC_KEY" > "$AUTHORIZED_KEYS" - systemctl restart ssh - echo "$PUBLIC_KEY" > "/root/bin/ssh/temp_pubs/key_$(shuf -i 100000-999999 -n1).pub" - sudo /root/bin/ssh/publicKeyAppendJumpHost.sh "$PUBLIC_KEY" - echo "🔐 Public key added." - else - outputError "Invalid PUBLIC_KEY format." - fi -else - echo "ℹ️ No public key provided." 
-fi - -# Protocol list handling (optional) -PROTOCOL_BASE_FILE="protocol_list_$(shuf -i 100000-999999 -n 1).txt" -PROTOCOL_FILE="/root/bin/protocols/$PROTOCOL_BASE_FILE" -touch "$PROTOCOL_FILE" - -# --- Logic for named protocols from a list (existing) --- -if [[ "${USE_OTHER_PROTOCOLS-}" == "y" || "${USE_OTHER_PROTOCOLS-}" == "Y" ]]; then - if [ -z "${OTHER_PROTOCOLS_LIST-}" ]; then - outputError "USE_OTHER_PROTOCOLS is yes but OTHER_PROTOCOLS_LIST is empty." - fi - IFS=',' read -ra PROTOCOLS <<< "$OTHER_PROTOCOLS_LIST" - for PROTOCOL_NAME in "${PROTOCOLS[@]}"; do - PROTOCOL_NAME=$(echo "$PROTOCOL_NAME" | tr '[:lower:]' '[:upper:]') - FOUND=0 - while read -r line; do - PROTOCOL_ABBRV=$(echo "$line" | awk '{print $1}') - if [[ "$PROTOCOL_ABBRV" == "$PROTOCOL_NAME" ]]; then - echo "$line" >> "$PROTOCOL_FILE" - echo "✅ Protocol $PROTOCOL_NAME added." - FOUND=1 - break - fi - done < "/root/bin/protocols/master_protocol_list.txt" - if [ "$FOUND" -eq 0 ]; then - echo "⚠️ Protocol $PROTOCOL_NAME not found, skipping." - fi - done -fi - -# --- START: Added logic for single custom port --- -# Check if the OTHER_PORT variable is set and not empty -if [ -n "${OTHER_PORT-}" ]; then - # Validate that it's an integer - if [[ "$OTHER_PORT" =~ ^[0-9]+$ ]]; then - echo "TCP $OTHER_PORT" >> "$PROTOCOL_FILE" - echo "UDP $OTHER_PORT" >> "$PROTOCOL_FILE" - echo "✅ Custom port $OTHER_PORT (TCP/UDP) added." - else - echo "⚠️ Invalid custom port specified: $OTHER_PORT. Must be an integer. Skipping." - fi -fi - -# Deploy on start must be y or n -if [[ "$DEPLOY_ON_START" != "y" && "$DEPLOY_ON_START" != "n" ]]; then - outputError "DEPLOY_ON_START must be 'y' or 'n'." -fi - -if [ "$DEPLOY_ON_START" == "y" ]; then - source /root/bin/deploy-application.sh -fi - -# Send files to hypervisor (public keys, protocols, env vars, services) -send_file_to_hypervisor() { - local LOCAL_FILE="$1" - local REMOTE_FOLDER="$2" - if [ "$REMOTE_FOLDER" != "container-env-vars" ]; then - if [ -s "$LOCAL_FILE" ]; then - sftp root@10.15.0.4 <<EOF 2> /dev/null -put $LOCAL_FILE /var/lib/vz/snippets/$REMOTE_FOLDER/ -EOF - fi - else - if [ -d "$LOCAL_FILE" ]; then - sftp root@10.15.0.4 <<EOF 2> /dev/null -put -r $LOCAL_FILE /var/lib/vz/snippets/$REMOTE_FOLDER/ -EOF - fi - fi -} - -# Example paths, set or export these in environment if used -send_file_to_hypervisor "/root/bin/ssh/temp_pubs/key_*.pub" "container-public-keys" -send_file_to_hypervisor "$PROTOCOL_FILE" "container-port-maps" -send_file_to_hypervisor "${ENV_FOLDER_PATH:-}" "container-env-vars" -send_file_to_hypervisor "${TEMP_SERVICES_FILE_PATH:-}" "container-services" -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" -echo -e "${BOLD}${MAGENTA}🚀 Starting Container Creation...${RESET}" -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - -# Safely get the basename of the temporary public key file. -KEY_BASENAME="" -# The 'find' command is safer than 'ls' for script usage. 
-KEY_FILE=$(find /root/bin/ssh/temp_pubs -type f -name "*.pub" | head -n1) - -if [[ -n "$KEY_FILE" ]]; then - KEY_BASENAME=$(basename "$KEY_FILE") -fi - -# Run your create-container.sh remotely over SSH with corrected quoting and simplified variable -ssh -t root@10.15.0.4 "bash -c \"/var/lib/vz/snippets/create-container-new.sh \ - '$CONTAINER_NAME' \ - '$GH_ACTION' \ - '$HTTP_PORT' \ - '$PROXMOX_USERNAME' \ - '$KEY_BASENAME' \ - '$PROTOCOL_BASE_FILE' \ - '$DEPLOY_ON_START' \ - '${PROJECT_REPOSITORY:-}' \ - '${PROJECT_BRANCH:-}' \ - '${PROJECT_ROOT:-}' \ - '${INSTALL_COMMAND:-}' \ - '${BUILD_COMMAND:-}' \ - '${START_COMMAND:-}' \ - '${RUNTIME_LANGUAGE:-}' \ - '${ENV_FOLDER:-}' \ - '${SERVICES_FILE:-}' \ - '$LINUX_DISTRIBUTION' \ - '${MULTI_COMPONENT:-}' \ - '${ROOT_START_COMMAND:-}' \ - '${SELF_HOSTED_RUNNER:-}' \ - '${VERSIONS_DICT:-}' \ - '$AI_CONTAINER' # Corrected: Pass the variable's value in the correct position -\"" - -# Clean up temp files -rm -f "$PROTOCOL_FILE" -rm -f /root/bin/ssh/temp_pubs/key_*.pub -rm -f "${TEMP_SERVICES_FILE_PATH:-}" -rm -rf "${ENV_FOLDER_PATH:-}" - -# Unset sensitive variables -unset CONFIRM_PASSWORD -unset PUBLIC_KEY - -echo "✅ Container creation wrapper script finished successfully." \ No newline at end of file diff --git a/create-a-container/example.env b/create-a-container/example.env index 45aa572f..c403cc30 100644 --- a/create-a-container/example.env +++ b/create-a-container/example.env @@ -1,6 +1,3 @@ -# secret used to compute express-session hash, generate randomly -SESSION_SECRET= - # dialect used by sequelize (mysql, sqlite, postgres) # you also need to configure the relevant variables for the selected dialect DATABASE_DIALECT= @@ -20,9 +17,4 @@ POSTGRES_HOST= POSTGRES_PORT= POSTGRES_USER= POSTGRES_PASSWORD= -POSTGRES_DATABASE= - -# Only used for bin/json-to-sql.js (for now) -PROXMOX_URL= -PROXMOX_USER= -PROXMOX_PASSWORD= \ No newline at end of file +POSTGRES_DATABASE= \ No newline at end of file diff --git a/create-a-container/migrations/20251104193722-convert-container-node-to-node-id.js b/create-a-container/migrations/20251104193722-convert-container-node-to-node-id.js index 078aba06..17963119 100644 --- a/create-a-container/migrations/20251104193722-convert-container-node-to-node-id.js +++ b/create-a-container/migrations/20251104193722-convert-container-node-to-node-id.js @@ -60,17 +60,13 @@ module.exports = { allowNull: true }); - // Populate node from nodeId - const [nodes, _] = await queryInterface.sequelize.query( - 'SELECT id, name FROM Nodes' - ); - for (const { id, name } of nodes) { - await queryInterface.bulkUpdate('Containers', { - node: name - }, { - nodeId: id - }); - } + // Populate node from nodeId with a single set-based UPDATE ... FROM join, replacing the per-node update loop + await queryInterface.sequelize.query(` + UPDATE "Containers" c + SET node = n.name + FROM "Nodes" n + WHERE c."nodeId" = n.id + `); // Make node NOT NULL await queryInterface.changeColumn('Containers', 'node', { diff --git a/create-a-container/migrations/20251202180408-refactor-services-to-sti.js b/create-a-container/migrations/20251202180408-refactor-services-to-sti.js index 0bd4d5a7..ebdada03 100644 --- a/create-a-container/migrations/20251202180408-refactor-services-to-sti.js +++ b/create-a-container/migrations/20251202180408-refactor-services-to-sti.js @@ -126,15 +126,48 @@ module.exports = { await queryInterface.removeColumn('Services', 'externalDomainId'); // rename tcp and udp service types to transport - await queryInterface.changeColumn('Services', 
'type', { - type: Sequelize.ENUM('http', 'transport', 'tcp', 'udp'), - allowNull: false - }); - await queryInterface.bulkUpdate('Services', { type: 'transport' }, { [Sequelize.Op.or]: [ { type: 'tcp' }, { type: 'udp' } ] }); - await queryInterface.changeColumn('Services', 'type', { - type: Sequelize.ENUM('http', 'transport'), - allowNull: false - }); + // For PostgreSQL, we need to handle ENUM modification differently + const dialect = queryInterface.sequelize.getDialect(); + + if (dialect === 'postgres') { + // Rename the existing enum to a backup name + await queryInterface.sequelize.query('ALTER TYPE "enum_Services_type" RENAME TO "enum_Services_type_old"'); + + // Create new enum with transport added + await queryInterface.sequelize.query("CREATE TYPE \"enum_Services_type\" AS ENUM ('http', 'transport', 'tcp', 'udp')"); + + // Update the column to use the new enum + await queryInterface.sequelize.query('ALTER TABLE "Services" ALTER COLUMN "type" TYPE "enum_Services_type" USING "type"::text::"enum_Services_type"'); + + // Update tcp and udp to transport + await queryInterface.bulkUpdate('Services', { type: 'transport' }, { [Sequelize.Op.or]: [ { type: 'tcp' }, { type: 'udp' } ] }); + + // Drop old enum + await queryInterface.sequelize.query('DROP TYPE "enum_Services_type_old"'); + + // Rename enum again to update it to final values + await queryInterface.sequelize.query('ALTER TYPE "enum_Services_type" RENAME TO "enum_Services_type_old"'); + + // Create final enum with only http and transport + await queryInterface.sequelize.query("CREATE TYPE \"enum_Services_type\" AS ENUM ('http', 'transport')"); + + // Update the column to use the final enum + await queryInterface.sequelize.query('ALTER TABLE "Services" ALTER COLUMN "type" TYPE "enum_Services_type" USING "type"::text::"enum_Services_type"'); + + // Drop old enum + await queryInterface.sequelize.query('DROP TYPE "enum_Services_type_old"'); + } else { + // SQLite and other databases + await queryInterface.changeColumn('Services', 'type', { + type: Sequelize.ENUM('http', 'transport', 'tcp', 'udp'), + allowNull: false + }); + await queryInterface.bulkUpdate('Services', { type: 'transport' }, { [Sequelize.Op.or]: [ { type: 'tcp' }, { type: 'udp' } ] }); + await queryInterface.changeColumn('Services', 'type', { + type: Sequelize.ENUM('http', 'transport'), + allowNull: false + }); + } // insert migrated data into new tables AFTER schema changes because of how sqlite3 handles cascades if (httpServices.length > 0) @@ -144,21 +177,7 @@ module.exports = { }, async down (queryInterface, Sequelize) { - // Recreate old indexes on Services table - await queryInterface.addIndex('Services', ['externalHostname', 'externalDomainId'], { - unique: true, - name: 'services_http_unique_hostname_domain', - where: { - type: 'http' - } - }); - - await queryInterface.addIndex('Services', ['type', 'externalPort'], { - unique: true, - name: 'services_layer4_unique_port' - }); - - // Add columns back to Services table + // Add columns back to Services table first await queryInterface.addColumn('Services', 'externalHostname', { type: Sequelize.STRING(255), allowNull: true @@ -184,10 +203,27 @@ module.exports = { }); // Change type enum back to include tcp and udp - await queryInterface.changeColumn('Services', 'type', { - type: Sequelize.ENUM('http', 'transport', 'tcp', 'udp'), - allowNull: false - }); + const dialect = queryInterface.sequelize.getDialect(); + + if (dialect === 'postgres') { + // Rename the existing enum + await 
queryInterface.sequelize.query('ALTER TYPE "enum_Services_type" RENAME TO "enum_Services_type_old"'); + + // Create new enum with tcp, udp, and transport + await queryInterface.sequelize.query("CREATE TYPE \"enum_Services_type\" AS ENUM ('http', 'transport', 'tcp', 'udp')"); + + // Update the column to use the new enum + await queryInterface.sequelize.query('ALTER TABLE "Services" ALTER COLUMN "type" TYPE "enum_Services_type" USING "type"::text::"enum_Services_type"'); + + // Drop old enum + await queryInterface.sequelize.query('DROP TYPE "enum_Services_type_old"'); + } else { + // SQLite and other databases + await queryInterface.changeColumn('Services', 'type', { + type: Sequelize.ENUM('http', 'transport', 'tcp', 'udp'), + allowNull: false + }); + } // Migrate data back from child tables const servicesTable = queryInterface.quoteIdentifier('Services'); @@ -218,9 +254,38 @@ module.exports = { } // Remove transport from enum, leaving only http, tcp, udp - await queryInterface.changeColumn('Services', 'type', { - type: Sequelize.ENUM('http', 'tcp', 'udp'), - allowNull: false + if (dialect === 'postgres') { + // Rename the existing enum + await queryInterface.sequelize.query('ALTER TYPE "enum_Services_type" RENAME TO "enum_Services_type_old"'); + + // Create new enum with only http, tcp, udp + await queryInterface.sequelize.query("CREATE TYPE \"enum_Services_type\" AS ENUM ('http', 'tcp', 'udp')"); + + // Update the column to use the new enum + await queryInterface.sequelize.query('ALTER TABLE "Services" ALTER COLUMN "type" TYPE "enum_Services_type" USING "type"::text::"enum_Services_type"'); + + // Drop old enum + await queryInterface.sequelize.query('DROP TYPE "enum_Services_type_old"'); + } else { + // SQLite and other databases + await queryInterface.changeColumn('Services', 'type', { + type: Sequelize.ENUM('http', 'tcp', 'udp'), + allowNull: false + }); + } + + // Recreate old indexes on Services table + await queryInterface.addIndex('Services', ['externalHostname', 'externalDomainId'], { + unique: true, + name: 'services_http_unique_hostname_domain', + where: { + type: 'http' + } + }); + + await queryInterface.addIndex('Services', ['type', 'externalPort'], { + unique: true, + name: 'services_layer4_unique_port' }); // Drop child tables diff --git a/create-a-container/migrations/20251202201123-add-dns-services.js b/create-a-container/migrations/20251202201123-add-dns-services.js index 4f1964d3..8ca9da61 100644 --- a/create-a-container/migrations/20251202201123-add-dns-services.js +++ b/create-a-container/migrations/20251202201123-add-dns-services.js @@ -4,10 +4,27 @@ module.exports = { async up (queryInterface, Sequelize) { // Add 'dns' to Service type enum - await queryInterface.changeColumn('Services', 'type', { - type: Sequelize.ENUM('http', 'transport', 'dns'), - allowNull: false - }); + const dialect = queryInterface.sequelize.getDialect(); + + if (dialect === 'postgres') { + // Rename the existing enum + await queryInterface.sequelize.query('ALTER TYPE "enum_Services_type" RENAME TO "enum_Services_type_old"'); + + // Create new enum with dns added + await queryInterface.sequelize.query("CREATE TYPE \"enum_Services_type\" AS ENUM ('http', 'transport', 'dns')"); + + // Update the column to use the new enum + await queryInterface.sequelize.query('ALTER TABLE "Services" ALTER COLUMN "type" TYPE "enum_Services_type" USING "type"::text::"enum_Services_type"'); + + // Drop old enum + await queryInterface.sequelize.query('DROP TYPE "enum_Services_type_old"'); + } else { + // SQLite and 
other databases + await queryInterface.changeColumn('Services', 'type', { + type: Sequelize.ENUM('http', 'transport', 'dns'), + allowNull: false + }); + } // Create DnsServices table await queryInterface.createTable('DnsServices', { @@ -51,9 +68,26 @@ await queryInterface.dropTable('DnsServices'); // Remove 'dns' from Service type enum - await queryInterface.changeColumn('Services', 'type', { - type: Sequelize.ENUM('http', 'transport'), - allowNull: false - }); + const dialect = queryInterface.sequelize.getDialect(); + + if (dialect === 'postgres') { + // Rename the existing enum + await queryInterface.sequelize.query('ALTER TYPE "enum_Services_type" RENAME TO "enum_Services_type_old"'); + + // Create new enum without dns + await queryInterface.sequelize.query("CREATE TYPE \"enum_Services_type\" AS ENUM ('http', 'transport')"); + + // Update the column to use the new enum + await queryInterface.sequelize.query('ALTER TABLE "Services" ALTER COLUMN "type" TYPE "enum_Services_type" USING "type"::text::"enum_Services_type"'); + + // Drop old enum + await queryInterface.sequelize.query('DROP TYPE "enum_Services_type_old"'); + } else { + // SQLite and other databases + await queryInterface.changeColumn('Services', 'type', { + type: Sequelize.ENUM('http', 'transport'), + allowNull: false + }); + } } }; diff --git a/create-a-container/migrations/20260126120000-fix-services-cascade-delete.js b/create-a-container/migrations/20260126120000-fix-services-cascade-delete.js new file mode 100644 index 00000000..6e998210 --- /dev/null +++ b/create-a-container/migrations/20260126120000-fix-services-cascade-delete.js @@ -0,0 +1,33 @@ +'use strict'; + +/** @type {import('sequelize-cli').Migration} */ +module.exports = { + async up(queryInterface, Sequelize) { + // Fix: Restore ON DELETE CASCADE to Services.containerId foreign key + // This was lost during the 20251202180408-refactor-services-to-sti migration + // when columns were removed + + await queryInterface.changeColumn('Services', 'containerId', { + type: Sequelize.INTEGER, + allowNull: false, + references: { + model: 'Containers', + key: 'id' + }, + onDelete: 'CASCADE' + }); + }, + + async down(queryInterface, Sequelize) { + // Reverting would recreate the bug this migration fixes, so the down path + // deliberately restores the original constraint without ON DELETE CASCADE + await queryInterface.changeColumn('Services', 'containerId', { + type: Sequelize.INTEGER, + allowNull: false, + references: { + model: 'Containers', + key: 'id' + } + }); + } +}; diff --git a/create-a-container/migrations/20260127180000-add-container-status.js b/create-a-container/migrations/20260127180000-add-container-status.js new file mode 100644 index 00000000..172cfb66 --- /dev/null +++ b/create-a-container/migrations/20260127180000-add-container-status.js @@ -0,0 +1,15 @@ +'use strict'; +/** @type {import('sequelize-cli').Migration} */ +module.exports = { + async up(queryInterface, Sequelize) { + await queryInterface.addColumn('Containers', 'status', { + type: Sequelize.STRING(20), + allowNull: false, + defaultValue: 'running' + }); + }, + + async down(queryInterface, Sequelize) { + await queryInterface.removeColumn('Containers', 'status'); + } +}; diff --git a/create-a-container/migrations/20260127180001-add-container-creation-job-id.js b/create-a-container/migrations/20260127180001-add-container-creation-job-id.js new file mode 100644 index 00000000..be86d716 --- /dev/null +++ b/create-a-container/migrations/20260127180001-add-container-creation-job-id.js @@ -0,0 +1,20 @@ +'use strict'; +/** 
@type {import('sequelize-cli').Migration} */ +module.exports = { + async up(queryInterface, Sequelize) { + await queryInterface.addColumn('Containers', 'creationJobId', { + type: Sequelize.INTEGER, + allowNull: true, + references: { + model: 'Jobs', + key: 'id' + }, + onUpdate: 'CASCADE', + onDelete: 'SET NULL' + }); + }, + + async down(queryInterface, Sequelize) { + await queryInterface.removeColumn('Containers', 'creationJobId'); + } +}; diff --git a/create-a-container/migrations/20260127180002-add-container-template.js b/create-a-container/migrations/20260127180002-add-container-template.js new file mode 100644 index 00000000..2e291cb6 --- /dev/null +++ b/create-a-container/migrations/20260127180002-add-container-template.js @@ -0,0 +1,14 @@ +'use strict'; +/** @type {import('sequelize-cli').Migration} */ +module.exports = { + async up(queryInterface, Sequelize) { + await queryInterface.addColumn('Containers', 'template', { + type: Sequelize.STRING(255), + allowNull: true + }); + }, + + async down(queryInterface, Sequelize) { + await queryInterface.removeColumn('Containers', 'template'); + } +}; diff --git a/create-a-container/migrations/20260127180003-remove-container-os-release.js b/create-a-container/migrations/20260127180003-remove-container-os-release.js new file mode 100644 index 00000000..2059b736 --- /dev/null +++ b/create-a-container/migrations/20260127180003-remove-container-os-release.js @@ -0,0 +1,14 @@ +'use strict'; +/** @type {import('sequelize-cli').Migration} */ +module.exports = { + async up(queryInterface, Sequelize) { + await queryInterface.removeColumn('Containers', 'osRelease'); + }, + + async down(queryInterface, Sequelize) { + await queryInterface.addColumn('Containers', 'osRelease', { + type: Sequelize.STRING(255), + allowNull: true + }); + } +}; diff --git a/create-a-container/migrations/20260127180004-make-container-network-fields-nullable.js b/create-a-container/migrations/20260127180004-make-container-network-fields-nullable.js new file mode 100644 index 00000000..f4b0c823 --- /dev/null +++ b/create-a-container/migrations/20260127180004-make-container-network-fields-nullable.js @@ -0,0 +1,29 @@ +'use strict'; +/** @type {import('sequelize-cli').Migration} */ +module.exports = { + async up(queryInterface, Sequelize) { + await queryInterface.changeColumn('Containers', 'macAddress', { + type: Sequelize.STRING(17), + allowNull: true, + unique: true + }); + await queryInterface.changeColumn('Containers', 'ipv4Address', { + type: Sequelize.STRING(45), + allowNull: true, + unique: true + }); + }, + + async down(queryInterface, Sequelize) { + await queryInterface.changeColumn('Containers', 'macAddress', { + type: Sequelize.STRING(17), + allowNull: false, + unique: true + }); + await queryInterface.changeColumn('Containers', 'ipv4Address', { + type: Sequelize.STRING(45), + allowNull: false, + unique: true + }); + } +}; diff --git a/create-a-container/migrations/20260127191000-make-container-id-nullable.js b/create-a-container/migrations/20260127191000-make-container-id-nullable.js new file mode 100644 index 00000000..3479ea4c --- /dev/null +++ b/create-a-container/migrations/20260127191000-make-container-id-nullable.js @@ -0,0 +1,17 @@ +'use strict'; +/** @type {import('sequelize-cli').Migration} */ +module.exports = { + async up(queryInterface, Sequelize) { + await queryInterface.changeColumn('Containers', 'containerId', { + type: Sequelize.INTEGER.UNSIGNED, + allowNull: true + }); + }, + + async down(queryInterface, Sequelize) { + await 
queryInterface.changeColumn('Containers', 'containerId', { + type: Sequelize.INTEGER.UNSIGNED, + allowNull: false + }); + } +}; diff --git a/create-a-container/migrations/20260127200000-add-node-image-storage.js b/create-a-container/migrations/20260127200000-add-node-image-storage.js new file mode 100644 index 00000000..27feaaec --- /dev/null +++ b/create-a-container/migrations/20260127200000-add-node-image-storage.js @@ -0,0 +1,16 @@ +'use strict'; + +/** @type {import('sequelize-cli').Migration} */ +module.exports = { + async up(queryInterface, Sequelize) { + await queryInterface.addColumn('Nodes', 'imageStorage', { + type: Sequelize.STRING(255), + allowNull: false, + defaultValue: 'local' + }); + }, + + async down(queryInterface, Sequelize) { + await queryInterface.removeColumn('Nodes', 'imageStorage'); + } +}; diff --git a/create-a-container/migrations/20260127210000-add-container-env-entrypoint.js b/create-a-container/migrations/20260127210000-add-container-env-entrypoint.js new file mode 100644 index 00000000..26161f73 --- /dev/null +++ b/create-a-container/migrations/20260127210000-add-container-env-entrypoint.js @@ -0,0 +1,23 @@ +'use strict'; + +/** @type {import('sequelize-cli').Migration} */ +module.exports = { + async up(queryInterface, Sequelize) { + await queryInterface.addColumn('Containers', 'environmentVars', { + type: Sequelize.TEXT, + allowNull: true, + defaultValue: null + }); + + await queryInterface.addColumn('Containers', 'entrypoint', { + type: Sequelize.STRING(2000), + allowNull: true, + defaultValue: null + }); + }, + + async down(queryInterface, Sequelize) { + await queryInterface.removeColumn('Containers', 'entrypoint'); + await queryInterface.removeColumn('Containers', 'environmentVars'); + } +}; diff --git a/create-a-container/migrations/20260129160000-create-volumes.js b/create-a-container/migrations/20260129160000-create-volumes.js new file mode 100644 index 00000000..7a3ddc15 --- /dev/null +++ b/create-a-container/migrations/20260129160000-create-volumes.js @@ -0,0 +1,68 @@ +'use strict'; + +/** @type {import('sequelize-cli').Migration} */ +module.exports = { + async up(queryInterface, Sequelize) { + await queryInterface.createTable('Volumes', { + id: { + allowNull: false, + autoIncrement: true, + primaryKey: true, + type: Sequelize.INTEGER + }, + name: { + type: Sequelize.STRING(255), + allowNull: false + }, + username: { + type: Sequelize.STRING(255), + allowNull: false + }, + siteId: { + type: Sequelize.INTEGER, + allowNull: false, + references: { + model: 'Sites', + key: 'id' + }, + onDelete: 'CASCADE' + }, + nodeId: { + type: Sequelize.INTEGER, + allowNull: false, + references: { + model: 'Nodes', + key: 'id' + }, + onDelete: 'RESTRICT' + }, + proxmoxVolume: { + type: Sequelize.STRING(255), + allowNull: false + }, + sizeGb: { + type: Sequelize.INTEGER, + allowNull: false, + defaultValue: 50 + }, + createdAt: { + allowNull: false, + type: Sequelize.DATE + }, + updatedAt: { + allowNull: false, + type: Sequelize.DATE + } + }); + + // Volume names are unique per user per site + await queryInterface.addIndex('Volumes', ['username', 'name', 'siteId'], { + unique: true, + name: 'volumes_unique_name_per_user_per_site' + }); + }, + + async down(queryInterface, Sequelize) { + await queryInterface.dropTable('Volumes'); + } +}; diff --git a/create-a-container/migrations/20260129160001-create-container-volumes.js b/create-a-container/migrations/20260129160001-create-container-volumes.js new file mode 100644 index 00000000..8915111d --- /dev/null +++ 
b/create-a-container/migrations/20260129160001-create-container-volumes.js @@ -0,0 +1,61 @@ +'use strict'; + +/** @type {import('sequelize-cli').Migration} */ +module.exports = { + async up(queryInterface, Sequelize) { + await queryInterface.createTable('ContainerVolumes', { + id: { + allowNull: false, + autoIncrement: true, + primaryKey: true, + type: Sequelize.INTEGER + }, + containerId: { + type: Sequelize.INTEGER, + allowNull: false, + references: { + model: 'Containers', + key: 'id' + }, + onDelete: 'CASCADE' + }, + volumeId: { + type: Sequelize.INTEGER, + allowNull: false, + references: { + model: 'Volumes', + key: 'id' + }, + onDelete: 'RESTRICT' + }, + mountPath: { + type: Sequelize.STRING(500), + allowNull: false + }, + createdAt: { + allowNull: false, + type: Sequelize.DATE + }, + updatedAt: { + allowNull: false, + type: Sequelize.DATE + } + }); + + // A volume can only be attached to a container once + await queryInterface.addIndex('ContainerVolumes', ['containerId', 'volumeId'], { + unique: true, + name: 'container_volumes_unique_container_volume' + }); + + // Mount paths must be unique within a container + await queryInterface.addIndex('ContainerVolumes', ['containerId', 'mountPath'], { + unique: true, + name: 'container_volumes_unique_container_mount_path' + }); + }, + + async down(queryInterface, Sequelize) { + await queryInterface.dropTable('ContainerVolumes'); + } +}; diff --git a/create-a-container/migrations/20260129160002-add-placeholder-ctid-to-nodes.js b/create-a-container/migrations/20260129160002-add-placeholder-ctid-to-nodes.js new file mode 100644 index 00000000..d81b55e3 --- /dev/null +++ b/create-a-container/migrations/20260129160002-add-placeholder-ctid-to-nodes.js @@ -0,0 +1,15 @@ +'use strict'; + +/** @type {import('sequelize-cli').Migration} */ +module.exports = { + async up(queryInterface, Sequelize) { + await queryInterface.addColumn('Nodes', 'placeholderCtId', { + type: Sequelize.INTEGER.UNSIGNED, + allowNull: true + }); + }, + + async down(queryInterface, Sequelize) { + await queryInterface.removeColumn('Nodes', 'placeholderCtId'); + } +}; diff --git a/create-a-container/models/container-volume.js b/create-a-container/models/container-volume.js new file mode 100644 index 00000000..773ea421 --- /dev/null +++ b/create-a-container/models/container-volume.js @@ -0,0 +1,75 @@ +'use strict'; +const { + Model +} = require('sequelize'); + +module.exports = (sequelize, DataTypes) => { + class ContainerVolume extends Model { + static associate(models) { + // A container volume attachment belongs to a container + ContainerVolume.belongsTo(models.Container, { + foreignKey: 'containerId', + as: 'container' + }); + + // A container volume attachment belongs to a volume + ContainerVolume.belongsTo(models.Volume, { + foreignKey: 'volumeId', + as: 'volume' + }); + } + } + + ContainerVolume.init({ + containerId: { + type: DataTypes.INTEGER, + allowNull: false, + references: { + model: 'Containers', + key: 'id' + } + }, + volumeId: { + type: DataTypes.INTEGER, + allowNull: false, + references: { + model: 'Volumes', + key: 'id' + } + }, + mountPath: { + type: DataTypes.STRING(500), + allowNull: false, + validate: { + notEmpty: true, + isAbsolutePath(value) { + if (!value.startsWith('/')) { + throw new Error('Mount path must be an absolute path starting with /'); + } + }, + noTraversal(value) { + if (value.includes('..')) { + throw new Error('Mount path cannot contain path traversal (..)'); + } + } + } + } + }, { + sequelize, + modelName: 'ContainerVolume', + indexes: [ + { 
+ name: 'container_volumes_unique_container_volume', + unique: true, + fields: ['containerId', 'volumeId'] + }, + { + name: 'container_volumes_unique_container_mount_path', + unique: true, + fields: ['containerId', 'mountPath'] + } + ] + }); + + return ContainerVolume; +}; diff --git a/create-a-container/models/container.js b/create-a-container/models/container.js index 02d94231..2e50f20b 100644 --- a/create-a-container/models/container.js +++ b/create-a-container/models/container.js @@ -14,6 +14,64 @@ module.exports = (sequelize, DataTypes) => { Container.hasMany(models.Service, { foreignKey: 'containerId', as: 'services' }); // a container belongs to a node Container.belongsTo(models.Node, { foreignKey: 'nodeId', as: 'node' }); + // a container may have a creation job + Container.belongsTo(models.Job, { foreignKey: 'creationJobId', as: 'creationJob' }); + // a container can have many volume attachments + Container.hasMany(models.ContainerVolume, { foreignKey: 'containerId', as: 'volumeAttachments' }); + // a container can have many volumes (through ContainerVolumes) + Container.belongsToMany(models.Volume, { + through: models.ContainerVolume, + foreignKey: 'containerId', + otherKey: 'volumeId', + as: 'volumes' + }); + } + + /** + * Build LXC config object for environment variables and entrypoint + * Returns config suitable for Proxmox API updateLxcConfig + * @returns {object} Config object with 'env' and 'entrypoint' properties + */ + buildLxcEnvConfig() { + const config = {}; + const deleteList = []; + + // Parse environment variables from JSON and format as NUL-separated list + // Format: KEY1=value1\0KEY2=value2\0KEY3=value3 + if (this.environmentVars) { + try { + const envObj = JSON.parse(this.environmentVars); + const envPairs = []; + for (const [key, value] of Object.entries(envObj)) { + if (key && value !== undefined) { + envPairs.push(`${key}=${value}`); + } + } + if (envPairs.length > 0) { + config['env'] = envPairs.join('\0'); + } else { + deleteList.push('env'); + } + } catch (err) { + console.error('Failed to parse environment variables JSON:', err.message); + } + } else { + deleteList.push('env'); + } + + // Set entrypoint command + if (this.entrypoint && this.entrypoint.trim()) { + config['entrypoint'] = this.entrypoint.trim(); + } else { + deleteList.push('entrypoint'); + } + + // Add delete parameter if there are options to remove + if (deleteList.length > 0) { + config['delete'] = deleteList.join(','); + } + + return config; } } Container.init({ @@ -26,10 +84,23 @@ module.exports = (sequelize, DataTypes) => { type: DataTypes.STRING(255), allowNull: false }, - osRelease: { + status: { + type: DataTypes.STRING(20), + allowNull: false, + defaultValue: 'pending' + }, + template: { type: DataTypes.STRING(255), allowNull: true }, + creationJobId: { + type: DataTypes.INTEGER, + allowNull: true, + references: { + model: 'Jobs', + key: 'id' + } + }, nodeId: { type: DataTypes.INTEGER, allowNull: false, @@ -40,22 +111,32 @@ module.exports = (sequelize, DataTypes) => { }, containerId: { type: DataTypes.INTEGER.UNSIGNED, - allowNull: false + allowNull: true }, macAddress: { type: DataTypes.STRING(17), - allowNull: false, + allowNull: true, unique: true }, ipv4Address: { type: DataTypes.STRING(45), - allowNull: false, + allowNull: true, unique: true }, aiContainer: { type: DataTypes.STRING(50), allowNull: false, defaultValue: 'N' + }, + environmentVars: { + type: DataTypes.TEXT, + allowNull: true, + defaultValue: null + }, + entrypoint: { + type: DataTypes.STRING(2000), + allowNull: 
true, + defaultValue: null } }, { sequelize, diff --git a/create-a-container/models/node.js b/create-a-container/models/node.js index 7181d9d9..ccea4162 100644 --- a/create-a-container/models/node.js +++ b/create-a-container/models/node.js @@ -2,6 +2,9 @@ const { Model } = require('sequelize'); +const https = require('https'); +const ProxmoxApi = require('../utils/proxmox-api'); + module.exports = (sequelize, DataTypes) => { class Node extends Model { /** @@ -18,6 +21,42 @@ module.exports = (sequelize, DataTypes) => { foreignKey: 'siteId', as: 'site' }); + + // A node has many volumes physically stored on it + Node.hasMany(models.Volume, { foreignKey: 'nodeId', as: 'volumes' }); + } + + /** + * Create an authenticated ProxmoxApi client for this node. + * Detects whether stored credentials are username/password or API token + * based on presence of '!' in tokenId (Proxmox convention). + * @returns {Promise<ProxmoxApi>} Authenticated API client + * @throws {Error} If credentials are missing or authentication fails + */ + async api() { + if (!this.tokenId || !this.secret) { + throw new Error(`Node ${this.name}: Missing credentials (tokenId and secret required)`); + } + + const httpsAgent = new https.Agent({ + rejectUnauthorized: this.tlsVerify !== false + }); + + const isApiToken = this.tokenId.includes('!'); + + if (isApiToken) { + // API token authentication - pass directly to constructor + return new ProxmoxApi(this.apiUrl, this.tokenId, this.secret, { httpsAgent }); + } + + // Username/password authentication - authenticate and return client + const client = new ProxmoxApi(this.apiUrl, null, null, { httpsAgent }); + try { + await client.authenticate(this.tokenId, this.secret); + return client; + } catch (error) { + throw new Error(`Node ${this.name}: Authentication failed - ${error.message}`); + } } } Node.init({ @@ -45,6 +84,15 @@ module.exports = (sequelize, DataTypes) => { tlsVerify: { type: DataTypes.BOOLEAN, allowNull: true + }, + imageStorage: { + type: DataTypes.STRING(255), + allowNull: false, + defaultValue: 'local' + }, + placeholderCtId: { + type: DataTypes.INTEGER.UNSIGNED, + allowNull: true } }, { sequelize, diff --git a/create-a-container/models/site.js b/create-a-container/models/site.js index 0296a904..1d305bd2 100644 --- a/create-a-container/models/site.js +++ b/create-a-container/models/site.js @@ -21,6 +21,12 @@ module.exports = (sequelize, DataTypes) => { foreignKey: 'siteId', as: 'externalDomains' }); + + // A Site has many Volumes (scoped to site) + Site.hasMany(models.Volume, { + foreignKey: 'siteId', + as: 'volumes' + }); } } Site.init({ diff --git a/create-a-container/models/transport-service.js b/create-a-container/models/transport-service.js index 42b103be..10397805 100644 --- a/create-a-container/models/transport-service.js +++ b/create-a-container/models/transport-service.js @@ -8,8 +8,8 @@ module.exports = (sequelize, DataTypes) => { } // Find the next available external port for the given protocol in the specified range - static async nextAvailablePortInRange(protocol, minPort, maxPort) { - const usedServices = await TransportService.findAll({ + static async nextAvailablePortInRange(protocol, minPort, maxPort, transaction = null) { + const queryOptions = { where: { protocol: protocol, externalPort: { @@ -18,7 +18,14 @@ module.exports = (sequelize, DataTypes) => { }, attributes: ['externalPort'], order: [['externalPort', 'ASC']] - }); + }; + + if (transaction) { + queryOptions.transaction = transaction; + queryOptions.lock = sequelize.Sequelize.Transaction.LOCK.UPDATE; + 
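// LOCK.UPDATE makes this query run as SELECT ... FOR UPDATE inside the caller's transaction; the row locks are intended to keep two concurrent requests from being handed the same free port + 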
} + + const usedServices = await TransportService.findAll(queryOptions); const usedPorts = new Set(usedServices.map(s => s.externalPort)); diff --git a/create-a-container/models/volume.js b/create-a-container/models/volume.js new file mode 100644 index 00000000..3b9fd26a --- /dev/null +++ b/create-a-container/models/volume.js @@ -0,0 +1,124 @@ +'use strict'; +const { + Model +} = require('sequelize'); + +module.exports = (sequelize, DataTypes) => { + class Volume extends Model { + static associate(models) { + // A volume belongs to a site + Volume.belongsTo(models.Site, { + foreignKey: 'siteId', + as: 'site' + }); + + // A volume is physically located on a node + Volume.belongsTo(models.Node, { + foreignKey: 'nodeId', + as: 'node' + }); + + // A volume can be attached to containers through ContainerVolumes + Volume.hasMany(models.ContainerVolume, { + foreignKey: 'volumeId', + as: 'attachments' + }); + + // A volume can have many containers (through ContainerVolumes) + Volume.belongsToMany(models.Container, { + through: models.ContainerVolume, + foreignKey: 'volumeId', + otherKey: 'containerId', + as: 'containers' + }); + } + + /** + * Check if this volume is currently attached to any container + * @returns {Promise<boolean>} + */ + async isAttached() { + const attachments = await this.getAttachments(); + return attachments.length > 0; + } + + /** + * Get the container this volume is attached to (if any) + * Returns null if volume is on placeholder container + * @returns {Promise<Container|null>} + */ + async getAttachedContainer() { + const attachments = await this.getAttachments({ + include: ['container'] + }); + if (attachments.length === 0) return null; + return attachments[0].container; + } + + /** + * Generate the placeholder mount path for this volume + * Used when volume is stored on placeholder container + * Format: /<username>/<name> + * @returns {string} + */ + getPlaceholderMountPath() { + return `/${this.username}/${this.name}`; + } + } + + Volume.init({ + name: { + type: DataTypes.STRING(255), + allowNull: false, + validate: { + notEmpty: true, + is: /^[a-zA-Z0-9_-]+$/ + } + }, + username: { + type: DataTypes.STRING(255), + allowNull: false + }, + siteId: { + type: DataTypes.INTEGER, + allowNull: false, + references: { + model: 'Sites', + key: 'id' + } + }, + nodeId: { + type: DataTypes.INTEGER, + allowNull: false, + references: { + model: 'Nodes', + key: 'id' + } + }, + proxmoxVolume: { + type: DataTypes.STRING(255), + allowNull: false + }, + sizeGb: { + type: DataTypes.INTEGER, + allowNull: false, + defaultValue: 50, + validate: { + min: 1, + max: 1000 + } + } + }, { + sequelize, + modelName: 'Volume', + indexes: [ + { + name: 'volumes_unique_name_per_user_per_site', + unique: true, + fields: ['username', 'name', 'siteId'] + } + ] + }); + + return Volume; +}; diff --git a/create-a-container/routers/containers.js b/create-a-container/routers/containers.js index a03006c5..38c84d2c 100644 --- a/create-a-container/routers/containers.js +++ b/create-a-container/routers/containers.js @@ -2,11 +2,65 @@ const express = require('express'); const router = express.Router({ mergeParams: true }); // Enable access to :siteId param const https = require('https'); const dns = require('dns').promises; -const { Container, Service, HTTPService, TransportService, DnsService, Node, Site, ExternalDomain, Sequelize, sequelize } = require('../models'); +const { Container, Service, HTTPService, TransportService, DnsService, Node, Site, ExternalDomain, Job, Volume, ContainerVolume, Sequelize, sequelize } = require('../models'); const { 
requireAuth } = require('../middlewares'); const ProxmoxApi = require('../utils/proxmox-api'); const serviceMap = require('../data/services.json'); +/** + * Normalize a Docker image reference to full format: host/org/image:tag + * Examples: + * nginx → docker.io/library/nginx:latest + * nginx:alpine → docker.io/library/nginx:alpine + * myorg/myapp → docker.io/myorg/myapp:latest + * myorg/myapp:v1 → docker.io/myorg/myapp:v1 + * ghcr.io/org/app:v1 → ghcr.io/org/app:v1 + */ +function normalizeDockerRef(ref) { + // Split off tag first + let tag = 'latest'; + let imagePart = ref; + + const lastColon = ref.lastIndexOf(':'); + if (lastColon !== -1) { + const potentialTag = ref.substring(lastColon + 1); + // Make sure this isn't a port number in a registry URL (e.g., registry:5000/image) + if (!potentialTag.includes('/')) { + tag = potentialTag; + imagePart = ref.substring(0, lastColon); + } + } + + const parts = imagePart.split('/'); + + let host = 'docker.io'; + let org = 'library'; + let image; + + if (parts.length === 1) { + // Just image name: nginx + image = parts[0]; + } else if (parts.length === 2) { + // Could be org/image or host/image + // If first part contains a dot or colon, it's a registry host + if (parts[0].includes('.') || parts[0].includes(':')) { + host = parts[0]; + image = parts[1]; + } else { + // org/image + org = parts[0]; + image = parts[1]; + } + } else { + // host/org/image or host/path/to/image + host = parts[0]; + image = parts[parts.length - 1]; + org = parts.slice(1, -1).join('/'); + } + + return `${host}/${org}/${image}:${tag}`; +} + // GET /sites/:siteId/containers/new - Display form for creating a new container router.get('/new', requireAuth, async (req, res) => { // verify site exists @@ -32,11 +86,7 @@ router.get('/new', requireAuth, async (req, res) => { // TODO: use datamodel backed templates instead of querying Proxmox here for (const node of nodes) { - const client = new ProxmoxApi(node.apiUrl, node.tokenId, node.secret, { - httpsAgent: new https.Agent({ - rejectUnauthorized: node.tlsVerify !== false - }) - }); + const client = await node.api(); const lxcTemplates = await client.getLxcTemplates(node.name); @@ -56,10 +106,27 @@ router.get('/new', requireAuth, async (req, res) => { order: [['name', 'ASC']] }); + // Get available (unattached) volumes for this user in this site + const allVolumes = await Volume.findAll({ + where: { + siteId, + username: req.session.user + }, + include: [ + { model: Node, as: 'node', attributes: ['id', 'name'] }, + { model: ContainerVolume, as: 'attachments' } + ], + order: [['name', 'ASC']] + }); + + // Filter to only unattached volumes + const availableVolumes = allVolumes.filter(v => v.attachments.length === 0); + return res.render('containers/form', { site, templates, externalDomains, + availableVolumes, container: undefined, // Not editing req }); @@ -113,7 +180,9 @@ router.get('/', requireAuth, async (req, res) => { id: c.id, hostname: c.hostname, ipv4Address: c.ipv4Address, - osRelease: c.osRelease, + status: c.status, + template: c.template, + creationJobId: c.creationJobId, sshPort, httpPort, nodeName: c.node ? 
c.node.name : '-' @@ -196,7 +265,7 @@ router.get('/:id/edit', requireAuth, async (req, res) => { }); }); -// POST /sites/:siteId/containers - Create a new container +// POST /sites/:siteId/containers - Create a new container (async via job) router.post('/', async (req, res) => { const siteId = parseInt(req.params.siteId, 10); @@ -207,174 +276,290 @@ router.post('/', async (req, res) => { return res.redirect('/sites'); } - // TODO: build the container async in a Job - try { - const { hostname, template, services } = req.body; - const [ nodeName, templateVmid ] = template.split(','); - const node = await Node.findOne({ where: { name: nodeName, siteId } }); - const client = new ProxmoxApi(node.apiUrl, node.tokenId, node.secret, { - httpsAgent: new https.Agent({ - rejectUnauthorized: node.tlsVerify !== false - }) - }); - const vmid = await client.nextId(); - const upid = await client.cloneLxc(node.name, parseInt(templateVmid, 10), vmid, { - hostname, - description: `Cloned from template ${templateVmid}`, - full: 1 - }); - - // wait for the task to complete - while (true) { - const status = await client.taskStatus(node.name, upid); - if (status.status === 'stopped') break; - } - - // Configure the cloned container - await client.updateLxcConfig(node.name, vmid, { - cores: 4, - features: 'nesting=1', - memory: 4096, - net0: 'name=eth0,ip=dhcp,bridge=vmbr0', - searchdomain: site.internalDomain, - swap: 0, - onboot: 1, - tags: req.session.user, - }); - - // Start the container - const startUpid = await client.startLxc(node.name, vmid); + const t = await sequelize.transaction(); - // wait for the start task to complete - while (true) { - const status = await client.taskStatus(node.name, startUpid); - if (status.status === 'stopped') break; - } - - // record container information - const config = await client.lxcConfig(node.name, vmid); - const macAddress = config['net0'].match(/hwaddr=([0-9A-Fa-f:]+)/)[1]; - const ipv4Address = await (async () => { - const maxRetries = 10; - const retryDelay = 3000; - for (let attempt = 1; attempt <= maxRetries; attempt++) { - try { - const domainName = `${hostname}.${site.internalDomain}`; - const lookup = await dns.lookup(domainName); - return lookup.address; - } catch (err) { - console.error('DNS lookup failed:', err); - await new Promise(resolve => setTimeout(resolve, retryDelay)); + try { + const { hostname, template, customTemplate, services, environmentVars, entrypoint } = req.body; + + // Convert environment variables array to JSON object + let envVarsJson = null; + if (environmentVars && Array.isArray(environmentVars)) { + const envObj = {}; + for (const env of environmentVars) { + if (env.key && env.key.trim()) { + envObj[env.key.trim()] = env.value || ''; + } + } + if (Object.keys(envObj).length > 0) { + envVarsJson = JSON.stringify(envObj); } } - console.error('DNS lookup failed after maximum retries'); - return null - })(); - - const container = await Container.create({ - hostname, - username: req.session.user, - nodeId: node.id, - containerId: vmid, - macAddress, - ipv4Address - }); - - // Create services if provided - if (services && typeof services === 'object') { - for (const key in services) { - const service = services[key]; - const { type, internalPort, externalHostname, externalDomainId, dnsName } = service; + + let nodeName, templateName, node; + + if (template === 'custom' || !template) { + // Custom Docker image - parse and normalize the reference + if (!customTemplate || customTemplate.trim() === '') { + throw new Error('Custom template 
image is required'); + } - // Validate required fields - if (!type || !internalPort) continue; + templateName = normalizeDockerRef(customTemplate.trim()); - // Determine the service type (http, transport, or dns) - let serviceType; - let protocol = null; + // For custom templates, pick the first available node in the site + node = await Node.findOne({ + where: { + siteId, + apiUrl: { [Sequelize.Op.ne]: null }, + tokenId: { [Sequelize.Op.ne]: null }, + secret: { [Sequelize.Op.ne]: null } + } + }); - if (type === 'http') { - serviceType = 'http'; - } else if (type === 'srv') { - serviceType = 'dns'; - } else { - // tcp or udp - serviceType = 'transport'; - protocol = type; + if (!node) { + throw new Error('No nodes with API access available in this site'); } + } else { + // Standard Proxmox template + const [ nodeNamePart, templateVmid ] = template.split(','); + nodeName = nodeNamePart; + node = await Node.findOne({ where: { name: nodeName, siteId } }); - const serviceData = { - containerId: container.id, - type: serviceType, - internalPort: parseInt(internalPort, 10) - }; - - // Create the base service - const createdService = await Service.create(serviceData); - - if (serviceType === 'http') { - // Validate that both hostname and domain are set - if (!externalHostname || !externalDomainId || externalDomainId === '') { - req.flash('error', 'HTTP services must have both an external hostname and external domain'); - return res.redirect(`/sites/${siteId}/containers/new`); + if (!node) { + throw new Error(`Node "${nodeName}" not found`); + } + + // Get the template name from Proxmox + const client = await node.api(); + const templates = await client.getLxcTemplates(node.name); + const templateContainer = templates.find(t => t.vmid === parseInt(templateVmid, 10)); + + if (!templateContainer) { + throw new Error(`Template with VMID ${templateVmid} not found on node ${nodeName}`); + } + + templateName = templateContainer.name; + } + + // Create the container record in pending status (VMID allocated by job) + const container = await Container.create({ + hostname, + username: req.session.user, + status: 'pending', + template: templateName, + nodeId: node.id, + containerId: null, + macAddress: null, + ipv4Address: null, + environmentVars: envVarsJson, + entrypoint: entrypoint && entrypoint.trim() ? 
entrypoint.trim() : null + }, { transaction: t }); + + // Create services if provided (validate within transaction) + if (services && typeof services === 'object') { + for (const key in services) { + const service = services[key]; + const { type, internalPort, externalHostname, externalDomainId, dnsName } = service; + + // Validate required fields + if (!type || !internalPort) continue; + + // Determine the service type (http, transport, or dns) + let serviceType; + let protocol = null; + + if (type === 'http') { + serviceType = 'http'; + } else if (type === 'srv') { + serviceType = 'dns'; + } else { + // tcp or udp + serviceType = 'transport'; + protocol = type; + } + + const serviceData = { + containerId: container.id, + type: serviceType, + internalPort: parseInt(internalPort, 10) + }; + + // Create the base service + const createdService = await Service.create(serviceData, { transaction: t }); + + if (serviceType === 'http') { + // Validate that both hostname and domain are set + if (!externalHostname || !externalDomainId || externalDomainId === '') { + throw new Error('HTTP services must have both an external hostname and external domain'); + } + + // Create HTTPService entry + await HTTPService.create({ + serviceId: createdService.id, + externalHostname, + externalDomainId: parseInt(externalDomainId, 10) + }, { transaction: t }); + } else if (serviceType === 'dns') { + // Validate DNS name is set + if (!dnsName) { + throw new Error('DNS services must have a DNS name'); + } + + // Create DnsService entry + await DnsService.create({ + serviceId: createdService.id, + recordType: 'SRV', + dnsName + }, { transaction: t }); + } else { + // For TCP/UDP services, auto-assign external port + const minPort = 2000; + const maxPort = 65535; + const externalPort = await TransportService.nextAvailablePortInRange(protocol, minPort, maxPort, t); + + // Create TransportService entry + await TransportService.create({ + serviceId: createdService.id, + protocol: protocol, + externalPort + }, { transaction: t }); } + } + } + + // Handle volume attachments (both existing and new volumes) + const { volumes } = req.body; + const volumeAttachments = []; + const newVolumesToCreate = []; + + if (volumes && typeof volumes === 'object') { + for (const key in volumes) { + const volData = volumes[key]; + const { volumeId, volumeName, mountPath, sizeGb } = volData; + + if (!mountPath) continue; - // Create HTTPService entry - await HTTPService.create({ - serviceId: createdService.id, - externalHostname, - externalDomainId: parseInt(externalDomainId, 10) - }); - } else if (serviceType === 'dns') { - // Validate DNS name is set - if (!dnsName) { - req.flash('error', 'DNS services must have a DNS name'); - return res.redirect(`/sites/${siteId}/containers/new`); + // Validate mount path + if (!mountPath.startsWith('/')) { + throw new Error(`Mount path must be absolute (start with /)`); } - // Create DnsService entry - await DnsService.create({ - serviceId: createdService.id, - recordType: 'SRV', - dnsName - }); - } else { - // For TCP/UDP services, auto-assign external port - const minPort = 2000; - const maxPort = 65565; - const externalPort = await TransportService.nextAvailablePortInRange(protocol, minPort, maxPort); + if (mountPath.includes('..')) { + throw new Error(`Mount path cannot contain path traversal (..)`); } - // Create TransportService entry - await TransportService.create({ - serviceId: createdService.id, - protocol: protocol, - externalPort - }); + if (volumeId) { + // Existing volume - validate it exists and belongs to user + const volume = await Volume.findOne({ + where: { + id: parseInt(volumeId, 10), + username: req.session.user, + siteId + }, + include: [{ model: ContainerVolume, as: 'attachments' }] + }); + + if (!volume) { + throw new Error(`Volume not found or not owned by you`); + } + + // Check volume is not already attached + if (volume.attachments.length > 0) { + throw new Error(`Volume "${volume.name}" is already attached to another container`); + } + + volumeAttachments.push({ volumeId: volume.id, mountPath }); + } else if (volumeName) { + // New volume to create + const cleanName = volumeName.trim(); + + // Validate volume name + if (!/^[a-zA-Z0-9][a-zA-Z0-9_-]*$/.test(cleanName)) { + throw new Error(`Volume name "${cleanName}" is invalid. Use alphanumeric characters, dashes, and underscores only.`); + } + + // Check for duplicate name + const existingVolume = await Volume.findOne({ + where: { + name: cleanName, + username: req.session.user, + siteId + } + }); + + if (existingVolume) { + throw new Error(`You already have a volume named "${cleanName}"`); + } + + newVolumesToCreate.push({ + name: cleanName, + mountPath, + sizeGb: parseInt(sizeGb, 10) || 50 + }); + } } } - return res.redirect(`/sites/${siteId}/containers`); -} catch (err) { - console.error('Error creating container:', err); - - // Handle axios errors with detailed messages - let errorMessage = 'Failed to create container: '; - if (err.response?.data) { - if (err.response.data.errors) { - errorMessage += JSON.stringify(err.response.data.errors); - } else if (err.response.data.message) { - errorMessage += err.response.data.message; + // Create the job to perform the actual container creation + const job = await Job.create({ + command: `node bin/create-container.js --container-id=${container.id}`, + createdBy: req.session.user, + status: 'pending' + }, { transaction: t }); + + // Link the container to the job + await container.update({ creationJobId: job.id }, { transaction: t }); + + // Create volume attachment records for existing volumes + for (const attachment of volumeAttachments) { + await ContainerVolume.create({ + containerId: container.id, + volumeId: attachment.volumeId, + mountPath: attachment.mountPath + }, { transaction: t }); + } + + // Pass any new volumes to create-container.js as CLI arguments on the job + // command, each encoded as URI-escaped JSON (a dedicated PendingVolume + // table could replace this later) + if (newVolumesToCreate.length > 0) { + const volumeArgs = newVolumesToCreate.map(v => + `--new-volume=${encodeURIComponent(JSON.stringify(v))}` + ).join(' '); + + await job.update({ + command: `node bin/create-container.js --container-id=${container.id} ${volumeArgs}` + }, { transaction: t }); + }
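 + + // For example (illustrative values): one new 10 GB volume named "data" + // mounted at /srv/data yields a job command like + // node bin/create-container.js --container-id=42 --new-volume=%7B%22name%22%3A%22data%22%2C%22mountPath%22%3A%22%2Fsrv%2Fdata%22%2C%22sizeGb%22%3A10%7D + + // Commit the transaction + await t.commit(); + + req.flash('success', `Container "${hostname}" is being created. 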
Check back shortly for status updates.`); + return res.redirect(`/jobs/${job.id}`); + } catch (err) { + // Rollback the transaction + await t.rollback(); + + console.error('Error creating container:', err); + + // Handle axios errors with detailed messages + let errorMessage = 'Failed to create container: '; + if (err.response?.data) { + if (err.response.data.errors) { + errorMessage += JSON.stringify(err.response.data.errors); + } else if (err.response.data.message) { + errorMessage += err.response.data.message; + } else { + errorMessage += err.message; + } } else { errorMessage += err.message; } - } else { - errorMessage += err.message; + + req.flash('error', errorMessage); + return res.redirect(`/sites/${siteId}/containers/new`); } - - req.flash('error', errorMessage); - return res.redirect(`/sites/${siteId}/containers/new`); -} }); // PUT /sites/:siteId/containers/:id - Update container services @@ -409,10 +594,58 @@ router.put('/:id', requireAuth, async (req, res) => { return res.redirect(`/sites/${siteId}/containers`); } - const { services } = req.body; + const { services, environmentVars, entrypoint } = req.body; + + // Check if this is a restart-only request (no config changes) + const forceRestart = req.body.restart === 'true'; + const isRestartOnly = forceRestart && !services && !environmentVars && entrypoint === undefined; + + // Convert environment variables array to JSON object + let envVarsJson = container.environmentVars; // Default to existing + if (!isRestartOnly && environmentVars && Array.isArray(environmentVars)) { + const envObj = {}; + for (const env of environmentVars) { + if (env.key && env.key.trim()) { + envObj[env.key.trim()] = env.value || ''; + } + } + envVarsJson = Object.keys(envObj).length > 0 ? JSON.stringify(envObj) : null; + } else if (!isRestartOnly && !environmentVars) { + envVarsJson = null; + } + + const newEntrypoint = isRestartOnly ? container.entrypoint : + (entrypoint && entrypoint.trim() ? entrypoint.trim() : null); + + // Check if env vars or entrypoint changed + const envChanged = !isRestartOnly && container.environmentVars !== envVarsJson; + const entrypointChanged = !isRestartOnly && container.entrypoint !== newEntrypoint; + const needsRestart = forceRestart || envChanged || entrypointChanged; // Wrap all database operations in a transaction + let restartJob = null; await sequelize.transaction(async (t) => { + // Update environment variables and entrypoint if changed + if (envChanged || entrypointChanged) { + await container.update({ + environmentVars: envVarsJson, + entrypoint: newEntrypoint, + status: needsRestart && container.containerId ? 'restarting' : container.status + }, { transaction: t }); + } else if (forceRestart && container.containerId) { + // Just update status for force restart + await container.update({ status: 'restarting' }, { transaction: t }); + } + + // Create restart job if needed and container has a VMID + if (needsRestart && container.containerId) { + restartJob = await Job.create({ + command: `node bin/reconfigure-container.js --container-id=${container.id}`, + createdBy: req.session.user, + status: 'pending' + }, { transaction: t }); + } + // Process services in two phases: delete first, then create new if (services && typeof services === 'object') { // Phase 1: Delete marked services @@ -503,7 +736,12 @@ router.put('/:id', requireAuth, async (req, res) => { } }); - req.flash('success', 'Container services updated successfully'); + if (restartJob) { + req.flash('success', 'Container configuration updated. 
Restarting container...'); + return res.redirect(`/jobs/${restartJob.id}`); + } else { + req.flash('success', 'Container services updated successfully'); + } return res.redirect(`/sites/${siteId}/containers`); } catch (err) { console.error('Error updating container:', err); @@ -530,11 +768,18 @@ router.delete('/:id', requireAuth, async (req, res) => { id: containerId, username: req.session.user }, - include: [{ - model: Node, - as: 'node', - attributes: ['id', 'name', 'apiUrl', 'tokenId', 'secret', 'tlsVerify', 'siteId'] - }] + include: [ + { + model: Node, + as: 'node', + attributes: ['id', 'name', 'apiUrl', 'tokenId', 'secret', 'tlsVerify', 'siteId', 'placeholderCtId'] + }, + { + model: ContainerVolume, + as: 'volumeAttachments', + include: [{ model: Volume, as: 'volume' }] + } + ] }); if (!container) { @@ -559,30 +804,108 @@ router.delete('/:id', requireAuth, async (req, res) => { return res.redirect(`/sites/${siteId}/containers`); } - // Delete from Proxmox try { - const api = new ProxmoxApi( - node.apiUrl, - node.tokenId, - node.secret, - { - httpsAgent: new https.Agent({ - rejectUnauthorized: node.tlsVerify !== false, - }) + // Only attempt Proxmox operations if containerId exists + if (container.containerId) { + const api = await node.api(); + + // Sanity check: verify the container in Proxmox matches our database record + try { + const proxmoxConfig = await api.lxcConfig(node.name, container.containerId); + const proxmoxHostname = proxmoxConfig.hostname; + + if (proxmoxHostname && proxmoxHostname !== container.hostname) { + console.error(`Hostname mismatch: DB has "${container.hostname}", Proxmox has "${proxmoxHostname}" for VMID ${container.containerId}`); + req.flash('error', `Safety check failed: Proxmox container hostname "${proxmoxHostname}" does not match database hostname "${container.hostname}". 
Manual intervention required.`); + return res.redirect(`/sites/${siteId}/containers`); + } + + // Transfer volumes back to placeholder container before deletion + if (container.volumeAttachments && container.volumeAttachments.length > 0 && node.placeholderCtId) { + console.log(`Transferring ${container.volumeAttachments.length} volume(s) to placeholder container`); + + // Stop the container first to allow volume moves + try { + const status = await api.getLxcStatus(node.name, container.containerId); + if (status.status === 'running') { + console.log('Stopping container before volume transfer...'); + const stopUpid = await api.stopLxc(node.name, container.containerId); + await api.waitForTask(node.name, stopUpid); + console.log('Container stopped'); + } + } catch (stopErr) { + console.log(`Note: Could not stop container: ${stopErr.message}`); + } + + for (const attachment of container.volumeAttachments) { + const volume = attachment.volume; + + try { + // Find the mount point on the container + const sourceMp = await api.findMountPointForVolume(node.name, container.containerId, volume.proxmoxVolume); + + if (sourceMp) { + // Find next available mount point on placeholder + const targetMp = await api.findNextMountPoint(node.name, node.placeholderCtId); + + // Calculate placeholder mount path for reconstruction + const placeholderMountPath = `/${volume.username}/${volume.name}`; + + // Disable protection, move volume, re-enable protection + await api.updateLxcConfig(node.name, node.placeholderCtId, { protection: 0 }); + console.log(` Moving volume "${volume.name}" from ${sourceMp} to placeholder ${targetMp}`); + const moveUpid = await api.moveVolume(node.name, container.containerId, sourceMp, node.placeholderCtId, targetMp); + await api.waitForTask(node.name, moveUpid); + + // Update mount path on placeholder for reconstruction + await api.updateLxcConfig(node.name, node.placeholderCtId, { + [targetMp]: `${volume.proxmoxVolume},mp=${placeholderMountPath}` + }); + await api.updateLxcConfig(node.name, node.placeholderCtId, { protection: 1 }); + + console.log(` Volume "${volume.name}" preserved on placeholder`); + } else { + console.log(` Volume "${volume.name}" not found on container, may already be detached`); + } + } catch (volErr) { + console.error(` Failed to transfer volume "${volume.name}": ${volErr.message}`); + // Continue with other volumes even if one fails + } + } + } + + // Delete from Proxmox (purge=true will clean up any remaining volumes) + await api.deleteContainer(node.name, container.containerId, true, true); + console.log(`Deleted container ${container.containerId} from Proxmox node ${node.name}`); + } catch (proxmoxError) { + // If container doesn't exist in Proxmox (404 or similar), continue with DB deletion + if (proxmoxError.response?.status === 500 && proxmoxError.response?.data?.errors?.vmid) { + console.log(`Container ${container.containerId} not found in Proxmox, proceeding with DB deletion`); + } else if (proxmoxError.response?.status === 404) { + console.log(`Container ${container.containerId} not found in Proxmox, proceeding with DB deletion`); + } else { + throw proxmoxError; + } } - ); + } else { + console.log(`Container ${container.hostname} has no containerId, skipping Proxmox deletion`); + } - await api.deleteContainer(node.name, container.containerId, true, true); + // Delete volume attachments (volumes themselves are preserved) + await ContainerVolume.destroy({ where: { containerId: container.id } }); + + // Delete from database (cascade deletes associated 
services) + await container.destroy(); + + const volumeCount = container.volumeAttachments?.length || 0; + const volumeMsg = volumeCount > 0 ? ` ${volumeCount} volume(s) preserved.` : ''; + req.flash('success', `Container ${container.hostname} deleted successfully.${volumeMsg}`); } catch (error) { console.error(error); - req.flash('error', `Failed to delete container from Proxmox: ${error.message}`); + req.flash('error', `Failed to delete container: ${error.message}`); return res.redirect(`/sites/${siteId}/containers`); } - // Delete from database (cascade deletes associated services) - await container.destroy(); - - req.flash('success', `Container ${container.hostname} deleted successfully`); return res.redirect(`/sites/${siteId}/containers`); }); diff --git a/create-a-container/routers/jobs.js b/create-a-container/routers/jobs.js index 46fa03c8..874790a7 100644 --- a/create-a-container/routers/jobs.js +++ b/create-a-container/routers/jobs.js @@ -1,11 +1,20 @@ const express = require('express'); const router = express.Router(); -const { Job, JobStatus, sequelize } = require('../models'); +const { Job, JobStatus, Container, Node, sequelize } = require('../models'); const { requireAuth, requireAdmin } = require('../middlewares'); // All job endpoints require authentication router.use(requireAuth); +/** + * Helper to check if user can access a job + */ +async function canAccessJob(job, req) { + const username = req.session && req.session.user; + const isAdmin = req.session && req.session.isAdmin; + return isAdmin || job.createdBy === username; +} + // POST /jobs - enqueue a new job (admins only) router.post('/', requireAdmin, async (req, res) => { try { @@ -28,26 +37,145 @@ router.post('/', requireAdmin, async (req, res) => { } }); -// GET /jobs/:id - job metadata +// GET /jobs/:id - job metadata (HTML or JSON based on Accept header) router.get('/:id', async (req, res) => { try { const id = parseInt(req.params.id, 10); const job = await Job.findByPk(id); - if (!job) return res.status(404).json({ error: 'Job not found' }); + if (!job) { + if (req.accepts('html')) { + req.flash('error', 'Job not found'); + return res.redirect('/'); + } + return res.status(404).json({ error: 'Job not found' }); + } + // Authorization: only owner or admin can view - const username = req.session && req.session.user; - const isAdmin = req.session && req.session.isAdmin; - if (!isAdmin && job.createdBy !== username) { + if (!await canAccessJob(job, req)) { + if (req.accepts('html')) { + req.flash('error', 'Job not found'); + return res.redirect('/'); + } return res.status(404).json({ error: 'Job not found' }); } - return res.json({ id: job.id, command: job.command, status: job.status, createdAt: job.createdAt, updatedAt: job.updatedAt, createdBy: job.createdBy }); + // If client accepts HTML, render the job view + if (req.accepts('html')) { + // Get initial output for completed jobs or first batch for running jobs + const initialOutput = await JobStatus.findAll({ + where: { jobId: id }, + order: [['id', 'ASC']], + limit: 1000 + }); + + // Find the container associated with this job (if any) + const container = await Container.findOne({ + where: { creationJobId: id }, + include: [{ model: Node, as: 'node' }] + }); + + return res.render('jobs/show', { + job, + initialOutput, + container, + req + }); + } + + // JSON response for API clients + return res.json({ + id: job.id, + command: job.command, + status: job.status, + createdAt: job.createdAt, + updatedAt: job.updatedAt, + createdBy: job.createdBy + }); } catch 
(err) { console.error('Failed to fetch job:', err); + if (req.accepts('html')) { + req.flash('error', 'Failed to load job'); + return res.redirect('/'); + } return res.status(500).json({ error: 'Failed to fetch job' }); } }); +// GET /jobs/:id/stream - SSE endpoint for streaming job output +router.get('/:id/stream', async (req, res) => { + const id = parseInt(req.params.id, 10); + + try { + const job = await Job.findByPk(id); + if (!job || !await canAccessJob(job, req)) { + return res.status(404).json({ error: 'Job not found' }); + } + + // Set up SSE headers + res.setHeader('Content-Type', 'text/event-stream'); + res.setHeader('Cache-Control', 'no-cache'); + res.setHeader('Connection', 'keep-alive'); + res.setHeader('X-Accel-Buffering', 'no'); // Disable nginx buffering + res.flushHeaders(); + + // Track last sent ID for incremental updates + let lastId = req.query.lastId ? parseInt(req.query.lastId, 10) : 0; + let isRunning = true; + + // Send keepalive every 15 seconds + const keepaliveInterval = setInterval(() => { + if (isRunning) { + res.write(':keepalive\n\n'); + } + }, 15000); + + // Poll for new output every 2 seconds + const pollInterval = setInterval(async () => { + try { + // Fetch new log entries + const newLogs = await JobStatus.findAll({ + where: { + jobId: id, + id: { [sequelize.Sequelize.Op.gt]: lastId } + }, + order: [['id', 'ASC']], + limit: 100 + }); + + // Send each new log entry + for (const log of newLogs) { + res.write(`event: log\ndata: ${JSON.stringify({ id: log.id, output: log.output, timestamp: log.createdAt })}\n\n`); + lastId = log.id; + } + + // Check if job is still running + const currentJob = await Job.findByPk(id); + if (!currentJob || !['pending', 'running'].includes(currentJob.status)) { + // Send final status and close + res.write(`event: status\ndata: ${JSON.stringify({ status: currentJob ? 
currentJob.status : 'unknown' })}\n\n`); + cleanup(); + res.end(); + } + } catch (err) { + console.error('SSE poll error:', err); + } + }, 2000); + + function cleanup() { + isRunning = false; + clearInterval(keepaliveInterval); + clearInterval(pollInterval); + } + + // Clean up on client disconnect + req.on('close', cleanup); + + } catch (err) { + console.error('SSE setup error:', err); + res.status(500).json({ error: 'Failed to start stream' }); + } +}); + // GET /jobs/:id/status - fetch job status rows router.get('/:id/status', async (req, res) => { try { @@ -62,11 +190,7 @@ router.get('/:id/status', async (req, res) => { // Ensure only owner or admin can fetch statuses const job = await Job.findByPk(id); - if (!job) return res.status(404).json({ error: 'Job not found' }); - const username = req.session && req.session.user; - const isAdmin = req.session && req.session.isAdmin; - if (!isAdmin && job.createdBy !== username) { - // Hide existence to prevent information leakage + if (!job || !await canAccessJob(job, req)) { return res.status(404).json({ error: 'Job not found' }); } diff --git a/create-a-container/routers/nodes.js b/create-a-container/routers/nodes.js index 710e10fe..db07eaf0 100644 --- a/create-a-container/routers/nodes.js +++ b/create-a-container/routers/nodes.js @@ -1,6 +1,6 @@ const express = require('express'); const router = express.Router({ mergeParams: true }); // Enable access to :siteId param -const { Node, Container, Site } = require('../models'); +const { Node, Container, Site, Volume, Job } = require('../models'); const { requireAuth, requireAdmin } = require('../middlewares'); const axios = require('axios'); const https = require('https'); @@ -36,6 +36,7 @@ router.get('/', async (req, res) => { ipv4Address: n.ipv4Address, apiUrl: n.apiUrl, tlsVerify: n.tlsVerify, + placeholderCtId: n.placeholderCtId, containerCount: n.containers ? n.containers.length : 0 })); @@ -117,18 +118,28 @@ router.post('/', async (req, res) => { return res.redirect('/sites'); } - const { name, ipv4Address, apiUrl, tokenId, secret, tlsVerify } = req.body; + const { name, ipv4Address, apiUrl, tokenId, secret, tlsVerify, imageStorage } = req.body; - await Node.create({ + const node = await Node.create({ name, ipv4Address: ipv4Address || null, apiUrl: apiUrl || null, tokenId: tokenId || null, secret: secret || null, tlsVerify: tlsVerify === '' || tlsVerify === null ? null : tlsVerify === 'true', + imageStorage: imageStorage || 'local', siteId }); + // Create placeholder container via job if API credentials are configured + if (apiUrl && tokenId && secret) { + await Job.create({ + command: `node bin/create-placeholder.js --node-id=${node.id}`, + createdBy: req.session.user, + status: 'pending' + }); + } + req.flash('success', `Node ${name} created successfully`); return res.redirect(`/sites/${siteId}/nodes`); } catch (err) { @@ -149,60 +160,72 @@ router.post('/import', async (req, res) => { const { apiUrl, username, password, tlsVerify } = req.body; const httpsAgent = new https.Agent({ rejectUnauthorized: tlsVerify !== 'false' }); - let tokenId = username.includes('!') ? username : null; - let secret = tokenId ? 
password : null; + const tokenId = username; + const secret = password; - // create an api token if a username/password was provided + // Create temporary node instance to use api() method for authentication try { - if (!tokenId) { - const client = new ProxmoxApi(apiUrl, null, null, { httpsAgent }); - await client.authenticate(username, password); - const ticketData = await client.createApiToken(username, `import-${Date.now()}`); - tokenId = ticketData['full-tokenid']; - secret = ticketData['value']; - - // set privileges for the created token - await client.updateAcl('/', 'Administrator', null, true, tokenId, null); - } + const tempNode = Node.build({ + name: 'temp', + apiUrl, + tokenId, + secret, + tlsVerify: tlsVerify !== 'false' + }); - const client = new ProxmoxApi(apiUrl, tokenId, secret, { httpsAgent }); + const client = await tempNode.api(); const nodes = await client.nodes(); - // Fetch network information for each node to get IP address + // Fetch network information and storage for each node const nodesWithIp = await Promise.all(nodes.map(async (n) => { + let ipv4Address = null; + let imageStorage = 'local'; + try { const networkInterfaces = await client.nodeNetwork(n.node); // Find the primary network interface (usually vmbr0 or the one with type 'bridge' and active) const primaryInterface = networkInterfaces.find(iface => iface.iface === 'vmbr0' || (iface.type === 'bridge' && iface.active) ); - const ipv4Address = primaryInterface?.address || null; - - return { - name: n.node, - ipv4Address, - apiUrl, - tokenId, - secret, - tlsVerify: tlsVerify === '' || tlsVerify === null ? null : tlsVerify === 'true', - siteId - }; + ipv4Address = primaryInterface?.address || null; } catch (err) { console.error(`Failed to fetch network info for node ${n.node}:`, err.message); - return { - name: n.node, - ipv4Address: null, - apiUrl, - tokenId, - secret, - tlsVerify: tlsVerify === '' || tlsVerify === null ? null : tlsVerify === 'true', - siteId - }; } + + // Find largest storage supporting CT templates (vztmpl) + try { + const storages = await client.datastores(n.node, 'vztmpl', true); + if (storages.length > 0) { + const largest = storages.reduce((max, s) => (s.total > max.total ? s : max), storages[0]); + imageStorage = largest.storage; + } + } catch (err) { + console.error(`Failed to fetch storages for node ${n.node}:`, err.message); + } + + return { + name: n.node, + ipv4Address, + apiUrl, + tokenId, + secret, + tlsVerify: tlsVerify === '' || tlsVerify === null ? null : tlsVerify === 'true', + imageStorage, + siteId + }; })); const importedNodes = await Node.bulkCreate(nodesWithIp); + // Create placeholder containers for all imported nodes via jobs + for (const node of importedNodes) { + await Job.create({ + command: `node bin/create-placeholder.js --node-id=${node.id}`, + createdBy: req.session.user, + status: 'pending' + }); + } + const containerList = await client.clusterResources('lxc'); const containers = await Promise.all(containerList.map(async (c) => { const config = await client.lxcConfig(c.node, c.vmid); @@ -245,14 +268,15 @@ router.put('/:id', async (req, res) => { return res.redirect(`/sites/${siteId}/nodes`); } - const { name, ipv4Address, apiUrl, tokenId, secret, tlsVerify } = req.body; + const { name, ipv4Address, apiUrl, tokenId, secret, tlsVerify, imageStorage } = req.body; const updateData = { name, ipv4Address: ipv4Address || null, apiUrl: apiUrl || null, tokenId: tokenId || null, - tlsVerify: tlsVerify === '' || tlsVerify === null ? 
null : tlsVerify === 'true' + tlsVerify: tlsVerify === '' || tlsVerify === null ? null : tlsVerify === 'true', + imageStorage: imageStorage || 'local' }; // Only update secret if a new value was provided @@ -262,6 +286,19 @@ router.put('/:id', async (req, res) => { await node.update(updateData); + // Ensure placeholder container exists if API credentials are configured + const hasCredentials = (apiUrl || node.apiUrl) && + (tokenId || node.tokenId) && + (secret || node.secret); + if (hasCredentials && !node.placeholderCtId) { + // Create placeholder via job + await Job.create({ + command: `node bin/create-placeholder.js --node-id=${node.id}`, + createdBy: req.session.user, + status: 'pending' + }); + } + req.flash('success', `Node ${name} updated successfully`); return res.redirect(`/sites/${siteId}/nodes`); } catch (err) { @@ -271,6 +308,75 @@ router.put('/:id', async (req, res) => { } }); +// POST /sites/:siteId/nodes/:id/placeholder - Create placeholder container for a node +router.post('/:id/placeholder', async (req, res) => { + const siteId = parseInt(req.params.siteId, 10); + const nodeId = parseInt(req.params.id, 10); + + try { + const node = await Node.findOne({ + where: { id: nodeId, siteId } + }); + + if (!node) { + req.flash('error', 'Node not found'); + return res.redirect(`/sites/${siteId}/nodes`); + } + + if (node.placeholderCtId) { + req.flash('info', `Node ${node.name} already has a placeholder container (CT ${node.placeholderCtId})`); + return res.redirect(`/sites/${siteId}/nodes`); + } + + if (!node.apiUrl || !node.tokenId || !node.secret) { + req.flash('error', 'Node must have API credentials configured before creating a placeholder'); + return res.redirect(`/sites/${siteId}/nodes`); + } + + // Create a job to handle placeholder creation asynchronously + const job = await Job.create({ + command: `node bin/create-placeholder.js --node-id=${node.id}`, + createdBy: req.session.user, + status: 'pending' + }); + + // Redirect to job status page so user can watch the job run + return res.redirect(`/jobs/${job.id}`); + } catch (err) { + console.error('Error creating placeholder job:', err); + req.flash('error', `Failed to create placeholder: ${err.message}`); + return res.redirect(`/sites/${siteId}/nodes`); + } +}); + +// GET /sites/:siteId/nodes/:id/storages - Get storages supporting CT templates +router.get('/:id/storages', async (req, res) => { + const siteId = parseInt(req.params.siteId, 10); + const nodeId = parseInt(req.params.id, 10); + + try { + const node = await Node.findOne({ + where: { id: nodeId, siteId } + }); + + if (!node || !node.apiUrl || !node.tokenId || !node.secret) { + return res.json([]); + } + + const client = await node.api(); + const storages = await client.datastores(node.name, 'vztmpl', true); + + return res.json(storages.map(s => ({ + name: s.storage, + total: s.total, + available: s.avail + }))); + } catch (err) { + console.error('Error fetching storages:', err.message); + return res.json([]); + } +}); + // DELETE /sites/:siteId/nodes/:id - Delete a node router.delete('/:id', async (req, res) => { const siteId = parseInt(req.params.siteId, 10); diff --git a/create-a-container/routers/sites.js b/create-a-container/routers/sites.js index 803b5da4..ab319d56 100644 --- a/create-a-container/routers/sites.js +++ b/create-a-container/routers/sites.js @@ -19,6 +19,8 @@ router.get('/:siteId/dnsmasq.conf', requireLocalhost, async (req, res) => { include: [{ model: Container, as: 'containers', + where: { status: 'running' }, + required: false, attributes: 
['macAddress', 'ipv4Address', 'hostname'], include: [{ model: Service, @@ -44,7 +46,7 @@ router.get('/:siteId/dnsmasq.conf', requireLocalhost, async (req, res) => { router.get('/:siteId/nginx.conf', requireLocalhost, async (req, res) => { const siteId = parseInt(req.params.siteId, 10); - // fetch services for the specific site + // fetch services for the specific site (only from running containers) const site = await Site.findByPk(siteId, { include: [{ model: Node, @@ -52,6 +54,8 @@ router.get('/:siteId/nginx.conf', requireLocalhost, async (req, res) => { include: [{ model: Container, as: 'containers', + where: { status: 'running' }, + required: false, include: [{ model: Service, as: 'services', @@ -115,6 +119,7 @@ router.get('/:siteId/ldap.conf', requireLocalhost, async (req, res) => { // define the environment object const env = { DIRECTORY_BACKEND: 'sql', + REQUIRE_AUTH_FOR_SEARCH: false, }; // Configure AUTH_BACKENDS and NOTIFICATION_URL based on push notification settings @@ -149,9 +154,9 @@ router.get('/:siteId/ldap.conf', requireLocalhost, async (req, res) => { // config/config.js and construct the SQL URL const config = require('../config/config')[process.env.NODE_ENV || 'development']; const sqlUrlBuilder = new URL(`${config.dialect}://`); + sqlUrlBuilder.hostname = config.host || ''; sqlUrlBuilder.username = config.username || ''; sqlUrlBuilder.password = config.password || ''; - sqlUrlBuilder.hostname = config.host || ''; sqlUrlBuilder.port = config.port || ''; sqlUrlBuilder.pathname = config.database || path.resolve(config.storage); env.SQL_URI = sqlUrlBuilder.toString(); @@ -204,9 +209,11 @@ router.use('/:siteId', setCurrentSite); const nodesRouter = require('./nodes'); const containersRouter = require('./containers'); const externalDomainsRouter = require('./external-domains'); +const volumesRouter = require('./volumes'); router.use('/:siteId/nodes', nodesRouter); router.use('/:siteId/containers', containersRouter); router.use('/:siteId/external-domains', externalDomainsRouter); +router.use('/:siteId/volumes', volumesRouter); // GET /sites - List all sites (available to all authenticated users) router.get('/', async (req, res) => { diff --git a/create-a-container/routers/volumes.js b/create-a-container/routers/volumes.js new file mode 100644 index 00000000..33dc09d0 --- /dev/null +++ b/create-a-container/routers/volumes.js @@ -0,0 +1,257 @@ +const express = require('express'); +const router = express.Router({ mergeParams: true }); +const { Volume, Node, Site, ContainerVolume, Container } = require('../models'); +const { requireAuth } = require('../middlewares'); + +// Apply auth to all routes +router.use(requireAuth); + +// GET /sites/:siteId/volumes - List user's volumes in site +router.get('/', async (req, res) => { + const siteId = parseInt(req.params.siteId, 10); + + const site = await Site.findByPk(siteId, { + include: [{ model: Node, as: 'nodes', attributes: ['id', 'name'] }] + }); + + if (!site) { + req.flash('error', 'Site not found'); + return res.redirect('/sites'); + } + + const volumes = await Volume.findAll({ + where: { + siteId, + username: req.session.user + }, + include: [ + { model: Node, as: 'node', attributes: ['id', 'name'] }, + { + model: ContainerVolume, + as: 'attachments', + include: [{ model: Container, as: 'container', attributes: ['id', 'hostname'] }] + } + ], + order: [['name', 'ASC']] + }); + + const rows = volumes.map(v => ({ + id: v.id, + name: v.name, + sizeGb: v.sizeGb, + nodeName: v.node ? 
v.node.name : 'Unknown', + nodeId: v.nodeId, + proxmoxVolume: v.proxmoxVolume, + attachedTo: v.attachments.length > 0 ? v.attachments[0].container : null, + mountPath: v.attachments.length > 0 ? v.attachments[0].mountPath : null, + createdAt: v.createdAt + })); + + return res.render('volumes/index', { + rows, + site, + nodes: site.nodes, + req + }); +}); + +// GET /sites/:siteId/volumes/new - Display form for creating a new volume +router.get('/new', async (req, res) => { + const siteId = parseInt(req.params.siteId, 10); + + const site = await Site.findByPk(siteId, { + include: [{ + model: Node, + as: 'nodes', + attributes: ['id', 'name', 'placeholderCtId'], + where: { placeholderCtId: { [require('sequelize').Op.ne]: null } }, + required: false + }] + }); + + if (!site) { + req.flash('error', 'Site not found'); + return res.redirect('/sites'); + } + + // Filter to only nodes with placeholder containers + const availableNodes = site.nodes.filter(n => n.placeholderCtId); + + if (availableNodes.length === 0) { + req.flash('error', 'No nodes with volume support available. Please configure API credentials for at least one node.'); + return res.redirect(`/sites/${siteId}/volumes`); + } + + return res.render('volumes/form', { + volume: null, + site, + nodes: availableNodes, + isEdit: false, + req + }); +}); + +// POST /sites/:siteId/volumes - Create a new volume +router.post('/', async (req, res) => { + const siteId = parseInt(req.params.siteId, 10); + + try { + const site = await Site.findByPk(siteId); + if (!site) { + req.flash('error', 'Site not found'); + return res.redirect('/sites'); + } + + const { name, nodeId } = req.body; + + // Validate name format + if (!/^[a-zA-Z0-9_-]+$/.test(name)) { + req.flash('error', 'Volume name can only contain letters, numbers, dashes, and underscores'); + return res.redirect(`/sites/${siteId}/volumes/new`); + } + + // Check for duplicate name + const existing = await Volume.findOne({ + where: { name, username: req.session.user, siteId } + }); + if (existing) { + req.flash('error', `Volume "${name}" already exists`); + return res.redirect(`/sites/${siteId}/volumes/new`); + } + + // Get the node with placeholder container + const node = await Node.findOne({ + where: { id: nodeId, siteId } + }); + + if (!node) { + req.flash('error', 'Node not found'); + return res.redirect(`/sites/${siteId}/volumes/new`); + } + + if (!node.placeholderCtId) { + req.flash('error', 'Selected node does not have volume support configured'); + return res.redirect(`/sites/${siteId}/volumes/new`); + } + + // Create volume in Proxmox on the placeholder container + const client = await node.api(); + const storage = node.imageStorage || 'local'; + const sizeGb = 50; // Hardcoded default + + // Allocate the disk + const proxmoxVolume = await client.allocateDisk(node.name, storage, node.placeholderCtId, sizeGb); + console.log(`Created volume ${proxmoxVolume} on node ${node.name}`); + + // Attach to placeholder container with encoded mount path (disable protection first) + const placeholderMountPath = `/${req.session.user}/${name}`; + const nextMp = await client.findNextMountPoint(node.name, node.placeholderCtId); + + await client.updateLxcConfig(node.name, node.placeholderCtId, { protection: 0 }); + await client.updateLxcConfig(node.name, node.placeholderCtId, { + [nextMp]: `${proxmoxVolume},mp=${placeholderMountPath}` + }); + await client.updateLxcConfig(node.name, node.placeholderCtId, { protection: 1 }); + console.log(`Attached volume to placeholder at ${nextMp} with mount path 
${placeholderMountPath}`); + + // Create database record + await Volume.create({ + name, + username: req.session.user, + siteId, + nodeId: node.id, + proxmoxVolume, + sizeGb + }); + + req.flash('success', `Volume "${name}" created successfully`); + return res.redirect(`/sites/${siteId}/volumes`); + } catch (err) { + console.error('Error creating volume:', err); + req.flash('error', `Failed to create volume: ${err.message}`); + return res.redirect(`/sites/${siteId}/volumes/new`); + } +}); + +// DELETE /sites/:siteId/volumes/:id - Delete a volume +router.delete('/:id', async (req, res) => { + const siteId = parseInt(req.params.siteId, 10); + const volumeId = parseInt(req.params.id, 10); + + try { + const site = await Site.findByPk(siteId); + if (!site) { + req.flash('error', 'Site not found'); + return res.redirect('/sites'); + } + + const volume = await Volume.findOne({ + where: { + id: volumeId, + siteId, + username: req.session.user + }, + include: [ + { model: Node, as: 'node' }, + { model: ContainerVolume, as: 'attachments' } + ] + }); + + if (!volume) { + req.flash('error', 'Volume not found'); + return res.redirect(`/sites/${siteId}/volumes`); + } + + // Check if attached to a container + if (volume.attachments.length > 0) { + req.flash('error', 'Cannot delete volume while attached to a container. Detach it first.'); + return res.redirect(`/sites/${siteId}/volumes`); + } + + const node = volume.node; + if (!node || !node.placeholderCtId) { + req.flash('error', 'Node configuration error'); + return res.redirect(`/sites/${siteId}/volumes`); + } + + const client = await node.api(); + + // Disable protection to modify placeholder + await client.updateLxcConfig(node.name, node.placeholderCtId, { protection: 0 }); + + // Find the mount point on the placeholder container + const mpKey = await client.findMountPointForVolume(node.name, node.placeholderCtId, volume.proxmoxVolume); + + if (mpKey) { + // Detach from placeholder + await client.updateLxcConfig(node.name, node.placeholderCtId, { + 'delete': mpKey + }); + console.log(`Detached volume from placeholder at ${mpKey}`); + } + + // Re-enable protection + await client.updateLxcConfig(node.name, node.placeholderCtId, { protection: 1 }); + + // Delete the volume from Proxmox storage + try { + await client.deleteVolume(node.name, volume.proxmoxVolume); + console.log(`Deleted volume ${volume.proxmoxVolume} from storage`); + } catch (err) { + // Volume might already be deleted from Proxmox + console.log(`Note: Volume may already be deleted from Proxmox: ${err.message}`); + } + + // Delete database record + await volume.destroy(); + + req.flash('success', `Volume "${volume.name}" deleted successfully`); + return res.redirect(`/sites/${siteId}/volumes`); + } catch (err) { + console.error('Error deleting volume:', err); + req.flash('error', `Failed to delete volume: ${err.message}`); + return res.redirect(`/sites/${siteId}/volumes`); + } +}); + +module.exports = router; diff --git a/create-a-container/systemd/container-creator-init.service b/create-a-container/systemd/container-creator-init.service new file mode 100644 index 00000000..10d5112a --- /dev/null +++ b/create-a-container/systemd/container-creator-init.service @@ -0,0 +1,25 @@ +[Unit] +Description=Initialize PostgreSQL for Container Creator +After=postgresql.service +ConditionPathExists=!/opt/opensource-server/create-a-container/.env + +[Service] +Type=oneshot +WorkingDirectory=/opt/opensource-server/create-a-container +ExecStart=/bin/bash -c '\ + for i in {1..30}; do pg_isready -h 
localhost && break || sleep 1; done; \
+  POSTGRES_PASSWORD=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 24); \
+  POSTGRES_USER="cluster_manager"; \
+  POSTGRES_DATABASE="cluster_manager"; \
+  POSTGRES_HOST="localhost"; \
+  sudo -u postgres psql -c "CREATE USER $${POSTGRES_USER} WITH PASSWORD '"'"'$${POSTGRES_PASSWORD}'"'"';"; \
+  sudo -u postgres psql -c "CREATE DATABASE $${POSTGRES_DATABASE} OWNER $${POSTGRES_USER};"; \
+  echo "DATABASE_DIALECT=postgres" > .env; \
+  echo "POSTGRES_HOST=$${POSTGRES_HOST}" >> .env; \
+  echo "POSTGRES_DATABASE=$${POSTGRES_DATABASE}" >> .env; \
+  echo "POSTGRES_USER=$${POSTGRES_USER}" >> .env; \
+  echo "POSTGRES_PASSWORD=$${POSTGRES_PASSWORD}" >> .env; \
+  npm run db:migrate;'
+
+[Install]
+WantedBy=multi-user.target
diff --git a/create-a-container/systemd/container-creator.service b/create-a-container/systemd/container-creator.service
index cd734568..27ab76ba 100644
--- a/create-a-container/systemd/container-creator.service
+++ b/create-a-container/systemd/container-creator.service
@@ -1,6 +1,7 @@
 [Unit]
 Description=Container Creator Node.js App
-After=network.target
+After=container-creator-init.service
+Wants=container-creator-init.service
 
 [Service]
 Type=simple
diff --git a/create-a-container/utils/cli.js b/create-a-container/utils/cli.js
new file mode 100644
index 00000000..6a74a61d
--- /dev/null
+++ b/create-a-container/utils/cli.js
@@ -0,0 +1,22 @@
+/**
+ * CLI utility functions for job scripts
+ */
+
+/**
+ * Parse command line arguments in --key=value format
+ * @returns {object} Parsed arguments as key-value pairs
+ */
+function parseArgs() {
+  const args = {};
+  for (const arg of process.argv.slice(2)) {
+    const match = arg.match(/^--([^=]+)=(.+)$/);
+    if (match) {
+      args[match[1]] = match[2];
+    }
+  }
+  return args;
+}
+
+module.exports = {
+  parseArgs
+};
diff --git a/create-a-container/utils/proxmox-api.js b/create-a-container/utils/proxmox-api.js
index 3e2d56bf..69481646 100644
--- a/create-a-container/utils/proxmox-api.js
+++ b/create-a-container/utils/proxmox-api.js
@@ -234,6 +234,35 @@ class ProxmoxApi {
     return response.data.data;
   }
 
+  /**
+   * Wait for a Proxmox task to complete
+   * @param {string} node - The node name
+   * @param {string} upid - The task UPID
+   * @param {number} pollInterval - Polling interval in ms (default 2000)
+   * @param {number} timeout - Timeout in ms (default 300000 = 5 minutes)
+   * @returns {Promise} The final task status
+   */
+  async waitForTask(node, upid, pollInterval = 2000, timeout = 300000) {
+    const startTime = Date.now();
+    while (true) {
+      const status = await this.taskStatus(node, upid);
+      console.log(`Task ${upid}: status=${status.status}, exitstatus=${status.exitstatus || 'N/A'}`);
+
+      if (status.status === 'stopped') {
+        if (status.exitstatus && status.exitstatus !== 'OK') {
+          throw new Error(`Task failed with status: ${status.exitstatus}`);
+        }
+        return status;
+      }
+
+      if (Date.now() - startTime > timeout) {
+        throw new Error(`Task ${upid} timed out after ${timeout}ms`);
+      }
+
+      await new Promise(resolve => setTimeout(resolve, pollInterval));
+    }
+  }
+
   /**
    * Delete a container
    * @param {string} nodeName
@@ -319,6 +348,377 @@ class ProxmoxApi {
     );
     return response.data.data;
   }
+
+  /**
+   * Stop an LXC container
+   * @param {string} node - The node name
+   * @param {number} vmid - The container VMID
+   * @returns {Promise} - The task UPID
+   */
+  async stopLxc(node, vmid) {
+    const response = await axios.post(
+      `${this.baseUrl}/api2/json/nodes/${node}/lxc/${vmid}/status/stop`,
+      {},
+      this.options
+    );
+    
return response.data.data; + } + + /** + * Get LXC container current status + * @param {string} node - The node name + * @param {number} vmid - The container VMID + * @returns {Promise} - Container status object with status field ('running', 'stopped', etc.) + */ + async getLxcStatus(node, vmid) { + const response = await axios.get( + `${this.baseUrl}/api2/json/nodes/${node}/lxc/${vmid}/status/current`, + this.options + ); + return response.data.data; + } + + /** + * Get LXC container network interfaces + * @param {string} node - The node name + * @param {number} vmid - The container VMID + * @returns {Promise} - Array of network interfaces + */ + async lxcInterfaces(node, vmid) { + const response = await axios.get( + `${this.baseUrl}/api2/json/nodes/${node}/lxc/${vmid}/interfaces`, + this.options + ); + return response.data.data; + } + + /** + * Get available appliance templates from the Proxmox repository + * @param {string} node - Node name + * @returns {Promise} - Array of available templates + */ + async getAvailableTemplates(node) { + const response = await axios.get( + `${this.baseUrl}/api2/json/nodes/${node}/aplinfo`, + this.options + ); + return response.data.data || []; + } + + /** + * Download a template from the Proxmox repository to storage + * @param {string} node - Node name + * @param {string} storage - Storage name (e.g., 'local') + * @param {string} template - Template name from aplinfo (e.g., 'alpine-3.21-default_20241217_amd64.tar.xz') + * @returns {Promise} - UPID of the download task + */ + async downloadTemplate(node, storage, template) { + const response = await axios.post( + `${this.baseUrl}/api2/json/nodes/${node}/aplinfo`, + { + storage, + template + }, + this.options + ); + return response.data.data; + } + + /** + * Pull an OCI/Docker image from a registry to Proxmox storage + * @param {string} node - The node name + * @param {string} storage - The storage name (e.g., 'local') + * @param {Object} options - Pull options + * @param {string} options.reference - Full image reference (e.g., 'docker.io/library/nginx:latest') + * @param {string} [options.filename] - Target filename (e.g., 'nginx_latest.tar') + * @param {string} [options.username] - Registry username for private images + * @param {string} [options.password] - Registry password for private images + * @returns {Promise} - UPID of the pull task + */ + async pullOciImage(node, storage, options) { + const response = await axios.post( + `${this.baseUrl}/api2/json/nodes/${node}/storage/${storage}/oci-registry-pull`, + options, + this.options + ); + return response.data.data; + } + + /** + * Get MAC address from container configuration + * @param {string} node - Node name + * @param {number} vmid - Container VMID + * @returns {Promise} - MAC address or null if not found + */ + async getLxcMacAddress(node, vmid) { + console.log('Querying container configuration for MAC address...'); + const config = await this.lxcConfig(node, vmid); + const net0 = config['net0']; + + if (!net0) { + console.log('No net0 configuration found'); + return null; + } + + const macMatch = net0.match(/hwaddr=([0-9A-Fa-f:]+)/); + if (macMatch) { + console.log(`MAC address: ${macMatch[1]}`); + return macMatch[1]; + } + + console.log('Could not extract MAC address from net0 configuration'); + return null; + } + + /** + * Get IPv4 address from container interfaces with retry logic + * @param {string} node - Node name + * @param {number} vmid - Container VMID + * @param {number} maxRetries - Maximum retry attempts (default: 10) + * @param 
{number} retryDelay - Delay between retries in ms (default: 3000) + * @returns {Promise} - IPv4 address or null if not found + */ + async getLxcIpAddress(node, vmid, maxRetries = 10, retryDelay = 3000) { + console.log('Querying IP address from Proxmox interfaces API...'); + + for (let attempt = 1; attempt <= maxRetries; attempt++) { + try { + const interfaces = await this.lxcInterfaces(node, vmid); + + // Find eth0 interface and get its IPv4 address + const eth0 = interfaces.find(iface => iface.name === 'eth0'); + if (eth0 && eth0['ip-addresses']) { + const ipv4 = eth0['ip-addresses'].find(addr => addr['ip-address-type'] === 'inet'); + if (ipv4 && ipv4['ip-address']) { + console.log(`IP address found (attempt ${attempt}): ${ipv4['ip-address']}`); + return ipv4['ip-address']; + } + } + + // Also check the 'inet' field as fallback + if (eth0 && eth0.inet) { + const ip = eth0.inet.split('/')[0]; + console.log(`IP address found from inet field (attempt ${attempt}): ${ip}`); + return ip; + } + + console.log(`IP address not yet available (attempt ${attempt}/${maxRetries})`); + if (attempt < maxRetries) { + await new Promise(resolve => setTimeout(resolve, retryDelay)); + } + } catch (err) { + console.log(`Interfaces query attempt ${attempt}/${maxRetries} failed: ${err.message}`); + if (attempt < maxRetries) { + await new Promise(resolve => setTimeout(resolve, retryDelay)); + } + } + } + + console.error(`Failed to get IP address after ${maxRetries} attempts`); + return null; + } + + /** + * Get both MAC and IP address for a container + * @param {string} node - Node name + * @param {number} vmid - Container VMID + * @returns {Promise<{macAddress: string|null, ipv4Address: string|null}>} + */ + async getLxcNetworkInfo(node, vmid) { + const macAddress = await this.getLxcMacAddress(node, vmid); + const ipv4Address = await this.getLxcIpAddress(node, vmid); + + return { macAddress, ipv4Address }; + } + + /** + * Create a placeholder container for volume storage + * This container is minimal (no network, stopped) and only holds volumes + * @param {string} node - Node name + * @param {number} vmid - VMID for the placeholder container + * @param {string} storage - Storage name for rootfs (e.g., 'local-lvm') + * @returns {Promise} Task UPID + */ + async createPlaceholderContainer(node, vmid, storage) { + const response = await axios.post( + `${this.baseUrl}/api2/json/nodes/${node}/lxc`, + { + vmid, + ostemplate: 'local:vztmpl/alpine-3.21-default_20241217_amd64.tar.xz', + hostname: 'mie-volume-placeholder', + memory: 16, + swap: 0, + cores: 1, + unprivileged: 1, + start: 0, + rootfs: `${storage}:0.5`, + net0: 'name=eth0,bridge=vmbr0', + protection: 1, + description: 'MIE Opensource: Volume placeholder container. DO NOT DELETE. Holds orphaned volumes when containers are deleted.' 
+ }, + this.options + ); + return response.data.data; + } + + /** + * Check if a container exists + * @param {string} node - Node name + * @param {number} vmid - Container VMID + * @returns {Promise} + */ + async lxcExists(node, vmid) { + try { + await this.lxcConfig(node, vmid); + return true; + } catch (err) { + if (err.response?.status === 500 || err.response?.status === 404) { + return false; + } + throw err; + } + } + + /** + * Allocate a new disk volume on storage + * @param {string} node - Node name + * @param {string} storage - Storage name (e.g., 'local-lvm') + * @param {number} vmid - VMID to associate the disk with + * @param {number} sizeGb - Size in gigabytes + * @returns {Promise} Volume ID (e.g., "local-lvm:vm-105-disk-1") + */ + async allocateDisk(node, storage, vmid, sizeGb) { + // Generate a unique filename for the disk + const filename = `vm-${vmid}-disk-${Date.now()}`; + + const response = await axios.post( + `${this.baseUrl}/api2/json/nodes/${node}/storage/${storage}/content`, + { + vmid, + filename, + size: `${sizeGb}G` + }, + this.options + ); + return response.data.data; + } + + /** + * Delete a volume from storage + * @param {string} node - Node name + * @param {string} volume - Volume ID (e.g., "local-lvm:vm-105-disk-1") + * @returns {Promise} Task UPID + */ + async deleteVolume(node, volume) { + const response = await axios.delete( + `${this.baseUrl}/api2/json/nodes/${node}/storage/${volume.split(':')[0]}/content/${encodeURIComponent(volume)}`, + this.options + ); + return response.data.data; + } + + /** + * Move a volume from one container to another + * @param {string} node - Node name + * @param {number} sourceVmid - Source container VMID + * @param {string} sourceMp - Source mount point key (e.g., 'mp0', 'mp1') + * @param {number} targetVmid - Target container VMID + * @param {string} targetMp - Target mount point key (e.g., 'mp0') + * @returns {Promise} Task UPID + */ + async moveVolume(node, sourceVmid, sourceMp, targetVmid, targetMp) { + const response = await axios.post( + `${this.baseUrl}/api2/json/nodes/${node}/lxc/${sourceVmid}/move_volume`, + { + volume: sourceMp, + 'target-vmid': targetVmid, + 'target-volume': targetMp + }, + this.options + ); + return response.data.data; + } + + /** + * Resize a container disk + * @param {string} node - Node name + * @param {number} vmid - Container VMID + * @param {string} disk - Disk name (e.g., 'mp0', 'rootfs') + * @param {string} size - New size (e.g., '50G', '+10G') + * @returns {Promise} Task UPID + */ + async resizeDisk(node, vmid, disk, size) { + const response = await axios.put( + `${this.baseUrl}/api2/json/nodes/${node}/lxc/${vmid}/resize`, + { disk, size }, + this.options + ); + return response.data.data; + } + + /** + * Find the next available mount point slot (mp0, mp1, etc.) 
+ * @param {string} node - Node name + * @param {number} vmid - Container VMID + * @returns {Promise} Next available mp key (e.g., 'mp0', 'mp1') + */ + async findNextMountPoint(node, vmid) { + const config = await this.lxcConfig(node, vmid); + let mpIndex = 0; + while (config[`mp${mpIndex}`]) { + mpIndex++; + } + return `mp${mpIndex}`; + } + + /** + * Find the mount point key for a specific volume + * @param {string} node - Node name + * @param {number} vmid - Container VMID + * @param {string} volumeId - Volume ID to find (e.g., "local-lvm:vm-105-disk-1") + * @returns {Promise} Mount point key (e.g., 'mp0') or null if not found + */ + async findMountPointForVolume(node, vmid, volumeId) { + const config = await this.lxcConfig(node, vmid); + for (let i = 0; i < 256; i++) { + const mpKey = `mp${i}`; + const mpValue = config[mpKey]; + if (!mpValue) continue; + if (mpValue.startsWith(volumeId + ',') || mpValue === volumeId) { + return mpKey; + } + } + return null; + } + + /** + * Migrate a container (and its volumes) to another node + * This can be used to migrate placeholder containers with their volumes + * @param {string} sourceNode - Source node name + * @param {number} vmid - Container VMID to migrate + * @param {string} targetNode - Target node name + * @param {string} targetStorage - Target storage name (optional, defaults to same as source) + * @param {boolean} online - Perform online migration (default: false for stopped containers) + * @returns {Promise} Task UPID + */ + async migrateContainer(sourceNode, vmid, targetNode, targetStorage = null, online = false) { + const params = { + target: targetNode + }; + if (targetStorage) { + params['target-storage'] = targetStorage; + } + if (online) { + params.online = 1; + } + + const response = await axios.post( + `${this.baseUrl}/api2/json/nodes/${sourceNode}/lxc/${vmid}/migrate`, + params, + this.options + ); + return response.data.data; + } } module.exports = ProxmoxApi; diff --git a/create-a-container/views/containers/form.ejs b/create-a-container/views/containers/form.ejs index cad0d069..04ac1b7c 100644 --- a/create-a-container/views/containers/form.ejs +++ b/create-a-container/views/containers/form.ejs @@ -34,9 +34,9 @@ const breadcrumbLabel = isEdit ? 'Edit' : 'New';
<% if (isEdit) { %> - + <% } else { %> - <% if (typeof templates !== 'undefined' && templates && templates.length > 0) { %> <% templates.forEach(template => { %> @@ -44,50 +44,129 @@ const breadcrumbLabel = isEdit ? 'Edit' : 'New'; <%= template.name %> (<%= template.node %>) <% }) %> - <% } else { %> - <% } %> + + <% } %>
-
- - -
-
- - - - - - - - - - - - -
Type Internal Port - External - - - - Action
-
+
+ + Services + +
+ +
+
+ + + + + + + + + + + + +
Type Internal Port + External + + + + Action
+
+
- +
+ + Environment Variables + +
+ +
+
+ + + + + + + + + + + +
Key Value Action
+
+
+ +
+ + Entrypoint Command + +
+ +
The command to run when the container starts (overrides the default entrypoint)
+
+
+ + <% if (!isEdit) { %> +
+ + Volumes + +
+ +
+
+ + + + + + + + + + + +
Volume Mount Path Action
+

+ Enter a new volume name to create it, or select an existing volume from the dropdown. +

+
+
+ <% } %> + +
@@ -112,7 +191,29 @@ const breadcrumbLabel = isEdit ? 'Edit' : 'New'; } : null }))) : '[]' %>; const externalDomains = <%- JSON.stringify((externalDomains || []).map(d => ({ id: d.id, name: d.name }))) %>; + const existingEnvVars = <%- isEdit && container && container.environmentVars ? container.environmentVars : '{}' %>; let serviceCounter = 0; + let envVarCounter = 0; + + // Custom template toggle logic + const templateSelect = document.getElementById('templateSelect'); + const customTemplateContainer = document.getElementById('customTemplateContainer'); + const customTemplateInput = document.getElementById('customTemplate'); + + if (templateSelect) { + templateSelect.addEventListener('change', function() { + if (this.value === 'custom') { + customTemplateContainer.style.display = 'block'; + customTemplateInput.required = true; + this.removeAttribute('name'); // Don't submit the select value + } else { + customTemplateContainer.style.display = 'none'; + customTemplateInput.required = false; + customTemplateInput.value = ''; + this.setAttribute('name', 'template'); // Submit select value + } + }); + } function addServiceRow(type = 'http', internalPort = '', externalHostname = '', externalDomainId = '', serviceId = null, externalPort = null, dnsName = '') { const row = document.createElement('tr'); @@ -303,6 +404,17 @@ const breadcrumbLabel = isEdit ? 'Edit' : 'New'; if (!row || !deletedInput || !removeBtn) return; + // Check if this is an existing service (has an ID hidden field) + const serviceIdInput = row.querySelector('input[name*="[id]"]'); + const isExistingService = serviceIdInput && serviceIdInput.value; + + // For new services (no ID), remove immediately from DOM + if (!isExistingService) { + row.remove(); + return; + } + + // For existing services, toggle between deleted and undo states const isDeleted = deletedInput.value === 'true'; if (isDeleted) { @@ -361,9 +473,6 @@ const breadcrumbLabel = isEdit ? 'Edit' : 'New'; addServiceRow(type, service.internalPort, externalHostname, externalDomainId, service.id, externalPort, dnsName); }); - } else { - // Add default HTTP service on port 80 with hostname from form for new containers - addServiceRow('http', '80', hostnameField.value || '', ''); } // Update externalHostname when hostname field changes (only for new containers) @@ -377,6 +486,254 @@ const breadcrumbLabel = isEdit ? 
'Edit' : 'New'; }); }); } + + // Environment Variables functionality + function addEnvVarRow(key = '', value = '') { + const row = document.createElement('tr'); + row.id = `env-row-${envVarCounter}`; + + // Key cell + const keyCell = document.createElement('td'); + keyCell.style.cssText = 'border: 1px solid #ddd; padding: 8px;'; + + const keyInput = document.createElement('input'); + keyInput.type = 'text'; + keyInput.name = `environmentVars[${envVarCounter}][key]`; + keyInput.value = key; + keyInput.placeholder = 'KEY_NAME'; + keyInput.pattern = '[A-Za-z_][A-Za-z0-9_]*'; + keyInput.style.cssText = 'width: 100%; padding: 4px;'; + + keyCell.appendChild(keyInput); + + // Value cell + const valueCell = document.createElement('td'); + valueCell.style.cssText = 'border: 1px solid #ddd; padding: 8px;'; + + const valueInput = document.createElement('input'); + valueInput.type = 'text'; + valueInput.name = `environmentVars[${envVarCounter}][value]`; + valueInput.value = value; + valueInput.placeholder = 'value'; + valueInput.style.cssText = 'width: 100%; padding: 4px;'; + + valueCell.appendChild(valueInput); + + // Action cell + const actionCell = document.createElement('td'); + actionCell.style.cssText = 'border: 1px solid #ddd; padding: 8px; text-align: center;'; + + const removeBtn = document.createElement('button'); + removeBtn.type = 'button'; + removeBtn.textContent = 'Delete'; + removeBtn.style.cssText = 'padding: 4px 8px; cursor: pointer; background-color: #dc3545; color: white; border: none; border-radius: 4px;'; + const currentIndex = envVarCounter; + removeBtn.onclick = () => { + document.getElementById(`env-row-${currentIndex}`).remove(); + }; + + actionCell.appendChild(removeBtn); + + row.appendChild(keyCell); + row.appendChild(valueCell); + row.appendChild(actionCell); + + document.getElementById('envVarsTableBody').appendChild(row); + envVarCounter++; + } + + document.getElementById('addEnvBtn').addEventListener('click', () => { + addEnvVarRow(); + }); + + // Initialize existing environment variables + if (existingEnvVars && typeof existingEnvVars === 'object') { + for (const [key, value] of Object.entries(existingEnvVars)) { + addEnvVarRow(key, value); + } + } + + // Volume attachment functionality (only for new containers) + <% if (!isEdit) { %> + const availableVolumes = <%- JSON.stringify((availableVolumes || []).map(v => ({ id: v.id, name: v.name, nodeId: v.nodeId, nodeName: v.node?.name || 'Unknown', sizeGb: v.sizeGb }))) %>; + let volumeCounter = 0; + const usedVolumeIds = new Set(); + const usedVolumeNames = new Set(); + + function getSelectedNodeId() { + const nodeSelect = document.getElementById('node'); + return nodeSelect ? 
parseInt(nodeSelect.value) : null; + } + + function addVolumeRow() { + const row = document.createElement('tr'); + row.id = `volume-row-${volumeCounter}`; + const selectedNodeId = getSelectedNodeId(); + const currentIndex = volumeCounter; + + // Volume name/select cell + const volumeCell = document.createElement('td'); + volumeCell.style.cssText = 'border: 1px solid #ddd; padding: 8px;'; + + // Create a wrapper for the input/select combo + const volumeWrapper = document.createElement('div'); + + // Text input for new volume name + const volumeInput = document.createElement('input'); + volumeInput.type = 'text'; + volumeInput.name = `volumes[${volumeCounter}][volumeName]`; + volumeInput.placeholder = 'Enter new volume name...'; + volumeInput.pattern = '^[a-zA-Z0-9][a-zA-Z0-9_-]*$'; + volumeInput.style.cssText = 'width: 100%; padding: 4px; margin-bottom: 4px;'; + volumeWrapper.appendChild(volumeInput); + + // Or select existing + if (availableVolumes.length > 0) { + const orLabel = document.createElement('div'); + orLabel.textContent = '— or select existing —'; + orLabel.style.cssText = 'font-size: 0.8em; color: #666; text-align: center; margin: 4px 0;'; + volumeWrapper.appendChild(orLabel); + + const volumeSelect = document.createElement('select'); + volumeSelect.name = `volumes[${volumeCounter}][volumeId]`; + volumeSelect.style.cssText = 'width: 100%; padding: 4px;'; + volumeSelect.dataset.index = volumeCounter; + + const defaultOption = document.createElement('option'); + defaultOption.value = ''; + defaultOption.textContent = 'Select existing volume...'; + volumeSelect.appendChild(defaultOption); + + availableVolumes.forEach(v => { + if (!usedVolumeIds.has(v.id)) { + const option = document.createElement('option'); + option.value = v.id; + option.dataset.nodeId = v.nodeId; + const sameNode = v.nodeId === selectedNodeId; + option.textContent = `${v.name} (${v.sizeGb}GB on ${v.nodeName})${!sameNode ? 
' ⚠️ Different node' : ''}`; + if (!sameNode && selectedNodeId) { + option.style.color = '#856404'; + } + volumeSelect.appendChild(option); + } + }); + + // When selecting existing, clear the new name input + volumeSelect.addEventListener('change', function(e) { + if (this.value) { + volumeInput.value = ''; + volumeInput.required = false; + } + updateUsedVolumes(); + }); + + // When typing new name, clear the select + volumeInput.addEventListener('input', function(e) { + if (this.value) { + volumeSelect.value = ''; + } + updateUsedVolumes(); + }); + + volumeWrapper.appendChild(volumeSelect); + } + + volumeCell.appendChild(volumeWrapper); + + // Mount path cell + const mountPathCell = document.createElement('td'); + mountPathCell.style.cssText = 'border: 1px solid #ddd; padding: 8px;'; + + const mountPathInput = document.createElement('input'); + mountPathInput.type = 'text'; + mountPathInput.name = `volumes[${volumeCounter}][mountPath]`; + mountPathInput.placeholder = '/data'; + mountPathInput.pattern = '^/[^.][^.]*$'; + mountPathInput.required = true; + mountPathInput.style.cssText = 'width: 100%; padding: 4px;'; + + mountPathCell.appendChild(mountPathInput); + + // Action cell + const actionCell = document.createElement('td'); + actionCell.style.cssText = 'border: 1px solid #ddd; padding: 8px; text-align: center;'; + + const removeBtn = document.createElement('button'); + removeBtn.type = 'button'; + removeBtn.textContent = 'Delete'; + removeBtn.style.cssText = 'padding: 4px 8px; cursor: pointer; background-color: #dc3545; color: white; border: none; border-radius: 4px;'; + removeBtn.onclick = () => { + document.getElementById(`volume-row-${currentIndex}`).remove(); + updateUsedVolumes(); + }; + + actionCell.appendChild(removeBtn); + + row.appendChild(volumeCell); + row.appendChild(mountPathCell); + row.appendChild(actionCell); + + document.getElementById('volumesTableBody').appendChild(row); + volumeCounter++; + } + + function updateUsedVolumes() { + usedVolumeIds.clear(); + usedVolumeNames.clear(); + + document.querySelectorAll('select[name*="[volumeId]"]').forEach(select => { + if (select.value) { + usedVolumeIds.add(parseInt(select.value)); + } + }); + + document.querySelectorAll('input[name*="[volumeName]"]').forEach(input => { + if (input.value) { + usedVolumeNames.add(input.value.toLowerCase()); + } + }); + + // Update all select options to hide already-used volumes + document.querySelectorAll('select[name*="[volumeId]"]').forEach(select => { + const currentValue = select.value; + select.querySelectorAll('option').forEach(option => { + if (option.value && option.value !== currentValue) { + option.style.display = usedVolumeIds.has(parseInt(option.value)) ? 'none' : ''; + } + }); + }); + } + + function updateVolumeOptionsForNode() { + const selectedNodeId = getSelectedNodeId(); + document.querySelectorAll('select[name*="[volumeId]"]').forEach(select => { + select.querySelectorAll('option').forEach(option => { + if (option.value) { + const optNodeId = parseInt(option.dataset.nodeId); + const vol = availableVolumes.find(v => v.id === parseInt(option.value)); + if (vol) { + const sameNode = optNodeId === selectedNodeId; + option.textContent = `${vol.name} (${vol.sizeGb}GB on ${vol.nodeName})${!sameNode ? ' ⚠️ Different node' : ''}`; + option.style.color = (!sameNode && selectedNodeId) ? 
'#856404' : ''; + } + } + }); + }); + } + + const addVolumeBtnEl = document.getElementById('addVolumeBtn'); + if (addVolumeBtnEl) { + addVolumeBtnEl.addEventListener('click', () => { + addVolumeRow(); + }); + } + + // Update volume options when node selection changes + const nodeSelectEl = document.getElementById('node'); + if (nodeSelectEl) { + nodeSelectEl.addEventListener('change', updateVolumeOptionsForNode); + } + <% } %> <%- include('../layouts/footer') %> diff --git a/create-a-container/views/containers/index.ejs b/create-a-container/views/containers/index.ejs index 4c0e26bb..7fb6f476 100644 --- a/create-a-container/views/containers/index.ejs +++ b/create-a-container/views/containers/index.ejs @@ -20,8 +20,9 @@ Hostname + Status IPv4 - OS Release + Template Node SSH Port HTTP Port @@ -33,13 +34,49 @@ <% rows.forEach(r => { %> <%= r.hostname %> + + <% if (r.status === 'running') { %> + Running + <% } else if (r.status === 'pending') { %> + + + Pending + + <% } else if (r.status === 'creating') { %> + + + Creating + + <% } else if (r.status === 'failed') { %> + Failed + <% } else if (r.status === 'restarting') { %> + + + Restarting + + <% } else { %> + <%= r.status || 'Unknown' %> + <% } %> + <% if (r.creationJobId && (r.status === 'pending' || r.status === 'creating' || r.status === 'failed')) { %> + + Details + + <% } %> + <%= r.ipv4Address || '-' %> - <%= r.osRelease || '-' %> + <%= r.template || '-' %> <%= r.nodeName %> <%= r.sshPort || '-' %> <%= r.httpPort || '-' %> - Edit + <% if (r.status === 'running' || r.status === 'failed') { %> + Edit +
+ + + +
+ <% } %>
@@ -49,7 +86,7 @@ <% }) %> <% } else { %> - + No containers found. Click "New Container" to create your first one. diff --git a/create-a-container/views/jobs/show.ejs b/create-a-container/views/jobs/show.ejs new file mode 100644 index 00000000..b4e19984 --- /dev/null +++ b/create-a-container/views/jobs/show.ejs @@ -0,0 +1,228 @@ +<%- include('../layouts/header', { + title: `Job #${job.id} - MIE`, + breadcrumbs: [ + { label: 'Jobs', url: '/jobs' }, + { label: `Job #${job.id}`, url: `/jobs/${job.id}` } + ], + colWidth: 'col-12 col-lg-10', + req +}) %> + +
+
+

+ Job #<%= job.id %> + + <% if (job.status === 'pending' || job.status === 'running') { %> + + <% } %> + <%= job.status.charAt(0).toUpperCase() + job.status.slice(1) %> + +

+ <% if (container) { %> + + Back to Containers + + <% } %> +
+ +
+
+
+

Command:

+ <%= job.command %> +
+
+

Created:

+ <%= new Date(job.createdAt).toLocaleString() %> +
+
+

Created By:

+ <%= job.createdBy || 'System' %> +
+
+ + <% if (container) { %> + + <% } %> + +
+
Output
+
+ + +
+
+ +
+ <% if (initialOutput && initialOutput.length > 0) { %> + <% initialOutput.forEach(line => { %><%= line.output %><% }) %> + <% } %> +
+ +
<% if (job.status === 'pending' || job.status === 'running') { %> + Connecting to live stream... + <% } else { %> + Job finished + <% } %> +
+
+ + +
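+    // A minimal sketch of how this page could consume the SSE endpoint defined
+    // in routers/jobs.js (GET /jobs/:id/stream). The element ids below are
+    // assumptions for illustration, not the view's actual markup:
+    //
+    //   const pre = document.getElementById('job-output');
+    //   const badge = document.getElementById('job-status');
+    //   const source = new EventSource('/jobs/<%= job.id %>/stream?lastId=0');
+    //   source.addEventListener('log', (e) => {
+    //     const entry = JSON.parse(e.data);   // { id, output, timestamp }
+    //     pre.textContent += entry.output;    // append each new log row
+    //   });
+    //   source.addEventListener('status', (e) => {
+    //     badge.textContent = JSON.parse(e.data).status;
+    //     source.close();                     // server ends the stream once the job stops
+    //   });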
+ + + + + +<%- include('../layouts/footer') %> diff --git a/create-a-container/views/layouts/header.ejs b/create-a-container/views/layouts/header.ejs index e166197a..d6425d1d 100644 --- a/create-a-container/views/layouts/header.ejs +++ b/create-a-container/views/layouts/header.ejs @@ -66,6 +66,9 @@
  • Containers
  • +
  • + Volumes +
  • <% if (req.session && req.session.isAdmin) { %>
  • Nodes diff --git a/create-a-container/views/nodes/form.ejs b/create-a-container/views/nodes/form.ejs index 79cad2be..23849bba 100644 --- a/create-a-container/views/nodes/form.ejs +++ b/create-a-container/views/nodes/form.ejs @@ -59,21 +59,21 @@
    - + -
    API token identifier (optional)
    +
    Proxmox username (e.g., root@pam)
    - +
    - <%= isEdit ? 'Leave blank to keep existing secret. Enter new secret to update.' : 'API token secret (optional)' %> + <%= isEdit ? 'Leave blank to keep existing password. Enter new password to update.' : 'Proxmox password' %>
    @@ -98,6 +98,22 @@
    Whether to verify TLS certificates when connecting to this node
    +
    + + + +
    Storage for CT Template images used when building containers
    +
    +
    Cancel
    <%- include('../layouts/footer') %> + +<% if (isEdit) { %> + +<% } %> diff --git a/create-a-container/views/nodes/import.ejs b/create-a-container/views/nodes/import.ejs index d8b550a0..451c7d4a 100644 --- a/create-a-container/views/nodes/import.ejs +++ b/create-a-container/views/nodes/import.ejs @@ -28,19 +28,20 @@
    - + +
    Proxmox username (e.g., root@pam)
    - + +
    Proxmox password
    diff --git a/create-a-container/views/nodes/index.ejs b/create-a-container/views/nodes/index.ejs index f48da939..2dac8f5a 100644 --- a/create-a-container/views/nodes/index.ejs +++ b/create-a-container/views/nodes/index.ejs @@ -25,6 +25,7 @@ IPv4 Address API URL TLS Verify + Placeholder Containers Actions @@ -45,6 +46,17 @@ Not Set <% } %> + + <% if (r.placeholderCtId) { %> + CT <%= r.placeholderCtId %> + <% } else { %> + + + + <% } %> + <%= r.containerCount %> Edit @@ -67,7 +79,7 @@ <% }) %> <% } else { %> - + No nodes found. Click "New Node" to create your first one. diff --git a/create-a-container/views/volumes/form.ejs b/create-a-container/views/volumes/form.ejs new file mode 100644 index 00000000..ea2f1978 --- /dev/null +++ b/create-a-container/views/volumes/form.ejs @@ -0,0 +1,69 @@ +<%- include('../layouts/header', { + title: (isEdit ? 'Edit Volume' : 'New Volume') + ' - MIE', + breadcrumbs: [ + { label: 'Sites', url: '/sites' }, + { label: site.name, url: `/sites/${site.id}/containers` }, + { label: 'Volumes', url: `/sites/${site.id}/volumes` }, + { label: isEdit ? 'Edit' : 'New', url: '#' } + ], + colWidth: 'col-lg-6', + req +}) %> + +
    +
    +

    <%= isEdit ? 'Edit Volume' : 'Create New Volume' %>

    + +
    + <% if (isEdit) { %> + + <% } %> + +
    + + + > +
    + Letters, numbers, dashes, and underscores only. Cannot be changed after creation. +
    +
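The hint above states the allowed character set for volume names. As an illustration, that rule maps onto a pattern like the following (a sketch; the actual server-side validation may differ):

```js
// Sketch of the "letters, numbers, dashes, and underscores only" rule.
const VOLUME_NAME_PATTERN = /^[A-Za-z0-9_-]+$/;

function isValidVolumeName(name) {
  return VOLUME_NAME_PATTERN.test(name);
}

console.log(isValidVolumeName('my-data_01')); // true
console.log(isValidVolumeName('bad/name'));   // false
```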
    + + <% if (!isEdit) { %> +
    + + +
    + The Proxmox node where this volume will be stored. +
    +
    + <% } %> + +
    + Cancel + +
    +
    +
    + +
    + +<%- include('../layouts/footer') %> diff --git a/create-a-container/views/volumes/index.ejs b/create-a-container/views/volumes/index.ejs new file mode 100644 index 00000000..53e75257 --- /dev/null +++ b/create-a-container/views/volumes/index.ejs @@ -0,0 +1,79 @@ +<%- include('../layouts/header', { + title: 'Volumes - MIE', + breadcrumbs: [ + { label: 'Sites', url: '/sites' }, + { label: site.name, url: `/sites/${site.id}/containers` }, + { label: 'Volumes', url: `/sites/${site.id}/volumes` } + ], + colWidth: 'col-12 col-lg-10', + req +}) %> + +
    +
    +
    +

    Your Volumes - <%= site.name %>

    + New Volume +
    + + + +
    + + + + + + + + + + + <% if (rows && rows.length) { %> + <% rows.forEach(r => { %> + + + + + + + <% }) %> + <% } else { %> + + + + <% } %> + +
    NameNodeStatusActions
    <%= r.name %><%= r.nodeName %> + <% if (r.attachedTo) { %> + + Attached to <%= r.attachedTo.hostname %> + +
    + Mount: <%= r.mountPath %> + <% } else { %> + Available + <% } %> +
    + <% if (!r.attachedTo) { %> +
    + + +
    + <% } else { %> + + <% } %> +
    + No volumes found. Click "New Volume" to create your first one. +
    +
    +
    + +
    + +<%- include('../layouts/footer') %> diff --git a/mie-opensource-landing/docs/developers/core-technologies.md b/mie-opensource-landing/docs/developers/core-technologies.md index 6746b45a..d9503a6c 100644 --- a/mie-opensource-landing/docs/developers/core-technologies.md +++ b/mie-opensource-landing/docs/developers/core-technologies.md @@ -127,6 +127,20 @@ The MIE Opensource Proxmox Cluster is built on several key open-source technolog - **Official Documentation**: [docs.npmjs.com](https://docs.npmjs.com/) - **CLI Commands**: [npm CLI](https://docs.npmjs.com/cli/v10/commands) +### Docker & Docker Compose + +**Docker** provides containerization for development and deployment, while **Docker Compose** orchestrates multi-container applications. + +- **Docker Documentation**: [docs.docker.com](https://docs.docker.com/) +- **Docker Compose Documentation**: [docs.docker.com/compose](https://docs.docker.com/compose/) +- **Dockerfile Reference**: [Dockerfile Reference](https://docs.docker.com/reference/dockerfile/) + +**Used For:** +- Building container images for deployment +- Local development environment (PostgreSQL via compose.yml) +- CI/CD pipeline image building +- Testing in isolated environments + ## Related Resources - [System Architecture](system-architecture): Understand how these technologies work together diff --git a/mie-opensource-landing/docs/developers/database-schema.md b/mie-opensource-landing/docs/developers/database-schema.md index cf2a5e75..2f6bca39 100644 --- a/mie-opensource-landing/docs/developers/database-schema.md +++ b/mie-opensource-landing/docs/developers/database-schema.md @@ -12,8 +12,13 @@ The cluster management system uses a relational database to store all configurat erDiagram Sites ||--o{ Nodes : contains Sites ||--o{ ExternalDomains : has + Sites ||--o{ Volumes : "scoped to" Nodes ||--o{ Containers : hosts + Nodes ||--o{ Volumes : "physically on" Containers ||--o{ Services : exposes + Containers ||--o{ ContainerVolumes : "mounts" + ContainerVolumes }o--|| Volumes : references + Containers }o--o| Jobs : "created by" Services ||--|| HTTPServices : "type: http" Services ||--|| TransportServices : "type: transport" Services ||--|| DnsServices : "type: dns" @@ -41,19 +46,40 @@ erDiagram string apiTokenIdOrUsername string apiTokenSecretOrPassword boolean disableTlsVerification + string imageStorage "default: local" + int placeholderCtId "VMID for volume placeholder" int siteId FK } + Volumes { + int id PK + string name + string username + string proxmoxVolume + int sizeGb + int siteId FK + int nodeId FK + } + + ContainerVolumes { + int id PK + int containerId FK + int volumeId FK + string mountPath + } + Containers { int id PK string hostname UK - string name - string description + string username + string status "pending,creating,running,failed" + string template + int creationJobId FK int nodeId FK int containerId string macAddress UK string ipv4Address UK - string status + string aiContainer } Services { @@ -171,10 +197,13 @@ The **Node** model represents a Proxmox VE server within a site. 
- `apiTokenIdOrUsername`: Authentication credential (username or token ID)
- `apiTokenSecretOrPassword`: Authentication secret
- `disableTlsVerification`: Skip TLS certificate validation
+- `imageStorage`: Proxmox storage name for pulled Docker/OCI images (default: 'local')
+- `placeholderCtId`: VMID of the placeholder container used for volume storage (auto-created)

**Relationships:**
- Belongs to Site
- Has many Containers
+- Has many Volumes (physically located on this node)

**Constraints:**
- `name` is unique across all nodes

@@ -184,21 +213,69 @@ The **Node** model represents a Proxmox VE server within a site.

The **Container** model represents an LXC container running on a Proxmox node.

**Key Fields:**
-- `hostname`: Container hostname
-- `name`: Display name
+- `hostname`: Container hostname (unique)
+- `username`: Owner of the container (who created it)
+- `status`: Container creation state ('pending', 'creating', 'running', 'failed')
+- `template`: Name of the Proxmox template or Docker image reference (e.g., `docker.io/library/nginx:latest`)
+- `creationJobId`: Foreign key to the Job that created this container (nullable)
- `containerId`: Proxmox container ID (CTID)
-- `macAddress`: Unique MAC address
-- `ipv4Address`: Assigned IP address
-- `status`: Container state (e.g., 'running', 'stopped')
+- `macAddress`: Unique MAC address (nullable for pending containers)
+- `ipv4Address`: Assigned IP address (nullable for pending containers)
+- `aiContainer`: AI container flag (default: 'N')

**Relationships:**
- Belongs to Node
- Has many Services
+- Has many ContainerVolumes (mounted volumes)
+- Belongs to Job (optional, via creationJobId)

**Constraints:**
- Unique composite index on `(nodeId, containerId)`
- `hostname`, `macAddress`, and `ipv4Address` are globally unique

+### Volume
+
+The **Volume** model represents a persistent data volume that survives container deletion.
+
+**Key Fields:**
+- `name`: User-friendly volume name
+- `username`: Owner of the volume
+- `proxmoxVolume`: Proxmox volume reference (e.g., 'local-lvm:vm-105-disk-1')
+- `sizeGb`: Volume size in gigabytes (default: 50)
+
+**Relationships:**
+- Belongs to Site (scoping)
+- Belongs to Node (physical location)
+- Has many ContainerVolumes (attachments)
+
+**Constraints:**
+- Unique composite index on `(username, name, siteId)` - volume names are unique per user per site
+
+**Lifecycle:**
+1. Volumes are created on the node's placeholder container
+2. When attached to a container, they are transferred from the placeholder
+3. When the container is deleted, volumes are transferred back to the placeholder
+4. The placeholder mount path (`/<username>/<volume_name>`) allows reconstruction
+
+### ContainerVolume
+
+The **ContainerVolume** model is a join table tracking which volumes are attached to which containers, and at what mount path.
+
+**Key Fields:**
+- `containerId`: Container the volume is attached to
+- `volumeId`: The volume being attached
+- `mountPath`: Mount point inside the container (e.g., '/data', '/storage')
+
+**Relationships:**
+- Belongs to Container
+- Belongs to Volume
+
+**Constraints:**
+- Unique composite index on `(containerId, volumeId)` - each volume attached once per container
+- Unique composite index on `(containerId, mountPath)` - mount paths unique within container
+
+**Note:** The mount path is stored in this join table, not the Volume model. This allows the same volume to be mounted at different paths when reattached to new containers.
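To make the join-table constraints concrete, here is a minimal Sequelize-style sketch of the ContainerVolume model described above (illustrative only; the project's actual model and migration files may be organized differently):

```js
// Sketch: the ContainerVolume join model with its two composite unique
// indexes, matching the constraints documented above.
const { DataTypes } = require('sequelize');

module.exports = (sequelize) =>
  sequelize.define('ContainerVolume', {
    containerId: { type: DataTypes.INTEGER, allowNull: false },
    volumeId: { type: DataTypes.INTEGER, allowNull: false },
    mountPath: { type: DataTypes.STRING, allowNull: false },
  }, {
    indexes: [
      // Each volume can be attached at most once per container.
      { unique: true, fields: ['containerId', 'volumeId'] },
      // Mount paths must be unique within a container.
      { unique: true, fields: ['containerId', 'mountPath'] },
    ],
  });
```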
+ ### Service (Base Model) The **Service** model uses Single Table Inheritance (STI) to represent different types of services exposed by containers. diff --git a/mie-opensource-landing/docs/developers/development-workflow.md b/mie-opensource-landing/docs/developers/development-workflow.md index 4ce050a4..cd5c6528 100644 --- a/mie-opensource-landing/docs/developers/development-workflow.md +++ b/mie-opensource-landing/docs/developers/development-workflow.md @@ -17,6 +17,42 @@ To contribute to the cluster management software: ## Local Development Setup +### Option 1: Using Docker Compose (Recommended) + +The simplest way to develop is using the included Docker Compose setup which provides a PostgreSQL database: + +```bash +# Clone the repository +git clone https://github.com/mieweb/opensource-server +cd opensource-server/create-a-container + +# Configure environment +cp example.env .env +# Edit .env with your Proxmox settings and database configuration: +# DATABASE_DIALECT=postgres +# POSTGRES_HOST=localhost +# POSTGRES_PORT=5432 +# POSTGRES_USER=your_user +# POSTGRES_PASSWORD=your_password +# POSTGRES_DATABASE=your_db + +# Start PostgreSQL +docker compose up -d + +# Install dependencies +npm install + +# Run database migrations +npm run db:migrate + +# Start the development server +npm run dev +``` + +### Option 2: Manual Setup + +For development without Docker: + ```bash # Clone the repository git clone https://github.com/mieweb/opensource-server @@ -28,9 +64,11 @@ npm install # Configure environment cp example.env .env # Edit .env with your Proxmox and database settings +# For SQLite (default): no additional database setup required +# For PostgreSQL/MySQL: ensure database server is running # Run database migrations -npx sequelize-cli db:migrate +npm run db:migrate # Start the development server npm run dev @@ -99,6 +137,45 @@ Before submitting changes: ## Debugging +### Local Docker Image Build + +You can build and test the Docker image locally before deploying: + +```bash +# Build the Docker image from the repository root +docker build -t opensource-server:dev . + +# Run the container (requires systemd support) +docker run -d --privileged \ + --name opensource-test \ + -p 80:80 -p 443:443 -p 53:53/udp \ + opensource-server:dev + +# View container logs +docker logs -f opensource-test + +# Access a shell in the container +docker exec -it opensource-test bash + +# Stop and remove the test container +docker stop opensource-test && docker rm opensource-test +``` + +**Note:** The Dockerfile copies your local repository code (including uncommitted changes), making it ideal for testing changes before pushing to GitHub. + +### CI/CD Pipeline + +The project uses GitHub Actions to automatically build and push Docker images: + +- **Trigger**: On every push to any branch +- **Registry**: GitHub Container Registry (ghcr.io) +- **Tags**: + - Branch name (e.g., `sprint`, `main`) + - `latest` tag for main branch only +- **Build optimization**: Uses GitHub Actions cache for faster builds + +The workflow file is located at `.github/workflows/docker-build-push.yml`. 
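When chasing the database connection errors listed under Troubleshooting below, it helps to see how the `.env` values from the Docker Compose setup map onto a connection. A minimal sketch, assuming Sequelize and the variable names shown in Option 1 above (the real config module may differ, and the SQLite storage path is an assumption):

```js
// Sketch: pick a Sequelize connection based on DATABASE_DIALECT, using
// the POSTGRES_* variables from the compose-based setup above.
require('dotenv').config();
const { Sequelize } = require('sequelize');

const dialect = process.env.DATABASE_DIALECT || 'sqlite';

module.exports = dialect === 'postgres'
  ? new Sequelize(
      process.env.POSTGRES_DATABASE,
      process.env.POSTGRES_USER,
      process.env.POSTGRES_PASSWORD,
      {
        host: process.env.POSTGRES_HOST || 'localhost',
        port: Number(process.env.POSTGRES_PORT || 5432),
        dialect: 'postgres',
      }
    )
  : new Sequelize({ dialect: 'sqlite', storage: 'database.sqlite' }); // assumed path
```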
+ ### API Server ```bash @@ -125,8 +202,15 @@ DB_LOGGING=true **Database Connection Errors:** - Verify database credentials in `.env` -- Ensure database service is running +- Ensure database service is running (for Docker: `docker compose ps`) - Check network connectivity +- For PostgreSQL via Docker Compose: ensure ports are not in use + +**Docker Compose Issues:** +- Check container status: `docker compose ps` +- View logs: `docker compose logs postgres` +- Restart services: `docker compose restart` +- Clean start: `docker compose down -v && docker compose up -d` **Proxmox API Errors:** - Verify API credentials are correct diff --git a/mie-opensource-landing/docs/developers/system-architecture.md b/mie-opensource-landing/docs/developers/system-architecture.md index 1ae27373..22272fd5 100644 --- a/mie-opensource-landing/docs/developers/system-architecture.md +++ b/mie-opensource-landing/docs/developers/system-architecture.md @@ -267,6 +267,70 @@ sequenceDiagram NGINX-->>Client: HTTPS response ``` +## Volume Lifecycle + +Named volumes provide persistent storage that survives container deletion. Each Proxmox node has a "placeholder container" that holds orphaned volumes. + +### Volume Flow Diagram + +```mermaid +flowchart TB + subgraph "Volume Creation" + A[User creates volume] --> B[Allocate disk on node storage] + B --> C[Attach to placeholder container] + C --> D[Mount at /username/volume_name] + D --> E[Store Volume record in DB] + end + + subgraph "Container with Volume" + F[User creates container with volume] --> G{Same node?} + G -->|Yes| H[Detach from placeholder] + G -->|No| I[Manual migration required] + H --> J[Move volume to new container] + J --> K[Mount at user-specified path] + K --> L[Create ContainerVolume record] + end + + subgraph "Container Deletion" + M[User deletes container] --> N[Stop container] + N --> O[For each attached volume] + O --> P[Move volume to placeholder] + P --> Q[Remount at /username/volume_name] + Q --> R[Delete ContainerVolume record] + R --> S[Delete container from Proxmox] + end + + subgraph "Volume Deletion" + T[User deletes volume] --> U{Attached to container?} + U -->|Yes| V[Error: detach first] + U -->|No| W[Detach from placeholder] + W --> X[Delete disk from storage] + X --> Y[Delete Volume record] + end + + classDef action fill:#e1f5fe + classDef decision fill:#fff3e0 + class A,B,C,D,E,F,H,I,J,K,L,M,N,O,P,Q,R,S,T,W,X,Y action + class G,U decision +``` + +### Placeholder Container + +Each Proxmox node automatically gets a placeholder container when it's registered: + +- **Purpose**: Acts as a "parking lot" for volumes not attached to user containers +- **Configuration**: Minimal Alpine, 16MB RAM, no network, protection enabled +- **Naming**: `mie-volume-placeholder` +- **VMID**: Dynamically assigned, stored in `Node.placeholderCtId` + +The placeholder container is never started. It only exists to satisfy Proxmox's requirement that all CT volumes must be owned by a container. 
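The placeholder's whole configuration amounts to a single CT-create call. The sketch below illustrates the parameters described above (minimal memory, protection on, no network, never started); the endpoint and fields follow the public Proxmox VE API, but the API token handling and the Alpine template volume ID are assumptions:

```js
// Sketch: create the volume-placeholder CT on a node via the Proxmox VE
// API. The template reference and token format are illustrative.
async function createPlaceholder(apiUrl, apiToken, node, vmid) {
  const params = new URLSearchParams({
    vmid: String(vmid),
    hostname: 'mie-volume-placeholder',
    ostemplate: 'local:vztmpl/alpine-3.20-default_amd64.tar.xz', // assumed template
    memory: '16',    // minimal footprint; the CT is never started
    protection: '1', // guard against accidental deletion
    // no net0 parameter: the placeholder needs no network
  });
  const res = await fetch(`${apiUrl}/api2/json/nodes/${node}/lxc`, {
    method: 'POST',
    headers: { Authorization: `PVEAPIToken=${apiToken}` },
    body: params,
  });
  if (!res.ok) throw new Error(`CT create failed: ${res.status}`);
  return (await res.json()).data; // UPID of the creation task
}
```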
+
+### Mount Path Handling
+
+- **On placeholder**: Volumes are mounted at `/<username>/<volume_name>` for identification
+- **On user containers**: Users specify their desired mount path (e.g., `/data`, `/storage`)
+- **Reattachment**: The same volume can be mounted at different paths each time it's attached
+
 ## Next Steps
 
 For more information:
diff --git a/mie-opensource-landing/docs/users/creating-containers/web-gui.mdx b/mie-opensource-landing/docs/users/creating-containers/web-gui.mdx
index 3b2ba607..9aa733e1 100644
--- a/mie-opensource-landing/docs/users/creating-containers/web-gui.mdx
+++ b/mie-opensource-landing/docs/users/creating-containers/web-gui.mdx
@@ -160,4 +160,39 @@ As of writing, you are able to reboot, start, and shutdown your container as you
 
 ---
 
+## Docker Container Configuration
+
+When creating containers from Docker images, you can configure additional settings that are specific to containerized applications.
+
+### Environment Variables
+
+Environment variables allow you to pass configuration to your containerized application at runtime. These are commonly used for:
+- Database connection strings
+- API keys and secrets
+- Feature flags
+- Application modes (development, production, etc.)
+
+To add environment variables, expand the **Environment Variables** section and click **Add Variable** to create key-value pairs.
+
+:::warning System Containers
+Environment variables are intended for **Docker-based containers** only. System containers (created from Proxmox templates like Debian or Rocky Linux) typically expect `init` as PID 1 and may not use environment variables in the same way. If you're using a standard Linux template, you generally don't need to set environment variables here.
+:::
+
+### Entrypoint Command
+
+The entrypoint command overrides the default startup command for a Docker container. This is useful when you need to:
+- Run a specific script or binary
+- Pass custom arguments to your application
+- Chain multiple startup commands
+
+:::warning System Containers
+The entrypoint command is intended for **Docker-based containers** only. System containers expect `init` as PID 1 and should not have their entrypoint overridden. Changing the entrypoint on a system container may prevent it from starting correctly.
+:::
+
+### Restarting After Configuration Changes
+
+When you modify environment variables or the entrypoint command on an existing container, the system will automatically restart the container to apply the changes. You can monitor the restart progress from the container list page.
+
+---
+
 **Need Help?**: For questions about container configuration or troubleshooting, contact the MIE team.
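To round out the Docker Container Configuration section above, here is what a typical set of environment variables and an entrypoint override might look like for a containerized web app. All values are examples only, not platform defaults:

```js
// Example only: the kind of key/value pairs and entrypoint a user might
// enter in the web GUI for a Docker-based container.
const exampleDockerConfig = {
  env: {
    NODE_ENV: 'production',
    PORT: '3000',
    DATABASE_URL: 'postgres://app:secret@db.example.com:5432/app',
  },
  // Run migrations before starting the server (illustrative).
  entrypoint: 'sh -c "npm run db:migrate && node server.js"',
};
```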