From 0388243e70a19a71870bee0b08166c1f00ff572e Mon Sep 17 00:00:00 2001 From: William Hollacsek Date: Fri, 26 Dec 2025 01:39:43 +0800 Subject: [PATCH] Add Supabase template 2025-12-18 checkpoint with Postgres 15 & 17 --- blueprints/supabase-pg17/docker-compose.yml | 542 +++++++++ blueprints/supabase-pg17/supabase.svg | 15 + blueprints/supabase-pg17/template.toml | 1156 +++++++++++++++++++ blueprints/supabase/docker-compose.yml | 77 +- blueprints/supabase/template.toml | 100 +- meta.json | 22 +- 6 files changed, 1861 insertions(+), 51 deletions(-) create mode 100644 blueprints/supabase-pg17/docker-compose.yml create mode 100644 blueprints/supabase-pg17/supabase.svg create mode 100644 blueprints/supabase-pg17/template.toml diff --git a/blueprints/supabase-pg17/docker-compose.yml b/blueprints/supabase-pg17/docker-compose.yml new file mode 100644 index 000000000..ea02fea2d --- /dev/null +++ b/blueprints/supabase-pg17/docker-compose.yml @@ -0,0 +1,542 @@ +# Usage +# Start: docker compose up +# With helpers: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml up +# Stop: docker compose down +# Destroy: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans +# Reset everything: ./reset.sh + +name: supabase + +services: + + studio: + container_name: ${CONTAINER_PREFIX}-studio + image: supabase/studio:2025.12.17-sha-43f4f7f + restart: unless-stopped + healthcheck: + test: + [ + "CMD", + "node", + "-e", + "fetch('http://studio:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})" + ] + timeout: 10s + interval: 5s + retries: 3 + depends_on: + analytics: + condition: service_healthy + environment: + # Binds nestjs listener to both IPv4 and IPv6 network interfaces + HOSTNAME: "::" + + STUDIO_PG_META_URL: http://meta:8080 + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_HOST: ${POSTGRES_HOST} + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + PG_META_CRYPTO_KEY: 
${PG_META_CRYPTO_KEY} + + DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION} + DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT} + OPENAI_API_KEY: ${OPENAI_API_KEY:-} + + SUPABASE_URL: http://kong:8000 + SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL} + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} + AUTH_JWT_SECRET: ${JWT_SECRET} + + # LOGFLARE_API_KEY is deprecated + LOGFLARE_API_KEY: ${LOGFLARE_PUBLIC_ACCESS_TOKEN} + LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN} + LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN} + + LOGFLARE_URL: http://analytics:4000 + NEXT_PUBLIC_ENABLE_LOGS: true + # Comment to use Big Query backend for analytics + NEXT_ANALYTICS_BACKEND_PROVIDER: postgres + # Uncomment to use Big Query backend for analytics + # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery + + kong: + container_name: ${CONTAINER_PREFIX}-kong + image: kong:2.8.1 + restart: unless-stopped + # ports: + # - ${KONG_HTTP_PORT}:8000/tcp + # - ${KONG_HTTPS_PORT}:8443/tcp + expose: + - 8000 + - 8443 + volumes: + # https://github.com/supabase/supabase/issues/12661 + - ../files/volumes/api/kong.yml:/home/kong/temp.yml:ro,z + depends_on: + analytics: + condition: service_healthy + environment: + KONG_DATABASE: "off" + KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml + # https://github.com/supabase/cli/issues/14 + KONG_DNS_ORDER: LAST,A,CNAME + KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth,request-termination,ip-restriction + KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k + KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} + DASHBOARD_USERNAME: ${DASHBOARD_USERNAME} + DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD} + # https://unix.stackexchange.com/a/294837 + entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start' + + auth: + container_name: ${CONTAINER_PREFIX}-auth + image: supabase/gotrue:v2.184.0 + 
restart: unless-stopped + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://localhost:9999/health" + ] + timeout: 5s + interval: 5s + retries: 3 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + environment: + # the next line seems required if you want to be able to send mails from the supabase GUI + GOTRUE_MAILER_EXTERNAL_HOSTS: kong,${SUPABASE_HOST} + GOTRUE_API_HOST: 0.0.0.0 + GOTRUE_API_PORT: 9999 + API_EXTERNAL_URL: ${API_EXTERNAL_URL} + + GOTRUE_DB_DRIVER: postgres + GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + + GOTRUE_SITE_URL: ${SITE_URL} + GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS} + GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP} + + GOTRUE_JWT_ADMIN_ROLES: service_role + GOTRUE_JWT_AUD: authenticated + GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated + GOTRUE_JWT_EXP: ${JWT_EXPIRY} + GOTRUE_JWT_SECRET: ${JWT_SECRET} + + GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP} + GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS} + GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM} + + # Uncomment to bypass nonce check in ID Token flow. Commonly set to true when using Google Sign In on mobile. 
+ # GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true + + # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true + # GOTRUE_SMTP_MAX_FREQUENCY: 1s + GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL} + GOTRUE_SMTP_HOST: ${SMTP_HOST} + GOTRUE_SMTP_PORT: ${SMTP_PORT} + GOTRUE_SMTP_USER: ${SMTP_USER} + GOTRUE_SMTP_PASS: ${SMTP_PASS} + GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME} + GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE} + GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION} + GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY} + GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE} + + GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP} + GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM} + # Uncomment to enable custom access token hook. Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook + + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true" + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI: "pg-functions://postgres/public/custom_access_token_hook" + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS: "" + + # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED: "true" + # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/mfa_verification_attempt" + + # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED: "true" + # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/password_verification_attempt" + + # GOTRUE_HOOK_SEND_SMS_ENABLED: "false" + # GOTRUE_HOOK_SEND_SMS_URI: "pg-functions://postgres/public/custom_access_token_hook" + # GOTRUE_HOOK_SEND_SMS_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n" + + # GOTRUE_HOOK_SEND_EMAIL_ENABLED: "false" + # GOTRUE_HOOK_SEND_EMAIL_URI: "http://host.docker.internal:54321/functions/v1/email_sender" + # GOTRUE_HOOK_SEND_EMAIL_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n" + + rest: + container_name: 
${CONTAINER_PREFIX}-rest + image: postgrest/postgrest:v14.1 + restart: unless-stopped + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + environment: + PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS} + PGRST_DB_ANON_ROLE: anon + PGRST_JWT_SECRET: ${JWT_SECRET} + PGRST_DB_USE_LEGACY_GUCS: "false" + PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET} + PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY} + command: + [ + "postgrest" + ] + + realtime: + # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain + container_name: realtime-dev.${CONTAINER_PREFIX}-realtime + image: supabase/realtime:v2.68.0 + restart: unless-stopped + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + healthcheck: + test: + [ + "CMD-SHELL", + "curl -sSfL --head -o /dev/null -H \"Authorization: Bearer ${ANON_KEY}\" http://localhost:4000/api/tenants/realtime-dev/health" + ] + timeout: 5s + interval: 5s + retries: 3 + environment: + PORT: 4000 + DB_HOST: ${POSTGRES_HOST} + DB_PORT: ${POSTGRES_PORT} + DB_USER: supabase_admin + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_NAME: ${POSTGRES_DB} + DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime' + DB_ENC_KEY: supabaserealtime + API_JWT_SECRET: ${JWT_SECRET} + SECRET_KEY_BASE: ${SECRET_KEY_BASE} + ERL_AFLAGS: -proto_dist inet_tcp + DNS_NODES: "''" + RLIMIT_NOFILE: "10000" + APP_NAME: realtime + SEED_SELF_HOST: "true" + RUN_JANITOR: "true" + + # To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up + storage: + container_name: ${CONTAINER_PREFIX}-storage + image: supabase/storage-api:v1.33.0 + restart: unless-stopped + volumes: + - 
../files/volumes/storage:/var/lib/storage:z + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://storage:5000/status" + ] + timeout: 5s + interval: 5s + retries: 3 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + rest: + condition: service_started + imgproxy: + condition: service_started + environment: + ANON_KEY: ${ANON_KEY} + SERVICE_KEY: ${SERVICE_ROLE_KEY} + POSTGREST_URL: http://rest:3000 + PGRST_JWT_SECRET: ${JWT_SECRET} + DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + REQUEST_ALLOW_X_FORWARDED_PATH: "true" + FILE_SIZE_LIMIT: 52428800 + STORAGE_BACKEND: file + FILE_STORAGE_BACKEND_PATH: /var/lib/storage + TENANT_ID: stub + # TODO: https://github.com/supabase/storage-api/issues/55 + REGION: stub + GLOBAL_S3_BUCKET: stub + ENABLE_IMAGE_TRANSFORMATION: "true" + IMGPROXY_URL: http://imgproxy:5001 + + imgproxy: + container_name: ${CONTAINER_PREFIX}-imgproxy + image: darthsim/imgproxy:v3.8.0 + restart: unless-stopped + volumes: + - ../files/volumes/storage:/var/lib/storage:z + healthcheck: + test: + [ + "CMD", + "imgproxy", + "health" + ] + timeout: 5s + interval: 5s + retries: 3 + environment: + IMGPROXY_BIND: ":5001" + IMGPROXY_LOCAL_FILESYSTEM_ROOT: / + IMGPROXY_USE_ETAG: "true" + IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION} + + meta: + container_name: ${CONTAINER_PREFIX}-meta + image: supabase/postgres-meta:v0.95.1 + restart: unless-stopped + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + environment: + PG_META_PORT: 8080 + PG_META_DB_HOST: ${POSTGRES_HOST} + PG_META_DB_PORT: ${POSTGRES_PORT} + PG_META_DB_NAME: ${POSTGRES_DB} + PG_META_DB_USER: supabase_admin + PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD} + CRYPTO_KEY: ${PG_META_CRYPTO_KEY} + + 
functions: + container_name: ${CONTAINER_PREFIX}-edge-functions + image: supabase/edge-runtime:v1.69.28 + restart: unless-stopped + volumes: + - ../files/volumes/functions:/home/deno/functions:Z + depends_on: + analytics: + condition: service_healthy + environment: + JWT_SECRET: ${JWT_SECRET} + SUPABASE_URL: http://kong:8000 + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY} + SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + # TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786 + VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}" + command: + [ + "start", + "--main-service", + "/home/deno/functions/main" + ] + + analytics: + container_name: ${CONTAINER_PREFIX}-analytics + image: supabase/logflare:1.27.0 + restart: unless-stopped + # ports: + # - 4000:4000 + expose: + - 4000 + # Uncomment to use Big Query backend for analytics + # volumes: + # - type: bind + # source: ${PWD}/gcloud.json + # target: /opt/app/rel/logflare/bin/gcloud.json + # read_only: true + healthcheck: + test: + [ + "CMD", + "curl", + "http://localhost:4000/health" + ] + timeout: 5s + interval: 5s + retries: 10 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + environment: + LOGFLARE_NODE_HOST: 127.0.0.1 + DB_USERNAME: supabase_admin + DB_DATABASE: _supabase + DB_HOSTNAME: ${POSTGRES_HOST} + DB_PORT: ${POSTGRES_PORT} + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_SCHEMA: _analytics + LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN} + LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN} + LOGFLARE_SINGLE_TENANT: true + LOGFLARE_SUPABASE_MODE: true + + # Comment variables to use Big Query backend for analytics + POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase + POSTGRES_BACKEND_SCHEMA: _analytics + 
LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true + # Uncomment to use Big Query backend for analytics + # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID} + # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER} + + # Comment out everything below this point if you are using an external Postgres database + db: + container_name: ${CONTAINER_PREFIX}-db + image: supabase/postgres:17.6.1.066 + restart: unless-stopped + volumes: + - ../files/volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z + # Must be superuser to create event trigger + - ../files/volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z + # Must be superuser to alter reserved role + - ../files/volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z + # Initialize the database settings with JWT_SECRET and JWT_EXP + - ../files/volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z + # PGDATA directory is persisted between restarts + - ../files/volumes/db/data:/var/lib/postgresql/data:Z + # Changes required for internal supabase data such as _analytics + - ../files/volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z + # Changes required for Analytics support + - ../files/volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z + # Changes required for Pooler support + - ../files/volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z + # Use named volume to persist pgsodium decryption key between restarts + - db-config:/etc/postgresql-custom + healthcheck: + test: + [ + "CMD", + "pg_isready", + "-U", + "postgres", + "-h", + "localhost" + ] + interval: 5s + timeout: 5s + retries: 10 + depends_on: + vector: + condition: service_healthy + environment: + POSTGRES_HOST: /var/run/postgresql + PGPORT: ${POSTGRES_PORT} + POSTGRES_PORT: ${POSTGRES_PORT} + PGPASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + PGDATABASE: ${POSTGRES_DB} + 
POSTGRES_DB: ${POSTGRES_DB} + JWT_SECRET: ${JWT_SECRET} + JWT_EXP: ${JWT_EXPIRY} + command: + [ + "postgres", + "-c", + "config_file=/etc/postgresql/postgresql.conf", + "-c", + "log_min_messages=fatal" # prevents Realtime polling queries from appearing in logs + ] + + vector: + container_name: ${CONTAINER_PREFIX}-vector + image: timberio/vector:0.28.1-alpine + restart: unless-stopped + volumes: + - ../files/volumes/logs/vector.yml:/etc/vector/vector.yml:ro,z + - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro,z + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://vector:9001/health" + ] + timeout: 5s + interval: 5s + retries: 3 + environment: + LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN} + command: + [ + "--config", + "/etc/vector/vector.yml" + ] + security_opt: + - "label=disable" + + # Update the DATABASE_URL if you are using an external Postgres database + supavisor: + container_name: ${CONTAINER_PREFIX}-pooler + image: supabase/supavisor:2.7.4 + restart: unless-stopped + ports: # expose supavisor to the host to enable db pooler connection + - ${POSTGRES_PORT}:5432 + - ${POOLER_PROXY_PORT_TRANSACTION}:6543 + volumes: + - ../files/volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro,z + healthcheck: + test: + [ + "CMD", + "curl", + "-sSfL", + "--head", + "-o", + "/dev/null", + "http://127.0.0.1:4000/api/health" + ] + interval: 10s + timeout: 5s + retries: 5 + depends_on: + db: + condition: service_healthy + analytics: + condition: service_healthy + environment: + PORT: 4000 + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase + CLUSTER_POSTGRES: true + SECRET_KEY_BASE: ${SECRET_KEY_BASE} + VAULT_ENC_KEY: ${VAULT_ENC_KEY} + API_JWT_SECRET: ${JWT_SECRET} + METRICS_JWT_SECRET: ${JWT_SECRET} + REGION: local + ERL_AFLAGS: -proto_dist inet_tcp + 
POOLER_TENANT_ID: ${POOLER_TENANT_ID} + POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE} + POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN} + POOLER_POOL_MODE: transaction + DB_POOL_SIZE: ${POOLER_DB_POOL_SIZE} + command: + [ + "/bin/sh", + "-c", + "/app/bin/migrate && /app/bin/supavisor eval \"$$(cat /etc/pooler/pooler.exs)\" && /app/bin/server" + ] + +volumes: + db-config: diff --git a/blueprints/supabase-pg17/supabase.svg b/blueprints/supabase-pg17/supabase.svg new file mode 100644 index 000000000..2b69d42e7 --- /dev/null +++ b/blueprints/supabase-pg17/supabase.svg @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/blueprints/supabase-pg17/template.toml b/blueprints/supabase-pg17/template.toml new file mode 100644 index 000000000..9a082a282 --- /dev/null +++ b/blueprints/supabase-pg17/template.toml @@ -0,0 +1,1156 @@ +[variables] +main_domain = "${domain}" +postgres_password = "${password:32}" +dashboard_password = "${password:32}" +logflare_public_token = "${password:32}" +logflare_private_token = "${password:32}" +secret_key_base = "${password:64}" +vault_enc_key = "${password:32}" +jwt_secret = "${password:32}" +pg_meta_crypto_key = "${password:32}" +container_name_prefix = "${APP_NAME}-supabase" +anon_key_payload = """{ + "role": "anon", + "iss": "supabase", + "exp": ${timestamps:2030-01-01T00:00:00Z} +} +""" +service_role_key_payload = """{ + "role": "service_role", + "iss": "supabase", + "exp": ${timestamps:2030-01-01T00:00:00Z} +} +""" + +[[config.domains]] +serviceName = "kong" +port = 8_000 +host = "${main_domain}" + +[config] +env = [ +'############', +'# To get a proper working configuration you should at least take a look at:', +'# - SUPABASE_PUBLIC_URL, API_EXTERNAL_URL should point to your supabase domain with correct http/https scheme', +'# - SMTP_* are required for auth mail sending', +'# - ADDITIONAL_REDIRECT_URLS, SITE_URL should point to application using supabase for authentication', +'# They are used for redirecting after 
login/signup and gotrue will check them before sending emails', +'# - POSTGRES_PORT, POOLER_PROXY_PORT_TRANSACTION should be changed if you are already running other instances of supabase', +'#', +'# Supabase uses container names in part of its configuration so it is important to keep them', +'# This template generates a random prefix for the container names to avoid conflicts', +'# If you change it you will need to update routes in the vector.yml file in advanced->mounts section', +'############', +'CONTAINER_PREFIX=${container_name_prefix}', +'', +'############', +'# Secrets', +'# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION', +'# https://supabase.com/docs/guides/self-hosting/docker#securing-your-services', +'# In this version of the template they are generated randomly by dokploy helpers', +'# so you do not need to change them manually', +'# Go to https://supabase.com/docs/guides/self-hosting for more information', +'############', +'', +'SUPABASE_HOST=${main_domain}', +'POSTGRES_PASSWORD=${postgres_password}', +'JWT_SECRET=${jwt_secret}', +'ANON_KEY=${jwt:jwt_secret:anon_key_payload}', +'SERVICE_ROLE_KEY=${jwt:jwt_secret:service_role_key_payload}', +'DASHBOARD_USERNAME=supabase', +'DASHBOARD_PASSWORD=${dashboard_password}', +'SECRET_KEY_BASE=${secret_key_base}', +'VAULT_ENC_KEY=${vault_enc_key}', +'PG_META_CRYPTO_KEY=${pg_meta_crypto_key}', +'', +'', +'############', +'# Database - You can change these to any PostgreSQL database that has logical replication enabled.', +'############', +'', +'POSTGRES_HOST=db', +'POSTGRES_DB=postgres', +'POSTGRES_PORT=5432', +'# default user is postgres', +'', +'', +'############', +'# Supavisor -- Database pooler', +'############', +'# Port Supavisor listens on for transaction pooling connections', +'POOLER_PROXY_PORT_TRANSACTION=6543', +'# Maximum number of PostgreSQL connections Supavisor opens per pool', +'POOLER_DEFAULT_POOL_SIZE=20', +'# Maximum number of client connections Supavisor accepts per pool', 
+'POOLER_MAX_CLIENT_CONN=100', +'# Unique tenant identifier', +'POOLER_TENANT_ID=your-tenant-id', +'# Pool size for internal metadata storage used by Supavisor', +'# This is separate from client connections and used only by Supavisor itself', +'POOLER_DB_POOL_SIZE=5', +'', +'', +'############', +'# API Proxy - Configuration for the Kong Reverse proxy.', +'# Following ports should not be changed for a dokploy config unless you know what you are doing.', +'############', +'', +'KONG_HTTP_PORT=8000', +'KONG_HTTPS_PORT=8443', +'', +'', +'############', +'# API - Configuration for PostgREST.', +'############', +'', +'PGRST_DB_SCHEMAS=public,storage,graphql_public', +'', +'', +'############', +'# Auth - Configuration for the GoTrue authentication server.', +'############', +'', +'## General', +'SITE_URL=http://localhost:3000', +'ADDITIONAL_REDIRECT_URLS=http://${main_domain}/*,http://localhost:3000/*', +'JWT_EXPIRY=3600', +'DISABLE_SIGNUP=false', +'API_EXTERNAL_URL=http://${main_domain}', +'', +'## Mailer Config', +'MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"', +'MAILER_URLPATHS_INVITE="/auth/v1/verify"', +'MAILER_URLPATHS_RECOVERY="/auth/v1/verify"', +'MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"', +'', +'## Email auth', +'ENABLE_EMAIL_SIGNUP=true', +'ENABLE_EMAIL_AUTOCONFIRM=false', +'SMTP_ADMIN_EMAIL=admin@example.com', +'SMTP_HOST=supabase-mail', +'SMTP_PORT=2500', +'SMTP_USER=fake_mail_user', +'SMTP_PASS=fake_mail_password', +'SMTP_SENDER_NAME=fake_sender', +'ENABLE_ANONYMOUS_USERS=false', +'', +'## Phone auth', +'ENABLE_PHONE_SIGNUP=true', +'ENABLE_PHONE_AUTOCONFIRM=true', +'', +'', +'############', +'# Studio - Configuration for the Dashboard', +'############', +'', +'STUDIO_DEFAULT_ORGANIZATION=Default Organization', +'STUDIO_DEFAULT_PROJECT=Default Project', +'', +'# replace if you intend to use Studio outside of localhost', +'SUPABASE_PUBLIC_URL=http://${main_domain}', +'', +'# Enable webp support', +'IMGPROXY_ENABLE_WEBP_DETECTION=true', +'', +'# Add your 
OpenAI API key to enable SQL Editor Assistant', +'OPENAI_API_KEY=', +'', +'', +'############', +'# Functions - Configuration for Functions', +'############', +'# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.', +'FUNCTIONS_VERIFY_JWT=false', +'', +'', +'############', +'# Logs - Configuration for Analytics', +'# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction', +'############', +'', +'# Change vector.toml sinks to reflect this change', +'# these cannot be the same value', +'LOGFLARE_PUBLIC_ACCESS_TOKEN=${logflare_public_token}', +'LOGFLARE_PRIVATE_ACCESS_TOKEN=${logflare_private_token}', +'', +'# Docker socket location - this value will differ depending on your OS', +'DOCKER_SOCKET_LOCATION=/var/run/docker.sock', +'', +'# Google Cloud Project details', +'GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID', +'GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER'] + +[[config.mounts]] +filePath = "/volumes/api/kong.yml" +content = """_format_version: '2.1' +_transform: true + +### +### Consumers / Users +### +consumers: + - username: DASHBOARD + - username: anon + keyauth_credentials: + - key: $SUPABASE_ANON_KEY + - username: service_role + keyauth_credentials: + - key: $SUPABASE_SERVICE_KEY + +### +### Access Control List +### +acls: + - consumer: anon + group: anon + - consumer: service_role + group: admin + +### +### Dashboard credentials +### +basicauth_credentials: + - consumer: DASHBOARD + username: $DASHBOARD_USERNAME + password: $DASHBOARD_PASSWORD + +### +### API Routes +### +services: + ## Open Auth routes + - name: auth-v1-open + url: http://auth:9999/verify + routes: + - name: auth-v1-open + strip_path: true + paths: + - /auth/v1/verify + plugins: + - name: cors + - name: auth-v1-open-callback + url: http://auth:9999/callback + routes: + - name: auth-v1-open-callback + strip_path: true + paths: + - /auth/v1/callback + plugins: + - name: cors + - name: auth-v1-open-authorize + url: 
http://auth:9999/authorize + routes: + - name: auth-v1-open-authorize + strip_path: true + paths: + - /auth/v1/authorize + plugins: + - name: cors + + ## Secure Auth routes + - name: auth-v1 + _comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*' + url: http://auth:9999/ + routes: + - name: auth-v1-all + strip_path: true + paths: + - /auth/v1/ + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + + ## Secure REST routes + - name: rest-v1 + _comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*' + url: http://rest:3000/ + routes: + - name: rest-v1-all + strip_path: true + paths: + - /rest/v1/ + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: true + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + + ## Secure GraphQL routes + - name: graphql-v1 + _comment: 'PostgREST: /graphql/v1/* -> http://rest:3000/rpc/graphql' + url: http://rest:3000/rpc/graphql + routes: + - name: graphql-v1-all + strip_path: true + paths: + - /graphql/v1 + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: true + - name: request-transformer + config: + add: + headers: + - Content-Profile:graphql_public + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + + ## Secure Realtime routes + - name: realtime-v1-ws + _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*' + url: http://realtime-dev.supabase-realtime:4000/socket + protocol: ws + routes: + - name: realtime-v1-ws + strip_path: true + paths: + - /realtime/v1/ + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + - name: realtime-v1-rest + _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*' + url: http://realtime-dev.supabase-realtime:4000/api + protocol: http + routes: + - name: realtime-v1-rest + 
strip_path: true + paths: + - /realtime/v1/api + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + ## Storage routes: the storage server manages its own auth + - name: storage-v1 + _comment: 'Storage: /storage/v1/* -> http://storage:5000/*' + url: http://storage:5000/ + routes: + - name: storage-v1-all + strip_path: true + paths: + - /storage/v1/ + plugins: + - name: cors + + ## Edge Functions routes + - name: functions-v1 + _comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*' + url: http://functions:9000/ + routes: + - name: functions-v1-all + strip_path: true + paths: + - /functions/v1/ + plugins: + - name: cors + + ## Analytics routes + - name: analytics-v1 + _comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*' + url: http://analytics:4000/ + routes: + - name: analytics-v1-all + strip_path: true + paths: + - /analytics/v1/ + + ## Block access to /api/mcp + - name: mcp-blocker + _comment: 'Block direct access to /api/mcp' + url: http://studio:3000/api/mcp + routes: + - name: mcp-blocker-route + strip_path: true + paths: + - /api/mcp + plugins: + - name: request-termination + config: + status_code: 403 + message: "Access is forbidden." + + ## MCP endpoint - local access + - name: mcp + _comment: 'MCP: /mcp -> http://studio:3000/api/mcp (local access)' + url: http://studio:3000/api/mcp + routes: + - name: mcp + strip_path: true + paths: + - /mcp + plugins: + # Block access to /mcp by default + - name: request-termination + config: + status_code: 403 + message: "Access is forbidden." + # Enable local access (danger zone!) + # 1. Comment out the 'request-termination' section above + # 2. Uncomment the entire section below, including 'deny' + # 3. 
Add your local IPs to the 'allow' list + #- name: cors + #- name: ip-restriction + # config: + # allow: + # - 127.0.0.1 + # - ::1 + # deny: [] + + ## Secure Database routes + - name: meta + _comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*' + url: http://meta:8080/ + routes: + - name: meta-all + strip_path: true + paths: + - /pg/ + plugins: + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + + ## Protected Dashboard - catch all remaining routes + - name: dashboard + _comment: 'Studio: /* -> http://studio:3000/*' + url: http://studio:3000/ + routes: + - name: dashboard-all + strip_path: true + paths: + - / + plugins: + - name: cors + - name: basic-auth + config: + hide_credentials: true +""" + +[[config.mounts]] +filePath = "/volumes/db/init/data.sql" +content = "" + +[[config.mounts]] +filePath = "/volumes/db/_supabase.sql" +content = """\\set pguser `echo "$POSTGRES_USER"` + +CREATE DATABASE _supabase WITH OWNER :pguser; +""" + +[[config.mounts]] +filePath = "/volumes/db/jwt.sql" +content = """ +\\set jwt_secret `echo "$JWT_SECRET"` +\\set jwt_exp `echo "$JWT_EXP"` + +ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret'; +ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp'; +""" + +[[config.mounts]] +filePath = "/volumes/db/logs.sql" +content = """ +\\set pguser `echo "$POSTGRES_USER"` + +\\c _supabase +create schema if not exists _analytics; +alter schema _analytics owner to :pguser; +\\c postgres +""" + +[[config.mounts]] +filePath = "/volumes/db/pooler.sql" +content = """ +\\set pguser `echo "$POSTGRES_USER"` + +\\c _supabase +create schema if not exists _supavisor; +alter schema _supavisor owner to :pguser; +\\c postgres +""" + +[[config.mounts]] +filePath = "/volumes/db/realtime.sql" +content = """ +\\set pguser `echo "$POSTGRES_USER"` + +create schema if not exists _realtime; +alter schema _realtime owner to :pguser; +""" + +[[config.mounts]] +filePath 
= "/volumes/db/roles.sql" +content = """ +-- NOTE: change to your own passwords for production environments +\\set pgpass `echo "$POSTGRES_PASSWORD"` + +ALTER USER authenticator WITH PASSWORD :'pgpass'; +ALTER USER pgbouncer WITH PASSWORD :'pgpass'; +ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass'; +ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass'; +ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass'; +""" + +[[config.mounts]] +filePath = "/volumes/db/webhooks.sql" +content = """ +BEGIN; + -- Create pg_net extension + CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions; + -- Create supabase_functions schema + CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin; + GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role; + ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role; + ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role; + ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role; + -- supabase_functions.migrations definition + CREATE TABLE supabase_functions.migrations ( + version text PRIMARY KEY, + inserted_at timestamptz NOT NULL DEFAULT NOW() + ); + -- Initial supabase_functions migration + INSERT INTO supabase_functions.migrations (version) VALUES ('initial'); + -- supabase_functions.hooks definition + CREATE TABLE supabase_functions.hooks ( + id bigserial PRIMARY KEY, + hook_table_id integer NOT NULL, + hook_name text NOT NULL, + created_at timestamptz NOT NULL DEFAULT NOW(), + request_id bigint + ); + CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id); + CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name); + COMMENT ON TABLE supabase_functions.hooks IS 
'Supabase Functions Hooks: Audit trail for triggered hooks.'; + CREATE FUNCTION supabase_functions.http_request() + RETURNS trigger + LANGUAGE plpgsql + AS $function$ + DECLARE + request_id bigint; + payload jsonb; + url text := TG_ARGV[0]::text; + method text := TG_ARGV[1]::text; + headers jsonb DEFAULT '{}'::jsonb; + params jsonb DEFAULT '{}'::jsonb; + timeout_ms integer DEFAULT 1000; + BEGIN + IF url IS NULL OR url = 'null' THEN + RAISE EXCEPTION 'url argument is missing'; + END IF; + + IF method IS NULL OR method = 'null' THEN + RAISE EXCEPTION 'method argument is missing'; + END IF; + + IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN + headers = '{"Content-Type": "application/json"}'::jsonb; + ELSE + headers = TG_ARGV[2]::jsonb; + END IF; + + IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN + params = '{}'::jsonb; + ELSE + params = TG_ARGV[3]::jsonb; + END IF; + + IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN + timeout_ms = 1000; + ELSE + timeout_ms = TG_ARGV[4]::integer; + END IF; + + CASE + WHEN method = 'GET' THEN + SELECT http_get INTO request_id FROM net.http_get( + url, + params, + headers, + timeout_ms + ); + WHEN method = 'POST' THEN + payload = jsonb_build_object( + 'old_record', OLD, + 'record', NEW, + 'type', TG_OP, + 'table', TG_TABLE_NAME, + 'schema', TG_TABLE_SCHEMA + ); + + SELECT http_post INTO request_id FROM net.http_post( + url, + payload, + params, + headers, + timeout_ms + ); + ELSE + RAISE EXCEPTION 'method argument % is invalid', method; + END CASE; + + INSERT INTO supabase_functions.hooks + (hook_table_id, hook_name, request_id) + VALUES + (TG_RELID, TG_NAME, request_id); + + RETURN NEW; + END + $function$; + -- Supabase super admin + DO + $$ + BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_functions_admin' + ) + THEN + CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + END + $$; + GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin; 
+ GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin; + GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin; + ALTER USER supabase_functions_admin SET search_path = "supabase_functions"; + ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin; + ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin; + ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin; + GRANT supabase_functions_admin TO postgres; + -- Remove unused supabase_pg_net_admin role + DO + $$ + BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_pg_net_admin' + ) + THEN + REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin; + DROP OWNED BY supabase_pg_net_admin; + DROP ROLE supabase_pg_net_admin; + END IF; + END + $$; + -- pg_net grants when extension is already enabled + DO + $$ + BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_extension + WHERE extname = 'pg_net' + ) + THEN + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) 
TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END + $$; + -- Event trigger for pg_net + CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access() + RETURNS event_trigger + LANGUAGE plpgsql + AS $$ + BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END; + $$; + COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net'; + DO + $$ + BEGIN + IF NOT EXISTS ( + SELECT 1 
+        FROM pg_event_trigger
+        WHERE evtname = 'issue_pg_net_access'
+      ) THEN
+        CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
+        EXECUTE PROCEDURE extensions.grant_pg_net_access();
+      END IF;
+    END
+    $$;
+  INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
+  ALTER function supabase_functions.http_request() SECURITY DEFINER;
+  ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
+  REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
+  GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
+COMMIT;
+"""
+
+[[config.mounts]]
+filePath = "/volumes/functions/hello/index.ts"
+content = """
+// Follow this setup guide to integrate the Deno language server with your editor:
+// https://deno.land/manual/getting_started/setup_your_environment
+// This enables autocomplete, go to definition, etc.
+
+import { serve } from "https://deno.land/std@0.177.1/http/server.ts"
+
+serve(async () => {
+  return new Response(
+    `"Hello from Edge Functions!"`,
+    { headers: { "Content-Type": "application/json" } },
+  )
+})
+
+// To invoke:
+// curl 'http://localhost:<KONG_HTTP_PORT>/functions/v1/hello' \
+//   --header 'Authorization: Bearer <anon/service_role API key>'
+"""
+
+[[config.mounts]]
+filePath = "/volumes/functions/main/index.ts"
+content = """import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
+import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts'
+
+console.log('main function started')
+
+const JWT_SECRET = Deno.env.get('JWT_SECRET')
+const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true'
+
+function getAuthToken(req: Request) {
+  const authHeader = req.headers.get('authorization')
+  if (!authHeader) {
+    throw new Error('Missing authorization header')
+  }
+  const [bearer, token] = authHeader.split(' ')
+  if (bearer !== 'Bearer') {
+    throw new Error(`Auth header is not 'Bearer {token}'`)
+  }
+  return token
+}
+
+async function verifyJWT(jwt: string): Promise<boolean> {
+  const encoder = new TextEncoder()
+  const secretKey = encoder.encode(JWT_SECRET)
+  try {
+    await jose.jwtVerify(jwt, secretKey)
+  } catch (err) {
+    console.error(err)
+    return false
+  }
+  return true
+}
+
+serve(async (req: Request) => {
+  if (req.method !== 'OPTIONS' && VERIFY_JWT) {
+    try {
+      const token = getAuthToken(req)
+      const isValidJWT = await verifyJWT(token)
+
+      if (!isValidJWT) {
+        return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
+          status: 401,
+          headers: { 'Content-Type': 'application/json' },
+        })
+      }
+    } catch (e) {
+      console.error(e)
+      return new Response(JSON.stringify({ msg: e.toString() }), {
+        status: 401,
+        headers: { 'Content-Type': 'application/json' },
+      })
+    }
+  }
+
+  const url = new URL(req.url)
+  const { pathname } = url
+  const path_parts = pathname.split('/')
+  const service_name = path_parts[1]
+
+  if (!service_name || service_name === '') {
+    const error = { msg: 'missing function name in request' }
+    return new Response(JSON.stringify(error), {
+      status: 400,
+      headers: { 'Content-Type': 'application/json' },
+    })
+  }
+
+  const servicePath = `/home/deno/functions/${service_name}`
+  console.error(`serving the request with ${servicePath}`)
+
+  const memoryLimitMb = 150
+  const workerTimeoutMs = 1 * 60 * 1000
+  const noModuleCache = false
+  const importMapPath = null
+  const envVarsObj = Deno.env.toObject()
+  const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]])
+
+  try {
+    const worker = await EdgeRuntime.userWorkers.create({
+      servicePath,
+      memoryLimitMb,
+      workerTimeoutMs,
+      noModuleCache,
+      importMapPath,
+      envVars,
+    })
+    return await worker.fetch(req)
+  } catch (e) {
+    const error = { msg: e.toString() }
+    return new Response(JSON.stringify(error), {
+      status: 500,
+      headers: { 'Content-Type': 'application/json' },
+    })
+  }
+})
+"""
+
+[[config.mounts]]
+filePath = "/volumes/logs/vector.yml"
+content = """api:
+  enabled: true
+  address: 
0.0.0.0:9001 + +sources: + docker_host: + type: docker_logs + exclude_containers: + - ${container_name_prefix}-vector + +transforms: + project_logs: + type: remap + inputs: + - docker_host + source: |- + .project = "default" + .event_message = del(.message) + .appname = replace!(del(.container_name), "${container_name_prefix}", "supabase") + del(.container_created_at) + del(.container_id) + del(.source_type) + del(.stream) + del(.label) + del(.image) + del(.host) + del(.stream) + router: + type: route + inputs: + - project_logs + route: + kong: '.appname == "supabase-kong"' + auth: '.appname == "supabase-auth"' + rest: '.appname == "supabase-rest"' + realtime: '.appname == "realtime-dev.supabase-realtime"' + storage: '.appname == "supabase-storage"' + functions: '.appname == "supabase-edge-functions"' + db: '.appname == "supabase-db"' + # Ignores non nginx errors since they are related with kong booting up + kong_logs: + type: remap + inputs: + - router.kong + source: |- + req, err = parse_nginx_log(.event_message, "combined") + if err == null { + .timestamp = req.timestamp + .metadata.request.headers.referer = req.referer + .metadata.request.headers.user_agent = req.agent + .metadata.request.headers.cf_connecting_ip = req.client + .metadata.request.method = req.method + .metadata.request.path = req.path + .metadata.request.protocol = req.protocol + .metadata.response.status_code = req.status + } + if err != null { + abort + } + # Ignores non nginx errors since they are related with kong booting up + kong_err: + type: remap + inputs: + - router.kong + source: |- + .metadata.request.method = "GET" + .metadata.response.status_code = 200 + parsed, err = parse_nginx_log(.event_message, "error") + if err == null { + .timestamp = parsed.timestamp + .severity = parsed.severity + .metadata.request.host = parsed.host + .metadata.request.headers.cf_connecting_ip = parsed.client + url, err = split(parsed.request, " ") + if err == null { + .metadata.request.method = url[0] + 
.metadata.request.path = url[1] + .metadata.request.protocol = url[2] + } + } + if err != null { + abort + } + # Gotrue logs are structured json strings which frontend parses directly. But we keep metadata for consistency. + auth_logs: + type: remap + inputs: + - router.auth + source: |- + parsed, err = parse_json(.event_message) + if err == null { + .metadata.timestamp = parsed.time + .metadata = merge!(.metadata, parsed) + } + # PostgREST logs are structured so we separate timestamp from message using regex + rest_logs: + type: remap + inputs: + - router.rest + source: |- + parsed, err = parse_regex(.event_message, r'^(?P