Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
513 changes: 393 additions & 120 deletions graphile/graphile-presigned-url-plugin/src/plugin.ts

Large diffs are not rendered by default.

20 changes: 18 additions & 2 deletions graphile/graphile-presigned-url-plugin/src/storage-module-cache.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@ const DEFAULT_DOWNLOAD_URL_EXPIRY_SECONDS = 3600; // 1 hour
const DEFAULT_MAX_FILE_SIZE = 200 * 1024 * 1024; // 200MB
const DEFAULT_MAX_FILENAME_LENGTH = 1024;
const DEFAULT_CACHE_TTL_SECONDS = process.env.NODE_ENV === 'development' ? 300 : 3600;
const DEFAULT_MAX_BULK_FILES = 100;
const DEFAULT_MAX_BULK_TOTAL_SIZE = 1073741824; // 1GB

const FIVE_MINUTES_MS = 1000 * 60 * 5;
const ONE_HOUR_MS = 1000 * 60 * 60;
Expand Down Expand Up @@ -57,6 +59,9 @@ const APP_STORAGE_MODULE_QUERY = `
sm.default_max_file_size,
sm.max_filename_length,
sm.cache_ttl_seconds,
sm.max_bulk_files,
sm.max_bulk_total_size,
sm.has_path_shares,
NULL AS entity_schema,
NULL AS entity_table
FROM metaschema_modules_public.storage_module sm
Expand Down Expand Up @@ -93,6 +98,9 @@ const ALL_STORAGE_MODULES_QUERY = `
sm.default_max_file_size,
sm.max_filename_length,
sm.cache_ttl_seconds,
sm.max_bulk_files,
sm.max_bulk_total_size,
sm.has_path_shares,
es.schema_name AS entity_schema,
et.name AS entity_table
FROM metaschema_modules_public.storage_module sm
Expand Down Expand Up @@ -122,6 +130,9 @@ interface StorageModuleRow {
default_max_file_size: number | null;
max_filename_length: number | null;
cache_ttl_seconds: number | null;
max_bulk_files: number | null;
max_bulk_total_size: number | null;
has_path_shares: boolean;
entity_schema: string | null;
entity_table: string | null;
}
Expand Down Expand Up @@ -152,6 +163,9 @@ function buildConfig(row: StorageModuleRow): StorageModuleConfig {
defaultMaxFileSize: row.default_max_file_size ?? DEFAULT_MAX_FILE_SIZE,
maxFilenameLength: row.max_filename_length ?? DEFAULT_MAX_FILENAME_LENGTH,
cacheTtlSeconds,
hasPathShares: row.has_path_shares ?? false,
maxBulkFiles: row.max_bulk_files ?? DEFAULT_MAX_BULK_FILES,
maxBulkTotalSize: row.max_bulk_total_size ?? DEFAULT_MAX_BULK_TOTAL_SIZE,
};
}

Expand Down Expand Up @@ -362,11 +376,11 @@ export async function getBucketConfig(
const hasOwner = ownerId && storageConfig.membershipType !== null;
const result = await pgClient.query({
text: hasOwner
? `SELECT id, key, type, is_public, owner_id, allowed_mime_types, max_file_size
? `SELECT id, key, type, is_public, owner_id, allowed_mime_types, max_file_size, allow_custom_keys
FROM ${storageConfig.bucketsQualifiedName}
WHERE key = $1 AND owner_id = $2
LIMIT 1`
: `SELECT id, key, type, is_public, ${storageConfig.membershipType !== null ? 'owner_id,' : ''} allowed_mime_types, max_file_size
: `SELECT id, key, type, is_public, ${storageConfig.membershipType !== null ? 'owner_id,' : ''} allowed_mime_types, max_file_size, allow_custom_keys
FROM ${storageConfig.bucketsQualifiedName}
WHERE key = $1
LIMIT 1`,
Expand All @@ -385,6 +399,7 @@ export async function getBucketConfig(
owner_id: string | null;
allowed_mime_types: string[] | null;
max_file_size: number | null;
allow_custom_keys: boolean;
};

const config: BucketConfig = {
Expand All @@ -395,6 +410,7 @@ export async function getBucketConfig(
owner_id: row.owner_id ?? null,
allowed_mime_types: row.allowed_mime_types,
max_file_size: row.max_file_size,
allow_custom_keys: row.allow_custom_keys ?? false,
};

bucketCache.set(cacheKey, config);
Expand Down
19 changes: 19 additions & 0 deletions graphile/graphile-presigned-url-plugin/src/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ export interface BucketConfig {
owner_id: string | null;
allowed_mime_types: string[] | null;
max_file_size: number | null;
allow_custom_keys: boolean;
}

/**
Expand Down Expand Up @@ -62,6 +63,15 @@ export interface StorageModuleConfig {
maxFilenameLength: number;
/** Cache TTL in seconds for this config entry (default: 300 dev / 3600 prod) */
cacheTtlSeconds: number;
/** Whether this storage module uses ltree path + path shares (determines if path column exists on files) */
hasPathShares: boolean;

// --- Bulk upload limits ---

/** Max files per requestBulkUploadUrls batch (default: 100) */
maxBulkFiles: number;
/** Max total size per bulk upload batch in bytes (default: 1GB) */
maxBulkTotalSize: number;
}

/**
Expand All @@ -85,6 +95,13 @@ export interface RequestUploadUrlInput {
size: number;
/** Original filename (optional, for display/Content-Disposition) */
filename?: string;
/**
* Custom S3 key for the file (only allowed when bucket has allow_custom_keys=true).
* When omitted, key defaults to contentHash (content-addressed dedup).
* When provided, the file is stored at this key; dedup is bypassed.
* Max 1024 chars. Must not contain path traversal (.. or leading /).
*/
key?: string;
}

/**
Expand All @@ -101,6 +118,8 @@ export interface RequestUploadUrlPayload {
deduplicated: boolean;
/** Presigned URL expiry time (null if deduplicated) */
expiresAt: string | null;
/** ID of the previous version (set when re-uploading to an existing custom key) */
previousVersionId: string | null;
}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ CREATE TABLE IF NOT EXISTS "simple-storage-public".buckets (
owner_id uuid,
allowed_mime_types text[] NULL,
max_file_size bigint NULL,
allow_custom_keys boolean NOT NULL DEFAULT false,
created_at timestamptz DEFAULT now(),
updated_at timestamptz DEFAULT now(),
UNIQUE (key)
Expand All @@ -40,11 +41,13 @@ CREATE TABLE IF NOT EXISTS "simple-storage-public".files (
id uuid PRIMARY KEY DEFAULT uuid_generate_v4(),
bucket_id uuid NOT NULL REFERENCES "simple-storage-public".buckets(id),
key text NOT NULL,
content_hash text NOT NULL,
mime_type text NOT NULL,
size bigint,
filename text,
owner_id uuid,
is_public boolean NOT NULL DEFAULT false,
previous_version_id uuid REFERENCES "simple-storage-public".files(id),
created_at timestamptz DEFAULT now(),
updated_at timestamptz DEFAULT now(),
UNIQUE (bucket_id, key)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -55,11 +55,16 @@ CREATE TABLE IF NOT EXISTS metaschema_modules_public.storage_module (
public_url_prefix text NULL,
provider text NULL,
allowed_origins text[] NULL,
restrict_reads boolean NOT NULL DEFAULT false,
has_path_shares boolean NOT NULL DEFAULT false,
path_shares_table_id uuid NULL DEFAULT NULL,
upload_url_expiry_seconds integer NULL,
download_url_expiry_seconds integer NULL,
default_max_file_size bigint NULL,
max_filename_length integer NULL,
cache_ttl_seconds integer NULL,
max_bulk_files integer NULL,
max_bulk_total_size bigint NULL,
CONSTRAINT sm_db_fkey FOREIGN KEY (database_id) REFERENCES metaschema_public.database (id) ON DELETE CASCADE
);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2089,6 +2089,15 @@ input RequestUploadUrlInput {

"""Original filename (optional, for display and Content-Disposition)"""
filename: String

"""
Custom S3 key (e.g., "reports/2024/Q1.pdf").
Only allowed when the bucket has allow_custom_keys=true.
When omitted, key defaults to contentHash (content-addressed dedup).
When provided, the file is stored at this key.
Re-uploading to an existing key auto-creates a new version.
"""
key: String
}

type RequestUploadUrlPayload {
Expand All @@ -2106,6 +2115,69 @@ type RequestUploadUrlPayload {

"""Presigned URL expiry time (null if deduplicated)"""
expiresAt: Datetime

"""
ID of the previous version (set when re-uploading to an existing custom key)
"""
previousVersionId: UUID
}

input BulkUploadFileInput {
"""
SHA-256 content hash computed by the client (hex-encoded, 64 chars).
Serves as the default S3 key when no custom key is supplied
(content-addressed deduplication).
"""
contentHash: String!

"""MIME type of the file (e.g., "image/png")"""
contentType: String!

# NOTE(review): GraphQL Int is a signed 32-bit integer, so size tops out
# around 2.1GB per file. That is fine for the 1GB default bulk-total cap,
# but confirm no storage module configures max_file_size beyond Int range.
"""File size in bytes"""
size: Int!

"""Original filename (optional, for display and Content-Disposition)"""
filename: String

"""
Custom S3 key (only allowed when the bucket has allow_custom_keys=true).
When omitted, the key defaults to contentHash. When provided, dedup is
bypassed and re-uploading to an existing key creates a new version.
"""
key: String
}

input RequestBulkUploadUrlsInput {
"""Bucket key identifying the logical bucket (e.g., "public", "private")"""
bucketKey: String!

"""
Owner entity ID for entity-scoped uploads.
# presumably omitted for buckets without an owner scope — verify against resolver
"""
ownerId: UUID

"""
Array of files to upload in one batch. Subject to per-storage-module
limits: max_bulk_files (default 100) and max_bulk_total_size
(default 1GB, summed over all files in the batch).
"""
files: [BulkUploadFileInput!]!
}

type BulkUploadFilePayload {
"""
Presigned PUT URL for uploading the file body.
Null when the file was deduplicated (content already stored).
"""
uploadUrl: String

"""ID of the file record (present even when deduplicated)"""
fileId: UUID!

"""The S3 object key the file is (or will be) stored under"""
key: String!

"""
Whether this file was deduplicated — true means no upload is needed
and uploadUrl/expiresAt are null.
"""
deduplicated: Boolean!

"""Presigned URL expiry time (null if deduplicated)"""
expiresAt: Datetime

"""
ID of the previous version (set when re-uploading to an existing custom key)
"""
previousVersionId: UUID

"""
Index of this file in the input files array, so clients can correlate
each result with its request entry.
"""
index: Int!
}

type RequestBulkUploadUrlsPayload {
"""
Array of results, one per input file. Use each entry's index field to
correlate results with the input array.
"""
files: [BulkUploadFilePayload!]!
}

"""The root query type which gives access points into the data universe."""
Expand Down Expand Up @@ -2398,6 +2470,18 @@ type Mutation {
input: RequestUploadUrlInput!
): RequestUploadUrlPayload

"""
Request presigned URLs for uploading multiple files in a single batch.
Subject to per-storage-module limits (max_bulk_files, max_bulk_total_size).
Each file is processed independently — some may dedup while others get fresh URLs.
"""
requestBulkUploadUrls(
"""
The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields.
"""
input: RequestBulkUploadUrlsInput!
): RequestBulkUploadUrlsPayload

"""
Provision an S3 bucket for a logical bucket in the database.
Reads the bucket config via RLS, then creates and configures
Expand Down
Loading
Loading