From c98ab53cef65a82b60f1c0a911ad75d75d381837 Mon Sep 17 00:00:00 2001 From: fenos Date: Wed, 27 Mar 2024 17:32:48 +0000 Subject: [PATCH] feat: stable S3 Protocol --- README.md | 10 +- docker-compose-multi-tenant.yml | 100 +- jest-setup.ts | 4 + jest.config.js | 2 +- .../0020-list-objects-with-delimiter.sql | 39 + .../tenant/0021-s3-multipart-uploads.sql | 66 ++ package-lock.json | 53 ++ package.json | 3 + src/app.ts | 2 + src/auth/jwt.ts | 3 +- src/database/client.ts | 17 +- src/database/connection.ts | 11 +- src/database/tenant.ts | 8 +- src/http/error-handler.ts | 6 +- src/http/plugins/xml.ts | 16 +- src/http/routes/object/copyObject.ts | 2 +- src/http/routes/object/deleteObject.ts | 4 +- src/http/routes/object/getSignedObject.ts | 6 +- src/http/routes/object/uploadSignedObject.ts | 8 +- src/http/routes/render/renderSignedImage.ts | 6 +- .../s3/commands/abort-multipart-upload.ts | 33 + .../s3/commands/complete-multipart-upload.ts | 62 ++ src/http/routes/s3/commands/copy-object.ts | 52 ++ src/http/routes/s3/commands/create-bucket.ts | 27 + .../s3/commands/create-multipart-upload.ts | 45 + src/http/routes/s3/commands/delete-bucket.ts | 21 + src/http/routes/s3/commands/delete-object.ts | 78 ++ src/http/routes/s3/commands/get-bucket.ts | 28 + src/http/routes/s3/commands/get-object.ts | 37 + src/http/routes/s3/commands/head-bucket.ts | 20 + src/http/routes/s3/commands/head-object.ts | 25 + src/http/routes/s3/commands/list-buckets.ts | 13 + .../s3/commands/list-multipart-uploads.ts | 40 + src/http/routes/s3/commands/list-objects.ts | 40 + src/http/routes/s3/commands/upload-part.ts | 82 ++ src/http/routes/s3/index.ts | 187 +++- src/http/routes/s3/router.ts | 214 +++++ src/http/routes/tus/index.ts | 20 +- src/http/routes/tus/lifecycle.ts | 4 +- src/server.ts | 10 - .../backend/{generic.ts => adapter.ts} | 17 +- src/storage/backend/file.ts | 30 +- src/storage/backend/index.ts | 4 +- src/storage/backend/s3.ts | 95 +- src/storage/database/adapter.ts | 42 +- src/storage/database/knex.ts | 343 +++++-- src/storage/errors.ts | 354 +++++++- src/storage/limits.ts | 28 +- src/storage/object.ts | 69 +- src/storage/protocols/s3/byte-limit-stream.ts | 20 + src/storage/protocols/s3/handler.ts | 332 ------- src/storage/protocols/s3/router.ts | 191 ---- src/storage/protocols/s3/s3-handler.ts | 852 ++++++++++++++++++ src/storage/protocols/s3/signature-v4.ts | 14 +- src/storage/protocols/tus/postgres-locker.ts | 3 +- src/storage/protocols/tus/upload-id.ts | 20 +- src/storage/renderer/image.ts | 6 +- src/storage/schemas/index.ts | 1 + src/storage/schemas/multipart.ts | 46 + src/storage/storage.ts | 38 +- src/storage/uploader.ts | 40 +- src/test/common.ts | 2 + src/test/object.test.ts | 12 +- src/test/rls_tests.yaml | 24 +- src/test/s3-protocol.test.ts | 567 ++++++++++-- src/test/tenant.test.ts | 4 +- src/test/webhooks.test.ts | 19 +- 67 files changed, 3533 insertions(+), 1044 deletions(-) create mode 100644 migrations/tenant/0020-list-objects-with-delimiter.sql create mode 100644 migrations/tenant/0021-s3-multipart-uploads.sql create mode 100644 src/http/routes/s3/commands/abort-multipart-upload.ts create mode 100644 src/http/routes/s3/commands/complete-multipart-upload.ts create mode 100644 src/http/routes/s3/commands/copy-object.ts create mode 100644 src/http/routes/s3/commands/create-bucket.ts create mode 100644 src/http/routes/s3/commands/create-multipart-upload.ts create mode 100644 src/http/routes/s3/commands/delete-bucket.ts create mode 100644 src/http/routes/s3/commands/delete-object.ts create mode 100644 
src/http/routes/s3/commands/get-bucket.ts create mode 100644 src/http/routes/s3/commands/get-object.ts create mode 100644 src/http/routes/s3/commands/head-bucket.ts create mode 100644 src/http/routes/s3/commands/head-object.ts create mode 100644 src/http/routes/s3/commands/list-buckets.ts create mode 100644 src/http/routes/s3/commands/list-multipart-uploads.ts create mode 100644 src/http/routes/s3/commands/list-objects.ts create mode 100644 src/http/routes/s3/commands/upload-part.ts create mode 100644 src/http/routes/s3/router.ts rename src/storage/backend/{generic.ts => adapter.ts} (90%) create mode 100644 src/storage/protocols/s3/byte-limit-stream.ts delete mode 100644 src/storage/protocols/s3/handler.ts delete mode 100644 src/storage/protocols/s3/router.ts create mode 100644 src/storage/protocols/s3/s3-handler.ts create mode 100644 src/storage/schemas/multipart.ts diff --git a/README.md b/README.md index 31b662be..f794d989 100644 --- a/README.md +++ b/README.md @@ -6,11 +6,19 @@ A scalable, light-weight object storage service. > Read [this post](https://supabase.io/blog/2021/03/30/supabase-storage) on why we decided to build a new object storage service. +- Multi-protocol support (HTTP, TUS, S3) - Uses Postgres as its datastore for storing metadata - Authorization rules are written as Postgres Row Level Security policies -- Integrates with S3 as the storage backend (with more in the pipeline!) +- Integrates with S3 Compatible Storages - Extremely lightweight and performant + +**Supported Protocols** + +- [x] HTTP/REST +- [x] TUS Resumable Upload +- [x] S3 Compatible API + ![Architecture](./static/architecture.png?raw=true 'Architecture') ## Documentation diff --git a/docker-compose-multi-tenant.yml b/docker-compose-multi-tenant.yml index d3797b1b..eb8faa68 100644 --- a/docker-compose-multi-tenant.yml +++ b/docker-compose-multi-tenant.yml @@ -2,56 +2,56 @@ version: '3' services: -# storage: -# image: supabase/storage-api:latest -# ports: -# - '5000:5000' -# - '5001:5001' -# depends_on: -# tenant_db: -# condition: service_healthy -# multitenant_db: -# condition: service_healthy -# supavisor: -# condition: service_started -# minio_setup: -# condition: service_completed_successfully -# environment: -# # Server -# SERVER_PORT: 5000 -# SERVER_REGION: local -# # Auth -# AUTH_JWT_SECRET: f023d3db-39dc-4ac9-87b2-b2be72e9162b -# AUTH_JWT_ALGORITHM: HS256 -# AUTH_ENCRYPTION_KEY: encryptionkey -# # Multi tenant Mode -# MULTI_TENANT: true -# DATABASE_MULTITENANT_URL: postgresql://postgres:postgres@multitenant_db:5432/postgres -# SERVER_ADMIN_API_KEYS: apikey -# SERVER_ADMIN_PORT: 5001 -# REQUEST_X_FORWARDED_HOST_REGEXP: "^([a-z]{20}).local.(?:com|dev)$" -# # Migrations -# DB_INSTALL_ROLES: true # set to false if you want to manage roles yourself -# # Storage -# STORAGE_BACKEND: s3 -# STORAGE_S3_BUCKET: supa-storage-bucket # name of s3 bucket where you want to store objects -# STORAGE_S3_ENDPOINT: http://minio:9000 -# STORAGE_S3_FORCE_PATH_STYLE: "true" -# STORAGE_S3_REGION: us-east-1 -# AWS_ACCESS_KEY_ID: supa-storage -# AWS_SECRET_ACCESS_KEY: secret1234 -# # Upload -# UPLOAD_FILE_SIZE_LIMIT: 524288000 -# UPLOAD_FILE_SIZE_LIMIT_STANDARD: 52428800 -# UPLOAD_SIGNED_URL_EXPIRATION_TIME: 120 -# TUS_URL_PATH: /upload/resumable -# TUS_URL_EXPIRY_MS: 3600000 -# # Image Tranformation -# IMAGE_TRANSFORMATION_ENABLED: "true" -# IMGPROXY_URL: http://imgproxy:8080 -# IMGPROXY_REQUEST_TIMEOUT: 15 -# -# PG_QUEUE_ENABLE: "true" + storage: + image: supabase/storage-api:latest + ports: + - '5000:5000' + - 
'5001:5001' + depends_on: + tenant_db: + condition: service_healthy + multitenant_db: + condition: service_healthy + supavisor: + condition: service_started + minio_setup: + condition: service_completed_successfully + environment: + # Server + SERVER_PORT: 5000 + SERVER_REGION: local + # Auth + AUTH_JWT_SECRET: f023d3db-39dc-4ac9-87b2-b2be72e9162b + AUTH_JWT_ALGORITHM: HS256 + AUTH_ENCRYPTION_KEY: encryptionkey + # Multi tenant Mode + MULTI_TENANT: true + DATABASE_MULTITENANT_URL: postgresql://postgres:postgres@multitenant_db:5432/postgres + SERVER_ADMIN_API_KEYS: apikey + SERVER_ADMIN_PORT: 5001 + REQUEST_X_FORWARDED_HOST_REGEXP: "^([a-z]{20}).local.(?:com|dev)$" + # Migrations + DB_INSTALL_ROLES: true # set to false if you want to manage roles yourself + # Storage + STORAGE_BACKEND: s3 + STORAGE_S3_BUCKET: supa-storage-bucket # name of s3 bucket where you want to store objects + STORAGE_S3_ENDPOINT: http://minio:9000 + STORAGE_S3_FORCE_PATH_STYLE: "true" + STORAGE_S3_REGION: us-east-1 + AWS_ACCESS_KEY_ID: supa-storage + AWS_SECRET_ACCESS_KEY: secret1234 + # Upload + UPLOAD_FILE_SIZE_LIMIT: 524288000 + UPLOAD_FILE_SIZE_LIMIT_STANDARD: 52428800 + UPLOAD_SIGNED_URL_EXPIRATION_TIME: 120 + TUS_URL_PATH: /upload/resumable + TUS_URL_EXPIRY_MS: 3600000 + # Image Tranformation + IMAGE_TRANSFORMATION_ENABLED: "true" + IMGPROXY_URL: http://imgproxy:8080 + IMGPROXY_REQUEST_TIMEOUT: 15 + + PG_QUEUE_ENABLE: "true" tenant_db: extends: diff --git a/jest-setup.ts b/jest-setup.ts index 6e5f25b3..4988cc38 100644 --- a/jest-setup.ts +++ b/jest-setup.ts @@ -1,3 +1,7 @@ import { getConfig, setEnvPaths } from './src/config' setEnvPaths(['.env.test', '.env']) + +beforeEach(() => { + getConfig({ reload: true }) +}) diff --git a/jest.config.js b/jest.config.js index ef01b156..39a46f76 100644 --- a/jest.config.js +++ b/jest.config.js @@ -4,7 +4,7 @@ module.exports = { transform: { '^.+\\.(t|j)sx?$': 'ts-jest', }, - setupFiles: ['/jest-setup.ts'], + setupFilesAfterEnv: ['/jest-setup.ts'], testEnvironment: 'node', testPathIgnorePatterns: ['node_modules', 'dist'], coverageProvider: 'v8', diff --git a/migrations/tenant/0020-list-objects-with-delimiter.sql b/migrations/tenant/0020-list-objects-with-delimiter.sql new file mode 100644 index 00000000..3b7d813e --- /dev/null +++ b/migrations/tenant/0020-list-objects-with-delimiter.sql @@ -0,0 +1,39 @@ + + +CREATE OR REPLACE FUNCTION storage.list_objects_with_delimiter(bucket_id text, prefix_param text, delimiter_param text, max_keys integer default 100, next_token text DEFAULT '') + RETURNS TABLE (name text, id uuid, metadata jsonb, updated_at timestamptz) AS +$$ +BEGIN + RETURN QUERY EXECUTE + 'SELECT DISTINCT ON(name COLLATE "C") * from ( + SELECT + CASE + WHEN position($2 IN substring(name from length($1) + 1)) > 0 THEN + substring(name from 1 for length($1) + position($2 IN substring(name from length($1) + 1))) + ELSE + name + END AS name, id, metadata, updated_at + FROM + storage.objects + WHERE + bucket_id = $5 AND + name ILIKE $1 || ''%'' AND + CASE + WHEN $4 != '''' THEN + CASE + WHEN position($2 IN substring(name from length($1) + 1)) > 0 THEN + substring(name from 1 for length($1) + position($2 IN substring(name from length($1) + 1))) COLLATE "C" > $4 + ELSE + name COLLATE "C" > $4 + END + ELSE + true + END + ORDER BY + name COLLATE "C" ASC) as e order by name COLLATE "C" LIMIT $3' + USING prefix_param, delimiter_param, max_keys, next_token, bucket_id; +END; +$$ LANGUAGE plpgsql; + +CREATE INDEX idx_objects_bucket_id_name + ON storage.objects (bucket_id, (name 
COLLATE "C")); \ No newline at end of file diff --git a/migrations/tenant/0021-s3-multipart-uploads.sql b/migrations/tenant/0021-s3-multipart-uploads.sql new file mode 100644 index 00000000..d7005403 --- /dev/null +++ b/migrations/tenant/0021-s3-multipart-uploads.sql @@ -0,0 +1,66 @@ + +CREATE TABLE IF NOT EXISTS storage._s3_multipart_uploads ( + id text PRIMARY KEY, + in_progress_size int NOT NULL default 0, + upload_signature text NOT NULL, + bucket_id text NOT NULL references storage.buckets(id), + key text NOT NULL, + version text NOT NULL, + created_at timestamp NOT NULL default now() +); + +CREATE TABLE IF NOT EXISTS storage._s3_multipart_uploads_parts ( + id uuid PRIMARY KEY default gen_random_uuid(), + upload_id text NOT NULL references storage._s3_multipart_uploads(id) ON DELETE CASCADE, + size int NOT NULL default 0, + part_number int NOT NULL, + bucket_id text NOT NULL references storage.buckets(id), + key text NOT NULL, + etag text NOT NULL, + version text NOT NULL, + created_at timestamp NOT NULL default now() +); + +CREATE INDEX idx_multipart_uploads_list + ON storage._s3_multipart_uploads (bucket_id, (key COLLATE "C"), created_at ASC); + +CREATE OR REPLACE FUNCTION storage.list_multipart_uploads_with_delimiter(bucket_id text, prefix_param text, delimiter_param text, max_keys integer default 100, next_key_token text DEFAULT '', next_upload_token text default '') + RETURNS TABLE (key text, id text, created_at timestamptz) AS +$$ +BEGIN + RETURN QUERY EXECUTE + 'SELECT DISTINCT ON(name COLLATE "C") * from ( + SELECT + CASE + WHEN position($2 IN substring(name from length($1) + 1)) > 0 THEN + substring(key from 1 for length($1) + position($2 IN substring(key from length($1) + 1))) + ELSE + key + END AS key, id, created_at + FROM + storage._s3_multipart_uploads + WHERE + bucket_id = $5 AND + key ILIKE $1 || ''%'' AND + CASE + WHEN $4 != '''' THEN + CASE + WHEN position($2 IN substring(key from length($1) + 1)) > 0 THEN + substring(key from 1 for length($1) + position($2 IN substring(key from length($1) + 1))) > $4 + ELSE + key > $4 + END + ELSE + true + END + CASE + WHEN $6 != '''' THEN + id > $6 + ELSE + true + END + ORDER BY + key COLLATE "C" ASC, created_at ASC) as e order by key COLLATE "C" LIMIT $3' + USING prefix_param, delimiter_param, max_keys, next_key_token, bucket_id, next_upload_token; +END; +$$ LANGUAGE plpgsql; \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index 719b91d8..85338854 100644 --- a/package-lock.json +++ b/package-lock.json @@ -37,6 +37,7 @@ "fastify-plugin": "^4.0.0", "fastify-xml-body-parser": "^2.2.0", "fs-extra": "^10.0.1", + "fs-xattr": "0.3.1", "ioredis": "^5.2.4", "jsonwebtoken": "^9.0.2", "knex": "^3.1.0", @@ -64,6 +65,7 @@ "@types/mustache": "^4.2.2", "@types/node": "^20.11.5", "@types/pg": "^8.6.4", + "@types/stream-buffers": "^3.0.7", "@types/xml2js": "^0.4.14", "@typescript-eslint/eslint-plugin": "^5.12.1", "@typescript-eslint/parser": "^5.12.1", @@ -78,6 +80,7 @@ "mustache": "^4.2.0", "pino-pretty": "^8.1.0", "prettier": "^2.8.8", + "stream-buffers": "^3.0.2", "ts-jest": "^29.0.3", "ts-node-dev": "^1.1.8", "tsx": "^3.13.0", @@ -3982,6 +3985,15 @@ "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==", "dev": true }, + "node_modules/@types/stream-buffers": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/stream-buffers/-/stream-buffers-3.0.7.tgz", + "integrity": 
"sha512-azOCy05sXVXrO+qklf0c/B07H/oHaIuDDAiHPVwlk3A9Ek+ksHyTeMajLZl3r76FxpPpxem//4Te61G1iW3Giw==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@types/strip-bom": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/@types/strip-bom/-/strip-bom-3.0.0.tgz", @@ -6032,6 +6044,18 @@ "node": ">=12" } }, + "node_modules/fs-xattr": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/fs-xattr/-/fs-xattr-0.3.1.tgz", + "integrity": "sha512-UVqkrEW0GfDabw4C3HOrFlxKfx0eeigfRne69FxSBdHIP8Qt5Sq6Pu3RM9KmMlkygtC4pPKkj5CiPO5USnj2GA==", + "hasInstallScript": true, + "os": [ + "!win32" + ], + "engines": { + "node": ">=8.6.0" + } + }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", @@ -9114,6 +9138,15 @@ "readable-stream": "^3.5.0" } }, + "node_modules/stream-buffers": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/stream-buffers/-/stream-buffers-3.0.2.tgz", + "integrity": "sha512-DQi1h8VEBA/lURbSwFtEHnSTb9s2/pwLEaFuNhXwy1Dx3Sa0lOuYT2yNUr4/j2fs8oCAMANtrZ5OrPZtyVs3MQ==", + "dev": true, + "engines": { + "node": ">= 0.10.0" + } + }, "node_modules/stream-shift": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", @@ -13080,6 +13113,15 @@ "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==", "dev": true }, + "@types/stream-buffers": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/stream-buffers/-/stream-buffers-3.0.7.tgz", + "integrity": "sha512-azOCy05sXVXrO+qklf0c/B07H/oHaIuDDAiHPVwlk3A9Ek+ksHyTeMajLZl3r76FxpPpxem//4Te61G1iW3Giw==", + "dev": true, + "requires": { + "@types/node": "*" + } + }, "@types/strip-bom": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/@types/strip-bom/-/strip-bom-3.0.0.tgz", @@ -14596,6 +14638,11 @@ "universalify": "^2.0.0" } }, + "fs-xattr": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/fs-xattr/-/fs-xattr-0.3.1.tgz", + "integrity": "sha512-UVqkrEW0GfDabw4C3HOrFlxKfx0eeigfRne69FxSBdHIP8Qt5Sq6Pu3RM9KmMlkygtC4pPKkj5CiPO5USnj2GA==" + }, "fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", @@ -16911,6 +16958,12 @@ "readable-stream": "^3.5.0" } }, + "stream-buffers": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/stream-buffers/-/stream-buffers-3.0.2.tgz", + "integrity": "sha512-DQi1h8VEBA/lURbSwFtEHnSTb9s2/pwLEaFuNhXwy1Dx3Sa0lOuYT2yNUr4/j2fs8oCAMANtrZ5OrPZtyVs3MQ==", + "dev": true + }, "stream-shift": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", diff --git a/package.json b/package.json index d5e13f9e..981ff9fe 100644 --- a/package.json +++ b/package.json @@ -53,6 +53,7 @@ "fastify-plugin": "^4.0.0", "fastify-xml-body-parser": "^2.2.0", "fs-extra": "^10.0.1", + "fs-xattr": "0.3.1", "ioredis": "^5.2.4", "jsonwebtoken": "^9.0.2", "knex": "^3.1.0", @@ -77,6 +78,7 @@ "@types/mustache": "^4.2.2", "@types/node": "^20.11.5", "@types/pg": "^8.6.4", + "@types/stream-buffers": "^3.0.7", "@types/xml2js": "^0.4.14", "@typescript-eslint/eslint-plugin": "^5.12.1", "@typescript-eslint/parser": "^5.12.1", @@ -91,6 +93,7 @@ "mustache": "^4.2.0", "pino-pretty": "^8.1.0", "prettier": "^2.8.8", + "stream-buffers": "^3.0.2", "ts-jest": "^29.0.3", "ts-node-dev": "^1.1.8", "tsx": "^3.13.0", diff --git a/src/app.ts b/src/app.ts index f4ba0bea..3bc69993 100644 --- 
a/src/app.ts +++ b/src/app.ts @@ -42,6 +42,8 @@ const build = (opts: buildOpts = {}): FastifyInstance => { tags: [ { name: 'object', description: 'Object end-points' }, { name: 'bucket', description: 'Bucket end-points' }, + { name: 's3', description: 'S3 end-points' }, + { name: 'resumable', description: 'Resumable Upload end-points' }, { name: 'deprecated', description: 'Deprecated end-points' }, ], }, diff --git a/src/auth/jwt.ts b/src/auth/jwt.ts index 2bd5d724..e680c702 100644 --- a/src/auth/jwt.ts +++ b/src/auth/jwt.ts @@ -1,10 +1,9 @@ import * as crypto from 'crypto' import jwt from 'jsonwebtoken' -import { getJwtSecret as getJwtSecretForTenant } from '../database/tenant' import { getConfig } from '../config' -const { isMultitenant, jwtSecret, jwtAlgorithm, jwtJWKS } = getConfig() +const { jwtAlgorithm } = getConfig() const JWT_HMAC_ALGOS: jwt.Algorithm[] = ['HS256', 'HS384', 'HS512'] const JWT_RSA_ALGOS: jwt.Algorithm[] = ['RS256', 'RS384', 'RS512'] diff --git a/src/database/client.ts b/src/database/client.ts index 6db8e9df..71e2d1eb 100644 --- a/src/database/client.ts +++ b/src/database/client.ts @@ -1,6 +1,6 @@ import { getConfig } from '../config' import { getTenantConfig } from './tenant' -import { StorageBackendError } from '../storage' +import { ERRORS } from '../storage' import { User, TenantConnection } from './connection' interface ConnectionOptions { @@ -48,26 +48,17 @@ async function getDbCredentials( if (isMultitenant) { if (!tenantId) { - throw new StorageBackendError('Invalid Tenant Id', 400, 'Tenant id not provided') + throw ERRORS.InvalidTenantId() } - if (options) { - options.disableHostCheck = true - } if (requestXForwardedHostRegExp && !options?.disableHostCheck) { const xForwardedHost = host if (typeof xForwardedHost !== 'string') { - throw new StorageBackendError( - 'Invalid Header', - 400, - 'X-Forwarded-Host header is not a string' - ) + throw ERRORS.InvalidXForwardedHeader('X-Forwarded-Host header is not a string') } if (!new RegExp(requestXForwardedHostRegExp).test(xForwardedHost)) { - throw new StorageBackendError( - 'Invalid Header', - 400, + throw ERRORS.InvalidXForwardedHeader( 'X-Forwarded-Host header does not match regular expression' ) } diff --git a/src/database/connection.ts b/src/database/connection.ts index d4913e5f..e13c6529 100644 --- a/src/database/connection.ts +++ b/src/database/connection.ts @@ -5,7 +5,7 @@ import retry from 'async-retry' import TTLCache from '@isaacs/ttlcache' import { getConfig } from '../config' import { DbActiveConnection, DbActivePool } from '../monitoring/metrics' -import { StorageBackendError } from '../storage' +import { ERRORS } from '../storage' import KnexTimeoutError = knex.KnexTimeoutError // https://github.com/knex/knex/issues/387#issuecomment-51554522 @@ -178,7 +178,7 @@ export class TenantConnection { ) if (!tnx) { - throw new StorageBackendError('Could not create transaction', 500, 'transaction_failed') + throw ERRORS.InternalError(undefined, 'Could not create transaction') } if (!instance && this.options.isExternalPool) { @@ -188,12 +188,7 @@ export class TenantConnection { return tnx } catch (e) { if (e instanceof KnexTimeoutError) { - throw StorageBackendError.withStatusCode( - 'database_timeout', - 544, - 'The connection to the database timed out', - e - ) + throw ERRORS.DatabaseTimeout(e) } throw e diff --git a/src/database/tenant.ts b/src/database/tenant.ts index 7c9f3b56..24f6070b 100644 --- a/src/database/tenant.ts +++ b/src/database/tenant.ts @@ -1,7 +1,7 @@ import { getConfig } from '../config' 
import { decrypt, verifyJWT } from '../auth' import { multitenantKnex } from './multitenant-db' -import { StorageBackendError } from '../storage' +import { ERRORS } from '../storage' import { JwtPayload } from 'jsonwebtoken' import { PubSubAdapter } from '../pubsub' import { lastMigrationName } from './migrations' @@ -167,11 +167,7 @@ export async function getTenantConfig(tenantId: string): Promise { const tenant = await multitenantKnex('tenants').first().where('id', tenantId) if (!tenant) { - throw new StorageBackendError( - 'Missing Tenant config', - 400, - `Tenant config for ${tenantId} not found` - ) + throw ERRORS.MissingTenantConfig(tenantId) } const { anon_key, diff --git a/src/http/error-handler.ts b/src/http/error-handler.ts index 7c4f0bcb..761e5bbc 100644 --- a/src/http/error-handler.ts +++ b/src/http/error-handler.ts @@ -23,7 +23,10 @@ export const setErrorHandler = (app: FastifyInstance) => { ? 500 : 400 - return reply.status(statusCode).send(renderableError) + return reply.status(statusCode).send({ + ...renderableError, + error: error.error || renderableError.code, + }) } // database error @@ -47,7 +50,6 @@ export const setErrorHandler = (app: FastifyInstance) => { // Fastify errors if ('statusCode' in error) { const err = error as FastifyError - console.log(error) return reply.status((error as any).statusCode || 500).send({ statusCode: `${err.statusCode}`, error: err.name, diff --git a/src/http/plugins/xml.ts b/src/http/plugins/xml.ts index 28314042..ccc1c07c 100644 --- a/src/http/plugins/xml.ts +++ b/src/http/plugins/xml.ts @@ -3,26 +3,26 @@ import accepts from '@fastify/accepts' import fastifyPlugin from 'fastify-plugin' import xml from 'xml2js' +// no types exists for this package +// eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore import xmlBodyParser from 'fastify-xml-body-parser' export const jsonToXml = fastifyPlugin(async function (fastify: FastifyInstance) { - fastify.register(xmlBodyParser, { - ignoreAttributes: true, - processEntities: false, - suppressEmptyNode: false, - suppressUnpairedNode: false, - suppressBooleanAttributes: false, - }) fastify.register(accepts) + fastify.register(xmlBodyParser) fastify.addHook('preSerialization', async (req, res, payload) => { const accept = req.accepts() if ( res.getHeader('content-type')?.toString()?.includes('application/json') && accept.types(['application/xml', 'application/json']) === 'application/xml' ) { - const xmlBuilder = new xml.Builder() + const xmlBuilder = new xml.Builder({ + renderOpts: { + pretty: false, + }, + }) const xmlPayload = await xmlBuilder.buildObject(payload) res.type('application/xml') res.header('content-type', 'application/xml; charset=utf-8') diff --git a/src/http/routes/object/copyObject.ts b/src/http/routes/object/copyObject.ts index 60eed6bb..96d02675 100644 --- a/src/http/routes/object/copyObject.ts +++ b/src/http/routes/object/copyObject.ts @@ -48,7 +48,7 @@ export default async function routes(fastify: FastifyInstance) { const result = await request.storage .from(bucketId) - .copyObject(sourceKey, destinationKey, request.owner) + .copyObject(sourceKey, bucketId, destinationKey, request.owner) return response.status(result.httpStatusCode ?? 
200).send({ Key: `${bucketId}/${destinationKey}`, diff --git a/src/http/routes/object/deleteObject.ts b/src/http/routes/object/deleteObject.ts index a4c09a15..929d8f4f 100644 --- a/src/http/routes/object/deleteObject.ts +++ b/src/http/routes/object/deleteObject.ts @@ -1,5 +1,5 @@ import { FastifyInstance } from 'fastify' -import { FromSchema } from 'json-schema-to-ts' +import { FromSchema, JSONSchema } from 'json-schema-to-ts' import { createDefaultSchema, createResponse } from '../../generic-routes' import { AuthenticatedRequest } from '../../request' @@ -33,7 +33,7 @@ export default async function routes(fastify: FastifyInstance) { fastify.delete( '/:bucketName/*', { - schema, + schema: schema, }, async (request, response) => { const { bucketName } = request.params diff --git a/src/http/routes/object/getSignedObject.ts b/src/http/routes/object/getSignedObject.ts index 6a2bb22f..3e22e292 100644 --- a/src/http/routes/object/getSignedObject.ts +++ b/src/http/routes/object/getSignedObject.ts @@ -2,7 +2,7 @@ import { FastifyInstance } from 'fastify' import { FromSchema } from 'json-schema-to-ts' import { getConfig } from '../../../config' import { SignedToken, verifyJWT } from '../../../auth' -import { StorageBackendError } from '../../../storage' +import { ERRORS } from '../../../storage' import { getJwtSecret } from '../../../database/tenant' const { storageS3Bucket } = getConfig() @@ -63,14 +63,14 @@ export default async function routes(fastify: FastifyInstance) { payload = (await verifyJWT(token, jwtSecret)) as SignedToken } catch (e) { const err = e as Error - throw new StorageBackendError('Invalid JWT', 400, err.message, err) + throw ERRORS.InvalidJWT(err) } const { url, exp } = payload const path = `${request.params.bucketName}/${request.params['*']}` if (url !== path) { - throw new StorageBackendError('InvalidSignature', 400, 'The url do not match the signature') + throw ERRORS.InvalidSignature() } const s3Key = `${request.tenantId}/${url}` diff --git a/src/http/routes/object/uploadSignedObject.ts b/src/http/routes/object/uploadSignedObject.ts index 871a347b..ac6edbd1 100644 --- a/src/http/routes/object/uploadSignedObject.ts +++ b/src/http/routes/object/uploadSignedObject.ts @@ -1,7 +1,7 @@ import { FastifyInstance } from 'fastify' import { FromSchema } from 'json-schema-to-ts' import { SignedUploadToken, verifyJWT } from '../../../auth' -import { StorageBackendError } from '../../../storage' +import { ERRORS } from '../../../storage' import { getJwtSecret } from '../../../database/tenant' const uploadSignedObjectParamsSchema = { @@ -78,7 +78,7 @@ export default async function routes(fastify: FastifyInstance) { payload = (await verifyJWT(token, jwtSecret)) as SignedUploadToken } catch (e) { const err = e as Error - throw new StorageBackendError('Invalid JWT', 400, err.message, err) + throw ERRORS.InvalidJWT(err) } const { url, exp, owner } = payload @@ -86,11 +86,11 @@ export default async function routes(fastify: FastifyInstance) { const objectName = request.params['*'] if (url !== `${bucketName}/${objectName}`) { - throw new StorageBackendError('InvalidSignature', 400, 'The url do not match the signature') + throw ERRORS.InvalidSignature() } if (exp * 1000 < Date.now()) { - throw new StorageBackendError('ExpiredSignature', 400, 'The signature has expired') + throw ERRORS.ExpiredSignature() } const { objectMetadata, path } = await request.storage diff --git a/src/http/routes/render/renderSignedImage.ts b/src/http/routes/render/renderSignedImage.ts index c801df1c..0b94f7a6 100644 --- 
a/src/http/routes/render/renderSignedImage.ts +++ b/src/http/routes/render/renderSignedImage.ts @@ -3,7 +3,7 @@ import { FastifyInstance } from 'fastify' import { getConfig } from '../../../config' import { ImageRenderer } from '../../../storage/renderer' import { SignedToken, verifyJWT } from '../../../auth' -import { StorageBackendError } from '../../../storage' +import { ERRORS } from '../../../storage' import { getJwtSecret } from '../../../database/tenant' const { storageS3Bucket } = getConfig() @@ -60,7 +60,7 @@ export default async function routes(fastify: FastifyInstance) { payload = (await verifyJWT(token, jwtSecret)) as SignedToken } catch (e) { const err = e as Error - throw new StorageBackendError('Invalid JWT', 400, err.message, err) + throw ERRORS.InvalidJWT(err) } const { url, transformations, exp } = payload @@ -68,7 +68,7 @@ export default async function routes(fastify: FastifyInstance) { const path = `${request.params.bucketName}/${request.params['*']}` if (url !== path) { - throw new StorageBackendError('InvalidSignature', 400, 'The url do not match the signature') + throw ERRORS.InvalidSignature() } const s3Key = `${request.tenantId}/${url}` diff --git a/src/http/routes/s3/commands/abort-multipart-upload.ts b/src/http/routes/s3/commands/abort-multipart-upload.ts new file mode 100644 index 00000000..03d26457 --- /dev/null +++ b/src/http/routes/s3/commands/abort-multipart-upload.ts @@ -0,0 +1,33 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const AbortMultiPartUploadInput = { + summary: 'Abort MultiPart Upload', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: { + type: 'object', + properties: { + uploadId: { type: 'string' }, + }, + required: ['uploadId'], + }, +} as const + +export default function AbortMultiPartUpload(s3Router: S3Router) { + s3Router.delete('/:Bucket/*?uploadId', AbortMultiPartUploadInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + + return s3Protocol.abortMultipartUpload({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + UploadId: req.Querystring.uploadId, + }) + }) +} diff --git a/src/http/routes/s3/commands/complete-multipart-upload.ts b/src/http/routes/s3/commands/complete-multipart-upload.ts new file mode 100644 index 00000000..317287c7 --- /dev/null +++ b/src/http/routes/s3/commands/complete-multipart-upload.ts @@ -0,0 +1,62 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const CompletedMultipartUpload = { + summary: 'Complete multipart upload', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: { + type: 'object', + properties: { + uploadId: { type: 'string' }, + }, + required: ['uploadId'], + }, + Headers: { + type: 'object', + properties: { + authorization: { type: 'string' }, + }, + }, + Body: { + type: 'object', + properties: { + CompleteMultipartUpload: { + type: 'object', + properties: { + Parts: { + type: 'array', + items: { + type: 'object', + properties: { + PartNumber: { type: 'integer' }, + ETag: { type: 'string' }, + }, + // required: ['PartNumber', 'ETag'], + }, + }, + }, + }, + }, + }, +} as const + +export default function CompleteMultipartUpload(s3Router: S3Router) { + s3Router.post('/:Bucket/*?uploadId', 
CompletedMultipartUpload, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + return s3Protocol.completeMultiPartUpload({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + UploadId: req.Querystring.uploadId, + MultipartUpload: { + Parts: req.Body?.CompleteMultipartUpload?.Parts || [], + }, + }) + }) +} diff --git a/src/http/routes/s3/commands/copy-object.ts b/src/http/routes/s3/commands/copy-object.ts new file mode 100644 index 00000000..a9935691 --- /dev/null +++ b/src/http/routes/s3/commands/copy-object.ts @@ -0,0 +1,52 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const CopyObjectInput = { + summary: 'Copy Object', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Headers: { + type: 'object', + properties: { + 'x-amz-copy-source': { type: 'string' }, + 'x-amz-copy-source-if-match': { type: 'string' }, + 'x-amz-copy-source-if-modified-since': { type: 'string' }, + 'x-amz-copy-source-if-none-match': { type: 'string' }, + 'x-amz-copy-source-if-unmodified-since': { type: 'string' }, + 'content-encoding': { type: 'string' }, + 'content-type': { type: 'string' }, + 'cache-control': { type: 'string' }, + expires: { type: 'string' }, + }, + }, +} as const + +export default function CopyObject(s3Router: S3Router) { + s3Router.put('/:Bucket/*|x-amz-copy-source', CopyObjectInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + + return s3Protocol.copyObject({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + CopySource: req.Headers['x-amz-copy-source'], + ContentType: req.Headers['content-type'], + CacheControl: req.Headers['cache-control'], + Expires: req.Headers.expires ? new Date(req.Headers.expires) : undefined, + ContentEncoding: req.Headers['content-encoding'], + CopySourceIfMatch: req.Headers['x-amz-copy-source-if-match'], + CopySourceIfModifiedSince: req.Headers['x-amz-copy-source-if-modified-since'] + ? new Date(req.Headers['x-amz-copy-source-if-modified-since']) + : undefined, + CopySourceIfNoneMatch: req.Headers['x-amz-copy-source-if-none-match'], + CopySourceIfUnmodifiedSince: req.Headers['x-amz-copy-source-if-unmodified-since'] + ? 
new Date(req.Headers['x-amz-copy-source-if-unmodified-since']) + : undefined, + }) + }) +} diff --git a/src/http/routes/s3/commands/create-bucket.ts b/src/http/routes/s3/commands/create-bucket.ts new file mode 100644 index 00000000..da987581 --- /dev/null +++ b/src/http/routes/s3/commands/create-bucket.ts @@ -0,0 +1,27 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const CreateBucketInput = { + summary: 'Create Bucket', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, + Headers: { + type: 'object', + properties: { + 'x-amz-acl': { type: 'string' }, + }, + }, +} as const + +export default function CreateBucket(s3Router: S3Router) { + s3Router.put('/:Bucket', CreateBucketInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + + return s3Protocol.createBucket(req.Params.Bucket, req.Headers?.['x-amz-acl'] === 'public-read') + }) +} diff --git a/src/http/routes/s3/commands/create-multipart-upload.ts b/src/http/routes/s3/commands/create-multipart-upload.ts new file mode 100644 index 00000000..ed2f32a0 --- /dev/null +++ b/src/http/routes/s3/commands/create-multipart-upload.ts @@ -0,0 +1,45 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const CreateMultiPartUploadInput = { + summary: 'Create multipart upload', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: { + type: 'object', + properties: { + uploads: { type: 'string' }, + }, + }, + Headers: { + type: 'object', + properties: { + 'content-type': { type: 'string' }, + 'cache-control': { type: 'string' }, + 'content-disposition': { type: 'string' }, + 'content-encoding': { type: 'string' }, + }, + }, + Body: {}, +} as const + +export default function CreateMultipartUpload(s3Router: S3Router) { + s3Router.post('/:Bucket/*?uploads', CreateMultiPartUploadInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + + return s3Protocol.createMultiPartUpload({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + ContentType: req.Headers?.['content-type'], + CacheControl: req.Headers?.['cache-control'], + ContentDisposition: req.Headers?.['content-disposition'], + ContentEncoding: req.Headers?.['content-encoding'], + }) + }) +} diff --git a/src/http/routes/s3/commands/delete-bucket.ts b/src/http/routes/s3/commands/delete-bucket.ts new file mode 100644 index 00000000..5e432608 --- /dev/null +++ b/src/http/routes/s3/commands/delete-bucket.ts @@ -0,0 +1,21 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const DeleteBucketInput = { + summary: 'Delete Bucket', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, +} as const + +export default function DeleteBucket(s3Router: S3Router) { + s3Router.delete('/:Bucket', DeleteBucketInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + + return s3Protocol.deleteBucket(req.Params.Bucket) + }) +} diff --git a/src/http/routes/s3/commands/delete-object.ts b/src/http/routes/s3/commands/delete-object.ts new file mode 100644 index 00000000..ab0ff333 --- /dev/null +++ b/src/http/routes/s3/commands/delete-object.ts @@ -0,0 +1,78 @@ +import { S3ProtocolHandler } 
from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const DeleteObjectInput = { + summary: 'Delete Object', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: {}, +} as const + +const DeleteObjectsInput = { + summary: 'Delete Objects', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, + Body: { + type: 'object', + properties: { + Delete: { + type: 'object', + properties: { + Object: { + type: 'array', + items: { + type: 'object', + properties: { + Key: { type: 'string' }, + }, + required: ['Key'], + }, + }, + }, + required: ['Object'], + }, + }, + required: ['Delete'], + }, + Querystring: { + type: 'object', + properties: { + delete: { type: 'string' }, + }, + }, +} as const + +export default function DeleteObject(s3Router: S3Router) { + // Delete multiple objects + s3Router.post('/:Bucket?delete', DeleteObjectsInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + + return s3Protocol.deleteObjects({ + Bucket: req.Params.Bucket, + Delete: { + Objects: req.Body.Delete.Object, + }, + }) + }) + + // Delete single object + s3Router.delete('/:Bucket/*', DeleteObjectInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + + return s3Protocol.deleteObject({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + }) + }) +} diff --git a/src/http/routes/s3/commands/get-bucket.ts b/src/http/routes/s3/commands/get-bucket.ts new file mode 100644 index 00000000..e711a683 --- /dev/null +++ b/src/http/routes/s3/commands/get-bucket.ts @@ -0,0 +1,28 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const GetBucketInput = { + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, +} as const + +export default function GetBucket(s3Router: S3Router) { + s3Router.get('/:Bucket?location', GetBucketInput, async (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + await ctx.storage.findBucket(req.Params.Bucket) + + return s3Protocol.getBucketLocation() + }) + + s3Router.get('/:Bucket?versioning', GetBucketInput, async (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + await ctx.storage.findBucket(req.Params.Bucket) + + return s3Protocol.getBucketVersioning() + }) +} diff --git a/src/http/routes/s3/commands/get-object.ts b/src/http/routes/s3/commands/get-object.ts new file mode 100644 index 00000000..316e0798 --- /dev/null +++ b/src/http/routes/s3/commands/get-object.ts @@ -0,0 +1,37 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const ListObjectsInput = { + summary: 'Get Object', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Headers: { + type: 'object', + properties: { + range: { type: 'string' }, + 'if-none-match': { type: 'string' }, + 'if-modified-since': { type: 'string' }, + }, + }, +} as const + +export default function ListObjects(s3Router: S3Router) { + s3Router.get('/:Bucket/*', ListObjectsInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const ifModifiedSince = req.Headers?.['if-modified-since'] + + return 
s3Protocol.getObject({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + Range: req.Headers?.['range'], + IfNoneMatch: req.Headers?.['if-none-match'], + IfModifiedSince: ifModifiedSince ? new Date(ifModifiedSince) : undefined, + }) + }) +} diff --git a/src/http/routes/s3/commands/head-bucket.ts b/src/http/routes/s3/commands/head-bucket.ts new file mode 100644 index 00000000..e2554f6b --- /dev/null +++ b/src/http/routes/s3/commands/head-bucket.ts @@ -0,0 +1,20 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const GetBucketInput = { + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, +} as const + +export default function HeadBucket(s3Router: S3Router) { + s3Router.head('/:Bucket', GetBucketInput, async (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + + return s3Protocol.headBucket(req.Params.Bucket) + }) +} diff --git a/src/http/routes/s3/commands/head-object.ts b/src/http/routes/s3/commands/head-object.ts new file mode 100644 index 00000000..5acd6f5e --- /dev/null +++ b/src/http/routes/s3/commands/head-object.ts @@ -0,0 +1,25 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const HeadObjectInput = { + summary: 'Head Object', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, +} as const + +export default function HeadObject(s3Router: S3Router) { + s3Router.head('/:Bucket/*', HeadObjectInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + + return s3Protocol.headObject({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + }) + }) +} diff --git a/src/http/routes/s3/commands/list-buckets.ts b/src/http/routes/s3/commands/list-buckets.ts new file mode 100644 index 00000000..07c89b94 --- /dev/null +++ b/src/http/routes/s3/commands/list-buckets.ts @@ -0,0 +1,13 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const ListObjectsInput = { + summary: 'List buckets', +} as const + +export default function ListBuckets(s3Router: S3Router) { + s3Router.get('/', ListObjectsInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + return s3Protocol.listBuckets() + }) +} diff --git a/src/http/routes/s3/commands/list-multipart-uploads.ts b/src/http/routes/s3/commands/list-multipart-uploads.ts new file mode 100644 index 00000000..e1467603 --- /dev/null +++ b/src/http/routes/s3/commands/list-multipart-uploads.ts @@ -0,0 +1,40 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const ListObjectsInput = { + summary: 'List Objects', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, + Querystring: { + type: 'object', + properties: { + delimiter: { type: 'string' }, + 'encoding-type': { type: 'string' }, + 'max-uploads': { type: 'number' }, + 'key-marker': { type: 'string' }, + 'upload-id-marker': { type: 'string' }, + prefix: { type: 'string' }, + }, + }, +} as const + +export default function ListMultipartUploads(s3Router: S3Router) { + s3Router.get('/:Bucket?uploads', ListObjectsInput, async (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + + return 
s3Protocol.listMultipartUploads({ + Bucket: req.Params.Bucket, + Prefix: req.Querystring?.prefix || '', + KeyMarker: req.Querystring?.['key-marker'], + UploadIdMarker: req.Querystring?.['upload-id-marker'], + EncodingType: req.Querystring?.['encoding-type'] as any, + MaxUploads: req.Querystring?.['max-uploads'], + Delimiter: req.Querystring?.delimiter, + }) + }) +} diff --git a/src/http/routes/s3/commands/list-objects.ts b/src/http/routes/s3/commands/list-objects.ts new file mode 100644 index 00000000..b20cb36d --- /dev/null +++ b/src/http/routes/s3/commands/list-objects.ts @@ -0,0 +1,40 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const ListObjectsInput = { + summary: 'List Objects', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, + Querystring: { + type: 'object', + properties: { + delimiter: { type: 'string' }, + 'encoding-type': { type: 'string' }, + 'max-keys': { type: 'number' }, + prefix: { type: 'string' }, + 'continuation-token': { type: 'string' }, + 'start-after': { type: 'string' }, + }, + }, +} as const + +export default function ListObjects(s3Router: S3Router) { + s3Router.get('/:Bucket?list-type=2', ListObjectsInput, async (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + + return s3Protocol.listObjectsV2({ + Bucket: req.Params.Bucket, + Prefix: req.Querystring?.prefix || '', + ContinuationToken: req.Querystring?.['continuation-token'], + StartAfter: req.Querystring?.['start-after'], + EncodingType: req.Querystring?.['encoding-type'] as any, + MaxKeys: req.Querystring?.['max-keys'], + Delimiter: req.Querystring?.delimiter, + }) + }) +} diff --git a/src/http/routes/s3/commands/upload-part.ts b/src/http/routes/s3/commands/upload-part.ts new file mode 100644 index 00000000..255f69c8 --- /dev/null +++ b/src/http/routes/s3/commands/upload-part.ts @@ -0,0 +1,82 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const PutObjectInput = { + summary: 'Put Object', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: { + type: 'object', + }, + Headers: { + type: 'object', + properties: { + authorization: { type: 'string' }, + host: { type: 'string' }, + 'x-amz-content-sha256': { type: 'string' }, + 'x-amz-date': { type: 'string' }, + 'content-type': { type: 'string' }, + }, + }, +} as const + +const UploadPartInput = { + summary: 'Upload Part', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: { + type: 'object', + properties: { + uploadId: { type: 'string' }, + partNumber: { type: 'number', minimum: 1, maximum: 5000 }, + }, + required: ['uploadId', 'partNumber'], + }, + Headers: { + type: 'object', + properties: { + host: { type: 'string' }, + 'x-amz-content-sha256': { type: 'string' }, + 'x-amz-date': { type: 'string' }, + 'content-type': { type: 'string' }, + 'content-length': { type: 'integer' }, + }, + required: ['content-length'], + }, +} as const + +export default function UploadPart(s3Router: S3Router) { + s3Router.put('/:Bucket/*?uploadId&partNumber', UploadPartInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + + return s3Protocol.uploadPart({ + Body: (req.raw as any).raw, + 
UploadId: req.Querystring?.uploadId, + Bucket: req.Params.Bucket, + Key: req.Params['*'], + PartNumber: req.Querystring?.partNumber, + ContentLength: req.Headers?.['content-length'], + }) + }) + + s3Router.put('/:Bucket/*', PutObjectInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + return s3Protocol.putObject({ + Body: req.raw, + Bucket: req.Params.Bucket, + Key: req.Params['*'], + }) + }) +} diff --git a/src/http/routes/s3/index.ts b/src/http/routes/s3/index.ts index 706149f2..4c87acda 100644 --- a/src/http/routes/s3/index.ts +++ b/src/http/routes/s3/index.ts @@ -1,7 +1,11 @@ -import { FastifyInstance } from 'fastify' -import { db, jsonToXml, signatureV4, storage } from '../../plugins' -import { S3ProtocolHandler } from '../../../storage/protocols/s3/handler' -import { Router } from '../../../storage/protocols/s3/router' +import { FastifyInstance, RouteHandlerMethod } from 'fastify' +import { db, dbSuperUser, jsonToXml, signatureV4, storage } from '../../plugins' +import { getRouter } from './router' +import { FastifyError } from '@fastify/error' +import { FastifyRequest } from 'fastify/types/request' +import { FastifyReply } from 'fastify/types/reply' +import { StorageBackendError } from '../../../storage' +import { DatabaseError } from 'pg' export default async function routes(fastify: FastifyInstance) { fastify.register(async (fastify) => { @@ -10,31 +14,174 @@ export default async function routes(fastify: FastifyInstance) { fastify.register(db) fastify.register(storage) - const s3Router = new Router() - + const s3Router = getRouter() const s3Routes = s3Router.routes() - Object.keys(s3Routes).forEach((route) => { - fastify.all(route, async (req, reply) => { - const routeMatch = s3Routes[route as keyof typeof s3Routes] - const routeHandler = routeMatch(req as any) + Array.from(s3Routes.keys()).forEach((routePath) => { + const routes = s3Routes.get(routePath) + if (!routes || routes?.length === 0) { + return + } - if (!routeHandler) { - return reply.status(404).send() - } + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + const methods = new Set(routes.map((e) => e.method)) + + methods.forEach((method) => { + const routesByMethod = routes.filter((e) => e.method === method) + + const schemaTypes = routesByMethod.map((r) => r.schema) + const schema = schemaTypes.reduce( + (acc, curr) => { + if (curr.summary) { + if (acc.summary) { + acc.summary = `${acc.summary} | ${curr.summary}` + } else { + acc.summary = `${curr.summary}` + } + } + if (curr.Params) { + acc.params = { + ...acc.params, + ...(curr.Params as any), + } + } + + if (curr.Querystring) { + acc.querystring = { + ...acc.querystring, + anyOf: [...(acc.querystring?.anyOf ? acc.querystring.anyOf : []), curr.Querystring], + } + } + + if (curr.Headers) { + acc.headers = { + ...acc.headers, + anyOf: [...(acc.headers?.anyOf ? acc.headers.anyOf : []), curr.Headers], + } + } + + if (curr.Body && ['put', 'post', 'patch'].includes(method)) { + acc.body = { + ...acc.body, + anyOf: [...(acc.body?.oneOf ? 
acc.body.oneOf : []), curr.Body], + } + } + + return acc + }, + { + tags: ['s3'], + } as any + ) - const s3Protocol = new S3ProtocolHandler(req.storage, req.tenantId) - const output = await routeHandler(s3Protocol) + const errorHandler = ( + error: FastifyError, + request: FastifyRequest, + reply: FastifyReply + ) => { + request.executionError = error + const resource = request.url.split('?')[0].replace('/s3', '').split('/') - const headers = output.headers + if (error instanceof StorageBackendError) { + return reply.status(error.httpStatusCode || 500).send({ + Error: { + Resource: resource, + Code: error.code, + Message: error.message, + }, + }) + } - if (headers) { - Object.keys(headers).forEach((header) => { - reply.header(header, headers[header]) + // database error + if ( + error instanceof DatabaseError && + [ + 'Max client connections reached', + 'remaining connection slots are reserved for non-replication superuser connections', + 'no more connections allowed', + 'sorry, too many clients already', + 'server login has been failing, try again later', + ].some((msg) => (error as DatabaseError).message.includes(msg)) + ) { + return reply.status(429).send({ + Error: { + Resource: resource, + Code: 'SlowDown', + Message: 'Too many connections issued to the database', + }, + }) + } + + return reply.status(500).send({ + Error: { + Code: 'Internal', + Message: 'Internal Server Error', + }, }) } - return reply.status(output.statusCode || 200).send(output.responseBody) + const routeHandler: RouteHandlerMethod = async (req, reply) => { + for (const route of routesByMethod) { + if ( + s3Router.matchRoute(route, { + query: (req.query as any) || {}, + headers: (req.headers as any) || {}, + }) + ) { + if (!route.handler) { + throw new Error('no handler found') + } + const output = await route.handler( + { + Params: req.params as any, + Body: req.body as any, + Headers: req.headers as any, + Querystring: req.query as any, + raw: req as any, + }, + { + storage: req.storage, + tenantId: req.tenantId, + } + ) + + const headers = output.headers + + if (headers) { + Object.keys(headers).forEach((header) => { + reply.header(header, headers[header]) + }) + } + return reply.status(output.statusCode || 200).send(output.responseBody) + } + } + + return reply.status(404).send() + } + + fastify[method]( + routePath, + { + schema, + exposeHeadRoute: false, + errorHandler: errorHandler, + }, + routeHandler + ) + + // handle optional trailing slash + if (!routePath.endsWith('*') && !routePath.endsWith('/')) { + fastify[method]( + routePath + '/', + { + schema, + exposeHeadRoute: false, + errorHandler: errorHandler, + }, + routeHandler + ) + } }) }) }) diff --git a/src/http/routes/s3/router.ts b/src/http/routes/s3/router.ts new file mode 100644 index 00000000..8b527ef1 --- /dev/null +++ b/src/http/routes/s3/router.ts @@ -0,0 +1,214 @@ +import { Storage } from '../../../storage' + +import { default as CreateBucket } from './commands/create-bucket' +import { default as ListBucket } from './commands/list-buckets' +import { default as ListObjects } from './commands/list-objects' +import { default as GetObject } from './commands/get-object' +import { default as CompleteMultipartUpload } from './commands/complete-multipart-upload' +import { default as DeleteBucket } from './commands/delete-bucket' +import { default as CreateMultipartUpload } from './commands/create-multipart-upload' +import { default as UploadPart } from './commands/upload-part' +import { default as HeadObject } from './commands/head-object' +import { 
default as DeleteObject } from './commands/delete-object' +import { default as AbortMultiPartUpload } from './commands/abort-multipart-upload' +import { default as GetBucket } from './commands/get-bucket' +import { default as HeadBucket } from './commands/head-bucket' +import { default as CopyObject } from './commands/copy-object' + +import { FromSchema, JSONSchema } from 'json-schema-to-ts' + +export type Context = { storage: Storage; tenantId: string } +export type S3Router = Router + +const s3Commands = [ + CopyObject, + DeleteBucket, + GetObject, + HeadObject, + ListObjects, + CreateBucket, + CompleteMultipartUpload, + CreateMultipartUpload, + UploadPart, + DeleteObject, + AbortMultiPartUpload, + GetBucket, + HeadBucket, + ListBucket, +] + +export function getRouter() { + const router = new Router() + s3Commands.forEach((command) => command(router)) + return router +} + +export type HTTPMethod = 'get' | 'put' | 'post' | 'head' | 'delete' | 'patch' + +export type Schema< + Q extends JSONSchema = JSONSchema, + H extends JSONSchema = JSONSchema, + P extends JSONSchema = JSONSchema, + B extends JSONSchema = JSONSchema +> = { + summary?: string + Querystring?: Q + Headers?: H + Params?: P + Body?: B +} + +type ResponseType = { + statusCode?: number + headers?: Record + responseBody?: unknown +} + +type RequestInput< + S extends Schema, + A extends { + [key in keyof S]: S[key] extends JSONSchema ? FromSchema : undefined + } = { + [key in keyof S]: S[key] extends JSONSchema ? FromSchema : undefined + } +> = { + Querystring: A['Querystring'] + Headers: A['Headers'] + Params: A['Params'] + Body: A['Body'] + raw: ReadableStream +} + +type Handler = ( + req: RequestInput, + ctx: Context +) => Promise + +type Route = { + method: HTTPMethod + path: string + querystringMatches: { key: string; value: string }[] + headersMatches: string[] + handler?: Handler + schema: S +} + +export class Router { + protected _routes: Map[]> = new Map[]>() + + registerRoute( + method: HTTPMethod, + url: string, + schema: R, + handler: Handler + ) { + const { query, headers } = this.parseQueryString(url) + const normalizedUrl = url.split('?')[0].split('|')[0] + + const existingPath = this._routes.get(normalizedUrl) + const newRoute: Route = { + method: method as HTTPMethod, + path: normalizedUrl, + querystringMatches: query, + headersMatches: headers, + schema: schema, + handler: handler as Handler, + } as const + + if (!existingPath) { + this._routes.set(normalizedUrl, [newRoute as unknown as Route]) + return + } + + existingPath.push(newRoute as unknown as Route) + this._routes.set(normalizedUrl, existingPath) + } + + get(url: string, schema: R, handler: Handler) { + this.registerRoute('get', url, schema, handler as any) + } + + post(url: string, schema: R, handler: Handler) { + this.registerRoute('post', url, schema, handler as any) + } + + put(url: string, schema: R, handler: Handler) { + this.registerRoute('put', url, schema, handler as any) + } + + delete(url: string, schema: R, handler: Handler) { + this.registerRoute('delete', url, schema, handler as any) + } + + head(url: string, schema: R, handler: Handler) { + this.registerRoute('head', url, schema, handler as any) + } + + parseQueryMatch(query: string) { + const [key, value] = query.split('=') + return { key, value } + } + + parseQueryString(queryString: string) { + const queries = queryString.split('?')[1]?.split('&') || [] + const headers = queryString.split('|').splice(1) + + if (queries.length === 0) { + return { query: [{ key: '*', value: '*' }], 
headers: headers } + } + return { query: queries.map(this.parseQueryMatch), headers: headers } + } + + routes() { + return this._routes + } + + matchRoute( + route: Route, + match: { query: Record; headers: Record } + ) { + if ((route.headersMatches?.length || 0) > 0) { + return ( + this.matchHeaders(route.headersMatches, match.headers) && + this.matchQueryString(route.querystringMatches, match.query) + ) + } + + return this.matchQueryString(route.querystringMatches, match.query) + } + + protected matchHeaders(headers: string[], received?: Record) { + if (!received) { + return headers.length === 0 + } + + return headers.every((header) => received[header] !== undefined) + } + + protected matchQueryString( + matches: { key: string; value: string }[], + received?: Record + ) { + const keys = Object.keys(received || {}) + if (keys.length === 0 || !received) { + return matches.find((m) => m.key === '*') + } + + const foundMatches = matches.every((m) => { + const key = Object.keys(received).find((k) => k === m.key) + return ( + (m.key === key && m.value !== undefined && m.value === received[m.key]) || + (m.key === key && m.value === undefined) + ) + }) + + if (foundMatches) { + return true + } + + if (!foundMatches && matches.find((m) => m.key === '*')) { + return true + } + return false + } +} diff --git a/src/http/routes/tus/index.ts b/src/http/routes/tus/index.ts index a9d2c058..7e886a76 100644 --- a/src/http/routes/tus/index.ts +++ b/src/http/routes/tus/index.ts @@ -154,7 +154,7 @@ export default async function routes(fastify: FastifyInstance) { fastify.post( '/', - { schema: { summary: 'Handle POST request for TUS Resumable uploads', tags: ['object'] } }, + { schema: { summary: 'Handle POST request for TUS Resumable uploads', tags: ['resumable'] } }, (req, res) => { tusServer.handle(req.raw, res.raw) } @@ -162,7 +162,7 @@ export default async function routes(fastify: FastifyInstance) { fastify.post( '/*', - { schema: { summary: 'Handle POST request for TUS Resumable uploads', tags: ['object'] } }, + { schema: { summary: 'Handle POST request for TUS Resumable uploads', tags: ['resumable'] } }, (req, res) => { tusServer.handle(req.raw, res.raw) } @@ -170,28 +170,32 @@ export default async function routes(fastify: FastifyInstance) { fastify.put( '/*', - { schema: { summary: 'Handle PUT request for TUS Resumable uploads', tags: ['object'] } }, + { schema: { summary: 'Handle PUT request for TUS Resumable uploads', tags: ['resumable'] } }, (req, res) => { tusServer.handle(req.raw, res.raw) } ) fastify.patch( '/*', - { schema: { summary: 'Handle PATCH request for TUS Resumable uploads', tags: ['object'] } }, + { + schema: { summary: 'Handle PATCH request for TUS Resumable uploads', tags: ['resumable'] }, + }, (req, res) => { tusServer.handle(req.raw, res.raw) } ) fastify.head( '/*', - { schema: { summary: 'Handle HEAD request for TUS Resumable uploads', tags: ['object'] } }, + { schema: { summary: 'Handle HEAD request for TUS Resumable uploads', tags: ['resumable'] } }, (req, res) => { tusServer.handle(req.raw, res.raw) } ) fastify.delete( '/*', - { schema: { summary: 'Handle DELETE request for TUS Resumable uploads', tags: ['object'] } }, + { + schema: { summary: 'Handle DELETE request for TUS Resumable uploads', tags: ['resumable'] }, + }, (req, res) => { tusServer.handle(req.raw, res.raw) } @@ -220,7 +224,7 @@ export default async function routes(fastify: FastifyInstance) { '/', { schema: { - tags: ['object'], + tags: ['resumable'], summary: 'Handle OPTIONS request for TUS Resumable uploads', 
description: 'Handle OPTIONS request for TUS Resumable uploads', }, @@ -234,7 +238,7 @@ export default async function routes(fastify: FastifyInstance) { '/*', { schema: { - tags: ['object'], + tags: ['resumable'], summary: 'Handle OPTIONS request for TUS Resumable uploads', description: 'Handle OPTIONS request for TUS Resumable uploads', }, diff --git a/src/http/routes/tus/lifecycle.ts b/src/http/routes/tus/lifecycle.ts index ae2f6e4d..26a89f43 100644 --- a/src/http/routes/tus/lifecycle.ts +++ b/src/http/routes/tus/lifecycle.ts @@ -2,7 +2,7 @@ import http from 'http' import { BaseLogger } from 'pino' import { Upload } from '@tus/server' import { randomUUID } from 'crypto' -import { isRenderableError, Storage, StorageBackendError } from '../../../storage' +import { ERRORS, isRenderableError, Storage } from '../../../storage' import { getConfig } from '../../../config' import { Uploader } from '../../../storage/uploader' import { TenantConnection } from '../../../database' @@ -102,7 +102,7 @@ export function namingFunction( } if (!metadata) { - throw new StorageBackendError('metadata_header_invalid', 400, 'metadata header invalid') + throw ERRORS.MetadataRequired() } try { diff --git a/src/server.ts b/src/server.ts index 5299660b..aa792516 100644 --- a/src/server.ts +++ b/src/server.ts @@ -50,16 +50,6 @@ const exposeDocs = true disableRequestLogging: true, exposeDocs, requestIdHeader: requestTraceHeader, - ignoreTrailingSlash: true, - ajv: { - customOptions: { - coerceTypes: false, - }, - }, - // https: { - // key: fs.readFileSync(path.join(__dirname, '..', 'data', 'localhost-key.pem')), - // cert: fs.readFileSync(path.join(__dirname, '..', 'data', 'localhost.pem')), - // } as any, }) await PubSub.connect() diff --git a/src/storage/backend/generic.ts b/src/storage/backend/adapter.ts similarity index 90% rename from src/storage/backend/generic.ts rename to src/storage/backend/adapter.ts index f1ed8ce4..f53d78aa 100644 --- a/src/storage/backend/generic.ts +++ b/src/storage/backend/adapter.ts @@ -102,14 +102,21 @@ export abstract class StorageBackendAdapter { * @param version * @param destination * @param destinationVersion + * @param conditions */ async copyObject( bucket: string, source: string, version: string | undefined, destination: string, - destinationVersion: string | undefined - ): Promise> { + destinationVersion: string | undefined, + conditions?: { + ifMatch?: string + ifNoneMatch?: string + ifModifiedSince?: Date + ifUnmodifiedSince?: Date + } + ): Promise> { throw new Error('copyObject not implemented') } @@ -159,6 +166,7 @@ export abstract class StorageBackendAdapter { async uploadPart( bucketName: string, key: string, + version: string, uploadId: string, partNumber: number, body?: string | Uint8Array | Buffer | Readable, @@ -171,6 +179,7 @@ export abstract class StorageBackendAdapter { bucketName: string, key: string, uploadId: string, + version: string, parts: UploadPart[] ): Promise< Omit & { @@ -181,6 +190,10 @@ export abstract class StorageBackendAdapter { > { throw new Error('not implemented') } + + async abortMultipartUpload(bucketName: string, key: string, uploadId: string): Promise { + throw new Error('not implemented') + } } const { tusUseFileVersionSeparator } = getConfig() diff --git a/src/storage/backend/file.ts b/src/storage/backend/file.ts index d3754c40..64818b58 100644 --- a/src/storage/backend/file.ts +++ b/src/storage/backend/file.ts @@ -1,4 +1,4 @@ -// import xattr from 'fs-xattr' +import * as xattr from 'fs-xattr' import fs from 'fs-extra' import path from 
'path' import fileChecksum from 'md5-file' @@ -12,7 +12,7 @@ import { withOptionalVersion, BrowserCacheHeaders, UploadPart, -} from './generic' +} from './adapter' import { StorageBackendError } from '../errors' const pipeline = promisify(stream.pipeline) @@ -182,7 +182,7 @@ export class FileBackend implements StorageBackendAdapter { version: string | undefined, destination: string, destinationVersion: string - ): Promise> { + ): Promise> { const srcFile = path.resolve(this.filePath, withOptionalVersion(`${bucket}/${source}`, version)) const destFile = path.resolve( this.filePath, @@ -194,8 +194,13 @@ export class FileBackend implements StorageBackendAdapter { await this.setFileMetadata(destFile, await this.getFileMetadata(srcFile)) + const fileStat = await fs.lstat(destFile) + const checksum = await fileChecksum(destFile) + return { httpStatusCode: 200, + lastModified: fileStat.mtime, + eTag: checksum, } } @@ -264,10 +269,10 @@ export class FileBackend implements StorageBackendAdapter { async uploadPart( bucketName: string, key: string, + version: string, uploadId: string, partNumber: number, - body: stream.Readable, - length: number + body: stream.Readable ): Promise<{ ETag?: string }> { throw new Error('not implemented') } @@ -276,6 +281,7 @@ export class FileBackend implements StorageBackendAdapter { bucketName: string, key: string, uploadId: string, + version: string, parts: UploadPart[] ): Promise< Omit & { @@ -305,6 +311,10 @@ export class FileBackend implements StorageBackendAdapter { ]) } + async abortMultipartUpload(bucketName: string, key: string, uploadId: string): Promise { + return Promise.resolve(undefined) + } + protected async getFileMetadata(file: string) { const platform = process.platform == 'darwin' ? 'darwin' : 'linux' const [cacheControl, contentType] = await Promise.all([ @@ -319,14 +329,12 @@ export class FileBackend implements StorageBackendAdapter { } protected getMetadataAttr(file: string, attribute: string): Promise { - return Promise.resolve(undefined) - // return xattr.get(file, attribute).then((value) => { - // return value?.toString() ?? undefined - // }) + return xattr.get(file, attribute).then((value: any) => { + return value?.toString() ?? 
undefined + }) } protected setMetadataAttr(file: string, attribute: string, value: string): Promise { - return Promise.resolve(undefined) - // return xattr.set(file, attribute, value) + return xattr.set(file, attribute, value) } } diff --git a/src/storage/backend/index.ts b/src/storage/backend/index.ts index 071fda19..903a5cc1 100644 --- a/src/storage/backend/index.ts +++ b/src/storage/backend/index.ts @@ -1,11 +1,11 @@ -import { StorageBackendAdapter } from './generic' +import { StorageBackendAdapter } from './adapter' import { FileBackend } from './file' import { S3Backend, S3ClientOptions } from './s3' import { getConfig, StorageBackendType } from '../../config' export * from './s3' export * from './file' -export * from './generic' +export * from './adapter' const { storageS3Region, storageS3Endpoint, storageS3ForcePathStyle } = getConfig() diff --git a/src/storage/backend/s3.ts b/src/storage/backend/s3.ts index 1376af2a..50efd791 100644 --- a/src/storage/backend/s3.ts +++ b/src/storage/backend/s3.ts @@ -1,4 +1,5 @@ import { + AbortMultipartUploadCommand, CompleteMultipartUploadCommand, CompleteMultipartUploadCommandOutput, CopyObjectCommand, @@ -24,12 +25,12 @@ import { ObjectResponse, withOptionalVersion, UploadPart, -} from './generic' +} from './adapter' import { getSignedUrl } from '@aws-sdk/s3-request-presigner' -import { StorageBackendError } from '../errors' +import { ERRORS, StorageBackendError } from '../errors' import { getConfig } from '../../config' import Agent, { HttpsAgent } from 'agentkeepalive' -import stream, { Readable } from 'stream' +import { Readable } from 'stream' const { storageS3MaxSockets } = getConfig() @@ -195,23 +196,36 @@ export class S3Backend implements StorageBackendAdapter { * @param version * @param destination * @param destinationVersion + * @param conditions */ async copyObject( bucket: string, source: string, version: string | undefined, destination: string, - destinationVersion: string | undefined - ): Promise> { + destinationVersion: string | undefined, + conditions?: { + ifMatch?: string + ifNoneMatch?: string + ifModifiedSince?: Date + ifUnmodifiedSince?: Date + } + ): Promise> { try { const command = new CopyObjectCommand({ Bucket: bucket, CopySource: `${bucket}/${withOptionalVersion(source, version)}`, Key: withOptionalVersion(destination, destinationVersion), + CopySourceIfMatch: conditions?.ifMatch, + CopySourceIfNoneMatch: conditions?.ifNoneMatch, + CopySourceIfModifiedSince: conditions?.ifModifiedSince, + CopySourceIfUnmodifiedSince: conditions?.ifUnmodifiedSince, }) const data = await this.client.send(command) return { httpStatusCode: data.$metadata.httpStatusCode || 200, + eTag: data.CopyObjectResult?.ETag || '', + lastModified: data.CopyObjectResult?.LastModified, } } catch (e: any) { throw StorageBackendError.fromError(e) @@ -304,16 +318,12 @@ export class S3Backend implements StorageBackendAdapter { Version: version || '', }, }) + const resp = await this.client.send(createMultiPart) - const uploadInfo = new PutObjectCommand({ - Bucket: bucketName, - Key: `.info.${withOptionalVersion(key)}/${resp.UploadId}`, - Metadata: { - Version: version || '', - }, - }) - await this.client.send(uploadInfo) + if (!resp.UploadId) { + throw ERRORS.InvalidUploadId() + } return resp.UploadId } @@ -321,30 +331,15 @@ export class S3Backend implements StorageBackendAdapter { async uploadPart( bucketName: string, key: string, + version: string, uploadId: string, partNumber: number, body?: string | Uint8Array | Buffer | Readable, length?: number ) { - 
const uploadInfo = new HeadObjectCommand({ - Bucket: bucketName, - Key: `.info.${withOptionalVersion(key)}/${uploadId}`, - }) - const objMapping = await this.client.send(uploadInfo) - - if (!objMapping) { - throw new Error('Upload ID not found') - } - - const version = objMapping.Metadata?.version - - if (!version) { - throw new Error('missing version metadata') - } - const paralellUploadS3 = new UploadPartCommand({ Bucket: bucketName, - Key: `${key}/${objMapping.Metadata?.version}`, + Key: `${key}/${version}`, UploadId: uploadId, PartNumber: partNumber, Body: body, @@ -354,7 +349,6 @@ export class S3Backend implements StorageBackendAdapter { const resp = await this.client.send(paralellUploadS3) return { - // partNumber: resp. version, ETag: resp.ETag, } @@ -364,23 +358,10 @@ export class S3Backend implements StorageBackendAdapter { bucketName: string, key: string, uploadId: string, + version: string, parts: UploadPart[] ) { - const uploadInfo = new HeadObjectCommand({ - Bucket: bucketName, - Key: `.info.${key}/${uploadId}`, - }) - const objMapping = await this.client.send(uploadInfo) - - if (!objMapping) { - throw new Error('Upload ID not found') - } - - const version = objMapping.Metadata?.version - - if (!version) { - throw new Error('missing version metadata') - } + const keyParts = key.split('/') if (parts.length === 0) { const listPartsInput = new ListPartsCommand({ @@ -397,15 +378,18 @@ export class S3Backend implements StorageBackendAdapter { Bucket: bucketName, Key: key + '/' + version, UploadId: uploadId, - MultipartUpload: { - Parts: parts, - }, + MultipartUpload: + parts.length === 0 + ? undefined + : { + Parts: parts, + }, }) const response = await this.client.send(completeUpload) - const keyParts = key.split('/') - const tenantId = keyParts.shift() + const locationParts = key.split('/') + locationParts.shift() // tenant-id const bucket = keyParts.shift() // remove object version from key @@ -419,4 +403,13 @@ export class S3Backend implements StorageBackendAdapter { ...response, } } + + async abortMultipartUpload(bucketName: string, key: string, uploadId: string): Promise { + const abortUpload = new AbortMultipartUploadCommand({ + Bucket: bucketName, + Key: key, + UploadId: uploadId, + }) + await this.client.send(abortUpload) + } } diff --git a/src/storage/database/adapter.ts b/src/storage/database/adapter.ts index f34cb742..71b5bcc9 100644 --- a/src/storage/database/adapter.ts +++ b/src/storage/database/adapter.ts @@ -1,4 +1,4 @@ -import { Bucket, Obj } from '../schemas' +import { Bucket, S3MultipartUpload, Obj, S3PartUpload } from '../schemas' import { ObjectMetadata } from '../backend' import { TenantConnection } from '../../database/connection' @@ -77,6 +77,21 @@ export interface Database { deleteBucket(bucketId: string | string[]): Promise listObjects(bucketId: string, columns: string, limit: number): Promise + listObjectsV2( + bucketId: string, + options?: { prefix?: string; deltimeter?: string; nextToken?: string; maxKeys?: number } + ): Promise + + listMultipartUploads( + bucketId: string, + options?: { + prefix?: string + deltimeter?: string + nextUploadToken?: string + nextUploadKeyToken?: string + maxKeys?: number + } + ): Promise listBuckets(columns: string): Promise mustLockObject(bucketId: string, objectName: string, version?: string): Promise @@ -120,4 +135,29 @@ export interface Database { healthcheck(): Promise destroyConnection(): Promise + + createMultipartUpload( + uploadId: string, + bucketId: string, + objectName: string, + version: string, + signature: 
string + ): Promise + + findMultipartUpload( + uploadId: string, + columns: string, + options?: { forUpdate?: boolean } + ): Promise + + updateMultipartUploadProgress( + uploadId: string, + progress: BigInt, + signature: string + ): Promise + + deleteMultipartUpload(uploadId: string): Promise + + insertUploadPart(part: S3PartUpload): Promise + listAllParts(uploadId: string): Promise } diff --git a/src/storage/database/knex.ts b/src/storage/database/knex.ts index 9417f385..17196869 100644 --- a/src/storage/database/knex.ts +++ b/src/storage/database/knex.ts @@ -1,5 +1,12 @@ -import { Bucket, Obj } from '../schemas' -import { RenderableError, StorageBackendError, StorageError } from '../errors' +import { Bucket, S3MultipartUpload, Obj, S3PartUpload } from '../schemas' +import { + ErrorCode, + ERRORS, + isStorageError, + RenderableError, + StorageBackendError, + StorageErrorOptions, +} from '../errors' import { ObjectMetadata } from '../backend' import { Knex } from 'knex' import { @@ -8,10 +15,9 @@ import { FindBucketFilters, FindObjectFilters, SearchObjectOption, - TransactionOptions, } from './adapter' import { DatabaseError } from 'pg' -import { TenantConnection } from '../../database/connection' +import { TenantConnection } from '../../database' import { DbQueryPerformance } from '../../monitoring/metrics' import { isUuid } from '../limits' @@ -36,17 +42,14 @@ export class StorageKnexDB implements Database { } //eslint-disable-next-line @typescript-eslint/no-explicit-any - async withTransaction Promise>( - fn: T, - transactionOptions?: TransactionOptions - ) { + async withTransaction Promise>(fn: T) { const tnx = await this.connection.transactionProvider(this.options.tnx)() try { await this.connection.setScope(tnx) - tnx.once('query-error', (error) => { - throw DBError.fromDBError(error) + tnx.once('query-error', (error, q) => { + throw DBError.fromDBError(error, q.sql) }) const opts = { ...this.options, tnx } @@ -83,10 +86,10 @@ export class StorageKnexDB implements Database { try { await this.withTransaction(async (db) => { result = await fn(db) - throw new StorageBackendError('permission_ok', 200, 'permission pass') + throw true }) } catch (e) { - if (e instanceof StorageBackendError && e.name === 'permission_ok') { + if (e === true) { return result } throw e @@ -109,17 +112,22 @@ export class StorageKnexDB implements Database { file_size_limit: data.file_size_limit, } - const bucket = await this.runQuery('CreateBucket', async (knex) => { - return knex.from('buckets').insert(bucketData) as Promise<{ rowCount: number }> - }) - - if (bucket.rowCount === 0) { - throw new DBError('Bucket not found', 404, 'Bucket not found', undefined, { - bucketId: data.id, + try { + const bucket = await this.runQuery('CreateBucket', async (knex) => { + return knex.from('buckets').insert(bucketData) as Promise<{ rowCount: number }> }) - } - return bucketData + if (bucket.rowCount === 0) { + throw ERRORS.NoSuchBucket(data.id) + } + + return bucketData + } catch (e) { + if (isStorageError(ErrorCode.ResourceAlreadyExists, e)) { + throw ERRORS.BucketAlreadyExists(data.id, e) + } + throw e + } } async findBucketById(bucketId: string, columns = 'id', filters?: FindBucketFilters) { @@ -142,9 +150,7 @@ export class StorageKnexDB implements Database { }) if (!result && !filters?.dontErrorOnEmpty) { - throw new DBError('Bucket not found', 404, 'Bucket not found', undefined, { - bucketId, - }) + throw ERRORS.NoSuchBucket(bucketId) } return result @@ -183,6 +189,46 @@ export class StorageKnexDB implements Database { 
return data } + async listObjectsV2( + bucketId: string, + options?: { prefix?: string; deltimeter?: string; nextToken?: string; maxKeys?: number } + ) { + return this.runQuery('ListObjectsV2', async (knex) => { + if (!options?.deltimeter) { + const query = knex + .table('objects') + .where('bucket_id', bucketId) + .select(['id', 'name', 'metadata', 'updated_at']) + .limit(options?.maxKeys || 100) + + // knex typing is wrong, it doesn't accept a knex.raw on orderBy, even though it is totally legit + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + query.orderBy(knex.raw('name COLLATE "C"')) + + if (options?.prefix) { + query.where('name', 'ilike', `${options.prefix}%`) + } + + if (options?.nextToken) { + query.andWhere(knex.raw('name COLLATE "C" > ?', [options?.nextToken])) + } + + return query + } + + const query = await knex.raw('select * from storage.list_objects_with_delimiter(?,?,?,?,?)', [ + bucketId, + options?.prefix, + options?.deltimeter, + options?.maxKeys, + options?.nextToken || '', + ]) + + return query.rows + }) + } + async listBuckets(columns = 'id') { const data = await this.runQuery('ListBuckets', (knex) => { return knex.from('buckets').select(columns.split(',')) @@ -191,6 +237,61 @@ export class StorageKnexDB implements Database { return data as Bucket[] } + listMultipartUploads( + bucketId: string, + options?: { + prefix?: string + deltimeter?: string + nextUploadToken?: string + nextUploadKeyToken?: string + maxKeys?: number + } + ) { + return this.runQuery('ListMultipartsUploads', async (knex) => { + if (!options?.deltimeter) { + const query = knex + .table('_s3_multipart_uploads') + .select(['id', 'name']) + .limit(options?.maxKeys || 100) + + // knex typing is wrong, it doesn't accept a knex.raw on orderBy, even though it is totally legit + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + query.orderBy(knex.raw('name COLLATE "C"')) + query.orderBy('created_at') + + if (options?.prefix) { + query.where('name', 'ilike', `${options.prefix}%`) + } + + if (options?.nextUploadKeyToken) { + const operator = options?.nextUploadToken ? '>' : '>=' + query.andWhere(knex.raw(`name COLLATE "C" ${operator} ?`, [options?.nextUploadKeyToken])) + } + + if (options?.nextUploadToken) { + query.andWhere(knex.raw('id COLLATE "C" > ?', [options?.nextUploadToken])) + } + + return query + } + + const query = await knex.raw( + 'select * from storage.list_multipart_uploads_with_delimiter(?,?,?,?,?,?)', + [ + bucketId, + options?.prefix, + options?.deltimeter, + options?.maxKeys, + options?.nextUploadKeyToken || '', + options.nextUploadToken, + ] + ) + + return query.rows + }) + } + async updateBucket( bucketId: string, fields: Pick @@ -204,9 +305,7 @@ export class StorageKnexDB implements Database { }) if (bucket === 0) { - throw new DBError('Bucket not found', 404, 'Bucket not found', undefined, { - bucketId, - }) + throw ERRORS.NoSuchBucket(bucketId) } return @@ -261,26 +360,33 @@ export class StorageKnexDB implements Database { }) if (!object) { - throw new DBError('Not Found', 404, 'object not found') + throw ERRORS.NoSuchKey(name) } return object } async createObject(data: Pick) { - const object = { - name: data.name, - owner: isUuid(data.owner || '') ? 
data.owner : undefined, - owner_id: data.owner, - bucket_id: data.bucket_id, - metadata: data.metadata, - version: data.version, - } - await this.runQuery('CreateObject', (knex) => { - return knex.from('objects').insert(object) - }) + try { + const object = { + name: data.name, + owner: isUuid(data.owner || '') ? data.owner : undefined, + owner_id: data.owner, + bucket_id: data.bucket_id, + metadata: data.metadata, + version: data.version, + } + await this.runQuery('CreateObject', (knex) => { + return knex.from('objects').insert(object) + }) - return object + return object + } catch (e) { + if (isStorageError(ErrorCode.ResourceAlreadyExists, e)) { + throw ERRORS.KeyAlreadyExists(data.name, e) + } + throw e + } } async deleteObject(bucketId: string, objectName: string, version?: string) { @@ -340,9 +446,7 @@ export class StorageKnexDB implements Database { }) if (!object) { - throw new DBError('Object not found', 404, 'not_found', undefined, { - bucketId, - }) + throw ERRORS.NoSuchKey(objectName) } return object @@ -380,9 +484,7 @@ export class StorageKnexDB implements Database { }) if (!object && !filters?.dontErrorOnEmpty) { - throw new DBError('Object not found', 404, 'not_found', undefined, { - bucketId, - }) + throw ERRORS.NoSuchKey(objectName) } return object as typeof filters extends FindObjectFilters @@ -411,7 +513,7 @@ export class StorageKnexDB implements Database { const lockAcquired = result.rows.shift()?.pg_try_advisory_xact_lock || false if (!lockAcquired) { - throw new DBError('resource_locked', 409, 'Resource is locked') + throw ERRORS.ResourceLocked() } return true @@ -443,6 +545,84 @@ export class StorageKnexDB implements Database { }) } + async createMultipartUpload( + uploadId: string, + bucketId: string, + objectName: string, + version: string, + signature: string + ) { + return this.runQuery('CreateMultipartUpload', async (knex) => { + const multipart = await knex + .table('_s3_multipart_uploads') + .insert({ + id: uploadId, + bucket_id: bucketId, + key: objectName, + version, + upload_signature: signature, + }) + .returning('*') + + return multipart[0] as S3MultipartUpload + }) + } + + async findMultipartUpload(uploadId: string, columns = 'id', options?: { forUpdate?: boolean }) { + const multiPart = await this.runQuery('FindMultipartUpload', async (knex) => { + const query = knex + .from('_s3_multipart_uploads') + .select(columns.split(',')) + .where('id', uploadId) + + if (options?.forUpdate) { + return query.forUpdate().first() + } + return query.first() + }) + + if (!multiPart) { + throw ERRORS.InvalidUploadId(uploadId) + } + return multiPart + } + + async updateMultipartUploadProgress(uploadId: string, progress: BigInt, signature: string) { + return this.runQuery('UpdateMultipartUploadProgress', async (knex) => { + await knex + .from('_s3_multipart_uploads') + .update({ in_progress_size: progress, upload_signature: signature }) + .where('id', uploadId) + }) + } + + async deleteMultipartUpload(uploadId: string) { + return this.runQuery('DeleteMultipartUpload', async (knex) => { + await knex.from('_s3_multipart_uploads').delete().where('id', uploadId) + }) + } + + async insertUploadPart(part: S3PartUpload) { + return this.runQuery('InsertUploadPart', async (knex) => { + const storedPart = await knex + .table('_s3_multipart_uploads_parts') + .insert(part) + .returning('*') + + return storedPart[0] + }) + } + + async listAllParts(uploadId: string) { + return this.runQuery('ListAllParts', async (knex) => { + return knex + .from('_s3_multipart_uploads_parts') + 
.select('*') + .orderBy('part_number') + .where('upload_id', uploadId) + }) + } + healthcheck() { return this.runQuery('Healthcheck', (knex) => { return knex.raw('SELECT id from storage.buckets limit 1') @@ -510,58 +690,43 @@ export class StorageKnexDB implements Database { } } -export class DBError extends Error implements RenderableError { - constructor( - message: string, - public readonly statusCode: number, - public readonly error: string, - public readonly originalError?: Error, - public readonly metadata?: Record, - public readonly details?: string, - public readonly query?: string - ) { - super(message) - this.message = message +export class DBError extends StorageBackendError implements RenderableError { + constructor(options: StorageErrorOptions) { + super(options) Object.setPrototypeOf(this, DBError.prototype) } static fromDBError(pgError: DatabaseError, query?: string) { - let message = 'Internal Server Error' - let statusCode = 500 - let error = 'internal' - switch (pgError.code) { case '42501': - message = 'new row violates row-level security policy' - statusCode = 403 - error = 'Unauthorized' - break + return ERRORS.AccessDenied( + 'new row violates row-level security policy', + pgError + ).withMetadata({ + query, + code: pgError.code, + }) case '23505': - message = 'The resource already exists' - statusCode = 409 - error = 'Duplicate' - break + return ERRORS.ResourceAlreadyExists(pgError).withMetadata({ + query, + code: pgError.code, + }) case '23503': - message = 'The parent resource is not found' - statusCode = 404 - error = 'Not Found' - break + return ERRORS.RelatedResourceNotFound(pgError).withMetadata({ + query, + code: pgError.code, + }) case '55P03': case 'resource_locked': - message = 'Resource Locked, an upload might be in progress for this resource' - statusCode = 400 - error = 'resource_locked' - break - } - - return new DBError(message, statusCode, error, pgError, undefined, pgError.message, query) - } - - render(): StorageError { - return { - message: this.message, - statusCode: `${this.statusCode}`, - error: this.error, + return ERRORS.ResourceLocked(pgError).withMetadata({ + query, + code: pgError.code, + }) + default: + return ERRORS.DatabaseError(pgError.message, pgError).withMetadata({ + query, + code: pgError.code, + }) } } diff --git a/src/storage/errors.ts b/src/storage/errors.ts index 90cd5aa1..8edceb68 100644 --- a/src/storage/errors.ts +++ b/src/storage/errors.ts @@ -2,16 +2,304 @@ import { S3ServiceException } from '@aws-sdk/client-s3' export type StorageError = { statusCode: string + code: ErrorCode error: string message: string query?: string } +export enum ErrorCode { + NoSuchBucket = 'NoSuchBucket', + NoSuchKey = 'NoSuchKey', + InvalidJWT = 'InvalidJWT', + InvalidRequest = 'InvalidRequest', + TenantNotFound = 'TenantNotFound', + EntityTooLarge = 'EntityTooLarge', + InternalError = 'InternalError', + ResourceAlreadyExists = 'ResourceAlreadyExists', + InvalidBucketName = 'InvalidBucketName', + InvalidKey = 'InvalidKey', + KeyAlreadyExists = 'KeyAlreadyExists', + BucketAlreadyExists = 'BucketAlreadyExists', + DatabaseTimeout = 'DatabaseTimeout', + InvalidSignature = 'InvalidSignature', + AccessDenied = 'AccessDenied', + ResourceLocked = 'ResourceLocked', + DatabaseError = 'DatabaseError', + MissingContentLength = 'MissingContentLength', + MissingParameter = 'MissingParameter', + InvalidUploadSignature = 'InvalidUploadSignature', + LockTimeout = 'LockTimeout', + S3Error = 'S3Error', +} + +export const ERRORS = { + BucketNotEmpty: (bucket: string, 
e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidRequest, + resource: bucket, + httpStatusCode: 409, + message: `The bucket you tried to delete is not empty: ${bucket}`, + originalError: e, + }), + NoSuchBucket: (bucket: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.NoSuchBucket, + resource: bucket, + httpStatusCode: 404, + message: `Bucket not found`, + originalError: e, + }), + NoSuchKey: (resource: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.NoSuchKey, + resource, + httpStatusCode: 404, + message: `Object not found`, + originalError: e, + }), + + MissingParameter: (parameter: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.MissingParameter, + httpStatusCode: 404, + message: `Missing Required Parameter ${parameter}`, + originalError: e, + }), + + InvalidJWT: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidJWT, + httpStatusCode: 400, + message: e?.message || 'Invalid JWT', + }), + + MissingContentLength: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.MissingContentLength, + httpStatusCode: 400, + message: e?.message || 'You must provide the Content-Length HTTP header.', + }), + + AccessDenied: (action: string, e?: Error) => + new StorageBackendError({ + error: 'Unauthorized', + code: ErrorCode.AccessDenied, + httpStatusCode: 403, + message: action || 'Access denied', + originalError: e, + }), + + ResourceAlreadyExists: (e?: Error) => + new StorageBackendError({ + error: 'Duplicate', + code: ErrorCode.ResourceAlreadyExists, + httpStatusCode: 409, + message: 'The resource already exists', + originalError: e, + }), + + MetadataRequired: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidRequest, + httpStatusCode: 400, + message: 'Metadata header is required', + originalError: e, + }), + + InvalidSignature: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidSignature, + httpStatusCode: 400, + message: 'Invalid signature', + originalError: e, + }), + + ExpiredSignature: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidSignature, + httpStatusCode: 400, + message: 'Expired signature', + originalError: e, + }), + + InvalidXForwardedHeader: (message?: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidRequest, + httpStatusCode: 400, + message: message || 'Invalid X-Forwarded-Host header', + originalError: e, + }), + + InvalidTenantId: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.TenantNotFound, + httpStatusCode: 400, + message: e?.message || 'Invalid tenant id', + originalError: e, + }), + + InvalidUploadId: (message?: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidRequest, + httpStatusCode: 400, + message: message || 'Invalid upload id', + originalError: e, + }), + + MissingTenantConfig: (tenantId: string) => + new StorageBackendError({ + code: ErrorCode.TenantNotFound, + httpStatusCode: 400, + message: `Missing tenant config for tenant ${tenantId}`, + }), + + InvalidMimeType: (mimeType: string) => + new StorageBackendError({ + error: 'invalid_mime_type', + code: ErrorCode.InvalidRequest, + httpStatusCode: 415, + message: `mime type ${mimeType} is not supported`, + }), + + EntityTooLarge: (e?: Error) => + new StorageBackendError({ + error: 'Payload too large', + code: ErrorCode.EntityTooLarge, + httpStatusCode: 413, + message: 'The object exceeded the maximum allowed size', + originalError: e, + }), + + InternalError: (e?: Error, message?: string) => + new 
StorageBackendError({ + code: ErrorCode.InternalError, + httpStatusCode: 500, + message: message || 'Internal server error', + originalError: e, + }), + + ImageProcessingError: (statusCode: number, message: string, e?: Error) => + new StorageBackendError({ + code: statusCode > 499 ? ErrorCode.InternalError : ErrorCode.InvalidRequest, + httpStatusCode: statusCode, + message: message, + originalError: e, + }), + + InvalidBucketName: (bucket: string, e?: Error) => + new StorageBackendError({ + error: 'Invalid Input', + code: ErrorCode.InvalidBucketName, + resource: bucket, + httpStatusCode: 400, + message: `Bucket name invalid`, + originalError: e, + }), + + InvalidFileSizeLimit: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidRequest, + httpStatusCode: 400, + message: e?.message || 'Invalid file size format, hint: use 20GB / 20MB / 30KB / 3B', + originalError: e, + }), + + InvalidUploadSignature: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidUploadSignature, + httpStatusCode: 400, + message: e?.message || 'Invalid upload Signature', + originalError: e, + }), + + InvalidKey: (key: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidKey, + resource: key, + httpStatusCode: 400, + message: `Invalid key: ${key}`, + originalError: e, + }), + + KeyAlreadyExists: (key: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.KeyAlreadyExists, + resource: key, + httpStatusCode: 409, + message: `Key already exists: ${key}`, + originalError: e, + }), + + BucketAlreadyExists: (bucket: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.BucketAlreadyExists, + resource: bucket, + httpStatusCode: 409, + message: `Bucket already exists: ${bucket}`, + originalError: e, + }), + + NoContentProvided: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidRequest, + httpStatusCode: 400, + message: e?.message || 'No content provided', + originalError: e, + }), + + DatabaseTimeout: (e?: Error) => + StorageBackendError.withStatusCode(544, { + code: ErrorCode.DatabaseTimeout, + httpStatusCode: 544, + message: 'The connection to the database timed out', + originalError: e, + }), + + ResourceLocked: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.ResourceLocked, + httpStatusCode: 423, + message: `The resource is locked`, + originalError: e, + }), + + RelatedResourceNotFound: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidRequest, + httpStatusCode: 404, + message: `The related resource does not exist`, + originalError: e, + }), + + DatabaseError: (message: string, err?: Error) => + new StorageBackendError({ + code: ErrorCode.DatabaseError, + httpStatusCode: 500, + message: message, + originalError: err, + }), + + LockTimeout: (err?: Error) => + new StorageBackendError({ + error: 'acquiring_lock_timeout', + code: ErrorCode.LockTimeout, + httpStatusCode: 503, + message: 'acquiring lock timeout', + originalError: err, + }), +} + +export function isStorageError(errorType: ErrorCode, error: any): error is StorageBackendError { + return error instanceof StorageBackendError && error.code === errorType +} + /** * A renderable error is a handled error * that we want to display to our users */ export interface RenderableError { + error?: string userStatusCode?: number render(): StorageError getOriginalError(): unknown @@ -33,6 +321,15 @@ export function isS3Error(error: unknown): error is S3ServiceException { return !!error && typeof error === 'object' && '$metadata' in error } +export 
interface StorageErrorOptions { + code: ErrorCode + httpStatusCode: number + message: string + resource?: string + originalError?: unknown + error?: string +} + /** * A generic error that should be always thrown for generic exceptions */ @@ -40,54 +337,71 @@ export class StorageBackendError extends Error implements RenderableError { httpStatusCode: number originalError: unknown userStatusCode: number + resource?: string + code: ErrorCode + metadata?: Record = {} + error?: string // backwards compatible error - constructor(name: string, httpStatusCode: number, message: string, originalError?: unknown) { - super(message) - this.name = name - this.httpStatusCode = httpStatusCode - this.userStatusCode = httpStatusCode === 500 ? 500 : 400 - this.message = message - this.originalError = originalError + constructor(options: StorageErrorOptions) { + super(options.message) + this.code = options.code + this.httpStatusCode = options.httpStatusCode + this.userStatusCode = options.httpStatusCode === 500 ? 500 : 400 + this.message = options.message + this.originalError = options.originalError + this.resource = options.resource + this.error = options.error Object.setPrototypeOf(this, StorageBackendError.prototype) } - static withStatusCode( - name: string, - statusCode: number, - message: string, - originalError?: unknown - ) { - const error = new StorageBackendError(name, statusCode, message, originalError) + static withStatusCode(statusCode: number, options: StorageErrorOptions) { + const error = new StorageBackendError(options) error.userStatusCode = statusCode return error } static fromError(error?: unknown) { - let name: string + let oldErrorMessage: string let httpStatusCode: number let message: string + let code: ErrorCode if (isS3Error(error)) { - name = error.message + code = ErrorCode.S3Error + oldErrorMessage = error.message httpStatusCode = error.$metadata.httpStatusCode ?? 
500 message = error.name } else if (error instanceof Error) { - name = error.name + code = ErrorCode.InternalError + oldErrorMessage = error.name httpStatusCode = 500 message = error.message } else { - name = 'Internal server error' + code = ErrorCode.InternalError + oldErrorMessage = 'Internal server error' httpStatusCode = 500 message = 'Internal server error' } - return new StorageBackendError(name, httpStatusCode, message, error) + return new StorageBackendError({ + error: oldErrorMessage, + code: code, + httpStatusCode, + message, + originalError: error, + }) + } + + withMetadata(metadata: Record) { + this.metadata = metadata + return this } render() { return { statusCode: this.httpStatusCode.toString(), - error: this.name, + code: this.code, + error: this.code, message: this.message, } } diff --git a/src/storage/limits.ts b/src/storage/limits.ts index 3b9f28e5..2a58f0f6 100644 --- a/src/storage/limits.ts +++ b/src/storage/limits.ts @@ -1,6 +1,6 @@ import { getConfig } from '../config' import { getFileSizeLimit as getFileSizeLimitForTenant, getFeatures } from '../database/tenant' -import { StorageBackendError } from './errors' +import { ERRORS } from './errors' const { isMultitenant, imageTransformationEnabled } = getConfig() @@ -57,11 +57,10 @@ export function isValidBucketName(bucketName: string): boolean { * Validates if a given object key is valid * throws if invalid * @param key - * @param message */ -export function mustBeValidKey(key: string, message: string) { - if (!isValidKey(key)) { - throw new StorageBackendError('Invalid Input', 400, message) +export function mustBeValidKey(key?: string): asserts key is string { + if (!key || !isValidKey(key)) { + throw ERRORS.InvalidKey(key || '') } } @@ -69,11 +68,10 @@ export function mustBeValidKey(key: string, message: string) { * Validates if a given bucket name is valid * throws if invalid * @param key - * @param message */ -export function mustBeValidBucketName(key: string, message: string) { - if (!isValidBucketName(key)) { - throw new StorageBackendError('Invalid Input', 400, message) +export function mustBeValidBucketName(key?: string): asserts key is string { + if (!key || !isValidBucketName(key)) { + throw ERRORS.InvalidBucketName(key || '') } } @@ -81,11 +79,7 @@ export function parseFileSizeToBytes(valueWithUnit: string) { const valuesRegex = /(^[0-9]+(?:\.[0-9]+)?)(gb|mb|kb|b)$/i if (!valuesRegex.test(valueWithUnit)) { - throw new StorageBackendError( - 'file_size_limit', - 422, - 'the requested file_size_limit uses an invalid format, use 20GB / 20MB / 30KB / 3B' - ) + throw ERRORS.InvalidFileSizeLimit() } // eslint-disable-next-line @typescript-eslint/no-non-null-assertion @@ -102,11 +96,7 @@ export function parseFileSizeToBytes(valueWithUnit: string) { case 'B': return value default: - throw new StorageBackendError( - 'file_size_limit', - 422, - 'the requested file_size_limit unit is not supported, use GB/MB/KB/B' - ) + throw ERRORS.InvalidFileSizeLimit() } } diff --git a/src/storage/object.ts b/src/storage/object.ts index e370b4c0..98b21c67 100644 --- a/src/storage/object.ts +++ b/src/storage/object.ts @@ -14,7 +14,7 @@ import { ObjectUpdatedMetadata, } from '../queue' import { randomUUID } from 'crypto' -import { StorageBackendError } from './errors' +import { ERRORS } from './errors' import { getJwtSecret } from '../database/tenant' export interface UploadObjectOptions { @@ -55,7 +55,7 @@ export class ObjectStorage { * @param options */ async uploadNewObject(request: FastifyRequest, options: UploadObjectOptions) { - 
mustBeValidKey(options.objectName, 'The object name contains invalid characters') + mustBeValidKey(options.objectName) const path = `${this.bucketId}/${options.objectName}` @@ -74,7 +74,7 @@ export class ObjectStorage { } public async uploadOverridingObject(request: FastifyRequest, options: UploadObjectOptions) { - mustBeValidKey(options.objectName, 'The object name contains invalid characters') + mustBeValidKey(options.objectName) const path = `${this.bucketId}/${options.objectName}` @@ -115,16 +115,14 @@ export class ObjectStorage { const deleted = await db.deleteObject(this.bucketId, objectName) if (!deleted) { - throw new StorageBackendError('not_found', 404, 'Object Not Found') + throw ERRORS.NoSuchKey(objectName) } - await ObjectAdminDelete.send({ - tenant: this.db.tenant(), - name: objectName, - bucketId: this.bucketId, - version: obj.version, - reqId: this.db.reqId, - }) + await this.backend.deleteObject( + storageS3Bucket, + `${this.db.tenantId}/${this.bucketId}/${objectName}`, + obj.version + ) }) await ObjectRemoved.sendWebhook({ @@ -197,7 +195,7 @@ export class ObjectStorage { * @param metadata */ async updateObjectMetadata(objectName: string, metadata: ObjectMetadata) { - mustBeValidKey(objectName, 'The object name contains invalid characters') + mustBeValidKey(objectName) const result = await this.db.updateObjectMetadata(this.bucketId, objectName, metadata) @@ -228,7 +226,7 @@ export class ObjectStorage { * @param filters */ async findObject(objectName: string, columns = 'id', filters?: FindObjectFilters) { - mustBeValidKey(objectName, 'The object name contains invalid characters') + mustBeValidKey(objectName) return this.db.findObject(this.bucketId, objectName, columns, filters) } @@ -245,11 +243,24 @@ export class ObjectStorage { /** * Copies an existing remote object to a given location * @param sourceKey + * @param destinationBucket * @param destinationKey * @param owner + * @param conditions */ - async copyObject(sourceKey: string, destinationKey: string, owner?: string) { - mustBeValidKey(destinationKey, 'The destination object name contains invalid characters') + async copyObject( + sourceKey: string, + destinationBucket: string, + destinationKey: string, + owner?: string, + conditions?: { + ifMatch?: string + ifNoneMatch?: string + ifModifiedSince?: Date + ifUnmodifiedSince?: Date + } + ) { + mustBeValidKey(destinationKey) if (sourceKey === destinationKey) { return { @@ -261,7 +272,7 @@ export class ObjectStorage { const newVersion = randomUUID() const bucketId = this.bucketId const s3SourceKey = `${this.db.tenantId}/${bucketId}/${sourceKey}` - const s3DestinationKey = `${this.db.tenantId}/${bucketId}/${destinationKey}` + const s3DestinationKey = `${this.db.tenantId}/${destinationBucket}/${destinationKey}` try { // We check if the user has permission to copy the object to the destination key @@ -272,7 +283,7 @@ export class ObjectStorage { ) await this.uploader.canUpload({ - bucketId: this.bucketId, + bucketId: destinationBucket, objectName: destinationKey, owner, isUpsert: false, @@ -283,13 +294,15 @@ export class ObjectStorage { s3SourceKey, originObject.version, s3DestinationKey, - newVersion + newVersion, + conditions ) const metadata = await this.backend.headObject(storageS3Bucket, s3DestinationKey, newVersion) const destObject = await this.db.createObject({ ...originObject, + bucket_id: destinationBucket, name: destinationKey, owner, metadata, @@ -307,6 +320,8 @@ export class ObjectStorage { return { destObject, httpStatusCode: copyResult.httpStatusCode, + eTag: 
copyResult.eTag, + lastModified: copyResult.lastModified, } } catch (e) { await ObjectAdminDelete.send({ @@ -327,7 +342,7 @@ export class ObjectStorage { * @param owner */ async moveObject(sourceObjectName: string, destinationObjectName: string, owner?: string) { - mustBeValidKey(destinationObjectName, 'The destination object name contains invalid characters') + mustBeValidKey(destinationObjectName) if (sourceObjectName === destinationObjectName) { return @@ -436,13 +451,13 @@ export class ObjectStorage { return this.db.searchObjects(this.bucketId, prefix, options) } - async listObjects(prefix: string, options: SearchObjectOption) { - // if (prefix.length > 0 && !prefix.endsWith('/')) { - // // assuming prefix is always a folder - // prefix = `${prefix}/` - // } - - return this.db.listObjects(this.bucketId, prefix, options.limit || 100) + async listObjectsV2(options?: { + prefix?: string + deltimeter?: string + nextToken?: string + maxKeys?: number + }) { + return this.db.listObjectsV2(this.bucketId, options) } /** @@ -542,7 +557,7 @@ export class ObjectStorage { }) if (found) { - throw new StorageBackendError('Duplicate', 409, 'The resource already exists') + throw ERRORS.KeyAlreadyExists(objectName) } // check if user has INSERT permissions diff --git a/src/storage/protocols/s3/byte-limit-stream.ts b/src/storage/protocols/s3/byte-limit-stream.ts new file mode 100644 index 00000000..24bc1e65 --- /dev/null +++ b/src/storage/protocols/s3/byte-limit-stream.ts @@ -0,0 +1,20 @@ +import { Transform, TransformCallback } from 'stream' +import { ERRORS } from '../../errors' + +export class ByteLimitTransformStream extends Transform { + bytesProcessed = BigInt(0) + + constructor(private readonly limit: bigint) { + super() + } + + _transform(chunk: Buffer, encoding: BufferEncoding, callback: TransformCallback) { + this.bytesProcessed += BigInt(chunk.length) + + if (this.bytesProcessed > this.limit) { + callback(ERRORS.EntityTooLarge()) + } else { + callback(null, chunk) + } + } +} diff --git a/src/storage/protocols/s3/handler.ts b/src/storage/protocols/s3/handler.ts deleted file mode 100644 index 520a2ea6..00000000 --- a/src/storage/protocols/s3/handler.ts +++ /dev/null @@ -1,332 +0,0 @@ -import { Storage } from '../../storage' -import { getConfig } from '../../../config' -import { Uploader } from '../../uploader' -import { - CompleteMultipartUploadCommandInput, - CreateMultipartUploadCommandInput, - GetObjectCommandInput, - HeadObjectCommandInput, - ListObjectsV2CommandInput, - PutObjectCommandInput, - UploadPartCommandInput, -} from '@aws-sdk/client-s3' -import { Readable } from 'stream' -import { isValidBucketName, isValidKey, mustBeValidBucketName, mustBeValidKey } from '../../limits' - -const { storageS3Region, storageS3Bucket } = getConfig() - -export class S3ProtocolHandler { - constructor(protected readonly storage: Storage, protected readonly tenantId: string) {} - - async getBucketVersioning() { - return { - responseBody: { - VersioningConfiguration: { - Status: 'Suspended', - MfaDelete: 'Disabled', - }, - }, - } - } - - async getBucketLocation() { - return { - responseBody: { - LocationConstraint: { - LocationConstraint: storageS3Region, - }, - }, - } - } - - async listBuckets() { - const buckets = await this.storage.listBuckets('name,created_at') - - return { - responseBody: { - ListAllMyBucketsResult: { - Buckets: toXmlList( - 'Bucket', - buckets.map((bucket) => ({ - Name: bucket.name, - CreationDate: bucket.created_at - ? 
new Date(bucket.created_at || '').toISOString().replace('Z', '+00:00') - : undefined, - })) - ), - }, - }, - } - } - - async createBucket(Bucket: string, isPublic: boolean) { - if (!Bucket) { - throw new Error('Bucket name is required') - } - - await this.storage.createBucket({ - name: Bucket, - id: Bucket, - public: isPublic, - }) - - return { - headers: { - Location: `/${Bucket}`, - }, - } - } - - async deleteBucket(name: string) { - await this.storage.deleteBucket(name) - - return { - statusCode: 204, - } - } - - async listObjectsV2(command: ListObjectsV2CommandInput) { - const continuationToken = command.ContinuationToken - const startAfter = command.StartAfter - const encodingType = command.EncodingType - const delimiter = command.Delimiter - const prefix = command.Prefix - const maxKeys = command.MaxKeys - const bucket = command.Bucket! - - const limit = maxKeys || 200 - const offset = continuationToken ? parseInt(continuationToken.split('=')[1]) : 0 - - const objects = await this.storage.from(bucket).searchObjects(prefix || '', { - limit: limit, - offset: offset, - sortBy: { column: 'created_at', order: 'desc' }, - }) - - const commonPrefeixes = objects - .map((object) => { - if (object.id === null) { - return { Prefix: object.name + '/' } - } - }) - .filter((e) => e) - - const contents = - objects - .filter((o) => o.id) - .map((o) => ({ - Key: o.name, - LastModified: o.updated_at - ? new Date(o.updated_at).toISOString().replace('Z', '+00:00') - : undefined, - ETag: o.metadata?.eTag, - Size: o.metadata?.size, - StorageClass: 'STANDARD', - })) || [] - - const isTruncated = objects.length === 0 || objects.length < limit - const response = { - ListBucketResult: { - Name: bucket, - Prefix: prefix, - ContinuationToken: continuationToken, - Contents: contents, - IsTruncated: isTruncated, - MaxKeys: limit, - Delimiter: delimiter, - EncodingType: encodingType, - KeyCount: objects.length, - CommonPrefixes: commonPrefeixes, - NextContinuationToken: !isTruncated ? 
`offset=${offset + limit}` : undefined, - StartAfter: offset, - }, - } - - return { - responseBody: response, - } - } - - async createMultiPartUpload(command: CreateMultipartUploadCommandInput) { - const uploader = new Uploader(this.storage.backend, this.storage.db) - - // Create Multi Part Upload - const version = await uploader.prepareUpload({ - bucketId: command.Bucket as string, - objectName: command.Key as string, - isUpsert: true, - }) - - const uploadId = await this.storage.backend.createMultiPartUpload( - storageS3Bucket, - `${this.tenantId}/${command.Bucket}/${command.Key}`, - version, - command.ContentType || '', - command.CacheControl || '' - ) - - return { - responseBody: { - InitiateMultipartUploadResult: { - Bucket: command.Bucket, - Key: `${command.Key}`, - UploadId: uploadId, - }, - }, - } - } - - async completeMultiPartUpload(command: CompleteMultipartUploadCommandInput) { - const uploader = new Uploader(this.storage.backend, this.storage.db) - const { Bucket, Key, UploadId } = command - - await uploader.canUpload({ - bucketId: Bucket as string, - objectName: Key as string, - isUpsert: true, - }) - - const resp = await this.storage.backend.completeMultipartUpload( - storageS3Bucket, - `${this.tenantId}/${Bucket}/${Key}`, - UploadId as string, - command.MultipartUpload?.Parts || [] - ) - - const metadata = await this.storage.backend.headObject( - storageS3Bucket, - `${this.tenantId}/${Bucket}/${Key}`, - resp.version - ) - - await uploader.completeUpload({ - bucketId: Bucket as string, - objectName: Key as string, - version: resp.version, - isUpsert: true, - isMultipart: false, - objectMetadata: metadata, - }) - - return { - responseBody: { - CompleteMultipartUpload: { - Location: resp.location, - Bucket: resp.bucket, - ChecksumCRC32: resp.ChecksumCRC32, - ChecksumCRC32C: resp.ChecksumCRC32, - ChecksumSHA1: resp.ChecksumSHA1, - ChecksumSHA256: resp.ChecksumSHA256, - ETag: resp.ETag, - }, - }, - } - } - - async uploadPart(command: UploadPartCommandInput) { - const { Bucket, PartNumber, UploadId, Key, Body } = command - - if (!UploadId) { - throw new Error('UploadId is required') - } - - const uploadPart = await this.storage.backend.uploadPart( - storageS3Bucket, - `${this.tenantId}/${Bucket}/${Key}`, - UploadId, - PartNumber || 0, - Body as string | Uint8Array | Buffer | Readable, - command.ContentLength - ) - - return { - headers: { - etag: uploadPart.ETag || '', - }, - } - } - - async putObject(command: PutObjectCommandInput) { - const uploader = new Uploader(this.storage.backend, this.storage.db) - const upload = await uploader.upload(command.Body as any, { - bucketId: command.Bucket as string, - objectName: command.Key as string, - isUpsert: true, - isMultipart: false, - }) - - return { - headers: { - etag: upload.metadata.eTag, - }, - } - } - - async headObject(command: HeadObjectCommandInput) { - const { Bucket, Key } = command - - if (!Bucket || !Key) { - throw new Error('Bucket and Key are required') - } - - const object = await this.storage.from(Bucket).findObject(Key, '*') - - if (!object) { - throw new Error('Object not found') - } - - return { - headers: { - 'created-at': (object.created_at as string) || '', - 'cache-control': (object.metadata?.cacheControl as string) || '', - expires: (object.metadata?.expires as string) || '', - 'content-length': (object.metadata?.size as string) || '', - 'content-type': (object.metadata?.contentType as string) || '', - etag: (object.metadata?.eTag as string) || '', - 'last-modified': object.updated_at ? 
new Date(object.updated_at).toISOString() || '' : '', - }, - } - } - - async getObject(command: GetObjectCommandInput) { - const bucket = command.Bucket as string - const key = command.Key as string - - mustBeValidBucketName(bucket || '', 'Invalid Bucket Name') - mustBeValidKey(key || '', 'Invalid Key') - - const object = await this.storage.from(bucket).findObject(key, '*') - const response = await this.storage.backend.getObject( - storageS3Bucket, - `${this.tenantId}/${bucket}/${key}`, - object.version, - { - ifModifiedSince: command.IfModifiedSince?.toISOString(), - ifNoneMatch: command.IfNoneMatch, - range: command.Range, - } - ) - return { - headers: { - 'cache-control': response.metadata.cacheControl, - 'content-length': response.metadata.contentLength.toString(), - 'content-type': response.metadata.mimetype, - etag: response.metadata.eTag, - 'last-modified': response.metadata.lastModified?.toISOString() || '', - }, - responseBody: response.body, - } - } -} - -function toXmlList>(name: string, list: T) { - if (list.length === 0) { - return undefined - } - - if (list.length === 1) { - return list.map((e) => ({ [name]: e })) - } - - return list -} diff --git a/src/storage/protocols/s3/router.ts b/src/storage/protocols/s3/router.ts deleted file mode 100644 index 0c33c785..00000000 --- a/src/storage/protocols/s3/router.ts +++ /dev/null @@ -1,191 +0,0 @@ -import { S3ProtocolHandler } from './handler' - -export type HTTPMethod = 'get' | 'put' | 'post' | 'head' | 'delete' | 'patch' - -type ResponseType = { - statusCode?: number - headers?: Record - responseBody?: unknown -} -type Handler = (s3Protocol: S3ProtocolHandler) => Promise - -export interface EndpointSchema< - Params = unknown, - Headers = unknown, - Query = unknown, - Body = unknown -> { - url: string - method: HTTPMethod | string - body?: Body - query?: Query - params?: Params - headers?: Headers - response?: unknown - raw?: ReadableStream -} - -export class Router< - Req extends EndpointSchema< - Record, - Record, - Record, - Record - > -> { - routes() { - const routes: Record Handler | undefined> = { - '/': this.handleRoot.bind(this), - '/:Bucket/': this.handleBuckets.bind(this), - '/:Bucket/*': this.handleObjects.bind(this), - } - - return routes - } - - protected handleRoot(req: Req) { - switch (req.method.toLowerCase()) { - case 'get': - return this.matchQueryString(req.query, { - '*': (s3Protocol) => s3Protocol.listBuckets(), - }) - } - } - - /** - * Handles Buckets actions - * @param req - * @protected - */ - protected handleBuckets(req: Req) { - switch (req.method.toLowerCase()) { - case 'get': - return this.matchQueryString(req.query, { - '*': (s3Protocol) => - s3Protocol.listObjectsV2({ - Bucket: this.getParam(req, 'Bucket'), - Prefix: req.query?.prefix || '', - ContinuationToken: req.query?.['continuation-token'], - StartAfter: req.query?.['start-after'], - EncodingType: req.query?.['encoding-type'], - MaxKeys: req.query?.['max-keys'], - Delimiter: req.query?.delimiter, - }), - }) - case 'put': - return this.matchQueryString(req.query, { - '*': (s3Protocol) => - s3Protocol.createBucket( - this.getParam(req, 'Bucket'), - req.headers?.['x-amz-acl'] === 'public-read' - ), - }) - case 'delete': - return this.matchQueryString(req.query, { - '*': (s3Protocol) => s3Protocol.deleteBucket(this.getParam(req, 'Bucket')), - }) - } - } - - /** - * Handles Objects actions - * @param req - * @protected - */ - protected handleObjects(req: Req) { - switch (req.method.toLowerCase()) { - case 'get': - return 
this.matchQueryString(req.query, { - '*': (s3Protocol) => - s3Protocol.getObject({ - Bucket: this.getParam(req, 'Bucket'), - Key: this.getParam(req, '*'), - Range: req.headers?.['range'], - IfNoneMatch: req.headers?.['if-none-match'], - IfModifiedSince: req.headers?.['if-modified-since'], - }), - }) - case 'post': - return this.matchQueryString(req.query, { - uploadId: (s3Protocol) => - s3Protocol.completeMultiPartUpload({ - Bucket: this.getParam(req, 'Bucket'), - Key: this.getParam(req, '*'), - UploadId: req.query?.uploadId, - MultipartUpload: req.body?.CompleteMultipartUpload, - }), - uploads: (s3Protocol) => - s3Protocol.createMultiPartUpload({ - Bucket: this.getParam(req, 'Bucket'), - Key: this.getParam(req, '*'), - ContentType: req.headers?.['content-type'], - CacheControl: req.headers?.['cache-control'], - ContentDisposition: req.headers?.['content-disposition'], - ContentEncoding: req.headers?.['content-encoding'], - }), - }) - case 'put': - return this.matchQueryString(req.query, { - '*': (s3Protocol) => - s3Protocol.putObject({ - Body: req as any, - Bucket: this.getParam(req, 'Bucket'), - Key: this.getParam(req, '*'), - }), - uploadId: (s3Protocol) => - s3Protocol.uploadPart({ - Body: req.raw, - UploadId: req.query?.uploadId, - Bucket: this.getParam(req, 'Bucket'), - Key: this.getParam(req, '*'), - PartNumber: req.query?.partNumber, - ContentLength: req.headers?.['content-length'], - }), - }) - case 'head': - return this.matchQueryString(req.query, { - '*': (s3Protocol) => - s3Protocol.headObject({ - Bucket: this.getParam(req, 'Bucket'), - Key: this.getParam(req, '*'), - }), - }) - // case 'delete': - // return this.matchQueryString(req.query, { - // '*': (s3Protocol) => - // s3Protocol.deleteObject(this.getParam(req, 'Bucket'), this.getParam(req, 'Key')), - // }) - } - } - - protected matchQueryString(querystring: unknown, objs: Record) { - if (!querystring) { - if (objs['*']) { - return objs['*'] - } - } - - if (typeof querystring !== 'object') { - throw new Error('invalid querystring format') - } - - const q = querystring as Record - const matchingKeys = Object.keys(q).find((key) => { - return objs[key] - }) - - if (!matchingKeys) { - return objs['*'] - } - - return objs[matchingKeys] - } - - protected getParam(req: EndpointSchema, param: string) { - const value = (req.params as Record)[param] - if (!value) { - throw new Error(`missing param: ${param}`) - } - return value - } -} diff --git a/src/storage/protocols/s3/s3-handler.ts b/src/storage/protocols/s3/s3-handler.ts new file mode 100644 index 00000000..2063992c --- /dev/null +++ b/src/storage/protocols/s3/s3-handler.ts @@ -0,0 +1,852 @@ +import { Storage } from '../../storage' +import { getConfig } from '../../../config' +import { getMaxFileSizeLimit, Uploader } from '../../uploader' +import { + AbortMultipartUploadCommandInput, + CompleteMultipartUploadCommandInput, + CopyObjectCommandInput, + CreateMultipartUploadCommandInput, + DeleteObjectCommandInput, + DeleteObjectsCommandInput, + GetObjectCommandInput, + HeadObjectCommandInput, + ListMultipartUploadsCommandInput, + ListObjectsV2CommandInput, + PutObjectCommandInput, + UploadPartCommandInput, +} from '@aws-sdk/client-s3' +import { PassThrough, Readable } from 'stream' +import stream from 'stream/promises' +import { mustBeValidBucketName, mustBeValidKey } from '../../limits' +import { ERRORS } from '../../errors' +import { S3MultipartUpload, Obj } from '../../schemas' +import { decrypt, encrypt } from '../../../auth' +import { ByteLimitTransformStream } from 
'./byte-limit-stream' +import { randomUUID } from 'crypto' + +const { storageS3Region, storageS3Bucket } = getConfig() + +export class S3ProtocolHandler { + constructor(protected readonly storage: Storage, protected readonly tenantId: string) {} + + /** + * Get the versioning configuration of a bucket + * default: versioning is suspended + */ + async getBucketVersioning() { + return { + responseBody: { + VersioningConfiguration: { + Status: 'Suspended', + MfaDelete: 'Disabled', + }, + }, + } + } + + /** + * Get the location of a bucket + */ + async getBucketLocation() { + return { + responseBody: { + LocationConstraint: { + LocationConstraint: storageS3Region, + }, + }, + } + } + + /** + * List all buckets + */ + async listBuckets() { + const buckets = await this.storage.listBuckets('name,created_at') + + return { + responseBody: { + ListAllMyBucketsResult: { + Buckets: { + Bucket: buckets.map((bucket) => ({ + Name: bucket.name, + CreationDate: bucket.created_at + ? new Date(bucket.created_at || '').toISOString() + : undefined, + })), + }, + }, + }, + } + } + + /** + * Create a new bucket + * @param Bucket + * @param isPublic + */ + async createBucket(Bucket: string, isPublic: boolean) { + mustBeValidBucketName(Bucket || '') + + await this.storage.createBucket({ + name: Bucket, + id: Bucket, + public: isPublic, + }) + + return { + headers: { + Location: `/${Bucket}`, + }, + } + } + + /** + * Delete a bucket + * @param name + */ + async deleteBucket(name: string) { + await this.storage.deleteBucket(name) + + return { + statusCode: 204, + } + } + + /** + * Head bucket + * @param name + */ + async headBucket(name: string) { + await this.storage.findBucket(name) + return { + statusCode: 200, + headers: { + 'x-amz-bucket-region': storageS3Region, + }, + } + } + + /** + * List objects in a bucket, implements the ListObjectsV2Command + * @param command + */ + async listObjectsV2(command: ListObjectsV2CommandInput) { + if (!command.Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + await this.storage.asSuperUser().findBucket(command.Bucket) + + const continuationToken = command.ContinuationToken + const startAfter = command.StartAfter + const encodingType = command.EncodingType + const delimiter = command.Delimiter + const prefix = command.Prefix || '' + const maxKeys = command.MaxKeys + const bucket = command.Bucket! + + const limit = maxKeys || 200 + + const objects = await this.storage.from(bucket).listObjectsV2({ + prefix, + deltimeter: delimiter, + maxKeys: limit + 1, + nextToken: continuationToken ? 
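/*
 * Every S3ProtocolHandler method above resolves to the same loose envelope
 * that the deleted router.ts called ResponseType: an optional status code,
 * optional headers, and an optional responseBody that the HTTP layer turns
 * into an S3-style XML document. A minimal sketch of that contract; the
 * names and the hard-coded region here are illustrative, not the exact
 * exported types:
 */
interface S3CommandResponse {
  statusCode?: number
  headers?: Record<string, string>
  responseBody?: unknown
}

// e.g. headBucket() resolves to something like:
const headBucketResponse: S3CommandResponse = {
  statusCode: 200,
  headers: { 'x-amz-bucket-region': 'us-east-1' },
}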
decodeContinuationToken(continuationToken) : undefined, + }) + + let results = objects + let prevPrefix = '' + + if (delimiter) { + const delimitedResults: Obj[] = [] + for (const object of objects) { + let idx = object.name.replace(prefix, '').indexOf(delimiter) + + if (idx >= 0) { + idx = prefix.length + idx + delimiter.length + const currPrefix = object.name.substring(0, idx) + if (currPrefix === prevPrefix) { + continue + } + prevPrefix = currPrefix + delimitedResults.push({ + id: null, + name: currPrefix, + bucket_id: bucket, + owner: '', + metadata: null, + created_at: '', + updated_at: '', + version: '', + }) + continue + } + + delimitedResults.push(object) + } + results = delimitedResults + } + + let isTruncated = false + + if (results.length > limit) { + results = results.slice(0, limit) + isTruncated = true + } + + const commonPrefixes = results + .map((object) => { + if (object.id === null) { + return { Prefix: object.name } + } + }) + .filter((e) => e) + + const contents = + results + .filter((o) => o.id) + .map((o) => ({ + Key: o.name, + LastModified: o.updated_at ? new Date(o.updated_at).toISOString() : undefined, + ETag: o.metadata?.eTag, + Size: o.metadata?.size, + StorageClass: 'STANDARD', + })) || [] + + const nextContinuationToken = isTruncated + ? encodeContinuationToken(results[results.length - 1].name) + : undefined + + const response = { + ListBucketResult: { + Name: bucket, + Prefix: prefix, + ContinuationToken: continuationToken, + Contents: contents, + IsTruncated: isTruncated, + MaxKeys: limit, + Delimiter: delimiter, + EncodingType: encodingType, + KeyCount: results.length, + CommonPrefixes: commonPrefixes, + NextContinuationToken: nextContinuationToken, + }, + } + + return { + responseBody: response, + } + } + + async listMultipartUploads(command: ListMultipartUploadsCommandInput) { + if (!command.Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + await this.storage.asSuperUser().findBucket(command.Bucket) + + const keyContinuationToken = command.KeyMarker + const uploadContinuationToken = command.UploadIdMarker + + const encodingType = command.EncodingType + const delimiter = command.Delimiter + const prefix = command.Prefix || '' + const maxKeys = command.MaxUploads + const bucket = command.Bucket! + + const limit = maxKeys || 200 + + const multipartUploads = await this.storage.db.listMultipartUploads(bucket, { + prefix, + deltimeter: delimiter, + maxKeys: limit + 1, + nextUploadKeyToken: keyContinuationToken + ? decodeContinuationToken(keyContinuationToken) + : undefined, + nextUploadToken: uploadContinuationToken + ? 
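/*
 * The delimiter handling above folds flat object names into S3-style
 * CommonPrefixes: for each key, everything up to and including the first
 * delimiter after the requested prefix becomes a prefix entry, and adjacent
 * duplicates collapse because the listing arrives in lexicographic order.
 * A minimal standalone sketch of the same folding:
 */
function commonPrefixes(keys: string[], prefix: string, delimiter: string): string[] {
  const prefixes: string[] = []
  let prev = ''
  for (const key of keys) {
    // Position of the first delimiter after the requested prefix
    const idx = key.replace(prefix, '').indexOf(delimiter)
    if (idx < 0) continue
    const curr = key.substring(0, prefix.length + idx + delimiter.length)
    if (curr !== prev) {
      prefixes.push(curr)
      prev = curr
    }
  }
  return prefixes
}

// commonPrefixes(['prefix-1/a.jpg', 'prefix-3/a.jpg', 'test-1.jpg'], '', '/')
// -> ['prefix-1/', 'prefix-3/']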
decodeContinuationToken(uploadContinuationToken) + : undefined, + }) + + let results: Partial[] = multipartUploads + let prevPrefix = '' + + if (delimiter) { + const delimitedResults: Partial[] = [] + for (const object of multipartUploads) { + let idx = object.key.replace(prefix, '').indexOf(delimiter) + + if (idx >= 0) { + idx = prefix.length + idx + delimiter.length + const currPrefix = object.key.substring(0, idx) + if (currPrefix === prevPrefix) { + continue + } + prevPrefix = currPrefix + delimitedResults.push({ + id: '', + key: currPrefix, + bucket_id: bucket, + }) + continue + } + + delimitedResults.push(object) + } + results = delimitedResults + } + + let isTruncated = false + + if (results.length > limit) { + results = results.slice(0, limit) + isTruncated = true + } + + const commonPrefixes = results + .map((object) => { + if (object.id === null) { + return { Prefix: object.key } + } + }) + .filter((e) => e) + + const uploads = + results + .filter((o) => o.id) + .map((o) => ({ + Key: o.key, + Initiated: o.created_at ? new Date(o.created_at).toISOString() : undefined, + UploadId: o.id, + StorageClass: 'STANDARD', + })) || [] + + let keyNextContinuationToken: string | undefined + let uploadNextContinuationToken: string | undefined + + if (isTruncated) { + const lastItem = results[results.length - 1] + if (!lastItem.id) { + keyNextContinuationToken = encodeContinuationToken(lastItem.key!) + } else { + uploadNextContinuationToken = encodeContinuationToken(lastItem.id) + } + } + + const response = { + ListMultipartUploadsResult: { + Name: bucket, + Prefix: prefix, + KeyMarker: keyNextContinuationToken, + UploadIdMarker: uploadNextContinuationToken, + Uploads: uploads, + IsTruncated: isTruncated, + MaxUploads: limit, + Delimiter: delimiter, + EncodingType: encodingType, + KeyCount: results.length, + CommonPrefixes: commonPrefixes, + }, + } + + return { + responseBody: response, + } + } + + /** + * Create a multipart upload + * @param command + */ + async createMultiPartUpload(command: CreateMultipartUploadCommandInput) { + const uploader = new Uploader(this.storage.backend, this.storage.db) + const { Bucket, Key } = command + + mustBeValidBucketName(Bucket) + mustBeValidKey(Key) + + await this.storage.asSuperUser().findBucket(Bucket) + + // Create Multi Part Upload + const version = await uploader.prepareUpload({ + bucketId: command.Bucket as string, + objectName: command.Key as string, + isUpsert: true, + }) + + const uploadId = await this.storage.backend.createMultiPartUpload( + storageS3Bucket, + `${this.tenantId}/${command.Bucket}/${command.Key}`, + version, + command.ContentType || '', + command.CacheControl || '' + ) + + if (!uploadId) { + throw ERRORS.InvalidUploadId(uploadId) + } + + const signature = this.uploadSignature({ in_progress_size: BigInt(0) }) + await this.storage.db + .asSuperUser() + .createMultipartUpload(uploadId, Bucket, Key, version, signature) + + return { + responseBody: { + InitiateMultipartUploadResult: { + Bucket: command.Bucket, + Key: `${command.Key}`, + UploadId: uploadId, + }, + }, + } + } + + /** + * Complete a multipart upload + * @param command + */ + async completeMultiPartUpload(command: CompleteMultipartUploadCommandInput) { + const uploader = new Uploader(this.storage.backend, this.storage.db) + const { Bucket, Key, UploadId } = command + + if (!UploadId) { + throw ERRORS.InvalidUploadId() + } + + await uploader.canUpload({ + bucketId: Bucket as string, + objectName: Key as string, + isUpsert: true, + }) + + const multiPartUpload = await 
this.storage.db + .asSuperUser() + .findMultipartUpload(UploadId, 'id,version') + + const parts = command.MultipartUpload?.Parts || [] + + if (!parts || parts.length === 0) { + const currentParts = await this.storage.db.listAllParts(UploadId) + parts.push( + ...currentParts.map((part) => ({ + PartNumber: part.part_number, + ETag: part.etag, + })) + ) + } + + const resp = await this.storage.backend.completeMultipartUpload( + storageS3Bucket, + `${this.tenantId}/${Bucket}/${Key}`, + UploadId as string, + multiPartUpload.version, + parts + ) + + const metadata = await this.storage.backend.headObject( + storageS3Bucket, + `${this.tenantId}/${Bucket}/${Key}`, + resp.version + ) + + await uploader.completeUpload({ + bucketId: Bucket as string, + objectName: Key as string, + version: resp.version, + isUpsert: true, + isMultipart: false, + objectMetadata: metadata, + }) + + await this.storage.db.asSuperUser().deleteMultipartUpload(UploadId) + + return { + responseBody: { + CompleteMultipartUpload: { + Location: resp.location, + Bucket: resp.bucket, + ChecksumCRC32: resp.ChecksumCRC32, + ChecksumCRC32C: resp.ChecksumCRC32, + ChecksumSHA1: resp.ChecksumSHA1, + ChecksumSHA256: resp.ChecksumSHA256, + ETag: resp.ETag, + }, + }, + } + } + + /** + * Upload a part of a multipart upload + * @param command + */ + async uploadPart(command: UploadPartCommandInput) { + const { Bucket, PartNumber, UploadId, Key, Body, ContentLength } = command + + if (!UploadId) { + throw ERRORS.InvalidUploadId() + } + + if (!Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + if (typeof ContentLength === 'undefined') { + throw ERRORS.MissingContentLength() + } + + const bucket = await this.storage.asSuperUser().findBucket(Bucket, 'file_size_limit') + + const maxFileSize = await getMaxFileSizeLimit(this.storage.db.tenantId, bucket?.file_size_limit) + const multipart = await this.storage.db.asSuperUser().withTransaction(async (db) => { + const multipart = await db.findMultipartUpload( + UploadId, + 'in_progress_size,version,upload_signature', + { + forUpdate: true, + } + ) + + const { progress } = this.decryptUploadSignature(multipart.upload_signature) + + if (progress !== BigInt(multipart.in_progress_size)) { + throw ERRORS.InvalidUploadSignature() + } + + const currentProgress = BigInt(multipart.in_progress_size) + BigInt(ContentLength) + + if (currentProgress > maxFileSize) { + throw ERRORS.EntityTooLarge() + } + + const signature = this.uploadSignature({ in_progress_size: currentProgress }) + await db.updateMultipartUploadProgress(UploadId, currentProgress, signature) + return multipart + }) + + const proxy = new PassThrough() + + if (Body instanceof Readable) { + proxy.on('error', () => { + Body.unpipe(proxy) + }) + + Body.on('error', () => { + if (!proxy.closed) { + proxy.destroy() + } + }) + } + + const body = Body instanceof Readable ? 
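/*
 * uploadPart() below guards the running total with an encrypted "upload
 * signature": the handler stores encrypt('progress:<bytes>') next to
 * in_progress_size and rejects the part whenever the two disagree, so the
 * counter cannot be advanced except through the handler itself. A minimal
 * sketch of the round trip, assuming encrypt/decrypt are the symmetric
 * helpers exported from src/auth (the same ones imported by this file):
 */
import { decrypt, encrypt } from '../../../auth'

function signProgress(inProgressSize: bigint): string {
  return encrypt(`progress:${inProgressSize.toString()}`)
}

function verifyProgress(signature: string, inProgressSize: bigint): boolean {
  const [, value] = decrypt(signature).split(':')
  return BigInt(value) === inProgressSize
}

// On each UploadPart: verifyProgress(row.upload_signature, BigInt(row.in_progress_size))
// must hold before the part's ContentLength is added and the row is re-signed.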
Body.pipe(proxy) : Readable.from(Body as Buffer) + + try { + const uploadPart = await stream.pipeline( + body, + new ByteLimitTransformStream(BigInt(ContentLength)), + async (stream) => { + return this.storage.backend.uploadPart( + storageS3Bucket, + `${this.tenantId}/${Bucket}/${Key}`, + multipart.version, + UploadId, + PartNumber || 0, + stream as Readable, + ContentLength + ) + } + ) + + await this.storage.db.insertUploadPart({ + upload_id: UploadId, + version: multipart.version, + part_number: PartNumber || 0, + etag: uploadPart.ETag || '', + key: Key as string, + bucket_id: Bucket, + }) + + return { + headers: { + etag: uploadPart.ETag || '', + }, + } + } catch (e) { + await this.storage.db.asSuperUser().withTransaction(async (db) => { + const multipart = await db.findMultipartUpload(UploadId, 'in_progress_size', { + forUpdate: true, + }) + + const diff = BigInt(multipart.in_progress_size) - BigInt(ContentLength) + const signature = this.uploadSignature({ in_progress_size: diff }) + await db.updateMultipartUploadProgress(UploadId, diff, signature) + }) + + throw e + } + } + + /** + * Put an object in a bucket + * @param command + */ + async putObject(command: PutObjectCommandInput) { + const uploader = new Uploader(this.storage.backend, this.storage.db) + + mustBeValidBucketName(command.Bucket) + mustBeValidKey(command.Key) + + const upload = await uploader.upload(command.Body as any, { + bucketId: command.Bucket as string, + objectName: command.Key as string, + isUpsert: true, + isMultipart: false, + }) + + return { + headers: { + etag: upload.metadata.eTag, + }, + } + } + + /** + * Abort a multipart upload + * @param command + */ + async abortMultipartUpload(command: AbortMultipartUploadCommandInput) { + const { Bucket, Key, UploadId } = command + + if (!UploadId) { + throw ERRORS.InvalidUploadId() + } + + await this.storage.backend.abortMultipartUpload( + storageS3Bucket, + `${this.tenantId}/${Bucket}/${Key}`, + UploadId + ) + + await this.storage.db.asSuperUser().deleteMultipartUpload(UploadId) + + return {} + } + + /** + * Head Object + * @param command + */ + async headObject(command: HeadObjectCommandInput) { + const { Bucket, Key } = command + + if (!Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + if (!Key) { + throw ERRORS.MissingParameter('Bucket') + } + + const object = await this.storage.from(Bucket).findObject(Key, '*') + + if (!object) { + throw ERRORS.NoSuchKey(Key) + } + + return { + headers: { + 'created-at': (object.created_at as string) || '', + 'cache-control': (object.metadata?.cacheControl as string) || '', + expires: (object.metadata?.expires as string) || '', + 'content-length': (object.metadata?.size as string) || '', + 'content-type': (object.metadata?.contentType as string) || '', + etag: (object.metadata?.eTag as string) || '', + 'last-modified': object.updated_at ? 
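/*
 * The pipeline above caps the part body with ByteLimitTransformStream so a
 * client cannot stream more bytes than the ContentLength it declared (and
 * was charged against in_progress_size). The actual implementation lives in
 * byte-limit-stream.ts and is not shown in this hunk; a plausible minimal
 * version of such a transform looks like this:
 */
import { Transform, TransformCallback } from 'stream'

class ByteCountLimitStream extends Transform {
  private bytesSeen = BigInt(0)

  constructor(private readonly limit: bigint) {
    super()
  }

  _transform(chunk: Buffer, _encoding: BufferEncoding, callback: TransformCallback) {
    this.bytesSeen += BigInt(chunk.length)
    if (this.bytesSeen > this.limit) {
      // Fail the pipeline as soon as the declared size is exceeded
      callback(new Error('Payload exceeds the declared Content-Length'))
      return
    }
    callback(null, chunk)
  }
}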
new Date(object.updated_at).toISOString() || '' : '', + }, + } + } + + /** + * Get Object + * @param command + */ + async getObject(command: GetObjectCommandInput) { + const bucket = command.Bucket as string + const key = command.Key as string + + const object = await this.storage.from(bucket).findObject(key, '*') + const response = await this.storage.backend.getObject( + storageS3Bucket, + `${this.tenantId}/${bucket}/${key}`, + object.version, + { + ifModifiedSince: command.IfModifiedSince?.toISOString(), + ifNoneMatch: command.IfNoneMatch, + range: command.Range, + } + ) + return { + headers: { + 'cache-control': response.metadata.cacheControl, + 'content-length': response.metadata.contentLength.toString(), + 'content-type': response.metadata.mimetype, + etag: response.metadata.eTag, + 'last-modified': response.metadata.lastModified?.toUTCString() || '', + }, + responseBody: response.body, + statusCode: command.Range ? 206 : 200, + } + } + + /** + * Delete Object + * @param command + */ + async deleteObject(command: DeleteObjectCommandInput) { + const { Bucket, Key } = command + + if (!Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + if (!Key) { + throw ERRORS.MissingParameter('Key') + } + + await this.storage.from(Bucket).deleteObject(Key) + + return {} + } + + /** + * Delete Multiple Objects + * @param command + */ + async deleteObjects(command: DeleteObjectsCommandInput) { + const { Bucket, Delete } = command + + if (!Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + if (!Delete) { + throw ERRORS.MissingParameter('Delete') + } + + if (!Delete.Objects) { + throw ERRORS.MissingParameter('Objects') + } + + if (Delete.Objects.length === 0) { + return {} + } + + const deletedResult = await this.storage + .from(Bucket) + .deleteObjects(Delete.Objects.map((o) => o.Key || '')) + + return { + responseBody: { + DeletedResult: { + Deleted: Delete.Objects.map((o) => { + const isDeleted = deletedResult.find((d) => d.name === o.Key) + if (isDeleted) { + return { + Deleted: { + Key: o.Key, + }, + } + } + + return { + Error: { + Key: o.Key, + Code: 'AccessDenied', + Message: + "You do not have permission to delete this object or the object doesn't exists", + }, + } + }), + }, + }, + } + } + + async copyObject(command: CopyObjectCommandInput) { + const { Bucket, Key, CopySource } = command + + if (!Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + if (!Key) { + throw ERRORS.MissingParameter('Key') + } + + if (!CopySource) { + throw ERRORS.MissingParameter('CopySource') + } + + const sourceBucket = CopySource.split('/').shift() + const sourceKey = CopySource.split('/').slice(1).join('/') + + if (!sourceBucket) { + throw ERRORS.MissingParameter('CopySource') + } + + if (!sourceKey) { + throw ERRORS.MissingParameter('CopySource') + } + + const object = await this.storage.from(sourceBucket).findObject(sourceKey, '*') + + if (!object) { + throw ERRORS.NoSuchKey(sourceKey) + } + + const copyResult = await this.storage + .from(sourceBucket) + .copyObject(sourceKey, Bucket, Key, object.owner, { + ifMatch: command.CopySourceIfMatch, + ifNoneMatch: command.CopySourceIfNoneMatch, + ifModifiedSince: command.CopySourceIfModifiedSince, + ifUnmodifiedSince: command.CopySourceIfUnmodifiedSince, + }) + + return { + responseBody: { + CopyObjectResult: { + ETag: copyResult.eTag, + LastModified: copyResult.lastModified?.toISOString(), + }, + }, + } + } + + protected uploadSignature({ in_progress_size }: { in_progress_size: BigInt }) { + return `${encrypt('progress:' + 
in_progress_size.toString())}` + } + + protected decryptUploadSignature(signature: string) { + const originalSignature = decrypt(signature) + const [_, value] = originalSignature.split(':') + + return { + progress: BigInt(value), + } + } +} + +function encodeContinuationToken(name: string) { + return Buffer.from(`l:${name}`).toString('base64') +} + +function decodeContinuationToken(token: string) { + const decoded = Buffer.from(token, 'base64').toString().split(':') + + if (decoded.length === 0) { + throw new Error('Invalid continuation token') + } + + return decoded[1] +} diff --git a/src/storage/protocols/s3/signature-v4.ts b/src/storage/protocols/s3/signature-v4.ts index 25bc4f63..cc8124be 100644 --- a/src/storage/protocols/s3/signature-v4.ts +++ b/src/storage/protocols/s3/signature-v4.ts @@ -1,4 +1,5 @@ import crypto from 'crypto' +import { ERRORS } from '../../errors' interface SignatureV4Options { region: string @@ -28,7 +29,7 @@ export class SignatureV4 { sign(request: SignatureRequest) { const authorizationHeader = this.getHeader(request, 'authorization') if (!authorizationHeader) { - throw new Error('Missing Authorization header') + throw ERRORS.AccessDenied('Missing authorization header') } const { credentials, signedHeaders, signature } = @@ -37,22 +38,21 @@ export class SignatureV4 { // Extract additional information from the credentials const [accessKey, shortDate, region, service] = credentials.split('/') if (accessKey !== this.options.tenantId) { - throw new Error('no correct tenant') + throw ERRORS.AccessDenied('Invalid Access Key') } // Ensure the region and service match the expected values if (region !== this.options.region || service !== this.options.service) { - throw new Error('Region or service mismatch') + throw ERRORS.AccessDenied('Invalid Region') } const longDate = request.headers['x-amz-date'] as string if (!longDate) { - throw new Error('no date provided') + throw ERRORS.AccessDenied('No date header provided') } // Construct the Canonical Request and String to Sign const canonicalRequest = this.constructCanonicalRequest(request, signedHeaders) - const stringToSign = this.constructStringToSign( longDate, shortDate, @@ -183,9 +183,9 @@ export class SignatureV4 { } } - return `${header.toLowerCase()}:${( + return `${header.toLowerCase()}:${ (request.headers[header.toLowerCase()] || '') as string - ).trim()}` + }` }) .join('\n') + '\n' diff --git a/src/storage/protocols/tus/postgres-locker.ts b/src/storage/protocols/tus/postgres-locker.ts index ce66ab7a..61e516c0 100644 --- a/src/storage/protocols/tus/postgres-locker.ts +++ b/src/storage/protocols/tus/postgres-locker.ts @@ -4,6 +4,7 @@ import EventEmitter from 'events' import { Database, DBError } from '../../database' import { PubSubAdapter } from '../../../pubsub' import { UploadId } from './upload-id' +import { ERRORS } from '../../errors' const REQUEST_LOCK_RELEASE_MESSAGE = 'REQUEST_LOCK_RELEASE' @@ -60,7 +61,7 @@ export class PgLock implements Lock { abortController.abort() if (!acquired) { - throw new DBError('acquiring lock timeout', 503, 'acquiring_lock_timeout') + throw ERRORS.LockTimeout() } await new Promise((innerResolve) => { diff --git a/src/storage/protocols/tus/upload-id.ts b/src/storage/protocols/tus/upload-id.ts index 14a49676..645d8ea7 100644 --- a/src/storage/protocols/tus/upload-id.ts +++ b/src/storage/protocols/tus/upload-id.ts @@ -1,5 +1,5 @@ import { getConfig } from '../../../config' -import { StorageBackendError } from '../../errors' +import { ERRORS } from '../../errors' import { 
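/*
 * ListObjectsV2 and ListMultipartUploads page with opaque continuation
 * tokens: the last key (or upload id) of a truncated page is wrapped as
 * "l:<name>" and base64-encoded, and decoding strips that prefix again.
 * A short usage sketch of the two helpers defined above:
 */
const token = Buffer.from('l:prefix-3/test-1.jpg').toString('base64')
const [, lastKey] = Buffer.from(token, 'base64').toString().split(':')
// lastKey === 'prefix-3/test-1.jpg'; the next page starts strictly after it.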
mustBeValidBucketName, mustBeValidKey } from '../../limits' import { FILE_VERSION_SEPARATOR, PATH_SEPARATOR, SEPARATOR } from '../../backend' @@ -24,15 +24,15 @@ export class UploadId { this.objectName = options.objectName this.version = options.version - mustBeValidBucketName(options.bucket, 'invalid bucket name') - mustBeValidKey(options.objectName, 'invalid object name') + mustBeValidBucketName(options.bucket) + mustBeValidKey(options.objectName) if (!options.tenant) { - throw new StorageBackendError('tenant_not_found', 422, 'tenant not provided') + throw ERRORS.InvalidTenantId() } if (!options.version) { - throw new StorageBackendError('version_not_found', 422, 'version not provided') + throw ERRORS.InvalidUploadId('Version not provided') } } @@ -52,14 +52,14 @@ function fromPathSeparator(id: string) { const idParts = id.split(PATH_SEPARATOR) if (idParts.length < 3) { - throw new StorageBackendError('id_missmatch', 422, 'id format invalid') + throw ERRORS.InvalidUploadId() } const [tenant, bucket, ...objParts] = idParts const version = objParts.pop() if (!version) { - throw new StorageBackendError('version_not_found', 422, 'version not provided') + throw ERRORS.InvalidUploadId('Version not provided') } return { @@ -74,7 +74,7 @@ function fromFileSeparator(id: string) { const idParts = id.split(PATH_SEPARATOR) if (idParts.length < 3) { - throw new StorageBackendError('id_missmatch', 422, 'id format invalid') + throw ERRORS.InvalidUploadId() } const [tenant, bucket, ...objParts] = idParts @@ -84,14 +84,14 @@ function fromFileSeparator(id: string) { const objectNameParts = objectWithVersion?.split(separator) || [] if (objectNameParts.length < 2) { - throw new StorageBackendError('object_name_invalid', 422, 'object name invalid') + throw ERRORS.InvalidUploadId('Object name is invalid') } const version = objectNameParts[1] const objectName = objectNameParts[0] if (!version) { - throw new StorageBackendError('version_not_found', 422, 'version not provided') + throw ERRORS.InvalidUploadId('Version not provided') } objParts.push(objectName) diff --git a/src/storage/renderer/image.ts b/src/storage/renderer/image.ts index d09bdcdf..618a1718 100644 --- a/src/storage/renderer/image.ts +++ b/src/storage/renderer/image.ts @@ -4,7 +4,7 @@ import { getConfig } from '../../config' import { FastifyRequest } from 'fastify' import { Renderer, RenderOptions } from './renderer' import axiosRetry from 'axios-retry' -import { StorageBackendError } from '../errors' +import { ERRORS } from '../errors' import { Stream } from 'stream' import Agent from 'agentkeepalive' @@ -241,7 +241,7 @@ export class ImageRenderer extends Renderer { protected async handleRequestError(error: AxiosError) { const stream = error.response?.data as Stream if (!stream) { - throw new StorageBackendError('Internal Server Error', 500, 'Internal Server Error', error) + throw ERRORS.InternalError(error) } const errorResponse = await new Promise((resolve) => { @@ -257,7 +257,7 @@ export class ImageRenderer extends Renderer { }) const statusCode = error.response?.status || 500 - throw new StorageBackendError('ImageProcessingError', statusCode, errorResponse, error) + throw ERRORS.ImageProcessingError(statusCode, errorResponse, error) } } diff --git a/src/storage/schemas/index.ts b/src/storage/schemas/index.ts index 90c69575..a9c814cf 100644 --- a/src/storage/schemas/index.ts +++ b/src/storage/schemas/index.ts @@ -1,2 +1,3 @@ export * from './object' export * from './bucket' +export * from './multipart' diff --git 
a/src/storage/schemas/multipart.ts b/src/storage/schemas/multipart.ts new file mode 100644 index 00000000..bbd25c00 --- /dev/null +++ b/src/storage/schemas/multipart.ts @@ -0,0 +1,46 @@ +import { FromSchema } from 'json-schema-to-ts' + +export const multipartUploadSchema = { + $id: 'multipartUploadSchema', + type: 'object', + properties: { + id: { type: 'string' }, + bucket_id: { type: 'string' }, + key: { type: 'string' }, + in_progress_size: { type: 'number' }, + upload_signature: { type: 'string' }, + version: { type: 'string' }, + created_at: { type: 'string' }, + }, + required: [ + 'id', + 'bucket_id', + 'key', + 'version', + 'created_at', + 'in_progress_size', + 'upload_signature', + ], + additionalProperties: false, +} as const + +export type S3MultipartUpload = FromSchema + +export const uploadPartSchema = { + $id: 'uploadPartSchema', + type: 'object', + properties: { + id: { type: 'string' }, + upload_id: { type: 'string' }, + bucket_id: { type: 'string' }, + key: { type: 'string' }, + part_number: { type: 'number' }, + version: { type: 'string' }, + created_at: { type: 'string' }, + etag: { type: 'string' }, + }, + required: ['upload_id', 'bucket_id', 'key', 'version', 'part_number'], + additionalProperties: false, +} as const + +export type S3PartUpload = FromSchema diff --git a/src/storage/storage.ts b/src/storage/storage.ts index 6e84c663..d07178ce 100644 --- a/src/storage/storage.ts +++ b/src/storage/storage.ts @@ -1,6 +1,6 @@ import { StorageBackendAdapter, withOptionalVersion } from './backend' import { Database, FindBucketFilters } from './database' -import { StorageBackendError } from './errors' +import { ERRORS } from './errors' import { AssetRenderer, HeadRenderer, ImageRenderer } from './renderer' import { getFileSizeLimit, mustBeValidBucketName, parseFileSizeToBytes } from './limits' import { getConfig } from '../config' @@ -21,7 +21,7 @@ export class Storage { * @param bucketId */ from(bucketId: string) { - mustBeValidBucketName(bucketId, 'The bucketId name contains invalid characters') + mustBeValidBucketName(bucketId) return new ObjectStorage(this.backend, this.db, bucketId) } @@ -82,7 +82,7 @@ export class Storage { allowedMimeTypes?: null | string[] } ) { - mustBeValidBucketName(data.name, 'Bucket name invalid') + mustBeValidBucketName(data.name) const bucketData: Parameters[0] = data @@ -117,7 +117,7 @@ export class Storage { allowedMimeTypes?: null | string[] } ) { - mustBeValidBucketName(id, 'Bucket name invalid') + mustBeValidBucketName(id) const bucketData: Parameters[1] = data @@ -158,17 +158,13 @@ export class Storage { const countObjects = await db.asSuperUser().countObjectsInBucket(id) if (countObjects && countObjects > 0) { - throw new StorageBackendError( - 'Storage not empty', - 400, - 'Storage must be empty before you can delete it' - ) + throw ERRORS.BucketNotEmpty(id) } const deleted = await db.deleteBucket(id) if (!deleted) { - throw new StorageBackendError('not_found', 404, 'Bucket Not Found') + throw ERRORS.NoSuchBucket(id) } return deleted @@ -216,9 +212,7 @@ export class Storage { .filter(({ name }) => !deletedNames.has(name)) .map(({ name }) => name) - throw new StorageBackendError( - 'Cannot delete', - 400, + throw ERRORS.AccessDenied( `Cannot delete: ${remainingNames.join( ' ,' )}, you may have SELECT but not DELETE permissions` @@ -230,21 +224,13 @@ export class Storage { validateMimeType(mimeType: string[]) { for (const type of mimeType) { if (type.length > 1000) { - throw new StorageBackendError( - 'invalid_mime_type', - 422, - `the 
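/*
 * The multipart schemas above follow the same pattern as the existing object
 * and bucket schemas: the const JSON Schema doubles as the runtime validator
 * and the source of the compile-time type via json-schema-to-ts. A minimal,
 * self-contained sketch of that derivation (the schema here is a toy
 * analogue, not the real multipartUploadSchema):
 */
import { FromSchema } from 'json-schema-to-ts'

const exampleSchema = {
  type: 'object',
  properties: {
    id: { type: 'string' },
    in_progress_size: { type: 'number' },
  },
  required: ['id', 'in_progress_size'],
  additionalProperties: false,
} as const

// FromSchema turns the const JSON Schema into a static type:
type ExampleUpload = FromSchema<typeof exampleSchema>
// -> { id: string; in_progress_size: number }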
requested mime type "${type}" is invalid` - ) + throw ERRORS.InvalidMimeType(type) } if ( !type.match(/^([a-zA-Z0-9\-+.]+)\/([a-zA-Z0-9\-+.]+)(;\s*charset=[a-zA-Z0-9\-]+)?$|\*$/) ) { - throw new StorageBackendError( - 'invalid_mime_type', - 422, - `the requested mime type "${type} is invalid` - ) + throw ERRORS.InvalidMimeType(type) } } return true @@ -262,11 +248,7 @@ export class Storage { const globalMaxLimit = await getFileSizeLimit(this.db.tenantId) if (maxFileLimit > globalMaxLimit) { - throw new StorageBackendError( - 'max_file_size', - 422, - 'the requested max_file_size exceed the global limit' - ) + throw ERRORS.EntityTooLarge() } return maxFileLimit diff --git a/src/storage/uploader.ts b/src/storage/uploader.ts index 773b4da1..0dbd393c 100644 --- a/src/storage/uploader.ts +++ b/src/storage/uploader.ts @@ -2,7 +2,7 @@ import { FastifyRequest } from 'fastify' import { getFileSizeLimit } from './limits' import { ObjectMetadata, StorageBackendAdapter } from './backend' import { getConfig } from '../config' -import { StorageBackendError } from './errors' +import { ERRORS } from './errors' import { Database } from './database' import { ObjectAdminDelete, ObjectCreatedPostEvent, ObjectCreatedPutEvent } from '../queue' import { randomUUID } from 'crypto' @@ -99,11 +99,7 @@ export class Uploader { ) if (file.isTruncated()) { - throw new StorageBackendError( - 'Payload too large', - 413, - 'The object exceeded the maximum allowed size' - ) + throw ERRORS.EntityTooLarge() } return this.completeUpload({ @@ -210,11 +206,7 @@ export class Uploader { const requestedMime = mimeType.split('/') if (requestedMime.length < 2) { - throw new StorageBackendError( - 'invalid_mime_type', - 422, - `mime type ${mimeType} is not formatted properly` - ) + throw ERRORS.InvalidMimeType(mimeType) } const [type, ext] = requestedMime @@ -237,11 +229,7 @@ export class Uploader { } } - throw new StorageBackendError( - 'invalid_mime_type', - 422, - `mime type ${mimeType} is not supported` - ) + throw ERRORS.InvalidMimeType(mimeType) } protected async incomingFileInfo( @@ -249,7 +237,7 @@ export class Uploader { options?: Pick ) { const contentType = request.headers['content-type'] - const fileSizeLimit = await this.getFileSizeLimit(request.tenantId, options?.fileSizeLimit) + const fileSizeLimit = await getMaxFileSizeLimit(this.db.tenantId, options?.fileSizeLimit) let body: NodeJS.ReadableStream let mimeType: string @@ -261,7 +249,7 @@ export class Uploader { const formData = await request.file({ limits: { fileSize: fileSizeLimit } }) if (!formData) { - throw new StorageBackendError(`no_file_provided`, 400, 'No file provided') + throw ERRORS.NoContentProvided() } // https://github.com/fastify/fastify-multipart/issues/162 @@ -274,7 +262,7 @@ export class Uploader { cacheControl = cacheTime ? 
`max-age=${cacheTime}` : 'no-cache' isTruncated = () => formData.file.truncated } catch (e) { - throw new StorageBackendError('empty_file', 400, 'Unexpected empty file received', e) + throw ERRORS.NoContentProvided(e as Error) } } else { // just assume it's a binary file @@ -309,3 +297,17 @@ export class Uploader { return globalFileSizeLimit } } + +export async function getMaxFileSizeLimit(tenantId: string, bucketSizeLimit?: number | null) { + let globalFileSizeLimit = await getFileSizeLimit(tenantId) + + if (typeof bucketSizeLimit === 'number') { + globalFileSizeLimit = Math.min(bucketSizeLimit, globalFileSizeLimit) + } + + if (uploadFileSizeLimitStandard && uploadFileSizeLimitStandard > 0) { + globalFileSizeLimit = Math.min(uploadFileSizeLimitStandard, globalFileSizeLimit) + } + + return globalFileSizeLimit +} diff --git a/src/test/common.ts b/src/test/common.ts index f2938ac0..87749d1a 100644 --- a/src/test/common.ts +++ b/src/test/common.ts @@ -57,6 +57,8 @@ export function useMockObject() { jest.spyOn(S3Backend.prototype, 'copyObject').mockResolvedValue({ httpStatusCode: 200, + lastModified: new Date('Thu, 12 Aug 2021 16:00:00 GMT'), + eTag: 'abc', }) jest.spyOn(S3Backend.prototype, 'deleteObject').mockResolvedValue() diff --git a/src/test/object.test.ts b/src/test/object.test.ts index fcbe509c..e72f7711 100644 --- a/src/test/object.test.ts +++ b/src/test/object.test.ts @@ -7,7 +7,7 @@ import { getConfig, mergeConfig } from '../config' import { S3Backend } from '../storage/backend' import { Obj } from '../storage/schemas' import { signJWT } from '../auth' -import { StorageBackendError } from '../storage' +import { ErrorCode, StorageBackendError } from '../storage' import { useMockObject, useMockQueue } from './common' import { getPostgresConnection } from '../database' import { getServiceKeyUser } from '../database/tenant' @@ -376,7 +376,7 @@ describe('testing POST object via multipart upload', () => { expect(await response.json()).toEqual({ error: 'invalid_mime_type', message: `mime type image/png is not supported`, - statusCode: '422', + statusCode: '415', }) expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() }) @@ -399,8 +399,8 @@ describe('testing POST object via multipart upload', () => { expect(response.statusCode).toBe(400) expect(await response.json()).toEqual({ error: 'invalid_mime_type', - message: `mime type thisisnotarealmimetype is not formatted properly`, - statusCode: '422', + message: `mime type thisisnotarealmimetype is not supported`, + statusCode: '415', }) expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() }) @@ -498,6 +498,7 @@ describe('testing POST object via multipart upload', () => { }) expect(createObjectResponse.statusCode).toBe(500) expect(JSON.parse(createObjectResponse.body)).toStrictEqual({ + code: ErrorCode.S3Error, statusCode: '500', error: 'Unknown error', message: 'S3ServiceException', @@ -742,6 +743,7 @@ describe('testing POST object via binary upload', () => { expect(createObjectResponse.statusCode).toBe(500) expect(JSON.parse(createObjectResponse.body)).toStrictEqual({ statusCode: '500', + code: ErrorCode.S3Error, error: 'Unknown error', message: 'S3ServiceException', }) @@ -1069,7 +1071,7 @@ describe('testing delete object', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.deleteObjects).toBeCalled() + expect(S3Backend.prototype.deleteObject).toBeCalled() }) test('check if RLS policies are respected: anon user is not able to delete authenticated resource', async () => { diff --git 
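/*
 * getMaxFileSizeLimit() above resolves the effective per-upload cap as the
 * minimum of the tenant's global limit, the bucket's file_size_limit (when
 * set), and the standard-upload ceiling from config. A worked example of the
 * combination (the numbers are illustrative):
 */
const globalLimit = 50 * 1024 * 1024          // tenant-wide limit from getFileSizeLimit()
const bucketLimit = 10 * 1024 * 1024          // bucket.file_size_limit
const standardUploadLimit = 20 * 1024 * 1024  // uploadFileSizeLimitStandard

const effectiveLimit = Math.min(globalLimit, bucketLimit, standardUploadLimit)
// -> 10 MiB: the strictest of the three wins, which is why the putObject and
//    uploadPart tests below expect EntityTooLarge for a 12 MiB body once
//    uploadFileSizeLimit is merged down to 10 MiB.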
a/src/test/rls_tests.yaml b/src/test/rls_tests.yaml index d3fa72dc..150c43a7 100644 --- a/src/test/rls_tests.yaml +++ b/src/test/rls_tests.yaml @@ -66,7 +66,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - operation: bucket.update status: 400 @@ -125,7 +125,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - operation: bucket.update status: 400 @@ -161,7 +161,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - operation: bucket.update status: 400 @@ -191,7 +191,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - operation: bucket.update status: 400 @@ -244,7 +244,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - operation: upload status: 400 @@ -265,7 +265,7 @@ tests: - operation: object.delete status: 400 - error: 'Object Not Found' + error: 'Object not found' - operation: bucket.update status: 200 @@ -285,7 +285,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - operation: upload bucketName: 'bucket_{{runId}}' @@ -302,7 +302,7 @@ tests: - operation: object.delete status: 400 - error: 'Object Not Found' + error: 'Object not found' - operation: bucket.update status: 400 @@ -343,7 +343,7 @@ tests: - operation: object.delete status: 400 - error: 'Object Not Found' + error: 'Object not found' - operation: object.delete role: service @@ -393,7 +393,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - description: 'Will only able to move objects when authenticated' policies: @@ -433,7 +433,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - description: 'Will only able to copy owned objects when authenticated' policies: @@ -474,4 +474,4 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' diff --git a/src/test/s3-protocol.test.ts b/src/test/s3-protocol.test.ts index 4922372c..bb905d72 100644 --- a/src/test/s3-protocol.test.ts +++ b/src/test/s3-protocol.test.ts @@ -1,114 +1,579 @@ import { + CopyObjectCommand, CreateBucketCommand, - CreateBucketRequest, CreateMultipartUploadCommand, + DeleteObjectCommand, + DeleteObjectsCommand, + GetBucketLocationCommand, + GetBucketVersioningCommand, + GetObjectCommand, ListBucketsCommand, ListObjectsV2Command, + PutObjectCommand, S3Client, + S3ServiceException, + UploadPartCommand, } from '@aws-sdk/client-s3' -import { getConfig } from '../config' +import { getConfig, mergeConfig } from '../config' import app from '../app' import { FastifyInstance } from 'fastify' import { Upload } from '@aws-sdk/lib-storage' +import { Readable } from 'stream' +import { ReadableStreamBuffer } from 'stream-buffers' -const { tenantId, serviceKey } = getConfig() +const { tenantId, serviceKey, storageS3Region } = getConfig() + +async function createBucket(client: S3Client, name?: string, publicRead = true) { + if (!name) { + name = `TestBucket-${Date.now()}` + } + + name = `${name}-${Date.now()}` + + const createBucketRequest = new CreateBucketCommand({ + Bucket: name, + ACL: publicRead ? 
'public-read' : undefined, + }) + + await client.send(createBucketRequest) + + return name +} + +async function uploadFile(client: S3Client, bucketName: string, key: string, mb: number) { + const uploader = new Upload({ + client: client, + params: { + Bucket: bucketName, + Key: key, + ContentType: 'image/jpg', + Body: Buffer.alloc(1024 * 1024 * mb), + }, + }) + + return await uploader.done() +} describe('S3 Protocol', () => { describe('Bucket', () => { let testApp: FastifyInstance let client: S3Client + let clientMinio: S3Client beforeAll(async () => { - testApp = app({ - ignoreTrailingSlash: true, - }) + testApp = app() const listener = await testApp.listen() - console.log('listen', `${listener.replace('[::1]', 'localhost')}/s3`) client = new S3Client({ endpoint: `${listener.replace('[::1]', 'localhost')}/s3`, + // endpoint: 'http://localhost:5000/s3', forcePathStyle: true, region: 'us-east-1', + // logger: console, credentials: { accessKeyId: tenantId, secretAccessKey: serviceKey, }, }) + + clientMinio = new S3Client({ + forcePathStyle: true, + region: 'us-east-1', + logger: console, + endpoint: 'http://localhost:9000', + credentials: { + accessKeyId: 'supa-storage', + secretAccessKey: 'secret1234', + }, + }) }) afterAll(async () => { await testApp.close() }) - it('creates a bucket', async () => { - const createBucketRequest = new CreateBucketCommand({ - Bucket: `SomeBucket-${Date.now()}`, - ACL: 'public-read', + describe('CreateBucketCommand', () => { + it('creates a bucket', async () => { + const createBucketRequest = new CreateBucketCommand({ + Bucket: `SomeBucket-${Date.now()}`, + ACL: 'public-read', + }) + + const { Location } = await client.send(createBucketRequest) + + expect(Location).toBeTruthy() }) - const { Location, $metadata, ...rest } = await client.send(createBucketRequest) - console.log(Location) + it('can get bucket versioning', async () => { + const bucket = await createBucket(client) + const bucketVersioningCommand = new GetBucketVersioningCommand({ + Bucket: bucket, + }) + + const resp = await client.send(bucketVersioningCommand) + expect(resp.Status).toEqual('Suspended') + expect(resp.MFADelete).toEqual('Disabled') + }) - expect(Location).toBeTruthy() + it('can get bucket location', async () => { + const bucket = await createBucket(client) + const bucketVersioningCommand = new GetBucketLocationCommand({ + Bucket: bucket, + }) + + const resp = await client.send(bucketVersioningCommand) + expect(resp.LocationConstraint).toEqual(storageS3Region) + }) }) - it('can list buckets', async () => { - const listBuckets = new ListBucketsCommand({ - Bucket: `SomeBucket-${Date.now()}`, + describe('DeleteBucketCommand', () => { + it('can delete an empty bucket', () => { + // TODO: }) - const resp = await client.send(listBuckets) - console.log(resp) + it('cannot delete an empty bucket', () => { + // TODO: + }) }) - it('can list content', async () => { - const listBuckets = new ListObjectsV2Command({ - Bucket: `super`, + describe('HeadBucketCommand', () => { + it('return bucket information when exists', async () => { + // TODO: + }) + it('will return bucket not found error', async () => { + // TODO: }) - - const resp = await client.send(listBuckets) - console.log(resp) }) - it('creates a multi part upload', async () => { - const bucketName = 'SomeBucket-1708340404949' - const createMultiPartUpload = new CreateMultipartUploadCommand({ - Bucket: bucketName, - Key: 'test-1.jpg', - ContentType: 'image/jpg', - CacheControl: 'max-age=2000', + describe('ListBucketsCommand', () => { + 
it('can list buckets', async () => { + await createBucket(client) + const listBuckets = new ListBucketsCommand({}) + + const resp = await client.send(listBuckets) + expect(resp.Buckets?.length || 0).toBeGreaterThan(0) }) - const resp = await client.send(createMultiPartUpload) - expect(resp.UploadId).toBeTruthy() }) - it('upload a part', async () => { - const bucketName = 'SomeBucket-1708340404949' - const createMultiPartUpload = new CreateMultipartUploadCommand({ - Bucket: bucketName, - Key: 'test-1.jpg', - ContentType: 'image/jpg', - CacheControl: 'max-age=2000', + describe('ListObjectsV2Command', () => { + it('list empty bucket', async () => { + const bucket = await createBucket(client) + const listBuckets = new ListObjectsV2Command({ + Bucket: bucket, + }) + + const resp = await client.send(listBuckets) + expect(resp.Contents?.length).toBe(undefined) + }) + + it('list all keys', async () => { + const bucket = await createBucket(client) + const listBuckets = new ListObjectsV2Command({ + Bucket: bucket, + }) + + await Promise.all([ + uploadFile(client, bucket, 'test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-1/test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-3/test-1.jpg', 1), + ]) + + const resp = await client.send(listBuckets) + expect(resp.Contents?.length).toBe(3) + }) + + it('list keys and common prefixes', async () => { + const bucket = await createBucket(client) + const listBuckets = new ListObjectsV2Command({ + Bucket: bucket, + Delimiter: '/', + }) + + await Promise.all([ + uploadFile(client, bucket, 'test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-1/test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-3/test-1.jpg', 1), + ]) + + const resp = await client.send(listBuckets) + expect(resp.Contents?.length).toBe(1) + expect(resp.CommonPrefixes?.length).toBe(2) + }) + + it('paginate keys and common prefixes', async () => { + const bucket = await createBucket(client) + const listBucketsPage1 = new ListObjectsV2Command({ + Bucket: bucket, + Delimiter: '/', + MaxKeys: 1, + }) + + await Promise.all([ + uploadFile(client, bucket, 'test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-1/test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-3/test-1.jpg', 1), + ]) + + const objectsPage1 = await client.send(listBucketsPage1) + expect(objectsPage1.Contents?.length).toBe(undefined) + expect(objectsPage1.CommonPrefixes?.length).toBe(1) + expect(objectsPage1.CommonPrefixes?.[0].Prefix).toBe('prefix-1/') + + const listBucketsPage2 = new ListObjectsV2Command({ + Bucket: bucket, + Delimiter: '/', + MaxKeys: 1, + ContinuationToken: objectsPage1.NextContinuationToken, + }) + + const objectsPage2 = await client.send(listBucketsPage2) + + expect(objectsPage2.Contents?.length).toBe(undefined) + expect(objectsPage2.CommonPrefixes?.length).toBe(1) + expect(objectsPage2.CommonPrefixes?.[0].Prefix).toBe('prefix-3/') + + const listBucketsPage3 = new ListObjectsV2Command({ + Bucket: bucket, + Delimiter: '/', + MaxKeys: 1, + ContinuationToken: objectsPage2.NextContinuationToken, + }) + + const objectsPage3 = await client.send(listBucketsPage3) + + expect(objectsPage3.Contents?.length).toBe(1) + expect(objectsPage3.CommonPrefixes?.length).toBe(undefined) + expect(objectsPage3.Contents?.[0].Key).toBe('test-1.jpg') }) - const resp = await client.send(createMultiPartUpload) - expect(resp.UploadId).toBeTruthy() }) - it('upload a file', async () => { - const bucketName = 'SomeBucket-1708340404949' + describe('MultiPartUpload', () => { + it('creates a multi part upload', async () => { + const 
bucketName = await createBucket(client) + const createMultiPartUpload = new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) + const resp = await client.send(createMultiPartUpload) + expect(resp.UploadId).toBeTruthy() + }) - const uploader = new Upload({ - client: client, - params: { + it('upload a part', async () => { + const bucketName = await createBucket(client) + const createMultiPartUpload = new CreateMultipartUploadCommand({ Bucket: bucketName, Key: 'test-1.jpg', ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) + const resp = await client.send(createMultiPartUpload) + expect(resp.UploadId).toBeTruthy() + + const data = Buffer.alloc(1024 * 1024 * 5) + + const uploadPart = new UploadPartCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + ContentLength: data.length, + UploadId: resp.UploadId, + Body: data, + PartNumber: 1, + }) + + const partResp = await client.send(uploadPart) + expect(partResp.ETag).toBeTruthy() + }) + + it('completes a multipart upload', () => { + // TODO: + }) + it('aborts a multipart upload', () => { + // TODO: + }) + + it('upload a file using putObject', async () => { + const bucketName = await createBucket(client) + + const putObject = new PutObjectCommand({ + Bucket: bucketName, + Key: 'test-1-put-object.jpg', Body: Buffer.alloc(1024 * 1024 * 12), - }, + }) + + const resp = await client.send(putObject) + expect(resp.$metadata.httpStatusCode).toEqual(200) + }) + + it('it will not allow to upload a file using putObject when exceeding maxFileSize', async () => { + const bucketName = await createBucket(client) + + mergeConfig({ + uploadFileSizeLimit: 1024 * 1024 * 10, + }) + + const putObject = new PutObjectCommand({ + Bucket: bucketName, + Key: 'test-1-put-object.jpg', + Body: Buffer.alloc(1024 * 1024 * 12), + }) + + try { + await client.send(putObject) + throw new Error('Should not reach here') + } catch (e) { + expect((e as Error).message).not.toEqual('Should not reach here') + expect((e as S3ServiceException).$metadata.httpStatusCode).toEqual(413) + expect((e as S3ServiceException).message).toEqual( + 'The object exceeded the maximum allowed size' + ) + expect((e as S3ServiceException).name).toEqual('EntityTooLarge') + } + }) + + it('will not allow uploading a file that exceeded the maxFileSize', async () => { + const bucketName = await createBucket(client) + + mergeConfig({ + uploadFileSizeLimit: 1024 * 1024 * 10, + }) + + const uploader = new Upload({ + client: client, + leavePartsOnError: true, + + params: { + Bucket: bucketName, + Key: 'test-1.jpg', + ContentType: 'image/jpg', + Body: Buffer.alloc(1024 * 1024 * 12), + }, + }) + + try { + await uploader.done() + throw new Error('Should not reach here') + } catch (e) { + expect((e as Error).message).not.toEqual('Should not reach here') + expect((e as S3ServiceException).$metadata.httpStatusCode).toEqual(413) + expect((e as S3ServiceException).message).toEqual( + 'The object exceeded the maximum allowed size' + ) + expect((e as S3ServiceException).name).toEqual('EntityTooLarge') + } + }) + + it('will not allow uploading a part that exceeded the maxFileSize', async () => { + const bucketName = await createBucket(client, 'try-test-1') + + mergeConfig({ + uploadFileSizeLimit: 1024 * 1024 * 10, + }) + + const createMultiPartUpload = new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) + const resp = await 
client.send(createMultiPartUpload) + expect(resp.UploadId).toBeTruthy() + + const readable = new ReadableStreamBuffer({ + frequency: 500, + chunkSize: 1024 * 1024 * 3, + }) + + readable.put(Buffer.alloc(1024 * 1024 * 12)) + readable.stop() + + const uploadPart = new UploadPartCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + UploadId: resp.UploadId, + Body: readable, + PartNumber: 1, + ContentLength: 1024 * 1024 * 12, + }) + + try { + await client.send(uploadPart) + throw new Error('Should not reach here') + } catch (e) { + expect((e as Error).message).not.toEqual('Should not reach here') + expect((e as S3ServiceException).$metadata.httpStatusCode).toEqual(413) + expect((e as S3ServiceException).message).toEqual( + 'The object exceeded the maximum allowed size' + ) + expect((e as S3ServiceException).name).toEqual('EntityTooLarge') + } + }) + + it('upload a file using multipart upload', async () => { + const bucketName = await createBucket(client) + + const uploader = new Upload({ + client: client, + params: { + Bucket: bucketName, + Key: 'test-1.jpg', + ContentType: 'image/jpg', + Body: Buffer.alloc(1024 * 1024 * 12), + }, + }) + + const resp = await uploader.done() + + expect(resp.$metadata).toBeTruthy() + }) + }) + + describe('GetObject', () => { + it('can get an existing object', async () => { + const bucketName = await createBucket(client) + const key = 'test-1.jpg' + await uploadFile(client, bucketName, key, 1) + + const getObject = new GetObjectCommand({ + Bucket: bucketName, + Key: key, + }) + + const resp = await client.send(getObject) + const data = await resp.Body?.transformToByteArray() + expect(data).toBeTruthy() + expect(resp.ETag).toBeTruthy() + }) + + it('will return an error when object does not exist', async () => { + const bucketName = await createBucket(client) + const key = 'test-1.jpg' + + const getObject = new GetObjectCommand({ + Bucket: bucketName, + Key: key, + }) + + try { + await client.send(getObject) + } catch (e) { + expect((e as S3ServiceException).$metadata.httpStatusCode).toEqual(404) + expect((e as S3ServiceException).message).toEqual('Object not found') + expect((e as S3ServiceException).name).toEqual('NoSuchKey') + } + }) + + it('can get an object using range requests', async () => { + const bucketName = await createBucket(client) + const key = 'test-1.jpg' + await uploadFile(client, bucketName, key, 1) + + const getObject = new GetObjectCommand({ + Bucket: bucketName, + Key: key, + Range: 'bytes=0-100', + }) + + const resp = await client.send(getObject) + const data = await resp.Body?.transformToByteArray() + expect(resp.$metadata.httpStatusCode).toEqual(206) + expect(data).toBeTruthy() + expect(resp.ETag).toBeTruthy() + }) + }) + + describe('DeleteObjectCommand', () => { + it('can delete an existing object', async () => { + const bucketName = await createBucket(client) + const key = 'test-1.jpg' + await uploadFile(client, bucketName, key, 1) + + const deleteObject = new DeleteObjectCommand({ + Bucket: bucketName, + Key: key, + }) + + await client.send(deleteObject) + + const getObject = new GetObjectCommand({ + Bucket: bucketName, + Key: key, + }) + + try { + await client.send(getObject) + } catch (e) { + expect((e as S3ServiceException).$metadata.httpStatusCode).toEqual(404) + } }) + }) + + describe('DeleteObjectsCommand', () => { + it('can delete multiple objects', async () => { + const bucketName = await createBucket(client) + await Promise.all([ + uploadFile(client, bucketName, 'test-1.jpg', 1), + uploadFile(client, bucketName, 'test-2.jpg', 
1), + uploadFile(client, bucketName, 'test-3.jpg', 1), + ]) + + const deleteObjectsCommand = new DeleteObjectsCommand({ + Bucket: bucketName, + Delete: { + Objects: [ + { + Key: 'test-1.jpg', + }, + { + Key: 'test-2.jpg', + }, + { + Key: 'test-3.jpg', + }, + ], + }, + }) + + await client.send(deleteObjectsCommand) - const resp = await uploader.done() + const listObjectsCommand = new ListObjectsV2Command({ + Bucket: bucketName, + }) + + const resp = await client.send(listObjectsCommand) + expect(resp.Contents).toBe(undefined) + }) + }) - expect(resp.$metadata).toBeTruthy() + describe('CopyObjectCommand', () => { + it('will copy an object in the same bucket', async () => { + const bucketName = await createBucket(client) + await uploadFile(client, bucketName, 'test-copy-1.jpg', 1) + + const copyObjectCommand = new CopyObjectCommand({ + Bucket: bucketName, + Key: 'test-copied-2.jpg', + CopySource: `${bucketName}/test-copy-1.jpg`, + }) + + const resp = await client.send(copyObjectCommand) + expect(resp.CopyObjectResult?.ETag).toBeTruthy() + }) + + it('will copy an object in a different bucket', async () => { + const bucketName1 = await createBucket(client) + const bucketName2 = await createBucket(client) + await uploadFile(client, bucketName1, 'test-copy-1.jpg', 1) + + const copyObjectCommand = new CopyObjectCommand({ + Bucket: bucketName2, + Key: 'test-copied-2.jpg', + CopySource: `${bucketName1}/test-copy-1.jpg`, + }) + + const resp = await client.send(copyObjectCommand) + expect(resp.CopyObjectResult?.ETag).toBeTruthy() + }) }) }) }) diff --git a/src/test/tenant.test.ts b/src/test/tenant.test.ts index 8b3913c9..eac11796 100644 --- a/src/test/tenant.test.ts +++ b/src/test/tenant.test.ts @@ -16,7 +16,7 @@ const payload = { serviceKey: 'd', jwks: { keys: [] }, migrationStatus: 'COMPLETED', - migrationVersion: 'alter-default-value-objects-id', + migrationVersion: 's3-multipart-uploads', features: { imageTransformation: { enabled: true, @@ -34,7 +34,7 @@ const payload2 = { serviceKey: 'h', jwks: null, migrationStatus: 'COMPLETED', - migrationVersion: 'alter-default-value-objects-id', + migrationVersion: 's3-multipart-uploads', features: { imageTransformation: { enabled: false, diff --git a/src/test/webhooks.test.ts b/src/test/webhooks.test.ts index 39d01742..62f9ad10 100644 --- a/src/test/webhooks.test.ts +++ b/src/test/webhooks.test.ts @@ -105,25 +105,10 @@ describe('Webhooks', () => { }, }) expect(response.statusCode).toBe(200) - expect(sendSpy).toBeCalledTimes(2) - expect(sendSpy).toHaveBeenNthCalledWith(1, { - data: { - $version: 'v1', - bucketId: 'bucket6', - name: obj.name, - tenant: { - host: undefined, - ref: 'bjhaohmqunupljrqypxz', - }, - reqId: expect.any(String), - version: expect.any(String), - }, - name: 'object:admin:delete', - options: undefined, - }) + expect(sendSpy).toBeCalledTimes(1) expect(sendSpy).toHaveBeenNthCalledWith( - 2, + 1, expect.objectContaining({ name: 'webhooks', options: undefined,