From bade70fc3278857ff7670dc45c1a872450ccc095 Mon Sep 17 00:00:00 2001 From: fenos Date: Sat, 6 Apr 2024 10:55:30 +0100 Subject: [PATCH] feat: reworked access credentials --- .env.sample | 9 + .env.test.sample | 4 + .github/workflows/ci.yml | 4 + .../0008-tenants-s3-credentials.sql | 50 ++++- ...9-add-scope-token-column-to-tenants-s3.sql | 3 + .../tenant/0021-s3-multipart-uploads.sql | 30 +-- .../0022-s3-multipart-uploads-big-ints.sql | 4 +- .../tenant/0023-optimize-search-function.sql | 78 +++++++ .../tenant/0024-create-index-path-tokens.sql | 0 package-lock.json | 140 +++++++----- package.json | 5 +- src/admin-app.ts | 1 + src/auth/jwt.ts | 50 ++--- src/config.ts | 16 ++ src/database/connection.ts | 2 +- src/database/tenant.ts | 156 +++++++++++++- src/http/plugins/db.ts | 3 +- src/http/plugins/jwt.ts | 13 +- src/http/plugins/signature-v4.ts | 203 +++++++++++++++--- src/http/routes/admin/index.ts | 1 + src/http/routes/admin/s3.ts | 128 +++++++++++ src/http/routes/object/getObjectInfo.ts | 3 + .../s3/commands/abort-multipart-upload.ts | 2 +- .../s3/commands/complete-multipart-upload.ts | 2 +- src/http/routes/s3/commands/copy-object.ts | 2 +- src/http/routes/s3/commands/create-bucket.ts | 2 +- .../s3/commands/create-multipart-upload.ts | 2 +- src/http/routes/s3/commands/delete-bucket.ts | 2 +- src/http/routes/s3/commands/delete-object.ts | 4 +- src/http/routes/s3/commands/get-bucket.ts | 4 +- src/http/routes/s3/commands/get-object.ts | 2 +- src/http/routes/s3/commands/head-bucket.ts | 2 +- src/http/routes/s3/commands/head-object.ts | 2 +- src/http/routes/s3/commands/list-buckets.ts | 2 +- .../s3/commands/list-multipart-uploads.ts | 2 +- src/http/routes/s3/commands/list-objects.ts | 4 +- src/http/routes/s3/commands/list-parts.ts | 2 +- .../routes/s3/commands/upload-part-copy.ts | 2 +- src/http/routes/s3/commands/upload-part.ts | 4 +- src/http/routes/s3/error-handler.ts | 2 + src/http/routes/s3/index.ts | 1 + src/http/routes/s3/router.ts | 2 +- src/server.ts | 5 +- src/storage/backend/adapter.ts | 7 +- src/storage/database/adapter.ts | 5 +- src/storage/database/knex.ts | 20 +- src/storage/errors.ts | 40 ++++ src/storage/limits.ts | 11 +- src/storage/protocols/s3/byte-limit-stream.ts | 6 +- src/storage/protocols/s3/s3-handler.ts | 80 +++++-- src/storage/protocols/s3/signature-v4.ts | 133 +++++++----- src/storage/schemas/multipart.ts | 2 + src/storage/uploader.ts | 24 +-- src/test/s3-protocol.test.ts | 13 +- src/test/tenant.test.ts | 4 +- 55 files changed, 1019 insertions(+), 281 deletions(-) create mode 100644 migrations/multitenant/0009-add-scope-token-column-to-tenants-s3.sql delete mode 100644 migrations/tenant/0024-create-index-path-tokens.sql diff --git a/.env.sample b/.env.sample index 07f3a33b..79bf2b9f 100644 --- a/.env.sample +++ b/.env.sample @@ -65,10 +65,19 @@ UPLOAD_FILE_SIZE_LIMIT=524288000 UPLOAD_FILE_SIZE_LIMIT_STANDARD=52428800 UPLOAD_SIGNED_URL_EXPIRATION_TIME=60 +####################################### +# TUS Protocol +####################################### TUS_URL_PATH=/upload/resumable TUS_URL_EXPIRY_MS=3600000 TUS_PART_SIZE=50 +####################################### +# S3 Protocol +####################################### +S3_PROTOCOL_ACCESS_KEY_ID=b585f311d839730f8a980a3457be2787 +S3_PROTOCOL_ACCESS_KEY_SECRET=67d161a7a8a46a24a17a75b26e7724f11d56b8d49a119227c66b13b6595601fb +S3_PROTOCOL_ALLOWS_SERVICE_KEY_AS_SECRET=false ####################################### # Storage Backend Driver diff --git a/.env.test.sample b/.env.test.sample index 915f7d49..fee2ccbb 100644 --- 
a/.env.test.sample +++ b/.env.test.sample @@ -2,6 +2,10 @@ AUTHENTICATED_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhd ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoiYW5vbiIsImlhdCI6MTYxMzUzMTk4NSwiZXhwIjoxOTI5MTA3OTg1fQ.mqfi__KnQB4v6PkIjkhzfwWrYyF94MEbSC6LnuvVniE SERVICE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoic2VydmljZV9yb2xlIiwiaWF0IjoxNjEzNTMxOTg1LCJleHAiOjE5MjkxMDc5ODV9.th84OKK0Iz8QchDyXZRrojmKSEZ-OuitQm_5DvLiSIc +S3_PROTOCOL_ACCESS_KEY_ID=b585f311d839730f8a980a3457be2787 +S3_PROTOCOL_ACCESS_KEY_SECRET=67d161a7a8a46a24a17a75b26e7724f11d56b8d49a119227c66b13b6595601fb +S3_PROTOCOL_ALLOWS_SERVICE_KEY_AS_SECRET=false + TENANT_ID=bjhaohmqunupljrqypxz DEFAULT_METRICS_ENABLED=false PG_QUEUE_ENABLE=false diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 268b1743..d4646432 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -76,6 +76,10 @@ jobs: ENABLE_DEFAULT_METRICS: false PG_QUEUE_ENABLE: false MULTI_TENANT: false + S3_PROTOCOL_ACCESS_KEY_ID: ${{ secrets.TENANT_ID }} + S3_PROTOCOL_ACCESS_KEY_SECRET: ${{ secrets.SERVICE_KEY }} + S3_PROTOCOL_ALLOWS_SERVICE_KEY_AS_SECRET: true + - name: Upload coverage results to Coveralls uses: coverallsapp/github-action@master diff --git a/migrations/multitenant/0008-tenants-s3-credentials.sql b/migrations/multitenant/0008-tenants-s3-credentials.sql index e85b5b16..924a348a 100644 --- a/migrations/multitenant/0008-tenants-s3-credentials.sql +++ b/migrations/multitenant/0008-tenants-s3-credentials.sql @@ -1,6 +1,46 @@ -ALTER TABLE tenants ADD COLUMN IF NOT EXISTS cursor_id SERIAL; -ALTER TABLE tenants ADD COLUMN IF NOT EXISTS created_at TIMESTAMP DEFAULT current_timestamp; -ALTER TABLE tenants ADD COLUMN IF NOT EXISTS migrations_version text null DEFAULT null; -ALTER TABLE tenants ADD COLUMN IF NOT EXISTS migrations_status text null DEFAULT null; -create index if not exists tenants_migration_version_idx on tenants(cursor_id, migrations_version, migrations_status); \ No newline at end of file + +CREATE TABLE IF NOT EXISTS tenants_s3_credentials ( + id UUID PRIMARY KEY default gen_random_uuid(), + description text NOT NULL, + tenant_id text REFERENCES tenants(id) ON DELETE CASCADE, + access_key text NOT NULL, + secret_key text NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() ); + +CREATE INDEX IF NOT EXISTS tenants_s3_credentials_tenant_id_idx ON tenants_s3_credentials(tenant_id); +CREATE UNIQUE INDEX IF NOT EXISTS tenants_s3_credentials_access_key_idx ON tenants_s3_credentials(tenant_id, access_key); + + +CREATE FUNCTION tenants_s3_credentials_update_notify_trigger () + RETURNS TRIGGER +AS $$ +BEGIN + PERFORM + pg_notify('tenants_s3_credentials_update', '"' || NEW.tenant_id || ':' || NEW.access_key || '"'); + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE FUNCTION tenants_s3_credentials_delete_notify_trigger () + RETURNS TRIGGER +AS $$ +BEGIN + PERFORM + pg_notify('tenants_s3_credentials_update', '"' || OLD.tenant_id || ':' || OLD.access_key || '"'); + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER tenants_s3_credentials_update_notify_trigger + AFTER UPDATE ON tenants_s3_credentials + FOR EACH ROW +EXECUTE PROCEDURE tenants_s3_credentials_update_notify_trigger (); + +CREATE TRIGGER tenants_s3_credentials_delete_notify_trigger + AFTER DELETE ON tenants_s3_credentials + FOR EACH ROW +EXECUTE PROCEDURE tenants_s3_credentials_delete_notify_trigger (); diff --git a/migrations/multitenant/0009-add-scope-token-column-to-tenants-s3.sql 
b/migrations/multitenant/0009-add-scope-token-column-to-tenants-s3.sql new file mode 100644 index 00000000..db8ff519 --- /dev/null +++ b/migrations/multitenant/0009-add-scope-token-column-to-tenants-s3.sql @@ -0,0 +1,3 @@ + + +ALTER TABLE tenants_s3_credentials ADD COLUMN scopes json NOT NULL DEFAULT '{}'; diff --git a/migrations/tenant/0021-s3-multipart-uploads.sql b/migrations/tenant/0021-s3-multipart-uploads.sql index 9ca34936..8b6f614e 100644 --- a/migrations/tenant/0021-s3-multipart-uploads.sql +++ b/migrations/tenant/0021-s3-multipart-uploads.sql @@ -1,28 +1,29 @@ - -CREATE TABLE IF NOT EXISTS storage._s3_multipart_uploads ( +CREATE TABLE IF NOT EXISTS storage.s3_multipart_uploads ( id text PRIMARY KEY, in_progress_size int NOT NULL default 0, upload_signature text NOT NULL, bucket_id text NOT NULL references storage.buckets(id), key text COLLATE "C" NOT NULL , version text NOT NULL, + owner_id text NULL, created_at timestamptz NOT NULL default now() ); -CREATE TABLE IF NOT EXISTS storage._s3_multipart_uploads_parts ( +CREATE TABLE IF NOT EXISTS storage.s3_multipart_uploads_parts ( id uuid PRIMARY KEY default gen_random_uuid(), - upload_id text NOT NULL references storage._s3_multipart_uploads(id) ON DELETE CASCADE, + upload_id text NOT NULL references storage.s3_multipart_uploads(id) ON DELETE CASCADE, size int NOT NULL default 0, part_number int NOT NULL, bucket_id text NOT NULL references storage.buckets(id), key text COLLATE "C" NOT NULL, etag text NOT NULL, + owner_id text NULL, version text NOT NULL, created_at timestamptz NOT NULL default now() ); -CREATE INDEX idx_multipart_uploads_list - ON storage._s3_multipart_uploads (bucket_id, (key COLLATE "C"), created_at ASC); +CREATE INDEX IF NOT EXISTS idx_multipart_uploads_list + ON storage.s3_multipart_uploads (bucket_id, (key COLLATE "C"), created_at ASC); CREATE OR REPLACE FUNCTION storage.list_multipart_uploads_with_delimiter(bucket_id text, prefix_param text, delimiter_param text, max_keys integer default 100, next_key_token text DEFAULT '', next_upload_token text default '') RETURNS TABLE (key text, id text, created_at timestamptz) AS @@ -38,7 +39,7 @@ BEGIN key END AS key, id, created_at FROM - storage._s3_multipart_uploads + storage.s3_multipart_uploads WHERE bucket_id = $5 AND key ILIKE $1 || ''%'' AND @@ -65,18 +66,19 @@ BEGIN END; $$ LANGUAGE plpgsql; -ALTER TABLE storage._s3_multipart_uploads ENABLE ROW LEVEL SECURITY; -ALTER TABLE storage._s3_multipart_uploads_parts ENABLE ROW LEVEL SECURITY; +ALTER TABLE storage.s3_multipart_uploads ENABLE ROW LEVEL SECURITY; +ALTER TABLE storage.s3_multipart_uploads_parts ENABLE ROW LEVEL SECURITY; --- Do not expose this tables to PostgREST DO $$ DECLARE anon_role text = COALESCE(current_setting('storage.anon_role', true), 'anon'); authenticated_role text = COALESCE(current_setting('storage.authenticated_role', true), 'authenticated'); service_role text = COALESCE(current_setting('storage.service_role', true), 'service_role'); BEGIN - EXECUTE 'revoke all on storage._s3_multipart_uploads from ' || anon_role || ', ' || authenticated_role; - EXECUTE 'revoke all on storage._s3_multipart_uploads_parts from ' || anon_role || ', ' || authenticated_role; - EXECUTE 'GRANT ALL ON TABLE storage._s3_multipart_uploads TO ' || service_role; - EXECUTE 'GRANT ALL ON TABLE storage._s3_multipart_uploads_parts TO ' || service_role; + EXECUTE 'revoke all on storage.s3_multipart_uploads from ' || anon_role || ', ' || authenticated_role; + EXECUTE 'revoke all on storage.s3_multipart_uploads_parts from ' 
|| anon_role || ', ' || authenticated_role; + EXECUTE 'GRANT ALL ON TABLE storage.s3_multipart_uploads TO ' || service_role; + EXECUTE 'GRANT ALL ON TABLE storage.s3_multipart_uploads_parts TO ' || service_role; + EXECUTE 'GRANT SELECT ON TABLE storage.s3_multipart_uploads TO ' || authenticated_role || ', ' || anon_role; + EXECUTE 'GRANT SELECT ON TABLE storage.s3_multipart_uploads_parts TO ' || authenticated_role || ', ' || anon_role; END$$; \ No newline at end of file diff --git a/migrations/tenant/0022-s3-multipart-uploads-big-ints.sql b/migrations/tenant/0022-s3-multipart-uploads-big-ints.sql index 7bffed8f..86a765f8 100644 --- a/migrations/tenant/0022-s3-multipart-uploads-big-ints.sql +++ b/migrations/tenant/0022-s3-multipart-uploads-big-ints.sql @@ -1,2 +1,2 @@ -ALTER TABLE storage._s3_multipart_uploads ALTER COLUMN in_progress_size TYPE bigint; -ALTER TABLE storage._s3_multipart_uploads_parts ALTER COLUMN size TYPE bigint; \ No newline at end of file +ALTER TABLE storage.s3_multipart_uploads ALTER COLUMN in_progress_size TYPE bigint; +ALTER TABLE storage.s3_multipart_uploads_parts ALTER COLUMN size TYPE bigint; \ No newline at end of file diff --git a/migrations/tenant/0023-optimize-search-function.sql b/migrations/tenant/0023-optimize-search-function.sql index e69de29b..7721d46f 100644 --- a/migrations/tenant/0023-optimize-search-function.sql +++ b/migrations/tenant/0023-optimize-search-function.sql @@ -0,0 +1,78 @@ +create or replace function storage.search ( + prefix text, + bucketname text, + limits int default 100, + levels int default 1, + offsets int default 0, + search text default '', + sortcolumn text default 'name', + sortorder text default 'asc' +) returns table ( + name text, + id uuid, + updated_at timestamptz, + created_at timestamptz, + last_accessed_at timestamptz, + metadata jsonb + ) +as $$ +declare + v_order_by text; + v_sort_order text; +begin + case + when sortcolumn = 'name' then + v_order_by = 'name'; + when sortcolumn = 'updated_at' then + v_order_by = 'updated_at'; + when sortcolumn = 'created_at' then + v_order_by = 'created_at'; + when sortcolumn = 'last_accessed_at' then + v_order_by = 'last_accessed_at'; + else + v_order_by = 'name'; + end case; + + case + when sortorder = 'asc' then + v_sort_order = 'asc'; + when sortorder = 'desc' then + v_sort_order = 'desc'; + else + v_sort_order = 'asc'; + end case; + + v_order_by = v_order_by || ' ' || v_sort_order; + + return query execute + 'with folders as ( + select path_tokens[$1] as folder + from storage.objects + where objects.name ilike $2 || $3 || ''%'' + and bucket_id = $4 + and array_length(objects.path_tokens, 1) <> $1 + group by folder + order by folder ' || v_sort_order || ' + ) + (select folder as "name", + null as id, + null as updated_at, + null as created_at, + null as last_accessed_at, + null as metadata from folders) + union all + (select path_tokens[$1] as "name", + id, + updated_at, + created_at, + last_accessed_at, + metadata + from storage.objects + where objects.name ilike $2 || $3 || ''%'' + and bucket_id = $4 + and array_length(objects.path_tokens, 1) = $1 + order by ' || v_order_by || ') + limit $5 + offset $6' using levels, prefix, search, bucketname, limits, offsets; +end; +$$ language plpgsql stable; \ No newline at end of file diff --git a/migrations/tenant/0024-create-index-path-tokens.sql b/migrations/tenant/0024-create-index-path-tokens.sql deleted file mode 100644 index e69de29b..00000000 diff --git a/package-lock.json b/package-lock.json index 85338854..3d7825d8 100644 --- 
a/package-lock.json +++ b/package-lock.json @@ -32,7 +32,6 @@ "crypto-js": "^4.2.0", "dotenv": "^16.0.0", "fastify": "^4.8.1", - "fastify-json-to-xml": "^1.1.11", "fastify-metrics": "^10.2.0", "fastify-plugin": "^4.0.0", "fastify-xml-body-parser": "^2.2.0", @@ -41,7 +40,10 @@ "ioredis": "^5.2.4", "jsonwebtoken": "^9.0.2", "knex": "^3.1.0", + "lru-cache": "^10.2.0", "md5-file": "^5.0.0", + "multistream": "^4.1.0", + "object-sizeof": "^2.6.4", "pg": "^8.11.3", "pg-boss": "^9.0.3", "pg-listen": "^1.7.0", @@ -62,6 +64,7 @@ "@types/jest": "^29.2.1", "@types/js-yaml": "^4.0.5", "@types/jsonwebtoken": "^9.0.5", + "@types/multistream": "^4.1.3", "@types/mustache": "^4.2.2", "@types/node": "^20.11.5", "@types/pg": "^8.6.4", @@ -3941,6 +3944,15 @@ "@types/node": "*" } }, + "node_modules/@types/multistream": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@types/multistream/-/multistream-4.1.3.tgz", + "integrity": "sha512-t57vmDEJOZuC0M3IrZYfCd9wolTcr3ZTCGk1iwHNosvgBX+7/SMvCGcR8wP9lidpelBZQ12crSuINOxkk0azPA==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@types/mustache": { "version": "4.2.2", "resolved": "https://registry.npmjs.org/@types/mustache/-/mustache-4.2.2.tgz", @@ -5847,23 +5859,6 @@ "tiny-lru": "^10.0.0" } }, - "node_modules/fastify-json-to-xml": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/fastify-json-to-xml/-/fastify-json-to-xml-1.1.11.tgz", - "integrity": "sha512-XYa3KWqxR/CFiAPj1kaSjotbo2U5pYN0Le81O5tROLWNA5T3IoCsopWpHCED+u7/OLpkphtLWtD/asBQOGS+rg==", - "dependencies": { - "accepts": "^1.3.8", - "fastify-plugin": "^4.5.1", - "js2xmlparser": "^5.0.0", - "secure-json-parse": "^2.7.0" - }, - "engines": { - "node": ">=14.18.0" - }, - "funding": { - "url": "https://github.com/sponsors/Fdawgs" - } - }, "node_modules/fastify-metrics": { "version": "10.2.0", "resolved": "https://registry.npmjs.org/fastify-metrics/-/fastify-metrics-10.2.0.tgz", @@ -7228,14 +7223,6 @@ "js-yaml": "bin/js-yaml.js" } }, - "node_modules/js2xmlparser": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/js2xmlparser/-/js2xmlparser-5.0.0.tgz", - "integrity": "sha512-ckXs0Fzd6icWurbeAXuqo+3Mhq2m8pOPygsQjTPh8K5UWgKaUgDSHrdDxAfexmT11xvBKOQ6sgYwPkYc5RW/bg==", - "dependencies": { - "xmlcreate": "^2.0.4" - } - }, "node_modules/jsesc": { "version": "2.5.2", "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", @@ -7606,6 +7593,14 @@ "resolved": "https://registry.npmjs.org/logflare-transport-core/-/logflare-transport-core-0.3.3.tgz", "integrity": "sha512-n82NsRVWvlaa3jd9QQ8rDroCjCJcIamQOlarLDBou9RsF0QaRv39rduy0ToPmlGQn1OPZBwlsv+R36lXupSmVQ==" }, + "node_modules/lru-cache": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.0.tgz", + "integrity": "sha512-2bIM8x+VAf6JT4bKAljS1qUWgMsqZRPGJS6FSahIMPVvctcNhyVp7AJu7quxOW9jwkryBReKZY5tY5JYv2n/7Q==", + "engines": { + "node": "14 || >=16.14" + } + }, "node_modules/luxon": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.2.1.tgz", @@ -7845,6 +7840,37 @@ "node": ">=8" } }, + "node_modules/object-sizeof": { + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/object-sizeof/-/object-sizeof-2.6.4.tgz", + "integrity": "sha512-YuJAf7Bi61KROcYmXm8RCeBrBw8UOaJDzTm1gp0eU7RjYi1xEte3/Nmg/VyPaHcJZ3sNojs1Y0xvSrgwkLmcFw==", + "dependencies": { + "buffer": "^6.0.3" + } + }, + "node_modules/object-sizeof/node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + 
"integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, "node_modules/on-exit-leak-free": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/on-exit-leak-free/-/on-exit-leak-free-2.1.0.tgz", @@ -9907,11 +9933,6 @@ "node": ">=4.0" } }, - "node_modules/xmlcreate": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/xmlcreate/-/xmlcreate-2.0.4.tgz", - "integrity": "sha512-nquOebG4sngPmGPICTS5EnxqhKbCmz5Ox5hsszI2T6U5qdrJizBc+0ilYSEjTSzU0yZcmvppztXe/5Al5fUwdg==" - }, "node_modules/xtend": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", @@ -13069,6 +13090,15 @@ "@types/node": "*" } }, + "@types/multistream": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@types/multistream/-/multistream-4.1.3.tgz", + "integrity": "sha512-t57vmDEJOZuC0M3IrZYfCd9wolTcr3ZTCGk1iwHNosvgBX+7/SMvCGcR8wP9lidpelBZQ12crSuINOxkk0azPA==", + "dev": true, + "requires": { + "@types/node": "*" + } + }, "@types/mustache": { "version": "4.2.2", "resolved": "https://registry.npmjs.org/@types/mustache/-/mustache-4.2.2.tgz", @@ -14491,17 +14521,6 @@ } } }, - "fastify-json-to-xml": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/fastify-json-to-xml/-/fastify-json-to-xml-1.1.11.tgz", - "integrity": "sha512-XYa3KWqxR/CFiAPj1kaSjotbo2U5pYN0Le81O5tROLWNA5T3IoCsopWpHCED+u7/OLpkphtLWtD/asBQOGS+rg==", - "requires": { - "accepts": "^1.3.8", - "fastify-plugin": "^4.5.1", - "js2xmlparser": "^5.0.0", - "secure-json-parse": "^2.7.0" - } - }, "fastify-metrics": { "version": "10.2.0", "resolved": "https://registry.npmjs.org/fastify-metrics/-/fastify-metrics-10.2.0.tgz", @@ -15511,14 +15530,6 @@ "argparse": "^2.0.1" } }, - "js2xmlparser": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/js2xmlparser/-/js2xmlparser-5.0.0.tgz", - "integrity": "sha512-ckXs0Fzd6icWurbeAXuqo+3Mhq2m8pOPygsQjTPh8K5UWgKaUgDSHrdDxAfexmT11xvBKOQ6sgYwPkYc5RW/bg==", - "requires": { - "xmlcreate": "^2.0.4" - } - }, "jsesc": { "version": "2.5.2", "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", @@ -15825,6 +15836,11 @@ "resolved": "https://registry.npmjs.org/logflare-transport-core/-/logflare-transport-core-0.3.3.tgz", "integrity": "sha512-n82NsRVWvlaa3jd9QQ8rDroCjCJcIamQOlarLDBou9RsF0QaRv39rduy0ToPmlGQn1OPZBwlsv+R36lXupSmVQ==" }, + "lru-cache": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.0.tgz", + "integrity": "sha512-2bIM8x+VAf6JT4bKAljS1qUWgMsqZRPGJS6FSahIMPVvctcNhyVp7AJu7quxOW9jwkryBReKZY5tY5JYv2n/7Q==" + }, "luxon": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.2.1.tgz", @@ -15992,6 +16008,25 @@ "path-key": "^3.0.0" } }, + "object-sizeof": { + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/object-sizeof/-/object-sizeof-2.6.4.tgz", + "integrity": "sha512-YuJAf7Bi61KROcYmXm8RCeBrBw8UOaJDzTm1gp0eU7RjYi1xEte3/Nmg/VyPaHcJZ3sNojs1Y0xvSrgwkLmcFw==", + "requires": { + "buffer": "^6.0.3" + }, + "dependencies": { + "buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": 
"sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "requires": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + } + } + }, "on-exit-leak-free": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/on-exit-leak-free/-/on-exit-leak-free-2.1.0.tgz", @@ -17501,11 +17536,6 @@ "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz", "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==" }, - "xmlcreate": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/xmlcreate/-/xmlcreate-2.0.4.tgz", - "integrity": "sha512-nquOebG4sngPmGPICTS5EnxqhKbCmz5Ox5hsszI2T6U5qdrJizBc+0ilYSEjTSzU0yZcmvppztXe/5Al5fUwdg==" - }, "xtend": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", diff --git a/package.json b/package.json index 981ff9fe..7519e9ed 100644 --- a/package.json +++ b/package.json @@ -48,7 +48,6 @@ "crypto-js": "^4.2.0", "dotenv": "^16.0.0", "fastify": "^4.8.1", - "fastify-json-to-xml": "^1.1.11", "fastify-metrics": "^10.2.0", "fastify-plugin": "^4.0.0", "fastify-xml-body-parser": "^2.2.0", @@ -57,7 +56,10 @@ "ioredis": "^5.2.4", "jsonwebtoken": "^9.0.2", "knex": "^3.1.0", + "lru-cache": "^10.2.0", "md5-file": "^5.0.0", + "multistream": "^4.1.0", + "object-sizeof": "^2.6.4", "pg": "^8.11.3", "pg-boss": "^9.0.3", "pg-listen": "^1.7.0", @@ -75,6 +77,7 @@ "@types/jest": "^29.2.1", "@types/js-yaml": "^4.0.5", "@types/jsonwebtoken": "^9.0.5", + "@types/multistream": "^4.1.3", "@types/mustache": "^4.2.2", "@types/node": "^20.11.5", "@types/pg": "^8.6.4", diff --git a/src/admin-app.ts b/src/admin-app.ts index 40f3d0da..6d097dc9 100644 --- a/src/admin-app.ts +++ b/src/admin-app.ts @@ -9,6 +9,7 @@ const build = (opts: FastifyServerOptions = {}, appInstance?: FastifyInstance): app.register(plugins.logRequest({ excludeUrls: ['/status', '/metrics', '/health'] })) app.register(routes.tenants, { prefix: 'tenants' }) app.register(routes.migrations, { prefix: 'migrations' }) + app.register(routes.s3Credentials, { prefix: 's3' }) let registriesToMerge: Registry[] = [] diff --git a/src/auth/jwt.ts b/src/auth/jwt.ts index e680c702..59e95afd 100644 --- a/src/auth/jwt.ts +++ b/src/auth/jwt.ts @@ -2,6 +2,7 @@ import * as crypto from 'crypto' import jwt from 'jsonwebtoken' import { getConfig } from '../config' +import { ERRORS } from '../storage' const { jwtAlgorithm } = getConfig() @@ -100,10 +101,7 @@ function getJWTVerificationKey( } } -export function getJWTAlgorithms( - secret: string, - jwks: { keys: { kid?: string; kty: string }[] } | null -) { +export function getJWTAlgorithms(jwks: { keys: { kid?: string; kty: string }[] } | null) { let algorithms: jwt.Algorithm[] if (jwks && jwks.keys && jwks.keys.length) { @@ -143,9 +141,9 @@ export function verifyJWT( jwt.verify( token, getJWTVerificationKey(secret, jwks || null), - { algorithms: getJWTAlgorithms(secret, jwks || null) }, + { algorithms: getJWTAlgorithms(jwks || null) }, (err, decoded) => { - if (err) return reject(err) + if (err) return reject(ERRORS.AccessDenied(err.message, err)) resolve(decoded as jwt.JwtPayload & T) } ) @@ -161,32 +159,18 @@ export function verifyJWT( export function signJWT( payload: string | object | Buffer, secret: string, - expiresIn: string | number -): Promise { - return new Promise((resolve, reject) => { - jwt.sign( - payload, - secret, - { expiresIn, algorithm: jwtAlgorithm as jwt.Algorithm }, - (err, token) => { - if (err) return reject(err) - 
resolve(token) - } - ) - }) -} + expiresIn: string | number | undefined +): Promise { + const options: jwt.SignOptions = { algorithm: jwtAlgorithm as jwt.Algorithm } -/** - * Extract the owner (user) from the provided JWT - * @param token - * @param secret - * @param jwks - */ -export async function getOwner( - token: string, - secret: string, - jwks: { keys: { kid?: string; kty: string }[] } | null -): Promise { - const decodedJWT = await verifyJWT(token, secret, jwks) - return (decodedJWT as jwtInterface)?.sub + if (expiresIn) { + options.expiresIn = expiresIn + } + + return new Promise((resolve, reject) => { + jwt.sign(payload, secret, options, (err, token) => { + if (err) return reject(err) + resolve(token as string) + }) + }) } diff --git a/src/config.ts b/src/config.ts index 2c38f82d..25456ef7 100644 --- a/src/config.ts +++ b/src/config.ts @@ -47,6 +47,7 @@ type StorageConfigType = { requestTraceHeader?: string requestEtagHeaders: string[] responseSMaxAge: number + anonKey: string serviceKey: string storageBackendType: StorageBackendType tenantId: string @@ -98,6 +99,9 @@ type StorageConfigType = { defaultMetricsEnabled: boolean s3ProtocolPrefix: string s3ProtocolEnforceRegion: boolean + s3ProtocolAccessKeyId?: string + s3ProtocolAccessKeySecret?: string + s3ProtocolAllowServiceKeyAsSecret: boolean } function getOptionalConfigFromEnv(key: string, fallback?: string): string | undefined { @@ -185,6 +189,7 @@ export function getConfig(options?: { reload?: boolean }): StorageConfigType { // Auth serviceKey: getOptionalConfigFromEnv('SERVICE_KEY') || '', + anonKey: getOptionalConfigFromEnv('ANON_KEY') || '', encryptionKey: getOptionalConfigFromEnv('AUTH_ENCRYPTION_KEY', 'ENCRYPTION_KEY') || '', jwtSecret: getOptionalIfMultitenantConfigFromEnv('AUTH_JWT_SECRET', 'PGRST_JWT_SECRET') || '', @@ -220,6 +225,10 @@ export function getConfig(options?: { reload?: boolean }): StorageConfigType { // S3 Protocol s3ProtocolPrefix: getOptionalConfigFromEnv('S3_PROTOCOL_PREFIX') || '', s3ProtocolEnforceRegion: getOptionalConfigFromEnv('S3_PROTOCOL_ENFORCE_REGION') === 'true', + s3ProtocolAccessKeyId: getOptionalConfigFromEnv('S3_PROTOCOL_ACCESS_KEY_ID'), + s3ProtocolAccessKeySecret: getOptionalConfigFromEnv('S3_PROTOCOL_ACCESS_KEY_SECRET'), + s3ProtocolAllowServiceKeyAsSecret: + getOptionalConfigFromEnv('S3_PROTOCOL_ALLOWS_SERVICE_KEY_AS_SECRET') === 'true', // Storage storageBackendType: getOptionalConfigFromEnv('STORAGE_BACKEND') as StorageBackendType, @@ -372,6 +381,13 @@ export function getConfig(options?: { reload?: boolean }): StorageConfigType { }) } + if (!config.isMultitenant && !config.anonKey) { + config.anonKey = jwt.sign({ role: config.dbAnonRole }, config.jwtSecret, { + expiresIn: '10y', + algorithm: config.jwtAlgorithm as jwt.Algorithm, + }) + } + const jwtJWKS = getOptionalConfigFromEnv('JWT_JWKS') || null if (jwtJWKS) { diff --git a/src/database/connection.ts b/src/database/connection.ts index e13c6529..5ca5ea28 100644 --- a/src/database/connection.ts +++ b/src/database/connection.ts @@ -99,7 +99,7 @@ export class TenantConnection { max: isExternalPool ? 1 : options.maxConnections || databaseMaxConnections, acquireTimeoutMillis: databaseConnectionTimeout, idleTimeoutMillis: isExternalPool - ? options.idleTimeoutMillis || 1000000 + ? options.idleTimeoutMillis || 100 : databaseFreePoolAfterInactivity, reapIntervalMillis: isExternalPool ? 
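/* Descriptive note on the pool settings below: with an external pooler
   (e.g. pgbouncer) the real pooling happens upstream, so idle connections
   are released after ~100ms and reaped every 50ms rather than kept warm
   locally; without one, the configured databaseFreePoolAfterInactivity
   applies. */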
50 : undefined, }, diff --git a/src/database/tenant.ts b/src/database/tenant.ts index 24f6070b..c96ed49a 100644 --- a/src/database/tenant.ts +++ b/src/database/tenant.ts @@ -1,11 +1,14 @@ +import crypto from 'crypto' import { getConfig } from '../config' -import { decrypt, verifyJWT } from '../auth' +import { decrypt, encrypt, verifyJWT } from '../auth' import { multitenantKnex } from './multitenant-db' import { ERRORS } from '../storage' import { JwtPayload } from 'jsonwebtoken' import { PubSubAdapter } from '../pubsub' -import { lastMigrationName } from './migrations' import { createMutexByKey } from '../concurrency' +import { LRUCache } from 'lru-cache' +import objectSizeOf from 'object-sizeof' +import { lastMigrationName } from './migrations/migrate' interface TenantConfig { anonKey?: string @@ -43,10 +46,26 @@ export enum TenantMigrationStatus { FAILED_STALE = 'FAILED_STALE', } -const { isMultitenant, dbServiceRole, serviceKey, jwtSecret, jwtJWKS } = getConfig() +interface S3Credentials { + accessKey: string + secretKey: string + scopes: { role: string; sub?: string; [key: string]: any } +} + +const { isMultitenant, dbServiceRole, serviceKey, jwtSecret, jwtJWKS, dbSuperUser } = getConfig() const tenantConfigCache = new Map<string, TenantConfig>() + +const tenantS3CredentialsCache = new LRUCache<string, S3Credentials>({ + maxSize: 1024 * 1024 * 50, // 50MB + ttl: 1000 * 60 * 60, // 1 hour + sizeCalculation: (value) => objectSizeOf(value), + updateAgeOnGet: true, + allowStale: false, +}) + const tenantMutex = createMutexByKey() +const s3CredentialsMutex = createMutexByKey() const singleTenantServiceKey: | { @@ -274,12 +293,139 @@ export async function getFeatures(tenantId: string): Promise { } const TENANTS_UPDATE_CHANNEL = 'tenants_update' +const TENANTS_S3_CREDENTIALS_UPDATE_CHANNEL = 'tenants_s3_credentials_update' /** * Keeps the in memory config cache up to date */ export async function listenForTenantUpdate(pubSub: PubSubAdapter): Promise<void> { - await pubSub.subscribe(TENANTS_UPDATE_CHANNEL, (tenantId) => { - tenantConfigCache.delete(tenantId) + await pubSub.subscribe(TENANTS_UPDATE_CHANNEL, (cacheKey) => { + tenantConfigCache.delete(cacheKey) + }) + + await pubSub.subscribe(TENANTS_S3_CREDENTIALS_UPDATE_CHANNEL, (cacheKey) => { + tenantS3CredentialsCache.delete(cacheKey) + }) +} + +/** + * Create S3 Credential for a tenant + * @param tenantId + * @param data + */ +export async function createS3Credentials( + tenantId: string, + data: { description: string; scopes?: S3Credentials['scopes'] } +) { + const existingCount = await countS3Credentials(tenantId) + + if (existingCount >= 50) { + throw ERRORS.MaximumCredentialsLimit() + } + + const secretAccessKeyId = crypto.randomBytes(32).toString('hex').slice(0, 32) + const secretAccessKey = crypto.randomBytes(64).toString('hex').slice(0, 64) + + if (data.scopes) { + delete data.scopes.iss + delete data.scopes.issuer + delete data.scopes.exp + delete data.scopes.iat + } + + data.scopes = { + ...(data.scopes || {}), + role: data.scopes?.role ?? 
dbServiceRole, + issuer: `supabase.storage.${tenantId}`, + sub: data.scopes?.sub || dbServiceRole, + } + + const credentials = await multitenantKnex + .table('tenants_s3_credentials') + .insert({ + tenant_id: tenantId, + description: data.description, + access_key: secretAccessKeyId, + secret_key: encrypt(secretAccessKey), + scopes: JSON.stringify(data.scopes), + }) + .returning('id') + + return { + id: credentials[0].id, + access_key: secretAccessKeyId, + secret_key: secretAccessKey, + } +} + +export async function getS3CredentialsByAccessKey( + tenantId: string, + accessKey: string +): Promise<S3Credentials> { + const cacheKey = `${tenantId}:${accessKey}` + const cachedCredentials = tenantS3CredentialsCache.get(cacheKey) + + if (cachedCredentials) { + return cachedCredentials + } + + return s3CredentialsMutex(cacheKey, async () => { + const cachedCredentials = tenantS3CredentialsCache.get(cacheKey) + + if (cachedCredentials) { + return cachedCredentials + } + + const data = await multitenantKnex + .table('tenants_s3_credentials') + .select('access_key', 'secret_key', 'scopes') + .where('tenant_id', tenantId) + .where('access_key', accessKey) + .first() + + if (!data) { + throw ERRORS.MissingS3Credentials() + } + + const secretKey = decrypt(data.secret_key) + + tenantS3CredentialsCache.set(cacheKey, { + accessKey: data.access_key, + secretKey: secretKey, + scopes: data.scopes, + }) + + return { + accessKey: data.access_key, + secretKey: secretKey, + scopes: data.scopes, + } }) } + +export function deleteS3Credential(tenantId: string, accessKey: string) { + return multitenantKnex + .table('tenants_s3_credentials') + .where('tenant_id', tenantId) + .where('access_key', accessKey) + .delete() + .returning('id') + .first() +} + +export function listS3Credentials(tenantId: string) { + return multitenantKnex + .table('tenants_s3_credentials') + .select('id', 'description', 'created_at') + .where('tenant_id', tenantId) +} + +export async function countS3Credentials(tenantId: string) { + const data = await multitenantKnex + .table('tenants_s3_credentials') + .count('id') + .where('tenant_id', tenantId) + .first() + + return Number((data as any)?.count || 0) +} diff --git a/src/http/plugins/db.ts b/src/http/plugins/db.ts index 7fbc49b1..bad637eb 100644 --- a/src/http/plugins/db.ts +++ b/src/http/plugins/db.ts @@ -29,7 +29,8 @@ export const db = fastifyPlugin(async (fastify) => { fastify.addHook('preHandler', async (request) => { const adminUser = await getServiceKeyUser(request.tenantId) - const userPayload = await verifyJWT<{ role?: string }>(request.jwt, adminUser.jwtSecret) + const userPayload = + request.jwtPayload ?? 
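/* Reuse the payload already verified upstream (by the jwt or signature-v4
   plugin); only verify the raw JWT here when no earlier plugin has. */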
(await verifyJWT<{ role?: string }>(request.jwt, adminUser.jwtSecret)) request.db = await getPostgresConnection({ user: { diff --git a/src/http/plugins/jwt.ts b/src/http/plugins/jwt.ts index 40bb934f..631a4894 100644 --- a/src/http/plugins/jwt.ts +++ b/src/http/plugins/jwt.ts @@ -1,11 +1,13 @@ import fastifyPlugin from 'fastify-plugin' import { createResponse } from '../generic-routes' -import { getOwner } from '../../auth' -import { getJwtSecret } from '../../database/tenant' +import { verifyJWT } from '../../auth' +import { getJwtSecret } from '../../database' +import { JwtPayload } from 'jsonwebtoken' declare module 'fastify' { interface FastifyRequest { jwt: string + jwtPayload?: JwtPayload & { role?: string } owner?: string } } @@ -14,14 +16,17 @@ const BEARER = /^Bearer\s+/i export const jwt = fastifyPlugin(async (fastify) => { fastify.decorateRequest('jwt', '') + fastify.decorateRequest('jwtPayload', undefined) + fastify.addHook('preHandler', async (request, reply) => { request.jwt = (request.headers.authorization || '').replace(BEARER, '') const { secret, jwks } = await getJwtSecret(request.tenantId) try { - const owner = await getOwner(request.jwt, secret, jwks || null) - request.owner = owner + const payload = await verifyJWT(request.jwt, secret, jwks || null) + request.jwtPayload = payload + request.owner = payload.sub } catch (err: any) { request.log.error({ error: err }, 'unable to get owner') return reply.status(400).send(createResponse(err.message, '400', err.message)) diff --git a/src/http/plugins/signature-v4.ts b/src/http/plugins/signature-v4.ts index 025ee495..11c61830 100644 --- a/src/http/plugins/signature-v4.ts +++ b/src/http/plugins/signature-v4.ts @@ -1,43 +1,186 @@ -import { FastifyInstance, FastifyReply, FastifyRequest } from 'fastify' +import { FastifyInstance, FastifyRequest } from 'fastify' import fastifyPlugin from 'fastify-plugin' -import { getConfig } from '../../config' -import { getServiceKeyUser } from '../../database' -import { SignatureV4 } from '../../storage/protocols/s3' +import { getS3CredentialsByAccessKey, getTenantConfig } from '../../database' +import { ClientSignature, SignatureV4 } from '../../storage/protocols/s3' import { ERRORS } from '../../storage' +import { signJWT, verifyJWT } from '../../auth' +import { getConfig } from '../../config' -const { storageS3Region, s3ProtocolPrefix, s3ProtocolEnforceRegion } = getConfig() +const { + jwtSecret, + jwtJWKS, + serviceKey, + storageS3Region, + isMultitenant, + s3ProtocolAllowServiceKeyAsSecret, + s3ProtocolPrefix, + s3ProtocolEnforceRegion, + s3ProtocolAccessKeyId, + s3ProtocolAccessKeySecret, +} = getConfig() export const signatureV4 = fastifyPlugin(async function (fastify: FastifyInstance) { - fastify.addHook('preHandler', async (request: FastifyRequest, reply: FastifyReply) => { - const awsRegion = storageS3Region - const awsService = 's3' + fastify.addHook('preHandler', async (request: FastifyRequest) => { + const clientCredentials = SignatureV4.parseAuthorizationHeader( + request.headers.authorization as string + ) - const serviceKey = await getServiceKeyUser(request.tenantId) - const signatureV4 = new SignatureV4({ - region: awsRegion, - service: awsService, - tenantId: request.tenantId, - secretKey: serviceKey.jwt, - enforceRegion: s3ProtocolEnforceRegion, + const sessionToken = request.headers['x-amz-security-token'] as string | undefined + + const { + signature: signatureV4, + scopes, + token, + } = await createSignature(request.tenantId, clientCredentials, { + sessionToken: 
sessionToken, + allowServiceKeyAsSecret: s3ProtocolAllowServiceKeyAsSecret, }) - try { - const isVerified = signatureV4.verify({ - url: request.url, - body: request.body as string | ReadableStream | Buffer, - headers: request.headers as Record<string, any>, - method: request.method, - query: request.query as Record<string, string>, - prefix: s3ProtocolPrefix, - }) + const isVerified = signatureV4.verify({ + url: request.url, + body: request.body as string | ReadableStream | Buffer, + headers: request.headers as Record<string, any>, + method: request.method, + query: request.query as Record<string, string>, + prefix: s3ProtocolPrefix, + credentials: clientCredentials.credentials, + signature: clientCredentials.signature, + signedHeaders: clientCredentials.signedHeaders, + }) + + if (!isVerified && !sessionToken) { + throw ERRORS.SignatureDoesNotMatch( + 'The request signature we calculated does not match the signature you provided. Check your key and signing method.' + ) + } + + if (!isVerified && sessionToken) { + throw ERRORS.SignatureDoesNotMatch( + 'The request signature we calculated does not match the signature you provided. Check your credentials. ' + + 'The session token should be a valid JWT token' + ) + } + + const jwtSecrets = { + jwtSecret: jwtSecret, + jwks: jwtJWKS, + } + + if (isMultitenant) { + const tenant = await getTenantConfig(request.tenantId) + jwtSecrets.jwtSecret = tenant.jwtSecret + jwtSecrets.jwks = tenant.jwks || undefined + } - if (!isVerified) { - throw ERRORS.AccessDenied('Invalid Signature') - } + // session token authentication: validate the incoming session + if (sessionToken) { + const payload = await verifyJWT(sessionToken, jwtSecrets.jwtSecret, jwtSecrets.jwks) + request.jwt = sessionToken + request.jwtPayload = payload + request.owner = payload.sub + return + } - request.jwt = serviceKey.jwt - } catch (e) { - throw e + if (token) { + const payload = await verifyJWT(token, jwtSecrets.jwtSecret, jwtSecrets.jwks) + request.jwt = token + request.jwtPayload = payload + request.owner = payload.sub + return } + + if (!scopes) { + throw ERRORS.AccessDenied('Missing scopes') + } + + const jwt = await signJWT(scopes, jwtSecrets.jwtSecret, '5m') + + request.jwt = jwt + request.jwtPayload = scopes + request.owner = scopes.sub }) }) + +async function createSignature( + tenantId: string, + clientSignature: ClientSignature, + session?: { sessionToken?: string; allowServiceKeyAsSecret: boolean } +) { + const awsRegion = storageS3Region + const awsService = 's3' + + if (session?.sessionToken) { + const tenant = await getTenantConfig(tenantId) + + if (!tenant.anonKey) { + throw ERRORS.AccessDenied('Missing tenant anon key') + } + + const signature = new SignatureV4({ + enforceRegion: s3ProtocolEnforceRegion, + credentials: { + accessKey: tenantId, + secretKey: tenant.anonKey, + region: awsRegion, + service: awsService, + }, + }) + + return { signature, scopes: undefined } + } + + if (session?.allowServiceKeyAsSecret && clientSignature.credentials.accessKey === tenantId) { + const tenantServiceKey = isMultitenant ? 
(await getTenantConfig(tenantId)).serviceKey : serviceKey + + const signature = new SignatureV4({ + enforceRegion: s3ProtocolEnforceRegion, + credentials: { + accessKey: tenantId, + secretKey: tenantServiceKey, + region: awsRegion, + service: awsService, + }, + }) + + return { signature, scopes: undefined, token: tenantServiceKey } + } + + if (isMultitenant) { + const credential = await getS3CredentialsByAccessKey( + tenantId, + clientSignature.credentials.accessKey + ) + + const signature = new SignatureV4({ + enforceRegion: s3ProtocolEnforceRegion, + credentials: { + accessKey: credential.accessKey, + secretKey: credential.secretKey, + region: awsRegion, + service: awsService, + }, + }) + + return { signature, scopes: credential.scopes, token: undefined } + } + + if (!s3ProtocolAccessKeyId || !s3ProtocolAccessKeySecret) { + throw ERRORS.AccessDenied( + 'Missing S3 Protocol Access Key ID or Secret Key Environment variables' + ) + } + + const signature = new SignatureV4({ + enforceRegion: s3ProtocolEnforceRegion, + credentials: { + accessKey: s3ProtocolAccessKeyId, + secretKey: s3ProtocolAccessKeySecret, + region: awsRegion, + service: awsService, + }, + }) + + return { signature, scopes: undefined, token: serviceKey } +} diff --git a/src/http/routes/admin/index.ts b/src/http/routes/admin/index.ts index 49971460..5a35c323 100644 --- a/src/http/routes/admin/index.ts +++ b/src/http/routes/admin/index.ts @@ -1,2 +1,3 @@ export { default as migrations } from './migrations' export { default as tenants } from './tenants' +export { default as s3Credentials } from './s3' diff --git a/src/http/routes/admin/s3.ts b/src/http/routes/admin/s3.ts index e69de29b..985079fe 100644 --- a/src/http/routes/admin/s3.ts +++ b/src/http/routes/admin/s3.ts @@ -0,0 +1,128 @@ +import { FastifyInstance, RequestGenericInterface } from 'fastify' +import apiKey from '../../plugins/apikey' +import { createS3Credentials, deleteS3Credential, listS3Credentials } from '../../../database' +import { FromSchema } from 'json-schema-to-ts' + +const createCredentialsSchema = { + description: 'Create S3 Credentials', + params: { + type: 'object', + properties: { + tenantId: { type: 'string' }, + }, + required: ['tenantId'], + }, + body: { + type: 'object', + properties: { + description: { type: 'string', minLength: 3, maxLength: 2000 }, + scopes: { + type: 'object', + properties: { + role: { type: 'string' }, + sub: { type: 'string' }, + }, + required: ['role'], + additionalProperties: true, + }, + }, + required: ['description'], + }, +} as const + +const deleteCredentialsSchema = { + description: 'Delete S3 Credentials', + params: { + type: 'object', + properties: { + tenantId: { type: 'string' }, + }, + required: ['tenantId'], + }, + body: { + type: 'object', + properties: { + access_key: { type: 'string' }, + }, + required: ['access_key'], + }, +} as const + +const listCredentialsSchema = { + description: 'List S3 Credentials', + params: { + type: 'object', + properties: { + tenantId: { type: 'string' }, + }, + required: ['tenantId'], + }, +} as const + +interface CreateCredentialsRequest extends RequestGenericInterface { + Body: FromSchema<typeof createCredentialsSchema.body> + Params: { + tenantId: string + } +} + +interface DeleteCredentialsRequest extends RequestGenericInterface { + Body: FromSchema<typeof deleteCredentialsSchema.body> + Params: { + tenantId: string + } +} + +interface ListCredentialsRequest extends RequestGenericInterface { + Params: { + tenantId: string + } +} + +export default async function routes(fastify: FastifyInstance) { + fastify.register(apiKey) + + fastify.post<CreateCredentialsRequest>( 
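/* A hedged usage sketch for these admin routes (the port and the apikey
   header name are assumptions for a local multitenant setup; what is
   grounded in this patch is that admin-app.ts mounts the router under the
   `s3` prefix and that the apikey plugin guards it):

     curl -X POST http://localhost:5001/s3/<tenantId>/credentials \
       -H "apikey: $ADMIN_API_KEY" \
       -H "Content-Type: application/json" \
       -d '{"description":"ci-user","scopes":{"role":"service_role"}}'

   The secret_key is returned only by this create call; it is stored
   encrypted, and the list route exposes just id, description and
   created_at. */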
'/:tenantId/credentials', + { + schema: createCredentialsSchema, + }, + async (req, reply) => { + const credentials = await createS3Credentials(req.params.tenantId, { + description: req.body.description, + scopes: req.body.scopes, + }) + + reply.status(201).send({ + id: credentials.id, + access_key: credentials.access_key, + secret_key: credentials.secret_key, + description: req.body.description, + }) + } + ) + + fastify.get<ListCredentialsRequest>( + '/:tenantId/credentials', + { schema: listCredentialsSchema }, + async (req, reply) => { + const credentials = await listS3Credentials(req.params.tenantId) + + return reply.send(credentials) + } + ) + + fastify.delete<DeleteCredentialsRequest>( + '/:tenantId/credentials', + { schema: deleteCredentialsSchema }, + async (req, reply) => { + const deleted = await deleteS3Credential(req.params.tenantId, req.body.access_key) + + if (!deleted) { + return reply.code(404).send({ message: 'Credentials not found' }) + } + + return reply.code(204).send() + } + ) +} diff --git a/src/http/routes/object/getObjectInfo.ts b/src/http/routes/object/getObjectInfo.ts index 45c8bc75..31c23389 100644 --- a/src/http/routes/object/getObjectInfo.ts +++ b/src/http/routes/object/getObjectInfo.ts @@ -38,6 +38,9 @@ async function requestHandler( let obj: Obj if (publicRoute) { + await request.storage.asSuperUser().findBucket(bucketName, 'id', { + isPublic: true, + }) obj = await request.storage.asSuperUser().from(bucketName).findObject(objectName, 'id,version') } else { obj = await request.storage.from(bucketName).findObject(objectName, 'id,version') diff --git a/src/http/routes/s3/commands/abort-multipart-upload.ts b/src/http/routes/s3/commands/abort-multipart-upload.ts index 03d26457..4eafbedc 100644 --- a/src/http/routes/s3/commands/abort-multipart-upload.ts +++ b/src/http/routes/s3/commands/abort-multipart-upload.ts @@ -22,7 +22,7 @@ const AbortMultiPartUploadInput = { export default function AbortMultiPartUpload(s3Router: S3Router) { s3Router.delete('/:Bucket/*?uploadId', AbortMultiPartUploadInput, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.abortMultipartUpload({ Bucket: req.Params.Bucket, diff --git a/src/http/routes/s3/commands/complete-multipart-upload.ts b/src/http/routes/s3/commands/complete-multipart-upload.ts index 317287c7..c1489efd 100644 --- a/src/http/routes/s3/commands/complete-multipart-upload.ts +++ b/src/http/routes/s3/commands/complete-multipart-upload.ts @@ -49,7 +49,7 @@ const CompletedMultipartUpload = { export default function CompleteMultipartUpload(s3Router: S3Router) { s3Router.post('/:Bucket/*?uploadId', CompletedMultipartUpload, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.completeMultiPartUpload({ Bucket: req.Params.Bucket, Key: req.Params['*'], diff --git a/src/http/routes/s3/commands/copy-object.ts b/src/http/routes/s3/commands/copy-object.ts index 6806d64d..c3279e04 100644 --- a/src/http/routes/s3/commands/copy-object.ts +++ b/src/http/routes/s3/commands/copy-object.ts @@ -30,7 +30,7 @@ const CopyObjectInput = { export default function CopyObject(s3Router: S3Router) { s3Router.put('/:Bucket/*|x-amz-copy-source', CopyObjectInput, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return 
s3Protocol.copyObject({ Bucket: req.Params.Bucket, diff --git a/src/http/routes/s3/commands/create-bucket.ts b/src/http/routes/s3/commands/create-bucket.ts index da987581..249b6d8e 100644 --- a/src/http/routes/s3/commands/create-bucket.ts +++ b/src/http/routes/s3/commands/create-bucket.ts @@ -20,7 +20,7 @@ const CreateBucketInput = { export default function CreateBucket(s3Router: S3Router) { s3Router.put('/:Bucket', CreateBucketInput, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.createBucket(req.Params.Bucket, req.Headers?.['x-amz-acl'] === 'public-read') }) diff --git a/src/http/routes/s3/commands/create-multipart-upload.ts b/src/http/routes/s3/commands/create-multipart-upload.ts index a4c37839..87766327 100644 --- a/src/http/routes/s3/commands/create-multipart-upload.ts +++ b/src/http/routes/s3/commands/create-multipart-upload.ts @@ -32,7 +32,7 @@ const CreateMultiPartUploadInput = { export default function CreateMultipartUpload(s3Router: S3Router) { s3Router.post('/:Bucket/*?uploads', CreateMultiPartUploadInput, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.createMultiPartUpload({ Bucket: req.Params.Bucket, diff --git a/src/http/routes/s3/commands/delete-bucket.ts b/src/http/routes/s3/commands/delete-bucket.ts index 5e432608..207f4b1d 100644 --- a/src/http/routes/s3/commands/delete-bucket.ts +++ b/src/http/routes/s3/commands/delete-bucket.ts @@ -14,7 +14,7 @@ const DeleteBucketInput = { export default function DeleteBucket(s3Router: S3Router) { s3Router.delete('/:Bucket', DeleteBucketInput, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.deleteBucket(req.Params.Bucket) }) diff --git a/src/http/routes/s3/commands/delete-object.ts b/src/http/routes/s3/commands/delete-object.ts index a20da69b..8843355b 100644 --- a/src/http/routes/s3/commands/delete-object.ts +++ b/src/http/routes/s3/commands/delete-object.ts @@ -57,7 +57,7 @@ const DeleteObjectsInput = { export default function DeleteObject(s3Router: S3Router) { // Delete multiple objects s3Router.post('/:Bucket?delete', DeleteObjectsInput, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.deleteObjects({ Bucket: req.Params.Bucket, @@ -69,7 +69,7 @@ export default function DeleteObject(s3Router: S3Router) { // Delete single object s3Router.delete('/:Bucket/*', DeleteObjectInput, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.deleteObject({ Bucket: req.Params.Bucket, diff --git a/src/http/routes/s3/commands/get-bucket.ts b/src/http/routes/s3/commands/get-bucket.ts index 5563b11f..ff39f41c 100644 --- a/src/http/routes/s3/commands/get-bucket.ts +++ b/src/http/routes/s3/commands/get-bucket.ts @@ -37,14 +37,14 @@ const GetBucketVersioningInput = { export default function GetBucket(s3Router: S3Router) { s3Router.get('/:Bucket?location', GetBucketLocationInput, async (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol 
= new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) await ctx.storage.findBucket(req.Params.Bucket) return s3Protocol.getBucketLocation() }) s3Router.get('/:Bucket?versioning', GetBucketVersioningInput, async (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) await ctx.storage.findBucket(req.Params.Bucket) return s3Protocol.getBucketVersioning() diff --git a/src/http/routes/s3/commands/get-object.ts b/src/http/routes/s3/commands/get-object.ts index e5d42bb1..bc4c0ae1 100644 --- a/src/http/routes/s3/commands/get-object.ts +++ b/src/http/routes/s3/commands/get-object.ts @@ -24,7 +24,7 @@ const ListObjectsInput = { export default function ListObjects(s3Router: S3Router) { s3Router.get('/:Bucket/*', ListObjectsInput, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) const ifModifiedSince = req.Headers?.['if-modified-since'] return s3Protocol.getObject({ diff --git a/src/http/routes/s3/commands/head-bucket.ts b/src/http/routes/s3/commands/head-bucket.ts index f00d54e9..dab350cb 100644 --- a/src/http/routes/s3/commands/head-bucket.ts +++ b/src/http/routes/s3/commands/head-bucket.ts @@ -13,7 +13,7 @@ const HeadBucketInput = { export default function HeadBucket(s3Router: S3Router) { s3Router.head('/:Bucket', HeadBucketInput, async (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.headBucket(req.Params.Bucket) }) diff --git a/src/http/routes/s3/commands/head-object.ts b/src/http/routes/s3/commands/head-object.ts index 5acd6f5e..e10b7051 100644 --- a/src/http/routes/s3/commands/head-object.ts +++ b/src/http/routes/s3/commands/head-object.ts @@ -15,7 +15,7 @@ const HeadObjectInput = { export default function HeadObject(s3Router: S3Router) { s3Router.head('/:Bucket/*', HeadObjectInput, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.headObject({ Bucket: req.Params.Bucket, diff --git a/src/http/routes/s3/commands/list-buckets.ts b/src/http/routes/s3/commands/list-buckets.ts index 07c89b94..5e4c7852 100644 --- a/src/http/routes/s3/commands/list-buckets.ts +++ b/src/http/routes/s3/commands/list-buckets.ts @@ -7,7 +7,7 @@ const ListObjectsInput = { export default function ListBuckets(s3Router: S3Router) { s3Router.get('/', ListObjectsInput, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.listBuckets() }) } diff --git a/src/http/routes/s3/commands/list-multipart-uploads.ts b/src/http/routes/s3/commands/list-multipart-uploads.ts index 64c0e5de..4b6a8a15 100644 --- a/src/http/routes/s3/commands/list-multipart-uploads.ts +++ b/src/http/routes/s3/commands/list-multipart-uploads.ts @@ -27,7 +27,7 @@ const ListObjectsInput = { export default function ListMultipartUploads(s3Router: S3Router) { s3Router.get('/:Bucket?uploads', ListObjectsInput, async (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.listMultipartUploads({ Bucket: 
req.Params.Bucket, diff --git a/src/http/routes/s3/commands/list-objects.ts b/src/http/routes/s3/commands/list-objects.ts index 3aba5c7d..6d76fec6 100644 --- a/src/http/routes/s3/commands/list-objects.ts +++ b/src/http/routes/s3/commands/list-objects.ts @@ -48,7 +48,7 @@ const ListObjectsInput = { export default function ListObjects(s3Router: S3Router) { s3Router.get('/:Bucket?list-type=2', ListObjectsV2Input, async (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.listObjectsV2({ Bucket: req.Params.Bucket, @@ -62,7 +62,7 @@ }) s3Router.get('/:Bucket', ListObjectsInput, async (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.listObjects({ Bucket: req.Params.Bucket, diff --git a/src/http/routes/s3/commands/list-parts.ts b/src/http/routes/s3/commands/list-parts.ts index 8bfae893..eb6835b3 100644 --- a/src/http/routes/s3/commands/list-parts.ts +++ b/src/http/routes/s3/commands/list-parts.ts @@ -24,7 +24,7 @@ const ListPartsInput = { export default function ListParts(s3Router: S3Router) { s3Router.get('/:Bucket/*?uploadId', ListPartsInput, async (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.listParts({ Bucket: req.Params.Bucket, diff --git a/src/http/routes/s3/commands/upload-part-copy.ts b/src/http/routes/s3/commands/upload-part-copy.ts index 09f60db2..266d89c5 100644 --- a/src/http/routes/s3/commands/upload-part-copy.ts +++ b/src/http/routes/s3/commands/upload-part-copy.ts @@ -39,7 +39,7 @@ export default function UploadPartCopy(s3Router: S3Router) { '/:Bucket/*?partNumber&uploadId|x-amz-copy-source', UploadPartCopyInput, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.uploadPartCopy({ Bucket: req.Params.Bucket, diff --git a/src/http/routes/s3/commands/upload-part.ts b/src/http/routes/s3/commands/upload-part.ts index 255f69c8..1da356f9 100644 --- a/src/http/routes/s3/commands/upload-part.ts +++ b/src/http/routes/s3/commands/upload-part.ts @@ -59,7 +59,7 @@ const UploadPartInput = { export default function UploadPart(s3Router: S3Router) { s3Router.put('/:Bucket/*?uploadId&partNumber', UploadPartInput, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.uploadPart({ Body: (req.raw as any).raw, @@ -72,7 +72,7 @@ }) s3Router.put('/:Bucket/*', PutObjectInput, (req, ctx) => { - const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId) + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) return s3Protocol.putObject({ Body: req.raw, Bucket: req.Params.Bucket, diff --git a/src/http/routes/s3/error-handler.ts b/src/http/routes/s3/error-handler.ts index b63604f5..2cb00574 100644 --- a/src/http/routes/s3/error-handler.ts +++ b/src/http/routes/s3/error-handler.ts @@ -12,6 +12,8 @@ export const s3ErrorHandler = ( ) => { request.executionError = error + request.log.error({ error }, 's3 error handler') + const resource = 
diff --git a/src/http/routes/s3/error-handler.ts b/src/http/routes/s3/error-handler.ts
index b63604f5..2cb00574 100644
--- a/src/http/routes/s3/error-handler.ts
+++ b/src/http/routes/s3/error-handler.ts
@@ -12,6 +12,8 @@ export const s3ErrorHandler = (
 ) => {
   request.executionError = error
 
+  request.log.error(error)
+
   const resource = request.url
     .split('?')[0]
     .replace('/s3', '')
diff --git a/src/http/routes/s3/index.ts b/src/http/routes/s3/index.ts
index 0fb09d52..3204a732 100644
--- a/src/http/routes/s3/index.ts
+++ b/src/http/routes/s3/index.ts
@@ -93,6 +93,7 @@ export default async function routes(fastify: FastifyInstance) {
         {
           storage: req.storage,
           tenantId: req.tenantId,
+          owner: req.owner,
         }
       )
diff --git a/src/http/routes/s3/router.ts b/src/http/routes/s3/router.ts
index 4aa230d2..3b7da705 100644
--- a/src/http/routes/s3/router.ts
+++ b/src/http/routes/s3/router.ts
@@ -20,7 +20,7 @@ import { default as UploadPartCopy } from './commands/upload-part-copy'
 
 import { FromSchema, JSONSchema } from 'json-schema-to-ts'
 
-export type Context = { storage: Storage; tenantId: string }
+export type Context = { storage: Storage; tenantId: string; owner?: string }
 
 export type S3Router = Router
 
 const s3Commands = [
diff --git a/src/server.ts b/src/server.ts
index aa792516..3c34f980 100644
--- a/src/server.ts
+++ b/src/server.ts
@@ -81,7 +81,10 @@ const exposeDocs = true
       type: 'adminAppStartError',
       error: err,
     })
-    process.exit(1)
+
+    if (process.env.NODE_ENV !== 'production') {
+      process.exit(1)
+    }
   }
 }
diff --git a/src/storage/backend/adapter.ts b/src/storage/backend/adapter.ts
index b3cf0118..6f8b09a7 100644
--- a/src/storage/backend/adapter.ts
+++ b/src/storage/backend/adapter.ts
@@ -191,7 +191,12 @@ export abstract class StorageBackendAdapter {
     throw new Error('not implemented')
   }
 
-  async abortMultipartUpload(bucketName: string, key: string, uploadId: string): Promise<void> {
+  async abortMultipartUpload(
+    bucketName: string,
+    key: string,
+    uploadId: string,
+    version?: string
+  ): Promise<void> {
     throw new Error('not implemented')
   }
diff --git a/src/storage/database/adapter.ts b/src/storage/database/adapter.ts
index ac3dee93..593cbf64 100644
--- a/src/storage/database/adapter.ts
+++ b/src/storage/database/adapter.ts
@@ -147,7 +147,8 @@ export interface Database {
     bucketId: string,
     objectName: string,
     version: string,
-    signature: string
+    signature: string,
+    owner?: string
   ): Promise<S3MultipartUpload | undefined>
 
   findMultipartUpload(
@@ -158,7 +159,7 @@ export interface Database {
 
   updateMultipartUploadProgress(
     uploadId: string,
-    progress: BigInt,
+    progress: number,
     signature: string
   ): Promise<void>
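The new optional version argument lets a concrete backend address the versioned key that multipart uploads are written under. A sketch of what an S3-backed override might look like (the withOptionalVersion helper is hypothetical, shown only to illustrate how the version could map into the key):

import { S3Client, AbortMultipartUploadCommand } from '@aws-sdk/client-s3'

// Hypothetical helper: derive the versioned key when a version is present.
const withOptionalVersion = (key: string, version?: string): string =>
  version ? `${key}/${version}` : key

async function abortMultipartUpload(
  client: S3Client,
  bucketName: string,
  key: string,
  uploadId: string,
  version?: string
): Promise<void> {
  await client.send(
    new AbortMultipartUploadCommand({
      Bucket: bucketName,
      Key: withOptionalVersion(key, version),
      UploadId: uploadId,
    })
  )
}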
diff --git a/src/storage/database/knex.ts b/src/storage/database/knex.ts
index 874fd56a..7d345498 100644
--- a/src/storage/database/knex.ts
+++ b/src/storage/database/knex.ts
@@ -260,7 +260,7 @@ export class StorageKnexDB implements Database {
     return this.runQuery('ListMultipartsUploads', async (knex) => {
       if (!options?.deltimeter) {
         const query = knex
-          .table('_s3_multipart_uploads')
+          .table('s3_multipart_uploads')
           .select(['id', 'key', 'created_at'])
           .where('bucket_id', bucketId)
           .limit(options?.maxKeys || 100)
@@ -560,17 +562,19 @@ export class StorageKnexDB implements Database {
     bucketId: string,
     objectName: string,
     version: string,
-    signature: string
+    signature: string,
+    owner?: string
   ) {
     return this.runQuery('CreateMultipartUpload', async (knex) => {
       const multipart = await knex
-        .table('_s3_multipart_uploads')
+        .table('s3_multipart_uploads')
         .insert({
           id: uploadId,
           bucket_id: bucketId,
           key: objectName,
           version,
           upload_signature: signature,
+          owner_id: owner,
         })
         .returning('*')
@@ -581,7 +583,7 @@ export class StorageKnexDB implements Database {
   async findMultipartUpload(uploadId: string, columns = 'id', options?: { forUpdate?: boolean }) {
     const multiPart = await this.runQuery('FindMultipartUpload', async (knex) => {
       const query = knex
-        .from('_s3_multipart_uploads')
+        .from('s3_multipart_uploads')
         .select(columns.split(','))
         .where('id', uploadId)
@@ -597,10 +599,10 @@ export class StorageKnexDB implements Database {
     return multiPart
   }
 
-  async updateMultipartUploadProgress(uploadId: string, progress: BigInt, signature: string) {
+  async updateMultipartUploadProgress(uploadId: string, progress: number, signature: string) {
     return this.runQuery('UpdateMultipartUploadProgress', async (knex) => {
       await knex
-        .from('_s3_multipart_uploads')
+        .from('s3_multipart_uploads')
         .update({ in_progress_size: progress, upload_signature: signature })
         .where('id', uploadId)
     })
@@ -608,14 +610,14 @@ export class StorageKnexDB implements Database {
 
   async deleteMultipartUpload(uploadId: string) {
     return this.runQuery('DeleteMultipartUpload', async (knex) => {
-      await knex.from('_s3_multipart_uploads').delete().where('id', uploadId)
+      await knex.from('s3_multipart_uploads').delete().where('id', uploadId)
     })
   }
 
   async insertUploadPart(part: S3PartUpload) {
     return this.runQuery('InsertUploadPart', async (knex) => {
       const storedPart = await knex
-        .table('_s3_multipart_uploads_parts')
+        .table('s3_multipart_uploads_parts')
         .insert(part)
         .returning('*')
 
@@ -629,7 +631,7 @@ export class StorageKnexDB implements Database {
   ): Promise<S3PartUpload[]> {
     return this.runQuery('ListParts', async (knex) => {
       const query = knex
-        .from('_s3_multipart_uploads_parts')
+        .from('s3_multipart_uploads_parts')
        .select('etag', 'part_number', 'size', 'upload_id', 'created_at')
        .where('upload_id', uploadId)
        .orderBy('part_number')
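All multipart queries now target s3_multipart_uploads and s3_multipart_uploads_parts without the leading underscore. A self-contained sketch of the renamed access pattern (the connection settings are placeholders, not part of this patch):

import knex from 'knex'

const db = knex({ client: 'pg', connection: process.env.DATABASE_URL })

async function findUpload(uploadId: string) {
  return db
    .from('s3_multipart_uploads') // previously '_s3_multipart_uploads'
    .select('id', 'version', 'in_progress_size', 'owner_id')
    .where('id', uploadId)
    .first()
}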
diff --git a/src/storage/errors.ts b/src/storage/errors.ts
index 77e430f2..68bfef50 100644
--- a/src/storage/errors.ts
+++ b/src/storage/errors.ts
@@ -27,6 +27,7 @@ export enum ErrorCode {
   BucketAlreadyExists = 'BucketAlreadyExists',
   DatabaseTimeout = 'DatabaseTimeout',
   InvalidSignature = 'InvalidSignature',
+  SignatureDoesNotMatch = 'SignatureDoesNotMatch',
   AccessDenied = 'AccessDenied',
   ResourceLocked = 'ResourceLocked',
   DatabaseError = 'DatabaseError',
@@ -35,6 +36,10 @@ export enum ErrorCode {
   InvalidUploadSignature = 'InvalidUploadSignature',
   LockTimeout = 'LockTimeout',
   S3Error = 'S3Error',
+  S3InvalidAccessKeyId = 'InvalidAccessKeyId',
+  S3MaximumCredentialsLimit = 'MaximumCredentialsLimit',
+  InvalidChecksum = 'InvalidChecksum',
+  MissingPart = 'MissingPart',
   SlowDown = 'SlowDown',
 }
 
@@ -122,6 +127,13 @@ export const ERRORS = {
       originalError: e,
     }),
 
+  SignatureDoesNotMatch: (message?: string) =>
+    new StorageBackendError({
+      code: ErrorCode.SignatureDoesNotMatch,
+      httpStatusCode: 403,
+      message: message || 'Signature does not match',
+    }),
+
   InvalidSignature: (message?: string, e?: Error) =>
     new StorageBackendError({
       code: ErrorCode.InvalidSignature,
@@ -313,6 +325,34 @@ export const ERRORS = {
       message: 'acquiring lock timeout',
       originalError: err,
     }),
+
+  MissingS3Credentials: () =>
+    new StorageBackendError({
+      code: ErrorCode.S3InvalidAccessKeyId,
+      httpStatusCode: 403,
+      message: 'The Access Key Id you provided does not exist in our records.',
+    }),
+
+  MaximumCredentialsLimit: () =>
+    new StorageBackendError({
+      code: ErrorCode.S3MaximumCredentialsLimit,
+      httpStatusCode: 400,
+      message: 'You have reached the maximum number of credentials allowed',
+    }),
+
+  InvalidChecksum: (message: string) =>
+    new StorageBackendError({
+      code: ErrorCode.InvalidChecksum,
+      httpStatusCode: 400,
+      message: message,
+    }),
+
+  MissingPart: (partNumber: number, uploadId: string) =>
+    new StorageBackendError({
+      code: ErrorCode.MissingPart,
+      httpStatusCode: 400,
+      message: `Part ${partNumber} is missing for upload id ${uploadId}`,
+    }),
 }
 
 export function isStorageError(errorType: ErrorCode, error: any): error is StorageBackendError {
diff --git a/src/storage/limits.ts b/src/storage/limits.ts
index 2a58f0f6..692e61f1 100644
--- a/src/storage/limits.ts
+++ b/src/storage/limits.ts
@@ -7,12 +7,21 @@ const { isMultitenant, imageTransformationEnabled } = getConfig()
 /**
  * Get the maximum file size for a specific project
  * @param tenantId
+ * @param maxUpperLimit
  */
-export async function getFileSizeLimit(tenantId: string): Promise<number> {
+export async function getFileSizeLimit(
+  tenantId: string,
+  maxUpperLimit?: number | null
+): Promise<number> {
   let { uploadFileSizeLimit } = getConfig()
   if (isMultitenant) {
     uploadFileSizeLimit = await getFileSizeLimitForTenant(tenantId)
   }
+
+  if (maxUpperLimit) {
+    return Math.min(uploadFileSizeLimit, maxUpperLimit)
+  }
+
   return uploadFileSizeLimit
 }
diff --git a/src/storage/protocols/s3/byte-limit-stream.ts b/src/storage/protocols/s3/byte-limit-stream.ts
index 24bc1e65..c14008c3 100644
--- a/src/storage/protocols/s3/byte-limit-stream.ts
+++ b/src/storage/protocols/s3/byte-limit-stream.ts
@@ -2,14 +2,14 @@ import { Transform, TransformCallback } from 'stream'
 import { ERRORS } from '../../errors'
 
 export class ByteLimitTransformStream extends Transform {
-  bytesProcessed = BigInt(0)
+  bytesProcessed = 0
 
-  constructor(private readonly limit: bigint) {
+  constructor(private readonly limit: number) {
     super()
   }
 
   _transform(chunk: Buffer, encoding: BufferEncoding, callback: TransformCallback) {
-    this.bytesProcessed += BigInt(chunk.length)
+    this.bytesProcessed += chunk.length
 
     if (this.bytesProcessed > this.limit) {
       callback(ERRORS.EntityTooLarge())
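With the limit now a plain number, callers can pass ContentLength straight through, and Number.MAX_SAFE_INTEGER comfortably covers any realistic part size. A usage sketch (the import path is an assumption):

import { pipeline } from 'stream/promises'
import { Readable, Writable } from 'stream'
import { ByteLimitTransformStream } from './byte-limit-stream'

// EntityTooLarge surfaces as a rejected pipeline promise once the running
// byte count crosses the limit.
async function copyWithLimit(source: Readable, sink: Writable, limit: number): Promise<void> {
  await pipeline(source, new ByteLimitTransformStream(limit), sink)
}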
diff --git a/src/storage/protocols/s3/s3-handler.ts b/src/storage/protocols/s3/s3-handler.ts
index 80329173..19f58cba 100644
--- a/src/storage/protocols/s3/s3-handler.ts
+++ b/src/storage/protocols/s3/s3-handler.ts
@@ -1,6 +1,6 @@
 import { Storage } from '../../storage'
 import { getConfig } from '../../../config'
-import { getMaxFileSizeLimit, Uploader } from '../../uploader'
+import { Uploader } from '../../uploader'
 import {
   AbortMultipartUploadCommandInput,
   CompleteMultipartUploadCommandInput,
@@ -21,7 +21,7 @@ import {
 } from '@aws-sdk/client-s3'
 import { PassThrough, Readable } from 'stream'
 import stream from 'stream/promises'
-import { mustBeValidBucketName, mustBeValidKey } from '../../limits'
+import { getFileSizeLimit, mustBeValidBucketName, mustBeValidKey } from '../../limits'
 import { ERRORS } from '../../errors'
 import { S3MultipartUpload, Obj } from '../../schemas'
 import { decrypt, encrypt } from '../../../auth'
@@ -30,7 +30,11 @@ import { ByteLimitTransformStream } from './byte-limit-stream'
 const { storageS3Region, storageS3Bucket } = getConfig()
 
 export class S3ProtocolHandler {
-  constructor(protected readonly storage: Storage, protected readonly tenantId: string) {}
+  constructor(
+    protected readonly storage: Storage,
+    protected readonly tenantId: string,
+    protected readonly owner?: string
+  ) {}
 
   /**
    * Returns the versioning state of a bucket.
@@ -103,6 +107,7 @@ export class S3ProtocolHandler {
       name: Bucket,
       id: Bucket,
       public: isPublic,
+      owner: this.owner,
     })
 
     return {
@@ -319,7 +324,7 @@ export class S3ProtocolHandler {
 
     const limit = maxKeys || 200
 
-    const multipartUploads = await this.storage.db.asSuperUser().listMultipartUploads(bucket, {
+    const multipartUploads = await this.storage.db.listMultipartUploads(bucket, {
       prefix,
       deltimeter: delimiter,
       maxKeys: limit + 1,
@@ -431,13 +436,18 @@ export class S3ProtocolHandler {
     mustBeValidBucketName(Bucket)
     mustBeValidKey(Key)
 
-    await this.storage.asSuperUser().findBucket(Bucket, 'id')
+    const bucket = await this.storage.asSuperUser().findBucket(Bucket, 'id,allowed_mime_types')
+
+    if (command.ContentType && bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) {
+      uploader.validateMimeType(command.ContentType, bucket.allowed_mime_types || [])
+    }
 
     // Create Multi Part Upload
     const version = await uploader.prepareUpload({
       bucketId: command.Bucket as string,
       objectName: command.Key as string,
       isUpsert: true,
+      owner: this.owner,
     })
 
     const uploadId = await this.storage.backend.createMultiPartUpload(
@@ -452,10 +462,10 @@ export class S3ProtocolHandler {
       throw ERRORS.InvalidUploadId(uploadId)
     }
 
-    const signature = this.uploadSignature({ in_progress_size: BigInt(0) })
+    const signature = this.uploadSignature({ in_progress_size: 0 })
     await this.storage.db
       .asSuperUser()
-      .createMultipartUpload(uploadId, Bucket, Key, version, signature)
+      .createMultipartUpload(uploadId, Bucket, Key, version, signature, this.owner)
 
     return {
       responseBody: {
@@ -487,6 +497,7 @@ export class S3ProtocolHandler {
       bucketId: Bucket as string,
       objectName: Key as string,
       isUpsert: true,
+      owner: this.owner,
     })
 
     const multiPartUpload = await this.storage.db
@@ -495,6 +506,19 @@ export class S3ProtocolHandler {
 
     const parts = command.MultipartUpload?.Parts || []
 
+    if (parts.length === 0) {
+      const allParts = await this.storage.db.asSuperUser().listParts(UploadId, {
+        maxParts: 1000,
+      })
+
+      parts.push(
+        ...allParts.map((part) => ({
+          PartNumber: part.part_number,
+          ETag: part.etag,
+        }))
+      )
+    }
+
     const resp = await this.storage.backend.completeMultipartUpload(
       storageS3Bucket,
       `${this.tenantId}/${Bucket}/${Key}`,
@@ -516,15 +540,16 @@ export class S3ProtocolHandler {
       isUpsert: true,
       isMultipart: false,
       objectMetadata: metadata,
+      owner: this.owner,
     })
 
     await this.storage.db.asSuperUser().deleteMultipartUpload(UploadId)
 
     return {
       responseBody: {
-        CompleteMultipartUpload: {
-          Location: resp.location,
-          Bucket: resp.bucket,
+        CompleteMultipartUploadResult: {
+          Location: `${Bucket}/${Key}`,
+          Bucket: Bucket,
           Key: Key,
           ChecksumCRC32: resp.ChecksumCRC32,
           ChecksumCRC32C: resp.ChecksumCRC32C,
@@ -559,7 +584,8 @@ export class S3ProtocolHandler {
 
     const bucket = await this.storage.asSuperUser().findBucket(Bucket, 'file_size_limit')
 
-    const maxFileSize = await getMaxFileSizeLimit(this.storage.db.tenantId, bucket?.file_size_limit)
+    const maxFileSize = await getFileSizeLimit(this.storage.db.tenantId, bucket?.file_size_limit)
+
     const multipart = await this.shouldAllowPartUpload(UploadId, ContentLength, maxFileSize)
 
     const proxy = new PassThrough()
@@ -581,7 +607,7 @@ export class S3ProtocolHandler {
     try {
       const uploadPart = await stream.pipeline(
         body,
-        new ByteLimitTransformStream(BigInt(ContentLength)),
+        new ByteLimitTransformStream(ContentLength),
         async (stream) => {
           return this.storage.backend.uploadPart(
             storageS3Bucket,
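The upload signature drops BigInt as well: progress is serialized as a decimal string, encrypted, and parsed back with parseInt. A round-trip sketch, assuming encrypt/decrypt from src/auth are symmetric (as their use in this file implies):

import { decrypt, encrypt } from '../../../auth'

function uploadSignature(inProgressSize: number): string {
  return encrypt('progress:' + inProgressSize.toString())
}

function decryptUploadSignature(signature: string): { progress: number } {
  const [, value] = decrypt(signature).split(':')
  return { progress: parseInt(value, 10) }
}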
@@ -602,6 +628,7 @@ export class S3ProtocolHandler {
         etag: uploadPart.ETag || '',
         key: Key as string,
         bucket_id: Bucket,
+        owner_id: this.owner,
       })
 
       return {
@@ -615,7 +642,7 @@ export class S3ProtocolHandler {
           forUpdate: true,
         })
 
-        const diff = BigInt(multipart.in_progress_size) - BigInt(ContentLength)
+        const diff = multipart.in_progress_size - ContentLength
         const signature = this.uploadSignature({ in_progress_size: diff })
         await db.updateMultipartUploadProgress(UploadId, diff, signature)
       })
@@ -645,11 +672,18 @@ export class S3ProtocolHandler {
       command.Key += '.emptyFolderPlaceholder'
     }
 
+    const bucket = await this.storage
+      .asSuperUser()
+      .findBucket(command.Bucket, 'id,file_size_limit,allowed_mime_types')
+
     const upload = await uploader.upload(command.Body as any, {
       bucketId: command.Bucket as string,
       objectName: command.Key as string,
+      owner: this.owner,
       isUpsert: true,
       isMultipart: false,
+      fileSizeLimit: bucket.file_size_limit,
+      allowedMimeTypes: bucket.allowed_mime_types,
     })
 
     return {
@@ -673,10 +707,15 @@ export class S3ProtocolHandler {
       throw ERRORS.InvalidUploadId()
     }
 
+    const multipart = await this.storage.db
+      .asSuperUser()
+      .findMultipartUpload(UploadId, 'id,version')
+
     await this.storage.backend.abortMultipartUpload(
       storageS3Bucket,
       `${this.tenantId}/${Bucket}/${Key}`,
-      UploadId
+      UploadId,
+      multipart.version
     )
 
     await this.storage.db.asSuperUser().deleteMultipartUpload(UploadId)
@@ -716,7 +755,7 @@ export class S3ProtocolHandler {
         'content-length': (object.metadata?.size as string) || '',
         'content-type': (object.metadata?.contentType as string) || '',
         etag: (object.metadata?.eTag as string) || '',
-        'last-modified': object.updated_at ? new Date(object.updated_at).toISOString() || '' : '',
+        'last-modified': object.updated_at ? new Date(object.updated_at).toUTCString() || '' : '',
       },
     }
   }
@@ -1032,7 +1071,7 @@ export class S3ProtocolHandler {
         db.findBucketById(sourceBucketName, 'id'),
       ])
     })
-    const maxFileSize = await getMaxFileSizeLimit(
+    const maxFileSize = await getFileSizeLimit(
       this.storage.db.tenantId,
       destinationBucket?.file_size_limit
     )
@@ -1056,6 +1095,7 @@ export class S3ProtocolHandler {
       etag: uploadPart.eTag || '',
       key: Key as string,
       bucket_id: Bucket,
+      owner_id: this.owner,
    })
 
    return {
@@ -1066,7 +1106,7 @@ export class S3ProtocolHandler {
     }
   }
 
-  protected uploadSignature({ in_progress_size }: { in_progress_size: BigInt }) {
+  protected uploadSignature({ in_progress_size }: { in_progress_size: number }) {
     return `${encrypt('progress:' + in_progress_size.toString())}`
   }
 
@@ -1075,7 +1115,7 @@ export class S3ProtocolHandler {
     const [, value] = originalSignature.split(':')
 
     return {
-      progress: BigInt(value),
+      progress: parseInt(value, 10),
     }
   }
 
@@ -1095,11 +1135,11 @@ export class S3ProtocolHandler {
 
     const { progress } = this.decryptUploadSignature(multipart.upload_signature)
 
-    if (progress !== BigInt(multipart.in_progress_size)) {
+    if (progress !== multipart.in_progress_size) {
       throw ERRORS.InvalidUploadSignature()
     }
 
-    const currentProgress = BigInt(multipart.in_progress_size) + BigInt(contentLength)
+    const currentProgress = multipart.in_progress_size + contentLength
 
     if (currentProgress > maxFileSize) {
       throw ERRORS.EntityTooLarge()
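The last-modified switch from toISOString to toUTCString matters because HTTP date headers use the IMF-fixdate format, which S3 clients generally expect:

const updatedAt = new Date('2024-04-06T10:55:30Z')
updatedAt.toISOString() // '2024-04-06T10:55:30.000Z' (not a valid HTTP date)
updatedAt.toUTCString() // 'Sat, 06 Apr 2024 10:55:30 GMT' (valid Last-Modified)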
diff --git a/src/storage/protocols/s3/signature-v4.ts b/src/storage/protocols/s3/signature-v4.ts
index e626d449..771e752d 100644
--- a/src/storage/protocols/s3/signature-v4.ts
+++ b/src/storage/protocols/s3/signature-v4.ts
@@ -2,11 +2,14 @@ import crypto from 'crypto'
 import { ERRORS } from '../../errors'
 
 interface SignatureV4Options {
-  region: string
-  service: string
-  tenantId: string
-  secretKey: string
   enforceRegion: boolean
+  credentials: Omit<Credentials, 'secretKey'> & { secretKey: string }
+}
+
+export interface ClientSignature {
+  credentials: Credentials
+  signature: string
+  signedHeaders: string[]
 }
 
 interface SignatureRequest {
@@ -16,6 +19,16 @@ interface SignatureRequest {
   method: string
   query?: Record<string, string>
   prefix?: string
+  credentials: Credentials
+  signature: string
+  signedHeaders: string[]
+}
+
+interface Credentials {
+  accessKey: string
+  shortDate: string
+  region: string
+  service: string
 }
 
 /**
@@ -41,7 +54,57 @@ export const ALWAYS_UNSIGNABLE_HEADERS = {
 }
 
 export class SignatureV4 {
-  constructor(protected readonly options: SignatureV4Options) {}
+  public readonly serverCredentials: SignatureV4Options['credentials']
+  enforceRegion: boolean
+
+  constructor(options: SignatureV4Options) {
+    this.serverCredentials = options.credentials
+    this.enforceRegion = options.enforceRegion
+  }
+
+  static parseAuthorizationHeader(header: string) {
+    const parts = header.split(' ')
+    if (parts[0] !== 'AWS4-HMAC-SHA256') {
+      throw ERRORS.InvalidSignature('Unsupported authorization type')
+    }
+
+    const params = header
+      .replace('AWS4-HMAC-SHA256 ', '')
+      .split(',')
+      .reduce((values, value) => {
+        const [k, v] = value.split('=')
+        values.set(k.trim(), v)
+        return values
+      }, new Map<string, string>())
+
+    const credentialPart = params.get('Credential')
+    const signedHeadersPart = params.get('SignedHeaders')
+    const signaturePart = params.get('Signature')
+
+    if (!credentialPart || !signedHeadersPart || !signaturePart) {
+      throw ERRORS.InvalidSignature('Invalid signature format')
+    }
+    const signedHeaders = signedHeadersPart.split(';') || []
+
+    const credentialsPart = credentialPart.split('/')
+
+    if (credentialsPart.length !== 5) {
+      throw ERRORS.InvalidSignature('Invalid credentials')
+    }
+
+    const [accessKey, shortDate, region, service] = credentialsPart
+
+    return {
+      credentials: {
+        accessKey,
+        shortDate,
+        region,
+        service,
+      },
+      signedHeaders,
+      signature: signaturePart,
+    }
+  }
 
   verify(request: SignatureRequest) {
     const { clientSignature, serverSignature } = this.sign(request)
@@ -55,21 +118,16 @@ export class SignatureV4 {
       throw ERRORS.AccessDenied('Missing authorization header')
     }
 
-    const { credentials, signedHeaders, signature } =
-      this.parseAuthorizationHeader(authorizationHeader)
-
-    // Extract additional information from the credentials
-    const [accessKey, shortDate, region, service] = credentials.split('/')
-
-    if (accessKey !== this.options.tenantId) {
+    if (request.credentials.accessKey !== this.serverCredentials.accessKey) {
       throw ERRORS.AccessDenied('Invalid Access Key')
     }
 
     // Ensure the region and service match the expected values
-    if (this.options.enforceRegion && region !== this.options.region) {
+    if (this.enforceRegion && request.credentials.region !== this.serverCredentials.region) {
       throw ERRORS.AccessDenied('Invalid Region')
     }
 
-    if (service !== this.options.service) {
+    if (request.credentials.service !== this.serverCredentials.service) {
       throw ERRORS.AccessDenied('Invalid Service')
     }
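A usage sketch for the new static parser (the header value is illustrative, the import path assumed):

import { SignatureV4 } from './signature-v4'

const header =
  'AWS4-HMAC-SHA256 Credential=AKIAEXAMPLE/20240406/us-east-1/s3/aws4_request,' +
  'SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=1b2c3d4e'

const client = SignatureV4.parseAuthorizationHeader(header)
// client.credentials -> { accessKey: 'AKIAEXAMPLE', shortDate: '20240406',
//                         region: 'us-east-1', service: 's3' }
// client.signedHeaders -> ['host', 'x-amz-content-sha256', 'x-amz-date']
// client.signature -> '1b2c3d4e'
// The caller threads these fields into the SignatureRequest that verify() receives.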
@@ -83,33 +141,35 @@ export class SignatureV4 {
     // - us-east-1
     // - the region set in the env
     if (
-      !this.options.enforceRegion &&
-      !['auto', 'us-east-1', this.options.region, ''].includes(region)
+      !this.enforceRegion &&
+      !['auto', 'us-east-1', this.serverCredentials.region, ''].includes(request.credentials.region)
     ) {
       throw ERRORS.AccessDenied('Invalid Region')
     }
 
-    const selectedRegion = this.options.enforceRegion ? this.options.region : region
+    const selectedRegion = this.enforceRegion ? this.serverCredentials.region : request.credentials.region
 
     // Construct the Canonical Request and String to Sign
-    const canonicalRequest = this.constructCanonicalRequest(request, signedHeaders)
+    const canonicalRequest = this.constructCanonicalRequest(request, request.signedHeaders)
     const stringToSign = this.constructStringToSign(
       longDate,
-      shortDate,
+      request.credentials.shortDate,
       selectedRegion,
-      this.options.service,
+      this.serverCredentials.service,
       canonicalRequest
     )
 
     const signingKey = this.signingKey(
-      this.options.secretKey,
-      shortDate,
+      this.serverCredentials.secretKey,
+      request.credentials.shortDate,
       selectedRegion,
-      this.options.service
+      this.serverCredentials.service
     )
 
     return {
-      clientSignature: signature,
+      clientSignature: request.signature,
       serverSignature: this.hmac(signingKey, stringToSign).toString('hex'),
     }
   }
@@ -174,33 +234,6 @@ export class SignatureV4 {
     return this.hmac(kService, 'aws4_request')
   }
 
-  protected parseAuthorizationHeader(header: string) {
-    const parts = header.split(' ')
-    if (parts[0] !== 'AWS4-HMAC-SHA256') {
-      throw ERRORS.InvalidSignature('Unsupported authorization type')
-    }
-
-    const params = header
-      .replace('AWS4-HMAC-SHA256 ', '')
-      .split(',')
-      .reduce((values, value) => {
-        const [k, v] = value.split('=')
-        values.set(k.trim(), v)
-        return values
-      }, new Map<string, string>())
-
-    const credentialPart = params.get('Credential')
-    const signedHeadersPart = params.get('SignedHeaders')
-    const signaturePart = params.get('Signature')
-
-    if (!credentialPart || !signedHeadersPart || !signaturePart) {
-      throw ERRORS.InvalidSignature('Invalid signature format')
-    }
-    const signedHeaders = signedHeadersPart.split(';') || []
-
-    return { credentials: credentialPart, signedHeaders, signature: signaturePart }
-  }
-
   protected constructCanonicalRequest(request: SignatureRequest, signedHeaders: string[]) {
     const method = request.method
     const canonicalUri = new URL(`http://localhost:8080${request.prefix || ''}${request.url}`)
diff --git a/src/storage/schemas/multipart.ts b/src/storage/schemas/multipart.ts
index bbd25c00..df0796aa 100644
--- a/src/storage/schemas/multipart.ts
+++ b/src/storage/schemas/multipart.ts
@@ -10,6 +10,7 @@ export const multipartUploadSchema = {
     in_progress_size: { type: 'number' },
     upload_signature: { type: 'string' },
     version: { type: 'string' },
+    owner_id: { type: 'string' },
     created_at: { type: 'string' },
   },
   required: [
@@ -38,6 +39,7 @@ export const uploadPartSchema = {
     version: { type: 'string' },
     created_at: { type: 'string' },
     etag: { type: 'string' },
+    owner_id: { type: 'string' },
   },
   required: ['upload_id', 'bucket_id', 'key', 'version', 'part_number'],
   additionalProperties: false,
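owner_id joins both schemas but stays out of the required lists, so rows written before this change still validate. A sketch of the record shape the handler now works with (field names mirror the schema above; the optionality is the point):

interface S3MultipartUploadRecord {
  id: string
  bucket_id: string
  key: string
  in_progress_size: number
  upload_signature: string
  version: string
  created_at: string
  owner_id?: string // absent on rows created before this change
}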
diff --git a/src/storage/uploader.ts b/src/storage/uploader.ts
index 0dbd393c..9979598e 100644
--- a/src/storage/uploader.ts
+++ b/src/storage/uploader.ts
@@ -237,7 +237,10 @@ export class Uploader {
     options?: Pick<UploadObjectOptions, 'fileSizeLimit' | 'allowedMimeTypes'>
   ) {
     const contentType = request.headers['content-type']
-    const fileSizeLimit = await getMaxFileSizeLimit(this.db.tenantId, options?.fileSizeLimit)
+    const fileSizeLimit = await getStandardMaxFileSizeLimit(
+      this.db.tenantId,
+      options?.fileSizeLimit
+    )
 
     let body: NodeJS.ReadableStream
     let mimeType: string
@@ -282,23 +285,12 @@ export class Uploader {
       isTruncated,
     }
   }
-
-  protected async getFileSizeLimit(tenantId: string, bucketSizeLimit?: number | null) {
-    let globalFileSizeLimit = await getFileSizeLimit(tenantId)
-
-    if (typeof bucketSizeLimit === 'number') {
-      globalFileSizeLimit = Math.min(bucketSizeLimit, globalFileSizeLimit)
-    }
-
-    if (uploadFileSizeLimitStandard && uploadFileSizeLimitStandard > 0) {
-      globalFileSizeLimit = Math.min(uploadFileSizeLimitStandard, globalFileSizeLimit)
-    }
-
-    return globalFileSizeLimit
-  }
 }
 
-export async function getMaxFileSizeLimit(tenantId: string, bucketSizeLimit?: number | null) {
+export async function getStandardMaxFileSizeLimit(
+  tenantId: string,
+  bucketSizeLimit?: number | null
+) {
   let globalFileSizeLimit = await getFileSizeLimit(tenantId)
 
   if (typeof bucketSizeLimit === 'number') {
diff --git a/src/test/s3-protocol.test.ts b/src/test/s3-protocol.test.ts
index 45a4a5d9..5b9fa313 100644
--- a/src/test/s3-protocol.test.ts
+++ b/src/test/s3-protocol.test.ts
@@ -29,7 +29,14 @@ import { Upload } from '@aws-sdk/lib-storage'
 import { ReadableStreamBuffer } from 'stream-buffers'
 import { randomUUID } from 'crypto'
 
-const { tenantId, serviceKey, storageS3Region } = getConfig()
+const {
+  s3ProtocolAccessKeySecret,
+  s3ProtocolAccessKeyId,
+  storageS3Region,
+  tenantId,
+  anonKey,
+  serviceKey,
+} = getConfig()
 
 async function createBucket(client: S3Client, name?: string, publicRead = true) {
   let bucketName: string
@@ -79,8 +86,8 @@ describe('S3 Protocol', () => {
       forcePathStyle: true,
       region: storageS3Region,
       credentials: {
-        accessKeyId: tenantId,
-        secretAccessKey: serviceKey,
+        accessKeyId: s3ProtocolAccessKeyId!,
+        secretAccessKey: s3ProtocolAccessKeySecret!,
       },
     })
 
diff --git a/src/test/tenant.test.ts b/src/test/tenant.test.ts
index b00ff587..e53353b9 100644
--- a/src/test/tenant.test.ts
+++ b/src/test/tenant.test.ts
@@ -16,7 +16,7 @@ const payload = {
   serviceKey: 'd',
   jwks: { keys: [] },
   migrationStatus: 'COMPLETED',
-  migrationVersion: 's3-multipart-uploads-big-ints',
+  migrationVersion: 'create-index-path-tokens',
   features: {
     imageTransformation: {
       enabled: true,
@@ -34,7 +34,7 @@ const payload2 = {
   serviceKey: 'h',
   jwks: null,
   migrationStatus: 'COMPLETED',
-  migrationVersion: 's3-multipart-uploads-big-ints',
+  migrationVersion: 'create-index-path-tokens',
   features: {
     imageTransformation: {
       enabled: false,