Skip to content

Commit

Permalink
feat: reworked access credentials
Browse files Browse the repository at this point in the history
  • Loading branch information
fenos committed Apr 7, 2024
1 parent 280f3e2 commit bade70f
Show file tree
Hide file tree
Showing 55 changed files with 1,019 additions and 281 deletions.
9 changes: 9 additions & 0 deletions .env.sample
Original file line number Diff line number Diff line change
Expand Up @@ -65,10 +65,19 @@ UPLOAD_FILE_SIZE_LIMIT=524288000
UPLOAD_FILE_SIZE_LIMIT_STANDARD=52428800
UPLOAD_SIGNED_URL_EXPIRATION_TIME=60

#######################################
# TUS Protocol
#######################################
TUS_URL_PATH=/upload/resumable
TUS_URL_EXPIRY_MS=3600000
TUS_PART_SIZE=50

#######################################
# S3 Protocol
#######################################
S3_PROTOCOL_ACCESS_KEY_ID=b585f311d839730f8a980a3457be2787
S3_PROTOCOL_ACCESS_KEY_SECRET=67d161a7a8a46a24a17a75b26e7724f11d56b8d49a119227c66b13b6595601fb
S3_PROTOCOL_ALLOWS_SERVICE_KEY_AS_SECRET=false

#######################################
# Storage Backend Driver
Expand Down
4 changes: 4 additions & 0 deletions .env.test.sample
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,10 @@ AUTHENTICATED_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhd
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoiYW5vbiIsImlhdCI6MTYxMzUzMTk4NSwiZXhwIjoxOTI5MTA3OTg1fQ.mqfi__KnQB4v6PkIjkhzfwWrYyF94MEbSC6LnuvVniE
SERVICE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoic2VydmljZV9yb2xlIiwiaWF0IjoxNjEzNTMxOTg1LCJleHAiOjE5MjkxMDc5ODV9.th84OKK0Iz8QchDyXZRrojmKSEZ-OuitQm_5DvLiSIc

S3_PROTOCOL_ACCESS_KEY_ID=b585f311d839730f8a980a3457be2787
S3_PROTOCOL_ACCESS_KEY_SECRET=67d161a7a8a46a24a17a75b26e7724f11d56b8d49a119227c66b13b6595601fb
S3_PROTOCOL_ALLOWS_SERVICE_KEY_AS_SECRET=false

TENANT_ID=bjhaohmqunupljrqypxz
DEFAULT_METRICS_ENABLED=false
PG_QUEUE_ENABLE=false
Expand Down
4 changes: 4 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,10 @@ jobs:
ENABLE_DEFAULT_METRICS: false
PG_QUEUE_ENABLE: false
MULTI_TENANT: false
S3_PROTOCOL_ACCESS_KEY_ID: ${{ secrets.TENANT_ID }}
S3_PROTOCOL_ACCESS_KEY_SECRET: ${{ secrets.SERVICE_KEY }}
S3_PROTOCOL_ALLOWS_SERVICE_KEY_AS_SECRET: true


- name: Upload coverage results to Coveralls
uses: coverallsapp/github-action@master
Expand Down
50 changes: 45 additions & 5 deletions migrations/multitenant/0008-tenants-s3-credentials.sql
Original file line number Diff line number Diff line change
@@ -1,6 +1,46 @@
-- 0008: tenant migration bookkeeping + per-tenant S3-protocol credentials.

-- Bookkeeping columns used to track each tenant's migration progress.
ALTER TABLE tenants ADD COLUMN IF NOT EXISTS cursor_id SERIAL;
ALTER TABLE tenants ADD COLUMN IF NOT EXISTS created_at TIMESTAMP DEFAULT current_timestamp;
ALTER TABLE tenants ADD COLUMN IF NOT EXISTS migrations_version text null DEFAULT null;
ALTER TABLE tenants ADD COLUMN IF NOT EXISTS migrations_status text null DEFAULT null;

create index if not exists tenants_migration_version_idx on tenants(cursor_id, migrations_version, migrations_status);

-- Access credentials for the S3-compatible protocol, scoped per tenant.
-- NOTE(review): tenant_id is nullable — confirm credentials without a tenant are intended.
CREATE TABLE IF NOT EXISTS tenants_s3_credentials (
    id UUID PRIMARY KEY default gen_random_uuid(),
    description text NOT NULL,
    tenant_id text REFERENCES tenants(id) ON DELETE CASCADE,
    access_key text NOT NULL,
    secret_key text NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS tenants_s3_credentials_tenant_id_idx ON tenants_s3_credentials(tenant_id);
-- Credential lookups are by (tenant, access_key); the pair must be unique.
CREATE UNIQUE INDEX IF NOT EXISTS tenants_s3_credentials_access_key_idx ON tenants_s3_credentials(tenant_id, access_key);


-- Broadcast credential changes so listeners can invalidate cached keys.
-- Payload is the JSON string '"<id>:<access_key>"'.
-- CREATE OR REPLACE keeps this migration rerunnable, consistent with the
-- IF NOT EXISTS guards used for the tables and indexes above.
CREATE OR REPLACE FUNCTION tenants_s3_credentials_update_notify_trigger ()
RETURNS TRIGGER
AS $$
BEGIN
PERFORM
pg_notify('tenants_s3_credentials_update', '"' || NEW.id || ':' || NEW.access_key || '"');
RETURN NULL;
END;
$$
LANGUAGE plpgsql;

-- Same channel on delete, keyed by the OLD row.
CREATE OR REPLACE FUNCTION tenants_s3_credentials_delete_notify_trigger ()
RETURNS TRIGGER
AS $$
BEGIN
PERFORM
pg_notify('tenants_s3_credentials_update', '"' || OLD.id || ':' || OLD.access_key || '"');
RETURN NULL;
END;
$$
LANGUAGE plpgsql;

-- Drop-then-create keeps trigger creation idempotent on reruns as well.
DROP TRIGGER IF EXISTS tenants_s3_credentials_update_notify_trigger ON tenants_s3_credentials;
CREATE TRIGGER tenants_s3_credentials_update_notify_trigger
AFTER UPDATE ON tenants_s3_credentials
FOR EACH ROW
EXECUTE PROCEDURE tenants_s3_credentials_update_notify_trigger ();

DROP TRIGGER IF EXISTS tenants_s3_credentials_delete_notify_trigger ON tenants_s3_credentials;
CREATE TRIGGER tenants_s3_credentials_delete_notify_trigger
AFTER DELETE ON tenants_s3_credentials
FOR EACH ROW
EXECUTE PROCEDURE tenants_s3_credentials_delete_notify_trigger ();
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@


-- Optional per-credential authorization scopes (empty object = unrestricted default).
-- NOTE(review): column type is `json`; `jsonb` would allow indexing and rejects
-- duplicate keys — confirm whether readers rely on key order / exact text before changing.
ALTER TABLE tenants_s3_credentials ADD COLUMN scopes json NOT NULL DEFAULT '{}';
30 changes: 16 additions & 14 deletions migrations/tenant/0021-s3-multipart-uploads.sql
Original file line number Diff line number Diff line change
@@ -1,28 +1,29 @@

-- Bookkeeping for in-flight S3-protocol multipart uploads.
-- (New-state DDL only: the stale pre-rename `storage._s3_*` statements shown
-- in the diff were removed by this commit and must not be applied.)
CREATE TABLE IF NOT EXISTS storage.s3_multipart_uploads (
    id text PRIMARY KEY,
    in_progress_size int NOT NULL default 0,
    upload_signature text NOT NULL,
    bucket_id text NOT NULL references storage.buckets(id),
    key text COLLATE "C" NOT NULL,
    version text NOT NULL,
    owner_id text NULL,
    created_at timestamptz NOT NULL default now()
);

-- Individual parts of a multipart upload; removed with their parent upload.
CREATE TABLE IF NOT EXISTS storage.s3_multipart_uploads_parts (
    id uuid PRIMARY KEY default gen_random_uuid(),
    upload_id text NOT NULL references storage.s3_multipart_uploads(id) ON DELETE CASCADE,
    size int NOT NULL default 0,
    part_number int NOT NULL,
    bucket_id text NOT NULL references storage.buckets(id),
    key text COLLATE "C" NOT NULL,
    etag text NOT NULL,
    owner_id text NULL,
    version text NOT NULL,
    created_at timestamptz NOT NULL default now()
);

-- Supports ListMultipartUploads-style listing: per bucket, ordered by key then age.
CREATE INDEX IF NOT EXISTS idx_multipart_uploads_list
    ON storage.s3_multipart_uploads (bucket_id, (key COLLATE "C"), created_at ASC);

CREATE OR REPLACE FUNCTION storage.list_multipart_uploads_with_delimiter(bucket_id text, prefix_param text, delimiter_param text, max_keys integer default 100, next_key_token text DEFAULT '', next_upload_token text default '')
RETURNS TABLE (key text, id text, created_at timestamptz) AS
Expand All @@ -38,7 +39,7 @@ BEGIN
key
END AS key, id, created_at
FROM
storage._s3_multipart_uploads
storage.s3_multipart_uploads
WHERE
bucket_id = $5 AND
key ILIKE $1 || ''%'' AND
Expand All @@ -65,18 +66,19 @@ BEGIN
END;
$$ LANGUAGE plpgsql;

-- New-state statements only: the pre-rename `storage._s3_*` duplicates from the
-- diff were removed by this commit.
ALTER TABLE storage.s3_multipart_uploads ENABLE ROW LEVEL SECURITY;
ALTER TABLE storage.s3_multipart_uploads_parts ENABLE ROW LEVEL SECURITY;

-- Do not expose these tables to PostgREST: strip all default privileges from the
-- anon/authenticated roles, then grant the service role full access.
DO $$
DECLARE
    anon_role text = COALESCE(current_setting('storage.anon_role', true), 'anon');
    authenticated_role text = COALESCE(current_setting('storage.authenticated_role', true), 'authenticated');
    service_role text = COALESCE(current_setting('storage.service_role', true), 'service_role');
BEGIN
    EXECUTE 'revoke all on storage.s3_multipart_uploads from ' || anon_role || ', ' || authenticated_role;
    EXECUTE 'revoke all on storage.s3_multipart_uploads_parts from ' || anon_role || ', ' || authenticated_role;
    EXECUTE 'GRANT ALL ON TABLE storage.s3_multipart_uploads TO ' || service_role;
    EXECUTE 'GRANT ALL ON TABLE storage.s3_multipart_uploads_parts TO ' || service_role;
    -- NOTE(review): SELECT is re-granted right after the blanket revoke —
    -- presumably so row-level-security-governed reads still work; confirm intent.
    EXECUTE 'GRANT SELECT ON TABLE storage.s3_multipart_uploads TO ' || authenticated_role || ', ' || anon_role;
    EXECUTE 'GRANT SELECT ON TABLE storage.s3_multipart_uploads_parts TO ' || authenticated_role || ', ' || anon_role;
END$$;
4 changes: 2 additions & 2 deletions migrations/tenant/0022-s3-multipart-uploads-big-ints.sql
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
-- Widen size-accounting columns to bigint so uploads larger than ~2 GiB do not
-- overflow int4. (New-state statements only: the pre-rename `storage._s3_*`
-- variants shown in the diff target tables that no longer exist under that name.)
ALTER TABLE storage.s3_multipart_uploads ALTER COLUMN in_progress_size TYPE bigint;
ALTER TABLE storage.s3_multipart_uploads_parts ALTER COLUMN size TYPE bigint;
78 changes: 78 additions & 0 deletions migrations/tenant/0023-optimize-search-function.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
-- List one "directory level" of a bucket: folder names found at depth `levels`
-- under `prefix`, followed by the objects stored exactly at that depth.
-- Folder rows carry NULL id/timestamps/metadata. Results are paged with
-- limits/offsets and ordered by the (whitelisted) sortcolumn/sortorder.
create or replace function storage.search (
prefix text,
bucketname text,
limits int default 100,
levels int default 1,
offsets int default 0,
search text default '',
sortcolumn text default 'name',
sortorder text default 'asc'
) returns table (
name text,
id uuid,
updated_at timestamptz,
created_at timestamptz,
last_accessed_at timestamptz,
metadata jsonb
)
as $$
declare
v_order_by text;
v_sort_order text;
begin
-- Whitelist the sort column: v_order_by is concatenated into dynamic SQL,
-- so anything unrecognized must fall back to a known-safe value ('name').
case
when sortcolumn = 'name' then
v_order_by = 'name';
when sortcolumn = 'updated_at' then
v_order_by = 'updated_at';
when sortcolumn = 'created_at' then
v_order_by = 'created_at';
when sortcolumn = 'last_accessed_at' then
v_order_by = 'last_accessed_at';
else
v_order_by = 'name';
end case;

-- Whitelist the sort direction the same way (default 'asc').
case
when sortorder = 'asc' then
v_sort_order = 'asc';
when sortorder = 'desc' then
v_sort_order = 'desc';
else
v_sort_order = 'asc';
end case;

v_order_by = v_order_by || ' ' || v_sort_order;

-- Positional parameters below: $1=levels, $2=prefix, $3=search,
-- $4=bucketname, $5=limits, $6=offsets (see the USING clause).
-- `folders` collects distinct path segments at the requested depth for
-- entries that continue deeper (path length <> $1); the second branch
-- returns real objects that terminate exactly at that depth.
return query execute
'with folders as (
select path_tokens[$1] as folder
from storage.objects
where objects.name ilike $2 || $3 || ''%''
and bucket_id = $4
and array_length(objects.path_tokens, 1) <> $1
group by folder
order by folder ' || v_sort_order || '
)
(select folder as "name",
null as id,
null as updated_at,
null as created_at,
null as last_accessed_at,
null as metadata from folders)
union all
(select path_tokens[$1] as "name",
id,
updated_at,
created_at,
last_accessed_at,
metadata
from storage.objects
where objects.name ilike $2 || $3 || ''%''
and bucket_id = $4
and array_length(objects.path_tokens, 1) = $1
order by ' || v_order_by || ')
limit $5
offset $6' using levels, prefix, search, bucketname, limits, offsets;
end;
$$ language plpgsql stable;
Empty file.
Loading

0 comments on commit bade70f

Please sign in to comment.