From a112e210d6098e7b1e5036c260974315e0c0be61 Mon Sep 17 00:00:00 2001 From: Ivan Leskin Date: Fri, 4 Sep 2020 16:17:32 +0300 Subject: [PATCH 01/15] Reposition PostgreSQL GUC declaration GUC variables must be declared before any other actions are made by diskquota. In particular, the value of 'diskquota_max_active_tables' (GUC 'diskquota.max_active_tables') is used in 'DiskQuotaShmemSize()'. The late GUC variable declaration caused this variable to be '0', thus leading to allocation of insufficient amount of memory. Fix this: 1. Move GUC declarations to a separate static function 2. Call this function before any other actions performed in _PG_init() --- diskquota.c | 86 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 48 insertions(+), 38 deletions(-) diff --git a/diskquota.c b/diskquota.c index f411d212..5e998f24 100644 --- a/diskquota.c +++ b/diskquota.c @@ -95,6 +95,7 @@ void disk_quota_launcher_main(Datum); static void disk_quota_sigterm(SIGNAL_ARGS); static void disk_quota_sighup(SIGNAL_ARGS); +static void define_guc_variables(void); static bool start_worker_by_dboid(Oid dbid); static void start_workers_from_dblist(void); static void create_monitor_db_table(void); @@ -128,48 +129,13 @@ _PG_init(void) if (!process_shared_preload_libraries_in_progress) ereport(ERROR, (errmsg("diskquota.so not in shared_preload_libraries."))); + /* values are used in later calls */ + define_guc_variables(); + init_disk_quota_shmem(); init_disk_quota_enforcement(); init_active_table_hook(); - /* get the configuration */ - DefineCustomIntVariable("diskquota.naptime", - "Duration between each check (in seconds).", - NULL, - &diskquota_naptime, - 2, - 1, - INT_MAX, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); - - DefineCustomIntVariable("diskquota.max_active_tables", - "max number of active tables monitored by disk-quota", - NULL, - &diskquota_max_active_tables, - 1 * 1024 * 1024, - 1, - INT_MAX, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); - - DefineCustomBoolVariable("diskquota.enable_hardlimit", - "Use in-query diskquota enforcement", - NULL, - &diskquota_enable_hardlimit, - false, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); - /* start disk quota launcher only on master */ if (!IS_QUERY_DISPATCHER()) { @@ -250,6 +216,50 @@ disk_quota_sigusr1(SIGNAL_ARGS) errno = save_errno; } +/* + * Define GUC variables used by diskquota + */ +static void +define_guc_variables(void) +{ + DefineCustomIntVariable("diskquota.naptime", + "Duration between each check (in seconds).", + NULL, + &diskquota_naptime, + 2, + 1, + INT_MAX, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); + + DefineCustomIntVariable("diskquota.max_active_tables", + "max number of active tables monitored by disk-quota", + NULL, + &diskquota_max_active_tables, + 1 * 1024 * 1024, + 1, + INT_MAX, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); + + DefineCustomBoolVariable("diskquota.enable_hardlimit", + "Use in-query diskquota enforcement", + NULL, + &diskquota_enable_hardlimit, + false, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); +} + /* ---- Functions for disk quota worker process ---- */ /* From 5e385ffca196823d2076532de4b0359ab312b978 Mon Sep 17 00:00:00 2001 From: t1mursadykov Date: Wed, 10 Feb 2021 00:04:01 +0300 Subject: [PATCH 02/15] implement extension upgrade Added a script, which update the extension to version 1.0.3 --- Makefile | 12 +++++++++++- diskquota--1.0--1.0.3.sql | 8 ++++++++ diskquota--1.0.sql | 6 ------ diskquota.control | 2 +- 4 files changed, 20 insertions(+), 8 deletions(-) create mode 100644 diskquota--1.0--1.0.3.sql 
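A minimal usage sketch (not part of the patch): once the files from this patch are installed, an existing 1.0 installation is moved to the new version with the standard extension update command, which the upgrade script's own \echo guard points to; the trailing SELECT is only a hypothetical verification step.

```
-- run in each database that has diskquota 1.0 installed
ALTER EXTENSION diskquota UPDATE TO '1.0.3';
-- check the installed version
SELECT extversion FROM pg_extension WHERE extname = 'diskquota';
```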
diff --git a/Makefile b/Makefile index c4f84b33..6e45f2b7 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,14 @@ MODULE_big = diskquota EXTENSION = diskquota -DATA = diskquota--1.0.sql +EXTENSION_VERSION = 1.0.3 + +DATA = \ + diskquota--1.0.sql \ + diskquota--1.0--1.0.3.sql \ + +DATA_built = diskquota--$(EXTENSION_VERSION).sql + SRCDIR = ./ FILES = diskquota.c enforcement.c quotamodel.c gp_activetable.c diskquota_utility.c OBJS = diskquota.o enforcement.o quotamodel.o gp_activetable.o diskquota_utility.o @@ -18,3 +25,6 @@ REGRESS_OPTS = --schedule=diskquota_schedule --init-file=init_file endif PGXS := $(shell pg_config --pgxs) include $(PGXS) + +diskquota--$(EXTENSION_VERSION).sql: + cat $(DATA) > diskquota--$(EXTENSION_VERSION).sql diff --git a/diskquota--1.0--1.0.3.sql b/diskquota--1.0--1.0.3.sql new file mode 100644 index 00000000..0f1f3b82 --- /dev/null +++ b/diskquota--1.0--1.0.3.sql @@ -0,0 +1,8 @@ +\echo use "alter extension diskquota update to '1.0.3'" to load this file. \quit + +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') from gp_dist_random('gp_id'); + +CREATE FUNCTION diskquota.update_diskquota_db_list(oid, int4) +RETURNS void STRICT +AS 'MODULE_PATHNAME' +LANGUAGE C; diff --git a/diskquota--1.0.sql b/diskquota--1.0.sql index af22a2ff..69297962 100644 --- a/diskquota--1.0.sql +++ b/diskquota--1.0.sql @@ -9,7 +9,6 @@ CREATE SCHEMA diskquota; CREATE TABLE diskquota.quota_config (targetOid oid, quotatype int, quotalimitMB int8, PRIMARY KEY(targetOid, quotatype)); SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); -SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.quota_config', '') from gp_dist_random('gp_id'); CREATE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT @@ -21,11 +20,6 @@ RETURNS void STRICT AS 'MODULE_PATHNAME' LANGUAGE C; -CREATE FUNCTION diskquota.update_diskquota_db_list(oid, int4) -RETURNS void STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; - CREATE TABLE diskquota.table_size (tableid oid, size bigint, PRIMARY KEY(tableid)); CREATE TABLE diskquota.state (state int, PRIMARY KEY(state)); diff --git a/diskquota.control b/diskquota.control index 6c25e7cd..58693a2b 100644 --- a/diskquota.control +++ b/diskquota.control @@ -1,5 +1,5 @@ # diskquota extension comment = 'Disk Quota Main Program' -default_version = '1.0' +default_version = '1.0.3' module_pathname = '$libdir/diskquota' relocatable = true From c183c6c755d8571e0d31f1b220b28b7493f920f8 Mon Sep 17 00:00:00 2001 From: Georgy Shelkovy Date: Thu, 14 Jul 2022 13:19:55 +0500 Subject: [PATCH 03/15] ADBDEV-2818: Provide upgrade scripts for diskquota extension --- CMakeLists.txt | 1 + diskquota--1.0.3--2.0.sql | 284 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 285 insertions(+) create mode 100644 diskquota--1.0.3--2.0.sql diff --git a/CMakeLists.txt b/CMakeLists.txt index cb1c80e7..417f3aec 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -80,6 +80,7 @@ list( diskquota.control diskquota--1.0.sql diskquota--1.0--2.0.sql + diskquota--1.0.3--2.0.sql diskquota--2.0.sql diskquota--2.0--1.0.sql) diff --git a/diskquota--1.0.3--2.0.sql b/diskquota--1.0.3--2.0.sql new file mode 100644 index 00000000..40a7969d --- /dev/null +++ b/diskquota--1.0.3--2.0.sql @@ -0,0 +1,284 @@ +-- TODO check if worker should not refresh, current lib should be diskquota-2.0.so + +-- table part +ALTER TABLE diskquota.quota_config ADD COLUMN segratio float4 DEFAULT 0; + +CREATE TABLE diskquota.target ( + rowId serial, + quotatype int, -- 
REFERENCES disquota.quota_config.quotatype, + primaryOid oid, + tablespaceOid oid, -- REFERENCES pg_tablespace.oid, + PRIMARY KEY (primaryOid, tablespaceOid, quotatype) +); +-- TODO ALTER TABLE diskquota.target SET DEPENDS ON EXTENSION diskquota; + +ALTER TABLE diskquota.table_size ADD COLUMN segid smallint DEFAULT -1; -- segid = coordinator means table size in cluster level +ALTER TABLE diskquota.table_size DROP CONSTRAINT table_size_pkey; +ALTER TABLE diskquota.table_size ADD PRIMARY KEY (tableid, segid); +ALTER TABLE diskquota.table_size SET WITH (REORGANIZE=true) DISTRIBUTED BY (tableid, segid); + +-- TODO SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); +-- TODO SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); +-- table part end + +-- type define +ALTER TYPE diskquota.diskquota_active_table_type ADD ATTRIBUTE "GP_SEGMENT_ID" smallint; + +CREATE TYPE diskquota.rejectmap_entry AS ( + target_oid oid, + database_oid oid, + tablespace_oid oid, + target_type integer, + seg_exceeded boolean +); + +CREATE TYPE diskquota.rejectmap_entry_detail AS ( + target_type text, + target_oid oid, + database_oid oid, + tablespace_oid oid, + seg_exceeded boolean, + dbnode oid, + spcnode oid, + relnode oid, + segid int +); + +CREATE TYPE diskquota.relation_cache_detail AS ( + RELID oid, + PRIMARY_TABLE_OID oid, + AUXREL_NUM int, + OWNEROID oid, + NAMESPACEOID oid, + BACKENDID int, + SPCNODE oid, + DBNODE oid, + RELNODE oid, + RELSTORAGE "char", + AUXREL_OID oid[] +); +-- type define end + +-- UDF +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_schema_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.set_role_quota(text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.init_table_size_table() RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +/* ALTER */ CREATE OR REPLACE FUNCTION diskquota.diskquota_fetch_table_stat(int4, oid[]) RETURNS setof diskquota.diskquota_active_table_type AS '$libdir/diskquota-2.0.so', 'diskquota_fetch_table_stat' LANGUAGE C VOLATILE; + +-- TODO solve dependency DROP FUNCTION diskquota.update_diskquota_db_list(oid, int4); + +CREATE FUNCTION diskquota.set_schema_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_role_tablespace_quota(text, text, text) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.set_per_segment_quota(text, float4) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.refresh_rejectmap(diskquota.rejectmap_entry[], oid[]) RETURNS void STRICT AS '$libdir/diskquota-2.0.so' LANGUAGE C; +CREATE FUNCTION diskquota.show_rejectmap() RETURNS setof diskquota.rejectmap_entry_detail AS '$libdir/diskquota-2.0.so', 'show_rejectmap' LANGUAGE C; +CREATE FUNCTION diskquota.pause() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_pause' LANGUAGE C; +CREATE FUNCTION diskquota.resume() RETURNS void STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_resume' LANGUAGE C; +CREATE FUNCTION diskquota.show_worker_epoch() RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'show_worker_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.wait_for_worker_new_epoch() RETURNS boolean STRICT AS '$libdir/diskquota-2.0.so', 'wait_for_worker_new_epoch' LANGUAGE C; +CREATE FUNCTION diskquota.status() 
RETURNS TABLE ("name" text, "status" text) STRICT AS '$libdir/diskquota-2.0.so', 'diskquota_status' LANGUAGE C; +CREATE FUNCTION diskquota.show_relation_cache() RETURNS setof diskquota.relation_cache_detail AS '$libdir/diskquota-2.0.so', 'show_relation_cache' LANGUAGE C; +CREATE FUNCTION diskquota.relation_size_local( + reltablespace oid, + relfilenode oid, + relpersistence "char", + relstorage "char") +RETURNS bigint STRICT AS '$libdir/diskquota-2.0.so', 'relation_size_local' LANGUAGE C; +CREATE FUNCTION diskquota.pull_all_table_size(OUT tableid oid, OUT size bigint, OUT segid smallint) RETURNS SETOF RECORD AS '$libdir/diskquota-2.0.so', 'pull_all_table_size' LANGUAGE C; +CREATE FUNCTION diskquota.relation_size(relation regclass) RETURNS bigint STRICT AS $$ + SELECT SUM(size)::bigint FROM ( + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size + FROM gp_dist_random('pg_class') WHERE oid = relation + UNION ALL + SELECT diskquota.relation_size_local(reltablespace, relfilenode, relpersistence, relstorage) AS size + FROM pg_class WHERE oid = relation + ) AS t $$ LANGUAGE SQL; + +CREATE FUNCTION diskquota.show_relation_cache_all_seg() RETURNS setof diskquota.relation_cache_detail AS $$ + WITH relation_cache AS ( + SELECT diskquota.show_relation_cache() AS a + FROM gp_dist_random('gp_id') + ) + SELECT (a).* FROM relation_cache; $$ LANGUAGE SQL; +-- UDF end + +-- views +CREATE VIEW diskquota.rejectmap AS SELECT * FROM diskquota.show_rejectmap() AS BM; + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_database_size_view AS +SELECT ( + (SELECT SUM(pg_relation_size(oid)) FROM pg_class WHERE oid <= 16384) + + + (SELECT SUM(size) FROM diskquota.table_size WHERE segid = -1) +) AS dbsize; + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_schema_quota_view AS +WITH + quota_usage AS ( + SELECT + relnamespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relnamespace + ) +SELECT + nspname AS schema_name, + targetoid AS schema_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_namespace ON targetoid = pg_namespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace +WHERE + quotaType = 0; -- NAMESPACE_QUOTA + +/* ALTER */ CREATE OR REPLACE VIEW diskquota.show_fast_role_quota_view AS +WITH + quota_usage AS ( + SELECT + relowner, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relowner + ) +SELECT + rolname AS role_name, + targetoid AS role_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_in_bytes +FROM + diskquota.quota_config JOIN + pg_roles ON targetoid = pg_roles.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner +WHERE + quotaType = 1; -- ROLE_QUOTA + +CREATE VIEW diskquota.show_fast_schema_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relnamespace, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class, + default_tablespace + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relnamespace, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + 
diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 2 -- NAMESPACE_TABLESPACE_QUOTA + ) +SELECT + nspname AS schema_name, + primaryoid AS schema_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS nspsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_namespace ON primaryoid = pg_namespace.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_namespace.oid = relnamespace AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_fast_role_tablespace_quota_view AS +WITH + default_tablespace AS ( + SELECT dattablespace FROM pg_database + WHERE datname = current_database() + ), + quota_usage AS ( + SELECT + relowner, + CASE + WHEN reltablespace = 0 THEN dattablespace + ELSE reltablespace + END AS reltablespace, + SUM(size) AS total_size + FROM + diskquota.table_size, + pg_class, + default_tablespace + WHERE + tableid = pg_class.oid AND + segid = -1 + GROUP BY + relowner, + reltablespace, + dattablespace + ), + full_quota_config AS ( + SELECT + primaryOid, + tablespaceoid, + quotalimitMB + FROM + diskquota.quota_config AS config, + diskquota.target AS target + WHERE + config.targetOid = target.rowId AND + config.quotaType = target.quotaType AND + config.quotaType = 3 -- ROLE_TABLESPACE_QUOTA + ) +SELECT + rolname AS role_name, + primaryoid AS role_oid, + spcname AS tablespace_name, + tablespaceoid AS tablespace_oid, + quotalimitMB AS quota_in_mb, + COALESCE(total_size, 0) AS rolsize_tablespace_in_bytes +FROM + full_quota_config JOIN + pg_roles ON primaryoid = pg_roles.oid JOIN + pg_tablespace ON tablespaceoid = pg_tablespace.oid LEFT OUTER JOIN + quota_usage ON pg_roles.oid = relowner AND pg_tablespace.oid = reltablespace; + +CREATE VIEW diskquota.show_segment_ratio_quota_view AS +SELECT + spcname as tablespace_name, + pg_tablespace.oid as tablespace_oid, + segratio as per_seg_quota_ratio +FROM + diskquota.quota_config JOIN + pg_tablespace ON targetOid = pg_tablespace.oid + AND quotatype = 4; + +-- views end + From a20709b3305f3e028273ad426d65e0ed3d422861 Mon Sep 17 00:00:00 2001 From: Georgy Shelkovy Date: Fri, 12 Aug 2022 14:56:39 +0500 Subject: [PATCH 04/15] Updating marks new table diskquota.target as needed to dump too Ensure updating corresponds clean install. 
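For reference, a sketch of the two statements this patch enables in both upgrade scripts (they were previously left as TODO comments in the hunks below); the gp_dist_random('gp_id') form repeats the registration on every segment in addition to the coordinator.

```
-- mark diskquota.target so pg_dump includes its user data
SELECT pg_catalog.pg_extension_config_dump('diskquota.target', '');
-- and do the same on every segment
SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '')
FROM gp_dist_random('gp_id');
```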
--- diskquota--1.0--2.0.sql | 4 ++-- diskquota--1.0.3--2.0.sql | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/diskquota--1.0--2.0.sql b/diskquota--1.0--2.0.sql index 40a7969d..c2413236 100644 --- a/diskquota--1.0--2.0.sql +++ b/diskquota--1.0--2.0.sql @@ -17,8 +17,8 @@ ALTER TABLE diskquota.table_size DROP CONSTRAINT table_size_pkey; ALTER TABLE diskquota.table_size ADD PRIMARY KEY (tableid, segid); ALTER TABLE diskquota.table_size SET WITH (REORGANIZE=true) DISTRIBUTED BY (tableid, segid); --- TODO SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); --- TODO SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); +SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); -- table part end -- type define diff --git a/diskquota--1.0.3--2.0.sql b/diskquota--1.0.3--2.0.sql index 40a7969d..c2413236 100644 --- a/diskquota--1.0.3--2.0.sql +++ b/diskquota--1.0.3--2.0.sql @@ -17,8 +17,8 @@ ALTER TABLE diskquota.table_size DROP CONSTRAINT table_size_pkey; ALTER TABLE diskquota.table_size ADD PRIMARY KEY (tableid, segid); ALTER TABLE diskquota.table_size SET WITH (REORGANIZE=true) DISTRIBUTED BY (tableid, segid); --- TODO SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); --- TODO SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); +SELECT pg_catalog.pg_extension_config_dump('diskquota.target', ''); +SELECT gp_segment_id, pg_catalog.pg_extension_config_dump('diskquota.target', '') FROM gp_dist_random('gp_id'); -- table part end -- type define From 472eb0652d7d71d8605a70b9f8f15005009ae1c9 Mon Sep 17 00:00:00 2001 From: Georgy Shelkovy Date: Tue, 28 Mar 2023 20:01:33 +0500 Subject: [PATCH 05/15] fix updating from 1.x to 2.y when y > 0 (#12) Functions in the SQL script for each diskquota 2.x version refer to the exact name of shared library, e.g. for diskquota-2.1.so. As a result, upgrade from 2.0 to 2.2 is impossible without presence of 2.1 shared library as far as each extension function validated by loading shared library and checking symbol name. This patch implement symlinks creation logic to coup this problem. Symlinks for each previous release to the latest shared library will be created. But we need to control changing of sql function during next syncs. 
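As an illustration only, a shell sketch of what the generated install rules amount to, assuming 2.2 is the version being built, 2.0 and 2.1 are the previously released versions, and $GPHOME points at the install prefix (all three are assumptions, not taken from the patch).

```
# hypothetical equivalent of the generated install(CODE ...) symlink rules
cd "$GPHOME/lib/postgresql"
for old in 2.0 2.1; do
    ln -sf diskquota-2.2.so "diskquota-${old}.so"
done
```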
--- CMakeLists.txt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 011266c3..361b3cbf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -183,3 +183,11 @@ install(PROGRAMS "cmake/install_gpdb_component" DESTINATION ".") install(FILES ${diskquota_DDL} DESTINATION "share/postgresql/extension/") install(TARGETS diskquota DESTINATION "lib/postgresql/") install(FILES ${build_info_PATH} DESTINATION ".") + +file(GLOB sql_files RELATIVE ${CMAKE_SOURCE_DIR} diskquota--2.*.sql) +list(FILTER sql_files EXCLUDE REGEX ".*--.*--.*") +list(FILTER sql_files EXCLUDE REGEX ".*diskquota--${DISKQUOTA_MAJOR_VERSION}.${DISKQUOTA_MINOR_VERSION}.*") +foreach(so IN LISTS sql_files) + string(REGEX REPLACE "^diskquota--([0-9]+)\.([0-9]+).sql$" "diskquota-\\1.\\2.so" so ${so}) + install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${DISKQUOTA_BINARY_NAME}.so \"\$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib/postgresql/${so}\")") +endforeach() From d5b5d1ecf2ffb5afe08f3ee910807b6a9a5e9932 Mon Sep 17 00:00:00 2001 From: Evgeniy Ratkov Date: Mon, 24 Apr 2023 14:04:06 +0700 Subject: [PATCH 06/15] add fixes to run regression tests in Docker container (#14) Removed useless commands from `concourse/scripts/entry.sh`, because our docker images already have source of GPDB at `/home/gpadmin/gpdb_src` and do not need to create or configure this directory. Added file `arenadata/README.md` with information about how to run tests at docker container. Removed `--load-extension=diskquota_test` from `CMakeLists.txt` at regression tests. Creating extension `diskquota_test` requires configured extension `diskquota`. But `diskquota` is configured when `tests/regress/sql/config.sql` is executed, and load-extension is done before this test is started. At all tests (where `diskquota_test` is used) command `CREATE EXTENSION diskquota_test;` is executed. --- arenadata/README.md | 28 ++++++++++++++++++++++++++++ concourse/scripts/entry.sh | 15 +-------------- tests/CMakeLists.txt | 1 - 3 files changed, 29 insertions(+), 15 deletions(-) create mode 100644 arenadata/README.md diff --git a/arenadata/README.md b/arenadata/README.md new file mode 100644 index 00000000..fff0b986 --- /dev/null +++ b/arenadata/README.md @@ -0,0 +1,28 @@ +# Run regression tests in Docker container + +You can build your Docker image from GPDB source or use prebuilt images from hub.adsw.io. +How to build Docker image: (["readme.md"](https://github.com/arenadata/gpdb/blob/f7ff7c8ecae4ce7ab3b73fd46171cdaa457b3591/arenadata/readme.md)). + +1. Download the cmake-3.20 install script from ([source](https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-linux-x86_64.sh)). + +2. Build diskquota in the Docker container. +Change and to the appropriate paths on your local machine. + +``` +docker run --rm -it -e DISKQUOTA_OS=rhel7 \ + -v /tmp/diskquota_artifacts:/home/gpadmin/diskquota_artifacts \ + -v :/home/gpadmin/diskquota_src \ + -v :/home/gpadmin/bin_cmake/cmake-3.20.0-linux-x86_64.sh \ + hub.adsw.io/library/gpdb6_regress:latest diskquota_src/concourse/scripts/entry.sh build +``` + +3. Run tests. +Change and to the appropriate paths on your local machine. 
+ +``` +docker run --rm -it --sysctl 'kernel.sem=500 1024000 200 4096' \ + -v /tmp/diskquota_artifacts:/home/gpadmin/bin_diskquota \ + -v :/home/gpadmin/diskquota_src \ + -v :/home/gpadmin/bin_cmake/cmake-3.20.0-linux-x86_64.sh \ + hub.adsw.io/library/gpdb6_regress:latest diskquota_src/concourse/scripts/entry.sh test +``` diff --git a/concourse/scripts/entry.sh b/concourse/scripts/entry.sh index b3c70ddd..3324713d 100755 --- a/concourse/scripts/entry.sh +++ b/concourse/scripts/entry.sh @@ -108,15 +108,12 @@ setup_gpadmin() { fi mkdir -p /home/gpadmin chown gpadmin:gpadmin /home/gpadmin - - chown -R gpadmin:gpadmin /tmp/build - ln -s "${CONCOURSE_WORK_DIR}"/* /home/gpadmin } # Extract gpdb binary function install_gpdb() { [ ! -d /usr/local/greenplum-db-devel ] && mkdir -p /usr/local/greenplum-db-devel - tar -xzf "${CONCOURSE_WORK_DIR}"/bin_gpdb/*.tar.gz -C /usr/local/greenplum-db-devel + tar -xzf "${CONCOURSE_WORK_DIR}"/bin_gpdb/bin_gpdb.tar.gz -C /usr/local/greenplum-db-devel chown -R gpadmin:gpadmin /usr/local/greenplum-db-devel } @@ -130,21 +127,11 @@ function create_fake_gpdb_src() { grep -rhw '/usr/local/greenplum-db-devel' -e 'abs_top_srcdir = .*' |\ head -n 1 | awk '{ print $NF; }')" - if [ -d "${fake_gpdb_src}" ]; then - echo "Fake gpdb source directory has been configured." - return - fi - pushd /home/gpadmin/gpdb_src ./configure --prefix=/usr/local/greenplum-db-devel \ --without-zstd \ --disable-orca --disable-gpcloud --enable-debug-extensions popd - - local fake_root - fake_root=$(dirname "${fake_gpdb_src}") - mkdir -p "${fake_root}" - ln -s /home/gpadmin/gpdb_src "${fake_gpdb_src}" } # Setup common environment diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 9a774572..dbe3aabf 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -15,7 +15,6 @@ RegressTarget_Add(regress SCHEDULE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/regress/diskquota_schedule${EXPECTED_DIR_SUFFIX} REGRESS_OPTS --load-extension=gp_inject_fault - --load-extension=diskquota_test --dbname=contrib_regression) RegressTarget_Add(isolation2 From 50ed2e4e1883ec8ec4e7086b750cb28cdc5a2dc0 Mon Sep 17 00:00:00 2001 From: Vasiliy Ivanov Date: Thu, 4 May 2023 15:32:57 +1000 Subject: [PATCH 07/15] temporary disable upgrade tests until ADBDEV-3649 is not resolved --- CMakeLists.txt | 1 - concourse/scripts/build_diskquota.sh | 3 --- concourse/scripts/test_diskquota.sh | 2 -- 3 files changed, 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 38d32e7c..5d44f3e4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -150,7 +150,6 @@ BuildInfo_Create(${build_info_PATH} # Add installcheck targets add_subdirectory(tests) -add_subdirectory(upgrade_test) # NOTE: keep install part at the end of file, to overwrite previous binary install(PROGRAMS "cmake/install_gpdb_component" DESTINATION ".") diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 7b2ee30b..10b66e34 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -11,10 +11,7 @@ function pkg() { export CXX="$(which g++)" pushd /home/gpadmin/diskquota_artifacts - local last_release_path - last_release_path=$(readlink -eq /home/gpadmin/last_released_diskquota_bin/diskquota-*.tar.gz) cmake /home/gpadmin/diskquota_src \ - -DDISKQUOTA_LAST_RELEASE_PATH="${last_release_path}" \ -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" cmake --build . 
--target create_artifact popd diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 85b2bce1..245196ff 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -38,8 +38,6 @@ function _main() { activate_standby time cmake --build . --target installcheck fi - # Run upgrade test (with standby master) - time cmake --build . --target upgradecheck popd } From be945ba3ede3a42f8974e86086041a8c1aac8d43 Mon Sep 17 00:00:00 2001 From: Evgeniy Ratkov Date: Mon, 5 Jun 2023 16:03:52 +0300 Subject: [PATCH 08/15] fix creating extension with global option appendonly=true (#19) All tables use primary keys. Unique index tables are created for primary keys. Unique index tables can not be created for table with appendonly=true. appendonly=false has been added for each table with primary key. --- control/ddl/diskquota--1.0--2.0.sql | 2 +- control/ddl/diskquota--1.0.3--2.0.sql | 2 +- control/ddl/diskquota--1.0.sql | 6 +++--- control/ddl/diskquota--2.0.sql | 8 ++++---- control/ddl/diskquota--2.1.sql | 8 ++++---- control/ddl/diskquota--2.2.sql | 8 ++++---- tests/isolation2/expected/test_create_extension.out | 13 +++++++++++++ tests/isolation2/sql/test_create_extension.sql | 9 +++++++++ tests/regress/expected/test_create_extension.out | 5 +++++ tests/regress/sql/test_create_extension.sql | 9 +++++++++ 10 files changed, 53 insertions(+), 17 deletions(-) diff --git a/control/ddl/diskquota--1.0--2.0.sql b/control/ddl/diskquota--1.0--2.0.sql index c2413236..5ec20786 100644 --- a/control/ddl/diskquota--1.0--2.0.sql +++ b/control/ddl/diskquota--1.0--2.0.sql @@ -9,7 +9,7 @@ CREATE TABLE diskquota.target ( primaryOid oid, tablespaceOid oid, -- REFERENCES pg_tablespace.oid, PRIMARY KEY (primaryOid, tablespaceOid, quotatype) -); +) WITH (appendonly=false); -- TODO ALTER TABLE diskquota.target SET DEPENDS ON EXTENSION diskquota; ALTER TABLE diskquota.table_size ADD COLUMN segid smallint DEFAULT -1; -- segid = coordinator means table size in cluster level diff --git a/control/ddl/diskquota--1.0.3--2.0.sql b/control/ddl/diskquota--1.0.3--2.0.sql index c2413236..5ec20786 100644 --- a/control/ddl/diskquota--1.0.3--2.0.sql +++ b/control/ddl/diskquota--1.0.3--2.0.sql @@ -9,7 +9,7 @@ CREATE TABLE diskquota.target ( primaryOid oid, tablespaceOid oid, -- REFERENCES pg_tablespace.oid, PRIMARY KEY (primaryOid, tablespaceOid, quotatype) -); +) WITH (appendonly=false); -- TODO ALTER TABLE diskquota.target SET DEPENDS ON EXTENSION diskquota; ALTER TABLE diskquota.table_size ADD COLUMN segid smallint DEFAULT -1; -- segid = coordinator means table size in cluster level diff --git a/control/ddl/diskquota--1.0.sql b/control/ddl/diskquota--1.0.sql index d4dd4e79..4431b79e 100644 --- a/control/ddl/diskquota--1.0.sql +++ b/control/ddl/diskquota--1.0.sql @@ -9,18 +9,18 @@ CREATE TABLE diskquota.quota_config( quotatype int, quotalimitMB int8, PRIMARY KEY(targetOid, quotatype) -); +) WITH (appendonly=false); CREATE TABLE diskquota.table_size( tableid oid, size bigint, PRIMARY KEY(tableid) -); +) WITH (appendonly=false); CREATE TABLE diskquota.state( state int, PRIMARY KEY(state) -); +) WITH (appendonly=false); -- only diskquota.quota_config is dump-able, other table can be generate on fly SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); diff --git a/control/ddl/diskquota--2.0.sql b/control/ddl/diskquota--2.0.sql index 0587fc77..ca16c040 100644 --- a/control/ddl/diskquota--2.0.sql +++ b/control/ddl/diskquota--2.0.sql @@ -11,7 +11,7 @@ CREATE TABLE 
diskquota.quota_config( quotalimitMB int8, segratio float4 DEFAULT 0, PRIMARY KEY(targetOid, quotatype) -) DISTRIBUTED BY (targetOid, quotatype); +) WITH (appendonly=false) DISTRIBUTED BY (targetOid, quotatype); CREATE TABLE diskquota.target ( rowId serial, @@ -19,19 +19,19 @@ CREATE TABLE diskquota.target ( primaryOid oid, tablespaceOid oid, --REFERENCES pg_tablespace.oid, PRIMARY KEY (primaryOid, tablespaceOid, quotatype) -); +) WITH (appendonly=false); CREATE TABLE diskquota.table_size( tableid oid, size bigint, segid smallint, PRIMARY KEY(tableid, segid) -) DISTRIBUTED BY (tableid, segid); +) WITH (appendonly=false) DISTRIBUTED BY (tableid, segid); CREATE TABLE diskquota.state( state int, PRIMARY KEY(state) -) DISTRIBUTED BY (state); +) WITH (appendonly=false) DISTRIBUTED BY (state); -- diskquota.quota_config AND diskquota.target is dump-able, other table can be generate on fly SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); diff --git a/control/ddl/diskquota--2.1.sql b/control/ddl/diskquota--2.1.sql index eb12606d..d38b6d93 100644 --- a/control/ddl/diskquota--2.1.sql +++ b/control/ddl/diskquota--2.1.sql @@ -11,7 +11,7 @@ CREATE TABLE diskquota.quota_config( quotalimitMB int8, segratio float4 DEFAULT 0, PRIMARY KEY(targetOid, quotatype) -) DISTRIBUTED BY (targetOid, quotatype); +) WITH (appendonly=false) DISTRIBUTED BY (targetOid, quotatype); CREATE TABLE diskquota.target ( rowId serial, @@ -19,19 +19,19 @@ CREATE TABLE diskquota.target ( primaryOid oid, tablespaceOid oid, --REFERENCES pg_tablespace.oid, PRIMARY KEY (primaryOid, tablespaceOid, quotatype) -); +) WITH (appendonly=false); CREATE TABLE diskquota.table_size( tableid oid, size bigint, segid smallint, PRIMARY KEY(tableid, segid) -) DISTRIBUTED BY (tableid, segid); +) WITH (appendonly=false) DISTRIBUTED BY (tableid, segid); CREATE TABLE diskquota.state( state int, PRIMARY KEY(state) -) DISTRIBUTED BY (state); +) WITH (appendonly=false) DISTRIBUTED BY (state); -- diskquota.quota_config AND diskquota.target is dump-able, other table can be generate on fly SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); diff --git a/control/ddl/diskquota--2.2.sql b/control/ddl/diskquota--2.2.sql index 49a4b1db..680392fa 100644 --- a/control/ddl/diskquota--2.2.sql +++ b/control/ddl/diskquota--2.2.sql @@ -11,7 +11,7 @@ CREATE TABLE diskquota.quota_config( quotalimitMB int8, segratio float4 DEFAULT 0, PRIMARY KEY(targetOid, quotatype) -) DISTRIBUTED BY (targetOid, quotatype); +) WITH (appendonly=false) DISTRIBUTED BY (targetOid, quotatype); CREATE TABLE diskquota.target ( rowId serial, @@ -19,19 +19,19 @@ CREATE TABLE diskquota.target ( primaryOid oid, tablespaceOid oid, --REFERENCES pg_tablespace.oid, PRIMARY KEY (primaryOid, tablespaceOid, quotatype) -); +) WITH (appendonly=false); CREATE TABLE diskquota.table_size( tableid oid, size bigint, segid smallint, PRIMARY KEY(tableid, segid) -) DISTRIBUTED BY (tableid, segid); +) WITH (appendonly=false) DISTRIBUTED BY (tableid, segid); CREATE TABLE diskquota.state( state int, PRIMARY KEY(state) -) DISTRIBUTED BY (state); +) WITH (appendonly=false) DISTRIBUTED BY (state); -- diskquota.quota_config AND diskquota.target is dump-able, other table can be generate on fly SELECT pg_catalog.pg_extension_config_dump('diskquota.quota_config', ''); diff --git a/tests/isolation2/expected/test_create_extension.out b/tests/isolation2/expected/test_create_extension.out index 211ebd63..bbf84475 100644 --- a/tests/isolation2/expected/test_create_extension.out +++ 
b/tests/isolation2/expected/test_create_extension.out @@ -1,3 +1,16 @@ +-- check that diskquota ignores global flag appendonly=true + +SET gp_default_storage_options='appendonly=true'; +SET + +CREATE EXTENSION diskquota; +CREATE +DROP EXTENSION diskquota; +DROP + +SET gp_default_storage_options='appendonly=false'; +SET + CREATE EXTENSION diskquota; CREATE diff --git a/tests/isolation2/sql/test_create_extension.sql b/tests/isolation2/sql/test_create_extension.sql index 1cc9c9cb..7b90f5d2 100644 --- a/tests/isolation2/sql/test_create_extension.sql +++ b/tests/isolation2/sql/test_create_extension.sql @@ -1,3 +1,12 @@ +-- check that diskquota ignores global flag appendonly=true + +SET gp_default_storage_options='appendonly=true'; + +CREATE EXTENSION diskquota; +DROP EXTENSION diskquota; + +SET gp_default_storage_options='appendonly=false'; + CREATE EXTENSION diskquota; SELECT diskquota.init_table_size_table(); diff --git a/tests/regress/expected/test_create_extension.out b/tests/regress/expected/test_create_extension.out index a90178ce..b0040506 100644 --- a/tests/regress/expected/test_create_extension.out +++ b/tests/regress/expected/test_create_extension.out @@ -1,3 +1,8 @@ +-- check that diskquota ignores global flag appendonly=true +SET gp_default_storage_options='appendonly=true'; +CREATE EXTENSION diskquota; +DROP EXTENSION diskquota; +SET gp_default_storage_options='appendonly=false'; CREATE EXTENSION diskquota; SELECT diskquota.init_table_size_table(); init_table_size_table diff --git a/tests/regress/sql/test_create_extension.sql b/tests/regress/sql/test_create_extension.sql index dfbc96a3..7987ff3d 100644 --- a/tests/regress/sql/test_create_extension.sql +++ b/tests/regress/sql/test_create_extension.sql @@ -1,3 +1,12 @@ +-- check that diskquota ignores global flag appendonly=true + +SET gp_default_storage_options='appendonly=true'; + +CREATE EXTENSION diskquota; +DROP EXTENSION diskquota; + +SET gp_default_storage_options='appendonly=false'; + CREATE EXTENSION diskquota; SELECT diskquota.init_table_size_table(); From 3b06e37e4882b4a3d95448df1c944ded79ff9c07 Mon Sep 17 00:00:00 2001 From: Alexander Kondakov Date: Thu, 29 Jun 2023 09:50:35 +0300 Subject: [PATCH 09/15] ADBDEV-3685 Error handling for disqkuota worker startup stage (#20) During diskquota worker's first run the initial set of active tables with their sizes is being loaded from diskquota.table_size table in order to warm up diskquota rejectmap and other shared memory objects. If an error occurs during this initialization process, the error will be ignored in PG_CATCH() block. Because of that local_active_table_stat_map will not be filled properly. And at the next loop iteration tables, that are not in acitive table list will be marked as irrelevant and to be deleted both from table_size_map and table_size table in flush_to_table_size function. In case when the inital set of active tables is huge (thousands of tables), this error ignorance could lead to the formation of a too long delete statement, which the SPI executor won't be able to process due to memory limits. And this case can lead to worker's segmentation fault or other errorneous behaviour of whole extension. This commit proposes the handling of the initialization errors, which occur during worker's first run. In the DiskquotaDBEntry structure the bool variable "corrupted" is added in order to indicate, that the worker wasn't able to initialize itself on given database. 
And DiskquotaDBEntry also is now passed to refresh_disk_quota_model function from worker main loop, because one need to change the state of dbEntry. The state is changed when the refresh_disk_quota_usage function catches an error, which occured during the initialization step, in PG_CATCH() block. And after the error is catched, the "corrupted" flag is set in given dbEntry, and then the error is rethrown. This leads to worker process termination. The launcher will not be able to start it again, because added flag is set in the database structure, and this flag is being checked inside the disk_quota_launcher_main function. The flag can be reseted by calling resetBackgroundWorkerCorruption function, which is currently called in SIGHUP handler. --- src/diskquota.c | 34 ++++++++++---- src/diskquota.h | 7 +-- src/gp_activetable.c | 1 + src/quotamodel.c | 19 ++++++-- .../expected/test_worker_init_failure.out | 46 +++++++++++++++++++ tests/isolation2/isolation2_schedule | 1 + .../sql/test_worker_init_failure.sql | 40 ++++++++++++++++ 7 files changed, 133 insertions(+), 15 deletions(-) create mode 100644 tests/isolation2/expected/test_worker_init_failure.out create mode 100644 tests/isolation2/sql/test_worker_init_failure.sql diff --git a/src/diskquota.c b/src/diskquota.c index d5630700..4f0fe43c 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -142,6 +142,7 @@ static void vacuum_db_entry(DiskquotaDBEntry *db); static void init_bgworker_handles(void); static BackgroundWorkerHandle *get_bgworker_handle(uint32 worker_id); static void free_bgworker_handle(uint32 worker_id); +static void resetBackgroundWorkerCorruption(void); #if GP_VERSION_NUM < 70000 /* WaitForBackgroundWorkerShutdown is copied from gpdb7 */ static BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle); @@ -525,7 +526,7 @@ disk_quota_worker_main(Datum main_arg) if (!diskquota_is_paused()) { /* Refresh quota model with init mode */ - refresh_disk_quota_model(!MyWorkerInfo->dbEntry->inited); + refresh_disk_quota_model(MyWorkerInfo->dbEntry); MyWorkerInfo->dbEntry->inited = true; is_gang_destroyed = false; } @@ -762,6 +763,7 @@ disk_quota_launcher_main(Datum main_arg) { elog(DEBUG1, "[diskquota] got sighup"); got_sighup = false; + resetBackgroundWorkerCorruption(); ProcessConfigFile(PGC_SIGHUP); } @@ -787,11 +789,12 @@ disk_quota_launcher_main(Datum main_arg) * When curDB->in_use is false means dbEtnry has been romoved * When curDB->dbid doesn't equtal curDBId, it means the slot has * been used by another db - * + * When curDB->corrupted is true means worker couldn't initialize + * the extension in the first run. * For the above conditions, we just skip this loop and try to fetch * next db to run. 
*/ - if (curDB == NULL || !curDB->in_use || curDB->dbid != curDBId) + if (curDB == NULL || !curDB->in_use || curDB->dbid != curDBId || curDB->corrupted) { advance_one_db = true; continue; @@ -1796,7 +1799,9 @@ next_db(DiskquotaDBEntry *curDB) if (nextSlot >= MAX_NUM_MONITORED_DB) nextSlot = 0; DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[nextSlot]; nextSlot++; - if (!dbEntry->in_use || dbEntry->workerId != INVALID_WORKER_ID || dbEntry->dbid == InvalidOid) continue; + if (!dbEntry->in_use || dbEntry->workerId != INVALID_WORKER_ID || dbEntry->dbid == InvalidOid || + dbEntry->corrupted) + continue; /* TODO: should release the invalid db related things */ if (!is_valid_dbid(dbEntry->dbid)) continue; result = dbEntry; @@ -1860,10 +1865,11 @@ static void vacuum_db_entry(DiskquotaDBEntry *db) { if (db == NULL) return; - db->dbid = InvalidOid; - db->inited = false; - db->workerId = INVALID_WORKER_ID; - db->in_use = false; + db->dbid = InvalidOid; + db->inited = false; + db->workerId = INVALID_WORKER_ID; + db->in_use = false; + db->corrupted = false; } static void @@ -1898,6 +1904,18 @@ free_bgworker_handle(uint32 worker_id) } } +static void +resetBackgroundWorkerCorruption(void) +{ + LWLockAcquire(diskquota_locks.dblist_lock, LW_EXCLUSIVE); + for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) + { + DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[i]; + if (dbEntry->corrupted) dbEntry->corrupted = false; + } + LWLockRelease(diskquota_locks.dblist_lock); +} + #if GP_VERSION_NUM < 70000 static BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle) diff --git a/src/diskquota.h b/src/diskquota.h index f044773b..58a00f46 100644 --- a/src/diskquota.h +++ b/src/diskquota.h @@ -215,8 +215,9 @@ struct DiskquotaDBEntry TimestampTz last_run_time; int16 cost; // ms - bool inited; // this entry is inited, will set to true after the worker finish the frist run. - bool in_use; // this slot is in using. AKA dbid != 0 + bool inited; // this entry is inited, will set to true after the worker finish the frist run. + bool in_use; // this slot is in using. 
AKA dbid != 0 + bool corrupted; // consider this entry as invalid to start the worker on }; typedef enum MonitorDBStatus @@ -249,7 +250,7 @@ extern void invalidate_database_rejectmap(Oid dbid); /* quota model interface*/ extern void init_disk_quota_shmem(void); extern void init_disk_quota_model(uint32 id); -extern void refresh_disk_quota_model(bool force); +extern void refresh_disk_quota_model(DiskquotaDBEntry *dbEntry); extern bool check_diskquota_state_is_ready(void); extern bool quota_check_common(Oid reloid, RelFileNode *relfilenode); diff --git a/src/gp_activetable.c b/src/gp_activetable.c index cf3178b3..cbf6e7b6 100644 --- a/src/gp_activetable.c +++ b/src/gp_activetable.c @@ -378,6 +378,7 @@ gp_fetch_active_tables(bool is_init) if (is_init) { + SIMPLE_FAULT_INJECTOR("diskquota_worker_initialization"); load_table_size(local_table_stats_map); } else diff --git a/src/quotamodel.c b/src/quotamodel.c index 6b8507b3..a0f01dbd 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -227,7 +227,7 @@ static void clear_all_quota_maps(void); static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid *old_keys, Oid *new_keys, int16 segid); /* functions to refresh disk quota model*/ -static void refresh_disk_quota_usage(bool is_init); +static void refresh_disk_quota_usage(DiskquotaDBEntry *dbEntry); static void calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map); static void flush_to_table_size(void); static bool flush_local_reject_map(void); @@ -761,8 +761,10 @@ do_check_diskquota_state_is_ready(void) * recalculate the changed disk usage. */ void -refresh_disk_quota_model(bool is_init) +refresh_disk_quota_model(DiskquotaDBEntry *dbEntry) { + bool is_init = !dbEntry->inited; + SEGCOUNT = getgpsegmentCount(); if (SEGCOUNT <= 0) { @@ -773,7 +775,7 @@ refresh_disk_quota_model(bool is_init) /* skip refresh model when load_quotas failed */ if (load_quotas()) { - refresh_disk_quota_usage(is_init); + refresh_disk_quota_usage(dbEntry); } if (is_init) ereport(LOG, (errmsg("[diskquota] initialize quota model finished"))); } @@ -785,11 +787,12 @@ refresh_disk_quota_model(bool is_init) * process is constructing quota model. */ static void -refresh_disk_quota_usage(bool is_init) +refresh_disk_quota_usage(DiskquotaDBEntry *dbEntry) { bool connected = false; bool pushed_active_snap = false; bool ret = true; + bool is_init = !dbEntry->inited; HTAB *local_active_table_stat_map = NULL; StartTransactionCommand(); @@ -841,6 +844,14 @@ refresh_disk_quota_usage(bool is_init) } PG_CATCH(); { + /* Initialization failed. */ + if (is_init) + { + LWLockAcquire(diskquota_locks.dblist_lock, LW_EXCLUSIVE); + dbEntry->corrupted = true; + LWLockRelease(diskquota_locks.dblist_lock); + PG_RE_THROW(); + } /* Prevents interrupts while cleaning up */ HOLD_INTERRUPTS(); EmitErrorReport(); diff --git a/tests/isolation2/expected/test_worker_init_failure.out b/tests/isolation2/expected/test_worker_init_failure.out new file mode 100644 index 00000000..75e6b6e0 --- /dev/null +++ b/tests/isolation2/expected/test_worker_init_failure.out @@ -0,0 +1,46 @@ +-- +-- Tests for error handling when the worker catches the error during +-- its first run. 
+-- + +-- Function checking whether worker on given db is up +CREATE or REPLACE LANGUAGE plpython2u; +CREATE +CREATE or REPLACE FUNCTION check_worker_presence(dbname text, wait_time int) RETURNS boolean AS $$ import psutil import time worker_name = 'bgworker: [diskquota] ' + dbname time.sleep(wait_time) for proc in psutil.process_iter(): try: if 'postgres' in proc.name().lower(): for val in proc.cmdline(): if worker_name in val: return True except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): pass return False $$ LANGUAGE plpython2u EXECUTE ON MASTER; +CREATE + +-- Test diskquota behavior when an error occurs during the worker's first run. +-- The error leads to process termination. And launcher won't start it again +-- until extension reload or SIGHUP signal. +CREATE EXTENSION diskquota; +CREATE +SELECT check_worker_presence(current_database(), 0); + check_worker_presence +----------------------- + t +(1 row) +SELECT gp_inject_fault('diskquota_worker_initialization', 'error', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1; + gp_inject_fault +----------------- + Success: +(1 row) +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) +SELECT check_worker_presence(current_database(), current_setting('diskquota.worker_timeout')::int / 2); + check_worker_presence +----------------------- + f +(1 row) +-- Reload configuration and check that worker is up again +!\retcode gpstop -u; +(exited with code 0) +SELECT check_worker_presence(current_database(), current_setting('diskquota.worker_timeout')::int / 2); + check_worker_presence +----------------------- + t +(1 row) +DROP EXTENSION diskquota; +DROP diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index 090c5cc5..c61f3d97 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -11,4 +11,5 @@ test: test_per_segment_config test: test_relation_cache test: test_ereport_from_seg test: test_drop_extension +test: test_worker_init_failure test: reset_config diff --git a/tests/isolation2/sql/test_worker_init_failure.sql b/tests/isolation2/sql/test_worker_init_failure.sql new file mode 100644 index 00000000..4e48908a --- /dev/null +++ b/tests/isolation2/sql/test_worker_init_failure.sql @@ -0,0 +1,40 @@ +-- +-- Tests for error handling when the worker catches the error during +-- its first run. +-- + +-- Function checking whether worker on given db is up +CREATE or REPLACE LANGUAGE plpython2u; +CREATE or REPLACE FUNCTION check_worker_presence(dbname text, wait_time int) + RETURNS boolean +AS $$ + import psutil + import time + worker_name = 'bgworker: [diskquota] ' + dbname + time.sleep(wait_time) + for proc in psutil.process_iter(): + try: + if 'postgres' in proc.name().lower(): + for val in proc.cmdline(): + if worker_name in val: + return True + except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): + pass + return False +$$ LANGUAGE plpython2u EXECUTE ON MASTER; + +-- Test diskquota behavior when an error occurs during the worker's first run. +-- The error leads to process termination. And launcher won't start it again +-- until extension reload or SIGHUP signal. 
+CREATE EXTENSION diskquota; +SELECT check_worker_presence(current_database(), 0); +SELECT gp_inject_fault('diskquota_worker_initialization', 'error', dbid) + FROM gp_segment_configuration WHERE role='p' AND content=-1; +SELECT diskquota.init_table_size_table(); +SELECT check_worker_presence(current_database(), + current_setting('diskquota.worker_timeout')::int / 2); +-- Reload configuration and check that worker is up again +!\retcode gpstop -u; +SELECT check_worker_presence(current_database(), + current_setting('diskquota.worker_timeout')::int / 2); +DROP EXTENSION diskquota; From 87b5dc3bb8960d0cc9e115d366fbff457ca0be53 Mon Sep 17 00:00:00 2001 From: Georgy Shelkovy Date: Mon, 2 Oct 2023 16:08:30 +0500 Subject: [PATCH 10/15] Fix bug: diskquota stop working after removing any extension (#379) (#25) Fix the bug caused by #220: After the user removes any extension, the bgworker in the current database will be stopped. Cherry-picked-from: 8210b78 Co-authored-by: Zhang Hao --- src/gp_activetable.c | 5 ++- tests/regress/diskquota_schedule | 1 + .../expected/test_drop_any_extension.out | 34 +++++++++++++++++++ tests/regress/sql/test_drop_any_extension.sql | 23 +++++++++++++ 4 files changed, 60 insertions(+), 3 deletions(-) create mode 100644 tests/regress/expected/test_drop_any_extension.out create mode 100644 tests/regress/sql/test_drop_any_extension.sql diff --git a/src/gp_activetable.c b/src/gp_activetable.c index cbf6e7b6..85450ae6 100644 --- a/src/gp_activetable.c +++ b/src/gp_activetable.c @@ -190,15 +190,14 @@ object_access_hook_QuotaStmt(ObjectAccessType access, Oid classId, Oid objectId, { if (prev_object_access_hook) (*prev_object_access_hook)(access, classId, objectId, subId, arg); - // if is 'drop extension diskquota' + /* if is 'drop extension diskquota' */ if (classId == ExtensionRelationId && access == OAT_DROP) { if (get_extension_oid("diskquota", true) == objectId) { invalidate_database_rejectmap(MyDatabaseId); + diskquota_stop_worker(); } - - diskquota_stop_worker(); return; } diff --git a/tests/regress/diskquota_schedule b/tests/regress/diskquota_schedule index 9805a8e4..3851a040 100644 --- a/tests/regress/diskquota_schedule +++ b/tests/regress/diskquota_schedule @@ -42,5 +42,6 @@ test: test_tablespace_diff_schema test: test_worker_schedule test: test_worker_schedule_exception test: test_dbname_encoding +test: test_drop_any_extension test: test_drop_extension test: reset_config diff --git a/tests/regress/expected/test_drop_any_extension.out b/tests/regress/expected/test_drop_any_extension.out new file mode 100644 index 00000000..1c8fbc66 --- /dev/null +++ b/tests/regress/expected/test_drop_any_extension.out @@ -0,0 +1,34 @@ +CREATE DATABASE test_drop_db; +\c test_drop_db +CREATE EXTENSION diskquota; +CREATE EXTENSION gp_inject_fault; +SELECT diskquota.init_table_size_table(); + init_table_size_table +----------------------- + +(1 row) + +SELECT diskquota.set_schema_quota(current_schema, '1MB'); + set_schema_quota +------------------ + +(1 row) + +CREATE TABLE t(i int); +NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i' as the Greenplum Database data distribution key for this table. +HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew. 
+DROP EXTENSION gp_inject_fault; +-- expect success +INSERT INTO t SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) + +-- expect fail +INSERT INTO t SELECT generate_series(1, 100000); +ERROR: schema's disk space quota exceeded with name: public +DROP EXTENSION diskquota; +\c contrib_regression +DROP DATABASE test_drop_db; diff --git a/tests/regress/sql/test_drop_any_extension.sql b/tests/regress/sql/test_drop_any_extension.sql new file mode 100644 index 00000000..91a95dc2 --- /dev/null +++ b/tests/regress/sql/test_drop_any_extension.sql @@ -0,0 +1,23 @@ +CREATE DATABASE test_drop_db; + +\c test_drop_db + +CREATE EXTENSION diskquota; +CREATE EXTENSION gp_inject_fault; +SELECT diskquota.init_table_size_table(); + +SELECT diskquota.set_schema_quota(current_schema, '1MB'); +CREATE TABLE t(i int); + +DROP EXTENSION gp_inject_fault; + +-- expect success +INSERT INTO t SELECT generate_series(1, 100000); +SELECT diskquota.wait_for_worker_new_epoch(); +-- expect fail +INSERT INTO t SELECT generate_series(1, 100000); + +DROP EXTENSION diskquota; + +\c contrib_regression +DROP DATABASE test_drop_db; From e6899aaac6dc36d095c740d8e09c99b326a189fa Mon Sep 17 00:00:00 2001 From: Georgy Shelkovy Date: Wed, 22 Nov 2023 11:42:29 +0500 Subject: [PATCH 11/15] Invalidate diskquota.table_size entries during startup (#27) Diskquota calculates sizes and stores information in the diskquota.table_size table periodically with a pause in diskquota.naptime, 2 seconds by default. If we restart the cluster during this pause, then diskquota will lose all changes that have occurred since the last save to the diskquota.table_size table. We could create temporary tables, wait when it will be flushed to diskquota.table_size table, restart the cluster, and diskquota would remember the information about the temporary tables. Or we could delete the tables, restart the cluster, and again diskquota will remember information about the deleted tables. This happens because at the start of the cluster, diskquota remembers all the information written to the diskquota.table_size table, but does not check that some tables may have already been deleted. As a solution, we invalidate diskquota.table_size during diskquota worker start in addition to pg_class validation. 
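The new isolation tests (test_temporary_table, test_dropped_table) check the cleanup with a query along these lines, which is expected to return no rows after a cluster restart.

```
-- diskquota.table_size rows whose relation no longer exists in pg_class are stale
SELECT tableid
FROM diskquota.table_size ts
WHERE ts.segid = -1
  AND NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class c WHERE c.oid = ts.tableid);
```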
--- src/diskquota.h | 2 +- src/diskquota_utility.c | 18 +++++-- src/quotamodel.c | 28 ++++++++++- .../expected/test_dropped_table.out | 49 +++++++++++++++++++ .../expected/test_temporary_table.out | 47 ++++++++++++++++++ tests/isolation2/isolation2_schedule | 2 + tests/isolation2/sql/test_dropped_table.sql | 22 +++++++++ tests/isolation2/sql/test_temporary_table.sql | 21 ++++++++ 8 files changed, 182 insertions(+), 7 deletions(-) create mode 100644 tests/isolation2/expected/test_dropped_table.out create mode 100644 tests/isolation2/expected/test_temporary_table.out create mode 100644 tests/isolation2/sql/test_dropped_table.sql create mode 100644 tests/isolation2/sql/test_temporary_table.sql diff --git a/src/diskquota.h b/src/diskquota.h index 58a00f46..0114b4f2 100644 --- a/src/diskquota.h +++ b/src/diskquota.h @@ -265,7 +265,7 @@ extern bool diskquota_hardlimit; extern int SEGCOUNT; extern int worker_spi_get_extension_version(int *major, int *minor); extern void truncateStringInfo(StringInfo str, int nchars); -extern List *get_rel_oid_list(void); +extern List *get_rel_oid_list(bool is_init); extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage, Oid relam); extern Relation diskquota_relation_open(Oid relid); extern bool get_rel_name_namespace(Oid relid, Oid *nsOid, char *relname); diff --git a/src/diskquota_utility.c b/src/diskquota_utility.c index f406809c..acf5abb9 100644 --- a/src/diskquota_utility.c +++ b/src/diskquota_utility.c @@ -113,8 +113,6 @@ static float4 get_per_segment_ratio(Oid spcoid); static bool to_delete_quota(QuotaType type, int64 quota_limit_mb, float4 segratio); static void check_role(Oid roleoid, char *rolname, int64 quota_limit_mb); -List *get_rel_oid_list(void); - /* ---- Help Functions to set quota limit. ---- */ /* * Initialize table diskquota.table_size. @@ -1294,17 +1292,24 @@ worker_spi_get_extension_version(int *major, int *minor) * Get the list of oids of the tables which diskquota * needs to care about in the database. * Firstly the all the table oids which relkind is 'r' - * or 'm' and not system table. + * or 'm' and not system table. On init stage, oids from + * diskquota.table_size are added to invalidate them. * Then, fetch the indexes of those tables. */ List * -get_rel_oid_list(void) +get_rel_oid_list(bool is_init) { List *oidlist = NIL; int ret; - ret = SPI_execute_with_args("select oid from pg_class where oid >= $1 and (relkind='r' or relkind='m')", 1, +#define SELECT_FROM_PG_CATALOG_PG_CLASS "select oid from pg_catalog.pg_class where oid >= $1 and relkind in ('r', 'm')" + + ret = SPI_execute_with_args(is_init ? SELECT_FROM_PG_CATALOG_PG_CLASS + " union distinct" + " select tableid from diskquota.table_size where segid = -1" + : SELECT_FROM_PG_CATALOG_PG_CLASS, + 1, (Oid[]){ OIDOID, }, @@ -1312,6 +1317,9 @@ get_rel_oid_list(void) ObjectIdGetDatum(FirstNormalObjectId), }, NULL, false, 0); + +#undef SELECT_FROM_PG_CATALOG_PG_CLASS + if (ret != SPI_OK_SELECT) elog(ERROR, "cannot fetch in pg_class. 
error code %d", ret); TupleDesc tupdesc = SPI_tuptable->tupdesc; diff --git a/src/quotamodel.c b/src/quotamodel.c index a0f01dbd..cdd48412 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -247,6 +247,8 @@ static bool get_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag static void reset_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); static void set_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); +static void delete_from_table_size_map(char *str); + /* add a new entry quota or update the old entry quota */ static void update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid) @@ -923,6 +925,10 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) TableEntryKey active_table_key; List *oidlist; ListCell *l; + int delete_entries_num = 0; + StringInfoData delete_statement; + + initStringInfo(&delete_statement); /* * unset is_exist flag for tsentry in table_size_map this is used to @@ -939,7 +945,7 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) * calculate the file size for active table and update namespace_size_map * and role_size_map */ - oidlist = get_rel_oid_list(); + oidlist = get_rel_oid_list(is_init); oidlist = merge_uncommitted_table_to_oidlist(oidlist); @@ -973,6 +979,23 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) { elog(WARNING, "cache lookup failed for relation %u", relOid); LWLockRelease(diskquota_locks.relation_cache_lock); + + if (!is_init) continue; + + for (int i = -1; i < SEGCOUNT; i++) + { + appendStringInfo(&delete_statement, "%s(%u,%d)", (delete_entries_num == 0) ? " " : ", ", relOid, i); + + delete_entries_num++; + + if (delete_entries_num > SQL_MAX_VALUES_NUMBER) + { + delete_from_table_size_map(delete_statement.data); + resetStringInfo(&delete_statement); + delete_entries_num = 0; + } + } + continue; } relnamespace = relation_entry->namespaceoid; @@ -1112,6 +1135,9 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) } } + if (delete_entries_num) delete_from_table_size_map(delete_statement.data); + + pfree(delete_statement.data); list_free(oidlist); /* diff --git a/tests/isolation2/expected/test_dropped_table.out b/tests/isolation2/expected/test_dropped_table.out new file mode 100644 index 00000000..6ab80521 --- /dev/null +++ b/tests/isolation2/expected/test_dropped_table.out @@ -0,0 +1,49 @@ +-- Ensure diskquota does not save information about dropped table during restart cluster by invalidates it at startup + +1: CREATE SCHEMA dropped_schema; +CREATE +1: SET search_path TO dropped_schema; +SET +1: SELECT diskquota.set_schema_quota('dropped_schema', '1 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: CREATE TABLE dropped_table(id int) DISTRIBUTED BY (id); +CREATE +1: INSERT INTO dropped_table SELECT generate_series(1, 100000); +INSERT 100000 +-- Wait for the diskquota bgworker refreshing the size of 'dropped_table'. +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: DROP TABLE dropped_table; +DROP +1q: ... 
+ +-- Restart cluster fastly +!\retcode gpstop -afr; +-- start_ignore +-- end_ignore +(exited with code 0) + +-- Indicates that there is no dropped table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'dropped_table'; + oid +----- +(0 rows) +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; + tableid +--------- +(0 rows) +1: DROP SCHEMA dropped_schema CASCADE; +DROP +1q: ... diff --git a/tests/isolation2/expected/test_temporary_table.out b/tests/isolation2/expected/test_temporary_table.out new file mode 100644 index 00000000..44b592a1 --- /dev/null +++ b/tests/isolation2/expected/test_temporary_table.out @@ -0,0 +1,47 @@ +-- Ensure diskquota does not save information about temporary table during restart cluster by invalidates it at startup + +1: CREATE SCHEMA temporary_schema; +CREATE +1: SET search_path TO temporary_schema; +SET +1: SELECT diskquota.set_schema_quota('temporary_schema', '1 MB'); + set_schema_quota +------------------ + +(1 row) +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1: CREATE TEMPORARY TABLE temporary_table(id int) DISTRIBUTED BY (id); +CREATE +1: INSERT INTO temporary_table SELECT generate_series(1, 100000); +INSERT 100000 +-- Wait for the diskquota bgworker refreshing the size of 'temporary_table'. +1: SELECT diskquota.wait_for_worker_new_epoch(); + wait_for_worker_new_epoch +--------------------------- + t +(1 row) +1q: ... + +-- Restart cluster fastly +!\retcode gpstop -afr; +-- start_ignore +-- end_ignore +(exited with code 0) + +-- Indicates that there is no temporary table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'temporary_table'; + oid +----- +(0 rows) +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; + tableid +--------- +(0 rows) +1: DROP SCHEMA temporary_schema CASCADE; +DROP +1q: ... diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index c61f3d97..c9c0efbe 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -5,6 +5,8 @@ test: test_relation_size test: test_rejectmap test: test_vacuum test: test_truncate +test: test_temporary_table +test: test_dropped_table test: test_postmaster_restart test: test_worker_timeout test: test_per_segment_config diff --git a/tests/isolation2/sql/test_dropped_table.sql b/tests/isolation2/sql/test_dropped_table.sql new file mode 100644 index 00000000..e05949e7 --- /dev/null +++ b/tests/isolation2/sql/test_dropped_table.sql @@ -0,0 +1,22 @@ +-- Ensure diskquota does not save information about dropped table during restart cluster by invalidates it at startup + +1: CREATE SCHEMA dropped_schema; +1: SET search_path TO dropped_schema; +1: SELECT diskquota.set_schema_quota('dropped_schema', '1 MB'); +1: SELECT diskquota.wait_for_worker_new_epoch(); +1: CREATE TABLE dropped_table(id int) DISTRIBUTED BY (id); +1: INSERT INTO dropped_table SELECT generate_series(1, 100000); +-- Wait for the diskquota bgworker refreshing the size of 'dropped_table'. 
+1: SELECT diskquota.wait_for_worker_new_epoch(); +1: DROP TABLE dropped_table; +1q: + +-- Restart cluster fastly +!\retcode gpstop -afr; + +-- Indicates that there is no dropped table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'dropped_table'; +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; +1: DROP SCHEMA dropped_schema CASCADE; +1q: diff --git a/tests/isolation2/sql/test_temporary_table.sql b/tests/isolation2/sql/test_temporary_table.sql new file mode 100644 index 00000000..606f2fa5 --- /dev/null +++ b/tests/isolation2/sql/test_temporary_table.sql @@ -0,0 +1,21 @@ +-- Ensure diskquota does not save information about temporary table during restart cluster by invalidates it at startup + +1: CREATE SCHEMA temporary_schema; +1: SET search_path TO temporary_schema; +1: SELECT diskquota.set_schema_quota('temporary_schema', '1 MB'); +1: SELECT diskquota.wait_for_worker_new_epoch(); +1: CREATE TEMPORARY TABLE temporary_table(id int) DISTRIBUTED BY (id); +1: INSERT INTO temporary_table SELECT generate_series(1, 100000); +-- Wait for the diskquota bgworker refreshing the size of 'temporary_table'. +1: SELECT diskquota.wait_for_worker_new_epoch(); +1q: + +-- Restart cluster fastly +!\retcode gpstop -afr; + +-- Indicates that there is no temporary table in pg_catalog.pg_class +1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'temporary_table'; +-- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class +1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; +1: DROP SCHEMA temporary_schema CASCADE; +1q: From 49499953e5c396ad44b9ad8ab3e73c941a2662da Mon Sep 17 00:00:00 2001 From: Evgeniy Ratkov Date: Thu, 23 Nov 2023 06:34:15 +0300 Subject: [PATCH 12/15] Revert "ADBDEV-3685 Error handling for disqkuota worker startup stage (#20)" This reverts commit 3b06e37e4882b4a3d95448df1c944ded79ff9c07. 
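
For context on what the revert below removes, this is a small standalone C model (not the extension source) of the corrupted latch that the reverted commit had added: a failure during the worker's first refresh marks the database entry as corrupted, the launcher skips such entries when choosing the next database, and a SIGHUP (for example via gpstop -u) clears the flags so the workers can be retried. Field and function names mirror the diff; locking and scheduling details are simplified.

```
/* Minimal model of the per-database "corrupted" latch removed by this
 * revert: set on a failed first refresh, skipped by the scheduler, and
 * cleared again when the launcher receives SIGHUP. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NUM_MONITORED_DB 4

typedef struct DBEntry
{
	unsigned int dbid;
	bool         in_use;
	bool         inited;
	bool         corrupted; /* first run failed; do not reschedule */
} DBEntry;

static DBEntry dbArray[MAX_NUM_MONITORED_DB];

/* Worker side: mark the entry if the initial refresh throws. */
static void
mark_init_failure(DBEntry *db)
{
	db->corrupted = true;
}

/* Launcher side: pick the next runnable entry, skipping corrupted ones. */
static DBEntry *
next_db(void)
{
	for (int i = 0; i < MAX_NUM_MONITORED_DB; i++)
	{
		DBEntry *db = &dbArray[i];
		if (!db->in_use || db->dbid == 0 || db->corrupted) continue;
		return db;
	}
	return NULL;
}

/* SIGHUP handler path: give corrupted entries another chance. */
static void
reset_corruption(void)
{
	for (int i = 0; i < MAX_NUM_MONITORED_DB; i++)
		if (dbArray[i].corrupted) dbArray[i].corrupted = false;
}

int
main(void)
{
	dbArray[0] = (DBEntry){.dbid = 16384, .in_use = true};

	mark_init_failure(&dbArray[0]);
	printf("after failure: %s\n", next_db() ? "scheduled" : "skipped");

	reset_corruption(); /* e.g. after gpstop -u */
	printf("after SIGHUP:  %s\n", next_db() ? "scheduled" : "skipped");
	return 0;
}
```

After the revert, an error during the worker's first run goes through the same catch path as any later refresh error, so none of this bookkeeping remains.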
--- src/diskquota.c | 34 ++++---------- src/diskquota.h | 7 ++- src/gp_activetable.c | 1 - src/quotamodel.c | 19 ++------ .../expected/test_worker_init_failure.out | 46 ------------------- tests/isolation2/isolation2_schedule | 1 - .../sql/test_worker_init_failure.sql | 40 ---------------- 7 files changed, 15 insertions(+), 133 deletions(-) delete mode 100644 tests/isolation2/expected/test_worker_init_failure.out delete mode 100644 tests/isolation2/sql/test_worker_init_failure.sql diff --git a/src/diskquota.c b/src/diskquota.c index 4f0fe43c..d5630700 100644 --- a/src/diskquota.c +++ b/src/diskquota.c @@ -142,7 +142,6 @@ static void vacuum_db_entry(DiskquotaDBEntry *db); static void init_bgworker_handles(void); static BackgroundWorkerHandle *get_bgworker_handle(uint32 worker_id); static void free_bgworker_handle(uint32 worker_id); -static void resetBackgroundWorkerCorruption(void); #if GP_VERSION_NUM < 70000 /* WaitForBackgroundWorkerShutdown is copied from gpdb7 */ static BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle); @@ -526,7 +525,7 @@ disk_quota_worker_main(Datum main_arg) if (!diskquota_is_paused()) { /* Refresh quota model with init mode */ - refresh_disk_quota_model(MyWorkerInfo->dbEntry); + refresh_disk_quota_model(!MyWorkerInfo->dbEntry->inited); MyWorkerInfo->dbEntry->inited = true; is_gang_destroyed = false; } @@ -763,7 +762,6 @@ disk_quota_launcher_main(Datum main_arg) { elog(DEBUG1, "[diskquota] got sighup"); got_sighup = false; - resetBackgroundWorkerCorruption(); ProcessConfigFile(PGC_SIGHUP); } @@ -789,12 +787,11 @@ disk_quota_launcher_main(Datum main_arg) * When curDB->in_use is false means dbEtnry has been romoved * When curDB->dbid doesn't equtal curDBId, it means the slot has * been used by another db - * When curDB->corrupted is true means worker couldn't initialize - * the extension in the first run. + * * For the above conditions, we just skip this loop and try to fetch * next db to run. 
*/ - if (curDB == NULL || !curDB->in_use || curDB->dbid != curDBId || curDB->corrupted) + if (curDB == NULL || !curDB->in_use || curDB->dbid != curDBId) { advance_one_db = true; continue; @@ -1799,9 +1796,7 @@ next_db(DiskquotaDBEntry *curDB) if (nextSlot >= MAX_NUM_MONITORED_DB) nextSlot = 0; DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[nextSlot]; nextSlot++; - if (!dbEntry->in_use || dbEntry->workerId != INVALID_WORKER_ID || dbEntry->dbid == InvalidOid || - dbEntry->corrupted) - continue; + if (!dbEntry->in_use || dbEntry->workerId != INVALID_WORKER_ID || dbEntry->dbid == InvalidOid) continue; /* TODO: should release the invalid db related things */ if (!is_valid_dbid(dbEntry->dbid)) continue; result = dbEntry; @@ -1865,11 +1860,10 @@ static void vacuum_db_entry(DiskquotaDBEntry *db) { if (db == NULL) return; - db->dbid = InvalidOid; - db->inited = false; - db->workerId = INVALID_WORKER_ID; - db->in_use = false; - db->corrupted = false; + db->dbid = InvalidOid; + db->inited = false; + db->workerId = INVALID_WORKER_ID; + db->in_use = false; } static void @@ -1904,18 +1898,6 @@ free_bgworker_handle(uint32 worker_id) } } -static void -resetBackgroundWorkerCorruption(void) -{ - LWLockAcquire(diskquota_locks.dblist_lock, LW_EXCLUSIVE); - for (int i = 0; i < MAX_NUM_MONITORED_DB; i++) - { - DiskquotaDBEntry *dbEntry = &DiskquotaLauncherShmem->dbArray[i]; - if (dbEntry->corrupted) dbEntry->corrupted = false; - } - LWLockRelease(diskquota_locks.dblist_lock); -} - #if GP_VERSION_NUM < 70000 static BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle) diff --git a/src/diskquota.h b/src/diskquota.h index 0114b4f2..12015b86 100644 --- a/src/diskquota.h +++ b/src/diskquota.h @@ -215,9 +215,8 @@ struct DiskquotaDBEntry TimestampTz last_run_time; int16 cost; // ms - bool inited; // this entry is inited, will set to true after the worker finish the frist run. - bool in_use; // this slot is in using. AKA dbid != 0 - bool corrupted; // consider this entry as invalid to start the worker on + bool inited; // this entry is inited, will set to true after the worker finish the frist run. + bool in_use; // this slot is in using. 
AKA dbid != 0 }; typedef enum MonitorDBStatus @@ -250,7 +249,7 @@ extern void invalidate_database_rejectmap(Oid dbid); /* quota model interface*/ extern void init_disk_quota_shmem(void); extern void init_disk_quota_model(uint32 id); -extern void refresh_disk_quota_model(DiskquotaDBEntry *dbEntry); +extern void refresh_disk_quota_model(bool force); extern bool check_diskquota_state_is_ready(void); extern bool quota_check_common(Oid reloid, RelFileNode *relfilenode); diff --git a/src/gp_activetable.c b/src/gp_activetable.c index 85450ae6..e234d99b 100644 --- a/src/gp_activetable.c +++ b/src/gp_activetable.c @@ -377,7 +377,6 @@ gp_fetch_active_tables(bool is_init) if (is_init) { - SIMPLE_FAULT_INJECTOR("diskquota_worker_initialization"); load_table_size(local_table_stats_map); } else diff --git a/src/quotamodel.c b/src/quotamodel.c index cdd48412..a9cb8248 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -227,7 +227,7 @@ static void clear_all_quota_maps(void); static void transfer_table_for_quota(int64 totalsize, QuotaType type, Oid *old_keys, Oid *new_keys, int16 segid); /* functions to refresh disk quota model*/ -static void refresh_disk_quota_usage(DiskquotaDBEntry *dbEntry); +static void refresh_disk_quota_usage(bool is_init); static void calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map); static void flush_to_table_size(void); static bool flush_local_reject_map(void); @@ -763,10 +763,8 @@ do_check_diskquota_state_is_ready(void) * recalculate the changed disk usage. */ void -refresh_disk_quota_model(DiskquotaDBEntry *dbEntry) +refresh_disk_quota_model(bool is_init) { - bool is_init = !dbEntry->inited; - SEGCOUNT = getgpsegmentCount(); if (SEGCOUNT <= 0) { @@ -777,7 +775,7 @@ refresh_disk_quota_model(DiskquotaDBEntry *dbEntry) /* skip refresh model when load_quotas failed */ if (load_quotas()) { - refresh_disk_quota_usage(dbEntry); + refresh_disk_quota_usage(is_init); } if (is_init) ereport(LOG, (errmsg("[diskquota] initialize quota model finished"))); } @@ -789,12 +787,11 @@ refresh_disk_quota_model(DiskquotaDBEntry *dbEntry) * process is constructing quota model. */ static void -refresh_disk_quota_usage(DiskquotaDBEntry *dbEntry) +refresh_disk_quota_usage(bool is_init) { bool connected = false; bool pushed_active_snap = false; bool ret = true; - bool is_init = !dbEntry->inited; HTAB *local_active_table_stat_map = NULL; StartTransactionCommand(); @@ -846,14 +843,6 @@ refresh_disk_quota_usage(DiskquotaDBEntry *dbEntry) } PG_CATCH(); { - /* Initialization failed. */ - if (is_init) - { - LWLockAcquire(diskquota_locks.dblist_lock, LW_EXCLUSIVE); - dbEntry->corrupted = true; - LWLockRelease(diskquota_locks.dblist_lock); - PG_RE_THROW(); - } /* Prevents interrupts while cleaning up */ HOLD_INTERRUPTS(); EmitErrorReport(); diff --git a/tests/isolation2/expected/test_worker_init_failure.out b/tests/isolation2/expected/test_worker_init_failure.out deleted file mode 100644 index 75e6b6e0..00000000 --- a/tests/isolation2/expected/test_worker_init_failure.out +++ /dev/null @@ -1,46 +0,0 @@ --- --- Tests for error handling when the worker catches the error during --- its first run. 
--- - --- Function checking whether worker on given db is up -CREATE or REPLACE LANGUAGE plpython2u; -CREATE -CREATE or REPLACE FUNCTION check_worker_presence(dbname text, wait_time int) RETURNS boolean AS $$ import psutil import time worker_name = 'bgworker: [diskquota] ' + dbname time.sleep(wait_time) for proc in psutil.process_iter(): try: if 'postgres' in proc.name().lower(): for val in proc.cmdline(): if worker_name in val: return True except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): pass return False $$ LANGUAGE plpython2u EXECUTE ON MASTER; -CREATE - --- Test diskquota behavior when an error occurs during the worker's first run. --- The error leads to process termination. And launcher won't start it again --- until extension reload or SIGHUP signal. -CREATE EXTENSION diskquota; -CREATE -SELECT check_worker_presence(current_database(), 0); - check_worker_presence ------------------------ - t -(1 row) -SELECT gp_inject_fault('diskquota_worker_initialization', 'error', dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1; - gp_inject_fault ------------------ - Success: -(1 row) -SELECT diskquota.init_table_size_table(); - init_table_size_table ------------------------ - -(1 row) -SELECT check_worker_presence(current_database(), current_setting('diskquota.worker_timeout')::int / 2); - check_worker_presence ------------------------ - f -(1 row) --- Reload configuration and check that worker is up again -!\retcode gpstop -u; -(exited with code 0) -SELECT check_worker_presence(current_database(), current_setting('diskquota.worker_timeout')::int / 2); - check_worker_presence ------------------------ - t -(1 row) -DROP EXTENSION diskquota; -DROP diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index c9c0efbe..5ed558d6 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -13,5 +13,4 @@ test: test_per_segment_config test: test_relation_cache test: test_ereport_from_seg test: test_drop_extension -test: test_worker_init_failure test: reset_config diff --git a/tests/isolation2/sql/test_worker_init_failure.sql b/tests/isolation2/sql/test_worker_init_failure.sql deleted file mode 100644 index 4e48908a..00000000 --- a/tests/isolation2/sql/test_worker_init_failure.sql +++ /dev/null @@ -1,40 +0,0 @@ --- --- Tests for error handling when the worker catches the error during --- its first run. --- - --- Function checking whether worker on given db is up -CREATE or REPLACE LANGUAGE plpython2u; -CREATE or REPLACE FUNCTION check_worker_presence(dbname text, wait_time int) - RETURNS boolean -AS $$ - import psutil - import time - worker_name = 'bgworker: [diskquota] ' + dbname - time.sleep(wait_time) - for proc in psutil.process_iter(): - try: - if 'postgres' in proc.name().lower(): - for val in proc.cmdline(): - if worker_name in val: - return True - except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): - pass - return False -$$ LANGUAGE plpython2u EXECUTE ON MASTER; - --- Test diskquota behavior when an error occurs during the worker's first run. --- The error leads to process termination. And launcher won't start it again --- until extension reload or SIGHUP signal. 
-CREATE EXTENSION diskquota; -SELECT check_worker_presence(current_database(), 0); -SELECT gp_inject_fault('diskquota_worker_initialization', 'error', dbid) - FROM gp_segment_configuration WHERE role='p' AND content=-1; -SELECT diskquota.init_table_size_table(); -SELECT check_worker_presence(current_database(), - current_setting('diskquota.worker_timeout')::int / 2); --- Reload configuration and check that worker is up again -!\retcode gpstop -u; -SELECT check_worker_presence(current_database(), - current_setting('diskquota.worker_timeout')::int / 2); -DROP EXTENSION diskquota; From efa7182fce9b359941dc733042547afc5a64dce6 Mon Sep 17 00:00:00 2001 From: Evgeniy Ratkov Date: Thu, 23 Nov 2023 06:34:22 +0300 Subject: [PATCH 13/15] Revert "temporary disable upgrade tests" This reverts commit 50ed2e4e1883ec8ec4e7086b750cb28cdc5a2dc0. --- CMakeLists.txt | 1 + concourse/scripts/build_diskquota.sh | 3 +++ concourse/scripts/test_diskquota.sh | 2 ++ 3 files changed, 6 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5d44f3e4..38d32e7c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -150,6 +150,7 @@ BuildInfo_Create(${build_info_PATH} # Add installcheck targets add_subdirectory(tests) +add_subdirectory(upgrade_test) # NOTE: keep install part at the end of file, to overwrite previous binary install(PROGRAMS "cmake/install_gpdb_component" DESTINATION ".") diff --git a/concourse/scripts/build_diskquota.sh b/concourse/scripts/build_diskquota.sh index 10b66e34..7b2ee30b 100755 --- a/concourse/scripts/build_diskquota.sh +++ b/concourse/scripts/build_diskquota.sh @@ -11,7 +11,10 @@ function pkg() { export CXX="$(which g++)" pushd /home/gpadmin/diskquota_artifacts + local last_release_path + last_release_path=$(readlink -eq /home/gpadmin/last_released_diskquota_bin/diskquota-*.tar.gz) cmake /home/gpadmin/diskquota_src \ + -DDISKQUOTA_LAST_RELEASE_PATH="${last_release_path}" \ -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" cmake --build . --target create_artifact popd diff --git a/concourse/scripts/test_diskquota.sh b/concourse/scripts/test_diskquota.sh index 245196ff..85b2bce1 100755 --- a/concourse/scripts/test_diskquota.sh +++ b/concourse/scripts/test_diskquota.sh @@ -38,6 +38,8 @@ function _main() { activate_standby time cmake --build . --target installcheck fi + # Run upgrade test (with standby master) + time cmake --build . --target upgradecheck popd } From 7b948148ca704058c7d2576d1e7366258864d6ec Mon Sep 17 00:00:00 2001 From: Evgeniy Ratkov Date: Thu, 23 Nov 2023 06:34:36 +0300 Subject: [PATCH 14/15] Revert "add fixes to run regression tests in Docker container (#14)" This reverts commit d5b5d1ecf2ffb5afe08f3ee910807b6a9a5e9932. --- arenadata/README.md | 28 ---------------------------- concourse/scripts/entry.sh | 15 ++++++++++++++- tests/CMakeLists.txt | 1 + 3 files changed, 15 insertions(+), 29 deletions(-) delete mode 100644 arenadata/README.md diff --git a/arenadata/README.md b/arenadata/README.md deleted file mode 100644 index fff0b986..00000000 --- a/arenadata/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Run regression tests in Docker container - -You can build your Docker image from GPDB source or use prebuilt images from hub.adsw.io. -How to build Docker image: (["readme.md"](https://github.com/arenadata/gpdb/blob/f7ff7c8ecae4ce7ab3b73fd46171cdaa457b3591/arenadata/readme.md)). - -1. Download the cmake-3.20 install script from ([source](https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-linux-x86_64.sh)). - -2. Build diskquota in the Docker container. 
-Change and to the appropriate paths on your local machine. - -``` -docker run --rm -it -e DISKQUOTA_OS=rhel7 \ - -v /tmp/diskquota_artifacts:/home/gpadmin/diskquota_artifacts \ - -v :/home/gpadmin/diskquota_src \ - -v :/home/gpadmin/bin_cmake/cmake-3.20.0-linux-x86_64.sh \ - hub.adsw.io/library/gpdb6_regress:latest diskquota_src/concourse/scripts/entry.sh build -``` - -3. Run tests. -Change and to the appropriate paths on your local machine. - -``` -docker run --rm -it --sysctl 'kernel.sem=500 1024000 200 4096' \ - -v /tmp/diskquota_artifacts:/home/gpadmin/bin_diskquota \ - -v :/home/gpadmin/diskquota_src \ - -v :/home/gpadmin/bin_cmake/cmake-3.20.0-linux-x86_64.sh \ - hub.adsw.io/library/gpdb6_regress:latest diskquota_src/concourse/scripts/entry.sh test -``` diff --git a/concourse/scripts/entry.sh b/concourse/scripts/entry.sh index 3324713d..b3c70ddd 100755 --- a/concourse/scripts/entry.sh +++ b/concourse/scripts/entry.sh @@ -108,12 +108,15 @@ setup_gpadmin() { fi mkdir -p /home/gpadmin chown gpadmin:gpadmin /home/gpadmin + + chown -R gpadmin:gpadmin /tmp/build + ln -s "${CONCOURSE_WORK_DIR}"/* /home/gpadmin } # Extract gpdb binary function install_gpdb() { [ ! -d /usr/local/greenplum-db-devel ] && mkdir -p /usr/local/greenplum-db-devel - tar -xzf "${CONCOURSE_WORK_DIR}"/bin_gpdb/bin_gpdb.tar.gz -C /usr/local/greenplum-db-devel + tar -xzf "${CONCOURSE_WORK_DIR}"/bin_gpdb/*.tar.gz -C /usr/local/greenplum-db-devel chown -R gpadmin:gpadmin /usr/local/greenplum-db-devel } @@ -127,11 +130,21 @@ function create_fake_gpdb_src() { grep -rhw '/usr/local/greenplum-db-devel' -e 'abs_top_srcdir = .*' |\ head -n 1 | awk '{ print $NF; }')" + if [ -d "${fake_gpdb_src}" ]; then + echo "Fake gpdb source directory has been configured." + return + fi + pushd /home/gpadmin/gpdb_src ./configure --prefix=/usr/local/greenplum-db-devel \ --without-zstd \ --disable-orca --disable-gpcloud --enable-debug-extensions popd + + local fake_root + fake_root=$(dirname "${fake_gpdb_src}") + mkdir -p "${fake_root}" + ln -s /home/gpadmin/gpdb_src "${fake_gpdb_src}" } # Setup common environment diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 895d7f9f..ee56ea15 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -31,6 +31,7 @@ RegressTarget_Add(regress EXCLUDE_FAULT_INJECT_TEST ${exclude_fault_injector} REGRESS_OPTS --load-extension=gp_inject_fault + --load-extension=diskquota_test --dbname=contrib_regression) RegressTarget_Add(isolation2 From 6212270c31e36e65cb65a4c1c906d3684a549b16 Mon Sep 17 00:00:00 2001 From: Evgeniy Ratkov Date: Thu, 23 Nov 2023 06:48:49 +0300 Subject: [PATCH 15/15] Revert "Invalidate diskquota.table_size entries during startup (#27)" This reverts commit e6899aaac6dc36d095c740d8e09c99b326a189fa. 
--- src/diskquota.h | 2 +- src/diskquota_utility.c | 18 ++----- src/quotamodel.c | 28 +---------- .../expected/test_dropped_table.out | 49 ------------------- .../expected/test_temporary_table.out | 47 ------------------ tests/isolation2/isolation2_schedule | 2 - tests/isolation2/sql/test_dropped_table.sql | 22 --------- tests/isolation2/sql/test_temporary_table.sql | 21 -------- 8 files changed, 7 insertions(+), 182 deletions(-) delete mode 100644 tests/isolation2/expected/test_dropped_table.out delete mode 100644 tests/isolation2/expected/test_temporary_table.out delete mode 100644 tests/isolation2/sql/test_dropped_table.sql delete mode 100644 tests/isolation2/sql/test_temporary_table.sql diff --git a/src/diskquota.h b/src/diskquota.h index 12015b86..f044773b 100644 --- a/src/diskquota.h +++ b/src/diskquota.h @@ -264,7 +264,7 @@ extern bool diskquota_hardlimit; extern int SEGCOUNT; extern int worker_spi_get_extension_version(int *major, int *minor); extern void truncateStringInfo(StringInfo str, int nchars); -extern List *get_rel_oid_list(bool is_init); +extern List *get_rel_oid_list(void); extern int64 calculate_relation_size_all_forks(RelFileNodeBackend *rnode, char relstorage, Oid relam); extern Relation diskquota_relation_open(Oid relid); extern bool get_rel_name_namespace(Oid relid, Oid *nsOid, char *relname); diff --git a/src/diskquota_utility.c b/src/diskquota_utility.c index acf5abb9..f406809c 100644 --- a/src/diskquota_utility.c +++ b/src/diskquota_utility.c @@ -113,6 +113,8 @@ static float4 get_per_segment_ratio(Oid spcoid); static bool to_delete_quota(QuotaType type, int64 quota_limit_mb, float4 segratio); static void check_role(Oid roleoid, char *rolname, int64 quota_limit_mb); +List *get_rel_oid_list(void); + /* ---- Help Functions to set quota limit. ---- */ /* * Initialize table diskquota.table_size. @@ -1292,24 +1294,17 @@ worker_spi_get_extension_version(int *major, int *minor) * Get the list of oids of the tables which diskquota * needs to care about in the database. * Firstly the all the table oids which relkind is 'r' - * or 'm' and not system table. On init stage, oids from - * diskquota.table_size are added to invalidate them. + * or 'm' and not system table. * Then, fetch the indexes of those tables. */ List * -get_rel_oid_list(bool is_init) +get_rel_oid_list(void) { List *oidlist = NIL; int ret; -#define SELECT_FROM_PG_CATALOG_PG_CLASS "select oid from pg_catalog.pg_class where oid >= $1 and relkind in ('r', 'm')" - - ret = SPI_execute_with_args(is_init ? SELECT_FROM_PG_CATALOG_PG_CLASS - " union distinct" - " select tableid from diskquota.table_size where segid = -1" - : SELECT_FROM_PG_CATALOG_PG_CLASS, - 1, + ret = SPI_execute_with_args("select oid from pg_class where oid >= $1 and (relkind='r' or relkind='m')", 1, (Oid[]){ OIDOID, }, @@ -1317,9 +1312,6 @@ get_rel_oid_list(bool is_init) ObjectIdGetDatum(FirstNormalObjectId), }, NULL, false, 0); - -#undef SELECT_FROM_PG_CATALOG_PG_CLASS - if (ret != SPI_OK_SELECT) elog(ERROR, "cannot fetch in pg_class. 
error code %d", ret); TupleDesc tupdesc = SPI_tuptable->tupdesc; diff --git a/src/quotamodel.c b/src/quotamodel.c index a9cb8248..6b8507b3 100644 --- a/src/quotamodel.c +++ b/src/quotamodel.c @@ -247,8 +247,6 @@ static bool get_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag static void reset_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); static void set_table_size_entry_flag(TableSizeEntry *entry, TableSizeEntryFlag flag); -static void delete_from_table_size_map(char *str); - /* add a new entry quota or update the old entry quota */ static void update_size_for_quota(int64 size, QuotaType type, Oid *keys, int16 segid) @@ -914,10 +912,6 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) TableEntryKey active_table_key; List *oidlist; ListCell *l; - int delete_entries_num = 0; - StringInfoData delete_statement; - - initStringInfo(&delete_statement); /* * unset is_exist flag for tsentry in table_size_map this is used to @@ -934,7 +928,7 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) * calculate the file size for active table and update namespace_size_map * and role_size_map */ - oidlist = get_rel_oid_list(is_init); + oidlist = get_rel_oid_list(); oidlist = merge_uncommitted_table_to_oidlist(oidlist); @@ -968,23 +962,6 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) { elog(WARNING, "cache lookup failed for relation %u", relOid); LWLockRelease(diskquota_locks.relation_cache_lock); - - if (!is_init) continue; - - for (int i = -1; i < SEGCOUNT; i++) - { - appendStringInfo(&delete_statement, "%s(%u,%d)", (delete_entries_num == 0) ? " " : ", ", relOid, i); - - delete_entries_num++; - - if (delete_entries_num > SQL_MAX_VALUES_NUMBER) - { - delete_from_table_size_map(delete_statement.data); - resetStringInfo(&delete_statement); - delete_entries_num = 0; - } - } - continue; } relnamespace = relation_entry->namespaceoid; @@ -1124,9 +1101,6 @@ calculate_table_disk_usage(bool is_init, HTAB *local_active_table_stat_map) } } - if (delete_entries_num) delete_from_table_size_map(delete_statement.data); - - pfree(delete_statement.data); list_free(oidlist); /* diff --git a/tests/isolation2/expected/test_dropped_table.out b/tests/isolation2/expected/test_dropped_table.out deleted file mode 100644 index 6ab80521..00000000 --- a/tests/isolation2/expected/test_dropped_table.out +++ /dev/null @@ -1,49 +0,0 @@ --- Ensure diskquota does not save information about dropped table during restart cluster by invalidates it at startup - -1: CREATE SCHEMA dropped_schema; -CREATE -1: SET search_path TO dropped_schema; -SET -1: SELECT diskquota.set_schema_quota('dropped_schema', '1 MB'); - set_schema_quota ------------------- - -(1 row) -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1: CREATE TABLE dropped_table(id int) DISTRIBUTED BY (id); -CREATE -1: INSERT INTO dropped_table SELECT generate_series(1, 100000); -INSERT 100000 --- Wait for the diskquota bgworker refreshing the size of 'dropped_table'. -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1: DROP TABLE dropped_table; -DROP -1q: ... 
- --- Restart cluster fastly -!\retcode gpstop -afr; --- start_ignore --- end_ignore -(exited with code 0) - --- Indicates that there is no dropped table in pg_catalog.pg_class -1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'dropped_table'; - oid ------ -(0 rows) --- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class -1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; - tableid ---------- -(0 rows) -1: DROP SCHEMA dropped_schema CASCADE; -DROP -1q: ... diff --git a/tests/isolation2/expected/test_temporary_table.out b/tests/isolation2/expected/test_temporary_table.out deleted file mode 100644 index 44b592a1..00000000 --- a/tests/isolation2/expected/test_temporary_table.out +++ /dev/null @@ -1,47 +0,0 @@ --- Ensure diskquota does not save information about temporary table during restart cluster by invalidates it at startup - -1: CREATE SCHEMA temporary_schema; -CREATE -1: SET search_path TO temporary_schema; -SET -1: SELECT diskquota.set_schema_quota('temporary_schema', '1 MB'); - set_schema_quota ------------------- - -(1 row) -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1: CREATE TEMPORARY TABLE temporary_table(id int) DISTRIBUTED BY (id); -CREATE -1: INSERT INTO temporary_table SELECT generate_series(1, 100000); -INSERT 100000 --- Wait for the diskquota bgworker refreshing the size of 'temporary_table'. -1: SELECT diskquota.wait_for_worker_new_epoch(); - wait_for_worker_new_epoch ---------------------------- - t -(1 row) -1q: ... - --- Restart cluster fastly -!\retcode gpstop -afr; --- start_ignore --- end_ignore -(exited with code 0) - --- Indicates that there is no temporary table in pg_catalog.pg_class -1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'temporary_table'; - oid ------ -(0 rows) --- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class -1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; - tableid ---------- -(0 rows) -1: DROP SCHEMA temporary_schema CASCADE; -DROP -1q: ... diff --git a/tests/isolation2/isolation2_schedule b/tests/isolation2/isolation2_schedule index 5ed558d6..090c5cc5 100644 --- a/tests/isolation2/isolation2_schedule +++ b/tests/isolation2/isolation2_schedule @@ -5,8 +5,6 @@ test: test_relation_size test: test_rejectmap test: test_vacuum test: test_truncate -test: test_temporary_table -test: test_dropped_table test: test_postmaster_restart test: test_worker_timeout test: test_per_segment_config diff --git a/tests/isolation2/sql/test_dropped_table.sql b/tests/isolation2/sql/test_dropped_table.sql deleted file mode 100644 index e05949e7..00000000 --- a/tests/isolation2/sql/test_dropped_table.sql +++ /dev/null @@ -1,22 +0,0 @@ --- Ensure diskquota does not save information about dropped table during restart cluster by invalidates it at startup - -1: CREATE SCHEMA dropped_schema; -1: SET search_path TO dropped_schema; -1: SELECT diskquota.set_schema_quota('dropped_schema', '1 MB'); -1: SELECT diskquota.wait_for_worker_new_epoch(); -1: CREATE TABLE dropped_table(id int) DISTRIBUTED BY (id); -1: INSERT INTO dropped_table SELECT generate_series(1, 100000); --- Wait for the diskquota bgworker refreshing the size of 'dropped_table'. 
-1: SELECT diskquota.wait_for_worker_new_epoch(); -1: DROP TABLE dropped_table; -1q: - --- Restart cluster fastly -!\retcode gpstop -afr; - --- Indicates that there is no dropped table in pg_catalog.pg_class -1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'dropped_table'; --- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class -1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; -1: DROP SCHEMA dropped_schema CASCADE; -1q: diff --git a/tests/isolation2/sql/test_temporary_table.sql b/tests/isolation2/sql/test_temporary_table.sql deleted file mode 100644 index 606f2fa5..00000000 --- a/tests/isolation2/sql/test_temporary_table.sql +++ /dev/null @@ -1,21 +0,0 @@ --- Ensure diskquota does not save information about temporary table during restart cluster by invalidates it at startup - -1: CREATE SCHEMA temporary_schema; -1: SET search_path TO temporary_schema; -1: SELECT diskquota.set_schema_quota('temporary_schema', '1 MB'); -1: SELECT diskquota.wait_for_worker_new_epoch(); -1: CREATE TEMPORARY TABLE temporary_table(id int) DISTRIBUTED BY (id); -1: INSERT INTO temporary_table SELECT generate_series(1, 100000); --- Wait for the diskquota bgworker refreshing the size of 'temporary_table'. -1: SELECT diskquota.wait_for_worker_new_epoch(); -1q: - --- Restart cluster fastly -!\retcode gpstop -afr; - --- Indicates that there is no temporary table in pg_catalog.pg_class -1: SELECT oid FROM pg_catalog.pg_class WHERE relname = 'temporary_table'; --- Indicates that there are no entries in diskquota.table_size that are not present in pg_catalog.pg_class -1: SELECT tableid FROM diskquota.table_size WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) AND segid = -1; -1: DROP SCHEMA temporary_schema CASCADE; -1q:
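
With this final revert in place, rows for dropped or temporary tables may again linger in diskquota.table_size across a cluster restart, so the orphan check used by the deleted tests remains a handy manual probe. Below is a small libpq sketch that runs the same query outside the isolation2 harness; the connection string is a placeholder, and running the statement from psql works just as well.

```
/* Report diskquota.table_size rows whose tableid no longer exists in
 * pg_catalog.pg_class (the check the deleted isolation2 tests performed). */
#include <stdio.h>
#include <stdlib.h>
#include <libpq-fe.h>

int
main(void)
{
	/* Placeholder connection string; adjust for your cluster. */
	PGconn *conn = PQconnectdb("dbname=contrib_regression");

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return EXIT_FAILURE;
	}

	PGresult *res = PQexec(conn,
	                       "SELECT tableid FROM diskquota.table_size "
	                       "WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_class WHERE tableid = oid) "
	                       "AND segid = -1");

	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, "query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return EXIT_FAILURE;
	}

	int rows = PQntuples(res);
	if (rows == 0)
		printf("no orphaned diskquota.table_size entries\n");
	else
		for (int i = 0; i < rows; i++)
			printf("orphaned tableid: %s\n", PQgetvalue(res, i, 0));

	PQclear(res);
	PQfinish(conn);
	return EXIT_SUCCESS;
}
```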