diff --git a/cloud/src/meta-service/meta_service_resource.cpp b/cloud/src/meta-service/meta_service_resource.cpp
index 4fa8cc5a132296..6eb9ac061158ba 100644
--- a/cloud/src/meta-service/meta_service_resource.cpp
+++ b/cloud/src/meta-service/meta_service_resource.cpp
@@ -523,9 +523,18 @@ static void set_default_vault_log_helper(const InstanceInfoPB& instance,
     LOG(INFO) << vault_msg;
 }
 
-static int alter_hdfs_storage_vault(InstanceInfoPB& instance, std::unique_ptr<Transaction> txn,
+static bool vault_exist(const InstanceInfoPB& instance, const std::string& new_vault_name) {
+    for (const auto& name : instance.storage_vault_names()) {
+        if (new_vault_name == name) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static int alter_hdfs_storage_vault(InstanceInfoPB& instance, std::unique_ptr<Transaction>& txn,
                                     const StorageVaultPB& vault, MetaServiceCode& code,
-                                    std::string& msg) {
+                                    std::string& msg, AlterObjStoreInfoResponse* response) {
     if (!vault.has_hdfs_info()) {
         code = MetaServiceCode::INVALID_ARGUMENT;
         std::stringstream ss;
@@ -591,6 +600,13 @@ static int alter_hdfs_storage_vault(InstanceInfoPB& instance, std::unique_ptr
     txn->put(vault_key, val);
     LOG(INFO) << "put vault_id=" << vault_id << ", vault_key=" << hex(vault_key)
               << ", origin vault=" << origin_vault_info << ", new_vault=" << new_vault_info;
-    err = txn->commit();
-    if (err != TxnErrorCode::TXN_OK) {
-        code = cast_as<ErrCategory::COMMIT>(err);
-        msg = fmt::format("failed to commit kv txn, err={}", err);
-        LOG(WARNING) << msg;
-    }
-
+    // The txn is committed by the caller (alter_storage_vault) together with the
+    // updated instance info, so no commit is issued here.
+    DCHECK_EQ(new_vault.id(), vault_id);
+    response->set_storage_vault_id(new_vault.id());
+    response->set_storage_vault_name(new_vault.name());
     return 0;
 }
 
-static int alter_s3_storage_vault(InstanceInfoPB& instance, std::unique_ptr<Transaction> txn,
+static int alter_s3_storage_vault(InstanceInfoPB& instance, std::unique_ptr<Transaction>& txn,
                                   const StorageVaultPB& vault, MetaServiceCode& code,
-                                  std::string& msg) {
+                                  std::string& msg, AlterObjStoreInfoResponse* response) {
     if (!vault.has_obj_info()) {
         code = MetaServiceCode::INVALID_ARGUMENT;
         std::stringstream ss;
@@ -708,6 +727,13 @@ static int alter_s3_storage_vault(InstanceInfoPB& instance, std::unique_ptr
     txn->put(vault_key, val);
     LOG(INFO) << "put vault_id=" << vault_id << ", vault_key=" << hex(vault_key)
               << ", origin vault=" << origin_vault_info << ", new vault=" << new_vault_info;
-    err = txn->commit();
-    if (err != TxnErrorCode::TXN_OK) {
-        code = cast_as<ErrCategory::COMMIT>(err);
-        msg = fmt::format("failed to commit kv txn, err={}", err);
-        LOG(WARNING) << msg;
-    }
-
+    // The txn is committed by the caller (alter_storage_vault) together with the
+    // updated instance info, so no commit is issued here.
+    DCHECK_EQ(new_vault.id(), vault_id);
+    response->set_storage_vault_id(new_vault.id());
+    response->set_storage_vault_name(new_vault.name());
     return 0;
 }
 
@@ -1100,12 +1129,12 @@ void MetaServiceImpl::alter_storage_vault(google::protobuf::RpcController* contr
         break;
     }
     case AlterObjStoreInfoRequest::ALTER_S3_VAULT: {
-        alter_s3_storage_vault(instance, std::move(txn), request->vault(), code, msg);
-        return;
+        alter_s3_storage_vault(instance, txn, request->vault(), code, msg, response);
+        break;
     }
     case AlterObjStoreInfoRequest::ALTER_HDFS_VAULT: {
-        alter_hdfs_storage_vault(instance, std::move(txn), request->vault(), code, msg);
-        return;
+        alter_hdfs_storage_vault(instance, txn, request->vault(), code, msg, response);
+        break;
     }
     case AlterObjStoreInfoRequest::DROP_S3_VAULT: [[fallthrough]];
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
index 1c6345613d768d..ac1cb20433c035 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
@@ -3721,7 +3721,10 @@ private static void addOlapTablePropertyInfo(OlapTable olapTable, StringBuilder
         }
 
         // Storage Vault
-        if (!olapTable.getStorageVaultName().isEmpty()) {
+        if (!Strings.isNullOrEmpty(olapTable.getStorageVaultId())) {
+            sb.append(",\n\"").append(PropertyAnalyzer
+                    .PROPERTIES_STORAGE_VAULT_ID).append("\" = \"");
+            sb.append(olapTable.getStorageVaultId()).append("\"");
             sb.append(",\n\"").append(PropertyAnalyzer
                     .PROPERTIES_STORAGE_VAULT_NAME).append("\" = \"");
             sb.append(olapTable.getStorageVaultName()).append("\"");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
index 477f76301120d2..fae4e349fb8459 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
@@ -265,22 +265,18 @@ public void setIsBeingSynced(boolean isBeingSynced) {
                 String.valueOf(isBeingSynced));
     }
 
-    public void setStorageVaultName(String storageVaultName) throws DdlException {
-        if (storageVaultName == null || storageVaultName.isEmpty()) {
-            return;
-        }
-        getOrCreatTableProperty().setStorageVaultName(storageVaultName);
-    }
-
     public String getStorageVaultName() {
-        return getOrCreatTableProperty().getStorageVaultName();
+        if (Strings.isNullOrEmpty(getStorageVaultId())) {
+            return "";
+        }
+        return Env.getCurrentEnv().getStorageVaultMgr().getVaultNameById(getStorageVaultId());
     }
 
-    public void setStorageVaultId(String setStorageVaultId) throws DdlException {
-        if (setStorageVaultId == null || setStorageVaultId.isEmpty()) {
-            throw new DdlException("Invalid Storage Vault, please set one useful storage vault");
+    public void setStorageVaultId(String storageVaultId) throws DdlException {
+        if (Strings.isNullOrEmpty(storageVaultId)) {
+            throw new DdlException("Invalid storage vault id, please set an available storage vault");
         }
-        getOrCreatTableProperty().setStorageVaultId(setStorageVaultId);
+        getOrCreatTableProperty().setStorageVaultId(storageVaultId);
     }
 
     public String getStorageVaultId() {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java
index 9d45ce7bdd8f51..6ffffe6d9ef03b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVault.java
@@ -45,6 +45,8 @@ public abstract class StorageVault {
     public static final String LOWER_CASE_META_NAMES = "lower_case_meta_names";
     public static final String META_NAMES_MAPPING = "meta_names_mapping";
 
+    public static final String VAULT_NAME = "VAULT_NAME";
+
     public enum StorageVaultType {
         UNKNOWN,
         S3,
@@ -60,7 +62,6 @@ public static StorageVaultType fromString(String storageVaultTypeType) {
         }
     }
 
-    protected static final String VAULT_NAME = "VAULT_NAME";
     protected String name;
     protected StorageVaultType type;
     protected String id;
@@ -146,7 +147,7 @@ public void setId(String id) {
                 break;
             case S3:
                 if (!stmt.getProperties().containsKey(PropertyConverter.USE_PATH_STYLE)) {
-                    stmt.getProperties().put(PropertyConverter.USE_PATH_STYLE, "true");
"true"); + stmt.getProperties().put(PropertyConverter.USE_PATH_STYLE, "false"); } CreateResourceStmt resourceStmt = new CreateResourceStmt(false, ifNotExists, name, stmt.getProperties()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java index 4014219c5a49b6..daabb85cf09128 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java @@ -36,6 +36,8 @@ import org.apache.doris.thrift.TNetworkAddress; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -80,12 +82,42 @@ public void refreshVaultMap(Map vaultMap) { rwLock.writeLock().unlock(); } - public String getVaultIdByName(String name) { - String vaultId; - rwLock.readLock().lock(); - vaultId = vaultNameToVaultId.getOrDefault(name, ""); - rwLock.readLock().unlock(); - return vaultId; + public String getVaultIdByName(String vaultName) { + try { + rwLock.readLock().lock(); + return vaultNameToVaultId.getOrDefault(vaultName, ""); + } finally { + rwLock.readLock().unlock(); + } + } + + public String getVaultNameById(String vaultId) { + try { + rwLock.readLock().lock(); + for (Map.Entry entry : vaultNameToVaultId.entrySet()) { + if (entry.getValue().equals(vaultId)) { + return entry.getKey(); + } + } + return ""; + } finally { + rwLock.readLock().unlock(); + } + } + + private void updateVaultNameToIdCache(String oldVaultName, String newVaultName, String vaultId) { + try { + rwLock.writeLock().lock(); + String cachedVaultId = vaultNameToVaultId.get(oldVaultName); + vaultNameToVaultId.remove(oldVaultName); + Preconditions.checkArgument(!Strings.isNullOrEmpty(cachedVaultId), cachedVaultId, + "Cached vault id is null or empty"); + Preconditions.checkArgument(cachedVaultId.equals(vaultId), + "Cached vault id not equal to remote storage." 
+ cachedVaultId + " - " + vaultId); + vaultNameToVaultId.put(newVaultName, vaultId); + } finally { + rwLock.writeLock().unlock(); + } } private Cloud.StorageVaultPB.Builder buildAlterS3VaultRequest(Map properties, String name) @@ -166,8 +198,10 @@ public void alterStorageVault(StorageVaultType type, Map propert LOG.warn("failed to alter storage vault response: {} ", response); throw new DdlException(response.getStatus().getMsg()); } - LOG.info("Succeed to alter storage vault {}, id {}, origin default vault replaced {}", - name, response.getStorageVaultId(), response.getDefaultStorageVaultReplaced()); + + updateVaultNameToIdCache(name, response.getStorageVaultName(), response.getStorageVaultId()); + LOG.info("Succeed to alter storage vault, old name:{} new name: {} id:{}", name, + response.getStorageVaultName(), response.getStorageVaultId()); // Make BE eagerly fetch the storage vault info from Meta Service ALTER_BE_SYNC_THREAD_POOL.execute(() -> alterSyncVaultTask()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java index 9c1e09d7d1624f..83ad3b65df2a71 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java +++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java @@ -793,10 +793,6 @@ public String getStorageVaultName() { return properties.getOrDefault(PropertyAnalyzer.PROPERTIES_STORAGE_VAULT_NAME, ""); } - public void setStorageVaultName(String storageVaultName) { - properties.put(PropertyAnalyzer.PROPERTIES_STORAGE_VAULT_NAME, storageVaultName); - } - public String getPropertiesString() throws IOException { ObjectMapper objectMapper = new ObjectMapper(); try { diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/datasource/CloudInternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/datasource/CloudInternalCatalog.java index fb0df9e488b3c2..437eab18fc25f4 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/cloud/datasource/CloudInternalCatalog.java +++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/datasource/CloudInternalCatalog.java @@ -55,7 +55,6 @@ import org.apache.doris.common.Config; import org.apache.doris.common.DdlException; import org.apache.doris.common.MetaNotFoundException; -import org.apache.doris.common.Pair; import org.apache.doris.datasource.InternalCatalog; import org.apache.doris.proto.OlapCommon; import org.apache.doris.proto.OlapFile; @@ -106,6 +105,12 @@ protected Partition createPartitionWithIndices(long dbId, OlapTable tbl, long pa throws DdlException { // create base index first. 
         Preconditions.checkArgument(tbl.getBaseIndexId() != -1);
+
+        if (((CloudEnv) Env.getCurrentEnv()).getEnableStorageVault()) {
+            Preconditions.checkArgument(!Strings.isNullOrEmpty(tbl.getStorageVaultId()),
+                    "Storage vault id is null or empty");
+        }
+
         MaterializedIndex baseIndex = new MaterializedIndex(tbl.getBaseIndexId(), IndexState.NORMAL);
 
         LOG.info("begin create cloud partition");
@@ -129,9 +134,6 @@ protected Partition createPartitionWithIndices(long dbId, OlapTable tbl, long pa
 
         long version = partition.getVisibleVersion();
 
-        final String storageVaultName = tbl.getStorageVaultName();
-        boolean storageVaultIdSet = tbl.getStorageVaultId().isEmpty();
-
         // short totalReplicaNum = replicaAlloc.getTotalReplicaNum();
         for (Map.Entry<Long, MaterializedIndex> entry : indexMap.entrySet()) {
             long indexId = entry.getKey();
@@ -184,29 +186,11 @@ protected Partition createPartitionWithIndices(long dbId, OlapTable tbl, long pa
                         tbl.storagePageSize());
                 requestBuilder.addTabletMetas(builder);
             }
-            if (!storageVaultIdSet && ((CloudEnv) Env.getCurrentEnv()).getEnableStorageVault()) {
-                requestBuilder.setStorageVaultName(storageVaultName);
-            }
             requestBuilder.setDbId(dbId);
-
-            LOG.info("create tablets, dbId: {}, tableId: {}, tableName: {}, partitionId: {}, partitionName: {}, "
-                    + "indexId: {}, vault name: {}",
-                    dbId, tbl.getId(), tbl.getName(), partitionId, partitionName, indexId, storageVaultName);
-            Cloud.CreateTabletsResponse resp = sendCreateTabletsRpc(requestBuilder);
-            // If the resp has no vault id set, it means the MS is running with enable_storage_vault false
-            if (resp.hasStorageVaultId() && !storageVaultIdSet) {
-                tbl.setStorageVaultId(resp.getStorageVaultId());
-                storageVaultIdSet = true;
-                if (storageVaultName.isEmpty()) {
-                    // If user doesn't specify the vault name for this table, we should set it
-                    // to make the show create table stmt return correct stmt
-                    // TODO(ByteYue): setDefaultStorageVault for vaultMgr might override user's
-                    // defualt vault, maybe we should set it using show default storage vault stmt
-                    tbl.setStorageVaultName(resp.getStorageVaultName());
-                    Env.getCurrentEnv().getStorageVaultMgr().setDefaultStorageVault(
-                            Pair.of(resp.getStorageVaultName(), resp.getStorageVaultId()));
-                }
-            }
+            LOG.info("create tablets dbId: {} tableId: {} tableName: {} partitionId: {} partitionName: {} "
+                    + "indexId: {} vaultId: {}",
+                    dbId, tbl.getId(), tbl.getName(), partitionId, partitionName, indexId, tbl.getStorageVaultId());
+            sendCreateTabletsRpc(requestBuilder);
             if (index.getId() != tbl.getBaseIndexId()) {
                 // add rollup index to partition
                 partition.createRollupIndex(index);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
index 7c7ded88f55960..2fd98d3f670319 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalCatalog.java
@@ -2771,7 +2771,6 @@ private boolean createOlapTable(Database db, CreateTableStmt stmt) throws UserEx
                             + "' for storage vault '" + storageVaultName + "'");
                 }
 
-                olapTable.setStorageVaultName(storageVaultName);
                 storageVaultId = env.getStorageVaultMgr().getVaultIdByName(storageVaultName);
                 if (Strings.isNullOrEmpty(storageVaultId)) {
                     throw new DdlException("Storage vault '" + storageVaultName + "' does not exist. "
" diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/AlterStorageVaultCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/AlterStorageVaultCommand.java index cbdc5765839b40..0766d236439d89 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/AlterStorageVaultCommand.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/AlterStorageVaultCommand.java @@ -21,11 +21,14 @@ import org.apache.doris.catalog.StorageVault; import org.apache.doris.catalog.StorageVault.StorageVaultType; import org.apache.doris.common.AnalysisException; +import org.apache.doris.common.FeNameFormat; import org.apache.doris.nereids.trees.plans.PlanType; import org.apache.doris.nereids.trees.plans.visitor.PlanVisitor; import org.apache.doris.qe.ConnectContext; import org.apache.doris.qe.StmtExecutor; +import com.google.common.base.Preconditions; + import java.util.Map; /** @@ -48,6 +51,13 @@ public void run(ConnectContext ctx, StmtExecutor executor) throws Exception { if (vaultType == StorageVault.StorageVaultType.UNKNOWN) { throw new AnalysisException("Unsupported Storage Vault type: " + type); } + + FeNameFormat.checkStorageVaultName(name); + if (properties.containsKey(StorageVault.VAULT_NAME)) { + String newName = properties.get(StorageVault.VAULT_NAME); + FeNameFormat.checkStorageVaultName(newName); + Preconditions.checkArgument(!name.equalsIgnoreCase(newName), "vault name no change"); + } Env.getCurrentEnv().getStorageVaultMgr().alterStorageVault(vaultType, properties, name); } diff --git a/gensrc/proto/cloud.proto b/gensrc/proto/cloud.proto index c113868a2c3286..46d7f29ba4fb2e 100644 --- a/gensrc/proto/cloud.proto +++ b/gensrc/proto/cloud.proto @@ -887,6 +887,9 @@ message AlterObjStoreInfoResponse { optional MetaServiceResponseStatus status = 1; optional string storage_vault_id = 2; optional bool default_storage_vault_replaced = 3; + + // storage_vault_name maybe changed, so return new storage_vault_name + optional string storage_vault_name = 4; } message UpdateAkSkRequest { diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy index f5d811514b375d..71558a07bb24c3 100644 --- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy +++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy @@ -812,11 +812,12 @@ class Suite implements GroovyInterceptable { return randomBoolean ? "true" : "false" } - void expectExceptionLike(Closure userFunction, String errorMessage = null) { + void expectExceptionLike(Closure userFunction, String errMsg = null) { try { userFunction() + throw new Exception("Expect exception, but success"); } catch (Exception e) { - if (!e.getMessage().contains(errorMessage)) { + if (!Strings.isNullOrEmpty(errMsg) && !e.getMessage().contains(errMsg)) { throw e } } diff --git a/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy b/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy new file mode 100644 index 00000000000000..4592e72292a664 --- /dev/null +++ b/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy @@ -0,0 +1,232 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_alter_vault_name", "nonConcurrent") {
+    def suiteName = name;
+    if (!isCloudMode()) {
+        logger.info("skip ${suiteName} case, because not cloud mode")
+        return
+    }
+
+    if (!enableStoragevault()) {
+        logger.info("skip ${suiteName} case, because storage vault not enabled")
+        return
+    }
+
+    def vaultName = UUID.randomUUID().toString().replace("-", "")
+    def hdfsVaultName = "hdfs_" + vaultName
+    sql """
+        CREATE STORAGE VAULT ${hdfsVaultName}
+        PROPERTIES (
+            "type" = "HDFS",
+            "fs.defaultFS" = "${getHdfsFs()}",
+            "path_prefix" = "${hdfsVaultName}",
+            "hadoop.username" = "${getHdfsUser()}"
+        );
+    """
+
+    def s3VaultName = "s3_" + vaultName
+    sql """
+        CREATE STORAGE VAULT ${s3VaultName}
+        PROPERTIES (
+            "type" = "S3",
+            "s3.endpoint" = "${getS3Endpoint()}",
+            "s3.region" = "${getS3Region()}",
+            "s3.access_key" = "${getS3AK()}",
+            "s3.secret_key" = "${getS3SK()}",
+            "s3.root.path" = "${s3VaultName}",
+            "s3.bucket" = "${getS3BucketName()}",
+            "s3.external_endpoint" = "",
+            "provider" = "${getS3Provider()}",
+            "use_path_style" = "false"
+        );
+    """
+
+    // case 1: renaming an HDFS vault to its current name is rejected
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${hdfsVaultName}
+            PROPERTIES (
+                "type" = "hdfs",
+                "VAULT_NAME" = "${hdfsVaultName}"
+            );
+        """
+    }, "vault name no change")
+
+    // case 2: renaming an HDFS vault to an existing vault name is rejected
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${hdfsVaultName}
+            PROPERTIES (
+                "type" = "hdfs",
+                "VAULT_NAME" = "${s3VaultName}"
+            );
+        """
+    }, "already existed")
+
+    // case 3: renaming an S3 vault to its current name is rejected
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type" = "s3",
+                "VAULT_NAME" = "${s3VaultName}"
+            );
+        """
+    }, "vault name no change")
+
+    // case 4: renaming an S3 vault to an existing vault name is rejected
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type" = "s3",
+                "VAULT_NAME" = "${hdfsVaultName}"
+            );
+        """
+    }, "already existed")
+
+    // case 5: a table keeps working after its HDFS vault is renamed,
+    // because the table references the vault by id
+    sql """
+        CREATE TABLE ${hdfsVaultName} (
+            C_CUSTKEY INTEGER NOT NULL,
+            C_NAME INTEGER NOT NULL
+        )
+        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "1",
+            "storage_vault_name" = "${hdfsVaultName}"
+        )
+    """
+    sql """ insert into ${hdfsVaultName} values(1, 1); """
+    sql """ sync;"""
+    def result = sql """ select * from ${hdfsVaultName}; """
+    assertEquals(result.size(), 1);
+
+    sql """
+        ALTER STORAGE VAULT ${hdfsVaultName}
+        PROPERTIES (
+            "type" = "hdfs",
+            "VAULT_NAME" = "${hdfsVaultName}_new"
+        );
+    """
+    sql """ insert into ${hdfsVaultName} values(2, 2); """
+    sql """ sync;"""
+    result = sql """ select * from ${hdfsVaultName}; """
+    assertEquals(result.size(), 2);
+
+    // case 6: creating a table with the old HDFS vault name fails after the rename
+    expectExceptionLike({
+        sql """
+            CREATE TABLE ${hdfsVaultName}_new (
+                C_CUSTKEY INTEGER NOT NULL,
+                C_NAME INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
= "1", + "storage_vault_name" = ${hdfsVaultName} + ) + """ + }, "does not exis") + + // case7 + sql """ + CREATE TABLE ${hdfsVaultName}_new ( + C_CUSTKEY INTEGER NOT NULL, + C_NAME INTEGER NOT NULL + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "storage_vault_name" = ${hdfsVaultName}_new + ) + """ + + sql """ insert into ${hdfsVaultName}_new values(1, 1); """ + sql """ sync;""" + result = sql """ select * from ${hdfsVaultName}_new; """ + assertEquals(result.size(), 1); + + // case8 + sql """ + CREATE TABLE ${s3VaultName} ( + C_CUSTKEY INTEGER NOT NULL, + C_NAME INTEGER NOT NULL + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "storage_vault_name" = ${s3VaultName} + ) + """ + sql """ insert into ${s3VaultName} values(1, 1); """ + sql """ sync;""" + result = sql """ select * from ${s3VaultName}; """ + assertEquals(result.size(), 1); + + sql """ + ALTER STORAGE VAULT ${s3VaultName} + PROPERTIES ( + "type" = "s3", + "VAULT_NAME" = "${s3VaultName}_new" + ); + """ + sql """ insert into ${s3VaultName} values(2, 2); """ + sql """ sync;""" + result = sql """ select * from ${s3VaultName}; """ + assertEquals(result.size(), 2); + + // case9 + expectExceptionLike({ + sql """ + CREATE TABLE ${s3VaultName}_new ( + C_CUSTKEY INTEGER NOT NULL, + C_NAME INTEGER NOT NULL + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "storage_vault_name" = ${s3VaultName} + ) + """ + }, "does not exis") + + // case10 + sql """ + CREATE TABLE ${s3VaultName}_new ( + C_CUSTKEY INTEGER NOT NULL, + C_NAME INTEGER NOT NULL + ) + DUPLICATE KEY(C_CUSTKEY, C_NAME) + DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1 + PROPERTIES ( + "replication_num" = "1", + "storage_vault_name" = ${s3VaultName}_new + ) + """ + + sql """ insert into ${s3VaultName}_new values(1, 1); """ + sql """ sync;""" + result = sql """ select * from ${s3VaultName}_new; """ + assertEquals(result.size(), 1); +} \ No newline at end of file