HIVE-27857: Do not check write permission while dropping external table or partition (apache#4860) (Wechar Yu, Reviewed by Ayush Saxena, Sai Hemanth Gantasala)
wecharyu authored Jan 9, 2024
1 parent 24fffdc commit 0c3b822
Showing 3 changed files with 56 additions and 16 deletions.
@@ -823,7 +823,7 @@ private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boo
     if (pathStr != null) {
       try {
         tablePath = new Path(table.getSd().getLocation());
-        if (!getWh().isWritable(tablePath.getParent())) {
+        if (deleteData && !isExternalTable(table) && !getWh().isWritable(tablePath.getParent())) {
           throw new MetaException("Table metadata not deleted since " + tablePath.getParent() +
               " is not writable by " + SecurityUtils.getUser());
         }
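
For orientation, the client-side guard above now runs the writability check only when data will actually be removed, i.e. for a managed table dropped with deleteData. A minimal sketch of the kind of external-table test such a guard relies on (illustrative only; the actual isExternalTable helper in the client may differ):

    import org.apache.hadoop.hive.metastore.TableType;
    import org.apache.hadoop.hive.metastore.api.Table;

    final class ExternalTableCheckSketch {
      // Sketch only: external tables are conventionally flagged either by the
      // table type or by the legacy "EXTERNAL"="TRUE" table parameter.
      static boolean isExternalTable(Table table) {
        if (TableType.EXTERNAL_TABLE.name().equals(table.getTableType())) {
          return true;
        }
        return table.getParameters() != null
            && "TRUE".equalsIgnoreCase(table.getParameters().get("EXTERNAL"));
      }
    }
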
@@ -2969,7 +2969,7 @@ private boolean drop_table_core(final RawStore ms, final String catName, final S
       firePreEvent(new PreDropTableEvent(tbl, deleteData, this));

       tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(tbl, deleteData);
-      if (tbl.getSd().getLocation() != null) {
+      if (tableDataShouldBeDeleted && tbl.getSd().getLocation() != null) {
         tblPath = new Path(tbl.getSd().getLocation());
         if (!wh.isWritable(tblPath.getParent())) {
           String target = indexName == null ? "Table" : "Index table";
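
On the server side, the guard now keys off tableDataShouldBeDeleted rather than running unconditionally. A sketch of the semantics checkTableDataShouldBeDeleted is expected to implement, assuming the usual Hive rules (managed tables honor deleteData directly; external tables keep their data unless "external.table.purge"="true"):

    // Sketch, not the actual HMSHandler implementation.
    private boolean checkTableDataShouldBeDeleted(Table tbl, boolean deleteData) {
      java.util.Map<String, String> params = tbl.getParameters();
      boolean isExternal = params != null && "TRUE".equalsIgnoreCase(params.get("EXTERNAL"));
      if (deleteData && isExternal) {
        // External data is only removed when the table explicitly opts in.
        return "true".equalsIgnoreCase(params.get("external.table.purge"));
      }
      return deleteData;
    }
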
@@ -4971,6 +4971,7 @@ private boolean drop_partition_common(RawStore ms, String catName, String db_nam
     Table tbl = null;
     Partition part = null;
     boolean mustPurge = false;
+    boolean tableDataShouldBeDeleted = false;
     long writeId = 0;

     Map<String, String> transactionalListenerResponses = Collections.emptyMap();
@@ -4994,20 +4995,21 @@ private boolean drop_partition_common(RawStore ms, String catName, String db_nam
       request.setCatName(catName);
       tbl = get_table_core(request);
       firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this));

+      tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(tbl, deleteData);
       mustPurge = isMustPurge(envContext, tbl);
       writeId = getWriteId(envContext);

       if (part == null) {
         throw new NoSuchObjectException("Partition doesn't exist. " + part_vals);
       }
       isArchived = MetaStoreUtils.isArchived(part);
-      if (isArchived) {
+      if (tableDataShouldBeDeleted && isArchived) {
         archiveParentDir = MetaStoreUtils.getOriginalLocation(part);
         verifyIsWritablePath(archiveParentDir);
       }

-      if ((part.getSd() != null) && (part.getSd().getLocation() != null)) {
+      if (tableDataShouldBeDeleted && (part.getSd() != null) && (part.getSd().getLocation() != null)) {
         partPath = new Path(part.getSd().getLocation());
         verifyIsWritablePath(partPath);
       }
@@ -5027,9 +5029,7 @@ private boolean drop_partition_common(RawStore ms, String catName, String db_nam
     } finally {
       if (!success) {
         ms.rollbackTransaction();
-      } else if (checkTableDataShouldBeDeleted(tbl, deleteData) &&
-          (partPath != null || archiveParentDir != null)) {
-
+      } else if (tableDataShouldBeDeleted && (partPath != null || archiveParentDir != null)) {
         LOG.info(mustPurge ?
             "dropPartition() will purge " + partPath + " directly, skipping trash." :
             "dropPartition() will move " + partPath + " to trash-directory.");
@@ -5159,7 +5159,7 @@ public DropPartitionsResult drop_partitions_req(
     boolean deleteData = request.isSetDeleteData() && request.isDeleteData();
     boolean ignoreProtection = request.isSetIgnoreProtection() && request.isIgnoreProtection();
     boolean needResult = !request.isSetNeedResult() || request.isNeedResult();

     List<PathAndDepth> dirsToDelete = new ArrayList<>();
     List<Path> archToDelete = new ArrayList<>();
     EnvironmentContext envContext =
@@ -5169,19 +5169,21 @@ public DropPartitionsResult drop_partitions_req(
     Table tbl = null;
     List<Partition> parts = null;
     boolean mustPurge = false;
+    boolean tableDataShouldBeDeleted = false;
     long writeId = 0;

     Map<String, String> transactionalListenerResponses = null;
     boolean needsCm = false;

     try {
       ms.openTransaction();
       // We need Partition-s for firing events and for result; DN needs MPartition-s to drop.
       // Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes.
       tbl = get_table_core(catName, dbName, tblName);
       mustPurge = isMustPurge(envContext, tbl);
+      tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(tbl, deleteData);
       writeId = getWriteId(envContext);

       int minCount = 0;
       RequestPartsSpec spec = request.getParts();
       List<String> partNames = null;
@@ -5245,14 +5247,12 @@ public DropPartitionsResult drop_partitions_req(
         if (colNames != null) {
           partNames.add(FileUtils.makePartName(colNames, part.getValues()));
         }
-        // Preserve the old behavior of failing when we cannot write, even w/o deleteData,
-        // and even if the table is external. That might not make any sense.
-        if (MetaStoreUtils.isArchived(part)) {
+        if (tableDataShouldBeDeleted && MetaStoreUtils.isArchived(part)) {
           Path archiveParentDir = MetaStoreUtils.getOriginalLocation(part);
           verifyIsWritablePath(archiveParentDir);
           archToDelete.add(archiveParentDir);
         }
-        if ((part.getSd() != null) && (part.getSd().getLocation() != null)) {
+        if (tableDataShouldBeDeleted && (part.getSd() != null) && (part.getSd().getLocation() != null)) {
           Path partPath = new Path(part.getSd().getLocation());
           verifyIsWritablePath(partPath);
           dirsToDelete.add(new PathAndDepth(partPath, part.getValues().size()));
@@ -5276,7 +5276,7 @@ public DropPartitionsResult drop_partitions_req(
     } finally {
       if (!success) {
         ms.rollbackTransaction();
-      } else if (checkTableDataShouldBeDeleted(tbl, deleteData)) {
+      } else if (tableDataShouldBeDeleted) {
         LOG.info(mustPurge ?
             "dropPartition() will purge partition-directories directly, skipping trash."
             : "dropPartition() will move partition-directories to trash-directory.");
@@ -18,7 +18,10 @@

 package org.apache.hadoop.hive.metastore.client;

+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.TableName;
 import org.apache.hadoop.hive.metastore.ColumnType;
@@ -68,6 +71,7 @@
 import org.junit.runners.Parameterized;

 import java.io.File;
+import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
@@ -118,6 +122,7 @@ public static void startMetaStores() {
     extraConf.put("fs.trash.interval", "30"); // FS_TRASH_INTERVAL_KEY (hadoop-2)
     extraConf.put(ConfVars.HIVE_IN_TEST.getVarname(), "true");
     extraConf.put(ConfVars.METASTORE_METADATA_TRANSFORMER_CLASS.getVarname(), " ");
+    extraConf.put(ConfVars.AUTHORIZATION_STORAGE_AUTH_CHECKS.getVarname(), "true");

     startMetaStores(msConf, extraConf);
   }
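
The added setting switches on storage-based authorization in the test metastores; without it the writability guards exercised below never fire. The same toggle in embedded setup code would look roughly like this (assuming the standard MetastoreConf API):

    // Sketch: enable the storage-based write check programmatically.
    Configuration conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.AUTHORIZATION_STORAGE_AUTH_CHECKS, true);
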
@@ -1563,6 +1568,41 @@ public void dropTableBogusCatalog() throws TException {
client.dropTable("nosuch", testTables[0].getDbName(), testTables[0].getTableName(), true, false);
}

@Test(expected = MetaException.class)
public void testDropManagedTableWithoutStoragePermission() throws TException, IOException {
String dbName = testTables[0].getDbName();
String tblName = testTables[0].getTableName();
Table table = client.getTable(dbName, tblName);
Path tablePath = new Path(table.getSd().getLocation());
FileSystem fs = Warehouse.getFs(tablePath, new Configuration());
fs.setPermission(tablePath.getParent(), new FsPermission((short) 0555));

try {
client.dropTable(dbName, tblName);
} finally {
// recover write permission so that file can be cleaned.
fs.setPermission(tablePath.getParent(), new FsPermission((short) 0755));
}
}

@Test
public void testDropExternalTableWithoutStoragePermission() throws TException, IOException {
// external table
String dbName = testTables[4].getDbName();
String tblName = testTables[4].getTableName();
Table table = client.getTable(dbName, tblName);
Path tablePath = new Path(table.getSd().getLocation());
FileSystem fs = Warehouse.getFs(tablePath, new Configuration());
fs.setPermission(tablePath.getParent(), new FsPermission((short) 0555));

try {
client.dropTable(dbName, tblName);
} finally {
// recover write permission so that file can be cleaned.
fs.setPermission(tablePath.getParent(), new FsPermission((short) 0755));
}
}

/**
* Creates a Table with all of the parameters set. The temporary table is available only on HS2
* server, so do not use it.
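
One consequence the tests leave implicit: an external table can opt back into data deletion, at which point the writability check applies again. A hedged sketch of that flow with standard IMetaStoreClient calls (database and table names here are illustrative):

    // With "external.table.purge"="true", dropping the external table removes
    // its files again, so the parent directory must be writable once more.
    Table t = client.getTable("demo_db", "ext_tbl");
    t.getParameters().put("external.table.purge", "true");
    client.alter_table("demo_db", "ext_tbl", t);
    client.dropTable("demo_db", "ext_tbl", true /* deleteData */, false /* ignoreUnknownTab */);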
