diff --git a/hadoop-tools/hadoop-compat-bench/pom.xml b/hadoop-tools/hadoop-compat-bench/pom.xml
new file mode 100644
index 0000000000000..e8dbe65e8a236
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/pom.xml
@@ -0,0 +1,118 @@
+
+
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.PrintStream;
+import java.nio.charset.StandardCharsets;
+import java.security.PrivilegedExceptionAction;
+import java.util.Collection;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.compat.common.HdfsCompatCommand;
+import org.apache.hadoop.fs.compat.common.HdfsCompatIllegalArgumentException;
+import org.apache.hadoop.fs.compat.common.HdfsCompatReport;
+import org.apache.hadoop.fs.shell.CommandFormat;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.util.VersionInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tool for triggering a compatibility report
+ * for a specific FileSystem implementation.
+ */
+public class HdfsCompatTool extends Configured implements Tool {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HdfsCompatTool.class);
+
+ private static final String DESCRIPTION = "hadoop jar" +
+ " hadoop-compat-bench-{version}.jar -uri
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.compat.common.*;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.util.List;
+
+@HdfsCompatCaseGroup(name = "ACL")
+public class HdfsCompatAcl extends AbstractHdfsCompatCase {
+ private static final String INIT_FILE_ACL =
+ "user::rwx,group::rwx,other::rwx,user:foo:rwx";
+ private static final String INIT_DIR_ACL =
+ "default:user::rwx,default:group::rwx,default:other::rwx";
+ private Path dir;
+ private Path file;
+
+ @HdfsCompatCasePrepare
+ public void prepare() throws IOException {
+ this.dir = makePath("dir");
+ this.file = new Path(this.dir, "file");
+ HdfsCompatUtil.createFile(fs(), this.file, 0);
+ List
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.compat.common.*;
+import org.apache.hadoop.fs.CommonPathCapabilities;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+import java.io.IOException;
+import java.net.URI;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
+
+@HdfsCompatCaseGroup(name = "FileSystem")
+public class HdfsCompatBasics extends AbstractHdfsCompatCase {
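+  // Each case below only verifies that the corresponding FileSystem API is
+  // implemented: checkImplementation() invokes the call and records whether
+  // it throws UnsupportedOperationException, rather than asserting on the
+  // returned value.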
+ @HdfsCompatCase
+ public void initialize() throws IOException {
+ FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf());
+ HdfsCompatUtil.checkImplementation(() ->
+ another.initialize(URI.create("hdfs:///"), new Configuration())
+ );
+ }
+
+ @HdfsCompatCase
+ public void getScheme() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getScheme()
+ );
+ }
+
+ @HdfsCompatCase
+ public void getUri() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getUri()
+ );
+ }
+
+ @HdfsCompatCase
+ public void getCanonicalServiceName() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getCanonicalServiceName()
+ );
+ }
+
+ @HdfsCompatCase
+ public void getName() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getName()
+ );
+ }
+
+ @HdfsCompatCase
+ public void makeQualified() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().makeQualified(new Path("/"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getChildFileSystems() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getChildFileSystems()
+ );
+ }
+
+ @HdfsCompatCase
+ public void resolvePath() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().resolvePath(new Path("/"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getHomeDirectory() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getHomeDirectory()
+ );
+ }
+
+ @HdfsCompatCase
+ public void setWorkingDirectory() throws IOException {
+ FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf());
+ HdfsCompatUtil.checkImplementation(() ->
+ another.setWorkingDirectory(makePath("/tmp"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getWorkingDirectory() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getWorkingDirectory()
+ );
+ }
+
+ @HdfsCompatCase
+ public void close() throws IOException {
+ FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf());
+ HdfsCompatUtil.checkImplementation(another::close);
+ }
+
+ @HdfsCompatCase
+ public void getDefaultBlockSize() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getDefaultBlockSize(getBasePath())
+ );
+ }
+
+ @HdfsCompatCase
+ public void getDefaultReplication() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getDefaultReplication(getBasePath())
+ );
+ }
+
+ @HdfsCompatCase
+ public void getStorageStatistics() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getStorageStatistics()
+ );
+ }
+
+ @HdfsCompatCase
+ public void setVerifyChecksum() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().setVerifyChecksum(true)
+ );
+ }
+
+ @HdfsCompatCase
+ public void setWriteChecksum() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().setWriteChecksum(true)
+ );
+ }
+
+ @HdfsCompatCase
+ public void getDelegationToken() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getDelegationToken("hadoop")
+ );
+ }
+
+ @HdfsCompatCase
+ public void getAdditionalTokenIssuers() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getAdditionalTokenIssuers()
+ );
+ }
+
+ @HdfsCompatCase
+ public void getServerDefaults() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getServerDefaults(new Path("/"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void msync() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().msync()
+ );
+ }
+
+ @HdfsCompatCase
+ public void getStatus() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getStatus(new Path("/"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getTrashRoot() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getTrashRoot(new Path("/user/hadoop/tmp"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getTrashRoots() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getTrashRoots(true)
+ );
+ }
+
+ @HdfsCompatCase
+ public void getAllStoragePolicies() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getAllStoragePolicies()
+ );
+ }
+
+ @HdfsCompatCase
+ public void supportsSymlinks() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().supportsSymlinks()
+ );
+ }
+
+ @HdfsCompatCase
+ public void hasPathCapability() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().hasPathCapability(getBasePath(),
+ CommonPathCapabilities.FS_TRUNCATE)
+ );
+ }
+
+ @HdfsCompatCase
+ public void mkdirs() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().mkdirs(makePath("mkdir"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getFileStatus() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getFileStatus(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void exists() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().exists(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void isDirectory() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().isDirectory(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void isFile() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().isFile(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getLength() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getLength(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getBlockSize() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getBlockSize(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void listStatus() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().listStatus(makePath("dir"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void globStatus() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().globStatus(makePath("dir"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void listLocatedStatus() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().listLocatedStatus(makePath("dir"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void listStatusIterator() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().listStatusIterator(makePath("dir"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void listFiles() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().listFiles(makePath("dir"), false)
+ );
+ }
+
+ @HdfsCompatCase
+ public void rename() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().rename(makePath("src"), makePath("dst"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void delete() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().delete(makePath("file"), true)
+ );
+ }
+
+ @HdfsCompatCase
+ public void deleteOnExit() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().deleteOnExit(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void cancelDeleteOnExit() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().cancelDeleteOnExit(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void truncate() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().truncate(makePath("file"), 1)
+ );
+ }
+
+ @HdfsCompatCase
+ public void setOwner() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().setOwner(makePath("file"), "test-user", "test-group")
+ );
+ }
+
+ @HdfsCompatCase
+ public void setTimes() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().setTimes(makePath("file"), 1696089600L, 1696089600L)
+ );
+ }
+
+ @HdfsCompatCase
+ public void concat() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().concat(makePath("file"),
+ new Path[]{makePath("file1"), makePath("file2")})
+ );
+ }
+
+ @HdfsCompatCase
+ public void getFileChecksum() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getFileChecksum(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getFileBlockLocations() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getFileBlockLocations(new FileStatus(), 0, 128)
+ );
+ }
+
+ @HdfsCompatCase
+ public void listCorruptFileBlocks() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().listCorruptFileBlocks(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getReplication() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getReplication(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void setReplication() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().setReplication(makePath("file"), (short) 2)
+ );
+ }
+
+ @HdfsCompatCase
+ public void getPathHandle() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getPathHandle(new FileStatus())
+ );
+ }
+
+ @HdfsCompatCase
+ public void create() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().create(makePath("file"), true)
+ );
+ }
+
+ @HdfsCompatCase
+ public void createNonRecursive() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().createNonRecursive(makePath("file"), true, 1024,
+ (short) 1, 1048576, null)
+ );
+ }
+
+ @HdfsCompatCase
+ public void createNewFile() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().createNewFile(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void append() throws IOException {
+ final Path file = makePath("file");
+ try {
+ HdfsCompatUtil.createFile(fs(), file, 0);
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().append(file)
+ );
+ } finally {
+ HdfsCompatUtil.deleteQuietly(fs(), file, true);
+ }
+ }
+
+ @HdfsCompatCase
+ public void createFile() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().createFile(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void appendFile() throws IOException {
+ final Path file = makePath("file");
+ try {
+ HdfsCompatUtil.createFile(fs(), file, 0);
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().appendFile(file)
+ );
+ } finally {
+ HdfsCompatUtil.deleteQuietly(fs(), file, true);
+ }
+ }
+
+ @HdfsCompatCase
+ public void createMultipartUploader() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().createMultipartUploader(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void open() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().open(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void openFile() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().openFile(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getContentSummary() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getContentSummary(makePath("dir"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getUsed() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getUsed(makePath("dir"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getQuotaUsage() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getQuotaUsage(makePath("dir"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void setQuota() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().setQuota(makePath("dir"), 1024L, 1048576L)
+ );
+ }
+
+ @HdfsCompatCase
+ public void setQuotaByStorageType() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().setQuotaByStorageType(makePath("dir"), StorageType.SSD, 1048576L)
+ );
+ }
+
+ @HdfsCompatCase
+ public void access() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().access(makePath("file"), FsAction.EXECUTE)
+ );
+ }
+
+ @HdfsCompatCase
+ public void setPermission() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().setPermission(makePath("file"), FsPermission.getDefault())
+ );
+ }
+
+ @HdfsCompatCase
+ public void createSymlink() {
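+    // Symlink support is disabled globally by default and must be enabled
+    // before createSymlink can be exercised.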
+ FileSystem.enableSymlinks();
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().createSymlink(makePath("file"), makePath("link"), true)
+ );
+ }
+
+ @HdfsCompatCase
+ public void getFileLinkStatus() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getFileLinkStatus(makePath("file"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void getLinkTarget() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getLinkTarget(makePath("link"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void modifyAclEntries() {
+ List
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.compat.common.*;
+import org.apache.hadoop.io.IOUtils;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.concurrent.CompletableFuture;
+
+@HdfsCompatCaseGroup(name = "Create")
+public class HdfsCompatCreate extends AbstractHdfsCompatCase {
+ private Path path;
+
+ @HdfsCompatCasePrepare
+ public void prepare() {
+ this.path = makePath("path");
+ }
+
+ @HdfsCompatCaseCleanup
+ public void cleanup() {
+ HdfsCompatUtil.deleteQuietly(fs(), this.path, true);
+ }
+
+ @HdfsCompatCase
+ public void mkdirs() throws IOException {
+ fs().mkdirs(path);
+ Assert.assertTrue(fs().exists(path));
+ }
+
+ @HdfsCompatCase
+ public void create() throws IOException {
+ FSDataOutputStream out = null;
+ try {
+ out = fs().create(path, true);
+ Assert.assertTrue(fs().exists(path));
+ } finally {
+ IOUtils.closeStream(out);
+ }
+ }
+
+ @HdfsCompatCase
+ public void createNonRecursive() {
+ Path file = new Path(path, "file-no-parent");
+ try {
+ fs().createNonRecursive(file, true, 1024, (short) 1, 1048576, null);
+ Assert.fail("Should fail since parent does not exist");
+ } catch (IOException ignored) {
+ }
+ }
+
+ @HdfsCompatCase
+ public void createNewFile() throws IOException {
+ HdfsCompatUtil.createFile(fs(), path, 0);
+ Assert.assertFalse(fs().createNewFile(path));
+ }
+
+ @HdfsCompatCase
+ public void append() throws IOException {
+ HdfsCompatUtil.createFile(fs(), path, 128);
+ FSDataOutputStream out = null;
+ byte[] data = new byte[64];
+ try {
+ out = fs().append(path);
+ out.write(data);
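+      // Close and null the stream so the closeStream in the finally block
+      // becomes a no-op on the success path.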
+ out.close();
+ out = null;
+ FileStatus fileStatus = fs().getFileStatus(path);
+ Assert.assertEquals(128 + 64, fileStatus.getLen());
+ } finally {
+ IOUtils.closeStream(out);
+ }
+ }
+
+ @HdfsCompatCase
+ public void createFile() throws IOException {
+ FSDataOutputStream out = null;
+ fs().mkdirs(path);
+ final Path file = new Path(path, "file");
+ try {
+ FSDataOutputStreamBuilder builder = fs().createFile(file);
+ out = builder.blockSize(1048576 * 2).build();
+ out.write("Hello World!".getBytes(StandardCharsets.UTF_8));
+ out.close();
+ out = null;
+ Assert.assertTrue(fs().exists(file));
+ } finally {
+ IOUtils.closeStream(out);
+ }
+ }
+
+ @HdfsCompatCase
+ public void appendFile() throws IOException {
+ HdfsCompatUtil.createFile(fs(), path, 128);
+ FSDataOutputStream out = null;
+ byte[] data = new byte[64];
+ try {
+ FSDataOutputStreamBuilder builder = fs().appendFile(path);
+ out = builder.build();
+ out.write(data);
+ out.close();
+ out = null;
+ FileStatus fileStatus = fs().getFileStatus(path);
+ Assert.assertEquals(128 + 64, fileStatus.getLen());
+ } finally {
+ IOUtils.closeStream(out);
+ }
+ }
+
+ @HdfsCompatCase
+ public void createMultipartUploader() throws Exception {
+ MultipartUploader mpu = null;
+ UploadHandle handle = null;
+ try {
+ MultipartUploaderBuilder builder = fs().createMultipartUploader(path);
+ final Path file = fs().makeQualified(new Path(path, "file"));
+ mpu = builder.blockSize(1048576).build();
+ CompletableFuture
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.compat.common.*;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+@HdfsCompatCaseGroup(name = "Directory")
+public class HdfsCompatDirectory extends AbstractHdfsCompatCase {
+ private static final int FILE_LEN = 128;
+ private Path dir = null;
+ private Path file = null;
+
+ @HdfsCompatCasePrepare
+ public void prepare() throws IOException {
+ this.dir = makePath("dir");
+ this.file = new Path(this.dir, "file");
+ HdfsCompatUtil.createFile(fs(), file, FILE_LEN);
+ }
+
+ @HdfsCompatCaseCleanup
+ public void cleanup() throws IOException {
+ HdfsCompatUtil.deleteQuietly(fs(), this.dir, true);
+ }
+
+ @HdfsCompatCase
+ public void isDirectory() throws IOException {
+ Assert.assertTrue(fs().isDirectory(dir));
+ }
+
+ @HdfsCompatCase
+ public void listStatus() throws IOException {
+ FileStatus[] files = fs().listStatus(dir);
+ Assert.assertNotNull(files);
+ Assert.assertEquals(1, files.length);
+ Assert.assertEquals(file.getName(), files[0].getPath().getName());
+ }
+
+ @HdfsCompatCase
+ public void globStatus() throws IOException {
+ FileStatus[] files = fs().globStatus(new Path(dir, "*ile"));
+ Assert.assertNotNull(files);
+ Assert.assertEquals(1, files.length);
+ Assert.assertEquals(file.getName(), files[0].getPath().getName());
+ }
+
+ @HdfsCompatCase
+ public void listLocatedStatus() throws IOException {
+ RemoteIterator
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.compat.common.*;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.DataChecksum;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Random;
+
+@HdfsCompatCaseGroup(name = "File")
+public class HdfsCompatFile extends AbstractHdfsCompatCase {
+ private static final int FILE_LEN = 128;
+ private static final long BLOCK_SIZE = 1048576;
+ private static final short REPLICATION = 1;
+ private static final Random RANDOM = new Random();
+ private Path file = null;
+
+ @HdfsCompatCasePrepare
+ public void prepare() throws IOException {
+ this.file = makePath("file");
+ HdfsCompatUtil.createFile(fs(), this.file, true,
+ 1024, FILE_LEN, BLOCK_SIZE, REPLICATION);
+ }
+
+ @HdfsCompatCaseCleanup
+ public void cleanup() throws IOException {
+ HdfsCompatUtil.deleteQuietly(fs(), this.file, true);
+ }
+
+ @HdfsCompatCase
+ public void getFileStatus() throws IOException {
+ FileStatus fileStatus = fs().getFileStatus(file);
+ Assert.assertNotNull(fileStatus);
+ Assert.assertEquals(file.getName(), fileStatus.getPath().getName());
+ }
+
+ @HdfsCompatCase
+ public void exists() throws IOException {
+ Assert.assertTrue(fs().exists(file));
+ }
+
+ @HdfsCompatCase
+ public void isFile() throws IOException {
+ Assert.assertTrue(fs().isFile(file));
+ }
+
+ @HdfsCompatCase
+ public void getLength() throws IOException {
+ Assert.assertEquals(FILE_LEN, fs().getLength(file));
+ }
+
+ @HdfsCompatCase(brief = "arbitrary blockSize")
+ public void getBlockSize() throws IOException {
+ Assert.assertEquals(BLOCK_SIZE, fs().getBlockSize(file));
+ }
+
+ @HdfsCompatCase
+ public void renameFile() throws IOException {
+ Path dst = new Path(file.toString() + "_rename_dst");
+ fs().rename(file, dst);
+ Assert.assertFalse(fs().exists(file));
+ Assert.assertTrue(fs().exists(dst));
+ }
+
+ @HdfsCompatCase
+ public void deleteFile() throws IOException {
+ fs().delete(file, true);
+ Assert.assertFalse(fs().exists(file));
+ }
+
+ @HdfsCompatCase
+ public void deleteOnExit() throws IOException {
+ FileSystem newFs = FileSystem.newInstance(fs().getUri(), fs().getConf());
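+    // Paths registered via deleteOnExit are removed when the owning
+    // FileSystem instance is closed, hence the assertion below.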
+ newFs.deleteOnExit(file);
+ newFs.close();
+ Assert.assertFalse(fs().exists(file));
+ }
+
+ @HdfsCompatCase
+ public void cancelDeleteOnExit() throws IOException {
+ FileSystem newFs = FileSystem.newInstance(fs().getUri(), fs().getConf());
+ newFs.deleteOnExit(file);
+ newFs.cancelDeleteOnExit(file);
+ newFs.close();
+ Assert.assertTrue(fs().exists(file));
+ }
+
+ @HdfsCompatCase
+ public void truncate() throws IOException, InterruptedException {
+ int newLen = RANDOM.nextInt(FILE_LEN);
+ boolean finished = fs().truncate(file, newLen);
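+    // truncate() returns false when the operation cannot complete
+    // immediately (e.g. block recovery is still in progress); poll until it
+    // reports completion.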
+ while (!finished) {
+ Thread.sleep(1000);
+ finished = fs().truncate(file, newLen);
+ }
+ FileStatus fileStatus = fs().getFileStatus(file);
+ Assert.assertEquals(newLen, fileStatus.getLen());
+ }
+
+ @HdfsCompatCase
+ public void setOwner() throws Exception {
+ final String owner = "test_" + RANDOM.nextInt(1024);
+ final String group = "test_" + RANDOM.nextInt(1024);
+ final String privileged = getPrivilegedUser();
+ UserGroupInformation.createRemoteUser(privileged).doAs(
+ (PrivilegedExceptionAction
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.compat.common.*;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.util.Random;
+
+@HdfsCompatCaseGroup(name = "Local")
+public class HdfsCompatLocal extends AbstractHdfsCompatCase {
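+  // Exercises the copy/move APIs that transfer data between the target
+  // FileSystem and the local file system.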
+ private static final int FILE_LEN = 128;
+ private static final Random RANDOM = new Random();
+ private LocalFileSystem localFs;
+ private Path localBasePath;
+ private Path localSrc;
+ private Path localDst;
+ private Path src;
+ private Path dst;
+
+ @HdfsCompatCaseSetUp
+ public void setUp() throws IOException {
+ localFs = FileSystem.getLocal(fs().getConf());
+ localBasePath = localFs.makeQualified(getLocalPath());
+ }
+
+ @HdfsCompatCaseTearDown
+ public void tearDown() {
+ HdfsCompatUtil.deleteQuietly(localFs, localBasePath, true);
+ }
+
+ @HdfsCompatCasePrepare
+ public void prepare() throws IOException {
+ final String unique = System.currentTimeMillis()
+ + "_" + RANDOM.nextLong() + "/";
+ this.localSrc = new Path(localBasePath, unique + "src");
+ this.localDst = new Path(localBasePath, unique + "dst");
+ this.src = new Path(getBasePath(), unique + "src");
+ this.dst = new Path(getBasePath(), unique + "dst");
+ HdfsCompatUtil.createFile(localFs, this.localSrc, FILE_LEN);
+ HdfsCompatUtil.createFile(fs(), this.src, FILE_LEN);
+ }
+
+ @HdfsCompatCaseCleanup
+ public void cleanup() {
+ HdfsCompatUtil.deleteQuietly(fs(), this.src.getParent(), true);
+ HdfsCompatUtil.deleteQuietly(localFs, this.localSrc.getParent(), true);
+ }
+
+ @HdfsCompatCase
+ public void copyFromLocalFile() throws IOException {
+ fs().copyFromLocalFile(localSrc, dst);
+ Assert.assertTrue(localFs.exists(localSrc));
+ Assert.assertTrue(fs().exists(dst));
+ }
+
+ @HdfsCompatCase
+ public void moveFromLocalFile() throws IOException {
+ fs().moveFromLocalFile(localSrc, dst);
+ Assert.assertFalse(localFs.exists(localSrc));
+ Assert.assertTrue(fs().exists(dst));
+ }
+
+ @HdfsCompatCase
+ public void copyToLocalFile() throws IOException {
+ fs().copyToLocalFile(src, localDst);
+ Assert.assertTrue(fs().exists(src));
+ Assert.assertTrue(localFs.exists(localDst));
+ }
+
+ @HdfsCompatCase
+ public void moveToLocalFile() throws IOException {
+ fs().moveToLocalFile(src, localDst);
+ Assert.assertFalse(fs().exists(src));
+ Assert.assertTrue(localFs.exists(localDst));
+ }
+
+ @HdfsCompatCase
+ public void startLocalOutput() throws IOException {
+ Path local = fs().startLocalOutput(dst, localDst);
+ HdfsCompatUtil.createFile(localFs, local, 16);
+ Assert.assertTrue(localFs.exists(local));
+ }
+
+ @HdfsCompatCase
+ public void completeLocalOutput() throws IOException {
+ Path local = fs().startLocalOutput(dst, localDst);
+ HdfsCompatUtil.createFile(localFs, local, 16);
+ fs().completeLocalOutput(dst, localDst);
+ Assert.assertTrue(fs().exists(dst));
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatServer.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatServer.java
new file mode 100644
index 0000000000000..aa988fba3e08e
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatServer.java
@@ -0,0 +1,223 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.compat.common.*;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+@HdfsCompatCaseGroup(name = "Server")
+public class HdfsCompatServer extends AbstractHdfsCompatCase {
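+  // Unlike the "FileSystem" group, these cases invoke the APIs for real and
+  // assert on the returned values.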
+ private void isValid(String name) {
+ Assert.assertNotNull(name);
+ Assert.assertFalse(name.isEmpty());
+ }
+
+ @HdfsCompatCase
+ public void initialize() throws Exception {
+    Class<? extends FileSystem> cls = FileSystem.getFileSystemClass(
+ getBasePath().toUri().getScheme(), fs().getConf());
+    Constructor<? extends FileSystem> ctor =
+ cls.getDeclaredConstructor();
+ ctor.setAccessible(true);
+ FileSystem newFs = ctor.newInstance();
+ newFs.initialize(fs().getUri(), fs().getConf());
+ }
+
+ @HdfsCompatCase
+ public void getScheme() {
+ final String scheme = fs().getScheme();
+ isValid(scheme);
+ }
+
+ @HdfsCompatCase
+ public void getUri() {
+ URI uri = fs().getUri();
+ isValid(uri.getScheme());
+ }
+
+ @HdfsCompatCase
+ public void getCanonicalServiceName() {
+ final String serviceName = fs().getCanonicalServiceName();
+ isValid(serviceName);
+ }
+
+ @HdfsCompatCase
+ public void getName() {
+ final String name = fs().getName();
+ isValid(name);
+ }
+
+ @HdfsCompatCase
+ public void makeQualified() {
+ Path path = fs().makeQualified(makePath("file"));
+ isValid(path.toUri().getScheme());
+ }
+
+ @HdfsCompatCase
+ public void getChildFileSystems() {
+ fs().getChildFileSystems();
+ }
+
+ @HdfsCompatCase
+ public void resolvePath() throws IOException {
+ FileSystem.enableSymlinks();
+ Path file = makePath("file");
+ Path link = new Path(file.toString() + "_link");
+ HdfsCompatUtil.createFile(fs(), file, 0);
+ fs().createSymlink(file, link, true);
+ Path resolved = fs().resolvePath(link);
+ Assert.assertEquals(file.getName(), resolved.getName());
+ }
+
+ @HdfsCompatCase
+ public void getHomeDirectory() {
+ final Path home = fs().getHomeDirectory();
+ isValid(home.toString());
+ }
+
+ @HdfsCompatCase
+ public void setWorkingDirectory() throws IOException {
+ FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf());
+ Path work = makePath("work");
+ another.setWorkingDirectory(work);
+ Assert.assertEquals(work.getName(),
+ another.getWorkingDirectory().getName());
+ }
+
+ @HdfsCompatCase
+ public void getWorkingDirectory() {
+ Path work = fs().getWorkingDirectory();
+ isValid(work.toString());
+ }
+
+ @HdfsCompatCase
+ public void close() throws IOException {
+ FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf());
+ another.close();
+ }
+
+ @HdfsCompatCase
+ public void getDefaultBlockSize() {
+ Assert.assertTrue(fs().getDefaultBlockSize(getBasePath()) >= 0);
+ }
+
+ @HdfsCompatCase
+ public void getDefaultReplication() {
+ Assert.assertTrue(fs().getDefaultReplication(getBasePath()) >= 0);
+ }
+
+ @HdfsCompatCase
+ public void getStorageStatistics() {
+ Assert.assertNotNull(fs().getStorageStatistics());
+ }
+
+ // @HdfsCompatCase
+ public void setVerifyChecksum() {
+ }
+
+ // @HdfsCompatCase
+ public void setWriteChecksum() {
+ }
+
+ @HdfsCompatCase
+ public void getDelegationToken() throws IOException {
+ Assert.assertNotNull(fs().getDelegationToken(getDelegationTokenRenewer()));
+ }
+
+ @HdfsCompatCase
+ public void getAdditionalTokenIssuers() throws IOException {
+ Assert.assertNotNull(fs().getAdditionalTokenIssuers());
+ }
+
+ @HdfsCompatCase
+ public void getServerDefaults() throws IOException {
+ FsServerDefaults d = fs().getServerDefaults(getBasePath());
+ Assert.assertTrue(d.getBlockSize() >= 0);
+ }
+
+ @HdfsCompatCase
+ public void msync() throws IOException {
+ fs().msync();
+ }
+
+ @HdfsCompatCase
+ public void getStatus() throws IOException {
+ FsStatus status = fs().getStatus();
+ Assert.assertTrue(status.getRemaining() > 0);
+ }
+
+ @HdfsCompatCase
+ public void getTrashRoot() {
+ Path trash = fs().getTrashRoot(makePath("file"));
+ isValid(trash.toString());
+ }
+
+ @HdfsCompatCase
+ public void getTrashRoots() {
+ Collection
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.compat.common.*;
+import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+@HdfsCompatCaseGroup(name = "Snapshot")
+public class HdfsCompatSnapshot extends AbstractHdfsCompatCase {
+ private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatSnapshot.class);
+ private final String snapshotName = "s-name";
+ private final String fileName = "file";
+ private Path base;
+ private Path dir;
+ private Path snapshot;
+ private Method allow;
+ private Method disallow;
+
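+  // Snapshots are exposed under a ".snapshot" pseudo-directory of the
+  // snapshottable directory.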
+ private static Path getSnapshotPath(Path path, String snapshotName) {
+ return new Path(path, ".snapshot/" + snapshotName);
+ }
+
+ @HdfsCompatCaseSetUp
+ public void setUp() throws Exception {
+ this.base = getUniquePath();
+ fs().mkdirs(this.base);
+ try {
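+      // allowSnapshot/disallowSnapshot are not part of the FileSystem API,
+      // so they are looked up reflectively; implementations without them can
+      // still run this group (see the catch block below).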
+ Method allowSnapshotMethod = fs().getClass()
+ .getMethod("allowSnapshot", Path.class);
+ allowSnapshotMethod.setAccessible(true);
+ allowSnapshotMethod.invoke(fs(), this.base);
+ this.allow = allowSnapshotMethod;
+
+ Method disallowSnapshotMethod = fs().getClass()
+ .getMethod("disallowSnapshot", Path.class);
+ disallowSnapshotMethod.setAccessible(true);
+ disallowSnapshotMethod.invoke(fs(), this.base);
+ this.disallow = disallowSnapshotMethod;
+ } catch (InvocationTargetException e) {
+ // Method exists but the invocation throws an exception.
+ Throwable cause = e.getCause();
+ if (cause instanceof Exception) {
+ throw (Exception) cause;
+ } else {
+ throw new RuntimeException(cause);
+ }
+ } catch (ReflectiveOperationException e) {
+ if (this.allow == null) {
+ LOG.warn("No allowSnapshot method found.");
+ }
+ if (this.disallow == null) {
+ LOG.warn("No disallowSnapshot method found.");
+ }
+ }
+ }
+
+ @HdfsCompatCaseTearDown
+ public void tearDown() throws ReflectiveOperationException {
+ try {
+ if (this.disallow != null) {
+ disallow.invoke(fs(), this.base);
+ }
+ } finally {
+ HdfsCompatUtil.deleteQuietly(fs(), this.base, true);
+ }
+ }
+
+ @HdfsCompatCasePrepare
+ public void prepare() throws IOException, ReflectiveOperationException {
+ this.dir = getUniquePath(base);
+ HdfsCompatUtil.createFile(fs(), new Path(this.dir, this.fileName), 0);
+ if (this.allow != null) {
+ allow.invoke(fs(), this.dir);
+ }
+ this.snapshot = fs().createSnapshot(this.dir, this.snapshotName);
+ }
+
+ @HdfsCompatCaseCleanup
+ public void cleanup() throws ReflectiveOperationException {
+ try {
+ try {
+ fs().deleteSnapshot(this.dir, this.snapshotName);
+ } catch (IOException ignored) {
+ }
+ if (this.disallow != null) {
+ disallow.invoke(fs(), this.dir);
+ }
+ } finally {
+ HdfsCompatUtil.deleteQuietly(fs(), this.dir, true);
+ }
+ }
+
+ @HdfsCompatCase
+ public void createSnapshot() throws IOException {
+ Assert.assertNotEquals(snapshot.toString(), dir.toString());
+ Assert.assertTrue(fs().exists(snapshot));
+ Assert.assertTrue(fs().exists(new Path(snapshot, fileName)));
+ }
+
+ @HdfsCompatCase
+ public void renameSnapshot() throws IOException {
+ fs().renameSnapshot(dir, snapshotName, "s-name2");
+ Assert.assertFalse(fs().exists(new Path(snapshot, fileName)));
+ snapshot = getSnapshotPath(dir, "s-name2");
+ Assert.assertTrue(fs().exists(new Path(snapshot, fileName)));
+ fs().renameSnapshot(dir, "s-name2", snapshotName);
+ }
+
+ @HdfsCompatCase
+ public void deleteSnapshot() throws IOException {
+ fs().deleteSnapshot(dir, snapshotName);
+ Assert.assertFalse(fs().exists(snapshot));
+ Assert.assertFalse(fs().exists(new Path(snapshot, fileName)));
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatStoragePolicy.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatStoragePolicy.java
new file mode 100644
index 0000000000000..38bdde9afbaf4
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatStoragePolicy.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.compat.common.*;
+import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+@HdfsCompatCaseGroup(name = "StoragePolicy")
+public class HdfsCompatStoragePolicy extends AbstractHdfsCompatCase {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HdfsCompatStoragePolicy.class);
+ private static final Random RANDOM = new Random();
+ private Path dir;
+ private Path file;
+ private String[] policies;
+ private String defaultPolicyName;
+ private String policyName;
+
+ @HdfsCompatCaseSetUp
+ public void setUp() throws IOException {
+ policies = getStoragePolicyNames();
+ }
+
+ @HdfsCompatCasePrepare
+ public void prepare() throws IOException {
+ this.dir = makePath("dir");
+ this.file = new Path(this.dir, "file");
+ HdfsCompatUtil.createFile(fs(), file, 0);
+
+ BlockStoragePolicySpi policy = fs().getStoragePolicy(this.dir);
+ this.defaultPolicyName = (policy == null) ? null : policy.getName();
+
+ List
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.compat.common.*;
+import org.junit.Assert;
+
+import java.io.IOException;
+
+@HdfsCompatCaseGroup(name = "Symlink")
+public class HdfsCompatSymlink extends AbstractHdfsCompatCase {
+ private static final int FILE_LEN = 128;
+ private Path target = null;
+ private Path link = null;
+
+ @HdfsCompatCaseSetUp
+ public void setUp() {
+ FileSystem.enableSymlinks();
+ }
+
+ @HdfsCompatCasePrepare
+ public void prepare() throws IOException {
+ this.target = makePath("target");
+ this.link = new Path(this.target.getParent(), "link");
+ HdfsCompatUtil.createFile(fs(), this.target, FILE_LEN);
+ fs().createSymlink(this.target, this.link, true);
+ }
+
+ @HdfsCompatCaseCleanup
+ public void cleanup() throws IOException {
+ HdfsCompatUtil.deleteQuietly(fs(), this.link, true);
+ HdfsCompatUtil.deleteQuietly(fs(), this.target, true);
+ }
+
+ @HdfsCompatCase
+ public void createSymlink() throws IOException {
+ Assert.assertTrue(fs().exists(link));
+ }
+
+ @HdfsCompatCase
+ public void getFileLinkStatus() throws IOException {
+ FileStatus linkStatus = fs().getFileLinkStatus(link);
+ Assert.assertTrue(linkStatus.isSymlink());
+ Assert.assertEquals(target.getName(), linkStatus.getSymlink().getName());
+ }
+
+ @HdfsCompatCase
+ public void getLinkTarget() throws IOException {
+ Path src = fs().getLinkTarget(link);
+ Assert.assertEquals(target.getName(), src.getName());
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatTpcds.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatTpcds.java
new file mode 100644
index 0000000000000..421e6d4a61850
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatTpcds.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.compat.common.*;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+@HdfsCompatCaseGroup(name = "TPCDS")
+public class HdfsCompatTpcds extends AbstractHdfsCompatCase {
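+  // Covers the basic FileSystem calls a TPC-DS-style workload typically
+  // issues.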
+ private static final int FILE_LEN = 8;
+ private static final Random RANDOM = new Random();
+ private Path path = null;
+
+ @HdfsCompatCasePrepare
+ public void prepare() throws IOException {
+ path = makePath("path");
+ }
+
+ @HdfsCompatCaseCleanup
+ public void cleanup() throws IOException {
+ HdfsCompatUtil.deleteQuietly(fs(), path, true);
+ }
+
+ @HdfsCompatCase
+ public void open() throws IOException {
+ HdfsCompatUtil.createFile(fs(), path, FILE_LEN);
+ byte[] data = new byte[FILE_LEN];
+ try (FSDataInputStream in = fs().open(path)) {
+ in.readFully(data);
+ }
+ }
+
+ @HdfsCompatCase
+ public void create() throws IOException {
+ byte[] data = new byte[FILE_LEN];
+ RANDOM.nextBytes(data);
+ try (FSDataOutputStream out = fs().create(path, true)) {
+ out.write(data);
+ }
+ }
+
+ @HdfsCompatCase
+ public void mkdirs() throws IOException {
+ Assert.assertTrue(fs().mkdirs(path));
+ }
+
+ @HdfsCompatCase
+ public void getFileStatus() throws IOException {
+ HdfsCompatUtil.createFile(fs(), path, FILE_LEN);
+ FileStatus fileStatus = fs().getFileStatus(path);
+ Assert.assertEquals(FILE_LEN, fileStatus.getLen());
+ }
+
+ @HdfsCompatCase
+ public void listStatus() throws IOException {
+ HdfsCompatUtil.createFile(fs(), new Path(path, "file"), FILE_LEN);
+ FileStatus[] files = fs().listStatus(path);
+ Assert.assertEquals(1, files.length);
+ Assert.assertEquals(FILE_LEN, files[0].getLen());
+ }
+
+ @HdfsCompatCase
+ public void listLocatedStatus() throws IOException {
+ HdfsCompatUtil.createFile(fs(), new Path(path, "file"), FILE_LEN);
+ RemoteIterator
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.compat.common.*;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+@HdfsCompatCaseGroup(name = "XAttr")
+public class HdfsCompatXAttr extends AbstractHdfsCompatCase {
+ private Path file;
+
+ @HdfsCompatCasePrepare
+ public void prepare() throws IOException {
+ this.file = makePath("file");
+ HdfsCompatUtil.createFile(fs(), this.file, 0);
+ }
+
+ @HdfsCompatCaseCleanup
+ public void cleanup() {
+ HdfsCompatUtil.deleteQuietly(fs(), this.file, true);
+ }
+
+ @HdfsCompatCase
+ public void setXAttr() throws IOException {
+ final String key = "user.key";
+ final byte[] value = "value".getBytes(StandardCharsets.UTF_8);
+ fs().setXAttr(file, key, value);
+ Map
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains the default compatibility cases for
+ * {@link org.apache.hadoop.fs.FileSystem} APIs.
+ */
+package org.apache.hadoop.fs.compat.cases;
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/AbstractHdfsCompatCase.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/AbstractHdfsCompatCase.java
new file mode 100644
index 0000000000000..270ff7833b5ab
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/AbstractHdfsCompatCase.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+
+import java.util.Random;
+
+public abstract class AbstractHdfsCompatCase {
+ private static final Random RANDOM = new Random();
+
+ private FileSystem fs;
+ private HdfsCompatEnvironment env;
+ private Path localPath;
+
+ public AbstractHdfsCompatCase() {
+ }
+
+ public void init(HdfsCompatEnvironment environment) {
+ this.env = environment;
+ this.fs = env.getFileSystem();
+ LocalFileSystem localFs = env.getLocalFileSystem();
+ this.localPath = localFs.makeQualified(new Path(env.getLocalTmpDir()));
+ }
+
+ public FileSystem fs() {
+ return fs;
+ }
+
+ public Path getRootPath() {
+ return this.env.getRoot();
+ }
+
+ public Path getBasePath() {
+ return this.env.getBase();
+ }
+
+ public Path getUniquePath() {
+ return getUniquePath(getBasePath());
+ }
+
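+  // A timestamp combined with a random long keeps concurrently running
+  // cases from colliding on the same path.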
+ public static Path getUniquePath(Path basePath) {
+ return new Path(basePath, System.currentTimeMillis()
+ + "_" + RANDOM.nextLong());
+ }
+
+ public Path makePath(String name) {
+ return new Path(getUniquePath(), name);
+ }
+
+ public Path getLocalPath() {
+ return localPath;
+ }
+
+ public String getPrivilegedUser() {
+ return this.env.getPrivilegedUser();
+ }
+
+ public String[] getStoragePolicyNames() {
+ return this.env.getStoragePolicyNames();
+ }
+
+ public String getDelegationTokenRenewer() {
+ return this.env.getDelegationTokenRenewer();
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatApiScope.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatApiScope.java
new file mode 100644
index 0000000000000..8783272687f9f
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatApiScope.java
@@ -0,0 +1,358 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+
+import org.apache.hadoop.classification.VisibleForTesting;
+import org.apache.hadoop.fs.compat.HdfsCompatTool;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+
+public class HdfsCompatApiScope {
+ static final boolean SKIP_NO_SUCH_METHOD_ERROR = true;
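+  // When true, a case failing with NoSuchMethodError (the API is absent in
+  // the running Hadoop version) is counted as skipped rather than failed.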
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HdfsCompatApiScope.class);
+
+ private final HdfsCompatEnvironment env;
+ private final HdfsCompatSuite suite;
+
+ public HdfsCompatApiScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
+ this.env = env;
+ this.suite = suite;
+ }
+
+ public HdfsCompatReport apply() {
+ List
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.METHOD})
+public @interface HdfsCompatCase {
+ String brief() default "";
+
+ String ifDef() default "";
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCaseCleanup.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCaseCleanup.java
new file mode 100644
index 0000000000000..487eed8590aaf
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCaseCleanup.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.METHOD})
+public @interface HdfsCompatCaseCleanup {
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCaseGroup.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCaseGroup.java
new file mode 100644
index 0000000000000..516acedb9a4ae
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCaseGroup.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.TYPE})
+public @interface HdfsCompatCaseGroup {
+ String name() default "";
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCasePrepare.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCasePrepare.java
new file mode 100644
index 0000000000000..a9f360d3d3046
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCasePrepare.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.METHOD})
+public @interface HdfsCompatCasePrepare {
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCaseSetUp.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCaseSetUp.java
new file mode 100644
index 0000000000000..91d7ad484302f
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCaseSetUp.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.METHOD})
+public @interface HdfsCompatCaseSetUp {
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCaseTearDown.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCaseTearDown.java
new file mode 100644
index 0000000000000..bdadd6a08b112
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCaseTearDown.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.METHOD})
+public @interface HdfsCompatCaseTearDown {
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCommand.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCommand.java
new file mode 100644
index 0000000000000..644b53ee4efd0
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatCommand.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+import org.apache.hadoop.classification.VisibleForTesting;
+import org.apache.hadoop.fs.compat.suites.HdfsCompatSuiteForAll;
+import org.apache.hadoop.fs.compat.suites.HdfsCompatSuiteForShell;
+import org.apache.hadoop.fs.compat.suites.HdfsCompatSuiteForTpcds;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.util.HashMap;
+import java.util.Map;
+
+public class HdfsCompatCommand {
+ private final Path uri;
+ private final String suiteName;
+ private final Configuration conf;
+ private HdfsCompatSuite suite;
+ private HdfsCompatApiScope api;
+ private HdfsCompatShellScope shell;
+
+ public HdfsCompatCommand(String uri, String suiteName, Configuration conf) {
+ this.uri = new Path(uri);
+ this.suiteName = suiteName.toLowerCase();
+ this.conf = conf;
+ }
+
+ public void initialize() throws ReflectiveOperationException, IOException {
+ initSuite();
+ HdfsCompatEnvironment env = new HdfsCompatEnvironment(uri, conf);
+ env.init();
+ if (hasApiCase()) {
+ api = new HdfsCompatApiScope(env, suite);
+ }
+ if (hasShellCase()) {
+ shell = new HdfsCompatShellScope(env, suite);
+ }
+ }
+
+ public HdfsCompatReport apply() throws Exception {
+ HdfsCompatReport report = new HdfsCompatReport(uri.toString(), suite);
+ if (api != null) {
+ report.merge(api.apply());
+ }
+ if (shell != null) {
+ report.merge(shell.apply());
+ }
+ return report;
+ }
+
+ private void initSuite() throws ReflectiveOperationException {
+    // Reconstructed: the generic type parameters and the remainder of this
+    // file were truncated in the extracted diff. Custom suites are assumed
+    // to be loaded reflectively by class name, as the Constructor import
+    // suggests; default suites are keyed by their lower-case names.
+    Map<String, HdfsCompatSuite> defaultSuites = getDefaultSuites();
+    this.suite = defaultSuites.get(suiteName);
+    if (this.suite == null) {
+      Class<?> suiteClass = Class.forName(suiteName);
+      Constructor<?> ctor = suiteClass.getDeclaredConstructor();
+      ctor.setAccessible(true);
+      this.suite = (HdfsCompatSuite) ctor.newInstance();
+    }
+  }
+
+  private boolean hasApiCase() {
+    return suite.getApiCases().length > 0;
+  }
+
+  private boolean hasShellCase() {
+    return suite.getShellCases().length > 0;
+  }
+
+  @VisibleForTesting
+  protected Map<String, HdfsCompatSuite> getDefaultSuites() {
+    Map<String, HdfsCompatSuite> defaultSuites = new HashMap<>();
+    defaultSuites.put("all", new HdfsCompatSuiteForAll());
+    defaultSuites.put("shell", new HdfsCompatSuiteForShell());
+    defaultSuites.put("tpcds", new HdfsCompatSuiteForTpcds());
+    return defaultSuites;
+  }
+}
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatEnvironment.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatEnvironment.java
new file mode 100644
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatEnvironment.java
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
+import java.util.Random;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
+public class HdfsCompatEnvironment {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HdfsCompatEnvironment.class);
+ private static final String DATE_FORMAT = "yyyy_MM_dd_HH_mm_ss";
+ private static final Random RANDOM = new Random();
+ private final Path uri;
+ private final Configuration conf;
+ private FileSystem fs;
+ private LocalFileSystem localFs;
+ private Path rootDir;
+ private Path baseDir;
+ private String defaultLocalDir;
+ private String[] defaultStoragePolicyNames;
+
+ public HdfsCompatEnvironment(Path uri, Configuration conf) {
+ this.conf = conf;
+ this.uri = uri;
+ }
+
+ public void init() throws IOException {
+ Date now = new Date();
+ String uuid = UUID.randomUUID().toString();
+ String uniqueDir = "hadoop-compatibility-benchmark/" +
+ new SimpleDateFormat(DATE_FORMAT).format(now) + "/" + uuid;
+
+ this.fs = uri.getFileSystem(conf);
+ this.localFs = FileSystem.getLocal(conf);
+ this.rootDir = fs.makeQualified(new Path("/"));
+ this.baseDir = fs.makeQualified(new Path(uri, uniqueDir));
+ String tmpdir = getEnvTmpDir();
+ if ((tmpdir == null) || tmpdir.isEmpty()) {
+ LOG.warn("Cannot get valid io.tmpdir, will use /tmp");
+ tmpdir = "/tmp";
+ }
+ this.defaultLocalDir = new File(tmpdir, uniqueDir).getAbsolutePath();
+ this.defaultStoragePolicyNames = getDefaultStoragePolicyNames();
+ }
+
+ public FileSystem getFileSystem() {
+ return fs;
+ }
+
+ public LocalFileSystem getLocalFileSystem() {
+ return localFs;
+ }
+
+ public Path getRoot() {
+ return rootDir;
+ }
+
+ public Path getBase() {
+ return baseDir;
+ }
+
+ public String getLocalTmpDir() {
+ final String scheme = this.uri.toUri().getScheme();
+ final String key = "fs." + scheme + ".compatibility.local.tmpdir";
+ final String localDir = conf.get(key, null);
+ return (localDir != null) ? localDir : defaultLocalDir;
+ }
+
+ public String getPrivilegedUser() {
+ final String scheme = this.uri.toUri().getScheme();
+ final String key = "fs." + scheme + ".compatibility.privileged.user";
+ final String privileged = conf.get(key, null);
+ return (privileged != null) ? privileged :
+ conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
+ DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
+ }
+
+ public String[] getStoragePolicyNames() {
+ final String scheme = this.uri.toUri().getScheme();
+ final String key = "fs." + scheme + ".compatibility.storage.policies";
+ final String storagePolicies = conf.get(key, null);
+ return (storagePolicies != null) ? storagePolicies.split(",") :
+ defaultStoragePolicyNames.clone();
+ }
+
+ public String getDelegationTokenRenewer() {
+ final String scheme = this.uri.toUri().getScheme();
+ final String key = "fs." + scheme + ".compatibility.delegation.token.renewer";
+ return conf.get(key, "");
+ }
+
+ private String getEnvTmpDir() {
+ final String systemDefault = System.getProperty("java.io.tmpdir");
+ if ((systemDefault == null) || systemDefault.isEmpty()) {
+ return null;
+ }
+ String[] tmpDirs = systemDefault.split(",|" + File.pathSeparator);
+    // Reconstructed: the rest of this file was truncated in the extracted
+    // diff. One candidate tmp dir is chosen at random, and default storage
+    // policy names are read from the target FileSystem with "Hot" as the
+    // fallback.
+    List<String> validDirs = Arrays.stream(tmpDirs)
+        .filter(dir -> !dir.isEmpty())
+        .collect(Collectors.toList());
+    if (validDirs.isEmpty()) {
+      return null;
+    }
+    return validDirs.get(RANDOM.nextInt(validDirs.size()));
+  }
+
+  private String[] getDefaultStoragePolicyNames() {
+    Collection<? extends BlockStoragePolicySpi> policies = null;
+    try {
+      policies = fs.getAllStoragePolicies();
+    } catch (Exception e) {
+      LOG.warn("Cannot get default storage policies", e);
+    }
+    if ((policies == null) || policies.isEmpty()) {
+      return new String[]{"Hot"};
+    }
+    return policies.stream()
+        .map(BlockStoragePolicySpi::getName)
+        .toArray(String[]::new);
+  }
+}
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatIllegalArgumentException.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatIllegalArgumentException.java
new file mode 100644
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatIllegalArgumentException.java
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+public class HdfsCompatIllegalArgumentException
+ extends IllegalArgumentException {
+ public HdfsCompatIllegalArgumentException(String message) {
+ super(message);
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatIllegalCaseException.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatIllegalCaseException.java
new file mode 100644
index 0000000000000..c57b232b36d7d
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatIllegalCaseException.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+
+public class HdfsCompatIllegalCaseException
+ extends HadoopIllegalArgumentException {
+ /**
+ * Constructs exception with the specified detail message.
+ * @param message detailed message.
+ */
+ public HdfsCompatIllegalCaseException(final String message) {
+ super(message);
+ }
+}
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatReport.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatReport.java
new file mode 100644
index 0000000000000..077c16822348e
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatReport.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+import java.util.Collection;
+import java.util.concurrent.ConcurrentLinkedQueue;
+
+public class HdfsCompatReport {
+ private final String uri;
+ private final HdfsCompatSuite suite;
+  private final ConcurrentLinkedQueue<String> passedCases =
+      new ConcurrentLinkedQueue<>();
+  private final ConcurrentLinkedQueue<String> failedCases =
+      new ConcurrentLinkedQueue<>();
+
+  // Reconstructed: the remainder of this class was truncated in the
+  // extracted diff. The accessors below match the call sites visible in
+  // HdfsCompatCommand and the tests (merge, getPassedCase, getFailedCase).
+  public HdfsCompatReport() {
+    this(null, null);
+  }
+
+  public HdfsCompatReport(String uri, HdfsCompatSuite suite) {
+    this.uri = uri;
+    this.suite = suite;
+  }
+
+  public void addPassedCase(Collection<String> cases) {
+    passedCases.addAll(cases);
+  }
+
+  public void addFailedCase(Collection<String> cases) {
+    failedCases.addAll(cases);
+  }
+
+  public void merge(HdfsCompatReport other) {
+    this.passedCases.addAll(other.passedCases);
+    this.failedCases.addAll(other.failedCases);
+  }
+
+  public Collection<String> getPassedCase() {
+    return passedCases;
+  }
+
+  public Collection<String> getFailedCase() {
+    return failedCases;
+  }
+
+  public String getUri() {
+    return this.uri;
+  }
+
+  public HdfsCompatSuite getSuite() {
+    return this.suite;
+  }
+}
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java
new file mode 100644
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.classification.VisibleForTesting;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.*;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.util.*;
+
+public class HdfsCompatShellScope {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HdfsCompatShellScope.class);
+ private static final Random RANDOM = new Random();
+ private final HdfsCompatEnvironment env;
+ private final HdfsCompatSuite suite;
+ private File stdoutDir = null;
+ private File passList = null;
+ private File failList = null;
+ private File skipList = null;
+ private Path snapshotPath = null;
+ private String storagePolicy = null;
+ private Method disallowSnapshot = null;
+
+ public HdfsCompatShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
+ this.env = env;
+ this.suite = suite;
+ }
+
+ public HdfsCompatReport apply() throws Exception {
+ File localTmpDir = null;
+ try {
+ localTmpDir = new File(this.env.getLocalTmpDir());
+ LOG.info("Local tmp dir: " + localTmpDir.getAbsolutePath());
+ return runShell(localTmpDir);
+ } finally {
+ try {
+ if (this.disallowSnapshot != null) {
+ try {
+ this.disallowSnapshot.invoke(this.env.getFileSystem(),
+ this.snapshotPath);
+ } catch (InvocationTargetException e) {
+ LOG.error("Cannot disallow snapshot", e.getCause());
+ } catch (ReflectiveOperationException e) {
+ LOG.error("Disallow snapshot method is invalid", e);
+ }
+ }
+ } finally {
+ FileUtils.deleteQuietly(localTmpDir);
+ }
+ }
+ }
+
+ private HdfsCompatReport runShell(File localTmpDir) throws Exception {
+ File localDir = new File(localTmpDir, "test");
+ File scriptDir = new File(localTmpDir, "scripts");
+ File confDir = new File(localTmpDir, "hadoop-conf");
+ copyScriptsResource(scriptDir);
+ try {
+ setShellLogConf(confDir);
+ } catch (Exception e) {
+ LOG.error("Cannot set new conf dir", e);
+ confDir = null;
+ }
+
+ prepareSnapshot();
+ this.storagePolicy = getStoragePolicy();
+ String[] confEnv = getEnv(localDir, scriptDir, confDir);
+ ExecResult result = exec(confEnv, scriptDir);
+ printLog(result);
+ return export();
+ }
+
+ private void copyScriptsResource(File scriptDir) throws IOException {
+ Files.createDirectories(new File(scriptDir, "cases").toPath());
+ copyResource("/misc.sh", new File(scriptDir, "misc.sh"));
+ String[] cases = suite.getShellCases();
+ for (String res : cases) {
+ copyResource("/cases/" + res, new File(scriptDir, "cases/" + res));
+ }
+ }
+
+ private void setShellLogConf(File confDir) throws IOException {
+ final String hadoopHome = System.getenv("HADOOP_HOME");
+ final String hadoopConfDir = System.getenv("HADOOP_CONF_DIR");
+ if ((hadoopHome == null) || hadoopHome.isEmpty()) {
+ LOG.error("HADOOP_HOME not configured");
+ }
+ if ((hadoopConfDir == null) || hadoopConfDir.isEmpty()) {
+ throw new IOException("HADOOP_CONF_DIR not configured");
+ }
+ File srcDir = new File(hadoopConfDir).getAbsoluteFile();
+ if (!srcDir.isDirectory()) {
+ throw new IOException("HADOOP_CONF_DIR is not valid: " + srcDir);
+ }
+
+ Files.createDirectories(confDir.toPath());
+ FileUtils.copyDirectory(srcDir, confDir);
+ File logConfFile = new File(confDir, "log4j.properties");
+ copyResource("/hadoop-compat-bench-log4j.properties", logConfFile, true);
+ }
+
+ @VisibleForTesting
+ protected void copyResource(String res, File dst) throws IOException {
+ copyResource(res, dst, false);
+ }
+
+ private void copyResource(String res, File dst, boolean overwrite)
+ throws IOException {
+ InputStream in = null;
+ try {
+ in = this.getClass().getResourceAsStream(res);
+ if (in == null) {
+ in = this.suite.getClass().getResourceAsStream(res);
+ }
+ if (in == null) {
+        throw new IOException("Resource not found" +
+            " during script preparation: " + res);
+ }
+
+ if (dst.exists() && !overwrite) {
+ throw new IOException("Cannot overwrite existing resource file");
+ }
+
+ Files.createDirectories(dst.getParentFile().toPath());
+
+ byte[] buf = new byte[1024];
+ try (OutputStream out = new FileOutputStream(dst)) {
+ int nRead = in.read(buf);
+ while (nRead != -1) {
+ out.write(buf, 0, nRead);
+ nRead = in.read(buf);
+ }
+ }
+ } finally {
+ if (in != null) {
+ in.close();
+ }
+ }
+ }
+
+ private void prepareSnapshot() {
+ this.snapshotPath = AbstractHdfsCompatCase.getUniquePath(this.env.getBase());
+ Method allowSnapshot = null;
+ try {
+ FileSystem fs = this.env.getFileSystem();
+ fs.mkdirs(snapshotPath);
+ Method allowSnapshotMethod = fs.getClass()
+ .getMethod("allowSnapshot", Path.class);
+ allowSnapshotMethod.setAccessible(true);
+ allowSnapshotMethod.invoke(fs, snapshotPath);
+ allowSnapshot = allowSnapshotMethod;
+
+ Method disallowSnapshotMethod = fs.getClass()
+ .getMethod("disallowSnapshot", Path.class);
+ disallowSnapshotMethod.setAccessible(true);
+ this.disallowSnapshot = disallowSnapshotMethod;
+ } catch (IOException e) {
+ LOG.error("Cannot prepare snapshot path", e);
+ } catch (InvocationTargetException e) {
+ LOG.error("Cannot allow snapshot", e.getCause());
+ } catch (ReflectiveOperationException e) {
+ LOG.warn("Get admin snapshot methods failed.");
+ } catch (Exception e) {
+ LOG.warn("Prepare snapshot failed", e);
+ }
+ if (allowSnapshot == null) {
+ LOG.warn("No allowSnapshot method found.");
+ }
+ if (this.disallowSnapshot == null) {
+ LOG.warn("No disallowSnapshot method found.");
+ }
+ }
+
+ private String getStoragePolicy() {
+ BlockStoragePolicySpi def;
+ String[] policies;
+ try {
+ FileSystem fs = this.env.getFileSystem();
+ Path base = this.env.getBase();
+ fs.mkdirs(base);
+ def = fs.getStoragePolicy(base);
+ policies = env.getStoragePolicyNames();
+ } catch (Exception e) {
+ LOG.warn("Cannot get storage policy", e);
+ return "Hot";
+ }
+
+    // Reconstructed: the rest of this method and the remaining private
+    // helpers referenced in runShell above (getEnv, exec, printLog, export
+    // and the ExecResult holder) were truncated in the extracted diff.
+    // Only the policy selection is restored here: pick a policy different
+    // from the default of the base directory.
+    List<String> differentPolicies = new ArrayList<>();
+    for (String policy : policies) {
+      if ((def == null) || !policy.equalsIgnoreCase(def.getName())) {
+        differentPolicies.add(policy);
+      }
+    }
+    if (differentPolicies.isEmpty()) {
+      return (def != null) ? def.getName() : "Hot";
+    }
+    return differentPolicies.get(RANDOM.nextInt(differentPolicies.size()));
+  }
+}
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatSuite.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatSuite.java
new file mode 100644
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatSuite.java
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+
+public interface HdfsCompatSuite {
+ String getSuiteName();
+
+  Class<? extends AbstractHdfsCompatCase>[] getApiCases();
+
+ String[] getShellCases();
+}
\ No newline at end of file
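Editor's note: given this interface, and assuming the reflective loading sketched in the reconstructed `initSuite` above, a third-party suite only needs a public no-arg implementation on the classpath. A hypothetical sketch reusing the ACL cases shipped in this patch (the class and suite names are invented for illustration):

```java
package org.apache.hadoop.fs.compat.suites;

import org.apache.hadoop.fs.compat.cases.HdfsCompatAcl;
import org.apache.hadoop.fs.compat.common.AbstractHdfsCompatCase;
import org.apache.hadoop.fs.compat.common.HdfsCompatSuite;

public class HdfsCompatSuiteForAclOnly implements HdfsCompatSuite {
  @Override
  public String getSuiteName() {
    return "AclOnly"; // hypothetical suite name
  }

  @Override
  public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
    // Raw Class[] matches the style of the bundled suites.
    return new Class[]{HdfsCompatAcl.class};
  }

  @Override
  public String[] getShellCases() {
    return new String[0]; // API cases only, no shell scripts
  }
}
```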
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatUtil.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatUtil.java
new file mode 100644
index 0000000000000..40ead8514cdba
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatUtil.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.Random;
+
+public final class HdfsCompatUtil {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HdfsCompatUtil.class);
+ private static final Random RANDOM = new Random();
+
+ private HdfsCompatUtil() {
+ }
+
+ public static void checkImplementation(ImplementationFunction func) {
+ try {
+ func.apply();
+ } catch (UnsupportedOperationException e) {
+ throw e;
+ } catch (NoSuchMethodError e) {
+ if (HdfsCompatApiScope.SKIP_NO_SUCH_METHOD_ERROR) {
+ throw e;
+ } else {
+ throw new UnsupportedOperationException(e);
+ }
+ } catch (Throwable ignored) {
+ }
+ }
+
+ public static void createFile(FileSystem fs, Path file, long fileLen)
+ throws IOException {
+ createFile(fs, file, true, 1024, fileLen, 1048576L, (short) 1);
+ }
+
+ public static void createFile(FileSystem fs, Path file, byte[] data)
+ throws IOException {
+ createFile(fs, file, true, data, 1048576L, (short) 1);
+ }
+
+ public static void createFile(FileSystem fs, Path file, boolean overwrite,
+ int bufferSize, long fileLen, long blockSize,
+ short replication) throws IOException {
+ assert (bufferSize > 0);
+ try (FSDataOutputStream out = fs.create(file, overwrite,
+ bufferSize, replication, blockSize)) {
+ if (fileLen > 0) {
+ byte[] toWrite = new byte[bufferSize];
+ long bytesToWrite = fileLen;
+ while (bytesToWrite > 0) {
+ RANDOM.nextBytes(toWrite);
+ int bytesToWriteNext = (bufferSize < bytesToWrite) ?
+ bufferSize : (int) bytesToWrite;
+ out.write(toWrite, 0, bytesToWriteNext);
+ bytesToWrite -= bytesToWriteNext;
+ }
+ }
+ }
+ }
+
+ public static void createFile(FileSystem fs, Path file, boolean overwrite,
+ byte[] data, long blockSize,
+ short replication) throws IOException {
+ try (FSDataOutputStream out = fs.create(file, overwrite,
+ (data.length > 0) ? data.length : 1024, replication, blockSize)) {
+ if (data.length > 0) {
+ out.write(data);
+ }
+ }
+ }
+
+ public static byte[] readFileBuffer(FileSystem fs, Path fileName)
+ throws IOException {
+ try (ByteArrayOutputStream os = new ByteArrayOutputStream();
+ FSDataInputStream in = fs.open(fileName)) {
+ IOUtils.copyBytes(in, os, 1024, true);
+ return os.toByteArray();
+ }
+ }
+
+ public static void deleteQuietly(FileSystem fs, Path path,
+ boolean recursive) {
+ if (fs != null && path != null) {
+ try {
+ fs.delete(path, recursive);
+ } catch (Throwable e) {
+ LOG.warn("When deleting {}", path, e);
+ }
+ }
+ }
+
+ public interface ImplementationFunction {
+ void apply() throws Exception;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/package-info.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/package-info.java
new file mode 100644
index 0000000000000..879eed84e0bd1
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains the main code and definitions for the tool
+ * {@link org.apache.hadoop.fs.compat.HdfsCompatTool}.
+ */
+package org.apache.hadoop.fs.compat.common;
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/package-info.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/package-info.java
new file mode 100644
index 0000000000000..342e6869bbfa8
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/package-info.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * HdfsCompatibility is a benchmark tool for quickly assessing the
+ * availability of Hadoop-Compatible File System APIs defined in
+ * {@link org.apache.hadoop.fs.FileSystem} for a specific FS implementation.
+ */
+package org.apache.hadoop.fs.compat;
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForAll.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForAll.java
new file mode 100644
index 0000000000000..99835ae3c9680
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForAll.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.suites;
+
+import org.apache.hadoop.fs.compat.common.AbstractHdfsCompatCase;
+import org.apache.hadoop.fs.compat.common.HdfsCompatSuite;
+import org.apache.hadoop.fs.compat.cases.*;
+
+public class HdfsCompatSuiteForAll implements HdfsCompatSuite {
+ @Override
+ public String getSuiteName() {
+ return "ALL";
+ }
+
+ @Override
+  public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
+ return new Class[]{
+ HdfsCompatBasics.class,
+ HdfsCompatAcl.class,
+ HdfsCompatCreate.class,
+ HdfsCompatDirectory.class,
+ HdfsCompatFile.class,
+ HdfsCompatLocal.class,
+ HdfsCompatServer.class,
+ HdfsCompatSnapshot.class,
+ HdfsCompatStoragePolicy.class,
+ HdfsCompatSymlink.class,
+ HdfsCompatXAttr.class,
+ };
+ }
+
+ @Override
+ public String[] getShellCases() {
+ return new String[]{
+ "directory.t",
+ "fileinfo.t",
+ "read.t",
+ "write.t",
+ "remove.t",
+ "attr.t",
+ "copy.t",
+ "move.t",
+ "concat.t",
+ "snapshot.t",
+ "storagePolicy.t",
+ };
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForShell.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForShell.java
new file mode 100644
index 0000000000000..61901d491ef90
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForShell.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.suites;
+
+import org.apache.hadoop.fs.compat.common.AbstractHdfsCompatCase;
+import org.apache.hadoop.fs.compat.common.HdfsCompatSuite;
+
+public class HdfsCompatSuiteForShell implements HdfsCompatSuite {
+ @Override
+ public String getSuiteName() {
+ return "Shell";
+ }
+
+ @Override
+  public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
+ return new Class[0];
+ }
+
+ @Override
+ public String[] getShellCases() {
+ return new String[]{
+ "directory.t",
+ "fileinfo.t",
+ "read.t",
+ "write.t",
+ "remove.t",
+ "attr.t",
+ "copy.t",
+ "move.t",
+ "concat.t",
+ "snapshot.t",
+ "storagePolicy.t",
+ };
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForTpcds.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForTpcds.java
new file mode 100644
index 0000000000000..cae6c6b96a4fa
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/HdfsCompatSuiteForTpcds.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.suites;
+
+import org.apache.hadoop.fs.compat.common.AbstractHdfsCompatCase;
+import org.apache.hadoop.fs.compat.common.HdfsCompatSuite;
+import org.apache.hadoop.fs.compat.cases.HdfsCompatTpcds;
+
+public class HdfsCompatSuiteForTpcds implements HdfsCompatSuite {
+ @Override
+ public String getSuiteName() {
+ return "TPCDS";
+ }
+
+ @Override
+  public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
+ return new Class[]{
+ HdfsCompatTpcds.class
+ };
+ }
+
+ @Override
+ public String[] getShellCases() {
+ return new String[0];
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/package-info.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/package-info.java
new file mode 100644
index 0000000000000..bf8a4fc925b28
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/suites/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains the default suites for the
+ * {@link org.apache.hadoop.fs.compat.HdfsCompatTool} command.
+ */
+package org.apache.hadoop.fs.compat.suites;
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/resources/hadoop-compat-bench-log4j.properties b/hadoop-tools/hadoop-compat-bench/src/main/resources/hadoop-compat-bench-log4j.properties
new file mode 100644
index 0000000000000..58a6b1325818a
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/main/resources/hadoop-compat-bench-log4j.properties
@@ -0,0 +1,24 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=info,stderr
+log4j.threshold=ALL
+log4j.appender.stderr=org.apache.log4j.ConsoleAppender
+log4j.appender.stderr.Target=System.err
+log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
+log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
diff --git a/hadoop-tools/hadoop-compat-bench/src/site/markdown/HdfsCompatBench.md b/hadoop-tools/hadoop-compat-bench/src/site/markdown/HdfsCompatBench.md
new file mode 100644
index 0000000000000..7521e9ea3cb7c
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/site/markdown/HdfsCompatBench.md
@@ -0,0 +1,101 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+# Compatibility Benchmark over HCFS Implementations
+
+## Overview
+
+Hadoop-Compatible File System (HCFS) is a core concept in the big data storage ecosystem,
+providing unified interfaces and generally clear semantics,
+and has become the de facto standard for industry storage systems to follow and conform to.
+There have been a series of HCFS implementations in Hadoop,
+such as S3AFileSystem for Amazon's S3 Object Store,
+WASB for Microsoft's Azure Blob Storage, the OSS connector for Alibaba Cloud Object Storage,
+and more from storage service providers on their own.
+
+Meanwhile, Hadoop keeps evolving, and new features are continuously contributed to HCFS interfaces,
+which existing implementations must follow and catch up with.
+However, a tool is needed to check whether these features are supported by a specific implementation.
+
+This module defines an HCFS compatibility benchmark and provides a corresponding tool
+to perform the compatibility assessment for an HCFS storage system.
+The tool is a jar file executable by `hadoop jar`;
+running it generates an HCFS compatibility report showing an overall score,
+and optionally a detailed list of passed and failed cases.
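Editor's note: the jar's entry point, `HdfsCompatTool`, is an ordinary Hadoop `Tool`, so the benchmark can also be driven programmatically. A minimal sketch, assuming only the `-uri` option visible in the tool's usage string; the target URI is a placeholder:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.compat.HdfsCompatTool;
import org.apache.hadoop.util.ToolRunner;

public class RunCompatBench {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent to: hadoop jar hadoop-compat-bench-{version}.jar -uri <uri>
    int exitCode = ToolRunner.run(conf, new HdfsCompatTool(conf),
        new String[]{"-uri", "hdfs://localhost:9000/tmp"});
    System.exit(exitCode);
  }
}
```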
+
+## Prepare
+
+First of all, there must be a properly installed Hadoop environment to run the `hadoop jar` command.
+See [HdfsUserGuide](./HdfsUserGuide.html) for more information about how to set up a Hadoop environment.
+Then, two things should be done before a quick benchmark assessment.
+
+### FileSystem implementation
+
+There must be a Java FileSystem implementation.
+The FS is known to Hadoop by config key `fs.<scheme>.impl`,
+which maps a URI scheme to the concrete FileSystem implementation class.
+The implementation jar and its dependencies must be on the Hadoop classpath.
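Editor's note: a minimal sketch of what the config key does. The `myfs` scheme and `com.example.fs.MyFileSystem` class are placeholders; the lookup only succeeds if a jar providing that class is actually on the classpath:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ResolveCustomFs {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Map the "myfs" URI scheme to a vendor-provided implementation class.
    conf.set("fs.myfs.impl", "com.example.fs.MyFileSystem");
    FileSystem fs = new Path("myfs://bucket/dir").getFileSystem(conf);
    System.out.println("Resolved implementation: " + fs.getClass().getName());
  }
}
```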
diff --git a/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/cases/HdfsCompatAclTestCases.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/cases/HdfsCompatAclTestCases.java
new file mode 100644
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/cases/HdfsCompatAclTestCases.java
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+import org.apache.hadoop.fs.compat.common.AbstractHdfsCompatCase;
+import org.apache.hadoop.fs.compat.common.HdfsCompatCase;
+import org.apache.hadoop.fs.compat.common.HdfsCompatUtil;
+
+import java.util.ArrayList;
+
+public class HdfsCompatAclTestCases extends AbstractHdfsCompatCase {
+ @HdfsCompatCase
+ public void modifyAclEntries() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().modifyAclEntries(makePath("modifyAclEntries"), new ArrayList<>())
+ );
+ }
+
+ @HdfsCompatCase
+ public void removeAclEntries() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().removeAclEntries(makePath("removeAclEntries"), new ArrayList<>())
+ );
+ }
+
+ @HdfsCompatCase
+ public void removeDefaultAcl() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().removeDefaultAcl(makePath("removeDefaultAcl"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void removeAcl() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().removeAcl(makePath("removeAcl"))
+ );
+ }
+
+ @HdfsCompatCase
+ public void setAcl() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().setAcl(makePath("setAcl"), new ArrayList<>())
+ );
+ }
+
+ @HdfsCompatCase
+ public void getAclStatus() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().getAclStatus(makePath("getAclStatus"))
+ );
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/cases/HdfsCompatMkdirTestCases.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/cases/HdfsCompatMkdirTestCases.java
new file mode 100644
index 0000000000000..f105ec770795a
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/cases/HdfsCompatMkdirTestCases.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.cases;
+
+import org.apache.hadoop.fs.compat.common.AbstractHdfsCompatCase;
+import org.apache.hadoop.fs.compat.common.HdfsCompatCase;
+import org.apache.hadoop.fs.compat.common.HdfsCompatUtil;
+
+public class HdfsCompatMkdirTestCases extends AbstractHdfsCompatCase {
+ @HdfsCompatCase
+ public void mkdirs() {
+ HdfsCompatUtil.checkImplementation(() ->
+ fs().mkdirs(makePath("mkdir"))
+ );
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatDefaultSuites.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatDefaultSuites.java
new file mode 100644
index 0000000000000..882d1fe8ef9b6
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatDefaultSuites.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+import org.apache.hadoop.fs.compat.HdfsCompatTool;
+import org.apache.hadoop.fs.compat.hdfs.HdfsCompatMiniCluster;
+import org.apache.hadoop.fs.compat.hdfs.HdfsCompatTestCommand;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestHdfsCompatDefaultSuites {
+ @Test
+ public void testSuiteAll() throws Exception {
+ HdfsCompatMiniCluster cluster = new HdfsCompatMiniCluster();
+ try {
+ cluster.start();
+ final String uri = cluster.getUri() + "/tmp";
+ Configuration conf = cluster.getConf();
+ HdfsCompatCommand cmd = new HdfsCompatTestCommand(uri, "ALL", conf);
+ cmd.initialize();
+ HdfsCompatReport report = cmd.apply();
+ Assert.assertEquals(0, report.getFailedCase().size());
+ new HdfsCompatTool(conf).printReport(report, System.out);
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ @Test
+ public void testSuiteTpcds() throws Exception {
+ HdfsCompatMiniCluster cluster = new HdfsCompatMiniCluster();
+ try {
+ cluster.start();
+ final String uri = cluster.getUri() + "/tmp";
+ Configuration conf = cluster.getConf();
+ HdfsCompatCommand cmd = new HdfsCompatTestCommand(uri, "TPCDS", conf);
+ cmd.initialize();
+ HdfsCompatReport report = cmd.apply();
+ Assert.assertEquals(0, report.getFailedCase().size());
+ new HdfsCompatTool(conf).printReport(report, System.out);
+ } finally {
+ cluster.shutdown();
+ }
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatFsCommand.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatFsCommand.java
new file mode 100644
index 0000000000000..c2d3b0260d005
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatFsCommand.java
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+
+import org.apache.hadoop.fs.compat.HdfsCompatTool;
+import org.apache.hadoop.fs.compat.hdfs.HdfsCompatMiniCluster;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.compat.cases.HdfsCompatAclTestCases;
+import org.apache.hadoop.fs.compat.cases.HdfsCompatMkdirTestCases;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+
+public class TestHdfsCompatFsCommand {
+ @Test
+ public void testDfsCompatibility() throws Exception {
+ final String suite = "ALL";
+ HdfsCompatMiniCluster cluster = null;
+ try {
+ cluster = new HdfsCompatMiniCluster();
+ cluster.start();
+ final String uri = cluster.getUri() + "/tmp";
+ final Configuration conf = cluster.getConf();
+
+ HdfsCompatCommand cmd = new TestCommand(uri, suite, conf);
+ cmd.initialize();
+ HdfsCompatReport report = cmd.apply();
+ Assert.assertEquals(7, report.getPassedCase().size());
+ Assert.assertEquals(0, report.getFailedCase().size());
+ show(conf, report);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ @Test
+ public void testLocalFsCompatibility() throws Exception {
+ final String uri = "file:///tmp/";
+ final String suite = "ALL";
+ final Configuration conf = new Configuration();
+ HdfsCompatCommand cmd = new TestCommand(uri, suite, conf);
+ cmd.initialize();
+ HdfsCompatReport report = cmd.apply();
+ Assert.assertEquals(1, report.getPassedCase().size());
+ Assert.assertEquals(6, report.getFailedCase().size());
+ show(conf, report);
+ cleanup(cmd, conf);
+ }
+
+ @Test
+ public void testFsCompatibilityWithSuite() throws Exception {
+ final String uri = "file:///tmp/";
+ final String suite = "acl";
+ final Configuration conf = new Configuration();
+ HdfsCompatCommand cmd = new TestCommand(uri, suite, conf);
+ cmd.initialize();
+ HdfsCompatReport report = cmd.apply();
+ Assert.assertEquals(0, report.getPassedCase().size());
+ Assert.assertEquals(6, report.getFailedCase().size());
+ show(conf, report);
+ cleanup(cmd, conf);
+ }
+
+ private void show(Configuration conf, HdfsCompatReport report) throws IOException {
+ new HdfsCompatTool(conf).printReport(report, System.out);
+ }
+
+ private void cleanup(HdfsCompatCommand cmd, Configuration conf) throws Exception {
+ Path basePath = ((TestCommand) cmd).getBasePath();
+ FileSystem fs = basePath.getFileSystem(conf);
+ fs.delete(basePath, true);
+ }
+
+ private static final class TestCommand extends HdfsCompatCommand {
+ private TestCommand(String uri, String suiteName, Configuration conf) {
+ super(uri, suiteName, conf);
+ }
+
+ @Override
+    protected Map<String, HdfsCompatSuite> getDefaultSuites() {
+      // Reconstructed sketch: the original body was truncated in the
+      // extracted diff. Judging by the imports and the assertions above,
+      // it registers test-only suites built from HdfsCompatAclTestCases
+      // and HdfsCompatMkdirTestCases; the TestSuite helper below is a
+      // stand-in for whatever the original used.
+      Map<String, HdfsCompatSuite> suites = new HashMap<>();
+      suites.put("all", new TestSuite(HdfsCompatAclTestCases.class,
+          HdfsCompatMkdirTestCases.class));
+      suites.put("acl", new TestSuite(HdfsCompatAclTestCases.class));
+      return suites;
+    }
+
+    private Path getBasePath() throws ReflectiveOperationException {
+      // Reconstructed sketch: the Field import suggests the base directory
+      // is reached reflectively through the command's private state; the
+      // field names "api" and "env" are assumptions.
+      Field apiField = HdfsCompatCommand.class.getDeclaredField("api");
+      apiField.setAccessible(true);
+      Object apiScope = apiField.get(this);
+      Field envField = apiScope.getClass().getDeclaredField("env");
+      envField.setAccessible(true);
+      HdfsCompatEnvironment environment =
+          (HdfsCompatEnvironment) envField.get(apiScope);
+      return environment.getBase();
+    }
+  }
+
+  private static final class TestSuite implements HdfsCompatSuite {
+    private final Class<? extends AbstractHdfsCompatCase>[] apiCases;
+
+    @SafeVarargs
+    private TestSuite(Class<? extends AbstractHdfsCompatCase>... apiCases) {
+      this.apiCases = apiCases;
+    }
+
+    @Override
+    public String getSuiteName() {
+      return "Test";
+    }
+
+    @Override
+    public Class<? extends AbstractHdfsCompatCase>[] getApiCases() {
+      return apiCases;
+    }
+
+    @Override
+    public String[] getShellCases() {
+      return new String[0];
+    }
+  }
+}
diff --git a/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatInterfaceCoverage.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatInterfaceCoverage.java
new file mode 100644
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatInterfaceCoverage.java
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+
+import org.apache.hadoop.fs.compat.cases.HdfsCompatBasics;
+import org.apache.hadoop.fs.FileSystem;
+import org.junit.Assert;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.lang.reflect.Method;
+import java.util.HashSet;
+import java.util.Set;
+
+public class TestHdfsCompatInterfaceCoverage {
+ @Test
+ @Ignore
+ public void testFsCompatibility() {
+    // Reconstructed sketch: the original body was truncated in the
+    // extracted diff. Per the class name, it checks that every public
+    // FileSystem API has a same-named case in HdfsCompatBasics.
+    Set<String> apiMethods = new HashSet<>();
+    for (Method method : FileSystem.class.getMethods()) {
+      apiMethods.add(method.getName());
+    }
+    Set<String> caseMethods = new HashSet<>();
+    for (Method method : HdfsCompatBasics.class.getDeclaredMethods()) {
+      caseMethods.add(method.getName());
+    }
+    for (String name : apiMethods) {
+      Assert.assertTrue("Case not found for FileSystem#" + name,
+          caseMethods.contains(name));
+    }
+  }
+}
diff --git a/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatShellCommand.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatShellCommand.java
new file mode 100644
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/common/TestHdfsCompatShellCommand.java
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.common;
+
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.fs.compat.HdfsCompatTool;
+import org.apache.hadoop.fs.compat.hdfs.HdfsCompatMiniCluster;
+import org.apache.hadoop.fs.compat.hdfs.HdfsCompatTestCommand;
+import org.apache.hadoop.fs.compat.hdfs.HdfsCompatTestShellScope;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+
+public class TestHdfsCompatShellCommand {
+ private HdfsCompatMiniCluster cluster;
+
+ @Before
+ public void runCluster() throws IOException {
+ this.cluster = new HdfsCompatMiniCluster();
+ this.cluster.start();
+ }
+
+ @After
+ public void shutdownCluster() {
+ this.cluster.shutdown();
+ this.cluster = null;
+ }
+
+ @Test
+ public void testDfsCompatibility() throws Exception {
+ final String uri = cluster.getUri() + "/tmp";
+ final Configuration conf = cluster.getConf();
+ HdfsCompatCommand cmd = new TestCommand(uri, conf);
+ cmd.initialize();
+ HdfsCompatReport report = cmd.apply();
+ Assert.assertEquals(3, report.getPassedCase().size());
+ Assert.assertEquals(0, report.getFailedCase().size());
+ show(conf, report);
+ }
+
+ @Test
+ public void testSkipCompatibility() throws Exception {
+ final String uri = cluster.getUri() + "/tmp";
+ final Configuration conf = cluster.getConf();
+ HdfsCompatCommand cmd = new TestSkipCommand(uri, conf);
+ cmd.initialize();
+ HdfsCompatReport report = cmd.apply();
+ Assert.assertEquals(2, report.getPassedCase().size());
+ Assert.assertEquals(0, report.getFailedCase().size());
+ show(conf, report);
+ }
+
+ private void show(Configuration conf, HdfsCompatReport report) throws IOException {
+ new HdfsCompatTool(conf).printReport(report, System.out);
+ }
+
+ private static final class TestCommand extends HdfsCompatTestCommand {
+ private TestCommand(String uri, Configuration conf) {
+ super(uri, "shell", conf);
+ }
+
+ @Override
+ protected HdfsCompatShellScope getShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
+ return new TestShellScope(env, suite);
+ }
+ }
+
+ private static final class TestSkipCommand extends HdfsCompatTestCommand {
+ private TestSkipCommand(String uri, Configuration conf) {
+ super(uri, "shell", conf);
+ }
+
+ @Override
+ protected HdfsCompatShellScope getShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
+ return new TestShellScopeForSkip(env, suite);
+ }
+ }
+
+ private static final class TestShellScope extends HdfsCompatTestShellScope {
+ private TestShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
+ super(env, suite);
+ }
+
+ @Override
+ protected void replace(File scriptDir) throws IOException {
+ File casesDir = new File(scriptDir, "cases");
+ FileUtils.deleteDirectory(casesDir);
+ Files.createDirectories(casesDir.toPath());
+ copyResource("/test-case-simple.t", new File(casesDir, "test-case-simple.t"));
+ }
+ }
+
+ private static final class TestShellScopeForSkip extends HdfsCompatTestShellScope {
+ private TestShellScopeForSkip(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
+ super(env, suite);
+ }
+
+ @Override
+ protected void replace(File scriptDir) throws IOException {
+ File casesDir = new File(scriptDir, "cases");
+ FileUtils.deleteDirectory(casesDir);
+ Files.createDirectories(casesDir.toPath());
+ copyResource("/test-case-skip.t", new File(casesDir, "test-case-skip.t"));
+ }
+ }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatMiniCluster.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatMiniCluster.java
new file mode 100644
index 0000000000000..6de006418fd76
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatMiniCluster.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.hdfs;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+
+
+public class HdfsCompatMiniCluster {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HdfsCompatMiniCluster.class);
+
+ private MiniDFSCluster cluster = null;
+
+ public HdfsCompatMiniCluster() {
+ }
+
+ public synchronized void start() throws IOException {
+ FileSystem.enableSymlinks();
+ Configuration conf = new Configuration();
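+ // Enable the features exercised by the compatibility suites: delegation
+ // tokens on every request, a placeholder KMS key provider URI (no KMS is
+ // started by this class), external storage policy satisfier mode, ACLs,
+ // and the current user as the bench's privileged user.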
+ conf.set(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, "true");
+ conf.set(DFSConfigKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+ "kms://http@localhost:9600/kms/foo");
+ conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, "external");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, "true");
+ conf.set("fs.hdfs.compatibility.privileged.user",
+ UserGroupInformation.getCurrentUser().getShortUserName());
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+ cluster.waitClusterUp();
+ }
+
+ public synchronized void shutdown() {
+ if (cluster != null) {
+ cluster.shutdown(true);
+ cluster = null;
+ }
+ }
+
+ public synchronized Configuration getConf() throws IOException {
+ if (cluster == null) {
+ throw new IOException("Cluster not running");
+ }
+ return cluster.getFileSystem().getConf();
+ }
+
+ public synchronized URI getUri() throws IOException {
+ if (cluster == null) {
+ throw new IOException("Cluster not running");
+ }
+ return cluster.getFileSystem().getUri();
+ }
+
+ public static void main(String[] args)
+ throws IOException, InterruptedException {
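+ // Runs a standalone mini cluster for a bounded period (first argument,
+ // in milliseconds; default five minutes) so that processes outside this
+ // JVM, e.g. shell-based tests, can run against it.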
+ long duration = 5L * 60L * 1000L;
+ if ((args != null) && (args.length > 0)) {
+ duration = Long.parseLong(args[0]);
+ }
+
+ HdfsCompatMiniCluster cluster = new HdfsCompatMiniCluster();
+ try {
+ cluster.start();
+ Configuration conf = cluster.getConf();
+
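+ // Export the live cluster configuration to HADOOP_CONF_DIR so that
+ // external processes resolve this mini cluster.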
+ final String confDir = System.getenv("HADOOP_CONF_DIR");
+ if ((confDir == null) || confDir.isEmpty()) {
+ throw new IOException("HADOOP_CONF_DIR must be set" +
+ " so that the cluster configuration can be exported");
+ }
+ final File confFile = new File(confDir, "core-site.xml");
+ try (OutputStream out = new FileOutputStream(confFile)) {
+ conf.writeXml(out);
+ }
+
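+ // Keep the process alive until the deadline, logging a heartbeat
+ // between naps.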
+ final long endTime = System.currentTimeMillis() + duration;
+ long sleepTime = getSleepTime(endTime);
+ while (sleepTime > 0) {
+ LOG.warn("Service running ...");
+ Thread.sleep(sleepTime);
+ sleepTime = getSleepTime(endTime);
+ }
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
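+ // Time remaining until endTime, capped at five seconds so the loop
+ // wakes up regularly to log.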
+ private static long getSleepTime(long endTime) {
+ long maxTime = endTime - System.currentTimeMillis();
+ return (maxTime < 5000) ? maxTime : 5000;
+ }
+}
diff --git a/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatTestCommand.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatTestCommand.java
new file mode 100644
index 0000000000000..b7baac1228abc
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatTestCommand.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.compat.common.HdfsCompatCommand;
+import org.apache.hadoop.fs.compat.common.HdfsCompatEnvironment;
+import org.apache.hadoop.fs.compat.common.HdfsCompatShellScope;
+import org.apache.hadoop.fs.compat.common.HdfsCompatSuite;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+
+public class HdfsCompatTestCommand extends HdfsCompatCommand {
+ public HdfsCompatTestCommand(String uri, String suiteName, Configuration conf) {
+ super(uri, suiteName, conf);
+ }
+
+ @Override
+ public void initialize() throws IOException, ReflectiveOperationException {
+ super.initialize();
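+ // The shell scope field is private in the base command, so reflection
+ // is used to swap in a test-specific scope after the default wiring.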
+ Field shellField = HdfsCompatCommand.class.getDeclaredField("shell");
+ shellField.setAccessible(true);
+ HdfsCompatShellScope shell = (HdfsCompatShellScope) shellField.get(this);
+ if (shell != null) {
+ Field envField = shell.getClass().getDeclaredField("env");
+ envField.setAccessible(true);
+ HdfsCompatEnvironment env = (HdfsCompatEnvironment) envField.get(shell);
+ Field suiteField = HdfsCompatCommand.class.getDeclaredField("suite");
+ suiteField.setAccessible(true);
+ HdfsCompatSuite suite = (HdfsCompatSuite) suiteField.get(this);
+ shellField.set(this, getShellScope(env, suite));
+ }
+ }
+
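+ /**
+ * Factory hook for the shell scope; tests override this to supply
+ * customized scopes.
+ */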
+ protected HdfsCompatShellScope getShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
+ return new HdfsCompatTestShellScope(env, suite);
+ }
+}
diff --git a/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatTestShellScope.java b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatTestShellScope.java
new file mode 100644
index 0000000000000..9782e310263c6
--- /dev/null
+++ b/hadoop-tools/hadoop-compat-bench/src/test/java/org/apache/hadoop/fs/compat/hdfs/HdfsCompatTestShellScope.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.compat.hdfs;
+
+import org.apache.hadoop.classification.VisibleForTesting;
+import org.apache.hadoop.fs.compat.common.HdfsCompatEnvironment;
+import org.apache.hadoop.fs.compat.common.HdfsCompatShellScope;
+import org.apache.hadoop.fs.compat.common.HdfsCompatSuite;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+public class HdfsCompatTestShellScope extends HdfsCompatShellScope {
+ private final HdfsCompatEnvironment env;
+
+ public HdfsCompatTestShellScope(HdfsCompatEnvironment env, HdfsCompatSuite suite) {
+ super(env, suite);
+ this.env = env;
+ }
+
+ @Override
+ protected String[] getEnv(File localDir, File scriptDir, File confDir)
+ throws IOException {
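+ // Assemble the environment for the spawned shell: let subclasses adjust
+ // the case scripts, populate the bin directory, and write a dedicated
+ // conf dir for the unit-test run.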
+ replace(scriptDir);
+ File binDir = new File(scriptDir, "bin");
+ copyToBin(binDir);
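+ // Replace the supplied confDir with a test-local "hadoop-conf-ut"
+ // directory and write the current configuration into it.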
+ confDir = new File(scriptDir, "hadoop-conf-ut");
+ writeConf(confDir);
+ File logConfFile = new File(confDir, "log4j.properties");
+ copyResource("/hadoop-compat-bench-log4j.properties", logConfFile);
+
+ String javaHome = System.getProperty("java.home");
+ String javaBin = javaHome + File.separator + "bin" +
+ File.separator + "java";
+ String classpath = confDir.getAbsolutePath() + ":" +
+ System.getProperty("java.class.path");
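+ // Prepend the script bin directory to PATH so the spawned shell finds
+ // the helpers copied above first.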
+ String pathenv = System.getenv("PATH");
+ if ((pathenv == null) || pathenv.isEmpty()) {
+ pathenv = binDir.getAbsolutePath();
+ } else {
+ pathenv = binDir.getAbsolutePath() + ":" + pathenv;
+ }
+
+ List