diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 612617712c915..e7b3ba000c296 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -43,7 +43,6 @@
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.util.Lists;
-import org.junit.Assert;
 import org.junit.Assume;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -71,7 +70,6 @@
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
@@ -237,7 +235,6 @@ public void testFsDatasetMetrics() throws Exception {
     String bpid = cluster.getNameNode().getNamesystem().getBlockPoolId();
     List<DataNode> datanodes = cluster.getDataNodes();
     DataNode datanode = datanodes.get(0);
-
     // Verify both of metrics set to 0 when initialize.
     MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
     assertCounter("CreateRbwOpNumOps", 0L, rb);
@@ -374,133 +371,6 @@ public void testTimeoutMetric() throws Exception {
     }
   }
 
-  @Test(timeout=60000)
-  public void testDatanodeNetworkErrorsMetricDefaultConf() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    final MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(6).build();
-    final List<FSDataOutputStream> streams = Lists.newArrayList();
-
-    DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
-    DataNodeFaultInjector newInjector = new DataNodeFaultInjector() {
-      public void incrementDatanodeNetworkErrors(DataXceiver dataXceiver) {
-        dataXceiver.incrDatanodeNetworkErrorsWithPort();
-      }
-    };
-    DataNodeFaultInjector.set(newInjector);
-    try {
-      GenericTestUtils.waitFor(new Supplier<Boolean>() {
-        @Override
-        public Boolean get() {
-          try {
-            for (int i = 0; i < 100; i++) {
-              final Path path = new Path("/test" + i);
-              final FSDataOutputStream out =
-                  cluster.getFileSystem().create(path, (short) 3);
-              streams.add(out);
-              out.writeBytes("old gs data\n");
-              out.hflush();
-              out.close();
-            }
-          } catch (IOException e) {
-            e.printStackTrace();
-          }
-
-          final MetricsRecordBuilder dnMetrics =
-              getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
-          long datanodeNetworkErrors = getLongCounter("DatanodeNetworkErrors", dnMetrics);
-          return datanodeNetworkErrors > 10;
-        }
-      }, 1000, 60000);
-
-      /* Test JMX datanode network counts. */
-      final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-      final ObjectName mxbeanName =
-          new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
-      final Object dnc =
-          mbs.getAttribute(mxbeanName, "DatanodeNetworkCounts");
-
-      // Compute number of DatanodeNetworkCounts.
-      final String allDnc = dnc.toString();
-      int oldStringLength = allDnc.length();
-      String keyword = "key=networkErrors, value";
-      int newStringLength = allDnc.replace(keyword, "").length();
-      int networkErrorsCount = (oldStringLength - newStringLength) / keyword.length();
-      final MetricsRecordBuilder dnMetrics =
-          getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
-      long datanodeNetworkErrors = getLongCounter("DatanodeNetworkErrors", dnMetrics);
-      Assert.assertEquals(datanodeNetworkErrors, networkErrorsCount);
-    } finally {
-      IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-      DataNodeFaultInjector.set(oldInjector);
-    }
-  }
-
-  @Test(timeout=60000)
-  public void testDatanodeNetworkErrorsMetricTopN() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    conf.setInt(DFSConfigKeys.DFS_DATANODE_NETWORKERRORS_DISPLAY_TOPCOUNT, 2);
-    final MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(6).build();
-    final List<FSDataOutputStream> streams = Lists.newArrayList();
-
-    DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
-    DataNodeFaultInjector newInjector = new DataNodeFaultInjector() {
-      public void incrementDatanodeNetworkErrors(DataXceiver dataXceiver) {
-        dataXceiver.incrDatanodeNetworkErrorsWithPort();
-      }
-    };
-    DataNodeFaultInjector.set(newInjector);
-    try {
-      GenericTestUtils.waitFor(new Supplier<Boolean>() {
-        @Override
-        public Boolean get() {
-          try {
-            for (int i = 0; i < 100; i++) {
-              final Path path = new Path("/test" + i);
-              final FSDataOutputStream out =
-                  cluster.getFileSystem().create(path, (short) 3);
-              streams.add(out);
-              out.writeBytes("old gs data\n");
-              out.hflush();
-              out.close();
-            }
-          } catch (IOException e) {
-            e.printStackTrace();
-          }
-
-          final MetricsRecordBuilder dnMetrics =
-              getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
-          long datanodeNetworkErrors = getLongCounter("DatanodeNetworkErrors", dnMetrics);
-          return datanodeNetworkErrors > 10;
-        }
-      }, 1000, 60000);
-      /* Test JMX datanode network counts. */
-      final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-      final ObjectName mxbeanName =
-          new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
-      final Object dnc =
-          mbs.getAttribute(mxbeanName, "DatanodeNetworkCounts");
-
-      // Compute number of DatanodeNetworkCounts.
-      final String allDnc = dnc.toString();
-      int oldStringLength = allDnc.length();
-      String keyword = "key=networkErrors, value";
-      int newStringLength = allDnc.replace(keyword, "").length();
-      int networkErrorsCount = (oldStringLength - newStringLength) / keyword.length();
-      Assert.assertEquals(2, networkErrorsCount);
-    } finally {
-      IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-      DataNodeFaultInjector.set(oldInjector);
-    }
-  }
-
   /**
    * This function ensures that writing causes TotalWritetime to increment
    * and reading causes totalReadTime to move.
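Note: both relocated tests force errors through the same fault-injector swap. A minimal sketch of that idiom, using the injector API exactly as it appears in this patch (the body of the try block is elided here):

    // Save the current injector so it can be restored after the test.
    DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
    // Every intercepted DataXceiver operation also bumps the per-port error count.
    DataNodeFaultInjector newInjector = new DataNodeFaultInjector() {
      public void incrementDatanodeNetworkErrors(DataXceiver dataXceiver) {
        dataXceiver.incrDatanodeNetworkErrorsWithPort();
      }
    };
    DataNodeFaultInjector.set(newInjector);
    try {
      // ... exercise the DataNode so the injected errors accumulate ...
    } finally {
      // Restoring the old injector keeps the fault injection from leaking into other tests.
      DataNodeFaultInjector.set(oldInjector);
    }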
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeNetworkErrorsWithDefaultConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeNetworkErrorsWithDefaultConf.java
new file mode 100644
index 0000000000000..a47984d46bbba
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeNetworkErrorsWithDefaultConf.java
@@ -0,0 +1,92 @@
+package org.apache.hadoop.hdfs.server.datanode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Lists;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+import java.io.Closeable;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.util.List;
+import java.util.function.Supplier;
+
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+
+public class TestDataNodeNetworkErrorsWithDefaultConf {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDataNodeNetworkErrorsWithDefaultConf.class);
+
+  @Test(timeout = 60000)
+  public void testDatanodeNetworkErrorsMetricDefaultConf() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    final MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(6).build();
+    cluster.waitActive();
+    final List<FSDataOutputStream> streams = Lists.newArrayList();
+    DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
+    DataNodeFaultInjector newInjector = new DataNodeFaultInjector() {
+      public void incrementDatanodeNetworkErrors(DataXceiver dataXceiver) {
+        dataXceiver.incrDatanodeNetworkErrorsWithPort();
+      }
+    };
+    DataNodeFaultInjector.set(newInjector);
+    try {
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          try {
+            for (int i = 0; i < 100; i++) {
+              final Path path = new Path("/test" + i);
+              final FSDataOutputStream out =
+                  cluster.getFileSystem().create(path, (short) 3);
+              streams.add(out);
+              out.writeBytes("old gs data\n");
+              out.hflush();
+            }
+          } catch (IOException e) {
+            e.printStackTrace();
+          }
+
+          final MetricsRecordBuilder dnMetrics =
+              getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
+          long datanodeNetworkErrors = getLongCounter("DatanodeNetworkErrors", dnMetrics);
+          return datanodeNetworkErrors > 10;
+        }
+      }, 1000, 60000);
+
+      final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      final ObjectName mxbeanName =
+          new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
+      final Object dnc =
+          mbs.getAttribute(mxbeanName, "DatanodeNetworkCounts");
+      // Compute number of DatanodeNetworkCounts.
+      final String allDnc = dnc.toString();
+      int oldStringLength = allDnc.length();
+      String keyword = "key=networkErrors, value";
+      int newStringLength = allDnc.replace(keyword, "").length();
+      int networkErrorsCount = (oldStringLength - newStringLength) / keyword.length();
+      final MetricsRecordBuilder dnMetrics =
+          getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
+      long datanodeNetworkErrors = getLongCounter("DatanodeNetworkErrors", dnMetrics);
+      Assert.assertEquals(datanodeNetworkErrors, networkErrorsCount);
+    } finally {
+      IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      DataNodeFaultInjector.set(oldInjector);
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeNetworkErrorsWithTopNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeNetworkErrorsWithTopNConf.java
new file mode 100644
index 0000000000000..851df85ac75ab
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeNetworkErrorsWithTopNConf.java
@@ -0,0 +1,91 @@
+package org.apache.hadoop.hdfs.server.datanode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Lists;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+import java.io.Closeable;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.util.List;
+import java.util.function.Supplier;
+
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+
+public class TestDataNodeNetworkErrorsWithTopNConf {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDataNodeNetworkErrorsWithTopNConf.class);
+
+  @Test(timeout=60000)
+  public void testDatanodeNetworkErrorsMetricTopN() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_NETWORKERRORS_DISPLAY_TOPCOUNT, 2);
+    final MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(6).build();
+    cluster.waitActive();
+    final List<FSDataOutputStream> streams = Lists.newArrayList();
+    DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
+    DataNodeFaultInjector newInjector = new DataNodeFaultInjector() {
+      public void incrementDatanodeNetworkErrors(DataXceiver dataXceiver) {
+        dataXceiver.incrDatanodeNetworkErrorsWithPort();
+      }
+    };
+    DataNodeFaultInjector.set(newInjector);
+    try {
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          try {
+            for (int i = 0; i < 100; i++) {
+              final Path path = new Path("/test" + i);
+              final FSDataOutputStream out =
+                  cluster.getFileSystem().create(path, (short) 3);
+              streams.add(out);
+              out.writeBytes("old gs data\n");
+              out.hflush();
+            }
+          } catch (IOException e) {
+            e.printStackTrace();
+          }
+
+          final MetricsRecordBuilder dnMetrics =
+              getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
+          long datanodeNetworkErrors = getLongCounter("DatanodeNetworkErrors", dnMetrics);
+          return datanodeNetworkErrors > 10;
+        }
+      }, 1000, 60000);
+      final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      final ObjectName mxbeanName =
+          new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
+      final Object dnc =
+          mbs.getAttribute(mxbeanName, "DatanodeNetworkCounts");
+      // Compute number of DatanodeNetworkCounts.
+      final String allDnc = dnc.toString();
+      int oldStringLength = allDnc.length();
+      String keyword = "key=networkErrors, value";
+      int newStringLength = allDnc.replace(keyword, "").length();
+      int networkErrorsCount = (oldStringLength - newStringLength) / keyword.length();
+      Assert.assertEquals(2, networkErrorsCount);
+    } finally {
+      IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      DataNodeFaultInjector.set(oldInjector);
+    }
+  }
+}
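Note: both assertions recover the number of per-port networkErrors entries from the DatanodeNetworkCounts JMX attribute by counting keyword occurrences via a length difference. A self-contained sketch of that counting idiom (the sample string below is illustrative only, not actual JMX output):

    public class NetworkErrorsCountSketch {
      public static void main(String[] args) {
        // Two hypothetical per-port entries, mimicking the stringified attribute.
        String allDnc = "{127.0.0.1:9866={key=networkErrors, value=4}, "
            + "127.0.0.1:9867={key=networkErrors, value=7}}";
        String keyword = "key=networkErrors, value";
        // Deleting every occurrence shrinks the string by occurrences * keyword.length().
        int networkErrorsCount =
            (allDnc.length() - allDnc.replace(keyword, "").length()) / keyword.length();
        System.out.println(networkErrorsCount); // prints 2
      }
    }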