Commit
Fix unit test failures.
hfutatzhanghb committed Jul 21, 2023
1 parent 9008daf commit ee10d45
Showing 3 changed files with 183 additions and 130 deletions.
@@ -43,7 +43,6 @@
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.util.Lists;
import org.junit.Assert;
import org.junit.Assume;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -71,7 +70,6 @@
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import javax.management.MBeanServer;
import javax.management.ObjectName;

@@ -237,7 +235,6 @@ public void testFsDatasetMetrics() throws Exception {
String bpid = cluster.getNameNode().getNamesystem().getBlockPoolId();
List<DataNode> datanodes = cluster.getDataNodes();
DataNode datanode = datanodes.get(0);

// Verify that both metrics are initialized to 0.
MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
assertCounter("CreateRbwOpNumOps", 0L, rb);
@@ -374,133 +371,6 @@ public void testTimeoutMetric() throws Exception {
}
}

@Test(timeout=60000)
public void testDatanodeNetworkErrorsMetricDefaultConf() throws Exception {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(6).build();
final List<FSDataOutputStream> streams = Lists.newArrayList();

DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
DataNodeFaultInjector newInjector = new DataNodeFaultInjector() {
@Override
public void incrementDatanodeNetworkErrors(DataXceiver dataXceiver) {
dataXceiver.incrDatanodeNetworkErrorsWithPort();
}
};
DataNodeFaultInjector.set(newInjector);
try {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
for (int i = 0; i < 100; i++) {
final Path path = new Path("/test" + i);
final FSDataOutputStream out =
cluster.getFileSystem().create(path, (short) 3);
streams.add(out);
out.writeBytes("old gs data\n");
out.hflush();
out.close();
}
} catch (IOException e) {
e.printStackTrace();
}

final MetricsRecordBuilder dnMetrics =
getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
long datanodeNetworkErrors = getLongCounter("DatanodeNetworkErrors", dnMetrics);
return datanodeNetworkErrors > 10;
}
}, 1000, 60000);

/* Test JMX datanode network counts. */
final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
final ObjectName mxbeanName =
new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
final Object dnc =
mbs.getAttribute(mxbeanName, "DatanodeNetworkCounts");

// Compute number of DatanodeNetworkCounts.
final String allDnc = dnc.toString();
int oldStringLength = allDnc.length();
String keyword = "key=networkErrors, value";
int newStringLength = allDnc.replace(keyword, "").length();
int networkErrorsCount = (oldStringLength - newStringLength) / keyword.length();
final MetricsRecordBuilder dnMetrics =
getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
long datanodeNetworkErrors = getLongCounter("DatanodeNetworkErrors", dnMetrics);
Assert.assertEquals(datanodeNetworkErrors, networkErrorsCount);
} finally {
IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
if (cluster != null) {
cluster.shutdown();
}
DataNodeFaultInjector.set(oldInjector);
}
}
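
The JMX check above counts keyword matches with a length-difference trick: remove every occurrence of the keyword, and the total shrinkage divided by the keyword's length gives the number of non-overlapping matches. A minimal sketch of the idiom as a standalone helper (the helper name is ours, not part of the patch):

// Count non-overlapping occurrences of keyword by comparing the string
// length before and after stripping every occurrence.
static int countOccurrences(String haystack, String keyword) {
  int removedChars = haystack.length() - haystack.replace(keyword, "").length();
  return removedChars / keyword.length();
}

// Usage mirroring the test: one match per DataNode port reported in
// DatanodeNetworkCounts.
// int networkErrorsCount =
//     countOccurrences(dnc.toString(), "key=networkErrors, value");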

@Test(timeout=60000)
public void testDatanodeNetworkErrorsMetricTopN() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_NETWORKERRORS_DISPLAY_TOPCOUNT, 2);
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(6).build();
final List<FSDataOutputStream> streams = Lists.newArrayList();

DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
DataNodeFaultInjector newInjector = new DataNodeFaultInjector() {
@Override
public void incrementDatanodeNetworkErrors(DataXceiver dataXceiver) {
dataXceiver.incrDatanodeNetworkErrorsWithPort();
}
};
DataNodeFaultInjector.set(newInjector);
try {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
for (int i = 0; i < 100; i++) {
final Path path = new Path("/test" + i);
final FSDataOutputStream out =
cluster.getFileSystem().create(path, (short) 3);
streams.add(out);
out.writeBytes("old gs data\n");
out.hflush();
out.close();
}
} catch (IOException e) {
e.printStackTrace();
}

final MetricsRecordBuilder dnMetrics =
getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
long datanodeNetworkErrors = getLongCounter("DatanodeNetworkErrors", dnMetrics);
return datanodeNetworkErrors > 10;
}
}, 1000, 60000);
/* Test JMX datanode network counts. */
final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
final ObjectName mxbeanName =
new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
final Object dnc =
mbs.getAttribute(mxbeanName, "DatanodeNetworkCounts");

// Compute number of DatanodeNetworkCounts.
final String allDnc = dnc.toString();
int oldStringLength = allDnc.length();
String keyword = "key=networkErrors, value";
int newStringLength = allDnc.replace(keyword, "").length();
int networkErrorsCount = (oldStringLength - newStringLength) / keyword.length();
Assert.assertEquals(2, networkErrorsCount);
} finally {
IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
if (cluster != null) {
cluster.shutdown();
}
DataNodeFaultInjector.set(oldInjector);
}
}
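
Both tests depend on the DataNodeFaultInjector hook: the DataNode calls whatever injector is currently installed, and a test swaps in a subclass to alter behavior at that point. A rough sketch of the pattern, assuming the usual no-op default (the real class ships with the DataNode sources and is not part of this diff):

// Sketch of the fault-injector pattern (assumed shape, not the real source).
public class DataNodeFaultInjector {
  private static DataNodeFaultInjector instance = new DataNodeFaultInjector();

  public static DataNodeFaultInjector get() { return instance; }

  public static void set(DataNodeFaultInjector injector) { instance = injector; }

  // No-op by default; the tests above override this so every network error
  // is also attributed to the remote host and port.
  public void incrementDatanodeNetworkErrors(DataXceiver xceiver) { }
}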

/**
* This function ensures that writing causes TotalWritetime to increment
* and reading causes totalReadTime to move.
@@ -0,0 +1,92 @@
package org.apache.hadoop.hdfs.server.datanode;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Lists;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import java.io.Closeable;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.List;
import java.util.function.Supplier;

import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

public class TestDataNodeNetworkErrorsWithDefaultConf {
private static final Logger LOG =
LoggerFactory.getLogger(TestDataNodeNetworkErrorsWithDefaultConf.class);

@Test(timeout = 60000)
public void testDatanodeNetworkErrorsMetricDefaultConf() throws Exception {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(6).build();
cluster.waitActive();
final List<FSDataOutputStream> streams = Lists.newArrayList();
DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
DataNodeFaultInjector newInjector = new DataNodeFaultInjector() {
@Override
public void incrementDatanodeNetworkErrors(DataXceiver dataXceiver) {
dataXceiver.incrDatanodeNetworkErrorsWithPort();
}
};
DataNodeFaultInjector.set(newInjector);
try {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
for (int i = 0; i < 100; i++) {
final Path path = new Path("/test" + i);
final FSDataOutputStream out =
cluster.getFileSystem().create(path, (short) 3);
streams.add(out);
out.writeBytes("old gs data\n");
out.hflush();
}
} catch (IOException e) {
e.printStackTrace();
}

final MetricsRecordBuilder dnMetrics =
getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
long datanodeNetworkErrors = getLongCounter("DatanodeNetworkErrors", dnMetrics);
return datanodeNetworkErrors > 10;
}
}, 1000, 60000);

final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
final ObjectName mxbeanName =
new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
final Object dnc =
mbs.getAttribute(mxbeanName, "DatanodeNetworkCounts");
// Compute number of DatanodeNetworkCounts.
final String allDnc = dnc.toString();
int oldStringLength = allDnc.length();
String keyword = "key=networkErrors, value";
int newStringLength = allDnc.replace(keyword, "").length();
int networkErrorsCount = (oldStringLength - newStringLength) / keyword.length();
final MetricsRecordBuilder dnMetrics =
getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
long datanodeNetworkErrors = getLongCounter("DatanodeNetworkErrors", dnMetrics);
Assert.assertEquals(datanodeNetworkErrors, networkErrorsCount);
} finally {
IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
if (cluster != null) {
cluster.shutdown();
}
DataNodeFaultInjector.set(oldInjector);
}
}
}
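
Both new tests spin the same write loop inside an anonymous Supplier. Since java.util.function.Supplier is already imported, the polling check could also be written as a lambda; a minimal sketch of that variant (logging swapped in for printStackTrace, otherwise the same logic):

GenericTestUtils.waitFor(() -> {
  try {
    for (int i = 0; i < 100; i++) {
      final FSDataOutputStream out =
          cluster.getFileSystem().create(new Path("/test" + i), (short) 3);
      streams.add(out);
      out.writeBytes("old gs data\n");
      out.hflush();
    }
  } catch (IOException e) {
    LOG.warn("Write failed while provoking datanode network errors.", e);
  }
  // Keep polling until the injected errors push the counter past 10.
  final MetricsRecordBuilder dnMetrics =
      getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
  return getLongCounter("DatanodeNetworkErrors", dnMetrics) > 10;
}, 1000, 60000);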
@@ -0,0 +1,91 @@
package org.apache.hadoop.hdfs.server.datanode;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Lists;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;
import java.io.Closeable;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.List;
import java.util.function.Supplier;

import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

public class TestDataNodeNetworkErrorsWithTopNConf {
private static final Logger LOG =
LoggerFactory.getLogger(TestDataNodeNetworkErrorsWithTopNConf.class);

@Test(timeout=60000)
public void testDatanodeNetworkErrorsMetricTopN() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_NETWORKERRORS_DISPLAY_TOPCOUNT, 2);
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(6).build();
cluster.waitActive();
final List<FSDataOutputStream> streams = Lists.newArrayList();
DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
DataNodeFaultInjector newInjector = new DataNodeFaultInjector() {
@Override
public void incrementDatanodeNetworkErrors(DataXceiver dataXceiver) {
dataXceiver.incrDatanodeNetworkErrorsWithPort();
}
};
DataNodeFaultInjector.set(newInjector);
try {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
for (int i = 0; i < 100; i++) {
final Path path = new Path("/test" + i);
final FSDataOutputStream out =
cluster.getFileSystem().create(path, (short) 3);
streams.add(out);
out.writeBytes("old gs data\n");
out.hflush();
}
} catch (IOException e) {
e.printStackTrace();
}

final MetricsRecordBuilder dnMetrics =
getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
long datanodeNetworkErrors = getLongCounter("DatanodeNetworkErrors", dnMetrics);
return datanodeNetworkErrors > 10;
}
}, 1000, 60000);
final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
final ObjectName mxbeanName =
new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
final Object dnc =
mbs.getAttribute(mxbeanName, "DatanodeNetworkCounts");
// Compute number of DatanodeNetworkCounts.
final String allDnc = dnc.toString();
int oldStringLength = allDnc.length();
String keyword = "key=networkErrors, value";
int newStringLength = allDnc.replace(keyword, "").length();
int networkErrorsCount = (oldStringLength - newStringLength) / keyword.length();
Assert.assertEquals(2, networkErrorsCount);
} finally {
IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
if (cluster != null) {
cluster.shutdown();
}
DataNodeFaultInjector.set(oldInjector);
}
}
}
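
Why exactly 2: with DFS_DATANODE_NETWORKERRORS_DISPLAY_TOPCOUNT set to 2, the JMX attribute should report only the two ports with the most errors, so the keyword appears exactly twice. A hypothetical, self-contained illustration (addresses and values are invented):

public class TopNCountDemo {
  public static void main(String[] args) {
    // Invented shape of dnc.toString() when only the top-2 ports appear.
    String allDnc = "{127.0.0.1:9866={key=networkErrors, value=7}, "
        + "127.0.0.1:9867={key=networkErrors, value=5}}";
    String keyword = "key=networkErrors, value";
    int count = (allDnc.length() - allDnc.replace(keyword, "").length())
        / keyword.length();
    System.out.println(count); // prints 2, matching the test's assertion
  }
}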
