MAPREDUCE-7453. Revert HADOOP-18649. #6102

Merged 7 commits on Oct 1, 2023
@@ -60,6 +60,8 @@
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.util.ApplicationClassLoader;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.ContainerLogAppender;
+import org.apache.hadoop.yarn.ContainerRollingLogAppender;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.LocalResource;
@@ -586,7 +588,8 @@ public static String getChildLogLevel(Configuration conf, boolean isMap) {

/**
* Add the JVM system properties necessary to configure
-* {@link org.apache.log4j.RollingFileAppender}.
+* {@link ContainerLogAppender} or
+* {@link ContainerRollingLogAppender}.
*
* @param task for map/reduce, or null for app master
* @param vargs the argument list to append to
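For orientation, the system properties that this method appends are the same ones consumed by the container-log4j.properties changes later in this PR. A minimal, hypothetical sketch of the idea (not the PR's code; the class, method, and variable names are invented, while the -D property names are the ones used in container-log4j.properties):

import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch only. It shows the shape of the -D arguments that let
// container-log4j.properties pick between CLA (non-rolling) and CRLA (rolling).
public class Log4jChildJvmArgsSketch {
  static List<String> buildLog4jArgs(String logDir, long logSizeKb, int backups, String level) {
    List<String> vargs = new ArrayList<>();
    vargs.add("-Dlog4j.configuration=container-log4j.properties");
    vargs.add("-Dyarn.app.container.log.dir=" + logDir);
    vargs.add("-Dyarn.app.container.log.filesize=" + (logSizeKb * 1024));
    if (logSizeKb > 0 && backups > 0) {
      // both a size limit and a backup count configured: rolling logs via CRLA
      vargs.add("-Dyarn.app.container.log.backups=" + backups);
      vargs.add("-Dhadoop.root.logger=" + level + ",CRLA");
    } else {
      // default: non-rolling ContainerLogAppender
      vargs.add("-Dhadoop.root.logger=" + level + ",CLA");
    }
    return vargs;
  }
}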
@@ -840,17 +840,24 @@
<name>yarn.app.mapreduce.task.container.log.backups</name>
<value>0</value>
<description>Number of backup files for task logs when using
-RollingFileAppender (RFA). See
-org.apache.log4j.RollingFileAppender.maxBackupIndex.
+ContainerRollingLogAppender (CRLA). See
+org.apache.log4j.RollingFileAppender.maxBackupIndex. By default,
+ContainerLogAppender (CLA) is used, and container logs are not rolled. CRLA
+is enabled for tasks when both mapreduce.task.userlog.limit.kb and
+yarn.app.mapreduce.task.container.log.backups are greater than zero.
</description>
</property>

<property>
<name>yarn.app.mapreduce.am.container.log.backups</name>
<value>0</value>
<description>Number of backup files for the ApplicationMaster logs when using
-RollingFileAppender (RFA). See
-org.apache.log4j.RollingFileAppender.maxBackupIndex.
+ContainerRollingLogAppender (CRLA). See
+org.apache.log4j.RollingFileAppender.maxBackupIndex. By default,
+ContainerLogAppender (CLA) is used, and container logs are not rolled. CRLA
+is enabled for the ApplicationMaster when both
+yarn.app.mapreduce.am.container.log.limit.kb and
+yarn.app.mapreduce.am.container.log.backups are greater than zero.
</description>
</property>

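To make the two descriptions above concrete, rolling task logs could be enabled per job with settings along these lines (example values, not shipped defaults; the ApplicationMaster uses the analogous yarn.app.mapreduce.am.container.log.limit.kb and yarn.app.mapreduce.am.container.log.backups properties):

<!-- Hypothetical per-job settings: with both values greater than zero,
     task containers switch from CLA to ContainerRollingLogAppender (CRLA). -->
<property>
  <name>mapreduce.task.userlog.limit.kb</name>
  <value>10240</value>
</property>
<property>
  <name>yarn.app.mapreduce.task.container.log.backups</name>
  <value>3</value>
</property>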
@@ -0,0 +1,128 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.yarn;

import java.io.File;
import java.io.Flushable;
import java.util.ArrayDeque;
import java.util.Deque;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.log4j.FileAppender;
import org.apache.log4j.spi.LoggingEvent;

/**
* A simple log4j-appender for container's logs.
*/
@Public
@Unstable
public class ContainerLogAppender extends FileAppender
implements Flushable {

private String containerLogDir;
private String containerLogFile;
private int maxEvents;
private Deque<LoggingEvent> eventBuffer;
private boolean closed = false;

@Override
public synchronized void activateOptions() {
if (maxEvents > 0) {
this.eventBuffer = new ArrayDeque<>();
}
setFile(new File(this.containerLogDir, containerLogFile).toString());
setAppend(true);
super.activateOptions();
}

@Override
public synchronized void append(LoggingEvent event) {
if (closed) {
return;
}
if (eventBuffer != null) {
if (eventBuffer.size() == maxEvents) {
eventBuffer.removeFirst();
}
eventBuffer.addLast(event);
} else {
super.append(event);
}
}

@Override
public void flush() {
if (qw != null) {
qw.flush();
}
}

@Override
public synchronized void close() {
if (!closed) {
closed = true;
if (eventBuffer != null) {
for (LoggingEvent event : eventBuffer) {
super.append(event);
}
// let garbage collection do its work
eventBuffer = null;
}
super.close();
}
}

/**
* Getter/Setter methods for log4j.
*
* @return containerLogDir.
*/
public String getContainerLogDir() {
return this.containerLogDir;
}

public void setContainerLogDir(String containerLogDir) {
this.containerLogDir = containerLogDir;
}

public String getContainerLogFile() {
return containerLogFile;
}

public void setContainerLogFile(String containerLogFile) {
this.containerLogFile = containerLogFile;
}

private static final long EVENT_SIZE = 100;

public long getTotalLogFileSize() {
return maxEvents * EVENT_SIZE;
}

/**
* Setter so that log4j can configure it from the
* configuration(log4j.properties).
*
* @param logSize log size.
*/
public void setTotalLogFileSize(long logSize) {
maxEvents = (int)(logSize / EVENT_SIZE);
}
}
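To illustrate the sizing logic above: EVENT_SIZE is a fixed 100-byte estimate per event, so setTotalLogFileSize(1000) keeps only the last 10 events, which are written out when the appender is closed. A hypothetical programmatic sketch (in practice log4j configures this appender from container-log4j.properties rather than from code; paths and names here are examples):

import java.io.File;

import org.apache.hadoop.yarn.ContainerLogAppender;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

// Hypothetical usage sketch of the tail-buffering behaviour.
public class ClaSizingSketch {
  public static void main(String[] args) {
    String logDir = "/tmp/container-logs";          // example directory
    new File(logDir).mkdirs();                      // make sure the target exists

    ContainerLogAppender cla = new ContainerLogAppender();
    cla.setLayout(new PatternLayout("%d{ISO8601} %p [%t] %c: %m%n"));
    cla.setContainerLogDir(logDir);
    cla.setContainerLogFile("syslog");
    cla.setTotalLogFileSize(1000);  // 1000 / EVENT_SIZE(100) => buffer the last 10 events
    cla.activateOptions();

    Logger log = Logger.getLogger("cla-sizing-sketch");
    log.setAdditivity(false);
    log.addAppender(cla);
    for (int i = 0; i < 100; i++) {
      log.info("message " + i);
    }
    cla.close(); // only the last 10 buffered events are written to the file here
  }
}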
@@ -0,0 +1,75 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.yarn;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.log4j.RollingFileAppender;

import java.io.File;
import java.io.Flushable;

/**
* A simple log4j-appender for container's logs.
*
*/
@Public
@Unstable
public class ContainerRollingLogAppender extends RollingFileAppender implements Flushable {
private String containerLogDir;
private String containerLogFile;

@Override
public void activateOptions() {
synchronized (this) {
setFile(new File(this.containerLogDir, containerLogFile).toString());
setAppend(true);
super.activateOptions();
}
}

@Override
public void flush() {
if (qw != null) {
qw.flush();
}
}

/**
* Getter/Setter methods for log4j.
*
* @return containerLogDir.
*/

public String getContainerLogDir() {
return this.containerLogDir;
}

public void setContainerLogDir(String containerLogDir) {
this.containerLogDir = containerLogDir;
}

public String getContainerLogFile() {
return containerLogFile;
}

public void setContainerLogFile(String containerLogFile) {
this.containerLogFile = containerLogFile;
}
}
@@ -0,0 +1,48 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.yarn;

import org.junit.jupiter.api.Test;

import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

public class TestContainerLogAppender {

@Test
void testAppendInClose() throws Exception {
final ContainerLogAppender claAppender = new ContainerLogAppender();
claAppender.setName("testCLA");
claAppender.setLayout(new PatternLayout("%-5p [%t]: %m%n"));
claAppender.setContainerLogDir("target/testAppendInClose/logDir");
claAppender.setContainerLogFile("syslog");
claAppender.setTotalLogFileSize(1000);
claAppender.activateOptions();
final Logger claLog = Logger.getLogger("testAppendInClose-catergory");
claLog.setAdditivity(false);
claLog.addAppender(claAppender);
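// Log an object whose toString() logs again through the same appender. Since
// buffered events are only rendered when close() drains the buffer, this
// re-enters append() during close() and relies on the closed flag so the
// buffer is not mutated mid-iteration.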
claLog.info(new Object() {
public String toString() {
claLog.info("message1");
return "return message1";
}
});
claAppender.close();
}
}
@@ -26,35 +26,36 @@ log4j.threshold=ALL
#

#Default values
-yarn.app.container.log.filesize=100MB
-yarn.app.container.log.backups=1
-yarn.app.mapreduce.shuffle.log.backups=1
+yarn.app.container.log.dir=null
+yarn.app.container.log.filesize=100

-log4j.appender.CLA=org.apache.log4j.RollingFileAppender
-log4j.appender.CLA.File=${yarn.app.container.log.dir}/${hadoop.root.logfile}
-log4j.appender.CLA.MaxFileSize=${yarn.app.container.log.filesize}
-log4j.appender.CLA.MaxBackupIndex=${yarn.app.container.log.backups}
+log4j.appender.CLA=org.apache.hadoop.yarn.ContainerLogAppender
+log4j.appender.CLA.containerLogDir=${yarn.app.container.log.dir}
+log4j.appender.CLA.containerLogFile=${hadoop.root.logfile}
+log4j.appender.CLA.totalLogFileSize=${yarn.app.container.log.filesize}
log4j.appender.CLA.layout=org.apache.log4j.PatternLayout
log4j.appender.CLA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c: %m%n

-log4j.appender.CRLA=org.apache.log4j.RollingFileAppender
-log4j.appender.CRLA.File=${yarn.app.container.log.dir}/${hadoop.root.logfile}
-log4j.appender.CRLA.MaxFileSize=${yarn.app.container.log.filesize}
-log4j.appender.CRLA.MaxBackupIndex=${yarn.app.container.log.backups}
+log4j.appender.CRLA=org.apache.hadoop.yarn.ContainerRollingLogAppender
+log4j.appender.CRLA.containerLogDir=${yarn.app.container.log.dir}
+log4j.appender.CRLA.containerLogFile=${hadoop.root.logfile}
+log4j.appender.CRLA.maximumFileSize=${yarn.app.container.log.filesize}
+log4j.appender.CRLA.maxBackupIndex=${yarn.app.container.log.backups}
log4j.appender.CRLA.layout=org.apache.log4j.PatternLayout
log4j.appender.CRLA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c: %m%n

-log4j.appender.shuffleCLA=org.apache.log4j.RollingFileAppender
-log4j.appender.shuffleCLA.File=${yarn.app.container.log.dir}/${yarn.app.mapreduce.shuffle.logfile}
-log4j.appender.shuffleCLA.MaxFileSize=${yarn.app.mapreduce.shuffle.log.filesize}
-log4j.appender.shuffleCLA.MaxBackupIndex=${yarn.app.mapreduce.shuffle.log.backups}
+log4j.appender.shuffleCLA=org.apache.hadoop.yarn.ContainerLogAppender
+log4j.appender.shuffleCLA.containerLogDir=${yarn.app.container.log.dir}
+log4j.appender.shuffleCLA.containerLogFile=${yarn.app.mapreduce.shuffle.logfile}
+log4j.appender.shuffleCLA.totalLogFileSize=${yarn.app.mapreduce.shuffle.log.filesize}
log4j.appender.shuffleCLA.layout=org.apache.log4j.PatternLayout
log4j.appender.shuffleCLA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c: %m%n

-log4j.appender.shuffleCRLA=org.apache.log4j.RollingFileAppender
-log4j.appender.shuffleCRLA.File=${yarn.app.container.log.dir}/${yarn.app.mapreduce.shuffle.logfile}
-log4j.appender.shuffleCRLA.MaxFileSize=${yarn.app.mapreduce.shuffle.log.filesize}
-log4j.appender.shuffleCRLA.MaxBackupIndex=${yarn.app.mapreduce.shuffle.log.backups}
+log4j.appender.shuffleCRLA=org.apache.hadoop.yarn.ContainerRollingLogAppender
+log4j.appender.shuffleCRLA.containerLogDir=${yarn.app.container.log.dir}
+log4j.appender.shuffleCRLA.containerLogFile=${yarn.app.mapreduce.shuffle.logfile}
+log4j.appender.shuffleCRLA.maximumFileSize=${yarn.app.mapreduce.shuffle.log.filesize}
+log4j.appender.shuffleCRLA.maxBackupIndex=${yarn.app.mapreduce.shuffle.log.backups}
log4j.appender.shuffleCRLA.layout=org.apache.log4j.PatternLayout
log4j.appender.shuffleCRLA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c: %m%n

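For reference, these appenders take effect through the root logger. In the stock container-log4j.properties the root logger is driven by ${hadoop.root.logger}, which the framework sets on the container command line, so the appender choice is made per container. A minimal sketch, assuming that standard wiring (not part of this diff):

# Assumed standard wiring: the child JVM passes -Dhadoop.root.logger=INFO,CLA
# (or INFO,CRLA for rolling logs) and the root logger selects the appender above.
hadoop.root.logger=INFO,CLA
log4j.rootLogger=${hadoop.root.logger}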