Java – Dropwizard does not write custom loggers to file
I have a Dropwizard application. I configure the log appender as a file, as shown below:
logging:
  level: INFO
  loggers:
    "mylogger": INFO
    "com.path.to.class": INFO
  appenders:
    - type: file
      currentLogFilename: .logs/mylogs.log
      archivedLogFilenamePattern: .logs/archive.%d.log.gz
      archivedFileCount: 14
In addition, loggers are created in my application:
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private final Logger OpLogger = LoggerFactory.getLogger("mylogger");
(and)
private final Logger ClassLogger = LoggerFactory.getLogger(pathToClass.class);
Then I make some test log calls in main():
OpLogger.info("test 1");
ClassLogger.info("test 2");
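For context, these pieces fit together roughly like this (class names are illustrative, not my exact code):

import io.dropwizard.Application;
import io.dropwizard.Configuration;
import io.dropwizard.setup.Environment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MyApplication extends Application<Configuration> {

    // named logger, matching "mylogger" from the YAML "loggers" section
    private final Logger OpLogger = LoggerFactory.getLogger("mylogger");
    // class-based logger (the question uses com.path.to.class)
    private final Logger ClassLogger = LoggerFactory.getLogger(MyApplication.class);

    public static void main(String[] args) throws Exception {
        new MyApplication().run(args);
    }

    @Override
    public void run(Configuration configuration, Environment environment) {
        OpLogger.info("test 1");
        ClassLogger.info("test 2");
    }
}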
The application starts and runs without problems, but I don't get any logs (except Jetty's access log, which is correctly printed to mylogs.log), neither on stdout nor in the mylogs.log file. If instead I remove the loggers section from the YAML configuration, all logs are printed to stdout. Is this a Dropwizard problem, or do I have to add something to the configuration YAML? I am using Dropwizard 0.8.0.
Solution
Update: the latest versions of Dropwizard support this kind of logging configuration out of the box.
I ran into the same problem trying to set up Dropwizard (0.8.4) to log to a separate file, so I dug deeper and found a solution that works for me (not the cleanest, but I couldn't get it working any other way).
The problem is that LoggingFactory#configure automatically adds every appender to the root logger (roughly as in the sketch below), which is not what I want here.
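For illustration, the stock configure method does roughly the following with the configured appenders (paraphrased, not an exact copy of the Dropwizard 0.8.x source):

// Paraphrased: every configured appender ends up attached to the root logger.
for (AppenderFactory output : getAppenders()) {
    root.addAppender(output.build(loggerContext, name, null));
}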
So the first step is to override LoggingFactory. This is a bit messy, because some private members and methods sadly have to be copied. This is my implementation:
import java.io.PrintStream;
import java.lang.management.ManagementFactory;
import java.util.Map;
import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanRegistrationException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;
import org.slf4j.LoggerFactory;
import org.slf4j.bridge.SLF4JBridgeHandler;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.logback.InstrumentedAppender;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.collect.ImmutableMap;
import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.PatternLayout;
import ch.qos.logback.classic.jmx.JMXConfigurator;
import ch.qos.logback.classic.jul.LevelChangePropagator;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.Appender;
import ch.qos.logback.core.util.StatusPrinter;
import io.dropwizard.logging.AppenderFactory;
import io.dropwizard.logging.LoggingFactory;
public class BetterDropWizardLoggingConfig extends LoggingFactory {
@JsonIgnore
final LoggerContext loggerContext;
@JsonIgnore
final PrintStream configurationErrorsStream;
@JsonProperty("loggerMapping")
private ImmutableMap<String,String> loggerMappings;
private static void hijackJDKLogging() {
SLF4JBridgeHandler.removeHandlersForRootLogger();
SLF4JBridgeHandler.install();
}
public BetterDropWizardLoggingConfig() {
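// "%h" in log patterns is resolved by a custom HostNameConverter (its implementation is not shown here).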
PatternLayout.defaultConverterMap.put("h",HostNameConverter.class.getName());
this.loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
this.configurationErrorsStream = System.err;
}
private Logger configureLevels() {
final Logger root = loggerContext.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME);
loggerContext.reset();
final LevelChangePropagator propagator = new LevelChangePropagator();
propagator.setContext(loggerContext);
propagator.setResetJUL(true);
loggerContext.addListener(propagator);
root.setLevel(getLevel());
for (Map.Entry<String,Level> entry : getLoggers().entrySet()) {
loggerContext.getLogger(entry.getKey()).setLevel(entry.getValue());
}
return root;
}
@Override
public void configure(MetricRegistry metricRegistry,String name) {
hijackJDKLogging();
final Logger root = configureLevels();
for (AppenderFactory output : getAppenders()) {
Appender<ILoggingEvent> build = output.build(loggerContext,name,null);
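// If the appender factory carries a name, attach the appender to the mapped logger instead of root.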
if(output instanceof MappedLogger && ((MappedLogger) output).getLoggerName() != null) {
String appenderName = ((MappedLogger) output).getLoggerName();
String loggerName = loggerMappings.get(appenderName);
Logger logger = this.loggerContext.getLogger(loggerName);
logger.addAppender(build);
} else {
root.addAppender(build);
}
}
StatusPrinter.setPrintStream(configurationErrorsStream);
try {
StatusPrinter.printIfErrorsOccured(loggerContext);
} finally {
StatusPrinter.setPrintStream(System.out);
}
final MBeanServer server = ManagementFactory.getPlatformMBeanServer();
try {
final ObjectName objectName = new ObjectName("io.dropwizard:type=Logging");
if (!server.isRegistered(objectName)) {
server.registerMBean(new JMXConfigurator(loggerContext,server,objectName),objectName);
}
} catch (MalformedObjectNameException | InstanceAlreadyExistsException | NotCompliantMBeanException
| MBeanRegistrationException e) {
throw new RuntimeException(e);
}
configureInstrumentation(root,metricRegistry);
}
private void configureInstrumentation(Logger root,MetricRegistry metricRegistry) {
final InstrumentedAppender appender = new InstrumentedAppender(metricRegistry);
appender.setContext(loggerContext);
appender.start();
root.addAppender(appender);
}
}
As you can see, I had to copy/paste some private members and methods to make things work as expected.
I added a new field:
@JsonProperty("loggerMapping")
private ImmutableMap<String,String> loggerMappings;
This lets me configure a mapping from an appender name to a logger. It does not work out of the box, because there is no way to get at an appender's name (the default Dropwizard appenders don't carry one, which is very inconvenient...).
So I added a new appender that also substitutes the hostname into the file names, which I need for unrelated reasons. To do this I extend the existing FileAppenderFactory and implement my own MappedLogger interface (a sketch of that interface follows the code below). Here is the implementation:
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.UUID;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonTypeName;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.FileAppender;
import ch.qos.logback.core.rolling.RollingFileAppender;
import io.dropwizard.logging.AppenderFactory;
import io.dropwizard.logging.FileAppenderFactory;
@JsonTypeName("hostnameFile")
public class HostnameFileAppender extends FileAppenderFactory implements AppenderFactory,MappedLogger {
private static String uuid = UUID.randomUUID().toString();
@JsonProperty
private String name;
@Override
public void setCurrentLogFilename(String currentLogFilename) {
super.setCurrentLogFilename(substitute(currentLogFilename));
}
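// Replaces the ${HOSTNAME} placeholder with the local host name, falling back to a random UUID.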
private String substitute(final String pattern) {
String substitute = null;
try {
substitute = InetAddress.getLocalHost().getHostName();
} catch (UnknownHostException e) {
System.err.println("Failed to get local hostname:");
e.printStackTrace(System.err);
substitute = uuid;
System.err.println("Using " + substitute + " as fallback.");
}
return pattern.replace("${HOSTNAME}",substitute);
}
@Override
public void setArchivedLogFilenamePattern(String archivedLogFilenamePattern) {
super.setArchivedLogFilenamePattern(substitute(archivedLogFilenamePattern));
}
@Override
public String getLoggerName() {
return name;
}
}
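The MappedLogger interface itself is not shown above; it only needs to expose the optional appender name, so a minimal version could simply be:

public interface MappedLogger {
    // The configured appender name, or null if the appender should go to the root logger.
    String getLoggerName();
}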
Note that in order to add a new JSON type you have to follow the Javadoc in AppenderFactory (add a META-INF services entry to the classpath so the new appender is discoverable).
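For example, assuming the factory lives in a package such as com.example.logging (illustrative), the service file would typically be a classpath resource at META-INF/services/io.dropwizard.logging.AppenderFactory containing the fully qualified class name:

com.example.logging.HostnameFileAppender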
So far we have a configuration that accepts logger mappings, and an appender that can carry an optional name.
In the configure method, I now combine the two:
for (AppenderFactory output : getAppenders()) {
Appender<ILoggingEvent> build = output.build(loggerContext,name,null);
if(output instanceof MappedLogger && ((MappedLogger) output).getLoggerName() != null) {
String appenderName = ((MappedLogger) output).getLoggerName();
String loggerName = loggerMappings.get(appenderName);
Logger logger = this.loggerContext.getLogger(loggerName);
logger.addAppender(build);
} else {
root.addAppender(build);
}
}
For backward compatibility I kept the default behaviour: if no name is defined, the appender is added to the root logger. Otherwise I look up the mapped logger and add the appender to it.
Last but not least, the new YAML configuration:
logging:
  # The default level of all loggers. Can be OFF, ERROR, WARN, INFO, DEBUG, TRACE, or ALL.
  level: INFO
  loggers:
    "EVENT": INFO
  loggerMapping:
    # for easier search this is defined as: appenderName -> loggerName rather than the other way around
    "eventLog": "EVENT"
  appenders:
    - type: console
      threshold: ALL
      logFormat: "myformat"
    - type: hostnameFile # NOTE THE NEW TYPE WITH HOSTNAME RESOLVE
      currentLogFilename: /Users/artur/tmp/log/my-${HOSTNAME}.log
      threshold: ALL
      archive: true
      archivedLogFilenamePattern: mypattern
      archivedFileCount: 31
      timeZone: UTC
      logFormat: "myFormat"
    - type: hostnameFile
      name: eventLog # NOTE THE APPENDER NAME
      currentLogFilename: something
      threshold: ALL
      archive: true
      archivedLogFilenamePattern: something
      archivedFileCount: 31
      timeZone: UTC
      logFormat: "myFormat"
    - type: hostnameFile
      currentLogFilename: something
      threshold: ERROR
      archive: true
      archivedLogFilenamePattern: something
      archivedFileCount: 31
      timeZone: UTC
      logFormat: "myFormat"
As you can see, I map the eventLog appender to the EVENT logger. This way all my events end up in one file, and everything else ends up elsewhere.
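In application code the mapped logger is then obtained by name, just like in the question (illustrative snippet):

// Everything logged through "EVENT" goes to the appender named "eventLog".
private final Logger eventLogger = LoggerFactory.getLogger("EVENT");

eventLogger.info("some event"); // ends up in the eventLog file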
I hope this helps. It may not be the cleanest solution, but I don't think Dropwizard currently allows this feature out of the box.
