When a single machine can no longer handle the traffic, a service is usually deployed across multiple servers. Once that happens, pinpointing an error when the service fails becomes tedious, and a platform where logs can be viewed and searched in one place becomes very desirable. Having recently studied ElasticSearch, and having happened to read through part of the log4j implementation, I spent a day putting together a log search platform on this stack: ElasticSearch + Kibana + Log4j. This post shares the source code and configuration for anyone interested; my experience is limited, so please point out anything that looks unreasonable.
Here is a screenshot of the end result.
Step 1. Set up the ElasticSearch environment: https://blog.csdn.net/qq_23536449/article/details/90896657
Step 2. Set up the Kibana environment: https://blog.csdn.net/qq_23536449/article/details/90896657
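For reference, the settings the rest of this post relies on boil down to very little. The snippet below is only a minimal sketch with assumed values for host and ports, chosen to match the cluster name es-cluster and transport port 9300 used in the log4j configuration later; adjust it to your own environment.
# elasticsearch.yml (minimal sketch, assumed values)
cluster.name: es-cluster
network.host: 192.168.1.233
http.port: 9200
transport.tcp.port: 9300
# kibana.yml (minimal sketch, assumed values)
server.host: 0.0.0.0
# in newer Kibana releases this setting is named elasticsearch.hosts
elasticsearch.url: "http://192.168.1.233:9200"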
Step 3. Write the EsAppender class
package com.gysoft.utils.log4j;
import com.gysoft.utils.EmptyUtils;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Layout;
import org.apache.log4j.spi.ErrorCode;
import org.apache.log4j.spi.LocationInfo;
import org.apache.log4j.spi.LoggingEvent;
import org.apache.log4j.spi.ThrowableInformation;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import java.io.IOException;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Date;
import java.util.Enumeration;
import java.util.List;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
/**
* @author 周宁
* @Date 2019-04-17 9:39
*/
public class EsAppender extends AppenderSkeleton {
private static final String COLON_SEPARATOR = ":";
private static final String COMMA_SEPARATOR = ",";
private static final int DEFAULT_BUFFER_SIZE = 1;
private static final ThreadLocal<String> PRODUCT_NUM_HOLDER = new ThreadLocal<>();
/**
 * ES cluster name
 */
private String clusterName;
/**
 * ES cluster addresses, in the form
 * ip:port,ip2:port2,ip3:port3
 */
private String address;
/**
 * Size of the log buffer. If the buffer size is 1, every log entry is flushed to ES immediately;<br>
 * otherwise entries are held until the buffer reaches bufferSize and are then flushed in one bulk request.<br>
 * bufferSize defaults to 1.
 */
private int bufferSize;
/**
 * Buffered log entries waiting to be flushed
 */
private List<XContentBuilder> buffers;
/**
 * Entries to be removed from the buffer after a successful flush
 */
private List<XContentBuilder> removes;
/**
 * Client used to talk to the ES cluster
 */
private TransportClient client;
/**
 * Index the log documents are written to
 */
private String index;
/**
 * Document type the log entries are written as
 */
private String type;
public EsAppender() {
buffers = new ArrayList<>();
removes = new ArrayList<>();
}
public String getClusterName() {
return clusterName;
}
public void setClusterName(String clusterName) {
this.clusterName = clusterName;
}
public String getAddress() {
return address;
}
public void setAddress(String address) {
this.address = address;
}
public int getBufferSize() {
return bufferSize;
}
public void setBufferSize(int bufferSize) {
this.bufferSize = bufferSize;
}
public String getIndex() {
return index;
}
public void setIndex(String index) {
this.index = index;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
@Override
protected void append(LoggingEvent event) {
parseLog(event);
if (buffers.size() >= (bufferSize == 0 ? DEFAULT_BUFFER_SIZE : bufferSize)) {
flushBuffer();
}
}
private void parseLog(LoggingEvent event) {
ThrowableInformation throwableInformation = event.getThrowableInformation();
if (throwableInformation != null) {
LocationInfo locationInfo = event.getLocationInformation();
Throwable throwable = throwableInformation.getThrowable();
try {
String productNum = PRODUCT_NUM_HOLDER.get();
if (null != productNum) {
PRODUCT_NUM_HOLDER.remove();
}
StringBuilder throwStackTrace = new StringBuilder();
String[] s = event.getThrowableStrRep();
if (s != null) {
int len = s.length;
for (int i = 0; i < len; i++) {
throwStackTrace.append(s[i]);
throwStackTrace.append(Layout.LINE_SEP);
}
}
buffers.add(jsonBuilder()
.startObject()
.field("className", locationInfo.getClassName())
.field("productNum", productNum)
.field("fileName", locationInfo.getFileName())
.field("lineNumber", locationInfo.getLineNumber())
.field("methodName", locationInfo.getMethodName())
.field("serverIp", getIp())
.field("logName", event.getLogger().getName())
.field("logLevel", event.getLevel())
.field("logThread", event.getThreadName())
.field("logMills", new Date(event.getTimeStamp()))
.field("logMessage", event.getMessage().toString())
.field("throwMessage", throwable.getMessage())
.field("throwDetailMessage", throwable.toString())
.field("throwStackTrace", throwStackTrace)
.endObject());
} catch (IOException e) {
errorHandler.error("Error parseLog", e, ErrorCode.GENERIC_FAILURE);
}
}
}
/**
 * Flush the buffered log entries to ES in a single bulk request
 */
private void flushBuffer() {
try {
if (EmptyUtils.isNotEmpty(buffers)) {
BulkRequestBuilder bulkRequestBuilder = getClient().prepareBulk();
for (XContentBuilder xContentBuilder : buffers) {
bulkRequestBuilder.add(getClient().prepareIndex(index, type).setSource(xContentBuilder));
removes.add(xContentBuilder);
}
bulkRequestBuilder.get();
buffers.removeAll(removes);
removes.clear();
}
} catch (Exception e) {
errorHandler.error("Error flushBuffer", e, ErrorCode.GENERIC_FAILURE);
}
}
@Override
public void close() {
flushBuffer();
try {
if (client != null)
client.close();
} catch (Exception e) {
errorHandler.error("Error closing client", e, ErrorCode.GENERIC_FAILURE);
}
this.closed = true;
}
@Override
public boolean requiresLayout() {
return false;
}
private TransportClient getClient() throws UnknownHostException {
if (client == null) {
System.setProperty("es.set.netty.runtime.available.processors", "false");
Settings settings = Settings.builder().put("cluster.name", clusterName).build();
client = new PreBuiltTransportClient(settings);
String[] addressArr = address.split(COMMA_SEPARATOR);
for (String address : addressArr) {
String[] arr = address.split(COLON_SEPARATOR);
client.addTransportAddresses(new TransportAddress(InetAddress.getByName(arr[0]), Integer.parseInt(arr[1])));
}
}
return client;
}
private String getIp() throws UnknownHostException {
try {
InetAddress candidateAddress = null;
// Iterate over all network interfaces
for (Enumeration<NetworkInterface> ifaces = NetworkInterface.getNetworkInterfaces(); ifaces.hasMoreElements(); ) {
NetworkInterface iface = ifaces.nextElement();
// Then iterate over the IP addresses bound to each interface
for (Enumeration<InetAddress> inetAddrs = iface.getInetAddresses(); inetAddrs.hasMoreElements(); ) {
InetAddress inetAddr = inetAddrs.nextElement();
if (!inetAddr.isLoopbackAddress()) {// skip loopback addresses
if (inetAddr.isSiteLocalAddress()) {
// a site-local address is exactly what we are after
return inetAddr.getHostAddress();
} else if (candidateAddress == null) {
// no site-local address found yet; remember this one as a candidate
candidateAddress = inetAddr;
}
}
}
}
if (candidateAddress != null) {
return candidateAddress.getHostAddress();
}
// no non-loopback address was found; fall back to the address supplied by the JDK
InetAddress jdkSuppliedAddress = InetAddress.getLocalHost();
if (jdkSuppliedAddress == null) {
throw new UnknownHostException("The JDK InetAddress.getLocalHost() method unexpectedly returned null.");
}
return jdkSuppliedAddress.getHostAddress();
} catch (Exception e) {
UnknownHostException unknownHostException = new UnknownHostException(
"Failed to determine LAN address: " + e);
unknownHostException.initCause(e);
throw unknownHostException;
}
}
public static void setProductNum(String productNum) {
PRODUCT_NUM_HOLDER.set(productNum);
}
}
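A few notes on using the appender: parseLog only indexes events that carry a Throwable, so plain info logs never reach ES, and EsAppender.setProductNum lets business code attach a per-thread identifier that ends up in the productNum field. Below is a minimal usage sketch; the OrderService class and its method are hypothetical, and only EsAppender.setProductNum and the standard log4j API come from the code above.
import org.apache.log4j.Logger;
import com.gysoft.utils.log4j.EsAppender;

public class OrderService {
    private static final Logger LOG = Logger.getLogger(OrderService.class);

    public void createOrder(String orderNo) {
        // tag the current thread so the next indexed log entry carries this business id
        EsAppender.setProductNum(orderNo);
        try {
            throw new IllegalStateException("inventory not enough");
        } catch (Exception e) {
            // an ERROR with a Throwable is picked up by EsAppender.parseLog and bulk-indexed into ES
            LOG.error("create order failed, orderNo=" + orderNo, e);
        }
    }
}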
Step 4. Add the following log4j configuration
# Root logger level and its appenders (console and daily rolling file)
# The ES appender has been added as a third destination
log4j.rootLogger=ERROR,CONSOLE,DAILY_ROLLING_FILE,ES
# First appender: console output
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.Threshold=debug
log4j.appender.CONSOLE.Target=System.out
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} [%c{1}:%L] %m%n
# Second appender: daily rolling file output
log4j.appender.DAILY_ROLLING_FILE=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DAILY_ROLLING_FILE.Append=true
log4j.appender.DAILY_ROLLING_FILE.Threshold=debug
log4j.appender.DAILY_ROLLING_FILE.Encoding=UTF-8
# ${com.gysoft.log.dir} is a system property that can be set on the JVM / in Tomcat
log4j.appender.DAILY_ROLLING_FILE.File=${com.gysoft.log.dir}/gy-file-log.txt
log4j.appender.DAILY_ROLLING_FILE.DatePattern='.'yyyy-MM-dd
log4j.appender.DAILY_ROLLING_FILE.layout=org.apache.log4j.PatternLayout
log4j.appender.DAILY_ROLLING_FILE.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} [%c{1}]:%L %m%n
# Third appender: ship logs to the ES cluster
log4j.appender.ES=com.gysoft.utils.log4j.EsAppender
# ES cluster name
log4j.appender.ES.clusterName=es-cluster
# ES cluster addresses, in the form ip:port,ip2:port2
log4j.appender.ES.address=192.168.1.233:9300
# Index the log documents are written to
log4j.appender.ES.index=dubbo-file
# Document type of the log entries
log4j.appender.ES.type=dubbo-file-log
# Buffer size: logs are flushed to ES once the buffer holds bufferSize entries.
# The default is 1, which gives the highest timeliness; increase it to reduce
# ES request volume at the cost of delayed log delivery.
log4j.appender.ES.bufferSize=1
Step 5. Open Kibana and enjoy browsing and searching your logs.
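If you want to double-check that documents are actually arriving before building anything in Kibana, a quick programmatic query works as well. The sketch below uses the same TransportClient API as the appender; the cluster name, address, index name, and logLevel field come from the configuration above, while the class name and query are illustrative assumptions.
import java.net.InetAddress;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

public class EsLogSearchDemo {
    public static void main(String[] args) throws Exception {
        Settings settings = Settings.builder().put("cluster.name", "es-cluster").build();
        try (TransportClient client = new PreBuiltTransportClient(settings)
                .addTransportAddress(new TransportAddress(InetAddress.getByName("192.168.1.233"), 9300))) {
            // fetch up to ten ERROR-level entries from the log index
            SearchResponse response = client.prepareSearch("dubbo-file")
                    .setQuery(QueryBuilders.matchQuery("logLevel", "ERROR"))
                    .setSize(10)
                    .get();
            response.getHits().forEach(hit -> System.out.println(hit.getSourceAsString()));
        }
    }
}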