# Ship log events to logstash over a socket using this appender.
log4j.appender.socket=org.apache.log4j.net.SocketAppender
# Port the logstash log4j input listens on (must match the input's `port`).
log4j.appender.socket.Port=4567
# Logstash server host.
# FIX: was 168.7.1.67, which does not match the logstash input host
# (168.37.1.67) configured in the pipeline — corrected so events actually arrive.
log4j.appender.socket.RemoteHost=168.37.1.67
# Wait 10 seconds before reconnecting after a dropped connection.
log4j.appender.socket.ReconnectionDelay=10000
# Include class name, line number, and file name in each event.
log4j.appender.socket.LocationInfo=true
# Reference this appender via appender-ref in the logger configuration.
# Receive serialized log4j LoggingEvent objects from the application's
# SocketAppender configured above.
input {
# Act as the server side of the connection; the appender connects to us.
log4j {
mode => "server"
host => "168.37.1.67"
port => 4567
# NOTE(review): the log4j input deserializes Java objects itself; this
# codec/charset setting is presumably ignored — confirm against plugin docs.
codec => plain { charset => "GB2312" }
}
}
filter {
# trade.log entries: emitted from the "execute" method and carrying no stack trace.
if [method] == "execute" and (![stack_trace]) {
grok {
# Patterns are tried top-to-bottom and grok stops at the first match, so
# the most specific pattern (most fields) must come first; a shorter
# pattern listed earlier would match and silently drop trailing fields.
match => { "message" => "%{WORD:opeType}\|%{WORD:name}\|Oid: %{WORD:oid}\|IP: %{IP:ip}\|MAC: %{GREEDYDATA:mac}\|%{WORD:result}\|%{GREEDYDATA:exception}\|" }
match => { "message" => "%{WORD:opeType}\|%{WORD:name}\|Oid: %{WORD:oid}\|IP: %{IP:ip}\|MAC: %{GREEDYDATA:mac}\|%{WORD:result}\|" }
match => { "message" => "%{WORD:opeType}\|%{WORD:name}\|Oid: %{WORD:oid}\|IP: %{IP:ip}\|MAC: %{GREEDYDATA:mac}\|" }
match => { "message" => "%{WORD:opeType}\|IP: %{IP:ip}\|MAC: %{GREEDYDATA:mac}\|%{WORD:result}\|" }
match => { "message" => "%{WORD:opeType}\|IP: %{IP:ip}\|MAC: %{GREEDYDATA:mac}\|" }
remove_field => ["message","thread","class","file","method"]
add_field => [ "type", "tradelog" ]
}
}
# error.log entries: key=value pairs separated by pipes.
else if [logger_name] == "error.except" and (![stack_trace]) {
kv {
source => "message"
# FIX: kv's field_split is a SET of single-character delimiters, not a
# regex — "\|" split on both backslash and pipe. Pipe alone is intended.
field_split => "|"
value_split => "="
remove_field => ["message","thread","class","file","method"]
add_field => [ "type", "errorlog" ]
}
}
# Drop anything that matches neither log type.
else {
drop {}
}
# Parse the epoch-milliseconds `timestamp` field into @timestamp,
# then remove the original field.
date {
match => ["timestamp","UNIX_MS"]
remove_field => "timestamp"
}
}
output {
# Echo every event to the console for debugging.
stdout{codec=> rubydebug}
# Trade-log events go to their own index with a dedicated template.
if [type] == "tradelog" {
elasticsearch {
index => "log4j-tradelog"
hosts => ["168.37.1.67:9200"]
# Install the index template from disk, overwriting any existing one.
manage_template => true
template_overwrite => true
template => "/home/elk/myconf/tradelog_template.json"
}
}
# Error-log events go to a separate index with its own template.
if [type] == "errorlog" {
elasticsearch {
index => "log4j-errorlog"
hosts => ["168.37.1.67:9200"]
manage_template => true
template_overwrite => true
template => "/home/elk/myconf/errorlog_template.json"
}
}
}
# NOTE(review): repeated excerpt of the trade.log branch from the filter
# section above, reproduced here to support the explanation that follows.
if [method] == "execute" and (![stack_trace]) {
grok {
match => { "message" => "%{WORD:opeType}\|%{WORD:name}\|Oid: %{WORD:oid}\|IP: %{IP:ip}\|MAC: %{GREEDYDATA:mac}\|%{WORD:result}\|%{GREEDYDATA:exception}\|" }
match => { "message" => "%{WORD:opeType}\|%{WORD:name}\|Oid: %{WORD:oid}\|IP: %{IP:ip}\|MAC: %{GREEDYDATA:mac}\|%{WORD:result}\|" }
match => { "message" => "%{WORD:opeType}\|%{WORD:name}\|Oid: %{WORD:oid}\|IP: %{IP:ip}\|MAC: %{GREEDYDATA:mac}\|" }
match => { "message" => "%{WORD:opeType}\|IP: %{IP:ip}\|MAC: %{GREEDYDATA:mac}\|%{WORD:result}\|" }
match => { "message" => "%{WORD:opeType}\|IP: %{IP:ip}\|MAC: %{GREEDYDATA:mac}\|" }
remove_field => ["message","thread","class","file","method"]
add_field => [ "type", "tradelog" ]
}
}
所以采用5种正则规则去匹配,logstash默认会从上到下按规则去匹配,直到匹配上为止。(注意5种正则规则的上下顺序,下面的规则放在上面会导致可能内容解析不全,比如源数据是:请求交易名|操作员登录名|操作员编号|ip地址|mac地址|返回结果|异常信息,如果按照“请求交易名|ip地址|mac地址|”规则去匹配,只能识别出3个字段,而且匹配成功,不继续往下执行,这样识别的内容就不全)
# NOTE(review): repeated excerpt of the error.log branch from the filter
# section above, reproduced here to support the explanation that follows.
else if [logger_name] == "error.except" and (![stack_trace]) {
kv {
source => "message"
field_split => "\|"
value_split => "="
remove_field => ["message","thread","class","file","method"]
add_field => [ "type", "errorlog" ]
}
}
以UNIX_MS格式解析原始时间字段timestamp,解析成Date类型数据并赋值给@timestamp字段,并移除原始字段timestamp。