Integrating Kafka -> Telegraf -> InfluxDB to record operation logs

https://www.cnblogs.com/siodoon/p/11422608.html
https://jasper-zhang1.gitbooks.io/influxdb/Write_protocols/line_protocol.html#

Data collection:
https://www.jianshu.com/p/469648907036

1. Local flow: an AOP aspect writes operation logs to Kafka

package com.sdyy.common.aop;

import com.sdyy.common.annotation.OperLog;
import com.sdyy.common.controller.BaseController;
import com.sdyy.common.entity.KafkaMessage;
import com.sdyy.common.entity.Log;
import com.sdyy.common.kafka.KafkaProducer;
import com.sdyy.common.retobj.BaseEntity;
import com.sdyy.common.utils.DateUtils;
import com.sdyy.core.auth.operator.entity.AuthOperator;
import lombok.extern.log4j.Log4j;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Pointcut;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.web.context.request.RequestAttributes;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;

import javax.servlet.http.HttpServletRequest;

/**
 * @className: LogAspect
 * @description: logging aspect
 * @author: lizz
 * @date: 2020/03/15 18:37
 */
@Log4j
@Component
@Aspect
public class LogAspect {

    @Autowired
    private KafkaProducer producer;
    /**
     * @Description: method pointcut
     * @Author: lizz
     * @Date: 2020/3/15 18:56
     **/
    @Pointcut("execution(* com.sdyy.maint.*.controller.*.*(..))")
    public void pointerCutMethod() {
    }

    /**
     * @Description: around advice; builds the Log entity and sends it to Kafka
     * @Author: lizz
     * @Date: 2020/3/15 18:56
     **/
    @Around(value = "pointerCutMethod() && @annotation(annotation)")
    public Object doAround(ProceedingJoinPoint joinPoint, OperLog annotation) {
        Log logs = new Log();
        // Get the module this operation belongs to from the annotation
        logs.setModuleName(annotation.moduleName());
        // Get the operation type from the annotation
        logs.setOperation(annotation.option());
        logs.setCreatedate(DateUtils.getCurrentDate());
        RequestAttributes ra = RequestContextHolder.getRequestAttributes();
        Long beginTime = System.currentTimeMillis();
        if (ra != null) {
            ServletRequestAttributes sra = (ServletRequestAttributes) ra;
            HttpServletRequest request = sra.getRequest();

            String ip = request.getRemoteHost();
            logs.setUserIp(ip);
            logs.setReqUrl(request.getRequestURI());
            // Get the logged-in user from the session
            AuthOperator authOperator = (AuthOperator) request.getSession().getAttribute(BaseController.LOGIN_IN_OPERATOR_SESSION);
            if (authOperator != null) {
                logs.setUserName(authOperator.getUserId());
            }
        }
        try {
            Object object = joinPoint.proceed();
            if (object instanceof BaseEntity) {
                if (((BaseEntity) object).getStatus() == 200) {
                    logs.setOperResult("成功");
                } else {
                    logs.setOperResult(((BaseEntity) object).getMessage());
                }
            }
            KafkaMessage ms = new KafkaMessage();
            ms.setMsg(logs);
            producer.send("logs", ms);
            return object;
        } catch (Throwable e) {
            logs.setOperResult("失败:" + e.getMessage());
            KafkaMessage ms = new KafkaMessage();
            ms.setMsg(logs);
            producer.send("logs", ms);
            // Note: the exception is logged and swallowed; the caller receives null
            log.error("Exception in LogAspect around advice", e);
            return null;
        }
    }
}
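
The aspect above depends on a few classes that are not shown in this post: the OperLog annotation, the Log and KafkaMessage entities, and the KafkaProducer wrapper. The following is a minimal sketch of what the annotation, the wrapper and the producer might look like, assuming spring-kafka and Jackson are on the classpath; the names, fields and serialization choice are illustrative assumptions, not the project's actual implementation:

// OperLog.java (sketch): the annotation bound by @annotation(annotation) in the aspect
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface OperLog {
    String moduleName();   // which module the operation belongs to
    String option();       // what kind of operation it is
}

// KafkaMessage.java (sketch): wrapper so the serialized JSON nests the log under "msg",
// which is what json_query = "msg" in the Telegraf config below selects
public class KafkaMessage {
    private Object msg;
    public Object getMsg() { return msg; }
    public void setMsg(Object msg) { this.msg = msg; }
}

// KafkaProducer.java (sketch): serializes the message to JSON and publishes it to the topic
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.log4j.Log4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

@Log4j
@Component
public class KafkaProducer {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    private final ObjectMapper mapper = new ObjectMapper();

    public void send(String topic, KafkaMessage message) {
        try {
            // Produces e.g. {"msg":{"moduleName":"...","operation":"...",...}}
            kafkaTemplate.send(topic, mapper.writeValueAsString(message));
        } catch (JsonProcessingException e) {
            log.error("Failed to serialize log message", e);
        }
    }
}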

2. Telegraf collection configuration

[[inputs.kafka_consumer]]
  ## Kafka brokers.
  brokers = ["10.9.44.14:9092"]

  ## Topics to consume.
  topics = ["logs"]

  ## When set this tag will be added to all metrics with the topic as the value.
  # topic_tag = "msg"

  ## Optional Client id
  # client_id = "Telegraf"

  ## Set the minimal supported Kafka version.  Setting this enables the use of new
  ## Kafka features and APIs.  Must be 0.10.2.0 or greater.
  ##   ex: version = "1.1.0"
  # version = ""

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false

  ## Optional SASL Config
  # sasl_username = "kafka"
  # sasl_password = "secret"

  ## Name of the consumer group.
  # consumer_group = "telegraf_metrics_consumers"

  ## Initial offset position; one of "oldest" or "newest".
  # offset = "oldest"

  ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
  # balance_strategy = "range"

  ## Maximum length of a message to consume, in bytes (default 0/unlimited);
  ## larger messages are dropped
  max_message_len = 1000000

  ## Maximum messages to read from the broker that have not been written by an
  ## output.  For best throughput set based on the number of metrics within
  ## each message and the size of the output's metric_batch_size.
  ##
  ## For example, if each message from the queue contains 10 metrics and the
  ## output metric_batch_size is 1000, setting this to 100 will ensure that a
  ## full batch is collected and the write is triggered immediately without
  ## waiting until the next flush_interval.
  # max_undelivered_messages = 1000

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "json"

  name_override = "topicB"
  tag_keys = ["moduleName"]
  json_string_fields = ["operation","createdate","userIp","userName","operResult","reqUrl"]
  json_query = "msg"

[[outputs.influxdb]]
  urls = ["http://localhost:8086"] # required
  database = "mydb" # required
  retention_policy = ""
  precision = "s"
  timeout = "5s"
  username = ""
  password = ""

Key parser settings:

data_format: parse the Kafka message payload as JSON.
name_override: overrides the measurement name; points are written to the measurement "topicB".
json_string_fields: JSON keys to keep as string fields (by default the JSON parser only collects numeric values).
tag_keys: JSON keys to turn into tags, here moduleName.
json_query: a GJSON path that selects the part of the message to parse, here the "msg" object wrapping the log entity.
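
As a hypothetical illustration (the field values are invented, not taken from a real message), a payload on the logs topic of the form

{"msg":{"moduleName":"auth","operation":"login","createdate":"2020-03-15 18:37:00","userIp":"10.9.44.20","userName":"admin","operResult":"成功","reqUrl":"/maint/auth/login"}}

would be turned by the settings above into roughly this line-protocol point in mydb:

topicB,moduleName=auth operation="login",createdate="2020-03-15 18:37:00",userIp="10.9.44.20",userName="admin",operResult="成功",reqUrl="/maint/auth/login"

moduleName becomes a tag, the keys listed in json_string_fields become string fields, and the timestamp is assigned at collection time since no json_time_key is configured.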

Start Telegraf:

telegraf --config telegraf1.conf
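
To verify that points are arriving, one option (assuming the InfluxDB 1.x command-line client is available on the host) is to query the target database directly; the measurement name matches name_override:

influx -database 'mydb' -execute 'SHOW MEASUREMENTS'
influx -database 'mydb' -execute 'SELECT * FROM "topicB" ORDER BY time DESC LIMIT 5'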
