SpringMVC 和 kafka 的整合,相对于 SpringBoot 复杂了一些:需要手动注册监听器,实现对 kafka 消息的监听;也需要手动配置并注入 KafkaTemplate,然后调用它发送消息。
<dependency>
<groupId>org.apache.kafkagroupId>
<artifactId>kafka-clientsartifactId>
<version>1.0.1version>
dependency>
<dependency>
<groupId>org.springframework.kafkagroupId>
<artifactId>spring-kafkaartifactId>
<version>1.1.1.RELEASEversion>
dependency>
配置生产者
# kafka配置
kafka.bootstrap-servers=192.168.10.100:9092,192.168.10.101:9092,192.168.10.102:9092,192.168.10.103:9092,192.168.10.104:9092
#topic配置
kafka.topic.name=ba_jump_api
#发送失败后的重试次数
kafka.producer.retries=1
#批次大小
kafka.producer.batch-size=16384
#内存大小
kafka.producer.buffer-memory=33554432
# 指定消息key和消息体的编解码方式
kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
<bean id="kafkaServer"
class="com.yellowcong.service.KafkaServer">
<property name="kafkaTemplate" ref="kafkaTemplate" >property>
<property name="topic" value="${kafka.topic.name}">property>
bean>
<bean id="producerProperties" class="java.util.HashMap">
<constructor-arg>
<map>
<entry key="bootstrap.servers" value="${kafka.bootstrap-servers}" />
<entry key="retries" value="10" />
<entry key="batch.size" value="33554432" />
<entry key="linger.ms" value="1" />
<entry key="buffer.memory" value="33554432 " />
<entry key="acks" value="all" />
<entry key="key.serializer" value="org.apache.kafka.common.serialization.StringSerializer" />
<entry key="value.serializer" value="org.apache.kafka.common.serialization.StringSerializer" />
map>
constructor-arg>
bean>
<bean id="producerFactory"
class="org.springframework.kafka.core.DefaultKafkaProducerFactory">
<constructor-arg>
<ref bean="producerProperties" />
constructor-arg>
bean>
<bean id="kafkaTemplate" class="org.springframework.kafka.core.KafkaTemplate">
<constructor-arg ref="producerFactory" />
<property name="defaultTopic" value="${kafka.topic.name}" />
bean>
通过这个类进行发送消息
package com.yellowcong.service;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;
import com.yellowcong.common.AppLog;
/**
* 代码创建: yellowcong
* 创建日期: 2019年3月18日
* 功能描述:
*/
@Service
public class KafkaServer {
KafkaTemplate<String,String> kafkaTemplate;
private String topic;
public void setTopic(String topic) {
this.topic = topic;
}
public void setKafkaTemplate(KafkaTemplate kafkaTemplate) {
this.kafkaTemplate = kafkaTemplate;
}
/**
* 代码创建: yellowcong
* 创建日期: 2019年3月18日
* 功能描述: 异步发送日志到kafka中
*/
public void sendLogAsyn(final String log) {
try {
//走我们设定的默认的topic
kafkaTemplate.send(topic,log);
} catch (Exception e) {
e.printStackTrace();
AppLog.kafka.error(e.getMessage());
}
}
/**
* 代码创建: yellowcong
* 创建日期: 2019年3月18日
* 功能描述: 同步发送日志到kafka中
*/
public void sendLogSync(final String log ) {
try {
kafkaTemplate.send(null,log).get();
} catch (Exception e) {
e.printStackTrace();
}
}
}
# kafka配置
kafka.bootstrap-servers=192.168.10.100:9092,192.168.10.101:9092,192.168.10.102:9092,192.168.10.103:9092,192.168.10.104:9092
#消费者组id
kafka.consumer.group-id=consumer-ba-jump-2
#自动提交
kafka.consumer.enable-auto-commit=true
#kafka超时时间
kafka.consumer.session.timeout=20000
#topic的名称
kafka.topic.name=ba_jump_api
#消费线程数
kafka.consumer.concurrency=10
#消费方式,当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,从头开始消费
kafka.auto.offset.reset=earliest
<bean id="consumerProperties" class="java.util.HashMap">
<constructor-arg>
<map>
<entry key="bootstrap.servers" value="${kafka.bootstrap-servers}" />
<entry key="group.id" value="${kafka.consumer.group-id}" />
<entry key="enable.auto.commit" value="${kafka.consumer.enable-auto-commit}" />
<entry key="session.timeout.ms" value="${kafka.consumer.session.timeout} " />
<entry key="auto.offset.reset" value="${kafka.auto.offset.reset}" />
<entry key="key.deserializer" value="org.apache.kafka.common.serialization.StringDeserializer" />
<entry key="value.deserializer" value="org.apache.kafka.common.serialization.StringDeserializer" />
map>
constructor-arg>
bean>
<bean id="kafkaConsumerListener" class="com.yellowcong.sys.listener.KafkaConsumerListener" />
<bean id="consumerFactory" class="org.springframework.kafka.core.DefaultKafkaConsumerFactory">
<constructor-arg>
<ref bean="consumerProperties"/>
constructor-arg>
bean>
<bean id="containerProperties" class="org.springframework.kafka.listener.config.ContainerProperties">
<constructor-arg value="${kafka.topic.name}"/>
<property name="messageListener" ref="kafkaConsumerListener"/>
bean>
<bean id="messageListenerContainer" class="org.springframework.kafka.listener.KafkaMessageListenerContainer" init-method="doStart">
<constructor-arg ref="consumerFactory"/>
<constructor-arg ref="containerProperties"/>
bean>
创建一个 kafka 的消费者:需要实现 MessageListener 接口,然后重写里面的 onMessage 方法,实现对消息的监听处理。
package com.yellowcong.sys.listener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.listener.MessageListener;
import com.yellowcong.jump.service.PvuvService;
/**
* 代码创建: yellowcong
* 创建日期: 2019年3月19日
* 功能描述:
*/
public class KafkaConsumerListener implements MessageListener<String, String> {
@Autowired
PvuvService logService;
@Override
public void onMessage(ConsumerRecord<String, String> data) {
//监听messge的处理
// logService.countPvUv(data.value());
System.out.println("获取到数据:\t"+data.value());
}
}
https://blog.csdn.net/wu18296184782/article/details/80164190