Connecting Spring Boot to Kafka
1. Create a Spring Boot project and import dependencies
Create a Spring Boot project and inherit from the Spring Boot parent POM, which pins the versions of dependencies and plugins:
xml
<parent>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-parent</artifactId>
    <version>2.2.5.RELEASE</version>
    <relativePath/>
</parent>
The complete pom.xml:
xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.xushuai</groupId>
    <artifactId>kafka02-boot</artifactId>
    <version>1.0-SNAPSHOT</version>

    <!-- Spring Boot parent project -->
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.2.5.RELEASE</version>
        <relativePath/>
    </parent>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <!-- Redis dependency (used later for duplicate-consumption protection) -->
        <!--
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-redis</artifactId>
        </dependency>
        -->
        <!-- Kafka dependency -->
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
2. Add the configuration
With the plain Java client, the Kafka connection settings go into a Properties object; with Spring Boot, the same settings move into application.yml (a comparison sketch follows the YAML below):
yml
spring:
  application:
    name: springboot-kafkademo
  kafka:
    # Kafka broker addresses
    bootstrap-servers: 192.168.200.131:9092,192.168.200.132:9092,192.168.200.133:9092
    # Producer settings
    producer:
      # acks controls how strictly a write must be replicated before the broker acknowledges it:
      # 0 (no acknowledgment, fastest), 1 (leader only), -1/all (all in-sync replicas, slowest but safest)
      acks: 1
      # Number of retries when a send fails
      retries: 0
      # Batch size in bytes for batched sends
      batch-size: 16384
      # Total memory (bytes) the producer may use to buffer records
      buffer-memory: 3355443
      # Serializers for the message key and value
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    consumer:
      # Disable Kafka's automatic offset commits (we commit manually below)
      enable-auto-commit: false
      # Default consumer group id
      group-id: test-consumer-group
      # latest: when a committed offset exists for a partition, resume from it; when none exists,
      # consume only newly produced records. Once offsets have been committed, latest and earliest
      # behave the same; without committed offsets, latest skips the data already in the topic.
      auto-offset-reset: latest
      # Auto-commit interval in milliseconds (only relevant when enable-auto-commit is true)
      auto-commit-interval: 100
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
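For comparison, here is a minimal sketch of the producer half of this configuration with the plain Java client (kafka-clients is pulled in transitively by spring-kafka; the broker addresses and topic are the same placeholders used throughout this section):
java
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class PlainProducerDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Same settings as the spring.kafka.producer section above
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "192.168.200.131:9092,192.168.200.132:9092,192.168.200.133:9092");
        props.put(ProducerConfig.ACKS_CONFIG, "1");
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("xushuai", "hello from the plain client"));
        }
    }
}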
3. Producing messages
java
package com.baidu.kafka.controller;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import java.util.UUID;

/**
 * @Author: 小许好楠
 * @name: ProductorController
 * @Date: 2024/7/19 10:32
 */
@RestController
@RequestMapping("/send")
public class ProductorController {

    @Autowired
    private KafkaTemplate<String, String> template;

    // Topic to send to
    private static final String topic = "xushuai";

    // Synchronous send: block until the broker acknowledges the record
    @RequestMapping("/msg1/{msg}")
    public String sendMsg1(@PathVariable("msg") String msg) {
        try {
            // Optionally generate a unique key for the record:
            // String key = UUID.randomUUID().toString();
            // Send the message; calling get() on the future blocks until the result arrives
            ListenableFuture<SendResult<String, String>> send = template.send(topic, msg);
            System.out.println(send.get().toString());
        } catch (Exception e) {
            e.printStackTrace();
        }
        return "sent";
    }

    /**
     * Asynchronous send: register a callback instead of blocking
     */
    @RequestMapping("/msg2/{msg}")
    public String sendMsg2(@PathVariable("msg") String msg) {
        // send(topic, message); the overload send(topic, key, message) also accepts a record key.
        // The returned future reports whether the send succeeded.
        ListenableFuture<SendResult<String, String>> send = template.send(topic, msg);
        // Asynchronous callback
        send.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
            @Override
            public void onFailure(Throwable throwable) {
                // Runs when the send fails: alert the developers, retry, or log it
                System.out.println("send failed: retry or log the error");
            }

            @Override
            public void onSuccess(SendResult<String, String> result) {
                // Runs when the send succeeds
                System.out.println("send succeeded");
            }
        });
        return "Success";
    }
}
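With the application running on the default port 8080 (an assumption; no server.port is set in the YAML above), requesting http://localhost:8080/send/msg1/hello exercises the blocking send, and /send/msg2/hello the callback-based one.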
4. Consuming messages
The first snippet below is an excerpt from a business-flavored consumer: logger, redisTemplate, leaseService, and javaMailSender are assumed to be autowired fields of the enclosing class.
java
/**
 * Receive messages (simple listener)
 */
@KafkaListener(topics = topic, groupId = "group.demo")
public void listener(String input) {
    logger.info("input value: {}", input);
}

@KafkaListener(topics = {"AAA"}, containerFactory = "ackContainerFactory", id = "ack")
public void msg(ConsumerRecord<String, String> msgObject, Acknowledgment ack) {
    String brokerKey = msgObject.key();
    // setNX: set only if the key does not exist yet.
    // true: first time this key is stored; false: the key already exists,
    // so this message has been processed before.
    Boolean b = redisTemplate.opsForValue().setIfAbsent(brokerKey, "sss");
    if (!Boolean.TRUE.equals(b)) {
        // Duplicate consumption; simply printing is a placeholder, not a real strategy
        System.out.println("duplicate consumption");
    } else {
        String msg = msgObject.value();
        Lease lease = leaseService.findById(msg);
        leaseService.updateStatus(lease.getId());
        SimpleMailMessage mailMessage = new SimpleMailMessage();
        mailMessage.setSubject("[Weishu Apartments] Friendly reminder");    // subject
        mailMessage.setText("Your lease period has ended; please renew promptly. Ignore this if you have already renewed.");  // body
        mailMessage.setFrom("851900534@qq.com");
        mailMessage.setTo(lease.getEmail());
        javaMailSender.send(mailMessage);
        System.out.println("payment reminder sent");
        // Message received; update the database next
        System.out.println("message is: " + msg);
        System.out.println("updating the database");
    }
    System.out.println("unique message key: " + msgObject.key());
    // Manual commit: tells the broker to advance the offset. This line must run
    // only after the consumer's business logic has completed.
    ack.acknowledge();
}
java
package com.xs.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

// Register this class as a bean managed by the IoC container
@Component
public class KafkaConsumer {

    @Autowired
    private RedisTemplate<String, String> redisTemplate;

    /**
     * Receive messages (simple listener, kept for reference)
     */
    // @KafkaListener(topics = {"xushuai"}, groupId = "group.demo")
    // public void listener(String msg) {
    //     System.out.println("==========: " + msg);
    // }

    @KafkaListener(topics = {"xushuai"}, containerFactory = "ackContainerFactory", id = "ack")
    public void msg(ConsumerRecord<String, String> msgObject, Acknowledgment ack) {
        String msgKey = msgObject.key();
        // setNX: set only if the key does not exist yet.
        // true: first time this key is stored; false: the key already exists,
        // so this message has been processed before.
        Boolean b = redisTemplate.opsForValue().setIfAbsent(msgKey, "sss");
        if (!Boolean.TRUE.equals(b)) {
            // Duplicate consumption; simply printing is a placeholder, not a real strategy
            System.out.println("duplicate consumption");
        } else {
            String msg = msgObject.value();
            // Message received; continue processing
            System.out.println("message is: " + msg);
        }
        System.out.println("unique message key: " + msgObject.key());
        // Manual commit: tells the broker to advance the offset. This line must run
        // only after the consumer's business logic has completed.
        ack.acknowledge();
    }
}
Manual consumer acknowledgment
Add a configuration class that enables manual acknowledgment:
java
package com.xs.kafka.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties;

// Configuration class
@Configuration
public class Kafkaconfig {

    // Listener container factory that requires manual, immediate offset commits
    @Bean("ackContainerFactory")
    public ConcurrentKafkaListenerContainerFactory<String, String> ackContainerFactory(
            ConsumerFactory<String, String> consumerFactory) {
        // Create the concurrent Kafka listener container factory
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        // Configure the concurrent listener factory: manual acknowledgment
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        factory.setConsumerFactory(consumerFactory);
        return factory;
    }
}
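With MANUAL_IMMEDIATE, the offset is committed as soon as Acknowledgment.acknowledge() is called; with AckMode.MANUAL, acknowledgments are collected and committed together after the records from the current poll have been processed. Either mode requires enable-auto-commit: false, which the YAML above already sets.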
Reference the manual-ack container factory on the consumer method:
java
/**
 * Consume with manual acknowledgment
 * @param msgRecord the message record fetched from Kafka
 * @param ack used to acknowledge manually
 */
@KafkaListener(topics = {"xushuai"}, containerFactory = "ackContainerFactory")
public void listener(ConsumerRecord<String, String> msgRecord, Acknowledgment ack) {
    // Key of the current message
    String key = msgRecord.key();
    // Body of the current message
    String msg = msgRecord.value();
    // Topic the message came from
    String topic = msgRecord.topic();
    // Partition the message came from
    int partition = msgRecord.partition();
    // Offset of the message within its partition
    long offset = msgRecord.offset();
    System.out.println(key + " : " + msg + " : " + topic + " : " + partition + " : " + offset);
    // Commit the offset manually
    ack.acknowledge();
}
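If acknowledge() is never called (for example, because the handler throws), the offset is not committed; after a restart or rebalance the consumer resumes from the last committed offset and the record is delivered again. The business logic therefore needs to tolerate duplicates, which motivates the next section.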
Preventing duplicate consumption
java
/**
 * Prevent duplicate consumption
 * @param msgRecord the message record fetched from Kafka
 * @param ack used to acknowledge manually
 */
@KafkaListener(topics = {"xushuai"}, containerFactory = "ackContainerFactory")
public void listener(ConsumerRecord<String, String> msgRecord, Acknowledgment ack) {
    // Key of the current message
    String key = msgRecord.key();
    // Store the key in Redis as a uniqueness marker.
    // setnx key value: sets the key only when it does not exist yet
    Boolean flag = redisTemplate.opsForValue().setIfAbsent(key, "a", 60, TimeUnit.SECONDS);
    if (!Boolean.TRUE.equals(flag)) {
        // Duplicate consumption: the key was already present, so do nothing
        System.out.println("duplicate consumption");
    } else {
        // Body of the current message
        String msg = msgRecord.value();
        // Topic the message came from
        String topic = msgRecord.topic();
        // Partition the message came from
        int partition = msgRecord.partition();
        // Offset of the message within its partition
        long offset = msgRecord.offset();
        System.out.println(key + " : " + msg + " : " + topic + " : " + partition + " : " + offset);
    }
    // Commit the offset manually
    ack.acknowledge();
}
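One gap in this pattern: if the business logic throws after setIfAbsent succeeds, the marker stays in Redis and the retried message is wrongly treated as a duplicate. Below is a minimal sketch of one way to close that gap, assuming the same redisTemplate field and ackContainerFactory as above (safeListener is a hypothetical name): roll the marker back on failure and skip the acknowledgment. Note that skipping acknowledge() alone only causes redelivery after a restart or rebalance, or immediately if a seek-to-current error handler is configured on the factory.
java
@KafkaListener(topics = {"xushuai"}, containerFactory = "ackContainerFactory")
public void safeListener(ConsumerRecord<String, String> msgRecord, Acknowledgment ack) {
    String key = msgRecord.key();
    Boolean first = redisTemplate.opsForValue().setIfAbsent(key, "a", 60, TimeUnit.SECONDS);
    if (!Boolean.TRUE.equals(first)) {
        // Already processed: just commit the offset and move on
        ack.acknowledge();
        return;
    }
    try {
        // Business logic goes here
        System.out.println("processing: " + msgRecord.value());
        // Commit only after the work succeeded
        ack.acknowledge();
    } catch (Exception e) {
        // Processing failed: remove the marker so the redelivered record is not
        // mistaken for a duplicate, and skip acknowledge() so the offset stays uncommitted
        redisTemplate.delete(key);
        e.printStackTrace();
    }
}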